1 | /* |
2 | * Copyright (C) 2008-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #pragma once |
27 | |
28 | #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64)) |
29 | |
30 | #include "AssemblerBuffer.h" |
31 | #include "AssemblerCommon.h" |
32 | #include "JITCompilationEffort.h" |
33 | #include "RegisterInfo.h" |
34 | #include <limits.h> |
35 | #include <stdint.h> |
36 | #include <wtf/Assertions.h> |
37 | #include <wtf/Vector.h> |
38 | |
39 | namespace JSC { |
40 | |
41 | inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; } |
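// CAN_SIGN_EXTEND_8_32 is used throughout to pick the short immediate encodings: when an
// immediate fits in a sign-extended byte, the emitters below choose the imm8 form of an
// instruction (e.g. opcode 0x83 for group-1 arithmetic) instead of the full imm32 form (0x81).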
42 | |
namespace X86Registers {
44 | |
45 | #if COMPILER(MSVC) |
46 | #define JSC_X86_ASM_REGISTER_ID_ENUM_BASE_TYPE |
47 | #else |
48 | #define JSC_X86_ASM_REGISTER_ID_ENUM_BASE_TYPE : int8_t |
49 | #endif |
50 | |
51 | #define REGISTER_ID(id, name, res, cs) id, |
52 | |
53 | typedef enum JSC_X86_ASM_REGISTER_ID_ENUM_BASE_TYPE { |
54 | FOR_EACH_GP_REGISTER(REGISTER_ID) |
55 | InvalidGPRReg = -1, |
56 | } RegisterID; |
57 | |
58 | typedef enum JSC_X86_ASM_REGISTER_ID_ENUM_BASE_TYPE { |
59 | FOR_EACH_SP_REGISTER(REGISTER_ID) |
60 | } SPRegisterID; |
61 | |
62 | typedef enum JSC_X86_ASM_REGISTER_ID_ENUM_BASE_TYPE { |
63 | FOR_EACH_FP_REGISTER(REGISTER_ID) |
64 | InvalidFPRReg = -1, |
65 | } XMMRegisterID; |
66 | |
67 | #undef REGISTER_ID |
68 | |
69 | } // namespace X86Registers |
70 | |
71 | class X86Assembler { |
72 | public: |
73 | typedef X86Registers::RegisterID RegisterID; |
74 | |
75 | static constexpr RegisterID firstRegister() { return X86Registers::eax; } |
76 | static constexpr RegisterID lastRegister() |
77 | { |
78 | #if CPU(X86_64) |
79 | return X86Registers::r15; |
80 | #else |
81 | return X86Registers::edi; |
82 | #endif |
83 | } |
84 | static constexpr unsigned numberOfRegisters() { return lastRegister() - firstRegister() + 1; } |
85 | |
86 | typedef X86Registers::SPRegisterID SPRegisterID; |
87 | |
88 | static constexpr SPRegisterID firstSPRegister() { return X86Registers::eip; } |
89 | static constexpr SPRegisterID lastSPRegister() { return X86Registers::eflags; } |
90 | static constexpr unsigned numberOfSPRegisters() { return lastSPRegister() - firstSPRegister() + 1; } |
91 | |
92 | typedef X86Registers::XMMRegisterID XMMRegisterID; |
93 | typedef XMMRegisterID FPRegisterID; |
94 | |
95 | static constexpr FPRegisterID firstFPRegister() { return X86Registers::xmm0; } |
96 | static constexpr FPRegisterID lastFPRegister() |
97 | { |
98 | #if CPU(X86_64) |
99 | return X86Registers::xmm15; |
100 | #else |
101 | return X86Registers::xmm7; |
102 | #endif |
103 | } |
104 | static constexpr unsigned numberOfFPRegisters() { return lastFPRegister() - firstFPRegister() + 1; } |
105 | |
106 | static const char* gprName(RegisterID id) |
107 | { |
108 | ASSERT(id >= firstRegister() && id <= lastRegister()); |
109 | static const char* const nameForRegister[numberOfRegisters()] = { |
110 | #define REGISTER_NAME(id, name, res, cs) name, |
111 | FOR_EACH_GP_REGISTER(REGISTER_NAME) |
112 | #undef REGISTER_NAME |
113 | }; |
114 | return nameForRegister[id]; |
115 | } |
116 | |
117 | static const char* sprName(SPRegisterID id) |
118 | { |
119 | ASSERT(id >= firstSPRegister() && id <= lastSPRegister()); |
120 | static const char* const nameForRegister[numberOfSPRegisters()] = { |
121 | #define REGISTER_NAME(id, name, res, cs) name, |
122 | FOR_EACH_SP_REGISTER(REGISTER_NAME) |
123 | #undef REGISTER_NAME |
124 | }; |
125 | return nameForRegister[id]; |
126 | } |
127 | |
128 | static const char* fprName(FPRegisterID reg) |
129 | { |
130 | ASSERT(reg >= firstFPRegister() && reg <= lastFPRegister()); |
131 | static const char* const nameForRegister[numberOfFPRegisters()] = { |
132 | #define REGISTER_NAME(id, name, res, cs) name, |
133 | FOR_EACH_FP_REGISTER(REGISTER_NAME) |
134 | #undef REGISTER_NAME |
135 | }; |
136 | return nameForRegister[reg]; |
137 | } |
138 | |
139 | typedef enum { |
140 | ConditionO, |
141 | ConditionNO, |
142 | ConditionB, |
143 | ConditionAE, |
144 | ConditionE, |
145 | ConditionNE, |
146 | ConditionBE, |
147 | ConditionA, |
148 | ConditionS, |
149 | ConditionNS, |
150 | ConditionP, |
151 | ConditionNP, |
152 | ConditionL, |
153 | ConditionGE, |
154 | ConditionLE, |
155 | ConditionG, |
156 | |
157 | ConditionC = ConditionB, |
158 | ConditionNC = ConditionAE, |
159 | } Condition; |
160 | |
161 | private: |
// OneByteOpcodeID defines the opcodes for one-byte instructions. It also contains the prefix
// bytes used by two-byte instructions.
// TwoByteOpcodeID and ThreeByteOpcodeID define the opcodes for multi-byte instructions.
//
// The encoding of each instruction can be found in the Intel Architecture Manual in the appendix
// "Opcode Map."
//
// Each opcode can have a suffix describing the type of its arguments. The full list of suffixes is
// in the "Key to Abbreviations" section of the "Opcode Map".
// The most common argument types are:
//     -E: The argument is either a GPR or a memory address.
//     -G: The argument is a GPR.
//     -I: The argument is an immediate.
// The most common sizes are:
//     -v: 32 or 64 bits, depending on the operand-size attribute.
//     -z: 32 bits in both 32-bit and 64-bit mode. Common for immediate values.
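//
// For example, OP_ADD_EvGv below is opcode 0x01, "ADD Ev, Gv": add a 32/64-bit GPR into a
// register-or-memory operand. OP_GROUP1_EvIz (0x81) is a group opcode whose operation (ADD,
// OR, ...) is selected by the reg field of the following ModRM byte, followed by an imm32/imm16.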
178 | typedef enum { |
179 | OP_ADD_EbGb = 0x00, |
180 | OP_ADD_EvGv = 0x01, |
181 | OP_ADD_GvEv = 0x03, |
182 | OP_ADD_EAXIv = 0x05, |
183 | OP_OR_EvGb = 0x08, |
184 | OP_OR_EvGv = 0x09, |
185 | OP_OR_GvEv = 0x0B, |
186 | OP_OR_EAXIv = 0x0D, |
187 | OP_2BYTE_ESCAPE = 0x0F, |
188 | OP_AND_EvGb = 0x20, |
189 | OP_AND_EvGv = 0x21, |
190 | OP_AND_GvEv = 0x23, |
191 | OP_SUB_EvGb = 0x28, |
192 | OP_SUB_EvGv = 0x29, |
193 | OP_SUB_GvEv = 0x2B, |
194 | OP_SUB_EAXIv = 0x2D, |
195 | PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E, |
196 | OP_XOR_EvGb = 0x30, |
197 | OP_XOR_EvGv = 0x31, |
198 | OP_XOR_GvEv = 0x33, |
199 | OP_XOR_EAXIv = 0x35, |
200 | OP_CMP_EvGv = 0x39, |
201 | OP_CMP_GvEv = 0x3B, |
202 | OP_CMP_EAXIv = 0x3D, |
203 | #if CPU(X86_64) |
204 | PRE_REX = 0x40, |
205 | #endif |
206 | OP_PUSH_EAX = 0x50, |
207 | OP_POP_EAX = 0x58, |
208 | #if CPU(X86_64) |
209 | OP_MOVSXD_GvEv = 0x63, |
210 | #endif |
211 | PRE_GS = 0x65, |
212 | PRE_OPERAND_SIZE = 0x66, |
213 | PRE_SSE_66 = 0x66, |
214 | OP_PUSH_Iz = 0x68, |
215 | OP_IMUL_GvEvIz = 0x69, |
216 | OP_GROUP1_EbIb = 0x80, |
217 | OP_GROUP1_EvIz = 0x81, |
218 | OP_GROUP1_EvIb = 0x83, |
219 | OP_TEST_EbGb = 0x84, |
220 | OP_TEST_EvGv = 0x85, |
221 | OP_XCHG_EvGb = 0x86, |
222 | OP_XCHG_EvGv = 0x87, |
223 | OP_MOV_EbGb = 0x88, |
224 | OP_MOV_EvGv = 0x89, |
225 | OP_MOV_GvEv = 0x8B, |
226 | OP_LEA = 0x8D, |
227 | OP_GROUP1A_Ev = 0x8F, |
228 | OP_NOP = 0x90, |
229 | OP_XCHG_EAX = 0x90, |
230 | OP_PAUSE = 0x90, |
231 | OP_CDQ = 0x99, |
232 | OP_MOV_EAXOv = 0xA1, |
233 | OP_MOV_OvEAX = 0xA3, |
234 | OP_TEST_ALIb = 0xA8, |
235 | OP_TEST_EAXIv = 0xA9, |
236 | OP_MOV_EAXIv = 0xB8, |
237 | OP_GROUP2_EvIb = 0xC1, |
238 | OP_RET = 0xC3, |
239 | OP_GROUP11_EvIb = 0xC6, |
240 | OP_GROUP11_EvIz = 0xC7, |
241 | OP_INT3 = 0xCC, |
242 | OP_GROUP2_Ev1 = 0xD1, |
243 | OP_GROUP2_EvCL = 0xD3, |
244 | OP_ESCAPE_D9 = 0xD9, |
245 | OP_ESCAPE_DD = 0xDD, |
246 | OP_CALL_rel32 = 0xE8, |
247 | OP_JMP_rel32 = 0xE9, |
248 | PRE_LOCK = 0xF0, |
249 | PRE_SSE_F2 = 0xF2, |
250 | PRE_SSE_F3 = 0xF3, |
251 | OP_HLT = 0xF4, |
252 | OP_GROUP3_Eb = 0xF6, |
253 | OP_GROUP3_EbIb = 0xF6, |
254 | OP_GROUP3_Ev = 0xF7, |
OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev takes an immediate when the instruction is a TEST.
256 | OP_GROUP5_Ev = 0xFF, |
257 | } OneByteOpcodeID; |
258 | |
259 | typedef enum { |
OP2_UD2 = 0x0B,
261 | OP2_MOVSD_VsdWsd = 0x10, |
262 | OP2_MOVSD_WsdVsd = 0x11, |
263 | OP2_MOVSS_VsdWsd = 0x10, |
264 | OP2_MOVSS_WsdVsd = 0x11, |
265 | OP2_MOVAPD_VpdWpd = 0x28, |
266 | OP2_MOVAPS_VpdWpd = 0x28, |
267 | OP2_CVTSI2SD_VsdEd = 0x2A, |
268 | OP2_CVTTSD2SI_GdWsd = 0x2C, |
269 | OP2_CVTTSS2SI_GdWsd = 0x2C, |
270 | OP2_UCOMISD_VsdWsd = 0x2E, |
271 | OP2_RDTSC = 0x31, |
272 | OP2_3BYTE_ESCAPE_3A = 0x3A, |
273 | OP2_CMOVCC = 0x40, |
274 | OP2_ADDSD_VsdWsd = 0x58, |
275 | OP2_MULSD_VsdWsd = 0x59, |
276 | OP2_CVTSD2SS_VsdWsd = 0x5A, |
277 | OP2_CVTSS2SD_VsdWsd = 0x5A, |
278 | OP2_SUBSD_VsdWsd = 0x5C, |
279 | OP2_DIVSD_VsdWsd = 0x5E, |
280 | OP2_MOVMSKPD_VdEd = 0x50, |
281 | OP2_SQRTSD_VsdWsd = 0x51, |
282 | OP2_ANDPS_VpdWpd = 0x54, |
283 | OP2_ANDNPD_VpdWpd = 0x55, |
284 | OP2_ORPS_VpdWpd = 0x56, |
285 | OP2_XORPD_VpdWpd = 0x57, |
286 | OP2_MOVD_VdEd = 0x6E, |
287 | OP2_MOVD_EdVd = 0x7E, |
288 | OP2_JCC_rel32 = 0x80, |
289 | OP_SETCC = 0x90, |
290 | OP2_CPUID = 0xA2, |
291 | OP2_3BYTE_ESCAPE_AE = 0xAE, |
292 | OP2_IMUL_GvEv = 0xAF, |
293 | OP2_CMPXCHGb = 0xB0, |
294 | OP2_CMPXCHG = 0xB1, |
295 | OP2_MOVZX_GvEb = 0xB6, |
296 | OP2_POPCNT = 0xB8, |
297 | OP2_GROUP_BT_EvIb = 0xBA, |
298 | OP2_BT_EvEv = 0xA3, |
299 | OP2_BSF = 0xBC, |
300 | OP2_TZCNT = 0xBC, |
301 | OP2_BSR = 0xBD, |
302 | OP2_LZCNT = 0xBD, |
303 | OP2_MOVSX_GvEb = 0xBE, |
304 | OP2_MOVZX_GvEw = 0xB7, |
305 | OP2_MOVSX_GvEw = 0xBF, |
306 | OP2_XADDb = 0xC0, |
307 | OP2_XADD = 0xC1, |
308 | OP2_PEXTRW_GdUdIb = 0xC5, |
309 | OP2_BSWAP = 0xC8, |
310 | OP2_PSLLQ_UdqIb = 0x73, |
311 | OP2_PSRLQ_UdqIb = 0x73, |
OP2_POR_VdqWdq = 0xEB,
313 | } TwoByteOpcodeID; |
314 | |
315 | typedef enum { |
316 | OP3_ROUNDSS_VssWssIb = 0x0A, |
317 | OP3_ROUNDSD_VsdWsdIb = 0x0B, |
318 | OP3_LFENCE = 0xE8, |
319 | OP3_MFENCE = 0xF0, |
320 | OP3_SFENCE = 0xF8, |
321 | } ThreeByteOpcodeID; |
322 | |
323 | struct VexPrefix { |
324 | enum : uint8_t { |
325 | TwoBytes = 0xC5, |
326 | ThreeBytes = 0xC4 |
327 | }; |
328 | }; |
329 | enum class VexImpliedBytes : uint8_t { |
330 | TwoBytesOp = 1, |
331 | ThreeBytesOp38 = 2, |
332 | ThreeBytesOp3A = 3 |
333 | }; |
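// VexPrefix and VexImpliedBytes follow the AVX VEX encoding: 0xC5 introduces the two-byte VEX
// form and 0xC4 the three-byte form, whose m-mmmm field selects the implied leading opcode
// bytes (1 = 0F, 2 = 0F 38, 3 = 0F 3A).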
334 | |
335 | TwoByteOpcodeID cmovcc(Condition cond) |
336 | { |
337 | return (TwoByteOpcodeID)(OP2_CMOVCC + cond); |
338 | } |
339 | |
340 | TwoByteOpcodeID jccRel32(Condition cond) |
341 | { |
342 | return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond); |
343 | } |
344 | |
345 | TwoByteOpcodeID setccOpcode(Condition cond) |
346 | { |
347 | return (TwoByteOpcodeID)(OP_SETCC + cond); |
348 | } |
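// The Jcc/SETcc/CMOVcc opcode families encode the condition in their low four bits, so the
// helpers above simply add the Condition value to the base opcode. For example,
// jccRel32(ConditionE) yields 0x84 (JE rel32, i.e. 0F 84) and setccOpcode(ConditionE)
// yields 0x94 (SETE).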
349 | |
350 | typedef enum { |
351 | GROUP1_OP_ADD = 0, |
352 | GROUP1_OP_OR = 1, |
353 | GROUP1_OP_ADC = 2, |
354 | GROUP1_OP_AND = 4, |
355 | GROUP1_OP_SUB = 5, |
356 | GROUP1_OP_XOR = 6, |
357 | GROUP1_OP_CMP = 7, |
358 | |
359 | GROUP1A_OP_POP = 0, |
360 | |
361 | GROUP2_OP_ROL = 0, |
362 | GROUP2_OP_ROR = 1, |
363 | GROUP2_OP_RCL = 2, |
364 | GROUP2_OP_RCR = 3, |
365 | |
366 | GROUP2_OP_SHL = 4, |
367 | GROUP2_OP_SHR = 5, |
368 | GROUP2_OP_SAR = 7, |
369 | |
370 | GROUP3_OP_TEST = 0, |
371 | GROUP3_OP_NOT = 2, |
372 | GROUP3_OP_NEG = 3, |
373 | GROUP3_OP_DIV = 6, |
374 | GROUP3_OP_IDIV = 7, |
375 | |
376 | GROUP5_OP_CALLN = 2, |
377 | GROUP5_OP_JMPN = 4, |
378 | GROUP5_OP_PUSH = 6, |
379 | |
380 | GROUP11_MOV = 0, |
381 | |
382 | GROUP14_OP_PSLLQ = 6, |
383 | GROUP14_OP_PSRLQ = 2, |
384 | |
385 | ESCAPE_D9_FSTP_singleReal = 3, |
386 | ESCAPE_DD_FSTP_doubleReal = 3, |
387 | |
388 | GROUP_BT_OP_BT = 4, |
389 | } GroupOpcodeID; |
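// Group opcodes share one opcode byte between several instructions; the GroupOpcodeID is
// emitted in the reg field of the ModRM byte (the "/digit" notation in the Intel manual).
// For example, 0x83 with /0 (GROUP1_OP_ADD) is ADD r/m32, imm8, while 0x83 with /5
// (GROUP1_OP_SUB) is SUB r/m32, imm8.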
390 | |
391 | class X86InstructionFormatter; |
392 | public: |
393 | |
394 | X86Assembler() |
395 | : m_indexOfLastWatchpoint(INT_MIN) |
396 | , m_indexOfTailOfLastWatchpoint(INT_MIN) |
397 | { |
398 | } |
399 | |
400 | AssemblerBuffer& buffer() { return m_formatter.m_buffer; } |
401 | |
402 | // Stack operations: |
403 | |
404 | void push_r(RegisterID reg) |
405 | { |
406 | m_formatter.oneByteOp(OP_PUSH_EAX, reg); |
407 | } |
408 | |
409 | void pop_r(RegisterID reg) |
410 | { |
411 | m_formatter.oneByteOp(OP_POP_EAX, reg); |
412 | } |
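// PUSH r and POP r encode the register in the opcode byte itself: the formatter folds the low
// three bits of the register number into OP_PUSH_EAX (0x50) or OP_POP_EAX (0x58), so for
// example push_r(X86Registers::ecx) should emit the single byte 0x51 (with a REX.B prefix
// first for r8-r15 on x86-64).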
413 | |
414 | void push_i32(int imm) |
415 | { |
416 | m_formatter.oneByteOp(OP_PUSH_Iz); |
417 | m_formatter.immediate32(imm); |
418 | } |
419 | |
420 | void push_m(int offset, RegisterID base) |
421 | { |
422 | m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset); |
423 | } |
424 | |
425 | void pop_m(int offset, RegisterID base) |
426 | { |
427 | m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset); |
428 | } |
429 | |
430 | // Arithmetic operations: |
431 | |
432 | #if !CPU(X86_64) |
433 | void adcl_im(int imm, const void* addr) |
434 | { |
435 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
436 | m_formatter.oneByteOpAddr(OP_GROUP1_EvIb, GROUP1_OP_ADC, bitwise_cast<uint32_t>(addr)); |
437 | m_formatter.immediate8(imm); |
438 | } else { |
439 | m_formatter.oneByteOpAddr(OP_GROUP1_EvIz, GROUP1_OP_ADC, bitwise_cast<uint32_t>(addr)); |
440 | m_formatter.immediate32(imm); |
441 | } |
442 | } |
443 | #endif |
444 | |
445 | void addl_rr(RegisterID src, RegisterID dst) |
446 | { |
447 | m_formatter.oneByteOp(OP_ADD_EvGv, src, dst); |
448 | } |
449 | |
450 | void addl_mr(int offset, RegisterID base, RegisterID dst) |
451 | { |
452 | m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset); |
453 | } |
454 | |
455 | void addl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
456 | { |
457 | m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, index, scale, offset); |
458 | } |
459 | |
460 | #if !CPU(X86_64) |
461 | void addl_mr(const void* addr, RegisterID dst) |
462 | { |
463 | m_formatter.oneByteOpAddr(OP_ADD_GvEv, dst, bitwise_cast<uint32_t>(addr)); |
464 | } |
465 | #endif |
466 | |
467 | void addl_rm(RegisterID src, int offset, RegisterID base) |
468 | { |
469 | m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset); |
470 | } |
471 | |
472 | void addl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
473 | { |
474 | m_formatter.oneByteOp(OP_ADD_EvGv, src, base, index, scale, offset); |
475 | } |
476 | |
477 | void addb_rm(RegisterID src, int offset, RegisterID base) |
478 | { |
479 | m_formatter.oneByteOp8(OP_ADD_EbGb, src, base, offset); |
480 | } |
481 | |
482 | void addb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
483 | { |
484 | m_formatter.oneByteOp8(OP_ADD_EbGb, src, base, index, scale, offset); |
485 | } |
486 | |
487 | void addw_rm(RegisterID src, int offset, RegisterID base) |
488 | { |
489 | m_formatter.prefix(PRE_OPERAND_SIZE); |
490 | m_formatter.oneByteOp8(OP_ADD_EvGv, src, base, offset); |
491 | } |
492 | |
493 | void addw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
494 | { |
495 | m_formatter.prefix(PRE_OPERAND_SIZE); |
496 | m_formatter.oneByteOp8(OP_ADD_EvGv, src, base, index, scale, offset); |
497 | } |
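// The addw_* emitters above are the 32-bit encodings preceded by the 0x66 operand-size prefix
// (PRE_OPERAND_SIZE); the same pattern is used by the other *w* variants in this file.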
498 | |
499 | void addl_ir(int imm, RegisterID dst) |
500 | { |
501 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
502 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst); |
503 | m_formatter.immediate8(imm); |
504 | } else { |
505 | if (dst == X86Registers::eax) |
506 | m_formatter.oneByteOp(OP_ADD_EAXIv); |
507 | else |
508 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst); |
509 | m_formatter.immediate32(imm); |
510 | } |
511 | } |
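// For illustration, addl_ir(1, X86Registers::eax) should assemble to 83 C0 01 (ADD r/m32, imm8
// with a register-direct ModRM byte), while addl_ir(0x1000, X86Registers::eax) takes the short
// accumulator form 05 00 10 00 00 (ADD EAX, imm32).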
512 | |
513 | void addl_im(int imm, int offset, RegisterID base) |
514 | { |
515 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
516 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset); |
517 | m_formatter.immediate8(imm); |
518 | } else { |
519 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset); |
520 | m_formatter.immediate32(imm); |
521 | } |
522 | } |
523 | |
524 | void addl_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
525 | { |
526 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
527 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, index, scale, offset); |
528 | m_formatter.immediate8(imm); |
529 | } else { |
530 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, index, scale, offset); |
531 | m_formatter.immediate32(imm); |
532 | } |
533 | } |
534 | |
535 | void addb_im(int imm, int offset, RegisterID base) |
536 | { |
537 | m_formatter.oneByteOp8(OP_GROUP1_EbIb, GROUP1_OP_ADD, base, offset); |
538 | m_formatter.immediate8(imm); |
539 | } |
540 | |
541 | void addb_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
542 | { |
543 | m_formatter.oneByteOp8(OP_GROUP1_EbIb, GROUP1_OP_ADD, base, index, scale, offset); |
544 | m_formatter.immediate8(imm); |
545 | } |
546 | |
547 | void addw_im(int imm, int offset, RegisterID base) |
548 | { |
549 | m_formatter.prefix(PRE_OPERAND_SIZE); |
550 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
551 | m_formatter.oneByteOp8(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset); |
552 | m_formatter.immediate8(imm); |
553 | } else { |
554 | m_formatter.oneByteOp8(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset); |
555 | m_formatter.immediate16(imm); |
556 | } |
557 | } |
558 | |
559 | void addw_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
560 | { |
561 | m_formatter.prefix(PRE_OPERAND_SIZE); |
562 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
563 | m_formatter.oneByteOp8(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, index, scale, offset); |
564 | m_formatter.immediate8(imm); |
565 | } else { |
566 | m_formatter.oneByteOp8(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, index, scale, offset); |
567 | m_formatter.immediate16(imm); |
568 | } |
569 | } |
570 | |
571 | #if CPU(X86_64) |
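// The *q* variants below go through the oneByteOp64/twoByteOp64 formatter entry points, which
// emit a REX prefix with the W bit set (0x48 plus any register-extension bits) so that the same
// opcodes operate on 64-bit registers.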
572 | void addq_rr(RegisterID src, RegisterID dst) |
573 | { |
574 | m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst); |
575 | } |
576 | |
577 | void addq_mr(int offset, RegisterID base, RegisterID dst) |
578 | { |
579 | m_formatter.oneByteOp64(OP_ADD_GvEv, dst, base, offset); |
580 | } |
581 | |
582 | void addq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
583 | { |
584 | m_formatter.oneByteOp64(OP_ADD_GvEv, dst, base, index, scale, offset); |
585 | } |
586 | |
587 | void addq_rm(RegisterID src, int offset, RegisterID base) |
588 | { |
589 | m_formatter.oneByteOp64(OP_ADD_EvGv, src, base, offset); |
590 | } |
591 | |
592 | void addq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
593 | { |
594 | m_formatter.oneByteOp64(OP_ADD_EvGv, src, base, index, scale, offset); |
595 | } |
596 | |
597 | void addq_ir(int imm, RegisterID dst) |
598 | { |
599 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
600 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst); |
601 | m_formatter.immediate8(imm); |
602 | } else { |
603 | if (dst == X86Registers::eax) |
604 | m_formatter.oneByteOp64(OP_ADD_EAXIv); |
605 | else |
606 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst); |
607 | m_formatter.immediate32(imm); |
608 | } |
609 | } |
610 | |
611 | void addq_im(int imm, int offset, RegisterID base) |
612 | { |
613 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
614 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset); |
615 | m_formatter.immediate8(imm); |
616 | } else { |
617 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset); |
618 | m_formatter.immediate32(imm); |
619 | } |
620 | } |
621 | |
622 | void addq_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
623 | { |
624 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
625 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, index, scale, offset); |
626 | m_formatter.immediate8(imm); |
627 | } else { |
628 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, index, scale, offset); |
629 | m_formatter.immediate32(imm); |
630 | } |
631 | } |
632 | #else |
633 | void addl_im(int imm, const void* addr) |
634 | { |
635 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
636 | m_formatter.oneByteOpAddr(OP_GROUP1_EvIb, GROUP1_OP_ADD, bitwise_cast<uint32_t>(addr)); |
637 | m_formatter.immediate8(imm); |
638 | } else { |
639 | m_formatter.oneByteOpAddr(OP_GROUP1_EvIz, GROUP1_OP_ADD, bitwise_cast<uint32_t>(addr)); |
640 | m_formatter.immediate32(imm); |
641 | } |
642 | } |
643 | #endif |
644 | |
645 | void andl_rr(RegisterID src, RegisterID dst) |
646 | { |
647 | m_formatter.oneByteOp(OP_AND_EvGv, src, dst); |
648 | } |
649 | |
650 | void andl_mr(int offset, RegisterID base, RegisterID dst) |
651 | { |
652 | m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset); |
653 | } |
654 | |
655 | void andl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
656 | { |
657 | m_formatter.oneByteOp(OP_AND_GvEv, dst, base, index, scale, offset); |
658 | } |
659 | |
660 | void andw_mr(int offset, RegisterID base, RegisterID dst) |
661 | { |
662 | m_formatter.prefix(PRE_OPERAND_SIZE); |
663 | andl_mr(offset, base, dst); |
664 | } |
665 | |
666 | void andw_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
667 | { |
668 | m_formatter.prefix(PRE_OPERAND_SIZE); |
669 | andl_mr(offset, base, index, scale, dst); |
670 | } |
671 | |
672 | void andl_rm(RegisterID src, int offset, RegisterID base) |
673 | { |
674 | m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset); |
675 | } |
676 | |
677 | void andl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
678 | { |
679 | m_formatter.oneByteOp(OP_AND_EvGv, src, base, index, scale, offset); |
680 | } |
681 | |
682 | void andw_rm(RegisterID src, int offset, RegisterID base) |
683 | { |
684 | m_formatter.prefix(PRE_OPERAND_SIZE); |
685 | andl_rm(src, offset, base); |
686 | } |
687 | |
688 | void andw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
689 | { |
690 | m_formatter.prefix(PRE_OPERAND_SIZE); |
691 | andl_rm(src, offset, base, index, scale); |
692 | } |
693 | |
694 | void andb_rm(RegisterID src, int offset, RegisterID base) |
695 | { |
696 | m_formatter.oneByteOp(OP_AND_EvGb, src, base, offset); |
697 | } |
698 | |
699 | void andb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
700 | { |
701 | m_formatter.oneByteOp(OP_AND_EvGb, src, base, index, scale, offset); |
702 | } |
703 | |
704 | void andl_ir(int imm, RegisterID dst) |
705 | { |
706 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
707 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst); |
708 | m_formatter.immediate8(imm); |
709 | } else { |
710 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst); |
711 | m_formatter.immediate32(imm); |
712 | } |
713 | } |
714 | |
715 | void andl_im(int imm, int offset, RegisterID base) |
716 | { |
717 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
718 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset); |
719 | m_formatter.immediate8(imm); |
720 | } else { |
721 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset); |
722 | m_formatter.immediate32(imm); |
723 | } |
724 | } |
725 | |
726 | void andl_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
727 | { |
728 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
729 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, index, scale, offset); |
730 | m_formatter.immediate8(imm); |
731 | } else { |
732 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, index, scale, offset); |
733 | m_formatter.immediate32(imm); |
734 | } |
735 | } |
736 | |
737 | void andw_im(int imm, int offset, RegisterID base) |
738 | { |
739 | m_formatter.prefix(PRE_OPERAND_SIZE); |
740 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
741 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset); |
742 | m_formatter.immediate8(imm); |
743 | } else { |
744 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset); |
745 | m_formatter.immediate16(imm); |
746 | } |
747 | } |
748 | |
749 | void andw_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
750 | { |
751 | m_formatter.prefix(PRE_OPERAND_SIZE); |
752 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
753 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, index, scale, offset); |
754 | m_formatter.immediate8(imm); |
755 | } else { |
756 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, index, scale, offset); |
757 | m_formatter.immediate16(imm); |
758 | } |
759 | } |
760 | |
761 | void andb_im(int imm, int offset, RegisterID base) |
762 | { |
763 | m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_AND, base, offset); |
764 | m_formatter.immediate8(imm); |
765 | } |
766 | |
767 | void andb_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
768 | { |
769 | m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_AND, base, index, scale, offset); |
770 | m_formatter.immediate8(imm); |
771 | } |
772 | |
773 | #if CPU(X86_64) |
774 | void andq_rr(RegisterID src, RegisterID dst) |
775 | { |
776 | m_formatter.oneByteOp64(OP_AND_EvGv, src, dst); |
777 | } |
778 | |
779 | void andq_ir(int imm, RegisterID dst) |
780 | { |
781 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
782 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst); |
783 | m_formatter.immediate8(imm); |
784 | } else { |
785 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst); |
786 | m_formatter.immediate32(imm); |
787 | } |
788 | } |
789 | |
790 | void andq_mr(int offset, RegisterID base, RegisterID dst) |
791 | { |
792 | m_formatter.oneByteOp64(OP_AND_GvEv, dst, base, offset); |
793 | } |
794 | |
795 | void andq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
796 | { |
797 | m_formatter.oneByteOp64(OP_AND_GvEv, dst, base, index, scale, offset); |
798 | } |
799 | |
800 | void andq_rm(RegisterID src, int offset, RegisterID base) |
801 | { |
802 | m_formatter.oneByteOp64(OP_AND_EvGv, src, base, offset); |
803 | } |
804 | |
805 | void andq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
806 | { |
807 | m_formatter.oneByteOp64(OP_AND_EvGv, src, base, index, scale, offset); |
808 | } |
809 | |
810 | void andq_im(int imm, int offset, RegisterID base) |
811 | { |
812 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
813 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset); |
814 | m_formatter.immediate8(imm); |
815 | } else { |
816 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset); |
817 | m_formatter.immediate32(imm); |
818 | } |
819 | } |
820 | |
821 | void andq_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
822 | { |
823 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
824 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, base, index, scale, offset); |
825 | m_formatter.immediate8(imm); |
826 | } else { |
827 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, base, index, scale, offset); |
828 | m_formatter.immediate32(imm); |
829 | } |
830 | } |
831 | #else |
832 | void andl_im(int imm, const void* addr) |
833 | { |
834 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
835 | m_formatter.oneByteOpAddr(OP_GROUP1_EvIb, GROUP1_OP_AND, bitwise_cast<uint32_t>(addr)); |
836 | m_formatter.immediate8(imm); |
837 | } else { |
838 | m_formatter.oneByteOpAddr(OP_GROUP1_EvIz, GROUP1_OP_AND, bitwise_cast<uint32_t>(addr)); |
839 | m_formatter.immediate32(imm); |
840 | } |
841 | } |
842 | #endif |
843 | |
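// dec_r/inc_r (and the 64-bit forms below) use group 5 (opcode 0xFF), where /0 is INC and /1 is
// DEC; GROUP1_OP_ADD and GROUP1_OP_OR are reused here purely for their numeric values 0 and 1.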
844 | void dec_r(RegisterID dst) |
845 | { |
846 | m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP1_OP_OR, dst); |
847 | } |
848 | |
849 | #if CPU(X86_64) |
850 | void decq_r(RegisterID dst) |
851 | { |
852 | m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_OR, dst); |
853 | } |
854 | #endif // CPU(X86_64) |
855 | |
856 | // Only used for testing purposes. |
857 | void illegalInstruction() |
858 | { |
859 | m_formatter.twoByteOp(OP2_UD2); |
860 | } |
861 | |
862 | void inc_r(RegisterID dst) |
863 | { |
864 | m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP1_OP_ADD, dst); |
865 | } |
866 | |
867 | #if CPU(X86_64) |
868 | void incq_r(RegisterID dst) |
869 | { |
870 | m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, dst); |
871 | } |
872 | |
873 | void incq_m(int offset, RegisterID base) |
874 | { |
875 | m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, base, offset); |
876 | } |
877 | |
878 | void incq_m(int offset, RegisterID base, RegisterID index, int scale) |
879 | { |
880 | m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, base, index, scale, offset); |
881 | } |
882 | #endif // CPU(X86_64) |
883 | |
884 | void negl_r(RegisterID dst) |
885 | { |
886 | m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst); |
887 | } |
888 | |
889 | #if CPU(X86_64) |
890 | void negq_r(RegisterID dst) |
891 | { |
892 | m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NEG, dst); |
893 | } |
894 | |
895 | void negq_m(int offset, RegisterID base) |
896 | { |
897 | m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset); |
898 | } |
899 | |
900 | void negq_m(int offset, RegisterID base, RegisterID index, int scale) |
901 | { |
902 | m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NEG, base, index, scale, offset); |
903 | } |
904 | #endif |
905 | |
906 | void negl_m(int offset, RegisterID base) |
907 | { |
908 | m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset); |
909 | } |
910 | |
911 | void negl_m(int offset, RegisterID base, RegisterID index, int scale) |
912 | { |
913 | m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, index, scale, offset); |
914 | } |
915 | |
916 | void negw_m(int offset, RegisterID base) |
917 | { |
918 | m_formatter.prefix(PRE_OPERAND_SIZE); |
919 | negl_m(offset, base); |
920 | } |
921 | |
922 | void negw_m(int offset, RegisterID base, RegisterID index, int scale) |
923 | { |
924 | m_formatter.prefix(PRE_OPERAND_SIZE); |
925 | negl_m(offset, base, index, scale); |
926 | } |
927 | |
928 | void negb_m(int offset, RegisterID base) |
929 | { |
930 | m_formatter.oneByteOp(OP_GROUP3_Eb, GROUP3_OP_NEG, base, offset); |
931 | } |
932 | |
933 | void negb_m(int offset, RegisterID base, RegisterID index, int scale) |
934 | { |
935 | m_formatter.oneByteOp(OP_GROUP3_Eb, GROUP3_OP_NEG, base, index, scale, offset); |
936 | } |
937 | |
938 | void notl_r(RegisterID dst) |
939 | { |
940 | m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst); |
941 | } |
942 | |
943 | void notl_m(int offset, RegisterID base) |
944 | { |
945 | m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset); |
946 | } |
947 | |
948 | void notl_m(int offset, RegisterID base, RegisterID index, int scale) |
949 | { |
950 | m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, index, scale, offset); |
951 | } |
952 | |
953 | void notw_m(int offset, RegisterID base) |
954 | { |
955 | m_formatter.prefix(PRE_OPERAND_SIZE); |
956 | notl_m(offset, base); |
957 | } |
958 | |
959 | void notw_m(int offset, RegisterID base, RegisterID index, int scale) |
960 | { |
961 | m_formatter.prefix(PRE_OPERAND_SIZE); |
962 | notl_m(offset, base, index, scale); |
963 | } |
964 | |
965 | void notb_m(int offset, RegisterID base) |
966 | { |
967 | m_formatter.oneByteOp(OP_GROUP3_Eb, GROUP3_OP_NOT, base, offset); |
968 | } |
969 | |
970 | void notb_m(int offset, RegisterID base, RegisterID index, int scale) |
971 | { |
972 | m_formatter.oneByteOp(OP_GROUP3_Eb, GROUP3_OP_NOT, base, index, scale, offset); |
973 | } |
974 | |
975 | #if CPU(X86_64) |
976 | void notq_r(RegisterID dst) |
977 | { |
978 | m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NOT, dst); |
979 | } |
980 | |
981 | void notq_m(int offset, RegisterID base) |
982 | { |
983 | m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset); |
984 | } |
985 | |
986 | void notq_m(int offset, RegisterID base, RegisterID index, int scale) |
987 | { |
988 | m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NOT, base, index, scale, offset); |
989 | } |
990 | #endif |
991 | |
992 | void orl_rr(RegisterID src, RegisterID dst) |
993 | { |
994 | m_formatter.oneByteOp(OP_OR_EvGv, src, dst); |
995 | } |
996 | |
997 | void orl_mr(int offset, RegisterID base, RegisterID dst) |
998 | { |
999 | m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset); |
1000 | } |
1001 | |
1002 | void orl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
1003 | { |
1004 | m_formatter.oneByteOp(OP_OR_GvEv, dst, base, index, scale, offset); |
1005 | } |
1006 | |
1007 | void orl_rm(RegisterID src, int offset, RegisterID base) |
1008 | { |
1009 | m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset); |
1010 | } |
1011 | |
1012 | void orl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
1013 | { |
1014 | m_formatter.oneByteOp(OP_OR_EvGv, src, base, index, scale, offset); |
1015 | } |
1016 | |
1017 | void orw_rm(RegisterID src, int offset, RegisterID base) |
1018 | { |
1019 | m_formatter.prefix(PRE_OPERAND_SIZE); |
1020 | orl_rm(src, offset, base); |
1021 | } |
1022 | |
1023 | void orw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
1024 | { |
1025 | m_formatter.prefix(PRE_OPERAND_SIZE); |
1026 | orl_rm(src, offset, base, index, scale); |
1027 | } |
1028 | |
1029 | void orb_rm(RegisterID src, int offset, RegisterID base) |
1030 | { |
1031 | m_formatter.oneByteOp(OP_OR_EvGb, src, base, offset); |
1032 | } |
1033 | |
1034 | void orb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
1035 | { |
1036 | m_formatter.oneByteOp(OP_OR_EvGb, src, base, index, scale, offset); |
1037 | } |
1038 | |
1039 | void orl_ir(int imm, RegisterID dst) |
1040 | { |
1041 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1042 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst); |
1043 | m_formatter.immediate8(imm); |
1044 | } else { |
1045 | if (dst == X86Registers::eax) |
1046 | m_formatter.oneByteOp(OP_OR_EAXIv); |
1047 | else |
1048 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst); |
1049 | m_formatter.immediate32(imm); |
1050 | } |
1051 | } |
1052 | |
1053 | void orl_im(int imm, int offset, RegisterID base) |
1054 | { |
1055 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1056 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset); |
1057 | m_formatter.immediate8(imm); |
1058 | } else { |
1059 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset); |
1060 | m_formatter.immediate32(imm); |
1061 | } |
1062 | } |
1063 | |
1064 | void orl_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
1065 | { |
1066 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1067 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, index, scale, offset); |
1068 | m_formatter.immediate8(imm); |
1069 | } else { |
1070 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, index, scale, offset); |
1071 | m_formatter.immediate32(imm); |
1072 | } |
1073 | } |
1074 | |
1075 | void orw_im(int imm, int offset, RegisterID base) |
1076 | { |
1077 | m_formatter.prefix(PRE_OPERAND_SIZE); |
1078 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1079 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset); |
1080 | m_formatter.immediate8(imm); |
1081 | } else { |
1082 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset); |
1083 | m_formatter.immediate16(imm); |
1084 | } |
1085 | } |
1086 | |
1087 | void orw_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
1088 | { |
1089 | m_formatter.prefix(PRE_OPERAND_SIZE); |
1090 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1091 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, index, scale, offset); |
1092 | m_formatter.immediate8(imm); |
1093 | } else { |
1094 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, index, scale, offset); |
1095 | m_formatter.immediate16(imm); |
1096 | } |
1097 | } |
1098 | |
1099 | void orb_im(int imm, int offset, RegisterID base) |
1100 | { |
1101 | m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_OR, base, offset); |
1102 | m_formatter.immediate8(imm); |
1103 | } |
1104 | |
1105 | void orb_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
1106 | { |
1107 | m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_OR, base, index, scale, offset); |
1108 | m_formatter.immediate8(imm); |
1109 | } |
1110 | |
1111 | #if CPU(X86_64) |
1112 | void orq_rr(RegisterID src, RegisterID dst) |
1113 | { |
1114 | m_formatter.oneByteOp64(OP_OR_EvGv, src, dst); |
1115 | } |
1116 | |
1117 | void orq_mr(int offset, RegisterID base, RegisterID dst) |
1118 | { |
1119 | m_formatter.oneByteOp64(OP_OR_GvEv, dst, base, offset); |
1120 | } |
1121 | |
1122 | void orq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
1123 | { |
1124 | m_formatter.oneByteOp64(OP_OR_GvEv, dst, base, index, scale, offset); |
1125 | } |
1126 | |
1127 | void orq_rm(RegisterID src, int offset, RegisterID base) |
1128 | { |
1129 | m_formatter.oneByteOp64(OP_OR_EvGv, src, base, offset); |
1130 | } |
1131 | |
1132 | void orq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
1133 | { |
1134 | m_formatter.oneByteOp64(OP_OR_EvGv, src, base, index, scale, offset); |
1135 | } |
1136 | |
1137 | void orq_im(int imm, int offset, RegisterID base) |
1138 | { |
1139 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1140 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset); |
1141 | m_formatter.immediate8(imm); |
1142 | } else { |
1143 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset); |
1144 | m_formatter.immediate32(imm); |
1145 | } |
1146 | } |
1147 | |
1148 | void orq_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
1149 | { |
1150 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1151 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, base, index, scale, offset); |
1152 | m_formatter.immediate8(imm); |
1153 | } else { |
1154 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, base, index, scale, offset); |
1155 | m_formatter.immediate32(imm); |
1156 | } |
1157 | } |
1158 | |
1159 | void orq_ir(int imm, RegisterID dst) |
1160 | { |
1161 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1162 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst); |
1163 | m_formatter.immediate8(imm); |
1164 | } else { |
1165 | if (dst == X86Registers::eax) |
1166 | m_formatter.oneByteOp64(OP_OR_EAXIv); |
1167 | else |
1168 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst); |
1169 | m_formatter.immediate32(imm); |
1170 | } |
1171 | } |
1172 | #else |
1173 | void orl_im(int imm, const void* addr) |
1174 | { |
1175 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1176 | m_formatter.oneByteOpAddr(OP_GROUP1_EvIb, GROUP1_OP_OR, bitwise_cast<uint32_t>(addr)); |
1177 | m_formatter.immediate8(imm); |
1178 | } else { |
1179 | m_formatter.oneByteOpAddr(OP_GROUP1_EvIz, GROUP1_OP_OR, bitwise_cast<uint32_t>(addr)); |
1180 | m_formatter.immediate32(imm); |
1181 | } |
1182 | } |
1183 | |
1184 | void orl_rm(RegisterID src, const void* addr) |
1185 | { |
1186 | m_formatter.oneByteOpAddr(OP_OR_EvGv, src, bitwise_cast<uint32_t>(addr)); |
1187 | } |
1188 | #endif |
1189 | |
1190 | void subl_rr(RegisterID src, RegisterID dst) |
1191 | { |
1192 | m_formatter.oneByteOp(OP_SUB_EvGv, src, dst); |
1193 | } |
1194 | |
1195 | void subl_mr(int offset, RegisterID base, RegisterID dst) |
1196 | { |
1197 | m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset); |
1198 | } |
1199 | |
1200 | void subl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
1201 | { |
1202 | m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, index, scale, offset); |
1203 | } |
1204 | |
1205 | void subl_rm(RegisterID src, int offset, RegisterID base) |
1206 | { |
1207 | m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset); |
1208 | } |
1209 | |
1210 | void subl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
1211 | { |
1212 | m_formatter.oneByteOp(OP_SUB_EvGv, src, base, index, scale, offset); |
1213 | } |
1214 | |
1215 | void subw_rm(RegisterID src, int offset, RegisterID base) |
1216 | { |
1217 | m_formatter.prefix(PRE_OPERAND_SIZE); |
1218 | m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset); |
1219 | } |
1220 | |
1221 | void subw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
1222 | { |
1223 | m_formatter.prefix(PRE_OPERAND_SIZE); |
1224 | m_formatter.oneByteOp(OP_SUB_EvGv, src, base, index, scale, offset); |
1225 | } |
1226 | |
1227 | void subb_rm(RegisterID src, int offset, RegisterID base) |
1228 | { |
1229 | m_formatter.oneByteOp(OP_SUB_EvGb, src, base, offset); |
1230 | } |
1231 | |
1232 | void subb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
1233 | { |
1234 | m_formatter.oneByteOp(OP_SUB_EvGb, src, base, index, scale, offset); |
1235 | } |
1236 | |
1237 | void subl_ir(int imm, RegisterID dst) |
1238 | { |
1239 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1240 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst); |
1241 | m_formatter.immediate8(imm); |
1242 | } else { |
1243 | if (dst == X86Registers::eax) |
1244 | m_formatter.oneByteOp(OP_SUB_EAXIv); |
1245 | else |
1246 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst); |
1247 | m_formatter.immediate32(imm); |
1248 | } |
1249 | } |
1250 | |
1251 | void subl_im(int imm, int offset, RegisterID base) |
1252 | { |
1253 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1254 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset); |
1255 | m_formatter.immediate8(imm); |
1256 | } else { |
1257 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset); |
1258 | m_formatter.immediate32(imm); |
1259 | } |
1260 | } |
1261 | |
1262 | void subl_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
1263 | { |
1264 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1265 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, index, scale, offset); |
1266 | m_formatter.immediate8(imm); |
1267 | } else { |
1268 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, index, scale, offset); |
1269 | m_formatter.immediate32(imm); |
1270 | } |
1271 | } |
1272 | |
1273 | void subw_im(int imm, int offset, RegisterID base) |
1274 | { |
1275 | m_formatter.prefix(PRE_OPERAND_SIZE); |
1276 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1277 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset); |
1278 | m_formatter.immediate8(imm); |
1279 | } else { |
1280 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset); |
1281 | m_formatter.immediate16(imm); |
1282 | } |
1283 | } |
1284 | |
1285 | void subw_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
1286 | { |
1287 | m_formatter.prefix(PRE_OPERAND_SIZE); |
1288 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1289 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, index, scale, offset); |
1290 | m_formatter.immediate8(imm); |
1291 | } else { |
1292 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, index, scale, offset); |
1293 | m_formatter.immediate16(imm); |
1294 | } |
1295 | } |
1296 | |
1297 | void subb_im(int imm, int offset, RegisterID base) |
1298 | { |
1299 | m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_SUB, base, offset); |
1300 | m_formatter.immediate8(imm); |
1301 | } |
1302 | |
1303 | void subb_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
1304 | { |
1305 | m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_SUB, base, index, scale, offset); |
1306 | m_formatter.immediate8(imm); |
1307 | } |
1308 | |
1309 | #if CPU(X86_64) |
1310 | void subq_rr(RegisterID src, RegisterID dst) |
1311 | { |
1312 | m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst); |
1313 | } |
1314 | |
1315 | void subq_mr(int offset, RegisterID base, RegisterID dst) |
1316 | { |
1317 | m_formatter.oneByteOp64(OP_SUB_GvEv, dst, base, offset); |
1318 | } |
1319 | |
1320 | void subq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
1321 | { |
1322 | m_formatter.oneByteOp64(OP_SUB_GvEv, dst, base, index, scale, offset); |
1323 | } |
1324 | |
1325 | void subq_rm(RegisterID src, int offset, RegisterID base) |
1326 | { |
1327 | m_formatter.oneByteOp64(OP_SUB_EvGv, src, base, offset); |
1328 | } |
1329 | |
1330 | void subq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
1331 | { |
1332 | m_formatter.oneByteOp64(OP_SUB_EvGv, src, base, index, scale, offset); |
1333 | } |
1334 | |
1335 | void subq_ir(int imm, RegisterID dst) |
1336 | { |
1337 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1338 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst); |
1339 | m_formatter.immediate8(imm); |
1340 | } else { |
1341 | if (dst == X86Registers::eax) |
1342 | m_formatter.oneByteOp64(OP_SUB_EAXIv); |
1343 | else |
1344 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst); |
1345 | m_formatter.immediate32(imm); |
1346 | } |
1347 | } |
1348 | |
1349 | void subq_im(int imm, int offset, RegisterID base) |
1350 | { |
1351 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1352 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset); |
1353 | m_formatter.immediate8(imm); |
1354 | } else { |
1355 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset); |
1356 | m_formatter.immediate32(imm); |
1357 | } |
1358 | } |
1359 | |
1360 | void subq_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
1361 | { |
1362 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1363 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, index, scale, offset); |
1364 | m_formatter.immediate8(imm); |
1365 | } else { |
1366 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, index, scale, offset); |
1367 | m_formatter.immediate32(imm); |
1368 | } |
1369 | } |
1370 | #else |
1371 | void subl_im(int imm, const void* addr) |
1372 | { |
1373 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1374 | m_formatter.oneByteOpAddr(OP_GROUP1_EvIb, GROUP1_OP_SUB, bitwise_cast<uint32_t>(addr)); |
1375 | m_formatter.immediate8(imm); |
1376 | } else { |
1377 | m_formatter.oneByteOpAddr(OP_GROUP1_EvIz, GROUP1_OP_SUB, bitwise_cast<uint32_t>(addr)); |
1378 | m_formatter.immediate32(imm); |
1379 | } |
1380 | } |
1381 | #endif |
1382 | |
1383 | void xorl_rr(RegisterID src, RegisterID dst) |
1384 | { |
1385 | m_formatter.oneByteOp(OP_XOR_EvGv, src, dst); |
1386 | } |
1387 | |
1388 | void xorl_mr(int offset, RegisterID base, RegisterID dst) |
1389 | { |
1390 | m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset); |
1391 | } |
1392 | |
1393 | void xorl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
1394 | { |
1395 | m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, index, scale, offset); |
1396 | } |
1397 | |
1398 | void xorl_rm(RegisterID src, int offset, RegisterID base) |
1399 | { |
1400 | m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset); |
1401 | } |
1402 | |
1403 | void xorl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
1404 | { |
1405 | m_formatter.oneByteOp(OP_XOR_EvGv, src, base, index, scale, offset); |
1406 | } |
1407 | |
1408 | void xorl_im(int imm, int offset, RegisterID base) |
1409 | { |
1410 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1411 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset); |
1412 | m_formatter.immediate8(imm); |
1413 | } else { |
1414 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset); |
1415 | m_formatter.immediate32(imm); |
1416 | } |
1417 | } |
1418 | |
1419 | void xorl_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
1420 | { |
1421 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1422 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, index, scale, offset); |
1423 | m_formatter.immediate8(imm); |
1424 | } else { |
1425 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, index, scale, offset); |
1426 | m_formatter.immediate32(imm); |
1427 | } |
1428 | } |
1429 | |
1430 | void xorw_rm(RegisterID src, int offset, RegisterID base) |
1431 | { |
1432 | m_formatter.prefix(PRE_OPERAND_SIZE); |
1433 | xorl_rm(src, offset, base); |
1434 | } |
1435 | |
1436 | void xorw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
1437 | { |
1438 | m_formatter.prefix(PRE_OPERAND_SIZE); |
1439 | xorl_rm(src, offset, base, index, scale); |
1440 | } |
1441 | |
1442 | void xorw_im(int imm, int offset, RegisterID base) |
1443 | { |
1444 | m_formatter.prefix(PRE_OPERAND_SIZE); |
1445 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1446 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset); |
1447 | m_formatter.immediate8(imm); |
1448 | } else { |
1449 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset); |
1450 | m_formatter.immediate16(imm); |
1451 | } |
1452 | } |
1453 | |
1454 | void xorw_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
1455 | { |
1456 | m_formatter.prefix(PRE_OPERAND_SIZE); |
1457 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1458 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, index, scale, offset); |
1459 | m_formatter.immediate8(imm); |
1460 | } else { |
1461 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, index, scale, offset); |
1462 | m_formatter.immediate16(imm); |
1463 | } |
1464 | } |
1465 | |
1466 | void xorb_rm(RegisterID src, int offset, RegisterID base) |
1467 | { |
1468 | m_formatter.oneByteOp(OP_XOR_EvGb, src, base, offset); |
1469 | } |
1470 | |
1471 | void xorb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
1472 | { |
1473 | m_formatter.oneByteOp(OP_XOR_EvGb, src, base, index, scale, offset); |
1474 | } |
1475 | |
1476 | void xorb_im(int imm, int offset, RegisterID base) |
1477 | { |
1478 | m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_XOR, base, offset); |
1479 | m_formatter.immediate8(imm); |
1480 | } |
1481 | |
1482 | void xorb_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
1483 | { |
1484 | m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_XOR, base, index, scale, offset); |
1485 | m_formatter.immediate8(imm); |
1486 | } |
1487 | |
1488 | void xorl_ir(int imm, RegisterID dst) |
1489 | { |
1490 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1491 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst); |
1492 | m_formatter.immediate8(imm); |
1493 | } else { |
1494 | if (dst == X86Registers::eax) |
1495 | m_formatter.oneByteOp(OP_XOR_EAXIv); |
1496 | else |
1497 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst); |
1498 | m_formatter.immediate32(imm); |
1499 | } |
1500 | } |
1501 | |
1502 | #if CPU(X86_64) |
1503 | void xorq_rr(RegisterID src, RegisterID dst) |
1504 | { |
1505 | m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst); |
1506 | } |
1507 | |
1508 | void xorq_ir(int imm, RegisterID dst) |
1509 | { |
1510 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1511 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst); |
1512 | m_formatter.immediate8(imm); |
1513 | } else { |
1514 | if (dst == X86Registers::eax) |
1515 | m_formatter.oneByteOp64(OP_XOR_EAXIv); |
1516 | else |
1517 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst); |
1518 | m_formatter.immediate32(imm); |
1519 | } |
1520 | } |
1521 | |
1522 | void xorq_im(int imm, int offset, RegisterID base) |
1523 | { |
1524 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1525 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset); |
1526 | m_formatter.immediate8(imm); |
1527 | } else { |
1528 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset); |
1529 | m_formatter.immediate32(imm); |
1530 | } |
1531 | } |
1532 | |
1533 | void xorq_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
1534 | { |
1535 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1536 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, index, scale, offset); |
1537 | m_formatter.immediate8(imm); |
1538 | } else { |
1539 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, index, scale, offset); |
1540 | m_formatter.immediate32(imm); |
1541 | } |
1542 | } |
1543 | |
1544 | void xorq_rm(RegisterID src, int offset, RegisterID base) |
1545 | { |
1546 | m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, offset); |
1547 | } |
1548 | |
1549 | void xorq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
1550 | { |
1551 | m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, index, scale, offset); |
1552 | } |
1553 | |
1554 | void xorq_mr(int offset, RegisterID base, RegisterID dest) |
1555 | { |
1556 | m_formatter.oneByteOp64(OP_XOR_GvEv, dest, base, offset); |
1557 | } |
1558 | |
1559 | void xorq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dest) |
1560 | { |
1561 | m_formatter.oneByteOp64(OP_XOR_GvEv, dest, base, index, scale, offset); |
1562 | } |
1563 | #endif |
1564 | |
1565 | void lzcnt_rr(RegisterID src, RegisterID dst) |
1566 | { |
1567 | m_formatter.prefix(PRE_SSE_F3); |
1568 | m_formatter.twoByteOp(OP2_LZCNT, dst, src); |
1569 | } |
1570 | |
1571 | void lzcnt_mr(int offset, RegisterID base, RegisterID dst) |
1572 | { |
1573 | m_formatter.prefix(PRE_SSE_F3); |
1574 | m_formatter.twoByteOp(OP2_LZCNT, dst, base, offset); |
1575 | } |
1576 | |
1577 | #if CPU(X86_64) |
1578 | void lzcntq_rr(RegisterID src, RegisterID dst) |
1579 | { |
1580 | m_formatter.prefix(PRE_SSE_F3); |
1581 | m_formatter.twoByteOp64(OP2_LZCNT, dst, src); |
1582 | } |
1583 | |
1584 | void lzcntq_mr(int offset, RegisterID base, RegisterID dst) |
1585 | { |
1586 | m_formatter.prefix(PRE_SSE_F3); |
1587 | m_formatter.twoByteOp64(OP2_LZCNT, dst, base, offset); |
1588 | } |
1589 | #endif |
1590 | |
1591 | void bsr_rr(RegisterID src, RegisterID dst) |
1592 | { |
1593 | m_formatter.twoByteOp(OP2_BSR, dst, src); |
1594 | } |
1595 | |
1596 | void bsr_mr(int offset, RegisterID base, RegisterID dst) |
1597 | { |
1598 | m_formatter.twoByteOp(OP2_BSR, dst, base, offset); |
1599 | } |
1600 | |
1601 | #if CPU(X86_64) |
1602 | void bsrq_rr(RegisterID src, RegisterID dst) |
1603 | { |
1604 | m_formatter.twoByteOp64(OP2_BSR, dst, src); |
1605 | } |
1606 | |
1607 | void bsrq_mr(int offset, RegisterID base, RegisterID dst) |
1608 | { |
1609 | m_formatter.twoByteOp64(OP2_BSR, dst, base, offset); |
1610 | } |
1611 | #endif |
1612 | |
1613 | void bswapl_r(RegisterID dst) |
1614 | { |
1615 | m_formatter.twoByteOp(OP2_BSWAP, dst); |
1616 | } |
1617 | |
1618 | #if CPU(X86_64) |
1619 | void bswapq_r(RegisterID dst) |
1620 | { |
1621 | m_formatter.twoByteOp64(OP2_BSWAP, dst); |
1622 | } |
1623 | #endif |
1624 | |
1625 | void tzcnt_rr(RegisterID src, RegisterID dst) |
1626 | { |
1627 | m_formatter.prefix(PRE_SSE_F3); |
1628 | m_formatter.twoByteOp(OP2_TZCNT, dst, src); |
1629 | } |
1630 | |
1631 | #if CPU(X86_64) |
1632 | void tzcntq_rr(RegisterID src, RegisterID dst) |
1633 | { |
1634 | m_formatter.prefix(PRE_SSE_F3); |
1635 | m_formatter.twoByteOp64(OP2_TZCNT, dst, src); |
1636 | } |
1637 | #endif |
1638 | |
1639 | void bsf_rr(RegisterID src, RegisterID dst) |
1640 | { |
1641 | m_formatter.twoByteOp(OP2_BSF, dst, src); |
1642 | } |
1643 | |
1644 | #if CPU(X86_64) |
1645 | void bsfq_rr(RegisterID src, RegisterID dst) |
1646 | { |
1647 | m_formatter.twoByteOp64(OP2_BSF, dst, src); |
1648 | } |
1649 | #endif |
1650 | |
1651 | void popcnt_rr(RegisterID src, RegisterID dst) |
1652 | { |
1653 | m_formatter.prefix(PRE_SSE_F3); |
1654 | m_formatter.twoByteOp(OP2_POPCNT, dst, src); |
1655 | } |
1656 | |
1657 | void popcnt_mr(int offset, RegisterID base, RegisterID dst) |
1658 | { |
1659 | m_formatter.prefix(PRE_SSE_F3); |
1660 | m_formatter.twoByteOp(OP2_POPCNT, dst, base, offset); |
1661 | } |
1662 | |
1663 | #if CPU(X86_64) |
1664 | void popcntq_rr(RegisterID src, RegisterID dst) |
1665 | { |
1666 | m_formatter.prefix(PRE_SSE_F3); |
1667 | m_formatter.twoByteOp64(OP2_POPCNT, dst, src); |
1668 | } |
1669 | |
1670 | void popcntq_mr(int offset, RegisterID base, RegisterID dst) |
1671 | { |
1672 | m_formatter.prefix(PRE_SSE_F3); |
1673 | m_formatter.twoByteOp64(OP2_POPCNT, dst, base, offset); |
1674 | } |
1675 | #endif |
1676 | |
1677 | private: |
1678 | template<GroupOpcodeID op> |
1679 | void shiftInstruction32(int imm, RegisterID dst) |
1680 | { |
1681 | if (imm == 1) |
1682 | m_formatter.oneByteOp(OP_GROUP2_Ev1, op, dst); |
1683 | else { |
1684 | m_formatter.oneByteOp(OP_GROUP2_EvIb, op, dst); |
1685 | m_formatter.immediate8(imm); |
1686 | } |
1687 | } |
1688 | |
1689 | template<GroupOpcodeID op> |
1690 | void shiftInstruction16(int imm, RegisterID dst) |
1691 | { |
1692 | m_formatter.prefix(PRE_OPERAND_SIZE); |
1693 | if (imm == 1) |
1694 | m_formatter.oneByteOp(OP_GROUP2_Ev1, op, dst); |
1695 | else { |
1696 | m_formatter.oneByteOp(OP_GROUP2_EvIb, op, dst); |
1697 | m_formatter.immediate8(imm); |
1698 | } |
1699 | } |
1700 | public: |
1701 | |
1702 | void sarl_i8r(int imm, RegisterID dst) |
1703 | { |
1704 | shiftInstruction32<GROUP2_OP_SAR>(imm, dst); |
1705 | } |
1706 | |
1707 | void sarl_CLr(RegisterID dst) |
1708 | { |
1709 | m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst); |
1710 | } |
1711 | |
1712 | void shrl_i8r(int imm, RegisterID dst) |
1713 | { |
1714 | shiftInstruction32<GROUP2_OP_SHR>(imm, dst); |
1715 | } |
1716 | |
1717 | void shrl_CLr(RegisterID dst) |
1718 | { |
1719 | m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst); |
1720 | } |
1721 | |
1722 | void shll_i8r(int imm, RegisterID dst) |
1723 | { |
1724 | shiftInstruction32<GROUP2_OP_SHL>(imm, dst); |
1725 | } |
1726 | |
1727 | void shll_CLr(RegisterID dst) |
1728 | { |
1729 | m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst); |
1730 | } |
1731 | |
1732 | void rorl_i8r(int imm, RegisterID dst) |
1733 | { |
1734 | shiftInstruction32<GROUP2_OP_ROR>(imm, dst); |
1735 | } |
1736 | |
1737 | void rorl_CLr(RegisterID dst) |
1738 | { |
1739 | m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_ROR, dst); |
1740 | } |
1741 | |
1742 | void roll_i8r(int imm, RegisterID dst) |
1743 | { |
1744 | shiftInstruction32<GROUP2_OP_ROL>(imm, dst); |
1745 | } |
1746 | |
1747 | void roll_CLr(RegisterID dst) |
1748 | { |
1749 | m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_ROL, dst); |
1750 | } |
1751 | |
1752 | void rolw_i8r(int imm, RegisterID dst) |
1753 | { |
1754 | shiftInstruction16<GROUP2_OP_ROL>(imm, dst); |
1755 | } |
1756 | |
1757 | #if CPU(X86_64) |
1758 | private: |
1759 | template<GroupOpcodeID op> |
1760 | void shiftInstruction64(int imm, RegisterID dst) |
1761 | { |
1762 | if (imm == 1) |
1763 | m_formatter.oneByteOp64(OP_GROUP2_Ev1, op, dst); |
1764 | else { |
1765 | m_formatter.oneByteOp64(OP_GROUP2_EvIb, op, dst); |
1766 | m_formatter.immediate8(imm); |
1767 | } |
1768 | } |
1769 | public: |
1770 | void sarq_CLr(RegisterID dst) |
1771 | { |
1772 | m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst); |
1773 | } |
1774 | |
1775 | void sarq_i8r(int imm, RegisterID dst) |
1776 | { |
1777 | shiftInstruction64<GROUP2_OP_SAR>(imm, dst); |
1778 | } |
1779 | |
1780 | void shrq_i8r(int imm, RegisterID dst) |
1781 | { |
1782 | shiftInstruction64<GROUP2_OP_SHR>(imm, dst); |
1783 | } |
1784 | |
1785 | void shrq_CLr(RegisterID dst) |
1786 | { |
1787 | m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst); |
1788 | } |
1789 | |
1790 | void shlq_i8r(int imm, RegisterID dst) |
1791 | { |
1792 | shiftInstruction64<GROUP2_OP_SHL>(imm, dst); |
1793 | } |
1794 | |
1795 | void shlq_CLr(RegisterID dst) |
1796 | { |
1797 | m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst); |
1798 | } |
1799 | |
1800 | void rorq_i8r(int imm, RegisterID dst) |
1801 | { |
1802 | shiftInstruction64<GROUP2_OP_ROR>(imm, dst); |
1803 | } |
1804 | |
1805 | void rorq_CLr(RegisterID dst) |
1806 | { |
1807 | m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_ROR, dst); |
1808 | } |
1809 | |
1810 | void rolq_i8r(int imm, RegisterID dst) |
1811 | { |
1812 | shiftInstruction64<GROUP2_OP_ROL>(imm, dst); |
1813 | } |
1814 | |
1815 | void rolq_CLr(RegisterID dst) |
1816 | { |
1817 | m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_ROL, dst); |
1818 | } |
1819 | #endif // CPU(X86_64) |
1820 | |
1821 | void imull_rr(RegisterID src, RegisterID dst) |
1822 | { |
1823 | m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src); |
1824 | } |
1825 | |
1826 | #if CPU(X86_64) |
1827 | void imulq_rr(RegisterID src, RegisterID dst) |
1828 | { |
1829 | m_formatter.twoByteOp64(OP2_IMUL_GvEv, dst, src); |
1830 | } |
1831 | #endif // CPU(X86_64) |
1832 | |
1833 | void imull_mr(int offset, RegisterID base, RegisterID dst) |
1834 | { |
1835 | m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset); |
1836 | } |
1837 | |
1838 | void imull_i32r(RegisterID src, int32_t value, RegisterID dst) |
1839 | { |
1840 | m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src); |
1841 | m_formatter.immediate32(value); |
1842 | } |
1843 | |
1844 | void divl_r(RegisterID dst) |
1845 | { |
1846 | m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_DIV, dst); |
1847 | } |
1848 | |
1849 | void idivl_r(RegisterID dst) |
1850 | { |
1851 | m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst); |
1852 | } |
1853 | |
1854 | #if CPU(X86_64) |
1855 | void divq_r(RegisterID dst) |
1856 | { |
1857 | m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_DIV, dst); |
1858 | } |
1859 | |
1860 | void idivq_r(RegisterID dst) |
1861 | { |
1862 | m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst); |
1863 | } |
1864 | #endif // CPU(X86_64) |
1865 | |
1866 | // Comparisons: |
1867 | |
1868 | void cmpl_rr(RegisterID src, RegisterID dst) |
1869 | { |
1870 | m_formatter.oneByteOp(OP_CMP_EvGv, src, dst); |
1871 | } |
1872 | |
1873 | void cmpl_rm(RegisterID src, int offset, RegisterID base) |
1874 | { |
1875 | m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset); |
1876 | } |
1877 | |
1878 | void cmpl_mr(int offset, RegisterID base, RegisterID src) |
1879 | { |
1880 | m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset); |
1881 | } |
1882 | |
1883 | void cmpl_ir(int imm, RegisterID dst) |
1884 | { |
1885 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1886 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst); |
1887 | m_formatter.immediate8(imm); |
1888 | } else { |
1889 | if (dst == X86Registers::eax) |
1890 | m_formatter.oneByteOp(OP_CMP_EAXIv); |
1891 | else |
1892 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst); |
1893 | m_formatter.immediate32(imm); |
1894 | } |
1895 | } |
1896 | |
1897 | void cmpl_ir_force32(int imm, RegisterID dst) |
1898 | { |
1899 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst); |
1900 | m_formatter.immediate32(imm); |
1901 | } |
1902 | |
1903 | void cmpl_im(int imm, int offset, RegisterID base) |
1904 | { |
1905 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1906 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset); |
1907 | m_formatter.immediate8(imm); |
1908 | } else { |
1909 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset); |
1910 | m_formatter.immediate32(imm); |
1911 | } |
1912 | } |
1913 | |
1914 | void cmpb_im(int imm, int offset, RegisterID base) |
1915 | { |
1916 | m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset); |
1917 | m_formatter.immediate8(imm); |
1918 | } |
1919 | |
1920 | void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
1921 | { |
1922 | m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset); |
1923 | m_formatter.immediate8(imm); |
1924 | } |
1925 | |
1926 | #if CPU(X86) |
1927 | void cmpb_im(int imm, const void* addr) |
1928 | { |
1929 | m_formatter.oneByteOpAddr(OP_GROUP1_EbIb, GROUP1_OP_CMP, bitwise_cast<uint32_t>(addr)); |
1930 | m_formatter.immediate8(imm); |
1931 | } |
1932 | #endif |
1933 | |
1934 | void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
1935 | { |
1936 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1937 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset); |
1938 | m_formatter.immediate8(imm); |
1939 | } else { |
1940 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset); |
1941 | m_formatter.immediate32(imm); |
1942 | } |
1943 | } |
1944 | |
1945 | void cmpl_im_force32(int imm, int offset, RegisterID base) |
1946 | { |
1947 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset); |
1948 | m_formatter.immediate32(imm); |
1949 | } |
1950 | |
1951 | #if CPU(X86_64) |
1952 | void cmpq_rr(RegisterID src, RegisterID dst) |
1953 | { |
1954 | m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst); |
1955 | } |
1956 | |
1957 | void cmpq_rm(RegisterID src, int offset, RegisterID base) |
1958 | { |
1959 | m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset); |
1960 | } |
1961 | |
1962 | void cmpq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
1963 | { |
1964 | m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, index, scale, offset); |
1965 | } |
1966 | |
1967 | void cmpq_mr(int offset, RegisterID base, RegisterID src) |
1968 | { |
1969 | m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset); |
1970 | } |
1971 | |
1972 | void cmpq_ir(int imm, RegisterID dst) |
1973 | { |
1974 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1975 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst); |
1976 | m_formatter.immediate8(imm); |
1977 | } else { |
1978 | if (dst == X86Registers::eax) |
1979 | m_formatter.oneByteOp64(OP_CMP_EAXIv); |
1980 | else |
1981 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst); |
1982 | m_formatter.immediate32(imm); |
1983 | } |
1984 | } |
1985 | |
1986 | void cmpq_im(int imm, int offset, RegisterID base) |
1987 | { |
1988 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
1989 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset); |
1990 | m_formatter.immediate8(imm); |
1991 | } else { |
1992 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset); |
1993 | m_formatter.immediate32(imm); |
1994 | } |
1995 | } |
1996 | |
1997 | void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
1998 | { |
1999 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
2000 | m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset); |
2001 | m_formatter.immediate8(imm); |
2002 | } else { |
2003 | m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset); |
2004 | m_formatter.immediate32(imm); |
2005 | } |
2006 | } |
2007 | #else |
2008 | void cmpl_rm(RegisterID reg, const void* addr) |
2009 | { |
2010 | m_formatter.oneByteOpAddr(OP_CMP_EvGv, reg, bitwise_cast<uint32_t>(addr)); |
2011 | } |
2012 | |
2013 | void cmpl_im(int imm, const void* addr) |
2014 | { |
2015 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
2016 | m_formatter.oneByteOpAddr(OP_GROUP1_EvIb, GROUP1_OP_CMP, bitwise_cast<uint32_t>(addr)); |
2017 | m_formatter.immediate8(imm); |
2018 | } else { |
2019 | m_formatter.oneByteOpAddr(OP_GROUP1_EvIz, GROUP1_OP_CMP, bitwise_cast<uint32_t>(addr)); |
2020 | m_formatter.immediate32(imm); |
2021 | } |
2022 | } |
2023 | #endif |
2024 | |
2025 | void cmpw_ir(int imm, RegisterID dst) |
2026 | { |
2027 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
2028 | m_formatter.prefix(PRE_OPERAND_SIZE); |
2029 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst); |
2030 | m_formatter.immediate8(imm); |
2031 | } else { |
2032 | m_formatter.prefix(PRE_OPERAND_SIZE); |
2033 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst); |
2034 | m_formatter.immediate16(imm); |
2035 | } |
2036 | } |
2037 | |
2038 | void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
2039 | { |
2040 | m_formatter.prefix(PRE_OPERAND_SIZE); |
2041 | m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset); |
2042 | } |
2043 | |
2044 | void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
2045 | { |
2046 | if (CAN_SIGN_EXTEND_8_32(imm)) { |
2047 | m_formatter.prefix(PRE_OPERAND_SIZE); |
2048 | m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset); |
2049 | m_formatter.immediate8(imm); |
2050 | } else { |
2051 | m_formatter.prefix(PRE_OPERAND_SIZE); |
2052 | m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset); |
2053 | m_formatter.immediate16(imm); |
2054 | } |
2055 | } |
2056 | |
2057 | void testl_rr(RegisterID src, RegisterID dst) |
2058 | { |
2059 | m_formatter.oneByteOp(OP_TEST_EvGv, src, dst); |
2060 | } |
2061 | |
2062 | void testl_i32r(int imm, RegisterID dst) |
2063 | { |
2064 | if (dst == X86Registers::eax) |
2065 | m_formatter.oneByteOp(OP_TEST_EAXIv); |
2066 | else |
2067 | m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst); |
2068 | m_formatter.immediate32(imm); |
2069 | } |
2070 | |
2071 | void testl_i32m(int imm, int offset, RegisterID base) |
2072 | { |
2073 | m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset); |
2074 | m_formatter.immediate32(imm); |
2075 | } |
2076 | |
2077 | void testb_rr(RegisterID src, RegisterID dst) |
2078 | { |
2079 | m_formatter.oneByteOp8(OP_TEST_EbGb, src, dst); |
2080 | } |
2081 | |
2082 | void testb_im(int imm, int offset, RegisterID base) |
2083 | { |
2084 | m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset); |
2085 | m_formatter.immediate8(imm); |
2086 | } |
2087 | |
2088 | void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
2089 | { |
2090 | m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset); |
2091 | m_formatter.immediate8(imm); |
2092 | } |
2093 | |
2094 | #if CPU(X86) |
2095 | void testb_im(int imm, const void* addr) |
2096 | { |
2097 | m_formatter.oneByteOpAddr(OP_GROUP3_EbIb, GROUP3_OP_TEST, bitwise_cast<uint32_t>(addr)); |
2098 | m_formatter.immediate8(imm); |
2099 | } |
2100 | #endif |
2101 | |
2102 | void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale) |
2103 | { |
2104 | m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset); |
2105 | m_formatter.immediate32(imm); |
2106 | } |
2107 | |
2108 | #if CPU(X86_64) |
2109 | void testq_rr(RegisterID src, RegisterID dst) |
2110 | { |
2111 | m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst); |
2112 | } |
2113 | |
2114 | void testq_rm(RegisterID src, int offset, RegisterID base) |
2115 | { |
2116 | m_formatter.oneByteOp64(OP_TEST_EvGv, src, base, offset); |
2117 | } |
2118 | |
2119 | void testq_i32r(int imm, RegisterID dst) |
2120 | { |
2121 | if (dst == X86Registers::eax) |
2122 | m_formatter.oneByteOp64(OP_TEST_EAXIv); |
2123 | else |
2124 | m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst); |
2125 | m_formatter.immediate32(imm); |
2126 | } |
2127 | |
2128 | void testq_i32m(int imm, int offset, RegisterID base) |
2129 | { |
2130 | m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset); |
2131 | m_formatter.immediate32(imm); |
2132 | } |
2133 | |
2134 | void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale) |
2135 | { |
2136 | m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset); |
2137 | m_formatter.immediate32(imm); |
2138 | } |
2139 | #endif |
2140 | |
2141 | void testw_rr(RegisterID src, RegisterID dst) |
2142 | { |
2143 | m_formatter.prefix(PRE_OPERAND_SIZE); |
2144 | m_formatter.oneByteOp(OP_TEST_EvGv, src, dst); |
2145 | } |
2146 | |
2147 | void testb_i8r(int imm, RegisterID dst) |
2148 | { |
2149 | if (dst == X86Registers::eax) |
2150 | m_formatter.oneByteOp(OP_TEST_ALIb); |
2151 | else |
2152 | m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst); |
2153 | m_formatter.immediate8(imm); |
2154 | } |
2155 | |
2156 | void bt_ir(int bitOffset, RegisterID testValue) |
2157 | { |
2158 | ASSERT(-128 <= bitOffset && bitOffset < 128); |
2159 | m_formatter.twoByteOp(OP2_GROUP_BT_EvIb, GROUP_BT_OP_BT, testValue); |
2160 | m_formatter.immediate8(bitOffset); |
2161 | } |
2162 | |
2163 | void bt_im(int bitOffset, int offset, RegisterID base) |
2164 | { |
2165 | ASSERT(-128 <= bitOffset && bitOffset < 128); |
2166 | m_formatter.twoByteOp(OP2_GROUP_BT_EvIb, GROUP_BT_OP_BT, base, offset); |
2167 | m_formatter.immediate8(bitOffset); |
2168 | } |
2169 | |
2170 | void bt_ir(RegisterID bitOffset, RegisterID testValue) |
2171 | { |
2172 | m_formatter.twoByteOp(OP2_BT_EvEv, bitOffset, testValue); |
2173 | } |
2174 | |
2175 | void bt_im(RegisterID bitOffset, int offset, RegisterID base) |
2176 | { |
2177 | m_formatter.twoByteOp(OP2_BT_EvEv, bitOffset, base, offset); |
2178 | } |
2179 | |
2180 | #if CPU(X86_64) |
2181 | void btw_ir(int bitOffset, RegisterID testValue) |
2182 | { |
2183 | ASSERT(-128 <= bitOffset && bitOffset < 128); |
2184 | m_formatter.twoByteOp64(OP2_GROUP_BT_EvIb, GROUP_BT_OP_BT, testValue); |
2185 | m_formatter.immediate8(bitOffset); |
2186 | } |
2187 | |
2188 | void btw_im(int bitOffset, int offset, RegisterID base) |
2189 | { |
2190 | ASSERT(-128 <= bitOffset && bitOffset < 128); |
2191 | m_formatter.twoByteOp64(OP2_GROUP_BT_EvIb, GROUP_BT_OP_BT, base, offset); |
2192 | m_formatter.immediate8(bitOffset); |
2193 | } |
2194 | |
2195 | void btw_ir(RegisterID bitOffset, RegisterID testValue) |
2196 | { |
2197 | m_formatter.twoByteOp64(OP2_BT_EvEv, bitOffset, testValue); |
2198 | } |
2199 | |
2200 | void btw_im(RegisterID bitOffset, int offset, RegisterID base) |
2201 | { |
2202 | m_formatter.twoByteOp64(OP2_BT_EvEv, bitOffset, base, offset); |
2203 | } |
2204 | #endif |
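    // Note: despite the "w" suffix, the btw_* helpers above emit the REX.W (64-bit)
    // bt forms via twoByteOp64.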
2205 | |
2206 | void setCC_r(Condition cond, RegisterID dst) |
2207 | { |
2208 | m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst); |
2209 | } |
2210 | |
2211 | void sete_r(RegisterID dst) |
2212 | { |
2213 | m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst); |
2214 | } |
2215 | |
2216 | void setz_r(RegisterID dst) |
2217 | { |
2218 | sete_r(dst); |
2219 | } |
2220 | |
2221 | void setne_r(RegisterID dst) |
2222 | { |
2223 | m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst); |
2224 | } |
2225 | |
2226 | void setnz_r(RegisterID dst) |
2227 | { |
2228 | setne_r(dst); |
2229 | } |
2230 | |
2231 | void setnp_r(RegisterID dst) |
2232 | { |
2233 | m_formatter.twoByteOp8(setccOpcode(ConditionNP), (GroupOpcodeID)0, dst); |
2234 | } |
2235 | |
2236 | void setp_r(RegisterID dst) |
2237 | { |
2238 | m_formatter.twoByteOp8(setccOpcode(ConditionP), (GroupOpcodeID)0, dst); |
2239 | } |
2240 | |
2241 | // Various move ops: |
2242 | |
2243 | void cdq() |
2244 | { |
2245 | m_formatter.oneByteOp(OP_CDQ); |
2246 | } |
2247 | |
2248 | #if CPU(X86_64) |
2249 | void cqo() |
2250 | { |
2251 | m_formatter.oneByteOp64(OP_CDQ); |
2252 | } |
2253 | #endif |
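    // cdq sign-extends eax into edx:eax, and cqo (the REX.W form of the same
    // opcode) sign-extends rax into rdx:rax; both set up the dividend before
    // idivl_r / idivq_r.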
2254 | |
2255 | void fstps(int offset, RegisterID base) |
2256 | { |
2257 | m_formatter.oneByteOp(OP_ESCAPE_D9, ESCAPE_D9_FSTP_singleReal, base, offset); |
2258 | } |
2259 | |
2260 | void fstpl(int offset, RegisterID base) |
2261 | { |
2262 | m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset); |
2263 | } |
2264 | |
2265 | void xchgl_rr(RegisterID src, RegisterID dst) |
2266 | { |
2267 | if (src == X86Registers::eax) |
2268 | m_formatter.oneByteOp(OP_XCHG_EAX, dst); |
2269 | else if (dst == X86Registers::eax) |
2270 | m_formatter.oneByteOp(OP_XCHG_EAX, src); |
2271 | else |
2272 | m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst); |
2273 | } |
2274 | |
2275 | void xchgb_rm(RegisterID src, int offset, RegisterID base) |
2276 | { |
2277 | m_formatter.oneByteOp8(OP_XCHG_EvGb, src, base, offset); |
2278 | } |
2279 | |
2280 | void xchgb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
2281 | { |
2282 | m_formatter.oneByteOp8(OP_XCHG_EvGb, src, base, index, scale, offset); |
2283 | } |
2284 | |
2285 | void xchgw_rm(RegisterID src, int offset, RegisterID base) |
2286 | { |
2287 | m_formatter.prefix(PRE_OPERAND_SIZE); |
2288 | m_formatter.oneByteOp(OP_XCHG_EvGv, src, base, offset); |
2289 | } |
2290 | |
2291 | void xchgw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
2292 | { |
2293 | m_formatter.prefix(PRE_OPERAND_SIZE); |
2294 | m_formatter.oneByteOp(OP_XCHG_EvGv, src, base, index, scale, offset); |
2295 | } |
2296 | |
2297 | void xchgl_rm(RegisterID src, int offset, RegisterID base) |
2298 | { |
2299 | m_formatter.oneByteOp(OP_XCHG_EvGv, src, base, offset); |
2300 | } |
2301 | |
2302 | void xchgl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
2303 | { |
2304 | m_formatter.oneByteOp(OP_XCHG_EvGv, src, base, index, scale, offset); |
2305 | } |
2306 | |
2307 | #if CPU(X86_64) |
2308 | void xchgq_rr(RegisterID src, RegisterID dst) |
2309 | { |
2310 | if (src == X86Registers::eax) |
2311 | m_formatter.oneByteOp64(OP_XCHG_EAX, dst); |
2312 | else if (dst == X86Registers::eax) |
2313 | m_formatter.oneByteOp64(OP_XCHG_EAX, src); |
2314 | else |
2315 | m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst); |
2316 | } |
2317 | |
2318 | void xchgq_rm(RegisterID src, int offset, RegisterID base) |
2319 | { |
2320 | m_formatter.oneByteOp64(OP_XCHG_EvGv, src, base, offset); |
2321 | } |
2322 | |
2323 | void xchgq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
2324 | { |
2325 | m_formatter.oneByteOp64(OP_XCHG_EvGv, src, base, index, scale, offset); |
2326 | } |
2327 | #endif |
2328 | |
2329 | void movl_rr(RegisterID src, RegisterID dst) |
2330 | { |
2331 | m_formatter.oneByteOp(OP_MOV_EvGv, src, dst); |
2332 | } |
2333 | |
2334 | void movl_rm(RegisterID src, int offset, RegisterID base) |
2335 | { |
2336 | m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset); |
2337 | } |
2338 | |
2339 | void movl_rm_disp32(RegisterID src, int offset, RegisterID base) |
2340 | { |
2341 | m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset); |
2342 | } |
2343 | |
2344 | void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
2345 | { |
2346 | m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset); |
2347 | } |
2348 | |
2349 | void movl_mEAX(const void* addr) |
2350 | { |
2351 | m_formatter.oneByteOp(OP_MOV_EAXOv); |
2352 | #if CPU(X86_64) |
2353 | m_formatter.immediate64(reinterpret_cast<int64_t>(addr)); |
2354 | #else |
2355 | m_formatter.immediate32(reinterpret_cast<int>(addr)); |
2356 | #endif |
2357 | } |
2358 | |
2359 | void movl_mr(int offset, RegisterID base, RegisterID dst) |
2360 | { |
2361 | m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset); |
2362 | } |
2363 | |
2364 | void movl_mr_disp32(int offset, RegisterID base, RegisterID dst) |
2365 | { |
2366 | m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset); |
2367 | } |
2368 | |
2369 | void movl_mr_disp8(int offset, RegisterID base, RegisterID dst) |
2370 | { |
2371 | m_formatter.oneByteOp_disp8(OP_MOV_GvEv, dst, base, offset); |
2372 | } |
2373 | |
2374 | void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
2375 | { |
2376 | m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset); |
2377 | } |
2378 | |
2379 | void movl_i32r(int imm, RegisterID dst) |
2380 | { |
2381 | m_formatter.oneByteOp(OP_MOV_EAXIv, dst); |
2382 | m_formatter.immediate32(imm); |
2383 | } |
2384 | |
2385 | void movl_i32m(int imm, int offset, RegisterID base) |
2386 | { |
2387 | m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset); |
2388 | m_formatter.immediate32(imm); |
2389 | } |
2390 | |
2391 | void movl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale) |
2392 | { |
2393 | m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, index, scale, offset); |
2394 | m_formatter.immediate32(imm); |
2395 | } |
2396 | |
2397 | #if !CPU(X86_64) |
2398 | void movb_i8m(int imm, const void* addr) |
2399 | { |
2400 | ASSERT(-128 <= imm && imm < 128); |
2401 | m_formatter.oneByteOpAddr(OP_GROUP11_EvIb, GROUP11_MOV, bitwise_cast<uint32_t>(addr)); |
2402 | m_formatter.immediate8(imm); |
2403 | } |
2404 | #endif |
2405 | |
2406 | void movb_i8m(int imm, int offset, RegisterID base) |
2407 | { |
2408 | ASSERT(-128 <= imm && imm < 128); |
2409 | m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, offset); |
2410 | m_formatter.immediate8(imm); |
2411 | } |
2412 | |
2413 | void movb_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale) |
2414 | { |
2415 | ASSERT(-128 <= imm && imm < 128); |
2416 | m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, index, scale, offset); |
2417 | m_formatter.immediate8(imm); |
2418 | } |
2419 | |
2420 | #if !CPU(X86_64) |
2421 | void movb_rm(RegisterID src, const void* addr) |
2422 | { |
2423 | m_formatter.oneByteOpAddr(OP_MOV_EbGb, src, bitwise_cast<uint32_t>(addr)); |
2424 | } |
2425 | #endif |
2426 | |
2427 | void movb_rm(RegisterID src, int offset, RegisterID base) |
2428 | { |
2429 | m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, offset); |
2430 | } |
2431 | |
2432 | void movb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
2433 | { |
2434 | m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset); |
2435 | } |
2436 | |
2437 | void movw_rm(RegisterID src, int offset, RegisterID base) |
2438 | { |
2439 | m_formatter.prefix(PRE_OPERAND_SIZE); |
2440 | |
2441 | // FIXME: We often use oneByteOp8 for 16-bit operations. It's not clear that this is |
2442 | // necessary. https://bugs.webkit.org/show_bug.cgi?id=153433 |
2443 | m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, offset); |
2444 | } |
2445 | |
2446 | void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
2447 | { |
2448 | m_formatter.prefix(PRE_OPERAND_SIZE); |
2449 | m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, index, scale, offset); |
2450 | } |
2451 | |
2452 | void movw_im(int imm, int offset, RegisterID base) |
2453 | { |
2454 | m_formatter.prefix(PRE_OPERAND_SIZE); |
2455 | m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset); |
2456 | m_formatter.immediate16(imm); |
2457 | } |
2458 | |
2459 | void movw_im(int imm, int offset, RegisterID base, RegisterID index, int scale) |
2460 | { |
2461 | m_formatter.prefix(PRE_OPERAND_SIZE); |
2462 | m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, index, scale, offset); |
2463 | m_formatter.immediate16(imm); |
2464 | } |
2465 | |
2466 | void movl_EAXm(const void* addr) |
2467 | { |
2468 | m_formatter.oneByteOp(OP_MOV_OvEAX); |
2469 | #if CPU(X86_64) |
2470 | m_formatter.immediate64(reinterpret_cast<int64_t>(addr)); |
2471 | #else |
2472 | m_formatter.immediate32(reinterpret_cast<int>(addr)); |
2473 | #endif |
2474 | } |
2475 | |
2476 | void movl_mr(uint32_t addr, RegisterID dst) |
2477 | { |
2478 | m_formatter.oneByteOpAddr(OP_MOV_GvEv, dst, addr); |
2479 | } |
2480 | |
2481 | void movl_rm(RegisterID src, uint32_t addr) |
2482 | { |
2483 | m_formatter.oneByteOpAddr(OP_MOV_EvGv, src, addr); |
2484 | } |
2485 | |
2486 | #if CPU(X86_64) |
2487 | void movq_rr(RegisterID src, RegisterID dst) |
2488 | { |
2489 | m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst); |
2490 | } |
2491 | |
2492 | void movq_rm(RegisterID src, int offset, RegisterID base) |
2493 | { |
2494 | m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset); |
2495 | } |
2496 | |
2497 | void movq_rm_disp32(RegisterID src, int offset, RegisterID base) |
2498 | { |
2499 | m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset); |
2500 | } |
2501 | |
2502 | void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
2503 | { |
2504 | m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset); |
2505 | } |
2506 | |
2507 | void movq_rm(RegisterID src, int offset) |
2508 | { |
2509 | m_formatter.oneByteOp64Addr(OP_MOV_EvGv, src, offset); |
2510 | } |
2511 | |
2512 | void movq_mEAX(const void* addr) |
2513 | { |
2514 | m_formatter.oneByteOp64(OP_MOV_EAXOv); |
2515 | m_formatter.immediate64(reinterpret_cast<int64_t>(addr)); |
2516 | } |
2517 | |
2518 | void movq_EAXm(const void* addr) |
2519 | { |
2520 | m_formatter.oneByteOp64(OP_MOV_OvEAX); |
2521 | m_formatter.immediate64(reinterpret_cast<int64_t>(addr)); |
2522 | } |
2523 | |
2524 | void movq_mr(int offset, RegisterID base, RegisterID dst) |
2525 | { |
2526 | m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset); |
2527 | } |
2528 | |
2529 | void movq_mr_disp32(int offset, RegisterID base, RegisterID dst) |
2530 | { |
2531 | m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset); |
2532 | } |
2533 | |
2534 | void movq_mr_disp8(int offset, RegisterID base, RegisterID dst) |
2535 | { |
2536 | m_formatter.oneByteOp64_disp8(OP_MOV_GvEv, dst, base, offset); |
2537 | } |
2538 | |
2539 | void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
2540 | { |
2541 | m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset); |
2542 | } |
2543 | |
2544 | void movq_mr(uint32_t addr, RegisterID dst) |
2545 | { |
2546 | m_formatter.oneByteOp64Addr(OP_MOV_GvEv, dst, addr); |
2547 | } |
2548 | |
2549 | void movq_i32m(int imm, int offset, RegisterID base) |
2550 | { |
2551 | m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset); |
2552 | m_formatter.immediate32(imm); |
2553 | } |
2554 | |
2555 | void movq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale) |
2556 | { |
2557 | m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, index, scale, offset); |
2558 | m_formatter.immediate32(imm); |
2559 | } |
2560 | |
2561 | void movq_i64r(int64_t imm, RegisterID dst) |
2562 | { |
2563 | m_formatter.oneByteOp64(OP_MOV_EAXIv, dst); |
2564 | m_formatter.immediate64(imm); |
2565 | } |
2566 | |
2567 | void mov_i32r(int32_t imm, RegisterID dst) |
2568 | { |
2569 | m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, dst); |
2570 | m_formatter.immediate32(imm); |
2571 | } |
2572 | |
2573 | void movsxd_rr(RegisterID src, RegisterID dst) |
2574 | { |
2575 | m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src); |
2576 | } |
2577 | #else |
2578 | void movl_mr(const void* addr, RegisterID dst) |
2579 | { |
2580 | if (dst == X86Registers::eax) |
2581 | movl_mEAX(addr); |
2582 | else |
2583 | m_formatter.oneByteOpAddr(OP_MOV_GvEv, dst, bitwise_cast<uint32_t>(addr)); |
2584 | } |
2585 | |
2586 | void movl_rm(RegisterID src, const void* addr) |
2587 | { |
2588 | if (src == X86Registers::eax) |
2589 | movl_EAXm(addr); |
2590 | else |
2591 | m_formatter.oneByteOpAddr(OP_MOV_EvGv, src, bitwise_cast<uint32_t>(addr)); |
2592 | } |
2593 | |
2594 | void movl_i32m(int imm, const void* addr) |
2595 | { |
2596 | m_formatter.oneByteOpAddr(OP_GROUP11_EvIz, GROUP11_MOV, bitwise_cast<uint32_t>(addr)); |
2597 | m_formatter.immediate32(imm); |
2598 | } |
2599 | #endif |
2600 | |
2601 | void movzwl_mr(int offset, RegisterID base, RegisterID dst) |
2602 | { |
2603 | m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset); |
2604 | } |
2605 | |
2606 | void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
2607 | { |
2608 | m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset); |
2609 | } |
2610 | |
2611 | void movswl_mr(int offset, RegisterID base, RegisterID dst) |
2612 | { |
2613 | m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, offset); |
2614 | } |
2615 | |
2616 | void movswl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
2617 | { |
2618 | m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, index, scale, offset); |
2619 | } |
2620 | |
2621 | void movzbl_mr(int offset, RegisterID base, RegisterID dst) |
2622 | { |
2623 | m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, offset); |
2624 | } |
2625 | |
2626 | void movzbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
2627 | { |
2628 | m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, index, scale, offset); |
2629 | } |
2630 | |
2631 | #if !CPU(X86_64) |
2632 | void movzbl_mr(const void* address, RegisterID dst) |
2633 | { |
2634 | m_formatter.twoByteOpAddr(OP2_MOVZX_GvEb, dst, bitwise_cast<uint32_t>(address)); |
2635 | } |
2636 | #endif |
2637 | |
2638 | void movsbl_mr(int offset, RegisterID base, RegisterID dst) |
2639 | { |
2640 | m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, offset); |
2641 | } |
2642 | |
2643 | void movsbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
2644 | { |
2645 | m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, index, scale, offset); |
2646 | } |
2647 | |
2648 | void movzbl_rr(RegisterID src, RegisterID dst) |
2649 | { |
2650 | // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register |
2651 | // is in the range ESP-EDI, and the src would not have required a REX). Unneeded |
2652 | // REX prefixes are defined to be silently ignored by the processor. |
2653 | m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src); |
2654 | } |
2655 | |
2656 | void movsbl_rr(RegisterID src, RegisterID dst) |
2657 | { |
2658 | m_formatter.twoByteOp8(OP2_MOVSX_GvEb, dst, src); |
2659 | } |
2660 | |
2661 | void movzwl_rr(RegisterID src, RegisterID dst) |
2662 | { |
2663 | m_formatter.twoByteOp8(OP2_MOVZX_GvEw, dst, src); |
2664 | } |
2665 | |
2666 | void movswl_rr(RegisterID src, RegisterID dst) |
2667 | { |
2668 | m_formatter.twoByteOp8(OP2_MOVSX_GvEw, dst, src); |
2669 | } |
2670 | |
2671 | void cmovl_rr(Condition cond, RegisterID src, RegisterID dst) |
2672 | { |
2673 | m_formatter.twoByteOp(cmovcc(cond), dst, src); |
2674 | } |
2675 | |
2676 | void cmovl_mr(Condition cond, int offset, RegisterID base, RegisterID dst) |
2677 | { |
2678 | m_formatter.twoByteOp(cmovcc(cond), dst, base, offset); |
2679 | } |
2680 | |
2681 | void cmovl_mr(Condition cond, int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
2682 | { |
2683 | m_formatter.twoByteOp(cmovcc(cond), dst, base, index, scale, offset); |
2684 | } |
2685 | |
2686 | void cmovel_rr(RegisterID src, RegisterID dst) |
2687 | { |
2688 | m_formatter.twoByteOp(cmovcc(ConditionE), dst, src); |
2689 | } |
2690 | |
2691 | void cmovnel_rr(RegisterID src, RegisterID dst) |
2692 | { |
2693 | m_formatter.twoByteOp(cmovcc(ConditionNE), dst, src); |
2694 | } |
2695 | |
2696 | void cmovpl_rr(RegisterID src, RegisterID dst) |
2697 | { |
2698 | m_formatter.twoByteOp(cmovcc(ConditionP), dst, src); |
2699 | } |
2700 | |
2701 | void cmovnpl_rr(RegisterID src, RegisterID dst) |
2702 | { |
2703 | m_formatter.twoByteOp(cmovcc(ConditionNP), dst, src); |
2704 | } |
2705 | |
2706 | #if CPU(X86_64) |
2707 | void cmovq_rr(Condition cond, RegisterID src, RegisterID dst) |
2708 | { |
2709 | m_formatter.twoByteOp64(cmovcc(cond), dst, src); |
2710 | } |
2711 | |
2712 | void cmovq_mr(Condition cond, int offset, RegisterID base, RegisterID dst) |
2713 | { |
2714 | m_formatter.twoByteOp64(cmovcc(cond), dst, base, offset); |
2715 | } |
2716 | |
2717 | void cmovq_mr(Condition cond, int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
2718 | { |
2719 | m_formatter.twoByteOp64(cmovcc(cond), dst, base, index, scale, offset); |
2720 | } |
2721 | |
2722 | void cmoveq_rr(RegisterID src, RegisterID dst) |
2723 | { |
2724 | m_formatter.twoByteOp64(cmovcc(ConditionE), dst, src); |
2725 | } |
2726 | |
2727 | void cmovneq_rr(RegisterID src, RegisterID dst) |
2728 | { |
2729 | m_formatter.twoByteOp64(cmovcc(ConditionNE), dst, src); |
2730 | } |
2731 | |
2732 | void cmovpq_rr(RegisterID src, RegisterID dst) |
2733 | { |
2734 | m_formatter.twoByteOp64(cmovcc(ConditionP), dst, src); |
2735 | } |
2736 | |
2737 | void cmovnpq_rr(RegisterID src, RegisterID dst) |
2738 | { |
2739 | m_formatter.twoByteOp64(cmovcc(ConditionNP), dst, src); |
2740 | } |
2741 | #else |
2742 | void cmovl_mr(Condition cond, const void* addr, RegisterID dst) |
2743 | { |
2744 | m_formatter.twoByteOpAddr(cmovcc(cond), dst, bitwise_cast<uint32_t>(addr)); |
2745 | } |
2746 | #endif |
2747 | |
2748 | void leal_mr(int offset, RegisterID base, RegisterID dst) |
2749 | { |
2750 | m_formatter.oneByteOp(OP_LEA, dst, base, offset); |
2751 | } |
2752 | |
2753 | void leal_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
2754 | { |
2755 | m_formatter.oneByteOp(OP_LEA, dst, base, index, scale, offset); |
2756 | } |
2757 | |
2758 | #if CPU(X86_64) |
2759 | void leaq_mr(int offset, RegisterID base, RegisterID dst) |
2760 | { |
2761 | m_formatter.oneByteOp64(OP_LEA, dst, base, offset); |
2762 | } |
2763 | |
2764 | void leaq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) |
2765 | { |
2766 | m_formatter.oneByteOp64(OP_LEA, dst, base, index, scale, offset); |
2767 | } |
2768 | #endif |
2769 | |
2770 | // Flow control: |
2771 | |
2772 | AssemblerLabel call() |
2773 | { |
2774 | m_formatter.oneByteOp(OP_CALL_rel32); |
2775 | return m_formatter.immediateRel32(); |
2776 | } |
2777 | |
2778 | AssemblerLabel call(RegisterID dst) |
2779 | { |
2780 | m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst); |
2781 | return m_formatter.label(); |
2782 | } |
2783 | |
2784 | void call_m(int offset, RegisterID base) |
2785 | { |
2786 | m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset); |
2787 | } |
2788 | |
2789 | AssemblerLabel jmp() |
2790 | { |
2791 | m_formatter.oneByteOp(OP_JMP_rel32); |
2792 | return m_formatter.immediateRel32(); |
2793 | } |
2794 | |
    // Return an AssemblerLabel so we have a label to the jump, so that we can use this
    // to make a tail-recursive call on x86-64. The MacroAssembler
    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
2798 | AssemblerLabel jmp_r(RegisterID dst) |
2799 | { |
2800 | m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst); |
2801 | return m_formatter.label(); |
2802 | } |
2803 | |
2804 | void jmp_m(int offset, RegisterID base) |
2805 | { |
2806 | m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset); |
2807 | } |
2808 | |
2809 | void jmp_m(int offset, RegisterID base, RegisterID index, int scale) |
2810 | { |
2811 | m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, index, scale, offset); |
2812 | } |
2813 | |
2814 | #if !CPU(X86_64) |
2815 | void jmp_m(const void* address) |
2816 | { |
2817 | m_formatter.oneByteOpAddr(OP_GROUP5_Ev, GROUP5_OP_JMPN, bitwise_cast<uint32_t>(address)); |
2818 | } |
2819 | #endif |
2820 | |
2821 | AssemblerLabel jne() |
2822 | { |
2823 | m_formatter.twoByteOp(jccRel32(ConditionNE)); |
2824 | return m_formatter.immediateRel32(); |
2825 | } |
2826 | |
2827 | AssemblerLabel jnz() |
2828 | { |
2829 | return jne(); |
2830 | } |
2831 | |
2832 | AssemblerLabel je() |
2833 | { |
2834 | m_formatter.twoByteOp(jccRel32(ConditionE)); |
2835 | return m_formatter.immediateRel32(); |
2836 | } |
2837 | |
2838 | AssemblerLabel jz() |
2839 | { |
2840 | return je(); |
2841 | } |
2842 | |
2843 | AssemblerLabel jl() |
2844 | { |
2845 | m_formatter.twoByteOp(jccRel32(ConditionL)); |
2846 | return m_formatter.immediateRel32(); |
2847 | } |
2848 | |
2849 | AssemblerLabel jb() |
2850 | { |
2851 | m_formatter.twoByteOp(jccRel32(ConditionB)); |
2852 | return m_formatter.immediateRel32(); |
2853 | } |
2854 | |
2855 | AssemblerLabel jle() |
2856 | { |
2857 | m_formatter.twoByteOp(jccRel32(ConditionLE)); |
2858 | return m_formatter.immediateRel32(); |
2859 | } |
2860 | |
2861 | AssemblerLabel jbe() |
2862 | { |
2863 | m_formatter.twoByteOp(jccRel32(ConditionBE)); |
2864 | return m_formatter.immediateRel32(); |
2865 | } |
2866 | |
2867 | AssemblerLabel jge() |
2868 | { |
2869 | m_formatter.twoByteOp(jccRel32(ConditionGE)); |
2870 | return m_formatter.immediateRel32(); |
2871 | } |
2872 | |
2873 | AssemblerLabel jg() |
2874 | { |
2875 | m_formatter.twoByteOp(jccRel32(ConditionG)); |
2876 | return m_formatter.immediateRel32(); |
2877 | } |
2878 | |
2879 | AssemblerLabel ja() |
2880 | { |
2881 | m_formatter.twoByteOp(jccRel32(ConditionA)); |
2882 | return m_formatter.immediateRel32(); |
2883 | } |
2884 | |
2885 | AssemblerLabel jae() |
2886 | { |
2887 | m_formatter.twoByteOp(jccRel32(ConditionAE)); |
2888 | return m_formatter.immediateRel32(); |
2889 | } |
2890 | |
2891 | AssemblerLabel jo() |
2892 | { |
2893 | m_formatter.twoByteOp(jccRel32(ConditionO)); |
2894 | return m_formatter.immediateRel32(); |
2895 | } |
2896 | |
2897 | AssemblerLabel jnp() |
2898 | { |
2899 | m_formatter.twoByteOp(jccRel32(ConditionNP)); |
2900 | return m_formatter.immediateRel32(); |
2901 | } |
2902 | |
2903 | AssemblerLabel jp() |
2904 | { |
2905 | m_formatter.twoByteOp(jccRel32(ConditionP)); |
2906 | return m_formatter.immediateRel32(); |
2907 | } |
2908 | |
2909 | AssemblerLabel js() |
2910 | { |
2911 | m_formatter.twoByteOp(jccRel32(ConditionS)); |
2912 | return m_formatter.immediateRel32(); |
2913 | } |
2914 | |
2915 | AssemblerLabel jCC(Condition cond) |
2916 | { |
2917 | m_formatter.twoByteOp(jccRel32(cond)); |
2918 | return m_formatter.immediateRel32(); |
2919 | } |
2920 | |
2921 | // SSE operations: |
2922 | |
2923 | void addsd_rr(XMMRegisterID src, XMMRegisterID dst) |
2924 | { |
2925 | m_formatter.prefix(PRE_SSE_F2); |
2926 | m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src); |
2927 | } |
2928 | |
2929 | void vaddsd_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst) |
2930 | { |
2931 | m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F2, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b); |
2932 | } |
2933 | |
2934 | void addsd_mr(int offset, RegisterID base, XMMRegisterID dst) |
2935 | { |
2936 | m_formatter.prefix(PRE_SSE_F2); |
2937 | m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset); |
2938 | } |
2939 | |
2940 | void addsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) |
2941 | { |
2942 | m_formatter.prefix(PRE_SSE_F2); |
2943 | m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, dst, base, index, scale, offset); |
2944 | } |
2945 | |
2946 | void vaddsd_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst) |
2947 | { |
2948 | m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset); |
2949 | } |
2950 | |
2951 | void vaddsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst) |
2952 | { |
2953 | m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale); |
2954 | } |
2955 | |
2956 | void addss_rr(XMMRegisterID src, XMMRegisterID dst) |
2957 | { |
2958 | m_formatter.prefix(PRE_SSE_F3); |
2959 | m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src); |
2960 | } |
2961 | |
2962 | void vaddss_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst) |
2963 | { |
2964 | m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F3, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b); |
2965 | } |
2966 | |
2967 | void addss_mr(int offset, RegisterID base, XMMRegisterID dst) |
2968 | { |
2969 | m_formatter.prefix(PRE_SSE_F3); |
2970 | m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset); |
2971 | } |
2972 | |
2973 | void addss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) |
2974 | { |
2975 | m_formatter.prefix(PRE_SSE_F3); |
2976 | m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, dst, base, index, scale, offset); |
2977 | } |
2978 | |
2979 | void vaddss_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst) |
2980 | { |
2981 | m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset); |
2982 | } |
2983 | |
2984 | void vaddss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst) |
2985 | { |
2986 | m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale); |
2987 | } |
2988 | |
2989 | #if !CPU(X86_64) |
2990 | void addsd_mr(const void* address, XMMRegisterID dst) |
2991 | { |
2992 | m_formatter.prefix(PRE_SSE_F2); |
2993 | m_formatter.twoByteOpAddr(OP2_ADDSD_VsdWsd, (RegisterID)dst, bitwise_cast<uint32_t>(address)); |
2994 | } |
2995 | #endif |
2996 | |
2997 | void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst) |
2998 | { |
2999 | m_formatter.prefix(PRE_SSE_F2); |
3000 | m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src); |
3001 | } |
3002 | |
3003 | void cvtsi2ss_rr(RegisterID src, XMMRegisterID dst) |
3004 | { |
3005 | m_formatter.prefix(PRE_SSE_F3); |
3006 | m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src); |
3007 | } |
3008 | |
3009 | #if CPU(X86_64) |
3010 | void cvtsi2sdq_rr(RegisterID src, XMMRegisterID dst) |
3011 | { |
3012 | m_formatter.prefix(PRE_SSE_F2); |
3013 | m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src); |
3014 | } |
3015 | |
3016 | void cvtsi2ssq_rr(RegisterID src, XMMRegisterID dst) |
3017 | { |
3018 | m_formatter.prefix(PRE_SSE_F3); |
3019 | m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src); |
3020 | } |
3021 | |
3022 | void cvtsi2sdq_mr(int offset, RegisterID base, XMMRegisterID dst) |
3023 | { |
3024 | m_formatter.prefix(PRE_SSE_F2); |
3025 | m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset); |
3026 | } |
3027 | |
3028 | void cvtsi2ssq_mr(int offset, RegisterID base, XMMRegisterID dst) |
3029 | { |
3030 | m_formatter.prefix(PRE_SSE_F3); |
3031 | m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset); |
3032 | } |
3033 | #endif |
3034 | |
3035 | void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst) |
3036 | { |
3037 | m_formatter.prefix(PRE_SSE_F2); |
3038 | m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset); |
3039 | } |
3040 | |
3041 | void cvtsi2ss_mr(int offset, RegisterID base, XMMRegisterID dst) |
3042 | { |
3043 | m_formatter.prefix(PRE_SSE_F3); |
3044 | m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset); |
3045 | } |
3046 | |
3047 | #if !CPU(X86_64) |
3048 | void cvtsi2sd_mr(const void* address, XMMRegisterID dst) |
3049 | { |
3050 | m_formatter.prefix(PRE_SSE_F2); |
3051 | m_formatter.twoByteOpAddr(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, bitwise_cast<uint32_t>(address)); |
3052 | } |
3053 | #endif |
3054 | |
3055 | void cvttsd2si_rr(XMMRegisterID src, RegisterID dst) |
3056 | { |
3057 | m_formatter.prefix(PRE_SSE_F2); |
3058 | m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src); |
3059 | } |
3060 | |
3061 | void cvttss2si_rr(XMMRegisterID src, RegisterID dst) |
3062 | { |
3063 | m_formatter.prefix(PRE_SSE_F3); |
3064 | m_formatter.twoByteOp(OP2_CVTTSS2SI_GdWsd, dst, (RegisterID)src); |
3065 | } |
3066 | |
3067 | #if CPU(X86_64) |
3068 | void cvttss2siq_rr(XMMRegisterID src, RegisterID dst) |
3069 | { |
3070 | m_formatter.prefix(PRE_SSE_F3); |
3071 | m_formatter.twoByteOp64(OP2_CVTTSS2SI_GdWsd, dst, (RegisterID)src); |
3072 | } |
3073 | #endif |
3074 | |
3075 | void cvtsd2ss_rr(XMMRegisterID src, XMMRegisterID dst) |
3076 | { |
3077 | m_formatter.prefix(PRE_SSE_F2); |
3078 | m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, (RegisterID)src); |
3079 | } |
3080 | |
3081 | void cvtsd2ss_mr(int offset, RegisterID base, XMMRegisterID dst) |
3082 | { |
3083 | m_formatter.prefix(PRE_SSE_F2); |
3084 | m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, base, offset); |
3085 | } |
3086 | |
3087 | void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst) |
3088 | { |
3089 | m_formatter.prefix(PRE_SSE_F3); |
3090 | m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src); |
3091 | } |
3092 | |
3093 | void cvtss2sd_mr(int offset, RegisterID base, XMMRegisterID dst) |
3094 | { |
3095 | m_formatter.prefix(PRE_SSE_F3); |
3096 | m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, base, offset); |
3097 | } |
3098 | |
3099 | #if CPU(X86_64) |
3100 | void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst) |
3101 | { |
3102 | m_formatter.prefix(PRE_SSE_F2); |
3103 | m_formatter.twoByteOp64(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src); |
3104 | } |
3105 | #endif |
3106 | |
3107 | void movd_rr(XMMRegisterID src, RegisterID dst) |
3108 | { |
3109 | m_formatter.prefix(PRE_SSE_66); |
3110 | m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst); |
3111 | } |
3112 | |
3113 | void movd_rr(RegisterID src, XMMRegisterID dst) |
3114 | { |
3115 | m_formatter.prefix(PRE_SSE_66); |
3116 | m_formatter.twoByteOp(OP2_MOVD_VdEd, (RegisterID)dst, src); |
3117 | } |
3118 | |
3119 | #if CPU(X86_64) |
3120 | void movmskpd_rr(XMMRegisterID src, RegisterID dst) |
3121 | { |
3122 | m_formatter.prefix(PRE_SSE_66); |
3123 | m_formatter.twoByteOp64(OP2_MOVMSKPD_VdEd, dst, (RegisterID)src); |
3124 | } |
3125 | |
3126 | void movq_rr(XMMRegisterID src, RegisterID dst) |
3127 | { |
3128 | m_formatter.prefix(PRE_SSE_66); |
3129 | m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst); |
3130 | } |
3131 | |
3132 | void movq_rr(RegisterID src, XMMRegisterID dst) |
3133 | { |
3134 | m_formatter.prefix(PRE_SSE_66); |
3135 | m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src); |
3136 | } |
3137 | #endif |
3138 | |
3139 | void movapd_rr(XMMRegisterID src, XMMRegisterID dst) |
3140 | { |
3141 | m_formatter.prefix(PRE_SSE_66); |
3142 | m_formatter.twoByteOp(OP2_MOVAPD_VpdWpd, (RegisterID)dst, (RegisterID)src); |
3143 | } |
3144 | |
3145 | void movaps_rr(XMMRegisterID src, XMMRegisterID dst) |
3146 | { |
3147 | m_formatter.twoByteOp(OP2_MOVAPS_VpdWpd, (RegisterID)dst, (RegisterID)src); |
3148 | } |
3149 | |
3150 | void movsd_rr(XMMRegisterID src, XMMRegisterID dst) |
3151 | { |
3152 | m_formatter.prefix(PRE_SSE_F2); |
3153 | m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src); |
3154 | } |
3155 | |
3156 | void movsd_rm(XMMRegisterID src, int offset, RegisterID base) |
3157 | { |
3158 | m_formatter.prefix(PRE_SSE_F2); |
3159 | m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset); |
3160 | } |
3161 | |
3162 | void movsd_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
3163 | { |
3164 | m_formatter.prefix(PRE_SSE_F2); |
3165 | m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset); |
3166 | } |
3167 | |
3168 | void movss_rm(XMMRegisterID src, int offset, RegisterID base) |
3169 | { |
3170 | m_formatter.prefix(PRE_SSE_F3); |
3171 | m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset); |
3172 | } |
3173 | |
3174 | void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
3175 | { |
3176 | m_formatter.prefix(PRE_SSE_F3); |
3177 | m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset); |
3178 | } |
3179 | |
3180 | void movsd_mr(int offset, RegisterID base, XMMRegisterID dst) |
3181 | { |
3182 | m_formatter.prefix(PRE_SSE_F2); |
3183 | m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset); |
3184 | } |
3185 | |
3186 | void movsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) |
3187 | { |
3188 | m_formatter.prefix(PRE_SSE_F2); |
3189 | m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset); |
3190 | } |
3191 | |
3192 | void movss_mr(int offset, RegisterID base, XMMRegisterID dst) |
3193 | { |
3194 | m_formatter.prefix(PRE_SSE_F3); |
3195 | m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset); |
3196 | } |
3197 | |
3198 | void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) |
3199 | { |
3200 | m_formatter.prefix(PRE_SSE_F3); |
3201 | m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset); |
3202 | } |
3203 | |
3204 | #if !CPU(X86_64) |
3205 | void movsd_mr(const void* address, XMMRegisterID dst) |
3206 | { |
3207 | m_formatter.prefix(PRE_SSE_F2); |
3208 | m_formatter.twoByteOpAddr(OP2_MOVSD_VsdWsd, (RegisterID)dst, bitwise_cast<uint32_t>(address)); |
3209 | } |
3210 | void movsd_rm(XMMRegisterID src, const void* address) |
3211 | { |
3212 | m_formatter.prefix(PRE_SSE_F2); |
3213 | m_formatter.twoByteOpAddr(OP2_MOVSD_WsdVsd, (RegisterID)src, bitwise_cast<uint32_t>(address)); |
3214 | } |
3215 | void movss_mr(const void* address, XMMRegisterID dst) |
3216 | { |
3217 | m_formatter.prefix(PRE_SSE_F3); |
3218 | m_formatter.twoByteOpAddr(OP2_MOVSD_VsdWsd, (RegisterID)dst, bitwise_cast<uint32_t>(address)); |
3219 | } |
3220 | void movss_rm(XMMRegisterID src, const void* address) |
3221 | { |
3222 | m_formatter.prefix(PRE_SSE_F3); |
3223 | m_formatter.twoByteOpAddr(OP2_MOVSD_WsdVsd, (RegisterID)src, bitwise_cast<uint32_t>(address)); |
3224 | } |
3225 | #endif |
3226 | |
3227 | void mulsd_rr(XMMRegisterID src, XMMRegisterID dst) |
3228 | { |
3229 | m_formatter.prefix(PRE_SSE_F2); |
3230 | m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src); |
3231 | } |
3232 | |
3233 | void vmulsd_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst) |
3234 | { |
3235 | m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F2, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b); |
3236 | } |
3237 | |
3238 | void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst) |
3239 | { |
3240 | m_formatter.prefix(PRE_SSE_F2); |
3241 | m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset); |
3242 | } |
3243 | |
3244 | void mulsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) |
3245 | { |
3246 | m_formatter.prefix(PRE_SSE_F2); |
3247 | m_formatter.twoByteOp(OP2_MULSD_VsdWsd, dst, base, index, scale, offset); |
3248 | } |
3249 | |
3250 | void vmulsd_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst) |
3251 | { |
3252 | m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset); |
3253 | } |
3254 | |
3255 | void vmulsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst) |
3256 | { |
3257 | m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale); |
3258 | } |
3259 | |
3260 | void mulss_rr(XMMRegisterID src, XMMRegisterID dst) |
3261 | { |
3262 | m_formatter.prefix(PRE_SSE_F3); |
3263 | m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src); |
3264 | } |
3265 | |
3266 | void vmulss_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst) |
3267 | { |
3268 | m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F3, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b); |
3269 | } |
3270 | |
3271 | void mulss_mr(int offset, RegisterID base, XMMRegisterID dst) |
3272 | { |
3273 | m_formatter.prefix(PRE_SSE_F3); |
3274 | m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset); |
3275 | } |
3276 | |
3277 | void mulss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) |
3278 | { |
3279 | m_formatter.prefix(PRE_SSE_F3); |
3280 | m_formatter.twoByteOp(OP2_MULSD_VsdWsd, dst, base, index, scale, offset); |
3281 | } |
3282 | |
3283 | void vmulss_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst) |
3284 | { |
3285 | m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset); |
3286 | } |
3287 | |
3288 | void vmulss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst) |
3289 | { |
3290 | m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale); |
3291 | } |
3292 | |
3293 | void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst) |
3294 | { |
3295 | m_formatter.prefix(PRE_SSE_66); |
3296 | m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src); |
3297 | m_formatter.immediate8(whichWord); |
3298 | } |
3299 | |
3300 | void psllq_i8r(int imm, XMMRegisterID dst) |
3301 | { |
3302 | m_formatter.prefix(PRE_SSE_66); |
3303 | m_formatter.twoByteOp8(OP2_PSLLQ_UdqIb, GROUP14_OP_PSLLQ, (RegisterID)dst); |
3304 | m_formatter.immediate8(imm); |
3305 | } |
3306 | |
3307 | void psrlq_i8r(int imm, XMMRegisterID dst) |
3308 | { |
3309 | m_formatter.prefix(PRE_SSE_66); |
3310 | m_formatter.twoByteOp8(OP2_PSRLQ_UdqIb, GROUP14_OP_PSRLQ, (RegisterID)dst); |
3311 | m_formatter.immediate8(imm); |
3312 | } |
3313 | |
3314 | void por_rr(XMMRegisterID src, XMMRegisterID dst) |
3315 | { |
3316 | m_formatter.prefix(PRE_SSE_66); |
3317 | m_formatter.twoByteOp(OP2_POR_VdqWdq, (RegisterID)dst, (RegisterID)src); |
3318 | } |
3319 | |
3320 | void subsd_rr(XMMRegisterID src, XMMRegisterID dst) |
3321 | { |
3322 | m_formatter.prefix(PRE_SSE_F2); |
3323 | m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src); |
3324 | } |
3325 | |
3326 | void vsubsd_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst) |
3327 | { |
3328 | m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b); |
3329 | } |
3330 | |
3331 | void subsd_mr(int offset, RegisterID base, XMMRegisterID dst) |
3332 | { |
3333 | m_formatter.prefix(PRE_SSE_F2); |
3334 | m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset); |
3335 | } |
3336 | |
3337 | void subsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) |
3338 | { |
3339 | m_formatter.prefix(PRE_SSE_F2); |
3340 | m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, dst, base, index, scale, offset); |
3341 | } |
3342 | |
3343 | void vsubsd_mr(XMMRegisterID b, int offset, RegisterID base, XMMRegisterID dst) |
3344 | { |
3345 | m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset); |
3346 | } |
3347 | |
3348 | void vsubsd_mr(XMMRegisterID b, int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) |
3349 | { |
3350 | m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale); |
3351 | } |
3352 | |
3353 | void subss_rr(XMMRegisterID src, XMMRegisterID dst) |
3354 | { |
3355 | m_formatter.prefix(PRE_SSE_F3); |
3356 | m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src); |
3357 | } |
3358 | |
3359 | void vsubss_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst) |
3360 | { |
3361 | m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b); |
3362 | } |
3363 | |
3364 | void subss_mr(int offset, RegisterID base, XMMRegisterID dst) |
3365 | { |
3366 | m_formatter.prefix(PRE_SSE_F3); |
3367 | m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset); |
3368 | } |
3369 | |
3370 | void subss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) |
3371 | { |
3372 | m_formatter.prefix(PRE_SSE_F3); |
m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, index, scale, offset);
3374 | } |
3375 | |
3376 | void vsubss_mr(XMMRegisterID b, int offset, RegisterID base, XMMRegisterID dst) |
3377 | { |
3378 | m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset); |
3379 | } |
3380 | |
3381 | void vsubss_mr(XMMRegisterID b, int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) |
3382 | { |
3383 | m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale); |
3384 | } |
3385 | |
3386 | void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst) |
3387 | { |
3388 | m_formatter.prefix(PRE_SSE_66); |
3389 | m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src); |
3390 | } |
3391 | |
3392 | void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst) |
3393 | { |
3394 | m_formatter.prefix(PRE_SSE_66); |
3395 | m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset); |
3396 | } |
3397 | |
3398 | void ucomiss_rr(XMMRegisterID src, XMMRegisterID dst) |
3399 | { |
3400 | m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src); |
3401 | } |
3402 | |
3403 | void ucomiss_mr(int offset, RegisterID base, XMMRegisterID dst) |
3404 | { |
3405 | m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset); |
3406 | } |
3407 | |
3408 | void divsd_rr(XMMRegisterID src, XMMRegisterID dst) |
3409 | { |
3410 | m_formatter.prefix(PRE_SSE_F2); |
3411 | m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src); |
3412 | } |
3413 | |
3414 | void divsd_mr(int offset, RegisterID base, XMMRegisterID dst) |
3415 | { |
3416 | m_formatter.prefix(PRE_SSE_F2); |
3417 | m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset); |
3418 | } |
3419 | |
3420 | void divss_rr(XMMRegisterID src, XMMRegisterID dst) |
3421 | { |
3422 | m_formatter.prefix(PRE_SSE_F3); |
3423 | m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src); |
3424 | } |
3425 | |
3426 | void divss_mr(int offset, RegisterID base, XMMRegisterID dst) |
3427 | { |
3428 | m_formatter.prefix(PRE_SSE_F3); |
3429 | m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset); |
3430 | } |
3431 | |
3432 | void andps_rr(XMMRegisterID src, XMMRegisterID dst) |
3433 | { |
3434 | m_formatter.twoByteOp(OP2_ANDPS_VpdWpd, (RegisterID)dst, (RegisterID)src); |
3435 | } |
3436 | |
3437 | void orps_rr(XMMRegisterID src, XMMRegisterID dst) |
3438 | { |
3439 | m_formatter.twoByteOp(OP2_ORPS_VpdWpd, (RegisterID)dst, (RegisterID)src); |
3440 | } |
3441 | |
3442 | void xorps_rr(XMMRegisterID src, XMMRegisterID dst) |
3443 | { |
3444 | m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src); |
3445 | } |
3446 | |
3447 | void xorpd_rr(XMMRegisterID src, XMMRegisterID dst) |
3448 | { |
3449 | if (src == dst) { |
3450 | xorps_rr(src, dst); |
3451 | return; |
3452 | } |
3453 | m_formatter.prefix(PRE_SSE_66); |
3454 | m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src); |
3455 | } |
3456 | |
3457 | void andnpd_rr(XMMRegisterID src, XMMRegisterID dst) |
3458 | { |
3459 | m_formatter.prefix(PRE_SSE_66); |
3460 | m_formatter.twoByteOp(OP2_ANDNPD_VpdWpd, (RegisterID)dst, (RegisterID)src); |
3461 | } |
3462 | |
3463 | void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst) |
3464 | { |
3465 | m_formatter.prefix(PRE_SSE_F2); |
3466 | m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src); |
3467 | } |
3468 | |
3469 | void sqrtsd_mr(int offset, RegisterID base, XMMRegisterID dst) |
3470 | { |
3471 | m_formatter.prefix(PRE_SSE_F2); |
3472 | m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, base, offset); |
3473 | } |
3474 | |
3475 | void sqrtss_rr(XMMRegisterID src, XMMRegisterID dst) |
3476 | { |
3477 | m_formatter.prefix(PRE_SSE_F3); |
3478 | m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src); |
3479 | } |
3480 | |
3481 | void sqrtss_mr(int offset, RegisterID base, XMMRegisterID dst) |
3482 | { |
3483 | m_formatter.prefix(PRE_SSE_F3); |
3484 | m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, base, offset); |
3485 | } |
3486 | |
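// Rounding control values for the SSE4.1 ROUNDSS/ROUNDSD immediate. The value is emitted
// verbatim as the instruction's imm8: bits 1:0 select the rounding mode, and the higher
// bits are left clear so the immediate (rather than MXCSR.RC) controls the rounding.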
3487 | enum class RoundingType : uint8_t { |
3488 | ToNearestWithTiesToEven = 0, |
3489 | TowardNegativeInfiniti = 1, |
3490 | TowardInfiniti = 2, |
3491 | TowardZero = 3 |
3492 | }; |
3493 | |
3494 | void roundss_rr(XMMRegisterID src, XMMRegisterID dst, RoundingType rounding) |
3495 | { |
3496 | m_formatter.prefix(PRE_SSE_66); |
3497 | m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSS_VssWssIb, (RegisterID)dst, (RegisterID)src); |
3498 | m_formatter.immediate8(static_cast<uint8_t>(rounding)); |
3499 | } |
3500 | |
3501 | void roundss_mr(int offset, RegisterID base, XMMRegisterID dst, RoundingType rounding) |
3502 | { |
3503 | m_formatter.prefix(PRE_SSE_66); |
3504 | m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSS_VssWssIb, (RegisterID)dst, base, offset); |
3505 | m_formatter.immediate8(static_cast<uint8_t>(rounding)); |
3506 | } |
3507 | |
3508 | void roundsd_rr(XMMRegisterID src, XMMRegisterID dst, RoundingType rounding) |
3509 | { |
3510 | m_formatter.prefix(PRE_SSE_66); |
3511 | m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSD_VsdWsdIb, (RegisterID)dst, (RegisterID)src); |
3512 | m_formatter.immediate8(static_cast<uint8_t>(rounding)); |
3513 | } |
3514 | |
3515 | void roundsd_mr(int offset, RegisterID base, XMMRegisterID dst, RoundingType rounding) |
3516 | { |
3517 | m_formatter.prefix(PRE_SSE_66); |
3518 | m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSD_VsdWsdIb, (RegisterID)dst, base, offset); |
3519 | m_formatter.immediate8(static_cast<uint8_t>(rounding)); |
3520 | } |
3521 | |
3522 | // Misc instructions: |
3523 | |
3524 | void int3() |
3525 | { |
3526 | m_formatter.oneByteOp(OP_INT3); |
3527 | } |
3528 | |
3529 | static bool isInt3(void* address) |
3530 | { |
3531 | uint8_t candidateInstruction = *reinterpret_cast<uint8_t*>(address); |
3532 | return candidateInstruction == OP_INT3; |
3533 | } |
3534 | |
3535 | void ret() |
3536 | { |
3537 | m_formatter.oneByteOp(OP_RET); |
3538 | } |
3539 | |
3540 | void predictNotTaken() |
3541 | { |
3542 | m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN); |
3543 | } |
3544 | |
3545 | void lock() |
3546 | { |
3547 | m_formatter.prefix(PRE_LOCK); |
3548 | } |
3549 | |
3550 | // Causes the memory access in the next instruction to be offset by %gs. Usually you use |
3551 | // this with a 32-bit absolute address load. That "address" ends up being the offset to |
3552 | // %gs. This prefix is ignored by lea. Getting the value of %gs is hard - you can pretty |
3553 | // much just use it as a secret offset. |
3554 | void gs() |
3555 | { |
3556 | m_formatter.prefix(PRE_GS); |
3557 | } |
3558 | |
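// CMPXCHG compares the accumulator (AL/AX/EAX/RAX, depending on operand size) with the
// destination and, if they are equal, stores src there; pair with the lock() prefix above
// to get an atomic compare-and-swap.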
3559 | void cmpxchgb_rm(RegisterID src, int offset, RegisterID base) |
3560 | { |
3561 | m_formatter.twoByteOp8(OP2_CMPXCHGb, src, base, offset); |
3562 | } |
3563 | |
3564 | void cmpxchgb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
3565 | { |
3566 | m_formatter.twoByteOp8(OP2_CMPXCHGb, src, base, index, scale, offset); |
3567 | } |
3568 | |
3569 | void cmpxchgw_rm(RegisterID src, int offset, RegisterID base) |
3570 | { |
3571 | m_formatter.prefix(PRE_OPERAND_SIZE); |
3572 | m_formatter.twoByteOp(OP2_CMPXCHG, src, base, offset); |
3573 | } |
3574 | |
3575 | void cmpxchgw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
3576 | { |
3577 | m_formatter.prefix(PRE_OPERAND_SIZE); |
3578 | m_formatter.twoByteOp(OP2_CMPXCHG, src, base, index, scale, offset); |
3579 | } |
3580 | |
3581 | void cmpxchgl_rm(RegisterID src, int offset, RegisterID base) |
3582 | { |
3583 | m_formatter.twoByteOp(OP2_CMPXCHG, src, base, offset); |
3584 | } |
3585 | |
3586 | void cmpxchgl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
3587 | { |
3588 | m_formatter.twoByteOp(OP2_CMPXCHG, src, base, index, scale, offset); |
3589 | } |
3590 | |
3591 | #if CPU(X86_64) |
3592 | void cmpxchgq_rm(RegisterID src, int offset, RegisterID base) |
3593 | { |
3594 | m_formatter.twoByteOp64(OP2_CMPXCHG, src, base, offset); |
3595 | } |
3596 | |
3597 | void cmpxchgq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
3598 | { |
3599 | m_formatter.twoByteOp64(OP2_CMPXCHG, src, base, index, scale, offset); |
3600 | } |
3601 | #endif // CPU(X86_64) |
3602 | |
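// XADD exchanges src with the destination and writes the sum back to the destination;
// pair with the lock() prefix to get an atomic fetch-and-add.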
3603 | void xaddb_rm(RegisterID src, int offset, RegisterID base) |
3604 | { |
3605 | m_formatter.twoByteOp8(OP2_XADDb, src, base, offset); |
3606 | } |
3607 | |
3608 | void xaddb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
3609 | { |
3610 | m_formatter.twoByteOp8(OP2_XADDb, src, base, index, scale, offset); |
3611 | } |
3612 | |
3613 | void xaddw_rm(RegisterID src, int offset, RegisterID base) |
3614 | { |
3615 | m_formatter.prefix(PRE_OPERAND_SIZE); |
3616 | m_formatter.twoByteOp(OP2_XADD, src, base, offset); |
3617 | } |
3618 | |
3619 | void xaddw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
3620 | { |
3621 | m_formatter.prefix(PRE_OPERAND_SIZE); |
3622 | m_formatter.twoByteOp(OP2_XADD, src, base, index, scale, offset); |
3623 | } |
3624 | |
3625 | void xaddl_rm(RegisterID src, int offset, RegisterID base) |
3626 | { |
3627 | m_formatter.twoByteOp(OP2_XADD, src, base, offset); |
3628 | } |
3629 | |
3630 | void xaddl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
3631 | { |
3632 | m_formatter.twoByteOp(OP2_XADD, src, base, index, scale, offset); |
3633 | } |
3634 | |
3635 | #if CPU(X86_64) |
3636 | void xaddq_rm(RegisterID src, int offset, RegisterID base) |
3637 | { |
3638 | m_formatter.twoByteOp64(OP2_XADD, src, base, offset); |
3639 | } |
3640 | |
3641 | void xaddq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) |
3642 | { |
3643 | m_formatter.twoByteOp64(OP2_XADD, src, base, index, scale, offset); |
3644 | } |
3645 | #endif // CPU(X86_64) |
3646 | |
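// Memory fences: lfence orders loads, sfence orders stores, and mfence orders both loads
// and stores with respect to the fence.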
3647 | void lfence() |
3648 | { |
3649 | m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_AE, OP3_LFENCE); |
3650 | } |
3651 | |
3652 | void mfence() |
3653 | { |
3654 | m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_AE, OP3_MFENCE); |
3655 | } |
3656 | |
3657 | void sfence() |
3658 | { |
3659 | m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_AE, OP3_SFENCE); |
3660 | } |
3661 | |
3662 | void rdtsc() |
3663 | { |
3664 | m_formatter.twoByteOp(OP2_RDTSC); |
3665 | } |
3666 | |
3667 | void pause() |
3668 | { |
3669 | m_formatter.prefix(PRE_SSE_F3); |
3670 | m_formatter.oneByteOp(OP_PAUSE); |
3671 | } |
3672 | |
3673 | void cpuid() |
3674 | { |
3675 | m_formatter.twoByteOp(OP2_CPUID); |
3676 | } |
3677 | |
3678 | // Assembler admin methods: |
3679 | |
3680 | size_t codeSize() const |
3681 | { |
3682 | return m_formatter.codeSize(); |
3683 | } |
3684 | |
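// Watchpoint labels reserve room for a later jump replacement: label() below pads with
// nops until it is at least maxJumpReplacementSize() bytes past the most recent
// watchpoint, so the watchpoint site can later be overwritten safely.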
3685 | AssemblerLabel labelForWatchpoint() |
3686 | { |
3687 | AssemblerLabel result = m_formatter.label(); |
3688 | if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint) |
3689 | result = label(); |
3690 | m_indexOfLastWatchpoint = result.m_offset; |
3691 | m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize(); |
3692 | return result; |
3693 | } |
3694 | |
3695 | AssemblerLabel labelIgnoringWatchpoints() |
3696 | { |
3697 | return m_formatter.label(); |
3698 | } |
3699 | |
3700 | AssemblerLabel label() |
3701 | { |
3702 | AssemblerLabel result = m_formatter.label(); |
3703 | while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) { |
3704 | nop(); |
3705 | result = m_formatter.label(); |
3706 | } |
3707 | return result; |
3708 | } |
3709 | |
3710 | AssemblerLabel align(int alignment) |
3711 | { |
3712 | while (!m_formatter.isAligned(alignment)) |
3713 | m_formatter.oneByteOp(OP_HLT); |
3714 | |
3715 | return label(); |
3716 | } |
3717 | |
3718 | // Linking & patching: |
3719 | // |
3720 | // 'link' and 'patch' methods are for use on unprotected code - such as the code |
3721 | // within the AssemblerBuffer, and code being patched by the patch buffer. Once |
3722 | // code has been finalized it is (platform support permitting) within a non- |
// writable region of memory; to modify the code in an execute-only executable
3724 | // pool the 'repatch' and 'relink' methods should be used. |
3725 | |
3726 | void linkJump(AssemblerLabel from, AssemblerLabel to) |
3727 | { |
3728 | ASSERT(from.isSet()); |
3729 | ASSERT(to.isSet()); |
3730 | |
3731 | char* code = reinterpret_cast<char*>(m_formatter.data()); |
3732 | ASSERT(!WTF::unalignedLoad<int32_t>(bitwise_cast<int32_t*>(code + from.m_offset) - 1)); |
3733 | setRel32(code + from.m_offset, code + to.m_offset); |
3734 | } |
3735 | |
3736 | static void linkJump(void* code, AssemblerLabel from, void* to) |
3737 | { |
3738 | ASSERT(from.isSet()); |
3739 | |
3740 | setRel32(reinterpret_cast<char*>(code) + from.m_offset, to); |
3741 | } |
3742 | |
3743 | static void linkCall(void* code, AssemblerLabel from, void* to) |
3744 | { |
3745 | ASSERT(from.isSet()); |
3746 | |
3747 | setRel32(reinterpret_cast<char*>(code) + from.m_offset, to); |
3748 | } |
3749 | |
3750 | static void linkPointer(void* code, AssemblerLabel where, void* value) |
3751 | { |
3752 | ASSERT(where.isSet()); |
3753 | |
3754 | setPointer(reinterpret_cast<char*>(code) + where.m_offset, value); |
3755 | } |
3756 | |
3757 | static void relinkJump(void* from, void* to) |
3758 | { |
3759 | setRel32(from, to); |
3760 | } |
3761 | |
3762 | static void relinkJumpToNop(void* from) |
3763 | { |
3764 | setInt32(from, 0); |
3765 | } |
3766 | |
3767 | static void relinkCall(void* from, void* to) |
3768 | { |
3769 | setRel32(from, to); |
3770 | } |
3771 | |
3772 | static void repatchCompact(void* where, int32_t value) |
3773 | { |
3774 | ASSERT(value >= std::numeric_limits<int8_t>::min()); |
3775 | ASSERT(value <= std::numeric_limits<int8_t>::max()); |
3776 | setInt8(where, value); |
3777 | } |
3778 | |
3779 | static void repatchInt32(void* where, int32_t value) |
3780 | { |
3781 | setInt32(where, value); |
3782 | } |
3783 | |
3784 | static void repatchPointer(void* where, void* value) |
3785 | { |
3786 | setPointer(where, value); |
3787 | } |
3788 | |
3789 | static void* readPointer(void* where) |
3790 | { |
3791 | return WTF::unalignedLoad<void*>(bitwise_cast<void**>(where) - 1); |
3792 | } |
3793 | |
3794 | static void replaceWithHlt(void* instructionStart) |
3795 | { |
3796 | WTF::unalignedStore<uint8_t>(instructionStart, static_cast<uint8_t>(OP_HLT)); |
3797 | } |
3798 | |
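// Overwrites the first five bytes at instructionStart with a rel32 JMP: the OP_JMP_rel32
// opcode byte followed by a 32-bit displacement measured from the end of the five-byte
// jump. This is why maxJumpReplacementSize() and patchableJumpSize() are 5.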
3799 | static void replaceWithJump(void* instructionStart, void* to) |
3800 | { |
3801 | uint8_t* ptr = bitwise_cast<uint8_t*>(instructionStart); |
3802 | uint8_t* dstPtr = bitwise_cast<uint8_t*>(to); |
3803 | intptr_t distance = (intptr_t)(dstPtr - (ptr + 5)); |
3804 | WTF::unalignedStore<uint8_t>(ptr, static_cast<uint8_t>(OP_JMP_rel32)); |
3805 | WTF::unalignedStore<int32_t>(ptr + 1, static_cast<int32_t>(distance)); |
3806 | } |
3807 | |
3808 | static ptrdiff_t maxJumpReplacementSize() |
3809 | { |
3810 | return 5; |
3811 | } |
3812 | |
3813 | static constexpr ptrdiff_t patchableJumpSize() |
3814 | { |
3815 | return 5; |
3816 | } |
3817 | |
3818 | #if CPU(X86_64) |
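// These revert a patchable jump site back to the mov-immediate instruction it originally
// contained, writing the encoding by hand: a REX prefix, the MOV opcode with the
// destination register in its low three bits, then the immediate bytes.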
3819 | static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst) |
3820 | { |
3821 | const unsigned instructionSize = 10; // REX.W MOV IMM64 |
3822 | const int rexBytes = 1; |
3823 | const int opcodeBytes = 1; |
3824 | uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); |
3825 | ptr[0] = PRE_REX | (1 << 3) | (dst >> 3); |
3826 | ptr[1] = OP_MOV_EAXIv | (dst & 7); |
3827 | |
3828 | union { |
3829 | uint64_t asWord; |
3830 | uint8_t asBytes[8]; |
3831 | } u; |
3832 | u.asWord = imm; |
3833 | for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i) |
3834 | ptr[i] = u.asBytes[i - rexBytes - opcodeBytes]; |
3835 | } |
3836 | |
3837 | static void revertJumpTo_movl_i32r(void* instructionStart, int32_t imm, RegisterID dst) |
3838 | { |
3839 | // We only revert jumps on inline caches, and inline caches always use the scratch register (r11). |
3840 | // FIXME: If the above is ever false then we need to make this smarter with respect to emitting |
3841 | // the REX byte. |
3842 | ASSERT(dst == X86Registers::r11); |
3843 | const unsigned instructionSize = 6; // REX MOV IMM32 |
3844 | const int rexBytes = 1; |
3845 | const int opcodeBytes = 1; |
3846 | uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); |
3847 | ptr[0] = PRE_REX | (dst >> 3); |
3848 | ptr[1] = OP_MOV_EAXIv | (dst & 7); |
3849 | |
3850 | union { |
3851 | uint32_t asWord; |
3852 | uint8_t asBytes[4]; |
3853 | } u; |
3854 | u.asWord = imm; |
3855 | for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i) |
3856 | ptr[i] = u.asBytes[i - rexBytes - opcodeBytes]; |
3857 | } |
3858 | #endif |
3859 | |
3860 | static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst) |
3861 | { |
3862 | const int opcodeBytes = 1; |
3863 | const int modRMBytes = 1; |
3864 | ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize()); |
3865 | uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); |
3866 | ptr[0] = OP_GROUP1_EvIz; |
ptr[1] = X86InstructionFormatter::ModRmRegister | (GROUP1_OP_CMP << 3) | dst; // ModRmRegister is already shifted into bits 7:6.
3868 | union { |
3869 | uint32_t asWord; |
3870 | uint8_t asBytes[4]; |
3871 | } u; |
3872 | u.asWord = imm; |
3873 | for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i) |
3874 | ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes]; |
3875 | } |
3876 | |
3877 | static void revertJumpTo_cmpl_im_force32(void* instructionStart, int32_t imm, int offset, RegisterID dst) |
3878 | { |
3879 | ASSERT_UNUSED(offset, !offset); |
3880 | const int opcodeBytes = 1; |
3881 | const int modRMBytes = 1; |
3882 | ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize()); |
3883 | uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); |
3884 | ptr[0] = OP_GROUP1_EvIz; |
ptr[1] = X86InstructionFormatter::ModRmMemoryNoDisp | (GROUP1_OP_CMP << 3) | dst; // ModRmMode values are already shifted into bits 7:6.
3886 | union { |
3887 | uint32_t asWord; |
3888 | uint8_t asBytes[4]; |
3889 | } u; |
3890 | u.asWord = imm; |
3891 | for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i) |
3892 | ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes]; |
3893 | } |
3894 | |
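// MOV (load) and LEA share the same ModRM/SIB/displacement encoding, so switching an
// instruction between a load and an address computation only requires rewriting the
// opcode byte (after skipping any REX prefix), as done below.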
3895 | static void replaceWithLoad(void* instructionStart) |
3896 | { |
3897 | uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); |
3898 | #if CPU(X86_64) |
3899 | if ((*ptr & ~15) == PRE_REX) |
3900 | ptr++; |
3901 | #endif |
3902 | switch (*ptr) { |
3903 | case OP_MOV_GvEv: |
3904 | break; |
3905 | case OP_LEA: |
3906 | *ptr = OP_MOV_GvEv; |
3907 | break; |
3908 | default: |
3909 | RELEASE_ASSERT_NOT_REACHED(); |
3910 | } |
3911 | } |
3912 | |
3913 | static void replaceWithAddressComputation(void* instructionStart) |
3914 | { |
3915 | uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); |
3916 | #if CPU(X86_64) |
3917 | if ((*ptr & ~15) == PRE_REX) |
3918 | ptr++; |
3919 | #endif |
3920 | switch (*ptr) { |
3921 | case OP_MOV_GvEv: |
3922 | *ptr = OP_LEA; |
3923 | break; |
3924 | case OP_LEA: |
3925 | break; |
3926 | default: |
3927 | RELEASE_ASSERT_NOT_REACHED(); |
3928 | } |
3929 | } |
3930 | |
3931 | static unsigned getCallReturnOffset(AssemblerLabel call) |
3932 | { |
3933 | ASSERT(call.isSet()); |
3934 | return call.m_offset; |
3935 | } |
3936 | |
3937 | static void* getRelocatedAddress(void* code, AssemblerLabel label) |
3938 | { |
3939 | ASSERT(label.isSet()); |
3940 | return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset); |
3941 | } |
3942 | |
3943 | static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b) |
3944 | { |
3945 | return b.m_offset - a.m_offset; |
3946 | } |
3947 | |
3948 | unsigned debugOffset() { return m_formatter.debugOffset(); } |
3949 | |
3950 | void nop() |
3951 | { |
3952 | m_formatter.oneByteOp(OP_NOP); |
3953 | } |
3954 | |
3955 | using CopyFunction = void*(&)(void*, const void*, size_t); |
3956 | |
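// Fills a code range with nops. On x86-64 this uses the longest practical multi-byte nop
// forms (up to 15 bytes per instruction, by prepending 0x66 prefixes to the ten-byte
// form) so the filled range decodes as few instructions as possible; on 32-bit x86 a run
// of single-byte nops is emitted instead.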
3957 | template <CopyFunction copy> |
3958 | static void fillNops(void* base, size_t size) |
3959 | { |
3960 | UNUSED_PARAM(copy); |
3961 | #if CPU(X86_64) |
3962 | static const uint8_t nops[10][10] = { |
3963 | // nop |
3964 | {0x90}, |
3965 | // xchg %ax,%ax |
3966 | {0x66, 0x90}, |
3967 | // nopl (%[re]ax) |
3968 | {0x0f, 0x1f, 0x00}, |
3969 | // nopl 8(%[re]ax) |
3970 | {0x0f, 0x1f, 0x40, 0x08}, |
3971 | // nopl 8(%[re]ax,%[re]ax,1) |
3972 | {0x0f, 0x1f, 0x44, 0x00, 0x08}, |
3973 | // nopw 8(%[re]ax,%[re]ax,1) |
3974 | {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x08}, |
3975 | // nopl 512(%[re]ax) |
3976 | {0x0f, 0x1f, 0x80, 0x00, 0x02, 0x00, 0x00}, |
3977 | // nopl 512(%[re]ax,%[re]ax,1) |
3978 | {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00}, |
3979 | // nopw 512(%[re]ax,%[re]ax,1) |
3980 | {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00}, |
3981 | // nopw %cs:512(%[re]ax,%[re]ax,1) |
3982 | {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00} |
3983 | }; |
3984 | |
3985 | uint8_t* where = reinterpret_cast<uint8_t*>(base); |
3986 | while (size) { |
3987 | unsigned nopSize = static_cast<unsigned>(std::min<size_t>(size, 15)); |
3988 | unsigned numPrefixes = nopSize <= 10 ? 0 : nopSize - 10; |
3989 | for (unsigned i = 0; i != numPrefixes; ++i) |
3990 | *where++ = 0x66; |
3991 | |
3992 | unsigned nopRest = nopSize - numPrefixes; |
3993 | for (unsigned i = 0; i != nopRest; ++i) |
3994 | *where++ = nops[nopRest-1][i]; |
3995 | |
3996 | size -= nopSize; |
3997 | } |
3998 | #else |
3999 | memset(base, OP_NOP, size); |
4000 | #endif |
4001 | } |
4002 | |
4003 | // This is a no-op on x86 |
4004 | ALWAYS_INLINE static void cacheFlush(void*, size_t) { } |
4005 | |
4006 | private: |
4007 | |
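// The set* helpers are handed a pointer just past the end of the field to patch (the end
// of the instruction), so they store immediately before 'where' - hence the '- 1'
// pointer arithmetic below.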
4008 | static void setPointer(void* where, void* value) |
4009 | { |
4010 | WTF::unalignedStore<void*>(bitwise_cast<void**>(where) - 1, value); |
4011 | } |
4012 | |
4013 | static void setInt32(void* where, int32_t value) |
4014 | { |
4015 | WTF::unalignedStore<int32_t>(bitwise_cast<int32_t*>(where) - 1, value); |
4016 | } |
4017 | |
4018 | static void setInt8(void* where, int8_t value) |
4019 | { |
4020 | WTF::unalignedStore<int8_t>(bitwise_cast<int8_t*>(where) - 1, value); |
4021 | } |
4022 | |
4023 | static void setRel32(void* from, void* to) |
4024 | { |
4025 | intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from); |
4026 | ASSERT(offset == static_cast<int32_t>(offset)); |
4027 | |
4028 | setInt32(from, offset); |
4029 | } |
4030 | |
4031 | class X86InstructionFormatter { |
4032 | static constexpr int maxInstructionSize = 16; |
4033 | |
4034 | public: |
4035 | enum ModRmMode { |
4036 | ModRmMemoryNoDisp = 0, |
4037 | ModRmMemoryDisp8 = 1 << 6, |
4038 | ModRmMemoryDisp32 = 2 << 6, |
4039 | ModRmRegister = 3 << 6, |
4040 | }; |
4041 | |
4042 | // Legacy prefix bytes: |
4043 | // |
// These are emitted prior to the instruction.
4045 | |
4046 | void prefix(OneByteOpcodeID pre) |
4047 | { |
4048 | m_buffer.putByte(pre); |
4049 | } |
4050 | |
4051 | #if CPU(X86_64) |
// Byte operand registers spl & above require a REX prefix (to prevent the 'H' registers from being accessed).
4053 | static bool byteRegRequiresRex(int reg) |
4054 | { |
static_assert(X86Registers::esp == 4, "Necessary condition for OR-masking");
4056 | return (reg >= X86Registers::esp); |
4057 | } |
4058 | static bool byteRegRequiresRex(int a, int b) |
4059 | { |
4060 | return byteRegRequiresRex(a | b); |
4061 | } |
4062 | |
// Registers r8 & above require a REX prefix.
4064 | static bool regRequiresRex(int reg) |
4065 | { |
static_assert(X86Registers::r8 == 8, "Necessary condition for OR-masking");
4067 | return (reg >= X86Registers::r8); |
4068 | } |
4069 | static bool regRequiresRex(int a, int b) |
4070 | { |
4071 | return regRequiresRex(a | b); |
4072 | } |
4073 | static bool regRequiresRex(int a, int b, int c) |
4074 | { |
4075 | return regRequiresRex(a | b | c); |
4076 | } |
4077 | #else |
4078 | static bool byteRegRequiresRex(int) { return false; } |
4079 | static bool byteRegRequiresRex(int, int) { return false; } |
4080 | static bool regRequiresRex(int) { return false; } |
4081 | static bool regRequiresRex(int, int) { return false; } |
4082 | static bool regRequiresRex(int, int, int) { return false; } |
4083 | #endif |
4084 | |
4085 | class SingleInstructionBufferWriter : public AssemblerBuffer::LocalWriter { |
4086 | public: |
4087 | SingleInstructionBufferWriter(AssemblerBuffer& buffer) |
4088 | : AssemblerBuffer::LocalWriter(buffer, maxInstructionSize) |
4089 | { |
4090 | } |
4091 | |
4092 | // Internals; ModRm and REX formatters. |
4093 | |
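// Register numbers whose low three bits are 100 (esp, r12) in the r/m field signal the
// presence of a SIB byte, and numbers whose low three bits are 101 (ebp, r13) cannot be
// encoded as a base with mod=00 (that pattern means "no base, disp32"), so the
// memoryModRM helpers below special-case these registers.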
4094 | static constexpr RegisterID noBase = X86Registers::ebp; |
4095 | static constexpr RegisterID hasSib = X86Registers::esp; |
4096 | static constexpr RegisterID noIndex = X86Registers::esp; |
4097 | |
4098 | #if CPU(X86_64) |
4099 | static constexpr RegisterID noBase2 = X86Registers::r13; |
4100 | static constexpr RegisterID hasSib2 = X86Registers::r12; |
4101 | |
4102 | // Format a REX prefix byte. |
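// The layout is 0100WRXB: W selects a 64-bit operand size, while R, X and B supply the
// fourth (high) bit of the ModRM reg field, the SIB index, and the ModRM r/m (or SIB
// base, or opcode register) field respectively.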
4103 | ALWAYS_INLINE void emitRex(bool w, int r, int x, int b) |
4104 | { |
4105 | ASSERT(r >= 0); |
4106 | ASSERT(x >= 0); |
4107 | ASSERT(b >= 0); |
4108 | putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3)); |
4109 | } |
4110 | |
4111 | // Used to plant a REX byte with REX.w set (for 64-bit operations). |
4112 | ALWAYS_INLINE void emitRexW(int r, int x, int b) |
4113 | { |
4114 | emitRex(true, r, x, b); |
4115 | } |
4116 | |
4117 | // Used for operations with byte operands - use byteRegRequiresRex() to check register operands, |
4118 | // regRequiresRex() to check other registers (i.e. address base & index). |
4119 | ALWAYS_INLINE void emitRexIf(bool condition, int r, int x, int b) |
4120 | { |
4121 | if (condition) |
4122 | emitRex(false, r, x, b); |
4123 | } |
4124 | |
4125 | // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above). |
4126 | ALWAYS_INLINE void emitRexIfNeeded(int r, int x, int b) |
4127 | { |
4128 | emitRexIf(regRequiresRex(r, x, b), r, x, b); |
4129 | } |
4130 | #else |
4131 | // No REX prefix bytes on 32-bit x86. |
4132 | ALWAYS_INLINE void emitRexIf(bool, int, int, int) { } |
4133 | ALWAYS_INLINE void emitRexIfNeeded(int, int, int) { } |
4134 | #endif |
4135 | |
4136 | ALWAYS_INLINE void putModRm(ModRmMode mode, int reg, RegisterID rm) |
4137 | { |
4138 | putByteUnchecked(mode | ((reg & 7) << 3) | (rm & 7)); |
4139 | } |
4140 | |
4141 | ALWAYS_INLINE void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale) |
4142 | { |
4143 | ASSERT(mode != ModRmRegister); |
4144 | |
4145 | putModRm(mode, reg, hasSib); |
4146 | putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7)); |
4147 | } |
4148 | |
4149 | ALWAYS_INLINE void registerModRM(int reg, RegisterID rm) |
4150 | { |
4151 | putModRm(ModRmRegister, reg, rm); |
4152 | } |
4153 | |
4154 | ALWAYS_INLINE void memoryModRM(int reg, RegisterID base, int offset) |
4155 | { |
4156 | // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there. |
4157 | #if CPU(X86_64) |
4158 | if ((base == hasSib) || (base == hasSib2)) { |
4159 | #else |
4160 | if (base == hasSib) { |
4161 | #endif |
4162 | if (!offset) // No need to check if the base is noBase, since we know it is hasSib! |
4163 | putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0); |
4164 | else if (CAN_SIGN_EXTEND_8_32(offset)) { |
4165 | putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0); |
4166 | putByteUnchecked(offset); |
4167 | } else { |
4168 | putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0); |
4169 | putIntUnchecked(offset); |
4170 | } |
4171 | } else { |
4172 | #if CPU(X86_64) |
4173 | if (!offset && (base != noBase) && (base != noBase2)) |
4174 | #else |
4175 | if (!offset && (base != noBase)) |
4176 | #endif |
4177 | putModRm(ModRmMemoryNoDisp, reg, base); |
4178 | else if (CAN_SIGN_EXTEND_8_32(offset)) { |
4179 | putModRm(ModRmMemoryDisp8, reg, base); |
4180 | putByteUnchecked(offset); |
4181 | } else { |
4182 | putModRm(ModRmMemoryDisp32, reg, base); |
4183 | putIntUnchecked(offset); |
4184 | } |
4185 | } |
4186 | } |
4187 | |
4188 | ALWAYS_INLINE void memoryModRM_disp8(int reg, RegisterID base, int offset) |
4189 | { |
4190 | // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there. |
4191 | ASSERT(CAN_SIGN_EXTEND_8_32(offset)); |
4192 | #if CPU(X86_64) |
4193 | if ((base == hasSib) || (base == hasSib2)) { |
4194 | #else |
4195 | if (base == hasSib) { |
4196 | #endif |
4197 | putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0); |
4198 | putByteUnchecked(offset); |
4199 | } else { |
4200 | putModRm(ModRmMemoryDisp8, reg, base); |
4201 | putByteUnchecked(offset); |
4202 | } |
4203 | } |
4204 | |
4205 | ALWAYS_INLINE void memoryModRM_disp32(int reg, RegisterID base, int offset) |
4206 | { |
4207 | // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there. |
4208 | #if CPU(X86_64) |
4209 | if ((base == hasSib) || (base == hasSib2)) { |
4210 | #else |
4211 | if (base == hasSib) { |
4212 | #endif |
4213 | putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0); |
4214 | putIntUnchecked(offset); |
4215 | } else { |
4216 | putModRm(ModRmMemoryDisp32, reg, base); |
4217 | putIntUnchecked(offset); |
4218 | } |
4219 | } |
4220 | |
4221 | ALWAYS_INLINE void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset) |
4222 | { |
4223 | ASSERT(index != noIndex); |
4224 | |
4225 | #if CPU(X86_64) |
4226 | if (!offset && (base != noBase) && (base != noBase2)) |
4227 | #else |
4228 | if (!offset && (base != noBase)) |
4229 | #endif |
4230 | putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale); |
4231 | else if (CAN_SIGN_EXTEND_8_32(offset)) { |
4232 | putModRmSib(ModRmMemoryDisp8, reg, base, index, scale); |
4233 | putByteUnchecked(offset); |
4234 | } else { |
4235 | putModRmSib(ModRmMemoryDisp32, reg, base, index, scale); |
4236 | putIntUnchecked(offset); |
4237 | } |
4238 | } |
4239 | |
4240 | ALWAYS_INLINE void memoryModRMAddr(int reg, uint32_t address) |
4241 | { |
4242 | #if CPU(X86_64) |
4243 | putModRmSib(ModRmMemoryNoDisp, reg, noBase, noIndex, 0); |
4244 | #else |
4245 | // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32! |
4246 | putModRm(ModRmMemoryNoDisp, reg, noBase); |
4247 | #endif |
4248 | putIntUnchecked(address); |
4249 | } |
4250 | |
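// VEX prefix emission. The two-byte form (the VexPrefix::TwoBytes escape) only has room
// for the inverted R bit, vvvv, L and the SIMD prefix, so the three-byte form
// (VexPrefix::ThreeBytes) is used whenever the X or B extension bits or an implied-byte
// selector are needed; callers choose between them (see vexNdsLigWigTwoByteOp).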
4251 | ALWAYS_INLINE void twoBytesVex(OneByteOpcodeID simdPrefix, RegisterID inOpReg, RegisterID r) |
4252 | { |
4253 | putByteUnchecked(VexPrefix::TwoBytes); |
4254 | |
4255 | uint8_t secondByte = vexEncodeSimdPrefix(simdPrefix); |
4256 | secondByte |= (~inOpReg & 0xf) << 3; |
4257 | secondByte |= !regRequiresRex(r) << 7; |
4258 | putByteUnchecked(secondByte); |
4259 | } |
4260 | |
4261 | ALWAYS_INLINE void threeBytesVexNds(OneByteOpcodeID simdPrefix, VexImpliedBytes impliedBytes, RegisterID r, RegisterID inOpReg, RegisterID x, RegisterID b) |
4262 | { |
4263 | putByteUnchecked(VexPrefix::ThreeBytes); |
4264 | |
4265 | uint8_t secondByte = static_cast<uint8_t>(impliedBytes); |
4266 | secondByte |= !regRequiresRex(r) << 7; |
4267 | secondByte |= !regRequiresRex(x) << 6; |
4268 | secondByte |= !regRequiresRex(b) << 5; |
4269 | putByteUnchecked(secondByte); |
4270 | |
4271 | uint8_t thirdByte = vexEncodeSimdPrefix(simdPrefix); |
4272 | thirdByte |= (~inOpReg & 0xf) << 3; |
4273 | putByteUnchecked(thirdByte); |
4274 | } |
4275 | |
4276 | ALWAYS_INLINE void threeBytesVexNds(OneByteOpcodeID simdPrefix, VexImpliedBytes impliedBytes, RegisterID r, RegisterID inOpReg, RegisterID b) |
4277 | { |
4278 | putByteUnchecked(VexPrefix::ThreeBytes); |
4279 | |
4280 | uint8_t secondByte = static_cast<uint8_t>(impliedBytes); |
4281 | secondByte |= !regRequiresRex(r) << 7; |
4282 | secondByte |= 1 << 6; // REX.X |
4283 | secondByte |= !regRequiresRex(b) << 5; |
4284 | putByteUnchecked(secondByte); |
4285 | |
4286 | uint8_t thirdByte = vexEncodeSimdPrefix(simdPrefix); |
4287 | thirdByte |= (~inOpReg & 0xf) << 3; |
4288 | putByteUnchecked(thirdByte); |
4289 | } |
4290 | private: |
4291 | uint8_t vexEncodeSimdPrefix(OneByteOpcodeID simdPrefix) |
4292 | { |
4293 | switch (simdPrefix) { |
4294 | case 0x66: |
4295 | return 1; |
4296 | case 0xF3: |
4297 | return 2; |
4298 | case 0xF2: |
4299 | return 3; |
4300 | default: |
4301 | RELEASE_ASSERT_NOT_REACHED(); |
4302 | } |
4303 | return 0; |
4304 | } |
4305 | |
4306 | }; |
4307 | |
4308 | // Word-sized operands / no operand instruction formatters. |
4309 | // |
4310 | // In addition to the opcode, the following operand permutations are supported: |
4311 | // * None - instruction takes no operands. |
4312 | // * One register - the low three bits of the RegisterID are added into the opcode. |
4313 | // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place). |
4314 | // * Three argument ModRM - a register, and a register and an offset describing a memory operand. |
4315 | // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand. |
4316 | // |
4317 | // For 32-bit x86 targets, the address operand may also be provided as a void*. |
4318 | // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used. |
4319 | // |
// The twoByteOp methods plant two-byte Intel instruction sequences (first opcode byte 0x0F).
4321 | |
4322 | void oneByteOp(OneByteOpcodeID opcode) |
4323 | { |
4324 | SingleInstructionBufferWriter writer(m_buffer); |
4325 | writer.putByteUnchecked(opcode); |
4326 | } |
4327 | |
4328 | void oneByteOp(OneByteOpcodeID opcode, RegisterID reg) |
4329 | { |
4330 | SingleInstructionBufferWriter writer(m_buffer); |
4331 | writer.emitRexIfNeeded(0, 0, reg); |
4332 | writer.putByteUnchecked(opcode + (reg & 7)); |
4333 | } |
4334 | |
4335 | void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm) |
4336 | { |
4337 | SingleInstructionBufferWriter writer(m_buffer); |
4338 | writer.emitRexIfNeeded(reg, 0, rm); |
4339 | writer.putByteUnchecked(opcode); |
4340 | writer.registerModRM(reg, rm); |
4341 | } |
4342 | |
4343 | void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset) |
4344 | { |
4345 | SingleInstructionBufferWriter writer(m_buffer); |
4346 | writer.emitRexIfNeeded(reg, 0, base); |
4347 | writer.putByteUnchecked(opcode); |
4348 | writer.memoryModRM(reg, base, offset); |
4349 | } |
4350 | |
4351 | void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset) |
4352 | { |
4353 | SingleInstructionBufferWriter writer(m_buffer); |
4354 | writer.emitRexIfNeeded(reg, 0, base); |
4355 | writer.putByteUnchecked(opcode); |
4356 | writer.memoryModRM_disp32(reg, base, offset); |
4357 | } |
4358 | |
4359 | void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset) |
4360 | { |
4361 | SingleInstructionBufferWriter writer(m_buffer); |
4362 | writer.emitRexIfNeeded(reg, 0, base); |
4363 | writer.putByteUnchecked(opcode); |
4364 | writer.memoryModRM_disp8(reg, base, offset); |
4365 | } |
4366 | |
4367 | void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset) |
4368 | { |
4369 | SingleInstructionBufferWriter writer(m_buffer); |
4370 | writer.emitRexIfNeeded(reg, index, base); |
4371 | writer.putByteUnchecked(opcode); |
4372 | writer.memoryModRM(reg, base, index, scale, offset); |
4373 | } |
4374 | |
4375 | void oneByteOpAddr(OneByteOpcodeID opcode, int reg, uint32_t address) |
4376 | { |
4377 | SingleInstructionBufferWriter writer(m_buffer); |
4378 | writer.putByteUnchecked(opcode); |
4379 | writer.memoryModRMAddr(reg, address); |
4380 | } |
4381 | |
4382 | void twoByteOp(TwoByteOpcodeID opcode) |
4383 | { |
4384 | SingleInstructionBufferWriter writer(m_buffer); |
4385 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4386 | writer.putByteUnchecked(opcode); |
4387 | } |
4388 | |
4389 | void twoByteOp(TwoByteOpcodeID opcode, int reg) |
4390 | { |
4391 | SingleInstructionBufferWriter writer(m_buffer); |
4392 | writer.emitRexIfNeeded(0, 0, reg); |
4393 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4394 | writer.putByteUnchecked(opcode + (reg & 7)); |
4395 | } |
4396 | |
4397 | void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm) |
4398 | { |
4399 | SingleInstructionBufferWriter writer(m_buffer); |
4400 | writer.emitRexIfNeeded(reg, 0, rm); |
4401 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4402 | writer.putByteUnchecked(opcode); |
4403 | writer.registerModRM(reg, rm); |
4404 | } |
4405 | |
4406 | void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset) |
4407 | { |
4408 | SingleInstructionBufferWriter writer(m_buffer); |
4409 | writer.emitRexIfNeeded(reg, 0, base); |
4410 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4411 | writer.putByteUnchecked(opcode); |
4412 | writer.memoryModRM(reg, base, offset); |
4413 | } |
4414 | |
4415 | void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset) |
4416 | { |
4417 | SingleInstructionBufferWriter writer(m_buffer); |
4418 | writer.emitRexIfNeeded(reg, index, base); |
4419 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4420 | writer.putByteUnchecked(opcode); |
4421 | writer.memoryModRM(reg, base, index, scale, offset); |
4422 | } |
4423 | |
4424 | void twoByteOpAddr(TwoByteOpcodeID opcode, int reg, uint32_t address) |
4425 | { |
4426 | SingleInstructionBufferWriter writer(m_buffer); |
4427 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4428 | writer.putByteUnchecked(opcode); |
4429 | writer.memoryModRMAddr(reg, address); |
4430 | } |
4431 | |
4432 | void vexNdsLigWigTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, RegisterID b) |
4433 | { |
4434 | SingleInstructionBufferWriter writer(m_buffer); |
4435 | if (regRequiresRex(b)) |
4436 | writer.threeBytesVexNds(simdPrefix, VexImpliedBytes::TwoBytesOp, dest, a, b); |
4437 | else |
4438 | writer.twoBytesVex(simdPrefix, a, dest); |
4439 | writer.putByteUnchecked(opcode); |
4440 | writer.registerModRM(dest, b); |
4441 | } |
4442 | |
4443 | void vexNdsLigWigCommutativeTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, RegisterID b) |
4444 | { |
4445 | // Since this is a commutative operation, we can try switching the arguments. |
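// Only the rm operand's register number can force the longer three-byte VEX form (see
// vexNdsLigWigTwoByteOp above), so moving an extended register into the vvvv slot keeps
// the shorter two-byte encoding.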
4446 | if (regRequiresRex(b)) |
4447 | std::swap(a, b); |
4448 | vexNdsLigWigTwoByteOp(simdPrefix, opcode, dest, a, b); |
4449 | } |
4450 | |
4451 | void vexNdsLigWigTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, RegisterID base, int offset) |
4452 | { |
4453 | SingleInstructionBufferWriter writer(m_buffer); |
4454 | if (regRequiresRex(base)) |
4455 | writer.threeBytesVexNds(simdPrefix, VexImpliedBytes::TwoBytesOp, dest, a, base); |
4456 | else |
4457 | writer.twoBytesVex(simdPrefix, a, dest); |
4458 | writer.putByteUnchecked(opcode); |
4459 | writer.memoryModRM(dest, base, offset); |
4460 | } |
4461 | |
4462 | void vexNdsLigWigTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, int offset, RegisterID base, RegisterID index, int scale) |
4463 | { |
4464 | SingleInstructionBufferWriter writer(m_buffer); |
4465 | if (regRequiresRex(base, index)) |
4466 | writer.threeBytesVexNds(simdPrefix, VexImpliedBytes::TwoBytesOp, dest, a, index, base); |
4467 | else |
4468 | writer.twoBytesVex(simdPrefix, a, dest); |
4469 | writer.putByteUnchecked(opcode); |
4470 | writer.memoryModRM(dest, base, index, scale, offset); |
4471 | } |
4472 | |
4473 | void threeByteOp(TwoByteOpcodeID twoBytePrefix, ThreeByteOpcodeID opcode) |
4474 | { |
4475 | SingleInstructionBufferWriter writer(m_buffer); |
4476 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4477 | writer.putByteUnchecked(twoBytePrefix); |
4478 | writer.putByteUnchecked(opcode); |
4479 | } |
4480 | |
4481 | void threeByteOp(TwoByteOpcodeID twoBytePrefix, ThreeByteOpcodeID opcode, int reg, RegisterID rm) |
4482 | { |
4483 | SingleInstructionBufferWriter writer(m_buffer); |
4484 | writer.emitRexIfNeeded(reg, 0, rm); |
4485 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4486 | writer.putByteUnchecked(twoBytePrefix); |
4487 | writer.putByteUnchecked(opcode); |
4488 | writer.registerModRM(reg, rm); |
4489 | } |
4490 | |
4491 | void threeByteOp(TwoByteOpcodeID twoBytePrefix, ThreeByteOpcodeID opcode, int reg, RegisterID base, int displacement) |
4492 | { |
4493 | SingleInstructionBufferWriter writer(m_buffer); |
4494 | writer.emitRexIfNeeded(reg, 0, base); |
4495 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4496 | writer.putByteUnchecked(twoBytePrefix); |
4497 | writer.putByteUnchecked(opcode); |
4498 | writer.memoryModRM(reg, base, displacement); |
4499 | } |
4500 | |
4501 | #if CPU(X86_64) |
4502 | // Quad-word-sized operands: |
4503 | // |
// Used to format 64-bit operations, planting a REX.w prefix.
// When planting d64 or f64 instructions, which do not require a REX.w prefix,
// the normal (non-'64'-postfixed) formatters should be used.
4507 | |
4508 | void oneByteOp64(OneByteOpcodeID opcode) |
4509 | { |
4510 | SingleInstructionBufferWriter writer(m_buffer); |
4511 | writer.emitRexW(0, 0, 0); |
4512 | writer.putByteUnchecked(opcode); |
4513 | } |
4514 | |
4515 | void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg) |
4516 | { |
4517 | SingleInstructionBufferWriter writer(m_buffer); |
4518 | writer.emitRexW(0, 0, reg); |
4519 | writer.putByteUnchecked(opcode + (reg & 7)); |
4520 | } |
4521 | |
4522 | void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm) |
4523 | { |
4524 | SingleInstructionBufferWriter writer(m_buffer); |
4525 | writer.emitRexW(reg, 0, rm); |
4526 | writer.putByteUnchecked(opcode); |
4527 | writer.registerModRM(reg, rm); |
4528 | } |
4529 | |
4530 | void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset) |
4531 | { |
4532 | SingleInstructionBufferWriter writer(m_buffer); |
4533 | writer.emitRexW(reg, 0, base); |
4534 | writer.putByteUnchecked(opcode); |
4535 | writer.memoryModRM(reg, base, offset); |
4536 | } |
4537 | |
4538 | void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset) |
4539 | { |
4540 | SingleInstructionBufferWriter writer(m_buffer); |
4541 | writer.emitRexW(reg, 0, base); |
4542 | writer.putByteUnchecked(opcode); |
4543 | writer.memoryModRM_disp32(reg, base, offset); |
4544 | } |
4545 | |
4546 | void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset) |
4547 | { |
4548 | SingleInstructionBufferWriter writer(m_buffer); |
4549 | writer.emitRexW(reg, 0, base); |
4550 | writer.putByteUnchecked(opcode); |
4551 | writer.memoryModRM_disp8(reg, base, offset); |
4552 | } |
4553 | |
4554 | void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset) |
4555 | { |
4556 | SingleInstructionBufferWriter writer(m_buffer); |
4557 | writer.emitRexW(reg, index, base); |
4558 | writer.putByteUnchecked(opcode); |
4559 | writer.memoryModRM(reg, base, index, scale, offset); |
4560 | } |
4561 | |
4562 | void oneByteOp64Addr(OneByteOpcodeID opcode, int reg, uint32_t address) |
4563 | { |
4564 | SingleInstructionBufferWriter writer(m_buffer); |
4565 | writer.emitRexW(reg, 0, 0); |
4566 | writer.putByteUnchecked(opcode); |
4567 | writer.memoryModRMAddr(reg, address); |
4568 | } |
4569 | |
4570 | void twoByteOp64(TwoByteOpcodeID opcode, int reg) |
4571 | { |
4572 | SingleInstructionBufferWriter writer(m_buffer); |
4573 | writer.emitRexW(0, 0, reg); |
4574 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4575 | writer.putByteUnchecked(opcode + (reg & 7)); |
4576 | } |
4577 | |
4578 | void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm) |
4579 | { |
4580 | SingleInstructionBufferWriter writer(m_buffer); |
4581 | writer.emitRexW(reg, 0, rm); |
4582 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4583 | writer.putByteUnchecked(opcode); |
4584 | writer.registerModRM(reg, rm); |
4585 | } |
4586 | |
4587 | void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset) |
4588 | { |
4589 | SingleInstructionBufferWriter writer(m_buffer); |
4590 | writer.emitRexW(reg, 0, base); |
4591 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4592 | writer.putByteUnchecked(opcode); |
4593 | writer.memoryModRM(reg, base, offset); |
4594 | } |
4595 | |
4596 | void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset) |
4597 | { |
4598 | SingleInstructionBufferWriter writer(m_buffer); |
4599 | writer.emitRexW(reg, index, base); |
4600 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4601 | writer.putByteUnchecked(opcode); |
4602 | writer.memoryModRM(reg, base, index, scale, offset); |
4603 | } |
4604 | #endif |
4605 | |
4606 | // Byte-operands: |
4607 | // |
4608 | // These methods format byte operations. Byte operations differ from the normal |
4609 | // formatters in the circumstances under which they will decide to emit REX prefixes. |
4610 | // These should be used where any register operand signifies a byte register. |
4611 | // |
// The distinction is due to the handling of register numbers in the range 4..7 on
4613 | // x86-64. These register numbers may either represent the second byte of the first |
4614 | // four registers (ah..bh) or the first byte of the second four registers (spl..dil). |
4615 | // |
4616 | // Since ah..bh cannot be used in all permutations of operands (specifically cannot |
4617 | // be accessed where a REX prefix is present), these are likely best treated as |
4618 | // deprecated. In order to ensure the correct registers spl..dil are selected a |
4619 | // REX prefix will be emitted for any byte register operand in the range 4..15. |
4620 | // |
// These formatters may be used in instructions that mix operand sizes, in which
4622 | // case an unnecessary REX will be emitted, for example: |
4623 | // movzbl %al, %edi |
4624 | // In this case a REX will be planted since edi is 7 (and were this a byte operand |
4625 | // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will |
4626 | // be silently ignored by the processor. |
4627 | // |
4628 | // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex() |
4629 | // is provided to check byte register operands. |
4630 | |
4631 | void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm) |
4632 | { |
4633 | SingleInstructionBufferWriter writer(m_buffer); |
4634 | writer.emitRexIf(byteRegRequiresRex(rm), 0, 0, rm); |
4635 | writer.putByteUnchecked(opcode); |
4636 | writer.registerModRM(groupOp, rm); |
4637 | } |
4638 | |
4639 | void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID rm) |
4640 | { |
4641 | SingleInstructionBufferWriter writer(m_buffer); |
4642 | writer.emitRexIf(byteRegRequiresRex(reg, rm), reg, 0, rm); |
4643 | writer.putByteUnchecked(opcode); |
4644 | writer.registerModRM(reg, rm); |
4645 | } |
4646 | |
4647 | void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset) |
4648 | { |
4649 | SingleInstructionBufferWriter writer(m_buffer); |
4650 | writer.emitRexIf(byteRegRequiresRex(reg, base), reg, 0, base); |
4651 | writer.putByteUnchecked(opcode); |
4652 | writer.memoryModRM(reg, base, offset); |
4653 | } |
4654 | |
4655 | void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset) |
4656 | { |
4657 | SingleInstructionBufferWriter writer(m_buffer); |
4658 | writer.emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index, base), reg, index, base); |
4659 | writer.putByteUnchecked(opcode); |
4660 | writer.memoryModRM(reg, base, index, scale, offset); |
4661 | } |
4662 | |
4663 | void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm) |
4664 | { |
4665 | SingleInstructionBufferWriter writer(m_buffer); |
4666 | writer.emitRexIf(byteRegRequiresRex(reg, rm), reg, 0, rm); |
4667 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4668 | writer.putByteUnchecked(opcode); |
4669 | writer.registerModRM(reg, rm); |
4670 | } |
4671 | |
4672 | void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm) |
4673 | { |
4674 | SingleInstructionBufferWriter writer(m_buffer); |
4675 | writer.emitRexIf(byteRegRequiresRex(rm), 0, 0, rm); |
4676 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4677 | writer.putByteUnchecked(opcode); |
4678 | writer.registerModRM(groupOp, rm); |
4679 | } |
4680 | |
4681 | void twoByteOp8(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset) |
4682 | { |
4683 | SingleInstructionBufferWriter writer(m_buffer); |
4684 | writer.emitRexIf(byteRegRequiresRex(reg, base), reg, 0, base); |
4685 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4686 | writer.putByteUnchecked(opcode); |
4687 | writer.memoryModRM(reg, base, offset); |
4688 | } |
4689 | |
4690 | void twoByteOp8(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset) |
4691 | { |
4692 | SingleInstructionBufferWriter writer(m_buffer); |
4693 | writer.emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index, base), reg, index, base); |
4694 | writer.putByteUnchecked(OP_2BYTE_ESCAPE); |
4695 | writer.putByteUnchecked(opcode); |
4696 | writer.memoryModRM(reg, base, index, scale, offset); |
4697 | } |
4698 | |
4699 | // Immediates: |
4700 | // |
// An immediate should be appended where appropriate after an op has been emitted.
4702 | // The writes are unchecked since the opcode formatters above will have ensured space. |
4703 | |
4704 | void immediate8(int imm) |
4705 | { |
4706 | m_buffer.putByteUnchecked(imm); |
4707 | } |
4708 | |
4709 | void immediate16(int imm) |
4710 | { |
4711 | m_buffer.putShortUnchecked(imm); |
4712 | } |
4713 | |
4714 | void immediate32(int imm) |
4715 | { |
4716 | m_buffer.putIntUnchecked(imm); |
4717 | } |
4718 | |
4719 | void immediate64(int64_t imm) |
4720 | { |
4721 | m_buffer.putInt64Unchecked(imm); |
4722 | } |
4723 | |
4724 | AssemblerLabel immediateRel32() |
4725 | { |
4726 | m_buffer.putIntUnchecked(0); |
4727 | return label(); |
4728 | } |
4729 | |
4730 | // Administrative methods: |
4731 | |
4732 | size_t codeSize() const { return m_buffer.codeSize(); } |
4733 | AssemblerLabel label() const { return m_buffer.label(); } |
4734 | bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); } |
4735 | void* data() const { return m_buffer.data(); } |
4736 | |
4737 | unsigned debugOffset() { return m_buffer.debugOffset(); } |
4738 | |
4739 | public: |
4740 | AssemblerBuffer m_buffer; |
4741 | } m_formatter; |
4742 | int m_indexOfLastWatchpoint; |
4743 | int m_indexOfTailOfLastWatchpoint; |
4744 | }; |
4745 | |
4746 | } // namespace JSC |
4747 | |
#endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
4749 | |