/*
 * Copyright (C) 2008-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "AbortReason.h"
#include "AssemblerBuffer.h"
#include "AssemblerCommon.h"
#include "CPU.h"
#include "CodeLocation.h"
#include "JSCJSValue.h"
#include "JSCPtrTag.h"
#include "MacroAssemblerCodeRef.h"
#include "MacroAssemblerHelpers.h"
#include "Options.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/Noncopyable.h>
#include <wtf/SharedTask.h>
#include <wtf/Vector.h>
#include <wtf/WeakRandom.h>

namespace JSC {

#if ENABLE(ASSEMBLER)

class AllowMacroScratchRegisterUsage;
class DisallowMacroScratchRegisterUsage;
class LinkBuffer;
class Watchpoint;
namespace DFG {
struct OSRExit;
}

class AbstractMacroAssemblerBase {
public:
    enum StatusCondition {
        Success,
        Failure
    };

    static StatusCondition invert(StatusCondition condition)
    {
        switch (condition) {
        case Success:
            return Failure;
        case Failure:
            return Success;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return Success;
    }
};

template <class AssemblerType>
class AbstractMacroAssembler : public AbstractMacroAssemblerBase {
public:
    typedef AbstractMacroAssembler<AssemblerType> AbstractMacroAssemblerType;
    typedef AssemblerType AssemblerType_T;

    template<PtrTag tag> using CodePtr = MacroAssemblerCodePtr<tag>;
    template<PtrTag tag> using CodeRef = MacroAssemblerCodeRef<tag>;

    enum class CPUIDCheckState {
        NotChecked,
        Clear,
        Set
    };

    class Jump;

    typedef typename AssemblerType::RegisterID RegisterID;
    typedef typename AssemblerType::SPRegisterID SPRegisterID;
    typedef typename AssemblerType::FPRegisterID FPRegisterID;

    static constexpr RegisterID firstRegister() { return AssemblerType::firstRegister(); }
    static constexpr RegisterID lastRegister() { return AssemblerType::lastRegister(); }
    static constexpr unsigned numberOfRegisters() { return AssemblerType::numberOfRegisters(); }
    static const char* gprName(RegisterID id) { return AssemblerType::gprName(id); }

    static constexpr SPRegisterID firstSPRegister() { return AssemblerType::firstSPRegister(); }
    static constexpr SPRegisterID lastSPRegister() { return AssemblerType::lastSPRegister(); }
    static constexpr unsigned numberOfSPRegisters() { return AssemblerType::numberOfSPRegisters(); }
    static const char* sprName(SPRegisterID id) { return AssemblerType::sprName(id); }

    static constexpr FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); }
    static constexpr FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); }
    static constexpr unsigned numberOfFPRegisters() { return AssemblerType::numberOfFPRegisters(); }
    static const char* fprName(FPRegisterID id) { return AssemblerType::fprName(id); }

    // Section 1: MacroAssembler operand types
    //
    // The following types are used as operands to MacroAssembler operations,
    // describing immediate and memory operands to the instructions to be planted.

    enum Scale {
        TimesOne,
        TimesTwo,
        TimesFour,
        TimesEight,
    };

    static Scale timesPtr()
    {
        if (sizeof(void*) == 4)
            return TimesFour;
        return TimesEight;
    }
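
    // For example (an illustrative sketch; the register names are hypothetical),
    // timesPtr() picks the Scale used to index a table of pointer-sized entries:
    //
    //     loadPtr(BaseIndex(tableReg, indexReg, timesPtr()), destReg);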

    struct BaseIndex;

    static RegisterID withSwappedRegister(RegisterID original, RegisterID left, RegisterID right)
    {
        if (original == left)
            return right;
        if (original == right)
            return left;
        return original;
    }

    // Address:
    //
    // Describes a simple base-offset address.
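    //
    // For example (an illustrative sketch; the register and offset names are
    // hypothetical), a field load might be written as:
    //
    //     load32(Address(objectReg, fieldOffset), destReg);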
    struct Address {
        explicit Address(RegisterID base, int32_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        Address withOffset(int32_t additionalOffset)
        {
            return Address(base, offset + additionalOffset);
        }

        Address withSwappedRegister(RegisterID left, RegisterID right)
        {
            return Address(AbstractMacroAssembler::withSwappedRegister(base, left, right), offset);
        }

        BaseIndex indexedBy(RegisterID index, Scale) const;

        RegisterID base;
        int32_t offset;
    };

    struct ExtendedAddress {
        explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        RegisterID base;
        intptr_t offset;
    };

    // ImplicitAddress:
    //
    // This class is used for explicit 'load' and 'store' operations
    // (as opposed to situations in which a memory operand is provided
    // to a generic operation, such as an integer arithmetic instruction).
    // In the case of a load (or store) operation we want to permit
    // addresses to be implicitly constructed, e.g. the two calls:
    //
    //     load32(Address(addrReg), destReg);
    //     load32(addrReg, destReg);
    //
    // are equivalent, making the explicit wrapping of the Address in the
    // former unnecessary.
    struct ImplicitAddress {
        ImplicitAddress(RegisterID base)
            : base(base)
            , offset(0)
        {
        }

        ImplicitAddress(Address address)
            : base(address.base)
            , offset(address.offset)
        {
        }

        RegisterID base;
        int32_t offset;
    };

    // BaseIndex:
    //
    // Describes a complex addressing mode.
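    //
    // The effective address computed is: base + (index * scale) + offset,
    // where the scale factor is 1, 2, 4 or 8. For example (illustrative
    // register names), loading the i'th element of an int32_t array:
    //
    //     load32(BaseIndex(arrayReg, indexReg, TimesFour), destReg);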
    struct BaseIndex {
        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
            : base(base)
            , index(index)
            , scale(scale)
            , offset(offset)
        {
        }

        RegisterID base;
        RegisterID index;
        Scale scale;
        int32_t offset;

        BaseIndex withOffset(int32_t additionalOffset)
        {
            return BaseIndex(base, index, scale, offset + additionalOffset);
        }

        BaseIndex withSwappedRegister(RegisterID left, RegisterID right)
        {
            return BaseIndex(AbstractMacroAssembler::withSwappedRegister(base, left, right), AbstractMacroAssembler::withSwappedRegister(index, left, right), scale, offset);
        }
    };

    // AbsoluteAddress:
    //
    // Describes a memory operand given by a pointer. For regular load & store
    // operations an unwrapped void* is used rather than this wrapper.
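    //
    // For example (a hedged sketch; the global is hypothetical, and some
    // architectures materialize the address into a scratch register first):
    //
    //     add32(TrustedImm32(1), AbsoluteAddress(&someGlobalCounter));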
    struct AbsoluteAddress {
        explicit AbsoluteAddress(const void* ptr)
            : m_ptr(ptr)
        {
        }

        const void* m_ptr;
    };

    // TrustedImm:
    //
    // An empty superclass of each of the TrustedImm types. This class is used
    // for template overloads on a TrustedImm type via std::is_base_of.
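    //
    // For example (an illustrative sketch, not an overload defined here):
    //
    //     template<typename Imm, typename = std::enable_if_t<std::is_base_of<TrustedImm, Imm>::value>>
    //     void plantWithImmediate(Imm imm, RegisterID dest);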
    struct TrustedImm { };

    // TrustedImmPtr:
    //
    // A pointer-sized immediate operand to an instruction - this is wrapped
    // in a class requiring explicit construction in order to differentiate it
    // from pointers used as absolute addresses for memory operations.
    struct TrustedImmPtr : public TrustedImm {
        TrustedImmPtr() { }

        explicit TrustedImmPtr(const void* value)
            : m_value(value)
        {
        }

        template<typename ReturnType, typename... Arguments>
        explicit TrustedImmPtr(ReturnType(*value)(Arguments...))
            : m_value(reinterpret_cast<void*>(value))
        {
        }

        explicit TrustedImmPtr(std::nullptr_t)
        {
        }

        explicit TrustedImmPtr(size_t value)
            : m_value(reinterpret_cast<void*>(value))
        {
        }

        intptr_t asIntptr()
        {
            return reinterpret_cast<intptr_t>(m_value);
        }

        void* asPtr()
        {
            return const_cast<void*>(m_value);
        }

        const void* m_value { 0 };
    };

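    // ImmPtr:
    //
    // An untrusted pointer-sized immediate. Unlike TrustedImmPtr, the wrapped
    // value may originate from data an attacker can influence, so the concrete
    // MacroAssembler may choose to blind it before planting it in code (see
    // canBlind() and friends below). The private inheritance forces callers to
    // opt in explicitly via asTrustedImmPtr().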
    struct ImmPtr : private TrustedImmPtr {
        explicit ImmPtr(const void* value)
            : TrustedImmPtr(value)
        {
        }

        TrustedImmPtr asTrustedImmPtr() { return *this; }
    };

    // TrustedImm32:
    //
    // A 32-bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
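    //
    // For example (illustrative register name):
    //
    //     add32(TrustedImm32(42), counterReg); // ok
    //     add32(42, counterReg);               // intentionally fails to compile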
    struct TrustedImm32 : public TrustedImm {
        TrustedImm32() { }

        explicit TrustedImm32(int32_t value)
            : m_value(value)
        {
        }

#if !CPU(X86_64)
        explicit TrustedImm32(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int32_t m_value;
    };

    struct Imm32 : private TrustedImm32 {
        explicit Imm32(int32_t value)
            : TrustedImm32(value)
        {
        }
#if !CPU(X86_64)
        explicit Imm32(TrustedImmPtr ptr)
            : TrustedImm32(ptr)
        {
        }
#endif
        const TrustedImm32& asTrustedImm32() const { return *this; }
    };

    // TrustedImm64:
    //
    // A 64-bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm64 : TrustedImm {
        TrustedImm64() { }

        explicit TrustedImm64(int64_t value)
            : m_value(value)
        {
        }

#if CPU(X86_64) || CPU(ARM64)
        explicit TrustedImm64(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int64_t m_value;
    };

    struct Imm64 : private TrustedImm64 {
        explicit Imm64(int64_t value)
            : TrustedImm64(value)
        {
        }
#if CPU(X86_64) || CPU(ARM64)
        explicit Imm64(TrustedImmPtr ptr)
            : TrustedImm64(ptr)
        {
        }
#endif
        const TrustedImm64& asTrustedImm64() const { return *this; }
    };

    // Section 2: MacroAssembler code buffer handles
    //
    // The following types are used to reference items in the code buffer
    // during JIT code generation. For example, the type Jump is used to
    // track the location of a jump instruction so that it may later be
    // linked to a label marking its destination.

    // Label:
    //
    // A Label records a point in the generated instruction stream, typically such that
    // it may be used as a destination for a jump.
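    //
    // For example (a sketch; jump() is provided by the concrete
    // MacroAssembler):
    //
    //     Label loopTop = label();
    //     ...                            // loop body
    //     jump().linkTo(loopTop, this);  // jump back to the label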
    class Label {
        friend class AbstractMacroAssembler<AssemblerType>;
        friend struct DFG::OSRExit;
        friend class Jump;
        template<PtrTag> friend class MacroAssemblerCodeRef;
        friend class LinkBuffer;
        friend class Watchpoint;

    public:
        Label()
        {
        }

        Label(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
            masm->invalidateAllTempRegisters();
        }

        bool operator==(const Label& other) const { return m_label == other.m_label; }

        bool isSet() const { return m_label.isSet(); }
    private:
        AssemblerLabel m_label;
    };

    // ConvertibleLoadLabel:
    //
    // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
    // so that:
    //
    //     loadPtr(Address(a, i), b)
    //
    // becomes:
    //
    //     addPtr(TrustedImmPtr(i), a, b)
    class ConvertibleLoadLabel {
        friend class AbstractMacroAssembler<AssemblerType>;
        friend class LinkBuffer;

    public:
        ConvertibleLoadLabel()
        {
        }

        ConvertibleLoadLabel(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.labelIgnoringWatchpoints())
        {
        }

        bool isSet() const { return m_label.isSet(); }
    private:
        AssemblerLabel m_label;
    };

    // DataLabelPtr:
    //
    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    // patched after the code has been generated.
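    //
    // For example (a sketch; moveWithPatch() is provided by the concrete
    // MacroAssembler):
    //
    //     DataLabelPtr site = moveWithPatch(TrustedImmPtr(nullptr), destReg);
    //     // ... once linked, repatchPointer() can replace the planted value.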
    class DataLabelPtr {
        friend class AbstractMacroAssembler<AssemblerType>;
        friend class LinkBuffer;
    public:
        DataLabelPtr()
        {
        }

        DataLabelPtr(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
    };

    // DataLabel32:
    //
    // A DataLabel32 is used to refer to a location in the code containing a 32-bit constant to be
    // patched after the code has been generated.
    class DataLabel32 {
        friend class AbstractMacroAssembler<AssemblerType>;
        friend class LinkBuffer;
    public:
        DataLabel32()
        {
        }

        DataLabel32(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        AssemblerLabel label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };

    // DataLabelCompact:
    //
    // A DataLabelCompact is used to refer to a location in the code containing a
    // compact immediate to be patched after the code has been generated.
    class DataLabelCompact {
        friend class AbstractMacroAssembler<AssemblerType>;
        friend class LinkBuffer;
    public:
        DataLabelCompact()
        {
        }

        DataLabelCompact(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        DataLabelCompact(AssemblerLabel label)
            : m_label(label)
        {
        }

        AssemblerLabel label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };

    // Call:
    //
    // A Call object is a reference to a call instruction that has been planted
    // into the code buffer - it is typically used to link the call, setting the
    // relative offset such that when executed it will call to the desired
    // destination.
    class Call {
        friend class AbstractMacroAssembler<AssemblerType>;

    public:
        enum Flags {
            None = 0x0,
            Linkable = 0x1,
            Near = 0x2,
            Tail = 0x4,
            LinkableNear = 0x3,
            LinkableNearTail = 0x7,
        };

        Call()
            : m_flags(None)
        {
        }

        Call(AssemblerLabel jmp, Flags flags)
            : m_label(jmp)
            , m_flags(flags)
        {
        }

        bool isFlagSet(Flags flag)
        {
            return m_flags & flag;
        }

        static Call fromTailJump(Jump jump)
        {
            return Call(jump.m_label, Linkable);
        }

        AssemblerLabel m_label;
    private:
        Flags m_flags;
    };

    // Jump:
    //
    // A Jump object is a reference to a jump instruction that has been planted
    // into the code buffer - it is typically used to link the jump, setting the
    // relative offset such that when executed it will jump to the desired
    // destination.
    class Jump {
        friend class AbstractMacroAssembler<AssemblerType>;
        friend class Call;
        friend struct DFG::OSRExit;
        friend class LinkBuffer;
    public:
        Jump()
        {
        }

#if CPU(ARM_THUMB2)
        // FIXME: this information should be stored in the instruction stream, not in the Jump object.
        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }
#elif CPU(ARM64)
        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_is64Bit(is64Bit)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_bitNumber(bitNumber)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
        }
#else
        Jump(AssemblerLabel jmp)
            : m_label(jmp)
        {
        }
#endif

        Label label() const
        {
            Label result;
            result.m_label = m_label;
            return result;
        }

        void link(AbstractMacroAssemblerType* masm) const
        {
            masm->invalidateAllTempRegisters();

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#else
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
#endif
        }

        void linkTo(Label label, AbstractMacroAssemblerType* masm) const
        {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#else
            masm->m_assembler.linkJump(m_label, label.m_label);
#endif
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
#if CPU(ARM_THUMB2)
        ARMv7Assembler::JumpType m_type;
        ARMv7Assembler::Condition m_condition;
#elif CPU(ARM64)
        ARM64Assembler::JumpType m_type;
        ARM64Assembler::Condition m_condition;
        bool m_is64Bit;
        unsigned m_bitNumber;
        ARM64Assembler::RegisterID m_compareRegister;
#endif
    };

    struct PatchableJump {
        PatchableJump()
        {
        }

        explicit PatchableJump(Jump jump)
            : m_jump(jump)
        {
        }

        operator Jump&() { return m_jump; }

        Jump m_jump;
    };

    // JumpList:
    //
    // A JumpList is a set of Jump objects.
    // All jumps in the set will be linked to the same destination.
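    //
    // For example (a sketch; the branch operations are provided by the
    // concrete MacroAssembler):
    //
    //     JumpList failureCases;
    //     failureCases.append(branchTest32(Zero, regA));
    //     failureCases.append(branchTest32(Zero, regB));
    //     ...                      // fast path
    //     failureCases.link(this); // both branches land here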
    class JumpList {
    public:
        typedef Vector<Jump, 2> JumpVector;

        JumpList() { }

        JumpList(Jump jump)
        {
            if (jump.isSet())
                append(jump);
        }

        void link(AbstractMacroAssemblerType* masm) const
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].link(masm);
        }

        void linkTo(Label label, AbstractMacroAssemblerType* masm) const
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].linkTo(label, masm);
        }

        void append(Jump jump)
        {
            if (jump.isSet())
                m_jumps.append(jump);
        }

        void append(const JumpList& other)
        {
            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
        }

        bool empty() const
        {
            return !m_jumps.size();
        }

        void clear()
        {
            m_jumps.clear();
        }

        const JumpVector& jumps() const { return m_jumps; }

    private:
        JumpVector m_jumps;
    };

    // Section 3: Misc admin methods
#if ENABLE(DFG_JIT)
    Label labelIgnoringWatchpoints()
    {
        Label result;
        result.m_label = m_assembler.labelIgnoringWatchpoints();
        return result;
    }
#else
    Label labelIgnoringWatchpoints()
    {
        return label();
    }
#endif

    Label label()
    {
        return Label(this);
    }

    void padBeforePatch()
    {
        // Rely on the fact that asking for a label already does the padding.
        (void)label();
    }

    Label watchpointLabel()
    {
        Label result;
        result.m_label = m_assembler.labelForWatchpoint();
        return result;
    }

    Label align()
    {
        m_assembler.align(16);
        return Label(this);
    }

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    class RegisterAllocationOffset {
    public:
        RegisterAllocationOffset(unsigned offset)
            : m_offset(offset)
        {
        }

        void checkOffsets(unsigned low, unsigned high)
        {
            RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
        }

    private:
        unsigned m_offset;
    };

    void addRegisterAllocationAtOffset(unsigned offset)
    {
        m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
    }

    void clearRegisterAllocationOffsets()
    {
        m_registerAllocationForOffsets.clear();
    }

    void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
    {
        if (offset1 > offset2)
            std::swap(offset1, offset2);

        size_t size = m_registerAllocationForOffsets.size();
        for (size_t i = 0; i < size; ++i)
            m_registerAllocationForOffsets[i].checkOffsets(offset1, offset2);
    }
#endif

    template<typename T, typename U>
    static ptrdiff_t differenceBetween(T from, U to)
    {
        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    }

    template<PtrTag aTag, PtrTag bTag>
    static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr<aTag>& a, const MacroAssemblerCodePtr<bTag>& b)
    {
        return b.template dataLocation<ptrdiff_t>() - a.template dataLocation<ptrdiff_t>();
    }

    unsigned debugOffset() { return m_assembler.debugOffset(); }

    ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
    {
        AssemblerType::cacheFlush(code, size);
    }

    template<PtrTag tag>
    static void linkJump(void* code, Jump jump, CodeLocationLabel<tag> target)
    {
        AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
    }

    static void linkPointer(void* code, AssemblerLabel label, void* value)
    {
        AssemblerType::linkPointer(code, label, value);
    }

    template<PtrTag tag>
    static void linkPointer(void* code, AssemblerLabel label, MacroAssemblerCodePtr<tag> value)
    {
        AssemblerType::linkPointer(code, label, value.executableAddress());
    }

    template<PtrTag tag>
    static void* getLinkerAddress(void* code, AssemblerLabel label)
    {
        return tagCodePtr(AssemblerType::getRelocatedAddress(code, label), tag);
    }

    static unsigned getLinkerCallReturnOffset(Call call)
    {
        return AssemblerType::getCallReturnOffset(call.m_label);
    }

    template<PtrTag jumpTag, PtrTag destTag>
    static void repatchJump(CodeLocationJump<jumpTag> jump, CodeLocationLabel<destTag> destination)
    {
        AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
    }

    template<PtrTag jumpTag>
    static void repatchJumpToNop(CodeLocationJump<jumpTag> jump)
    {
        AssemblerType::relinkJumpToNop(jump.dataLocation());
    }

    template<PtrTag callTag, PtrTag destTag>
    static void repatchNearCall(CodeLocationNearCall<callTag> nearCall, CodeLocationLabel<destTag> destination)
    {
        switch (nearCall.callMode()) {
        case NearCallMode::Tail:
            AssemblerType::relinkJump(nearCall.dataLocation(), destination.dataLocation());
            return;
        case NearCallMode::Regular:
            AssemblerType::relinkCall(nearCall.dataLocation(), destination.untaggedExecutableAddress());
            return;
        }
        RELEASE_ASSERT_NOT_REACHED();
    }

    template<PtrTag tag>
    static void repatchCompact(CodeLocationDataLabelCompact<tag> dataLabelCompact, int32_t value)
    {
        AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    template<PtrTag tag>
    static void repatchInt32(CodeLocationDataLabel32<tag> dataLabel32, int32_t value)
    {
        AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
    }

    template<PtrTag tag>
    static void repatchPointer(CodeLocationDataLabelPtr<tag> dataLabelPtr, void* value)
    {
        AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
    }

    template<PtrTag tag>
    static void* readPointer(CodeLocationDataLabelPtr<tag> dataLabelPtr)
    {
        return AssemblerType::readPointer(dataLabelPtr.dataLocation());
    }

    template<PtrTag tag>
    static void replaceWithLoad(CodeLocationConvertibleLoad<tag> label)
    {
        AssemblerType::replaceWithLoad(label.dataLocation());
    }

    template<PtrTag tag>
    static void replaceWithAddressComputation(CodeLocationConvertibleLoad<tag> label)
    {
        AssemblerType::replaceWithAddressComputation(label.dataLocation());
    }

    template<typename Functor>
    void addLinkTask(const Functor& functor)
    {
        m_linkTasks.append(createSharedTask<void(LinkBuffer&)>(functor));
    }
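
    // A link task runs when the LinkBuffer finalizes this code, i.e. once the
    // final code addresses are known. For example (a hedged sketch; locationOf()
    // is a LinkBuffer API and the tag choice is illustrative):
    //
    //     Label target = label();
    //     addLinkTask([=] (LinkBuffer& linkBuffer) {
    //         void* finalAddress = linkBuffer.locationOf<NoPtrTag>(target).dataLocation();
    //         // record finalAddress for later patching
    //     });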

    void emitNops(size_t memoryToFillWithNopsInBytes)
    {
#if CPU(ARM64)
        RELEASE_ASSERT(memoryToFillWithNopsInBytes % 4 == 0);
        for (unsigned i = 0; i < memoryToFillWithNopsInBytes / 4; ++i)
            m_assembler.nop();
#else
        AssemblerBuffer& buffer = m_assembler.buffer();
        size_t startCodeSize = buffer.codeSize();
        size_t targetCodeSize = startCodeSize + memoryToFillWithNopsInBytes;
        buffer.ensureSpace(memoryToFillWithNopsInBytes);
        AssemblerType::fillNops(static_cast<char*>(buffer.data()) + startCodeSize, memoryToFillWithNopsInBytes, memcpy);
        buffer.setCodeSize(targetCodeSize);
#endif
    }

    ALWAYS_INLINE void tagReturnAddress() { }
    ALWAYS_INLINE void untagReturnAddress() { }

    ALWAYS_INLINE void tagPtr(PtrTag, RegisterID) { }
    ALWAYS_INLINE void tagPtr(RegisterID, RegisterID) { }
    ALWAYS_INLINE void untagPtr(PtrTag, RegisterID) { }
    ALWAYS_INLINE void untagPtr(RegisterID, RegisterID) { }
    ALWAYS_INLINE void removePtrTag(RegisterID) { }

protected:
    AbstractMacroAssembler()
        : m_randomSource(0)
        , m_assembler()
    {
        invalidateAllTempRegisters();
    }

    uint32_t random()
    {
        if (!m_randomSourceIsInitialized) {
            m_randomSourceIsInitialized = true;
            m_randomSource.setSeed(cryptographicallyRandomNumber());
        }
        return m_randomSource.getUint32();
    }

    bool m_randomSourceIsInitialized { false };
    WeakRandom m_randomSource;
public:
    AssemblerType m_assembler;
protected:

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
#endif

    static bool haveScratchRegisterForBlinding()
    {
        return false;
    }
    static RegisterID scratchRegisterForBlinding()
    {
        UNREACHABLE_FOR_PLATFORM();
        return firstRegister();
    }
    static bool canBlind() { return false; }
    static bool shouldBlindForSpecificArch(uint32_t) { return false; }
    static bool shouldBlindForSpecificArch(uint64_t) { return false; }

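    // CachedTempRegister:
    //
    // Remembers the last value materialized into a scratch register so that
    // repeated loads of the same constant can be elided. Each cached register
    // owns one bit in m_tempRegistersValidBits; planting a Label or linking a
    // Jump invalidates every cache, because control flow may reach that point
    // with the register holding some other value.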
    class CachedTempRegister {
        friend class DataLabelPtr;
        friend class DataLabel32;
        friend class DataLabelCompact;
        friend class Jump;
        friend class Label;

    public:
        CachedTempRegister(AbstractMacroAssemblerType* masm, RegisterID registerID)
            : m_masm(masm)
            , m_registerID(registerID)
            , m_value(0)
            , m_validBit(1 << static_cast<unsigned>(registerID))
        {
            ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
        }

        ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }

        ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }

        bool value(intptr_t& value)
        {
            value = m_value;
            return m_masm->isTempRegisterValid(m_validBit);
        }

        void setValue(intptr_t value)
        {
            m_value = value;
            m_masm->setTempRegisterValid(m_validBit);
        }

        ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }

    private:
        AbstractMacroAssemblerType* m_masm;
        RegisterID m_registerID;
        intptr_t m_value;
        unsigned m_validBit;
    };

    ALWAYS_INLINE void invalidateAllTempRegisters()
    {
        m_tempRegistersValidBits = 0;
    }

    ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
    {
        return (m_tempRegistersValidBits & registerMask);
    }

    ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits &= ~registerMask;
    }

    ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits |= registerMask;
    }

    friend class AllowMacroScratchRegisterUsage;
    friend class AllowMacroScratchRegisterUsageIf;
    friend class DisallowMacroScratchRegisterUsage;
    unsigned m_tempRegistersValidBits;
    bool m_allowScratchRegister { true };

    Vector<RefPtr<SharedTask<void(LinkBuffer&)>>> m_linkTasks;

    friend class LinkBuffer;
}; // class AbstractMacroAssembler

template <class AssemblerType>
inline typename AbstractMacroAssembler<AssemblerType>::BaseIndex
AbstractMacroAssembler<AssemblerType>::Address::indexedBy(
    typename AbstractMacroAssembler<AssemblerType>::RegisterID index,
    typename AbstractMacroAssembler<AssemblerType>::Scale scale) const
{
    return BaseIndex(base, index, scale, offset);
}

#endif // ENABLE(ASSEMBLER)

} // namespace JSC

#if ENABLE(ASSEMBLER)

namespace WTF {

class PrintStream;

void printInternal(PrintStream& out, JSC::AbstractMacroAssemblerBase::StatusCondition);

} // namespace WTF

#endif // ENABLE(ASSEMBLER)