/*
 * Copyright (C) 2008-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "AbortReason.h"
#include "AssemblerBuffer.h"
#include "AssemblerCommon.h"
#include "CPU.h"
#include "CodeLocation.h"
#include "JSCJSValue.h"
#include "JSCPtrTag.h"
#include "MacroAssemblerCodeRef.h"
#include "MacroAssemblerHelpers.h"
#include "Options.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/Noncopyable.h>
#include <wtf/SharedTask.h>
#include <wtf/Vector.h>
#include <wtf/WeakRandom.h>

namespace JSC {

#if ENABLE(ASSEMBLER)

class AllowMacroScratchRegisterUsage;
class DisallowMacroScratchRegisterUsage;
class LinkBuffer;
class Watchpoint;
namespace DFG {
struct OSRExit;
}

class AbstractMacroAssemblerBase {
    WTF_MAKE_FAST_ALLOCATED;
public:
    enum StatusCondition {
        Success,
        Failure
    };

    static StatusCondition invert(StatusCondition condition)
    {
        switch (condition) {
        case Success:
            return Failure;
        case Failure:
            return Success;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return Success;
    }
};

template <class AssemblerType>
class AbstractMacroAssembler : public AbstractMacroAssemblerBase {
public:
    typedef AbstractMacroAssembler<AssemblerType> AbstractMacroAssemblerType;
    typedef AssemblerType AssemblerType_T;

    template<PtrTag tag> using CodePtr = MacroAssemblerCodePtr<tag>;
    template<PtrTag tag> using CodeRef = MacroAssemblerCodeRef<tag>;

    enum class CPUIDCheckState {
        NotChecked,
        Clear,
        Set
    };

    class Jump;

    typedef typename AssemblerType::RegisterID RegisterID;
    typedef typename AssemblerType::SPRegisterID SPRegisterID;
    typedef typename AssemblerType::FPRegisterID FPRegisterID;

    static constexpr RegisterID firstRegister() { return AssemblerType::firstRegister(); }
    static constexpr RegisterID lastRegister() { return AssemblerType::lastRegister(); }
    static constexpr unsigned numberOfRegisters() { return AssemblerType::numberOfRegisters(); }
    static const char* gprName(RegisterID id) { return AssemblerType::gprName(id); }

    static constexpr SPRegisterID firstSPRegister() { return AssemblerType::firstSPRegister(); }
    static constexpr SPRegisterID lastSPRegister() { return AssemblerType::lastSPRegister(); }
    static constexpr unsigned numberOfSPRegisters() { return AssemblerType::numberOfSPRegisters(); }
    static const char* sprName(SPRegisterID id) { return AssemblerType::sprName(id); }

    static constexpr FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); }
    static constexpr FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); }
    static constexpr unsigned numberOfFPRegisters() { return AssemblerType::numberOfFPRegisters(); }
    static const char* fprName(FPRegisterID id) { return AssemblerType::fprName(id); }

    // Section 1: MacroAssembler operand types
    //
    // The following types are used as operands to MacroAssembler operations,
    // describing immediate and memory operands to the instructions to be planted.

    enum Scale {
        TimesOne,
        TimesTwo,
        TimesFour,
        TimesEight,
    };

    static Scale timesPtr()
    {
        if (sizeof(void*) == 4)
            return TimesFour;
        return TimesEight;
    }
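
    // For example (illustrative only; 'baseReg', 'indexReg' and 'destReg' are
    // placeholder registers), an element of an array of pointer-sized values
    // is indexed with a timesPtr() scale:
    //
    //     loadPtr(BaseIndex(baseReg, indexReg, timesPtr()), destReg);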

    struct BaseIndex;

    static RegisterID withSwappedRegister(RegisterID original, RegisterID left, RegisterID right)
    {
        if (original == left)
            return right;
        if (original == right)
            return left;
        return original;
    }

    // Address:
    //
    // Describes a simple base-offset address.
    struct Address {
        explicit Address(RegisterID base, int32_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        Address withOffset(int32_t additionalOffset)
        {
            return Address(base, offset + additionalOffset);
        }

        Address withSwappedRegister(RegisterID left, RegisterID right)
        {
            return Address(AbstractMacroAssembler::withSwappedRegister(base, left, right), offset);
        }

        BaseIndex indexedBy(RegisterID index, Scale) const;

        RegisterID base;
        int32_t offset;
    };
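
    // For example (illustrative; 'ptrReg' and 'destReg' are placeholder
    // registers), a 32-bit field at byte offset 8 from a base pointer is
    // loaded with:
    //
    //     load32(Address(ptrReg, 8), destReg);
    //
    // and withOffset() derives the address of a neighbouring field:
    //
    //     load32(Address(ptrReg, 8).withOffset(4), destReg2);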

    struct ExtendedAddress {
        explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        RegisterID base;
        intptr_t offset;
    };

    // ImplicitAddress:
    //
    // This class is used for explicit 'load' and 'store' operations
    // (as opposed to situations in which a memory operand is provided
    // to a generic operation, such as an integer arithmetic instruction).
    //
    // In the case of a load (or store) operation we want to permit
    // addresses to be implicitly constructed, e.g. the two calls:
    //
    //     load32(Address(addrReg), destReg);
    //     load32(addrReg, destReg);
    //
    // are equivalent; the explicit wrapping of the Address in the former
    // is unnecessary.
    struct ImplicitAddress {
        ImplicitAddress(RegisterID base)
            : base(base)
            , offset(0)
        {
            ASSERT(base != RegisterID::InvalidGPRReg);
        }

        ImplicitAddress(Address address)
            : base(address.base)
            , offset(address.offset)
        {
            ASSERT(base != RegisterID::InvalidGPRReg);
        }

        RegisterID base;
        int32_t offset;
    };

    // BaseIndex:
    //
    // Describes a complex addressing mode.
    struct BaseIndex {
        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
            : base(base)
            , index(index)
            , scale(scale)
            , offset(offset)
        {
        }

        RegisterID base;
        RegisterID index;
        Scale scale;
        int32_t offset;

        BaseIndex withOffset(int32_t additionalOffset)
        {
            return BaseIndex(base, index, scale, offset + additionalOffset);
        }

        BaseIndex withSwappedRegister(RegisterID left, RegisterID right)
        {
            return BaseIndex(AbstractMacroAssembler::withSwappedRegister(base, left, right), AbstractMacroAssembler::withSwappedRegister(index, left, right), scale, offset);
        }
    };
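
    // The effective address denoted by a BaseIndex operand is:
    //
    //     base + (index << scale) + offset
    //
    // For example (illustrative; 'arrayReg', 'indexReg' and 'destReg' are
    // placeholder registers), the i'th 32-bit element of an array whose
    // storage starts 16 bytes past arrayReg is loaded with:
    //
    //     load32(BaseIndex(arrayReg, indexReg, TimesFour, 16), destReg);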

    // AbsoluteAddress:
    //
    // Describes a memory operand given by a pointer. For regular load & store
    // operations an unwrapped void* is used instead of this wrapper.
    struct AbsoluteAddress {
        explicit AbsoluteAddress(const void* ptr)
            : m_ptr(ptr)
        {
        }

        const void* m_ptr;
    };

    // TrustedImm:
    //
    // An empty super class of each of the TrustedImm types. This class is used for template overloads
    // on a TrustedImm type via std::is_base_of.
    struct TrustedImm { };

    // TrustedImmPtr:
    //
    // A pointer-sized immediate operand to an instruction. This is wrapped
    // in a class requiring explicit construction in order to differentiate
    // it from pointers used as absolute addresses for memory operations.
    struct TrustedImmPtr : public TrustedImm {
        TrustedImmPtr() { }

        explicit TrustedImmPtr(const void* value)
            : m_value(value)
        {
        }

        template<typename ReturnType, typename... Arguments>
        explicit TrustedImmPtr(ReturnType(*value)(Arguments...))
            : m_value(reinterpret_cast<void*>(value))
        {
        }

        explicit TrustedImmPtr(std::nullptr_t)
        {
        }

        explicit TrustedImmPtr(size_t value)
            : m_value(reinterpret_cast<void*>(value))
        {
        }

        intptr_t asIntptr()
        {
            return reinterpret_cast<intptr_t>(m_value);
        }

        void* asPtr()
        {
            return const_cast<void*>(m_value);
        }

        const void* m_value { 0 };
    };
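
    // For example (illustrative; move() is provided by the concrete
    // MacroAssembler and 'destReg' is a placeholder register), a known
    // pointer constant is materialized with:
    //
    //     move(TrustedImmPtr(someObject), destReg);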

    struct ImmPtr : private TrustedImmPtr
    {
        explicit ImmPtr(const void* value)
            : TrustedImmPtr(value)
        {
        }

        TrustedImmPtr asTrustedImmPtr() { return *this; }
    };

    // TrustedImm32:
    //
    // A 32-bit immediate operand to an instruction. This is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm32 : public TrustedImm {
        TrustedImm32() { }

        explicit TrustedImm32(int32_t value)
            : m_value(value)
        {
        }

#if !CPU(X86_64)
        explicit TrustedImm32(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int32_t m_value;
    };

    struct Imm32 : private TrustedImm32 {
        explicit Imm32(int32_t value)
            : TrustedImm32(value)
        {
        }
#if !CPU(X86_64)
        explicit Imm32(TrustedImmPtr ptr)
            : TrustedImm32(ptr)
        {
        }
#endif
        const TrustedImm32& asTrustedImm32() const { return *this; }
    };
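
    // Unlike their Trusted counterparts, Imm32 (and ImmPtr above/Imm64 below)
    // wrap immediates whose values originate from untrusted input. Their value
    // is private, so higher layers may choose to blind the constant (see
    // canBlind() and shouldBlindForSpecificArch() below) before planting it
    // in executable memory. For example (illustrative):
    //
    //     add32(Imm32(untrustedInt), destReg); // may be emitted blinded
    //     add32(TrustedImm32(4), destReg); // always emitted verbatim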

    // TrustedImm64:
    //
    // A 64-bit immediate operand to an instruction. This is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm64 : TrustedImm {
        TrustedImm64() { }

        explicit TrustedImm64(int64_t value)
            : m_value(value)
        {
        }

#if CPU(X86_64) || CPU(ARM64)
        explicit TrustedImm64(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int64_t m_value;
    };

    struct Imm64 : private TrustedImm64
    {
        explicit Imm64(int64_t value)
            : TrustedImm64(value)
        {
        }
#if CPU(X86_64) || CPU(ARM64)
        explicit Imm64(TrustedImmPtr ptr)
            : TrustedImm64(ptr)
        {
        }
#endif
        const TrustedImm64& asTrustedImm64() const { return *this; }
    };

    // Section 2: MacroAssembler code buffer handles
    //
    // The following types are used to reference items in the code buffer
    // during JIT code generation. For example, the type Jump is used to
    // track the location of a jump instruction so that it may later be
    // linked to a label marking its destination.

    // Label:
    //
    // A Label records a point in the generated instruction stream, typically such that
    // it may be used as a destination for a jump.
    class Label {
        friend class AbstractMacroAssembler<AssemblerType>;
        friend struct DFG::OSRExit;
        friend class Jump;
        template<PtrTag> friend class MacroAssemblerCodeRef;
        friend class LinkBuffer;
        friend class Watchpoint;

    public:
        Label()
        {
        }

        Label(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
            masm->invalidateAllTempRegisters();
        }

        bool operator==(const Label& other) const { return m_label == other.m_label; }

        bool isSet() const { return m_label.isSet(); }
    private:
        AssemblerLabel m_label;
    };
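
    // For example (illustrative; jump() is provided by the concrete
    // MacroAssembler), a backward branch to the top of a loop:
    //
    //     Label loopTop(this); // equivalently: Label loopTop = label();
    //     ... // loop body
    //     jump().linkTo(loopTop, this);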

    // ConvertibleLoadLabel:
    //
    // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
    // so that:
    //
    //     loadPtr(Address(a, i), b)
    //
    // becomes:
    //
    //     addPtr(TrustedImmPtr(i), a, b)
    class ConvertibleLoadLabel {
        friend class AbstractMacroAssembler<AssemblerType>;
        friend class LinkBuffer;

    public:
        ConvertibleLoadLabel()
        {
        }

        ConvertibleLoadLabel(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.labelIgnoringWatchpoints())
        {
        }

        bool isSet() const { return m_label.isSet(); }
    private:
        AssemblerLabel m_label;
    };

    // DataLabelPtr:
    //
    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    // patched after the code has been generated.
    class DataLabelPtr {
        friend class AbstractMacroAssembler<AssemblerType>;
        friend class LinkBuffer;
    public:
        DataLabelPtr()
        {
        }

        DataLabelPtr(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
    };
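
    // A DataLabelPtr is typically produced by a *WithPatch operation on the
    // concrete MacroAssembler (for example, moveWithPatch(TrustedImmPtr(...), reg)),
    // resolved to a CodeLocationDataLabelPtr through LinkBuffer, and later
    // rewritten via repatchPointer() (see Section 3 below).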

    // DataLabel32:
    //
    // A DataLabel32 is used to refer to a location in the code containing a 32-bit constant to be
    // patched after the code has been generated.
    class DataLabel32 {
        friend class AbstractMacroAssembler<AssemblerType>;
        friend class LinkBuffer;
    public:
        DataLabel32()
        {
        }

        DataLabel32(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        AssemblerLabel label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };

    // DataLabelCompact:
    //
    // A DataLabelCompact is used to refer to a location in the code containing a
    // compact immediate to be patched after the code has been generated.
    class DataLabelCompact {
        friend class AbstractMacroAssembler<AssemblerType>;
        friend class LinkBuffer;
    public:
        DataLabelCompact()
        {
        }

        DataLabelCompact(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        DataLabelCompact(AssemblerLabel label)
            : m_label(label)
        {
        }

        AssemblerLabel label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };

    // Call:
    //
    // A Call object is a reference to a call instruction that has been planted
    // into the code buffer - it is typically used to link the call, setting the
    // relative offset such that when executed it will call to the desired
    // destination.
    class Call {
        friend class AbstractMacroAssembler<AssemblerType>;

    public:
        enum Flags {
            None = 0x0,
            Linkable = 0x1,
            Near = 0x2,
            Tail = 0x4,
            LinkableNear = 0x3,
            LinkableNearTail = 0x7,
        };

        Call()
            : m_flags(None)
        {
        }

        Call(AssemblerLabel jmp, Flags flags)
            : m_label(jmp)
            , m_flags(flags)
        {
        }

        bool isFlagSet(Flags flag)
        {
            return m_flags & flag;
        }

        static Call fromTailJump(Jump jump)
        {
            return Call(jump.m_label, Linkable);
        }

        AssemblerLabel m_label;
    private:
        Flags m_flags;
    };
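
    // Note that the combined flags are simply the expected bitwise ORs:
    // LinkableNear == (Linkable | Near), and
    // LinkableNearTail == (Linkable | Near | Tail).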

    // Jump:
    //
    // A Jump object is a reference to a jump instruction that has been planted
    // into the code buffer - it is typically used to link the jump, setting the
    // relative offset such that when executed it will jump to the desired
    // destination.
    class Jump {
        friend class AbstractMacroAssembler<AssemblerType>;
        friend class Call;
        friend struct DFG::OSRExit;
        friend class LinkBuffer;
    public:
        Jump()
        {
        }

#if CPU(ARM_THUMB2)
        // FIXME: this information should be stored in the instruction stream, not in the Jump object.
        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }
#elif CPU(ARM64)
        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_is64Bit(is64Bit)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_bitNumber(bitNumber)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
        }
#else
        Jump(AssemblerLabel jmp)
            : m_label(jmp)
        {
        }
#endif

        Label label() const
        {
            Label result;
            result.m_label = m_label;
            return result;
        }

        void link(AbstractMacroAssemblerType* masm) const
        {
            masm->invalidateAllTempRegisters();

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#else
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
#endif
        }

        void linkTo(Label label, AbstractMacroAssemblerType* masm) const
        {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#else
            masm->m_assembler.linkJump(m_label, label.m_label);
#endif
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
#if CPU(ARM_THUMB2)
        ARMv7Assembler::JumpType m_type;
        ARMv7Assembler::Condition m_condition;
#elif CPU(ARM64)
        ARM64Assembler::JumpType m_type;
        ARM64Assembler::Condition m_condition;
        bool m_is64Bit;
        unsigned m_bitNumber;
        ARM64Assembler::RegisterID m_compareRegister;
#endif
    };
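
    // For example (illustrative; branch32() is provided by the concrete
    // MacroAssembler and 'valueReg' is a placeholder register), a forward
    // branch over a block of code:
    //
    //     Jump isZero = branch32(Equal, valueReg, TrustedImm32(0));
    //     ... // code executed only when valueReg is non-zero
    //     isZero.link(this); // the branch now targets this point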

    struct PatchableJump {
        PatchableJump()
        {
        }

        explicit PatchableJump(Jump jump)
            : m_jump(jump)
        {
        }

        operator Jump&() { return m_jump; }

        Jump m_jump;
    };

    // JumpList:
    //
    // A JumpList is a set of Jump objects.
    // All jumps in the set will be linked to the same destination.
    class JumpList {
    public:
        typedef Vector<Jump, 2> JumpVector;

        JumpList() { }

        JumpList(Jump jump)
        {
            if (jump.isSet())
                append(jump);
        }

        void link(AbstractMacroAssemblerType* masm) const
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].link(masm);
        }

        void linkTo(Label label, AbstractMacroAssemblerType* masm) const
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].linkTo(label, masm);
        }

        void append(Jump jump)
        {
            if (jump.isSet())
                m_jumps.append(jump);
        }

        void append(const JumpList& other)
        {
            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
        }

        bool empty() const
        {
            return !m_jumps.size();
        }

        void clear()
        {
            m_jumps.clear();
        }

        const JumpVector& jumps() const { return m_jumps; }

    private:
        JumpVector m_jumps;
    };
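
    // For example (illustrative; the branch operations are provided by the
    // concrete MacroAssembler), several failure branches can be gathered up
    // and bound to a single slow path:
    //
    //     JumpList failureCases;
    //     failureCases.append(branch32(Equal, valueReg, TrustedImm32(0)));
    //     failureCases.append(branchTest32(Zero, flagsReg));
    //     ... // fast path
    //     failureCases.link(this); // all appended jumps target this point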

    // Section 3: Misc admin methods
#if ENABLE(DFG_JIT)
    Label labelIgnoringWatchpoints()
    {
        Label result;
        result.m_label = m_assembler.labelIgnoringWatchpoints();
        return result;
    }
#else
    Label labelIgnoringWatchpoints()
    {
        return label();
    }
#endif

    Label label()
    {
        return Label(this);
    }

    void padBeforePatch()
    {
        // Rely on the fact that asking for a label already does the padding.
        (void)label();
    }

    Label watchpointLabel()
    {
        Label result;
        result.m_label = m_assembler.labelForWatchpoint();
        return result;
    }

    Label align()
    {
        m_assembler.align(16);
        return Label(this);
    }

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    class RegisterAllocationOffset {
    public:
        RegisterAllocationOffset(unsigned offset)
            : m_offset(offset)
        {
        }

        void checkOffsets(unsigned low, unsigned high)
        {
            RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
        }

    private:
        unsigned m_offset;
    };

    void addRegisterAllocationAtOffset(unsigned offset)
    {
        m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
    }

    void clearRegisterAllocationOffsets()
    {
        m_registerAllocationForOffsets.clear();
    }

    void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
    {
        if (offset1 > offset2)
            std::swap(offset1, offset2);

        size_t size = m_registerAllocationForOffsets.size();
        for (size_t i = 0; i < size; ++i)
            m_registerAllocationForOffsets[i].checkOffsets(offset1, offset2);
    }
#endif

    template<typename T, typename U>
    static ptrdiff_t differenceBetween(T from, U to)
    {
        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    }

    template<PtrTag aTag, PtrTag bTag>
    static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr<aTag>& a, const MacroAssemblerCodePtr<bTag>& b)
    {
        return b.template dataLocation<ptrdiff_t>() - a.template dataLocation<ptrdiff_t>();
    }

    unsigned debugOffset() { return m_assembler.debugOffset(); }

    ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
    {
        AssemblerType::cacheFlush(code, size);
    }

    template<PtrTag tag>
    static void linkJump(void* code, Jump jump, CodeLocationLabel<tag> target)
    {
        AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
    }

    static void linkPointer(void* code, AssemblerLabel label, void* value)
    {
        AssemblerType::linkPointer(code, label, value);
    }

    template<PtrTag tag>
    static void linkPointer(void* code, AssemblerLabel label, MacroAssemblerCodePtr<tag> value)
    {
        AssemblerType::linkPointer(code, label, value.executableAddress());
    }

    template<PtrTag tag>
    static void* getLinkerAddress(void* code, AssemblerLabel label)
    {
        return tagCodePtr(AssemblerType::getRelocatedAddress(code, label), tag);
    }

    static unsigned getLinkerCallReturnOffset(Call call)
    {
        return AssemblerType::getCallReturnOffset(call.m_label);
    }

    template<PtrTag jumpTag, PtrTag destTag>
    static void repatchJump(CodeLocationJump<jumpTag> jump, CodeLocationLabel<destTag> destination)
    {
        AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
    }

    template<PtrTag jumpTag>
    static void repatchJumpToNop(CodeLocationJump<jumpTag> jump)
    {
        AssemblerType::relinkJumpToNop(jump.dataLocation());
    }

    template<PtrTag callTag, PtrTag destTag>
    static void repatchNearCall(CodeLocationNearCall<callTag> nearCall, CodeLocationLabel<destTag> destination)
    {
        switch (nearCall.callMode()) {
        case NearCallMode::Tail:
            AssemblerType::relinkJump(nearCall.dataLocation(), destination.dataLocation());
            return;
        case NearCallMode::Regular:
            AssemblerType::relinkCall(nearCall.dataLocation(), destination.untaggedExecutableAddress());
            return;
        }
        RELEASE_ASSERT_NOT_REACHED();
    }

    template<PtrTag tag>
    static void repatchCompact(CodeLocationDataLabelCompact<tag> dataLabelCompact, int32_t value)
    {
        AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    template<PtrTag tag>
    static void repatchInt32(CodeLocationDataLabel32<tag> dataLabel32, int32_t value)
    {
        AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
    }

    template<PtrTag tag>
    static void repatchPointer(CodeLocationDataLabelPtr<tag> dataLabelPtr, void* value)
    {
        AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
    }

    template<PtrTag tag>
    static void* readPointer(CodeLocationDataLabelPtr<tag> dataLabelPtr)
    {
        return AssemblerType::readPointer(dataLabelPtr.dataLocation());
    }

    template<PtrTag tag>
    static void replaceWithLoad(CodeLocationConvertibleLoad<tag> label)
    {
        AssemblerType::replaceWithLoad(label.dataLocation());
    }

    template<PtrTag tag>
    static void replaceWithAddressComputation(CodeLocationConvertibleLoad<tag> label)
    {
        AssemblerType::replaceWithAddressComputation(label.dataLocation());
    }

    template<typename Functor>
    void addLinkTask(const Functor& functor)
    {
        m_linkTasks.append(createSharedTask<void(LinkBuffer&)>(functor));
    }

#if COMPILER(GCC)
    // Workaround for GCC demanding that memcpy "must be the name of a function with external linkage".
    static void* memcpy(void* dst, const void* src, size_t size)
    {
        return std::memcpy(dst, src, size);
    }
#endif

    void emitNops(size_t memoryToFillWithNopsInBytes)
    {
#if CPU(ARM64)
        RELEASE_ASSERT(memoryToFillWithNopsInBytes % 4 == 0);
        for (unsigned i = 0; i < memoryToFillWithNopsInBytes / 4; ++i)
            m_assembler.nop();
#else
        AssemblerBuffer& buffer = m_assembler.buffer();
        size_t startCodeSize = buffer.codeSize();
        size_t targetCodeSize = startCodeSize + memoryToFillWithNopsInBytes;
        buffer.ensureSpace(memoryToFillWithNopsInBytes);
        AssemblerType::template fillNops<memcpy>(static_cast<char*>(buffer.data()) + startCodeSize, memoryToFillWithNopsInBytes);
        buffer.setCodeSize(targetCodeSize);
#endif
    }

    ALWAYS_INLINE void tagReturnAddress() { }
    ALWAYS_INLINE void untagReturnAddress() { }

    ALWAYS_INLINE void tagPtr(PtrTag, RegisterID) { }
    ALWAYS_INLINE void tagPtr(RegisterID, RegisterID) { }
    ALWAYS_INLINE void untagPtr(PtrTag, RegisterID) { }
    ALWAYS_INLINE void untagPtr(RegisterID, RegisterID) { }
    ALWAYS_INLINE void removePtrTag(RegisterID) { }

protected:
    AbstractMacroAssembler()
        : m_randomSource(0)
        , m_assembler()
    {
        invalidateAllTempRegisters();
    }

    uint32_t random()
    {
        if (!m_randomSourceIsInitialized) {
            m_randomSourceIsInitialized = true;
            m_randomSource.setSeed(cryptographicallyRandomNumber());
        }
        return m_randomSource.getUint32();
    }

    bool m_randomSourceIsInitialized { false };
    WeakRandom m_randomSource;
public:
    AssemblerType m_assembler;
protected:

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
#endif

    static bool haveScratchRegisterForBlinding()
    {
        return false;
    }
    static RegisterID scratchRegisterForBlinding()
    {
        UNREACHABLE_FOR_PLATFORM();
        return firstRegister();
    }
    static bool canBlind() { return false; }
    static bool shouldBlindForSpecificArch(uint32_t) { return false; }
    static bool shouldBlindForSpecificArch(uint64_t) { return false; }

    class CachedTempRegister {
        friend class DataLabelPtr;
        friend class DataLabel32;
        friend class DataLabelCompact;
        friend class Jump;
        friend class Label;

    public:
        CachedTempRegister(AbstractMacroAssemblerType* masm, RegisterID registerID)
            : m_masm(masm)
            , m_registerID(registerID)
            , m_value(0)
            , m_validBit(1 << static_cast<unsigned>(registerID))
        {
            ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
        }

        ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }

        ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }

        bool value(intptr_t& value)
        {
            value = m_value;
            return m_masm->isTempRegisterValid(m_validBit);
        }

        void setValue(intptr_t value)
        {
            m_value = value;
            m_masm->setTempRegisterValid(m_validBit);
        }

        ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }

    private:
        AbstractMacroAssemblerType* m_masm;
        RegisterID m_registerID;
        intptr_t m_value;
        unsigned m_validBit;
    };
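
    // A minimal usage sketch (assuming a concrete MacroAssembler that keeps a
    // hypothetical cached scratch register 'm_cachedTempRegister'): before
    // reloading a constant into the scratch register, first check whether it
    // already holds the wanted value.
    //
    //     intptr_t currentValue;
    //     RegisterID scratch = m_cachedTempRegister.registerIDNoInvalidate();
    //     if (!(m_cachedTempRegister.value(currentValue) && currentValue == wantedValue)) {
    //         move(TrustedImmPtr(reinterpret_cast<void*>(wantedValue)), scratch);
    //         m_cachedTempRegister.setValue(wantedValue);
    //     }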

    ALWAYS_INLINE void invalidateAllTempRegisters()
    {
        m_tempRegistersValidBits = 0;
    }

    ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
    {
        return (m_tempRegistersValidBits & registerMask);
    }

    ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits &= ~registerMask;
    }

    ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits |= registerMask;
    }

    friend class AllowMacroScratchRegisterUsage;
    friend class AllowMacroScratchRegisterUsageIf;
    friend class DisallowMacroScratchRegisterUsage;
    unsigned m_tempRegistersValidBits;
    bool m_allowScratchRegister { true };

    Vector<RefPtr<SharedTask<void(LinkBuffer&)>>> m_linkTasks;

    friend class LinkBuffer;
}; // class AbstractMacroAssembler

template <class AssemblerType>
inline typename AbstractMacroAssembler<AssemblerType>::BaseIndex
AbstractMacroAssembler<AssemblerType>::Address::indexedBy(
    typename AbstractMacroAssembler<AssemblerType>::RegisterID index,
    typename AbstractMacroAssembler<AssemblerType>::Scale scale) const
{
    return BaseIndex(base, index, scale, offset);
}

#endif // ENABLE(ASSEMBLER)

} // namespace JSC

#if ENABLE(ASSEMBLER)

namespace WTF {

class PrintStream;

void printInternal(PrintStream& out, JSC::AbstractMacroAssemblerBase::StatusCondition);

} // namespace WTF

#endif // ENABLE(ASSEMBLER)