/*
 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(DFG_JIT)

#include "BlockDirectory.h"
#include "DFGAbstractInterpreter.h"
#include "DFGGenerationInfo.h"
#include "DFGInPlaceAbstractState.h"
#include "DFGJITCompiler.h"
#include "DFGOSRExit.h"
#include "DFGOSRExitJumpPlaceholder.h"
#include "DFGRegisterBank.h"
#include "DFGSilentRegisterSavePlan.h"
#include "JITMathIC.h"
#include "JITOperations.h"
#include "PutKind.h"
#include "SpillRegistersMode.h"
#include "StructureStubInfo.h"
#include "ValueRecovery.h"
#include "VirtualRegister.h"

namespace JSC { namespace DFG {

class GPRTemporary;
class JSValueOperand;
class SlowPathGenerator;
class SpeculativeJIT;
class SpeculateInt32Operand;
class SpeculateStrictInt32Operand;
class SpeculateDoubleOperand;
class SpeculateCellOperand;
class SpeculateBooleanOperand;

enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandJSValue };

// === SpeculativeJIT ===
//
// The SpeculativeJIT is used to generate a fast, but potentially
// incomplete code path for the dataflow. When generating code we may
// make assumptions about operand types, check them dynamically, and
// bail out to an alternate code path if a check fails. Importantly,
// the speculative code path cannot be reentered once a speculative
// check has failed. This allows the SpeculativeJIT to propagate type
// information (including information that has only speculatively been
// asserted) through the dataflow.
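//
// Illustrative sketch (not code from this class; the register, edge, and
// 64-bit JSValueRegs shape are hypothetical): a fast path typically fills
// an operand, registers an OSR exit for the case where a type check fails,
// and then emits code that assumes the check held:
//
//     GPRReg valueGPR = ...; // operand already filled as a JSValue
//     speculationCheck(
//         BadType, JSValueRegs(valueGPR), edge,
//         m_jit.branchIfNotInt32(JSValueRegs(valueGPR)));
//     // From here on, code generation may assume valueGPR holds an int32.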
class SpeculativeJIT {
    WTF_MAKE_FAST_ALLOCATED;

    friend struct OSRExit;
private:
    typedef JITCompiler::TrustedImm32 TrustedImm32;
    typedef JITCompiler::Imm32 Imm32;
    typedef JITCompiler::ImmPtr ImmPtr;
    typedef JITCompiler::TrustedImm64 TrustedImm64;
    typedef JITCompiler::Imm64 Imm64;

    // These constants are used to set spill-order priorities for
    // the register allocator.
#if USE(JSVALUE64)
    enum SpillOrder {
        SpillOrderConstant = 1, // no spill, and cheap fill
        SpillOrderSpilled = 2, // no spill
        SpillOrderJS = 4, // needs spill
        SpillOrderCell = 4, // needs spill
        SpillOrderStorage = 4, // needs spill
        SpillOrderInteger = 5, // needs spill and box
        SpillOrderBoolean = 5, // needs spill and box
        SpillOrderDouble = 6, // needs spill and convert
    };
#elif USE(JSVALUE32_64)
    enum SpillOrder {
        SpillOrderConstant = 1, // no spill, and cheap fill
        SpillOrderSpilled = 2, // no spill
        SpillOrderJS = 4, // needs spill
        SpillOrderStorage = 4, // needs spill
        SpillOrderDouble = 4, // needs spill
        SpillOrderInteger = 5, // needs spill and box
        SpillOrderCell = 5, // needs spill and box
        SpillOrderBoolean = 5, // needs spill and box
    };
#endif
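
    // For example, if the allocator must evict something while both a
    // constant (SpillOrderConstant) and a converted double (SpillOrderDouble)
    // are live, it prefers to evict the constant: no store is needed and the
    // refill is cheap, whereas the double would cost a spill now and a
    // conversion on refill.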
107
108 enum UseChildrenMode { CallUseChildren, UseChildrenCalledExplicitly };
109
110public:
111 SpeculativeJIT(JITCompiler&);
112 ~SpeculativeJIT();
113
114 VM& vm()
115 {
116 return m_jit.vm();
117 }
118
119 struct TrustedImmPtr {
120 template <typename T>
121 explicit TrustedImmPtr(T* value)
122 : m_value(value)
123 {
124 static_assert(!std::is_base_of<JSCell, T>::value, "To use a GC pointer, the graph must be aware of it. Use SpeculativeJIT::TrustedImmPtr::weakPointer instead.");
125 }
126
127 explicit TrustedImmPtr(RegisteredStructure structure)
128 : m_value(structure.get())
129 { }
130
131 explicit TrustedImmPtr(std::nullptr_t)
132 : m_value(nullptr)
133 { }
134
135 explicit TrustedImmPtr(FrozenValue* value)
136 : m_value(value->cell())
137 {
138 RELEASE_ASSERT(value->value().isCell());
139 }
140
141 explicit TrustedImmPtr(size_t value)
142 : m_value(bitwise_cast<void*>(value))
143 {
144 }
145
146 static TrustedImmPtr weakPointer(Graph& graph, JSCell* cell)
147 {
148 graph.m_plan.weakReferences().addLazily(cell);
149 return TrustedImmPtr(bitwise_cast<size_t>(cell));
150 }
151
152 operator MacroAssembler::TrustedImmPtr() const { return m_value; }
153 operator MacroAssembler::TrustedImm() const { return m_value; }
154
155 intptr_t asIntptr()
156 {
157 return m_value.asIntptr();
158 }
159
160 private:
161 MacroAssembler::TrustedImmPtr m_value;
162 };
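
    // Illustrative sketch (cell, nonCellTable, and destGPR are hypothetical):
    // materializing a GC cell pointer must go through weakPointer() so the
    // plan records the reference, while a non-cell pointer may use the plain
    // constructor:
    //
    //     m_jit.move(TrustedImmPtr::weakPointer(m_graph, cell), destGPR); // JSCell*
    //     m_jit.move(TrustedImmPtr(nonCellTable), destGPR);               // not a JSCell*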

    bool compile();

    void createOSREntries();
    void linkOSREntries(LinkBuffer&);

    BasicBlock* nextBlock()
    {
        for (BlockIndex resultIndex = m_block->index + 1; ; resultIndex++) {
            if (resultIndex >= m_jit.graph().numBlocks())
                return nullptr;
            if (BasicBlock* result = m_jit.graph().block(resultIndex))
                return result;
        }
    }

#if USE(JSVALUE64)
    GPRReg fillJSValue(Edge);
#elif USE(JSVALUE32_64)
    bool fillJSValue(Edge, GPRReg&, GPRReg&, FPRReg&);
#endif
    GPRReg fillStorage(Edge);

    // Lock and unlock GPR & FPR registers.
    void lock(GPRReg reg)
    {
        m_gprs.lock(reg);
    }
    void lock(FPRReg reg)
    {
        m_fprs.lock(reg);
    }
    void unlock(GPRReg reg)
    {
        m_gprs.unlock(reg);
    }
    void unlock(FPRReg reg)
    {
        m_fprs.unlock(reg);
    }

    // Used to check whether a child node is at its last use,
    // so that its machine registers may be reused.
    bool canReuse(Node* node)
    {
        return generationInfo(node).useCount() == 1;
    }
    bool canReuse(Node* nodeA, Node* nodeB)
    {
        return nodeA == nodeB && generationInfo(nodeA).useCount() == 2;
    }
    bool canReuse(Edge nodeUse)
    {
        return canReuse(nodeUse.node());
    }
    GPRReg reuse(GPRReg reg)
    {
        m_gprs.lock(reg);
        return reg;
    }
    FPRReg reuse(FPRReg reg)
    {
        m_fprs.lock(reg);
        return reg;
    }
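
    // Illustrative sketch (hypothetical unary node): a parent that is at its
    // child's last use may overwrite the child's register instead of
    // allocating a fresh one:
    //
    //     GPRReg op1GPR = ...; // child operand, already filled
    //     GPRReg resultGPR = canReuse(node->child1()) ? reuse(op1GPR) : allocate();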

    // Allocate a GPR/FPR.
    GPRReg allocate()
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
#endif
        VirtualRegister spillMe;
        GPRReg gpr = m_gprs.allocate(spillMe);
        if (spillMe.isValid()) {
#if USE(JSVALUE32_64)
            GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
            if ((info.registerFormat() & DataFormatJS))
                m_gprs.release(info.tagGPR() == gpr ? info.payloadGPR() : info.tagGPR());
#endif
            spill(spillMe);
        }
        return gpr;
    }
    GPRReg allocate(GPRReg specific)
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
#endif
        VirtualRegister spillMe = m_gprs.allocateSpecific(specific);
        if (spillMe.isValid()) {
#if USE(JSVALUE32_64)
            GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
            RELEASE_ASSERT(info.registerFormat() != DataFormatJSDouble);
            if ((info.registerFormat() & DataFormatJS))
                m_gprs.release(info.tagGPR() == specific ? info.payloadGPR() : info.tagGPR());
#endif
            spill(spillMe);
        }
        return specific;
    }
    GPRReg tryAllocate()
    {
        return m_gprs.tryAllocate();
    }
    FPRReg fprAllocate()
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
#endif
        VirtualRegister spillMe;
        FPRReg fpr = m_fprs.allocate(spillMe);
        if (spillMe.isValid())
            spill(spillMe);
        return fpr;
    }

    // Check whether a VirtualRegister is currently in a machine register.
    // We use this when filling operands to fill those that are already in
    // machine registers first (by locking VirtualRegisters that are already
    // in machine registers before filling those that are not, we attempt to
    // avoid spilling values we will need immediately).
    bool isFilled(Node* node)
    {
        return generationInfo(node).registerFormat() != DataFormatNone;
    }
    bool isFilledDouble(Node* node)
    {
        return generationInfo(node).registerFormat() == DataFormatDouble;
    }

    // Called on an operand once it has been consumed by a parent node.
    void use(Node* node)
    {
        if (!node->hasResult())
            return;
        GenerationInfo& info = generationInfo(node);

        // use() returns true when the value becomes dead, and any
        // associated resources may be freed.
        if (!info.use(*m_stream))
            return;

        // Release the associated machine registers.
        DataFormat registerFormat = info.registerFormat();
#if USE(JSVALUE64)
        if (registerFormat == DataFormatDouble)
            m_fprs.release(info.fpr());
        else if (registerFormat != DataFormatNone)
            m_gprs.release(info.gpr());
#elif USE(JSVALUE32_64)
        if (registerFormat == DataFormatDouble)
            m_fprs.release(info.fpr());
        else if (registerFormat & DataFormatJS) {
            m_gprs.release(info.tagGPR());
            m_gprs.release(info.payloadGPR());
        } else if (registerFormat != DataFormatNone)
            m_gprs.release(info.gpr());
#endif
    }
    void use(Edge nodeUse)
    {
        use(nodeUse.node());
    }

    RegisterSet usedRegisters();

    bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
    {
        return m_jit.graph().masqueradesAsUndefinedWatchpointIsStillValid(codeOrigin);
    }
    bool masqueradesAsUndefinedWatchpointIsStillValid()
    {
        return masqueradesAsUndefinedWatchpointIsStillValid(m_currentNode->origin.semantic);
    }

    void compileStoreBarrier(Node*);

    // Called by the speculative operand types, below, to fill operands into
    // machine registers, implicitly generating speculation checks as needed.
    GPRReg fillSpeculateInt32(Edge, DataFormat& returnFormat);
    GPRReg fillSpeculateInt32Strict(Edge);
    GPRReg fillSpeculateInt52(Edge, DataFormat desiredFormat);
    FPRReg fillSpeculateDouble(Edge);
    GPRReg fillSpeculateCell(Edge);
    GPRReg fillSpeculateBoolean(Edge);
    GeneratedOperandType checkGeneratedTypeForToInt32(Node*);

    void addSlowPathGenerator(std::unique_ptr<SlowPathGenerator>);
    void addSlowPathGeneratorLambda(Function<void()>&&);
    void runSlowPathGenerators(PCToCodeOriginMapBuilder&);

    void compile(Node*);
    void noticeOSRBirth(Node*);
    void bail(AbortReason);
    void compileCurrentBlock();

    void checkArgumentTypes();

    void clearGenerationInfo();

    // These methods are used when generating 'unexpected'
    // calls out from JIT code to C++ helper routines -
    // they spill all live values to the appropriate
    // slots in the JSStack without changing any state
    // in the GenerationInfo.
    SilentRegisterSavePlan silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source);
    SilentRegisterSavePlan silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source);
    void silentSpill(const SilentRegisterSavePlan&);
    void silentFill(const SilentRegisterSavePlan&);

    template<typename CollectionType>
    void silentSpill(const CollectionType& savePlans)
    {
        for (unsigned i = 0; i < savePlans.size(); ++i)
            silentSpill(savePlans[i]);
    }

    template<typename CollectionType>
    void silentFill(const CollectionType& savePlans)
    {
        for (unsigned i = savePlans.size(); i--;)
            silentFill(savePlans[i]);
    }

    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
    {
        ASSERT(plans.isEmpty());
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            GPRReg gpr = iter.regID();
            if (iter.name().isValid() && gpr != exclude && gpr != exclude2) {
                SilentRegisterSavePlan plan = silentSavePlanForGPR(iter.name(), gpr);
                if (doSpill)
                    silentSpill(plan);
                plans.append(plan);
            }
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name().isValid() && iter.regID() != fprExclude) {
                SilentRegisterSavePlan plan = silentSavePlanForFPR(iter.name(), iter.regID());
                if (doSpill)
                    silentSpill(plan);
                plans.append(plan);
            }
        }
    }
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, NoResultTag)
    {
        silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, InvalidFPRReg);
    }
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, FPRReg exclude)
    {
        silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, exclude);
    }
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, JSValueRegs exclude)
    {
#if USE(JSVALUE32_64)
        silentSpillAllRegistersImpl(doSpill, plans, exclude.tagGPR(), exclude.payloadGPR());
#else
        silentSpillAllRegistersImpl(doSpill, plans, exclude.gpr());
#endif
    }

    void silentSpillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
    {
        silentSpillAllRegistersImpl(true, m_plans, exclude, exclude2, fprExclude);
    }
    void silentSpillAllRegisters(FPRReg exclude)
    {
        silentSpillAllRegisters(InvalidGPRReg, InvalidGPRReg, exclude);
    }
    void silentSpillAllRegisters(JSValueRegs exclude)
    {
#if USE(JSVALUE64)
        silentSpillAllRegisters(exclude.payloadGPR());
#else
        silentSpillAllRegisters(exclude.payloadGPR(), exclude.tagGPR());
#endif
    }

    void silentFillAllRegisters()
    {
        while (!m_plans.isEmpty()) {
            SilentRegisterSavePlan& plan = m_plans.last();
            silentFill(plan);
            m_plans.removeLast();
        }
    }
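
    // Illustrative sketch (operationFoo, resultGPR, and argGPR are
    // hypothetical): an 'unexpected' call to C++ from the middle of a node is
    // bracketed by a silent spill and fill, leaving the GenerationInfo state
    // untouched:
    //
    //     silentSpillAllRegisters(resultGPR);
    //     callOperation(operationFoo, resultGPR, argGPR);
    //     silentFillAllRegisters();
    //     m_jit.exceptionCheck();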

    // These methods convert between unboxed doubles and their boxed
    // JSValue representations.
#if USE(JSVALUE64)
    GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
    {
        return m_jit.boxDouble(fpr, gpr);
    }
    FPRReg unboxDouble(GPRReg gpr, GPRReg resultGPR, FPRReg fpr)
    {
        return m_jit.unboxDouble(gpr, resultGPR, fpr);
    }
    GPRReg boxDouble(FPRReg fpr)
    {
        return boxDouble(fpr, allocate());
    }

    void boxInt52(GPRReg sourceGPR, GPRReg targetGPR, DataFormat);
#elif USE(JSVALUE32_64)
    void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
    {
        m_jit.boxDouble(fpr, tagGPR, payloadGPR);
    }
    void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
    {
        m_jit.unboxDouble(tagGPR, payloadGPR, fpr, scratchFPR);
    }
#endif
    void boxDouble(FPRReg fpr, JSValueRegs regs)
    {
        m_jit.boxDouble(fpr, regs);
    }

    // Spill a VirtualRegister to the JSStack.
    void spill(VirtualRegister spillMe)
    {
        GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);

#if USE(JSVALUE32_64)
        if (info.registerFormat() == DataFormatNone) // It has already been spilled; JS values, which use two GPRs, can reach here.
            return;
#endif
        // Check the GenerationInfo to see if this value needs writing
        // to the JSStack - if not, mark it as spilled & return.
        if (!info.needsSpill()) {
            info.setSpilled(*m_stream, spillMe);
            return;
        }

        DataFormat spillFormat = info.registerFormat();
        switch (spillFormat) {
        case DataFormatStorage: {
            // This is special, since it's not a JS value - as in it's not visible to JS
            // code.
            m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatStorage);
            return;
        }

        case DataFormatInt32: {
            m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatInt32);
            return;
        }

#if USE(JSVALUE64)
        case DataFormatDouble: {
            m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatDouble);
            return;
        }

        case DataFormatInt52:
        case DataFormatStrictInt52: {
            m_jit.store64(info.gpr(), JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, spillFormat);
            return;
        }

        default:
            // The following code handles JSValues, int32s, and cells.
            RELEASE_ASSERT(spillFormat == DataFormatCell || spillFormat & DataFormatJS);

            GPRReg reg = info.gpr();
            // We need to box int32 and cell values ...
            // but on JSVALUE64 boxing a cell is a no-op!
            if (spillFormat == DataFormatInt32)
                m_jit.or64(GPRInfo::numberTagRegister, reg);

            // Spill the value, and record it as spilled in its boxed form.
            m_jit.store64(reg, JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, (DataFormat)(spillFormat | DataFormatJS));
            return;
#elif USE(JSVALUE32_64)
        case DataFormatCell:
        case DataFormatBoolean: {
            m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
            info.spill(*m_stream, spillMe, spillFormat);
            return;
        }

        case DataFormatDouble: {
            // On JSVALUE32_64 boxing a double is a no-op.
            m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatDouble);
            return;
        }

        default:
            // The following code handles JSValues.
            RELEASE_ASSERT(spillFormat & DataFormatJS);
            m_jit.store32(info.tagGPR(), JITCompiler::tagFor(spillMe));
            m_jit.store32(info.payloadGPR(), JITCompiler::payloadFor(spillMe));
            info.spill(*m_stream, spillMe, spillFormat);
            return;
#endif
        }
    }

    bool isKnownInteger(Node* node) { return m_state.forNode(node).isType(SpecInt32Only); }
    bool isKnownCell(Node* node) { return m_state.forNode(node).isType(SpecCell); }

    bool isKnownNotInteger(Node* node) { return !(m_state.forNode(node).m_type & SpecInt32Only); }
    bool isKnownNotNumber(Node* node) { return !(m_state.forNode(node).m_type & SpecFullNumber); }
    bool isKnownNotCell(Node* node) { return !(m_state.forNode(node).m_type & SpecCell); }
    bool isKnownNotOther(Node* node) { return !(m_state.forNode(node).m_type & SpecOther); }

    bool canBeRope(Edge&);

    UniquedStringImpl* identifierUID(unsigned index)
    {
        return m_jit.graph().identifiers()[index];
    }

    // Spill all VirtualRegisters back to the JSStack.
    void flushRegisters()
    {
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            if (iter.name().isValid()) {
                spill(iter.name());
                iter.release();
            }
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name().isValid()) {
                spill(iter.name());
                iter.release();
            }
        }
    }

    // Used to ASSERT that flushRegisters() has been called prior to
    // calling out from JIT code to a C helper function.
    bool isFlushed()
    {
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            if (iter.name().isValid())
                return false;
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name().isValid())
                return false;
        }
        return true;
    }

#if USE(JSVALUE64)
    static MacroAssembler::Imm64 valueOfJSConstantAsImm64(Node* node)
    {
        return MacroAssembler::Imm64(JSValue::encode(node->asJSValue()));
    }
#endif

    // Helper functions to enable code sharing in implementations of bit/shift ops.
    void bitOp(NodeType op, int32_t imm, GPRReg op1, GPRReg result)
    {
        switch (op) {
        case ArithBitAnd:
            m_jit.and32(Imm32(imm), op1, result);
            break;
        case ArithBitOr:
            m_jit.or32(Imm32(imm), op1, result);
            break;
        case ArithBitXor:
            m_jit.xor32(Imm32(imm), op1, result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
    void bitOp(NodeType op, GPRReg op1, GPRReg op2, GPRReg result)
    {
        switch (op) {
        case ArithBitAnd:
            m_jit.and32(op1, op2, result);
            break;
        case ArithBitOr:
            m_jit.or32(op1, op2, result);
            break;
        case ArithBitXor:
            m_jit.xor32(op1, op2, result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
    void shiftOp(NodeType op, GPRReg op1, int32_t shiftAmount, GPRReg result)
    {
        switch (op) {
        case ArithBitRShift:
            m_jit.rshift32(op1, Imm32(shiftAmount), result);
            break;
        case ArithBitLShift:
            m_jit.lshift32(op1, Imm32(shiftAmount), result);
            break;
        case BitURShift:
            m_jit.urshift32(op1, Imm32(shiftAmount), result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
    void shiftOp(NodeType op, GPRReg op1, GPRReg shiftAmount, GPRReg result)
    {
        switch (op) {
        case ArithBitRShift:
            m_jit.rshift32(op1, shiftAmount, result);
            break;
        case ArithBitLShift:
            m_jit.lshift32(op1, shiftAmount, result);
            break;
        case BitURShift:
            m_jit.urshift32(op1, shiftAmount, result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    // Returns the index of the branch node if a peephole branch fusion is
    // possible, UINT_MAX otherwise.
    unsigned detectPeepHoleBranch()
    {
        // Check that no intervening nodes will be generated.
        for (unsigned index = m_indexInBlock + 1; index < m_block->size() - 1; ++index) {
            Node* node = m_block->at(index);
            if (!node->shouldGenerate())
                continue;
            // Check if it's a Phantom that can be safely ignored.
            if (node->op() == Phantom && !node->child1())
                continue;
            return UINT_MAX;
        }

        // Check if the last node is a branch on this node.
        Node* lastNode = m_block->terminal();
        return lastNode->op() == Branch && lastNode->child1() == m_currentNode ? m_block->size() - 1 : UINT_MAX;
    }
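
    // Illustrative sketch: a compare node that passes detectPeepHoleBranch()
    // can fuse with the terminal Branch instead of materializing a boolean.
    // This mirrors the shape of the compilePeepHole* helpers declared below:
    //
    //     unsigned branchIndexInBlock = detectPeepHoleBranch();
    //     if (branchIndexInBlock != UINT_MAX) {
    //         Node* branchNode = m_block->at(branchIndexInBlock);
    //         compilePeepHoleInt32Branch(node, branchNode, JITCompiler::Equal);
    //         use(node->child1());
    //         use(node->child2());
    //         m_indexInBlock = branchIndexInBlock;
    //         m_currentNode = branchNode;
    //         return true;
    //     }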

    void compileCheckTraps(Node*);

    void compileMovHint(Node*);
    void compileMovHintAndCheck(Node*);

    void cachedGetById(CodeOrigin, JSValueRegs base, JSValueRegs result, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode, AccessType);
    void cachedPutById(CodeOrigin, GPRReg baseGPR, JSValueRegs valueRegs, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
    void cachedGetByVal(CodeOrigin, JSValueRegs base, JSValueRegs property, JSValueRegs result, JITCompiler::Jump slowPathTarget);

#if USE(JSVALUE64)
    void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode, AccessType);
    void cachedGetByIdWithThis(CodeOrigin, GPRReg baseGPR, GPRReg thisGPR, GPRReg resultGPR, unsigned identifierNumber, const JITCompiler::JumpList& slowPathTarget = JITCompiler::JumpList());
#elif USE(JSVALUE32_64)
    void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode, AccessType);
    void cachedGetByIdWithThis(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg thisTagGPROrNone, GPRReg thisPayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, const JITCompiler::JumpList& slowPathTarget = JITCompiler::JumpList());
#endif

    void compileDeleteById(Node*);
    void compileDeleteByVal(Node*);
    void compilePushWithScope(Node*);
    void compileGetById(Node*, AccessType);
    void compileGetByIdFlush(Node*, AccessType);
    void compileInById(Node*);
    void compileInByVal(Node*);

    void nonSpeculativeNonPeepholeCompareNullOrUndefined(Edge operand);
    void nonSpeculativePeepholeBranchNullOrUndefined(Edge operand, Node* branchNode);

    void nonSpeculativePeepholeBranch(Node*, Node* branchNode, MacroAssembler::RelationalCondition, S_JITOperation_GJJ helperFunction);
    void nonSpeculativeNonPeepholeCompare(Node*, MacroAssembler::RelationalCondition, S_JITOperation_GJJ helperFunction);

    void nonSpeculativePeepholeStrictEq(Node*, Node* branchNode, bool invert = false);
    void nonSpeculativeNonPeepholeStrictEq(Node*, bool invert = false);
    bool nonSpeculativeStrictEq(Node*, bool invert = false);

    void compileInstanceOfForCells(Node*, JSValueRegs valueRegs, JSValueRegs prototypeRegs, GPRReg resultGPR, GPRReg scratchGPR, GPRReg scratch2GPR, JITCompiler::Jump slowCase = JITCompiler::Jump());
    void compileInstanceOf(Node*);
    void compileInstanceOfCustom(Node*);
    void compileOverridesHasInstance(Node*);

    void compileIsCellWithType(Node*);
    void compileIsTypedArrayView(Node*);

    void emitCall(Node*);

    void emitAllocateButterfly(GPRReg storageGPR, GPRReg sizeGPR, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, MacroAssembler::JumpList& slowCases);
    void emitInitializeButterfly(GPRReg storageGPR, GPRReg sizeGPR, JSValueRegs emptyValueRegs, GPRReg scratchGPR);
    void compileAllocateNewArrayWithSize(JSGlobalObject*, GPRReg resultGPR, GPRReg sizeGPR, IndexingType, bool shouldConvertLargeSizeToArrayStorage = true);

    // Called once a node has completed code generation but prior to setting
    // its result, to free up its children. (This must happen prior to setting
    // the node's result, since the node may have the same VirtualRegister as
    // a child, and as such will use the same GenerationInfo.)
    void useChildren(Node*);

    // These methods are called to initialize the GenerationInfo
    // to describe the result of an operation.
    void int32Result(GPRReg reg, Node* node, DataFormat format = DataFormatInt32, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

        if (format == DataFormatInt32) {
            m_jit.jitAssertIsInt32(reg);
            m_gprs.retain(reg, virtualRegister, SpillOrderInteger);
            info.initInt32(node, node->refCount(), reg);
        } else {
#if USE(JSVALUE64)
            RELEASE_ASSERT(format == DataFormatJSInt32);
            m_jit.jitAssertIsJSInt32(reg);
            m_gprs.retain(reg, virtualRegister, SpillOrderJS);
            info.initJSValue(node, node->refCount(), reg, format);
#elif USE(JSVALUE32_64)
            RELEASE_ASSERT_NOT_REACHED();
#endif
        }
    }
    void int32Result(GPRReg reg, Node* node, UseChildrenMode mode)
    {
        int32Result(reg, node, DataFormatInt32, mode);
    }
    void int52Result(GPRReg reg, Node* node, DataFormat format, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

        m_gprs.retain(reg, virtualRegister, SpillOrderJS);
        info.initInt52(node, node->refCount(), reg, format);
    }
    void int52Result(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        int52Result(reg, node, DataFormatInt52, mode);
    }
    void strictInt52Result(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        int52Result(reg, node, DataFormatStrictInt52, mode);
    }
    void noResult(Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == UseChildrenCalledExplicitly)
            return;
        useChildren(node);
    }
    void cellResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderCell);
        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
        info.initCell(node, node->refCount(), reg);
    }
    void blessedBooleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
#if USE(JSVALUE64)
        jsValueResult(reg, node, DataFormatJSBoolean, mode);
#else
        booleanResult(reg, node, mode);
#endif
    }
    void unblessedBooleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
#if USE(JSVALUE64)
        blessBoolean(reg);
#endif
        blessedBooleanResult(reg, node, mode);
    }
#if USE(JSVALUE64)
    void jsValueResult(GPRReg reg, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
    {
        if (format == DataFormatJSInt32)
            m_jit.jitAssertIsJSInt32(reg);

        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderJS);
        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
        info.initJSValue(node, node->refCount(), reg, format);
    }
    void jsValueResult(GPRReg reg, Node* node, UseChildrenMode mode)
    {
        jsValueResult(reg, node, DataFormatJS, mode);
    }
#elif USE(JSVALUE32_64)
    void booleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderBoolean);
        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
        info.initBoolean(node, node->refCount(), reg);
    }
    void jsValueResult(GPRReg tag, GPRReg payload, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(tag, virtualRegister, SpillOrderJS);
        m_gprs.retain(payload, virtualRegister, SpillOrderJS);
        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
        info.initJSValue(node, node->refCount(), tag, payload, format);
    }
    void jsValueResult(GPRReg tag, GPRReg payload, Node* node, UseChildrenMode mode)
    {
        jsValueResult(tag, payload, node, DataFormatJS, mode);
    }
#endif
    void jsValueResult(JSValueRegs regs, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
    {
#if USE(JSVALUE64)
        jsValueResult(regs.gpr(), node, format, mode);
#else
        jsValueResult(regs.tagGPR(), regs.payloadGPR(), node, format, mode);
#endif
    }
    void storageResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderStorage);
        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
        info.initStorage(node, node->refCount(), reg);
    }
    void doubleResult(FPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_fprs.retain(reg, virtualRegister, SpillOrderDouble);
        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
        info.initDouble(node, node->refCount(), reg);
    }
    void initConstantInfo(Node* node)
    {
        ASSERT(node->hasConstant());
        generationInfo(node).initConstant(node, node->refCount());
    }
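
    // Illustrative sketch (hypothetical unary node): a compile function
    // typically ends by handing its output register to one of the result
    // methods above, which records it in the GenerationInfo and frees the
    // children via useChildren():
    //
    //     SpeculateInt32Operand op1(this, node->child1());
    //     GPRTemporary result(this, Reuse, op1);
    //     m_jit.add32(TrustedImm32(1), op1.gpr(), result.gpr());
    //     int32Result(result.gpr(), node);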

#define FIRST_ARGUMENT_TYPE typename FunctionTraits<OperationType>::template ArgumentType<0>

    template<typename OperationType, typename ResultRegType, typename... Args>
    std::enable_if_t<
        FunctionTraits<OperationType>::hasResult,
        JITCompiler::Call>
    callOperation(OperationType operation, ResultRegType result, Args... args)
    {
        m_jit.setupArguments<OperationType>(args...);
        return appendCallSetResult(operation, result);
    }

    template<typename OperationType, typename Arg, typename... Args>
    std::enable_if_t<
        !FunctionTraits<OperationType>::hasResult
        && !std::is_same<Arg, NoResultTag>::value,
        JITCompiler::Call>
    callOperation(OperationType operation, Arg arg, Args... args)
    {
        m_jit.setupArguments<OperationType>(arg, args...);
        return appendCall(operation);
    }

    template<typename OperationType, typename... Args>
    std::enable_if_t<
        !FunctionTraits<OperationType>::hasResult,
        JITCompiler::Call>
    callOperation(OperationType operation, NoResultTag, Args... args)
    {
        m_jit.setupArguments<OperationType>(args...);
        return appendCall(operation);
    }

    template<typename OperationType>
    std::enable_if_t<
        !FunctionTraits<OperationType>::hasResult,
        JITCompiler::Call>
    callOperation(OperationType operation)
    {
        m_jit.setupArguments<OperationType>();
        return appendCall(operation);
    }

#undef FIRST_ARGUMENT_TYPE
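
    // Illustrative sketch (operationBar, argRegs, and node are hypothetical;
    // JSValueRegsFlushedCallResult is the flushed-call result helper used
    // elsewhere in the DFG): a planned call to C++ flushes all registers
    // first; the templates above marshal the arguments and bind the return
    // value to the chosen result registers:
    //
    //     flushRegisters();
    //     JSValueRegsFlushedCallResult result(this);
    //     JSValueRegs resultRegs = result.regs();
    //     callOperation(operationBar, resultRegs, argRegs);
    //     m_jit.exceptionCheck();
    //     jsValueResult(resultRegs, node);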

    JITCompiler::Call callOperationWithCallFrameRollbackOnException(V_JITOperation_Cb operation, CodeBlock* codeBlock)
    {
        // Do not register CodeBlock* as a weak pointer.
        m_jit.setupArguments<V_JITOperation_Cb>(TrustedImmPtr(static_cast<void*>(codeBlock)));
        return appendCallWithCallFrameRollbackOnException(operation);
    }

    JITCompiler::Call callOperationWithCallFrameRollbackOnException(Z_JITOperation_G operation, GPRReg result, JSGlobalObject* globalObject)
    {
        m_jit.setupArguments<Z_JITOperation_G>(TrustedImmPtr::weakPointer(m_graph, globalObject));
        return appendCallWithCallFrameRollbackOnExceptionSetResult(operation, result);
    }

    void prepareForExternalCall()
    {
#if !defined(NDEBUG) && !CPU(ARM_THUMB2) && !CPU(MIPS)
        // We're about to call out to a "native" helper function. The helper
        // function is expected to set topCallFrame itself with the CallFrame
        // that is passed to it.
        //
        // We explicitly trash topCallFrame here so that we'll know if some of
        // the helper functions are not setting topCallFrame when they should
        // be doing so. Note: the previous value in topCallFrame was not valid
        // anyway since it was not being updated by JIT'ed code by design.

        for (unsigned i = 0; i < sizeof(void*) / 4; i++)
            m_jit.store32(TrustedImm32(0xbadbeef), reinterpret_cast<char*>(&vm().topCallFrame) + i * 4);
#endif
        m_jit.prepareCallOperation(vm());
    }

    // These methods add call instructions, optionally setting results, and optionally rolling back the call frame on an exception.
    JITCompiler::Call appendCall(const FunctionPtr<CFunctionPtrTag> function)
    {
        prepareForExternalCall();
        m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic);
        return m_jit.appendCall(function);
    }

    JITCompiler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr<CFunctionPtrTag> function)
    {
        JITCompiler::Call call = appendCall(function);
        m_jit.exceptionCheckWithCallFrameRollback();
        return call;
    }

    JITCompiler::Call appendCallWithCallFrameRollbackOnExceptionSetResult(const FunctionPtr<CFunctionPtrTag> function, GPRReg result)
    {
        JITCompiler::Call call = appendCallWithCallFrameRollbackOnException(function);
        if ((result != InvalidGPRReg) && (result != GPRInfo::returnValueGPR))
            m_jit.move(GPRInfo::returnValueGPR, result);
        return call;
    }

    JITCompiler::Call appendCallSetResult(const FunctionPtr<CFunctionPtrTag> function, GPRReg result)
    {
        JITCompiler::Call call = appendCall(function);
        if (result != InvalidGPRReg)
            m_jit.move(GPRInfo::returnValueGPR, result);
        return call;
    }

    JITCompiler::Call appendCallSetResult(const FunctionPtr<CFunctionPtrTag> function, GPRReg result1, GPRReg result2)
    {
        JITCompiler::Call call = appendCall(function);
        m_jit.setupResults(result1, result2);
        return call;
    }

    JITCompiler::Call appendCallSetResult(const FunctionPtr<CFunctionPtrTag> function, JSValueRegs resultRegs)
    {
#if USE(JSVALUE64)
        return appendCallSetResult(function, resultRegs.gpr());
#else
        return appendCallSetResult(function, resultRegs.payloadGPR(), resultRegs.tagGPR());
#endif
    }

#if CPU(ARM_THUMB2) && !CPU(ARM_HARDFP)
    JITCompiler::Call appendCallSetResult(const FunctionPtr<CFunctionPtrTag> function, FPRReg result)
    {
        JITCompiler::Call call = appendCall(function);
        if (result != InvalidFPRReg)
            m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
        return call;
    }
#else // CPU(X86_64) || (CPU(ARM_THUMB2) && CPU(ARM_HARDFP)) || CPU(ARM64) || CPU(MIPS)
    JITCompiler::Call appendCallSetResult(const FunctionPtr<CFunctionPtrTag> function, FPRReg result)
    {
        JITCompiler::Call call = appendCall(function);
        if (result != InvalidFPRReg)
            m_jit.moveDouble(FPRInfo::returnValueFPR, result);
        return call;
    }
#endif

    void branchDouble(JITCompiler::DoubleCondition cond, FPRReg left, FPRReg right, BasicBlock* destination)
    {
        return addBranch(m_jit.branchDouble(cond, left, right), destination);
    }

    void branchDoubleNonZero(FPRReg value, FPRReg scratch, BasicBlock* destination)
    {
        return addBranch(m_jit.branchDoubleNonZero(value, scratch), destination);
    }

    template<typename T, typename U>
    void branch32(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination)
    {
        return addBranch(m_jit.branch32(cond, left, right), destination);
    }

    template<typename T, typename U>
    void branchTest32(JITCompiler::ResultCondition cond, T value, U mask, BasicBlock* destination)
    {
        return addBranch(m_jit.branchTest32(cond, value, mask), destination);
    }

    template<typename T>
    void branchTest32(JITCompiler::ResultCondition cond, T value, BasicBlock* destination)
    {
        return addBranch(m_jit.branchTest32(cond, value), destination);
    }

#if USE(JSVALUE64)
    template<typename T, typename U>
    void branch64(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination)
    {
        return addBranch(m_jit.branch64(cond, left, right), destination);
    }
#endif

    template<typename T, typename U>
    void branch8(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination)
    {
        return addBranch(m_jit.branch8(cond, left, right), destination);
    }

    template<typename T, typename U>
    void branchPtr(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination)
    {
        return addBranch(m_jit.branchPtr(cond, left, right), destination);
    }

    template<typename T, typename U>
    void branchTestPtr(JITCompiler::ResultCondition cond, T value, U mask, BasicBlock* destination)
    {
        return addBranch(m_jit.branchTestPtr(cond, value, mask), destination);
    }

    template<typename T>
    void branchTestPtr(JITCompiler::ResultCondition cond, T value, BasicBlock* destination)
    {
        return addBranch(m_jit.branchTestPtr(cond, value), destination);
    }

    template<typename T, typename U>
    void branchTest8(JITCompiler::ResultCondition cond, T value, U mask, BasicBlock* destination)
    {
        return addBranch(m_jit.branchTest8(cond, value, mask), destination);
    }

    template<typename T>
    void branchTest8(JITCompiler::ResultCondition cond, T value, BasicBlock* destination)
    {
        return addBranch(m_jit.branchTest8(cond, value), destination);
    }

    enum FallThroughMode {
        AtFallThroughPoint,
        ForceJump
    };
    void jump(BasicBlock* destination, FallThroughMode fallThroughMode = AtFallThroughPoint)
    {
        if (destination == nextBlock()
            && fallThroughMode == AtFallThroughPoint)
            return;
        addBranch(m_jit.jump(), destination);
    }
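
    // Illustrative sketch (branchNode and valueGPR are hypothetical):
    // emitBranch-style code emits a conditional branch to the taken block and
    // a jump, or a fall-through, to the not-taken block:
    //
    //     BasicBlock* taken = branchNode->branchData()->taken.block;
    //     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
    //     branchTest32(MacroAssembler::NonZero, valueGPR, taken);
    //     jump(notTaken);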
1149
1150 void addBranch(const MacroAssembler::Jump& jump, BasicBlock* destination)
1151 {
1152 m_branches.append(BranchRecord(jump, destination));
1153 }
1154 void addBranch(const MacroAssembler::JumpList& jump, BasicBlock* destination);
1155
1156 void linkBranches();
1157
1158 void dump(const char* label = 0);
1159
1160 bool betterUseStrictInt52(Node* node)
1161 {
1162 return !generationInfo(node).isInt52();
1163 }
1164 bool betterUseStrictInt52(Edge edge)
1165 {
1166 return betterUseStrictInt52(edge.node());
1167 }
1168
1169 bool compare(Node*, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_JITOperation_GJJ);
1170 void compileCompareUnsigned(Node*, MacroAssembler::RelationalCondition);
1171 bool compilePeepHoleBranch(Node*, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_JITOperation_GJJ);
1172 void compilePeepHoleInt32Branch(Node*, Node* branchNode, JITCompiler::RelationalCondition);
1173 void compilePeepHoleInt52Branch(Node*, Node* branchNode, JITCompiler::RelationalCondition);
1174 void compilePeepHoleBooleanBranch(Node*, Node* branchNode, JITCompiler::RelationalCondition);
1175 void compilePeepHoleDoubleBranch(Node*, Node* branchNode, JITCompiler::DoubleCondition);
1176 void compilePeepHoleObjectEquality(Node*, Node* branchNode);
1177 void compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode);
1178 void compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode);
1179 void compileObjectEquality(Node*);
1180 void compileObjectStrictEquality(Edge objectChild, Edge otherChild);
1181 void compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild);
1182 void compileObjectOrOtherLogicalNot(Edge value);
1183 void compileLogicalNot(Node*);
1184 void compileLogicalNotStringOrOther(Node*);
1185 void compileStringEquality(
1186 Node*, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR,
1187 GPRReg leftTempGPR, GPRReg rightTempGPR, GPRReg leftTemp2GPR,
1188 GPRReg rightTemp2GPR, const JITCompiler::JumpList& fastTrue,
1189 const JITCompiler::JumpList& fastSlow);
1190 void compileStringEquality(Node*);
1191 void compileStringIdentEquality(Node*);
1192 void compileStringToUntypedEquality(Node*, Edge stringEdge, Edge untypedEdge);
1193 void compileStringIdentToNotStringVarEquality(Node*, Edge stringEdge, Edge notStringVarEdge);
1194 void compileStringZeroLength(Node*);
1195 void compileMiscStrictEq(Node*);
1196
1197 void compileSymbolEquality(Node*);
1198 void compileBigIntEquality(Node*);
1199 void compilePeepHoleSymbolEquality(Node*, Node* branchNode);
1200 void compileSymbolUntypedEquality(Node*, Edge symbolEdge, Edge untypedEdge);
1201
1202 void emitObjectOrOtherBranch(Edge value, BasicBlock* taken, BasicBlock* notTaken);
1203 void emitStringBranch(Edge value, BasicBlock* taken, BasicBlock* notTaken);
1204 void emitStringOrOtherBranch(Edge value, BasicBlock* taken, BasicBlock* notTaken);
1205 void emitBranch(Node*);
1206
1207 struct StringSwitchCase {
1208 StringSwitchCase() { }
1209
1210 StringSwitchCase(StringImpl* string, BasicBlock* target)
1211 : string(string)
1212 , target(target)
1213 {
1214 }
1215
1216 bool operator<(const StringSwitchCase& other) const
1217 {
1218 return stringLessThan(*string, *other.string);
1219 }
1220
1221 StringImpl* string;
1222 BasicBlock* target;
1223 };
1224
1225 void emitSwitchIntJump(SwitchData*, GPRReg value, GPRReg scratch);
1226 void emitSwitchImm(Node*, SwitchData*);
1227 void emitSwitchCharStringJump(Node*, SwitchData*, GPRReg value, GPRReg scratch);
1228 void emitSwitchChar(Node*, SwitchData*);
1229 void emitBinarySwitchStringRecurse(
1230 SwitchData*, const Vector<StringSwitchCase>&, unsigned numChecked,
1231 unsigned begin, unsigned end, GPRReg buffer, GPRReg length, GPRReg temp,
1232 unsigned alreadyCheckedLength, bool checkedExactLength);
1233 void emitSwitchStringOnString(Node*, SwitchData*, GPRReg string);
1234 void emitSwitchString(Node*, SwitchData*);
1235 void emitSwitch(Node*);
1236
1237 void compileToStringOrCallStringConstructorOrStringValueOf(Node*);
1238 void compileNumberToStringWithRadix(Node*);
1239 void compileNumberToStringWithValidRadixConstant(Node*);
1240 void compileNumberToStringWithValidRadixConstant(Node*, int32_t radix);
1241 void compileNewStringObject(Node*);
1242 void compileNewSymbol(Node*);
1243
1244 void compileNewTypedArrayWithSize(Node*);
1245
1246 void compileInt32Compare(Node*, MacroAssembler::RelationalCondition);
1247 void compileInt52Compare(Node*, MacroAssembler::RelationalCondition);
1248 void compileBooleanCompare(Node*, MacroAssembler::RelationalCondition);
1249 void compileDoubleCompare(Node*, MacroAssembler::DoubleCondition);
1250 void compileStringCompare(Node*, MacroAssembler::RelationalCondition);
1251 void compileStringIdentCompare(Node*, MacroAssembler::RelationalCondition);
1252
1253 bool compileStrictEq(Node*);
1254
1255 void compileSameValue(Node*);
1256
1257 void compileAllocatePropertyStorage(Node*);
1258 void compileReallocatePropertyStorage(Node*);
1259 void compileNukeStructureAndSetButterfly(Node*);
1260 void compileGetButterfly(Node*);
1261 void compileCallDOMGetter(Node*);
1262 void compileCallDOM(Node*);
1263 void compileCheckSubClass(Node*);
1264 void compileNormalizeMapKey(Node*);
1265 void compileGetMapBucketHead(Node*);
1266 void compileGetMapBucketNext(Node*);
1267 void compileSetAdd(Node*);
1268 void compileMapSet(Node*);
1269 void compileWeakMapGet(Node*);
1270 void compileWeakSetAdd(Node*);
1271 void compileWeakMapSet(Node*);
1272 void compileLoadKeyFromMapBucket(Node*);
1273 void compileLoadValueFromMapBucket(Node*);
1274 void compileExtractValueFromWeakMapGet(Node*);
1275 void compileGetPrototypeOf(Node*);
1276 void compileIdentity(Node*);
1277
1278#if USE(JSVALUE32_64)
1279 template<typename BaseOperandType, typename PropertyOperandType, typename ValueOperandType, typename TagType>
1280 void compileContiguousPutByVal(Node*, BaseOperandType&, PropertyOperandType&, ValueOperandType&, GPRReg valuePayloadReg, TagType valueTag);
1281#endif
1282 void compileDoublePutByVal(Node*, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property);
1283 bool putByValWillNeedExtraRegister(ArrayMode arrayMode)
1284 {
1285 return arrayMode.mayStoreToHole();
1286 }
1287 GPRReg temporaryRegisterForPutByVal(GPRTemporary&, ArrayMode);
1288 GPRReg temporaryRegisterForPutByVal(GPRTemporary& temporary, Node* node)
1289 {
1290 return temporaryRegisterForPutByVal(temporary, node->arrayMode());
1291 }
1292
1293 void compileGetCharCodeAt(Node*);
1294 void compileGetByValOnString(Node*);
1295 void compileFromCharCode(Node*);
1296
1297 void compileGetByValOnDirectArguments(Node*);
1298 void compileGetByValOnScopedArguments(Node*);
1299
1300 void compileGetScope(Node*);
1301 void compileSkipScope(Node*);
1302 void compileGetGlobalObject(Node*);
1303 void compileGetGlobalThis(Node*);
1304
1305 void compileGetArrayLength(Node*);
1306
1307 void compileCheckTypeInfoFlags(Node*);
1308 void compileCheckIdent(Node*);
1309
1310 void compileParseInt(Node*);
1311
1312 void compileValueRep(Node*);
1313 void compileDoubleRep(Node*);
1314
1315 void compileValueToInt32(Node*);
1316 void compileUInt32ToNumber(Node*);
1317 void compileDoubleAsInt32(Node*);
1318
1319 void compileValueBitNot(Node*);
1320 void compileBitwiseNot(Node*);
1321
1322 template<typename SnippetGenerator, J_JITOperation_GJJ slowPathFunction>
1323 void emitUntypedBitOp(Node*);
1324 void compileBitwiseOp(Node*);
1325 void compileValueBitwiseOp(Node*);
1326
1327 void emitUntypedRightShiftBitOp(Node*);
1328 void compileValueLShiftOp(Node*);
1329 void compileValueBitRShift(Node*);
1330 void compileShiftOp(Node*);
1331
1332 template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
1333 void compileMathIC(Node*, JITBinaryMathIC<Generator>*, bool needsScratchGPRReg, bool needsScratchFPRReg, RepatchingFunction, NonRepatchingFunction);
1334 template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
1335 void compileMathIC(Node*, JITUnaryMathIC<Generator>*, bool needsScratchGPRReg, RepatchingFunction, NonRepatchingFunction);
1336
1337 void compileArithDoubleUnaryOp(Node*, double (*doubleFunction)(double), double (*operation)(JSGlobalObject*, EncodedJSValue));
1338 void compileValueAdd(Node*);
1339 void compileValueSub(Node*);
1340 void compileArithAdd(Node*);
1341 void compileMakeRope(Node*);
1342 void compileArithAbs(Node*);
1343 void compileArithClz32(Node*);
1344 void compileArithSub(Node*);
1345 void compileIncOrDec(Node*);
1346 void compileValueNegate(Node*);
1347 void compileArithNegate(Node*);
1348 void compileValueMul(Node*);
1349 void compileArithMul(Node*);
1350 void compileValueDiv(Node*);
1351 void compileArithDiv(Node*);
1352 void compileArithFRound(Node*);
1353 void compileValueMod(Node*);
1354 void compileArithMod(Node*);
1355 void compileArithPow(Node*);
1356 void compileValuePow(Node*);
1357 void compileArithRounding(Node*);
1358 void compileArithRandom(Node*);
1359 void compileArithUnary(Node*);
1360 void compileArithSqrt(Node*);
1361 void compileArithMinMax(Node*);
1362 void compileConstantStoragePointer(Node*);
1363 void compileGetIndexedPropertyStorage(Node*);
1364 JITCompiler::Jump jumpForTypedArrayOutOfBounds(Node*, GPRReg baseGPR, GPRReg indexGPR);
1365 JITCompiler::Jump jumpForTypedArrayIsNeuteredIfOutOfBounds(Node*, GPRReg baseGPR, JITCompiler::Jump outOfBounds);
1366 void emitTypedArrayBoundsCheck(Node*, GPRReg baseGPR, GPRReg indexGPR);
1367 void compileGetTypedArrayByteOffset(Node*);
1368 void compileGetByValOnIntTypedArray(Node*, TypedArrayType);
1369 void compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node*, TypedArrayType);
1370 void compileGetByValOnFloatTypedArray(Node*, TypedArrayType);
1371 void compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node*, TypedArrayType);
1372 void compileGetByValForObjectWithString(Node*);
1373 void compileGetByValForObjectWithSymbol(Node*);
1374 void compilePutByValForCellWithString(Node*, Edge& child1, Edge& child2, Edge& child3);
1375 void compilePutByValForCellWithSymbol(Node*, Edge& child1, Edge& child2, Edge& child3);
1376 void compileGetByValWithThis(Node*);
1377 void compileGetByOffset(Node*);
1378 void compilePutByOffset(Node*);
1379 void compileMatchStructure(Node*);
1380 // If this returns false it means that we terminated speculative execution.
1381 bool getIntTypedArrayStoreOperand(
1382 GPRTemporary& value,
1383 GPRReg property,
1384#if USE(JSVALUE32_64)
1385 GPRTemporary& propertyTag,
1386 GPRTemporary& valueTag,
1387#endif
1388 Edge valueUse, JITCompiler::JumpList& slowPathCases, bool isClamped = false);
1389 void loadFromIntTypedArray(GPRReg storageReg, GPRReg propertyReg, GPRReg resultReg, TypedArrayType);
1390 void setIntTypedArrayLoadResult(Node*, GPRReg resultReg, TypedArrayType, bool canSpeculate = false);
1391 template <typename ClassType> void compileNewFunctionCommon(GPRReg, RegisteredStructure, GPRReg, GPRReg, GPRReg, MacroAssembler::JumpList&, size_t, FunctionExecutable*);
1392 void compileNewFunction(Node*);
1393 void compileSetFunctionName(Node*);
1394 void compileNewRegexp(Node*);
1395 void compileForwardVarargs(Node*);
1396 void compileLoadVarargs(Node*);
1397 void compileCreateActivation(Node*);
1398 void compileCreateDirectArguments(Node*);
1399 void compileGetFromArguments(Node*);
1400 void compilePutToArguments(Node*);
1401 void compileGetArgument(Node*);
1402 void compileCreateScopedArguments(Node*);
1403 void compileCreateClonedArguments(Node*);
1404 void compileCreateRest(Node*);
1405 void compileSpread(Node*);
1406 void compileNewArray(Node*);
1407 void compileNewArrayWithSpread(Node*);
1408 void compileGetRestLength(Node*);
1409 void compileArraySlice(Node*);
1410 void compileArrayIndexOf(Node*);
1411 void compileArrayPush(Node*);
1412 void compileNotifyWrite(Node*);
1413 void compileRegExpExec(Node*);
1414 void compileRegExpExecNonGlobalOrSticky(Node*);
1415 void compileRegExpMatchFast(Node*);
1416 void compileRegExpMatchFastGlobal(Node*);
1417 void compileRegExpTest(Node*);
1418 void compileStringReplace(Node*);
1419 void compileIsObject(Node*);
1420 void compileIsObjectOrNull(Node*);
1421 void compileIsFunction(Node*);
1422 void compileTypeOf(Node*);
1423 void compileCheckCell(Node*);
1424 void compileCheckNotEmpty(Node*);
1425 void compileCheckStructure(Node*);
1426 void emitStructureCheck(Node*, GPRReg cellGPR, GPRReg tempGPR);
1427 void compilePutAccessorById(Node*);
1428 void compilePutGetterSetterById(Node*);
1429 void compilePutAccessorByVal(Node*);
1430 void compileGetRegExpObjectLastIndex(Node*);
1431 void compileSetRegExpObjectLastIndex(Node*);
    void compileLazyJSConstant(Node*);
    void compileMaterializeNewObject(Node*);
    void compileRecordRegExpCachedResult(Node*);
    void compileToObjectOrCallObjectConstructor(Node*);
    void compileResolveScope(Node*);
    void compileResolveScopeForHoistingFuncDeclInEval(Node*);
    void compileGetGlobalVariable(Node*);
    void compilePutGlobalVariable(Node*);
    void compileGetDynamicVar(Node*);
    void compilePutDynamicVar(Node*);
    void compileGetClosureVar(Node*);
    void compilePutClosureVar(Node*);
    void compileGetInternalField(Node*);
    void compilePutInternalField(Node*);
    void compileCompareEqPtr(Node*);
    void compileDefineDataProperty(Node*);
    void compileDefineAccessorProperty(Node*);
    void compileStringSlice(Node*);
    void compileToLowerCase(Node*);
    void compileThrow(Node*);
    void compileThrowStaticError(Node*);
    void compileGetEnumerableLength(Node*);
    void compileHasGenericProperty(Node*);
    void compileToIndexString(Node*);
    void compilePutByIdFlush(Node*);
    void compilePutById(Node*);
    void compilePutByIdDirect(Node*);
    void compilePutByIdWithThis(Node*);
    void compileHasStructureProperty(Node*);
    void compileGetDirectPname(Node*);
    void compileGetPropertyEnumerator(Node*);
    void compileGetEnumeratorPname(Node*);
    void compileGetExecutable(Node*);
    void compileGetGetter(Node*);
    void compileGetSetter(Node*);
    void compileGetCallee(Node*);
    void compileSetCallee(Node*);
    void compileGetArgumentCountIncludingThis(Node*);
    void compileSetArgumentCountIncludingThis(Node*);
    void compileStrCat(Node*);
    void compileNewArrayBuffer(Node*);
    void compileNewArrayWithSize(Node*);
    void compileNewTypedArray(Node*);
    void compileToThis(Node*);
    void compileObjectKeys(Node*);
    void compileObjectCreate(Node*);
    void compileCreateThis(Node*);
    void compileCreatePromise(Node*);
    void compileCreateGenerator(Node*);
    void compileCreateAsyncGenerator(Node*);
    void compileNewObject(Node*);
    void compileNewPromise(Node*);
    void compileNewGenerator(Node*);
    void compileNewAsyncGenerator(Node*);
    void compileToPrimitive(Node*);
    void compileToNumeric(Node*);
    void compileLogShadowChickenPrologue(Node*);
    void compileLogShadowChickenTail(Node*);
    void compileHasIndexedProperty(Node*);
    void compileExtractCatchLocal(Node*);
    void compileClearCatchLocals(Node*);
    void compileProfileType(Node*);
    void compileStringCodePointAt(Node*);
    void compileDateGet(Node*);

    template<typename JSClass, typename Operation>
    void compileCreateInternalFieldObject(Node*, Operation);
    template<typename JSClass, typename Operation>
    void compileNewInternalFieldObject(Node*, Operation);

    void moveTrueTo(GPRReg);
    void moveFalseTo(GPRReg);
    void blessBoolean(GPRReg);

    // Allocator for a cell of a specific size.
    template <typename StructureType> // StructureType can be GPR or ImmPtr.
    void emitAllocateJSCell(
        GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, StructureType structure,
        GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
    {
        m_jit.emitAllocateJSCell(resultGPR, allocator, allocatorGPR, structure, scratchGPR, slowPath);
    }

    // Allocator for an object of a specific size.
    template <typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
    void emitAllocateJSObject(
        GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, StructureType structure,
        StorageType storage, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
    {
        m_jit.emitAllocateJSObject(
            resultGPR, allocator, allocatorGPR, structure, storage, scratchGPR, slowPath);
    }

    template <typename ClassType, typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
    void emitAllocateJSObjectWithKnownSize(
        GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1,
        GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath, size_t size)
    {
        m_jit.emitAllocateJSObjectWithKnownSize<ClassType>(vm(), resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath, size);
    }

    // Convenience allocator for a built-in object.
    template <typename ClassType, typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
    void emitAllocateJSObject(GPRReg resultGPR, StructureType structure, StorageType storage,
        GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
    {
        m_jit.emitAllocateJSObject<ClassType>(vm(), resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath);
    }
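
    // A rough usage sketch (illustrative only; 'structure' and the slow-path
    // handling below are stand-ins, not tied to any particular node):
    //
    //     GPRTemporary result(this);
    //     GPRTemporary scratch1(this);
    //     GPRTemporary scratch2(this);
    //     MacroAssembler::JumpList slowPath;
    //     emitAllocateJSObject<JSFinalObject>(
    //         result.gpr(), TrustedImmPtr(structure), TrustedImmPtr(nullptr),
    //         scratch1.gpr(), scratch2.gpr(), slowPath);
    //     // ... then emit a slow path that calls into the VM on failure.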

    template <typename ClassType, typename StructureType> // StructureType can be GPR or ImmPtr.
    void emitAllocateVariableSizedJSObject(GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
    {
        m_jit.emitAllocateVariableSizedJSObject<ClassType>(vm(), resultGPR, structure, allocationSize, scratchGPR1, scratchGPR2, slowPath);
    }

    template<typename ClassType>
    void emitAllocateDestructibleObject(GPRReg resultGPR, RegisteredStructure structure,
        GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
    {
        m_jit.emitAllocateDestructibleObject<ClassType>(vm(), resultGPR, structure.get(), scratchGPR1, scratchGPR2, slowPath);
    }

    void emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength);

    void emitGetLength(InlineCallFrame*, GPRReg lengthGPR, bool includeThis = false);
    void emitGetLength(CodeOrigin, GPRReg lengthGPR, bool includeThis = false);
    void emitGetCallee(CodeOrigin, GPRReg calleeGPR);
    void emitGetArgumentStart(CodeOrigin, GPRReg startGPR);
    void emitPopulateSliceIndex(Edge&, Optional<GPRReg> indexGPR, GPRReg lengthGPR, GPRReg resultGPR);

    // Generate an OSR exit fuzz check. Returns Jump() if OSR exit fuzz is not enabled, or if
    // it's in training mode.
    MacroAssembler::Jump emitOSRExitFuzzCheck();

    // Add a speculation check.
    void speculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail);
    void speculationCheck(ExitKind, JSValueSource, Node*, const MacroAssembler::JumpList& jumpsToFail);

    // Add a speculation check without additional recovery, and with a promise to supply a jump later.
    OSRExitJumpPlaceholder speculationCheck(ExitKind, JSValueSource, Node*);
    OSRExitJumpPlaceholder speculationCheck(ExitKind, JSValueSource, Edge);
    void speculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail);
    void speculationCheck(ExitKind, JSValueSource, Edge, const MacroAssembler::JumpList& jumpsToFail);
    // Add a speculation check with additional recovery.
    void speculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
    void speculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
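
    // For example, guarding an Int32 addition against overflow might be written
    // like this (a sketch; the exit kind, registers, and source value depend on
    // the node being compiled):
    //
    //     speculationCheck(
    //         Overflow, JSValueSource(), nullptr,
    //         m_jit.branchAdd32(MacroAssembler::Overflow, op1GPR, resultGPR));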

    void emitInvalidationPoint(Node*);

    void unreachable(Node*);

    // Called when we statically determine that a speculation will fail.
    void terminateSpeculativeExecution(ExitKind, JSValueRegs, Node*);
    void terminateSpeculativeExecution(ExitKind, JSValueRegs, Edge);

    // Helpers for performing type checks on an edge stored in the given registers.
    bool needsTypeCheck(Edge edge, SpeculatedType typesPassedThrough) { return m_interpreter.needsTypeCheck(edge, typesPassedThrough); }
    void typeCheck(JSValueSource, Edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind = BadType);

    void speculateCellTypeWithoutTypeFiltering(Edge, GPRReg cellGPR, JSType);
    void speculateCellType(Edge, GPRReg cellGPR, SpeculatedType, JSType);

    void speculateInt32(Edge);
#if USE(JSVALUE64)
    void convertAnyInt(Edge, GPRReg resultGPR);
    void speculateAnyInt(Edge);
    void speculateInt32(Edge, JSValueRegs);
    void speculateDoubleRepAnyInt(Edge);
#endif // USE(JSVALUE64)
    void speculateNumber(Edge);
    void speculateRealNumber(Edge);
    void speculateDoubleRepReal(Edge);
    void speculateBoolean(Edge);
    void speculateCell(Edge);
    void speculateCellOrOther(Edge);
    void speculateObject(Edge, GPRReg cell);
    void speculateObject(Edge);
    void speculateArray(Edge, GPRReg cell);
    void speculateArray(Edge);
    void speculateFunction(Edge, GPRReg cell);
    void speculateFunction(Edge);
    void speculateFinalObject(Edge, GPRReg cell);
    void speculateFinalObject(Edge);
    void speculateRegExpObject(Edge, GPRReg cell);
    void speculateRegExpObject(Edge);
    void speculatePromiseObject(Edge);
    void speculatePromiseObject(Edge, GPRReg cell);
    void speculateProxyObject(Edge, GPRReg cell);
    void speculateProxyObject(Edge);
    void speculateDerivedArray(Edge, GPRReg cell);
    void speculateDerivedArray(Edge);
    void speculateDateObject(Edge);
    void speculateDateObject(Edge, GPRReg cell);
    void speculateMapObject(Edge);
    void speculateMapObject(Edge, GPRReg cell);
    void speculateSetObject(Edge);
    void speculateSetObject(Edge, GPRReg cell);
    void speculateWeakMapObject(Edge);
    void speculateWeakMapObject(Edge, GPRReg cell);
    void speculateWeakSetObject(Edge);
    void speculateWeakSetObject(Edge, GPRReg cell);
    void speculateDataViewObject(Edge);
    void speculateDataViewObject(Edge, GPRReg cell);
    void speculateObjectOrOther(Edge);
    void speculateString(Edge edge, GPRReg cell);
    void speculateStringIdentAndLoadStorage(Edge edge, GPRReg string, GPRReg storage);
    void speculateStringIdent(Edge edge, GPRReg string);
    void speculateStringIdent(Edge);
    void speculateString(Edge);
    void speculateStringOrOther(Edge, JSValueRegs, GPRReg scratch);
    void speculateStringOrOther(Edge);
    void speculateNotStringVar(Edge);
    void speculateNotSymbol(Edge);
    void speculateStringObject(Edge, GPRReg);
    void speculateStringObject(Edge);
    void speculateStringOrStringObject(Edge);
    void speculateSymbol(Edge, GPRReg cell);
    void speculateSymbol(Edge);
    void speculateBigInt(Edge, GPRReg cell);
    void speculateBigInt(Edge);
    void speculateNotCell(Edge, JSValueRegs);
    void speculateNotCell(Edge);
    void speculateOther(Edge, JSValueRegs, GPRReg temp);
    void speculateOther(Edge, JSValueRegs);
    void speculateOther(Edge);
    void speculateMisc(Edge, JSValueRegs);
    void speculateMisc(Edge);
    void speculate(Node*, Edge);

    JITCompiler::JumpList jumpSlowForUnwantedArrayMode(GPRReg tempWithIndexingTypeReg, ArrayMode);
    void checkArray(Node*);
    void arrayify(Node*, GPRReg baseReg, GPRReg propertyReg);
    void arrayify(Node*);

    template<bool strict>
    GPRReg fillSpeculateInt32Internal(Edge, DataFormat& returnFormat);

    void cageTypedArrayStorage(GPRReg, GPRReg);

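    // Record in the variable event stream that the given bytecode variable now
    // lives in the given machine stack slot, in the given format. The OSR exit
    // machinery replays these events to reconstruct the bytecode frame state.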
    void recordSetLocal(
        VirtualRegister bytecodeReg, VirtualRegister machineReg, DataFormat format)
    {
        m_stream->appendAndLog(VariableEvent::setLocal(bytecodeReg, machineReg, format));
    }

    void recordSetLocal(DataFormat format)
    {
        VariableAccessData* variable = m_currentNode->variableAccessData();
        recordSetLocal(variable->local(), variable->machineLocal(), format);
    }

    GenerationInfo& generationInfoFromVirtualRegister(VirtualRegister virtualRegister)
    {
        return m_generationInfo[virtualRegister.toLocal()];
    }

    GenerationInfo& generationInfo(Node* node)
    {
        return generationInfoFromVirtualRegister(node->virtualRegister());
    }

    GenerationInfo& generationInfo(Edge edge)
    {
        return generationInfo(edge.node());
    }

    // The JIT, which also provides MacroAssembler functionality.
    JITCompiler& m_jit;
    Graph& m_graph;

    // The current block and node being generated.
    BasicBlock* m_block;
    Node* m_currentNode;
    NodeType m_lastGeneratedNode;
    unsigned m_indexInBlock;

    // Virtual and physical register maps.
    Vector<GenerationInfo, 32> m_generationInfo;
    RegisterBank<GPRInfo> m_gprs;
    RegisterBank<FPRInfo> m_fprs;

    // It is possible, during speculative generation, to reach a situation in which we
    // can statically determine a speculation will fail (for example, when two nodes
    // will make conflicting speculations about the same operand). In such cases this
    // flag is cleared, indicating no further code generation should take place.
    bool m_compileOkay;

    Vector<MacroAssembler::Label> m_osrEntryHeads;

    struct BranchRecord {
        BranchRecord(MacroAssembler::Jump jump, BasicBlock* destination)
            : jump(jump)
            , destination(destination)
        {
        }

        MacroAssembler::Jump jump;
        BasicBlock* destination;
    };
    Vector<BranchRecord, 8> m_branches;

    NodeOrigin m_origin;

    InPlaceAbstractState m_state;
    AbstractInterpreter<InPlaceAbstractState> m_interpreter;

    VariableEventStream* m_stream;
    MinifiedGraph* m_minifiedGraph;

    Vector<std::unique_ptr<SlowPathGenerator>, 8> m_slowPathGenerators;
    struct SlowPathLambda {
        Function<void()> generator;
        Node* currentNode;
        unsigned streamIndex;
    };
    Vector<SlowPathLambda> m_slowPathLambdas;
    Vector<SilentRegisterSavePlan> m_plans;
    Optional<unsigned> m_outOfLineStreamIndex;
};


// === Operand types ===
//
// These classes are used to lock the operands to a node into machine
// registers. These classes implement a pattern of locking a value into a
// register at the point of construction only if it is already in registers,
// and otherwise loading it lazily at the point it is first used. We do this
// to avoid spilling one operand in order to make space available for another.
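//
// For instance, a node with an untyped child might bind its operand like this
// (a sketch; the fill happens lazily on the first jsValueRegs() request, and
// the locked registers are released when the operand goes out of scope):
//
//     JSValueOperand value(this, node->child1());
//     JSValueRegs valueRegs = value.jsValueRegs();
//     // ... emit code that reads valueRegs ...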

class JSValueOperand {
    WTF_MAKE_FAST_ALLOCATED;
public:
    explicit JSValueOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
#if USE(JSVALUE64)
        , m_gprOrInvalid(InvalidGPRReg)
#elif USE(JSVALUE32_64)
        , m_isDouble(false)
#endif
    {
        ASSERT(m_jit);
        if (!edge)
            return;
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse);
#if USE(JSVALUE64)
        if (jit->isFilled(node()))
            gpr();
#elif USE(JSVALUE32_64)
        m_register.pair.tagGPR = InvalidGPRReg;
        m_register.pair.payloadGPR = InvalidGPRReg;
        if (jit->isFilled(node()))
            fill();
#endif
    }

    explicit JSValueOperand(JSValueOperand&& other)
        : m_jit(other.m_jit)
        , m_edge(other.m_edge)
    {
#if USE(JSVALUE64)
        m_gprOrInvalid = other.m_gprOrInvalid;
#elif USE(JSVALUE32_64)
        m_register.pair.tagGPR = InvalidGPRReg;
        m_register.pair.payloadGPR = InvalidGPRReg;
        m_isDouble = other.m_isDouble;

        if (m_edge) {
            if (m_isDouble)
                m_register.fpr = other.m_register.fpr;
            else
                m_register.pair = other.m_register.pair;
        }
#endif
        other.m_edge = Edge();
#if USE(JSVALUE64)
        other.m_gprOrInvalid = InvalidGPRReg;
#elif USE(JSVALUE32_64)
        other.m_isDouble = false;
#endif
    }

    ~JSValueOperand()
    {
        if (!m_edge)
            return;
#if USE(JSVALUE64)
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
#elif USE(JSVALUE32_64)
        if (m_isDouble) {
            ASSERT(m_register.fpr != InvalidFPRReg);
            m_jit->unlock(m_register.fpr);
        } else {
            ASSERT(m_register.pair.tagGPR != InvalidGPRReg && m_register.pair.payloadGPR != InvalidGPRReg);
            m_jit->unlock(m_register.pair.tagGPR);
            m_jit->unlock(m_register.pair.payloadGPR);
        }
#endif
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

#if USE(JSVALUE64)
    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillJSValue(m_edge);
        return m_gprOrInvalid;
    }
    JSValueRegs jsValueRegs()
    {
        return JSValueRegs(gpr());
    }
#elif USE(JSVALUE32_64)
    bool isDouble() { return m_isDouble; }

    void fill()
    {
        if (m_register.pair.tagGPR == InvalidGPRReg && m_register.pair.payloadGPR == InvalidGPRReg)
            m_isDouble = !m_jit->fillJSValue(m_edge, m_register.pair.tagGPR, m_register.pair.payloadGPR, m_register.fpr);
    }

    GPRReg tagGPR()
    {
        fill();
        ASSERT(!m_isDouble);
        return m_register.pair.tagGPR;
    }

    GPRReg payloadGPR()
    {
        fill();
        ASSERT(!m_isDouble);
        return m_register.pair.payloadGPR;
    }

    JSValueRegs jsValueRegs()
    {
        return JSValueRegs(tagGPR(), payloadGPR());
    }

    GPRReg gpr(WhichValueWord which)
    {
        return jsValueRegs().gpr(which);
    }

    FPRReg fpr()
    {
        fill();
        ASSERT(m_isDouble);
        return m_register.fpr;
    }
#endif

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
#if USE(JSVALUE64)
    GPRReg m_gprOrInvalid;
#elif USE(JSVALUE32_64)
    union {
        struct {
            GPRReg tagGPR;
            GPRReg payloadGPR;
        } pair;
        FPRReg fpr;
    } m_register;
    bool m_isDouble;
#endif
};

class StorageOperand {
    WTF_MAKE_FAST_ALLOCATED;
public:
    explicit StorageOperand(SpeculativeJIT* jit, Edge edge)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
    {
        ASSERT(m_jit);
        ASSERT(edge.useKind() == UntypedUse || edge.useKind() == KnownCellUse);
        if (jit->isFilled(node()))
            gpr();
    }

    ~StorageOperand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillStorage(edge());
        return m_gprOrInvalid;
    }

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
};


// === Temporaries ===
//
// These classes are used to allocate temporary registers.
// A mechanism is provided to attempt to reuse the registers
// currently allocated to child nodes whose value is consumed
// by, and not live after, this operation.
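//
// For example (a sketch): if op1 dies at this node, its register can be
// recycled as the result register instead of allocating a fresh one:
//
//     SpeculateInt32Operand op1(this, node->child1());
//     GPRTemporary result(this, Reuse, op1);
//     // result.gpr() aliases op1.gpr() when reuse was possible.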

enum ReuseTag { Reuse };

class GPRTemporary {
    WTF_MAKE_FAST_ALLOCATED;
public:
    GPRTemporary();
    GPRTemporary(SpeculativeJIT*);
    GPRTemporary(SpeculativeJIT*, GPRReg specific);
    template<typename T>
    GPRTemporary(SpeculativeJIT* jit, ReuseTag, T& operand)
        : m_jit(jit)
        , m_gpr(InvalidGPRReg)
    {
        if (m_jit->canReuse(operand.node()))
            m_gpr = m_jit->reuse(operand.gpr());
        else
            m_gpr = m_jit->allocate();
    }
    template<typename T1, typename T2>
    GPRTemporary(SpeculativeJIT* jit, ReuseTag, T1& op1, T2& op2)
        : m_jit(jit)
        , m_gpr(InvalidGPRReg)
    {
        if (m_jit->canReuse(op1.node()))
            m_gpr = m_jit->reuse(op1.gpr());
        else if (m_jit->canReuse(op2.node()))
            m_gpr = m_jit->reuse(op2.gpr());
        else if (m_jit->canReuse(op1.node(), op2.node()) && op1.gpr() == op2.gpr())
            m_gpr = m_jit->reuse(op1.gpr());
        else
            m_gpr = m_jit->allocate();
    }
    GPRTemporary(SpeculativeJIT*, ReuseTag, JSValueOperand&, WhichValueWord);

    GPRTemporary(GPRTemporary& other) = delete;

    GPRTemporary(GPRTemporary&& other)
    {
        ASSERT(other.m_jit);
        ASSERT(other.m_gpr != InvalidGPRReg);
        m_jit = other.m_jit;
        m_gpr = other.m_gpr;
        other.m_jit = nullptr;
        other.m_gpr = InvalidGPRReg;
    }

    GPRTemporary& operator=(GPRTemporary&& other)
    {
        ASSERT(!m_jit);
        ASSERT(m_gpr == InvalidGPRReg);
        std::swap(m_jit, other.m_jit);
        std::swap(m_gpr, other.m_gpr);
        return *this;
    }

    void adopt(GPRTemporary&);

    ~GPRTemporary()
    {
        if (m_jit && m_gpr != InvalidGPRReg)
            m_jit->unlock(gpr());
    }

    GPRReg gpr()
    {
        return m_gpr;
    }

private:
    SpeculativeJIT* m_jit;
    GPRReg m_gpr;
};

class JSValueRegsTemporary {
    WTF_MAKE_FAST_ALLOCATED;
public:
    JSValueRegsTemporary();
    JSValueRegsTemporary(SpeculativeJIT*);
    template<typename T>
    JSValueRegsTemporary(SpeculativeJIT*, ReuseTag, T& operand, WhichValueWord resultRegWord = PayloadWord);
    JSValueRegsTemporary(SpeculativeJIT*, ReuseTag, JSValueOperand&);
    ~JSValueRegsTemporary();

    JSValueRegs regs();

private:
#if USE(JSVALUE64)
    GPRTemporary m_gpr;
#else
    GPRTemporary m_payloadGPR;
    GPRTemporary m_tagGPR;
#endif
};

class FPRTemporary {
    WTF_MAKE_FAST_ALLOCATED;
public:
    FPRTemporary(FPRTemporary&&);
    FPRTemporary(SpeculativeJIT*);
    FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&);
    FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&, SpeculateDoubleOperand&);
#if USE(JSVALUE32_64)
    FPRTemporary(SpeculativeJIT*, JSValueOperand&);
#endif

    ~FPRTemporary()
    {
        if (LIKELY(m_jit))
            m_jit->unlock(fpr());
    }

    FPRReg fpr() const
    {
        ASSERT(m_jit);
        ASSERT(m_fpr != InvalidFPRReg);
        return m_fpr;
    }

protected:
    FPRTemporary(SpeculativeJIT* jit, FPRReg lockedFPR)
        : m_jit(jit)
        , m_fpr(lockedFPR)
    {
    }

private:
    SpeculativeJIT* m_jit;
    FPRReg m_fpr;
};


// === Results ===
//
// These classes lock the result of a call to a C++ helper function.
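//
// For example (a sketch; the operation name is a stand-in): after flushing
// live registers, the return value of an operation call is claimed through
// one of these classes rather than through a plain temporary:
//
//     flushRegisters();
//     GPRFlushedCallResult result(this);
//     callOperation(operationDoSomething, result.gpr(), ...);
//     m_jit.exceptionCheck();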

class GPRFlushedCallResult : public GPRTemporary {
public:
    GPRFlushedCallResult(SpeculativeJIT* jit)
        : GPRTemporary(jit, GPRInfo::returnValueGPR)
    {
    }
};

#if USE(JSVALUE32_64)
class GPRFlushedCallResult2 : public GPRTemporary {
public:
    GPRFlushedCallResult2(SpeculativeJIT* jit)
        : GPRTemporary(jit, GPRInfo::returnValueGPR2)
    {
    }
};
#endif

class FPRResult : public FPRTemporary {
public:
    FPRResult(SpeculativeJIT* jit)
        : FPRTemporary(jit, lockedResult(jit))
    {
    }

private:
    static FPRReg lockedResult(SpeculativeJIT* jit)
    {
        jit->lock(FPRInfo::returnValueFPR);
        return FPRInfo::returnValueFPR;
    }
};

class JSValueRegsFlushedCallResult {
    WTF_MAKE_FAST_ALLOCATED;
public:
    JSValueRegsFlushedCallResult(SpeculativeJIT* jit)
#if USE(JSVALUE64)
        : m_gpr(jit)
#else
        : m_payloadGPR(jit)
        , m_tagGPR(jit)
#endif
    {
    }

    JSValueRegs regs()
    {
#if USE(JSVALUE64)
        return JSValueRegs { m_gpr.gpr() };
#else
        return JSValueRegs { m_tagGPR.gpr(), m_payloadGPR.gpr() };
#endif
    }

private:
#if USE(JSVALUE64)
    GPRFlushedCallResult m_gpr;
#else
    GPRFlushedCallResult m_payloadGPR;
    GPRFlushedCallResult2 m_tagGPR;
#endif
};


// === Speculative Operand types ===
//
// SpeculateInt32Operand, SpeculateStrictInt32Operand and SpeculateCellOperand.
//
// These are used to lock the operands to a node into machine registers within the
// SpeculativeJIT. The classes operate like those above; however, these will
// perform a speculative check for a more restrictive type than we can statically
// determine the operand to have. If the operand does not have the requested type,
// a bail-out to the non-speculative path will be taken.
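//
// A typical binding (a sketch): the fill itself plants the type check, and a
// failed check triggers an OSR exit rather than continuing on this code path:
//
//     SpeculateInt32Operand op1(this, node->child1());
//     GPRReg op1GPR = op1.gpr(); // Fills the operand, checking Int32 if needed.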

class SpeculateInt32Operand {
    WTF_MAKE_FAST_ALLOCATED;
public:
    explicit SpeculateInt32Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
#ifndef NDEBUG
        , m_format(DataFormatNone)
#endif
    {
        ASSERT(m_jit);
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));
        if (jit->isFilled(node()))
            gpr();
    }

    ~SpeculateInt32Operand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    DataFormat format()
    {
        gpr(); // m_format is set when m_gpr is locked.
        ASSERT(m_format == DataFormatInt32 || m_format == DataFormatJSInt32);
        return m_format;
    }

    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillSpeculateInt32(edge(), m_format);
        return m_gprOrInvalid;
    }

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
    DataFormat m_format;
};

class SpeculateStrictInt32Operand {
    WTF_MAKE_FAST_ALLOCATED;
public:
    explicit SpeculateStrictInt32Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
    {
        ASSERT(m_jit);
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));
        if (jit->isFilled(node()))
            gpr();
    }

    ~SpeculateStrictInt32Operand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillSpeculateInt32Strict(edge());
        return m_gprOrInvalid;
    }

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
};

// Gives you a canonical Int52 (i.e. it's left-shifted by 16, low bits zero).
class SpeculateInt52Operand {
    WTF_MAKE_FAST_ALLOCATED;
public:
    explicit SpeculateInt52Operand(SpeculativeJIT* jit, Edge edge)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
    {
        RELEASE_ASSERT(edge.useKind() == Int52RepUse);
        if (jit->isFilled(node()))
            gpr();
    }

    ~SpeculateInt52Operand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillSpeculateInt52(edge(), DataFormatInt52);
        return m_gprOrInvalid;
    }

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
};

// Gives you a strict Int52 (i.e. the payload is in the low 48 bits, high 16 bits are sign-extended).
class SpeculateStrictInt52Operand {
    WTF_MAKE_FAST_ALLOCATED;
public:
    explicit SpeculateStrictInt52Operand(SpeculativeJIT* jit, Edge edge)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
    {
        RELEASE_ASSERT(edge.useKind() == Int52RepUse);
        if (jit->isFilled(node()))
            gpr();
    }

    ~SpeculateStrictInt52Operand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillSpeculateInt52(edge(), DataFormatStrictInt52);
        return m_gprOrInvalid;
    }

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
};
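
// To make the two Int52 formats concrete: for the integer 5, a register in
// DataFormatInt52 holds 5 << 16, while a register in DataFormatStrictInt52
// holds plain 5 (sign-extended into the high 16 bits).
// SpeculateWhicheverInt52Operand below accepts either format, preferring
// whichever representation the value is already in.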

enum OppositeShiftTag { OppositeShift };

class SpeculateWhicheverInt52Operand {
    WTF_MAKE_FAST_ALLOCATED;
public:
    explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
        , m_strict(jit->betterUseStrictInt52(edge))
    {
        RELEASE_ASSERT(edge.useKind() == Int52RepUse);
        if (jit->isFilled(node()))
            gpr();
    }

    explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, const SpeculateWhicheverInt52Operand& other)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
        , m_strict(other.m_strict)
    {
        RELEASE_ASSERT(edge.useKind() == Int52RepUse);
        if (jit->isFilled(node()))
            gpr();
    }

    explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, OppositeShiftTag, const SpeculateWhicheverInt52Operand& other)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
        , m_strict(!other.m_strict)
    {
        RELEASE_ASSERT(edge.useKind() == Int52RepUse);
        if (jit->isFilled(node()))
            gpr();
    }

    ~SpeculateWhicheverInt52Operand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg) {
            m_gprOrInvalid = m_jit->fillSpeculateInt52(
                edge(), m_strict ? DataFormatStrictInt52 : DataFormatInt52);
        }
        return m_gprOrInvalid;
    }

    void use()
    {
        m_jit->use(node());
    }

    DataFormat format() const
    {
        return m_strict ? DataFormatStrictInt52 : DataFormatInt52;
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
    bool m_strict;
};

class SpeculateDoubleOperand {
    WTF_MAKE_FAST_ALLOCATED;
public:
    explicit SpeculateDoubleOperand(SpeculativeJIT* jit, Edge edge)
        : m_jit(jit)
        , m_edge(edge)
        , m_fprOrInvalid(InvalidFPRReg)
    {
        ASSERT(m_jit);
        RELEASE_ASSERT(isDouble(edge.useKind()));
        if (jit->isFilled(node()))
            fpr();
    }

    ~SpeculateDoubleOperand()
    {
        ASSERT(m_fprOrInvalid != InvalidFPRReg);
        m_jit->unlock(m_fprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    FPRReg fpr()
    {
        if (m_fprOrInvalid == InvalidFPRReg)
            m_fprOrInvalid = m_jit->fillSpeculateDouble(edge());
        return m_fprOrInvalid;
    }

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    FPRReg m_fprOrInvalid;
};

class SpeculateCellOperand {
    WTF_MAKE_FAST_ALLOCATED;

public:
    explicit SpeculateCellOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
    {
        ASSERT(m_jit);
        if (!edge)
            return;
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || isCell(edge.useKind()));
        if (jit->isFilled(node()))
            gpr();
    }

    explicit SpeculateCellOperand(SpeculateCellOperand&& other)
    {
        m_jit = other.m_jit;
        m_edge = other.m_edge;
        m_gprOrInvalid = other.m_gprOrInvalid;

        other.m_gprOrInvalid = InvalidGPRReg;
        other.m_edge = Edge();
    }

    ~SpeculateCellOperand()
    {
        if (!m_edge)
            return;
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    GPRReg gpr()
    {
        ASSERT(m_edge);
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillSpeculateCell(edge());
        return m_gprOrInvalid;
    }

    void use()
    {
        ASSERT(m_edge);
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
};

class SpeculateBooleanOperand {
    WTF_MAKE_FAST_ALLOCATED;
public:
    explicit SpeculateBooleanOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
    {
        ASSERT(m_jit);
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse || edge.useKind() == KnownBooleanUse);
        if (jit->isFilled(node()))
            gpr();
    }

    ~SpeculateBooleanOperand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillSpeculateBoolean(edge());
        return m_gprOrInvalid;
    }

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
};
#define DFG_TYPE_CHECK_WITH_EXIT_KIND(exitKind, source, edge, typesPassedThrough, jumpToFail) do { \
        JSValueSource _dtc_source = (source); \
        Edge _dtc_edge = (edge); \
        SpeculatedType _dtc_typesPassedThrough = typesPassedThrough; \
        if (!needsTypeCheck(_dtc_edge, _dtc_typesPassedThrough)) \
            break; \
        typeCheck(_dtc_source, _dtc_edge, _dtc_typesPassedThrough, (jumpToFail), exitKind); \
    } while (0)

#define DFG_TYPE_CHECK(source, edge, typesPassedThrough, jumpToFail) \
    DFG_TYPE_CHECK_WITH_EXIT_KIND(BadType, source, edge, typesPassedThrough, jumpToFail)
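
// Typical use inside a speculate* helper (a sketch; the source, speculated
// type, and branch depend on what is being checked):
//
//     DFG_TYPE_CHECK(
//         JSValueSource::unboxedCell(cellGPR), edge, SpecObject,
//         m_jit.branchIfNotObject(cellGPR));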

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)