1/*
2 * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#pragma once
27
28#if ENABLE(DFG_JIT)
29
30#include "B3SparseCollection.h"
31#include "BasicBlockLocation.h"
32#include "CodeBlock.h"
33#include "DFGAdjacencyList.h"
34#include "DFGArithMode.h"
35#include "DFGArrayMode.h"
36#include "DFGCommon.h"
37#include "DFGEpoch.h"
38#include "DFGLazyJSValue.h"
39#include "DFGMultiGetByOffsetData.h"
40#include "DFGNodeFlags.h"
41#include "DFGNodeOrigin.h"
42#include "DFGNodeType.h"
43#include "DFGObjectMaterializationData.h"
44#include "DFGOpInfo.h"
45#include "DFGRegisteredStructure.h"
46#include "DFGRegisteredStructureSet.h"
47#include "DFGTransition.h"
48#include "DFGUseKind.h"
49#include "DFGVariableAccessData.h"
50#include "GetByIdVariant.h"
51#include "JSCJSValue.h"
52#include "Operands.h"
53#include "PutByIdVariant.h"
54#include "SpeculatedType.h"
55#include "TypeLocation.h"
56#include "ValueProfile.h"
57#include <type_traits>
58#include <wtf/ListDump.h>
59#include <wtf/LoggingHashSet.h>
60
61namespace JSC {
62
63namespace DOMJIT {
64class GetterSetter;
65class CallDOMGetterSnippet;
66class Signature;
67}
68
69namespace Profiler {
70class ExecutionCounter;
71}
72
73class Snippet;
74
75namespace DFG {
76
77class Graph;
78class PromotedLocationDescriptor;
79struct BasicBlock;
80
81struct StorageAccessData {
82 PropertyOffset offset;
83 unsigned identifierNumber;
84};
85
86struct MultiPutByOffsetData {
87 unsigned identifierNumber;
88 Vector<PutByIdVariant, 2> variants;
89
90 bool writesStructures() const;
91 bool reallocatesStorage() const;
92};
93
94struct MatchStructureVariant {
95 RegisteredStructure structure;
96 bool result;
97};
98
99struct MatchStructureData {
100 Vector<MatchStructureVariant, 2> variants;
101};
102
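// NewArrayBufferData (and DataViewData below) pack their fields into a single
// 64-bit word (asQuadWord) so the whole payload fits directly into one of a
// Node's OpInfo slots; the static_asserts below guard that layout.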
103struct NewArrayBufferData {
104 union {
105 struct {
106 unsigned vectorLengthHint;
107 unsigned indexingMode;
108 };
109 uint64_t asQuadWord;
110 };
111};
112static_assert(sizeof(IndexingType) <= sizeof(unsigned), "");
113static_assert(sizeof(NewArrayBufferData) == sizeof(uint64_t), "");
114
115struct DataViewData {
116 union {
117 struct {
118 uint8_t byteSize;
119 bool isSigned;
120 bool isFloatingPoint; // Used for the DataViewSet node.
121 TriState isLittleEndian;
122 };
123 uint64_t asQuadWord;
124 };
125};
126static_assert(sizeof(DataViewData) == sizeof(uint64_t), "");
127
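// A BranchTarget's block pointer does double duty: while the graph is being
// built, before basic blocks have been linked, the target is stored as a
// bytecode index bit-cast into the pointer (setBytecodeIndex()/bytecodeIndex()),
// and it is later replaced with the real BasicBlock*.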
128struct BranchTarget {
129 BranchTarget()
130 : block(0)
131 , count(PNaN)
132 {
133 }
134
135 explicit BranchTarget(BasicBlock* block)
136 : block(block)
137 , count(PNaN)
138 {
139 }
140
141 void setBytecodeIndex(unsigned bytecodeIndex)
142 {
143 block = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(bytecodeIndex));
144 }
145 unsigned bytecodeIndex() const { return bitwise_cast<uintptr_t>(block); }
146
147 void dump(PrintStream&) const;
148
149 BasicBlock* block;
150 float count;
151};
152
153struct BranchData {
154 static BranchData withBytecodeIndices(
155 unsigned takenBytecodeIndex, unsigned notTakenBytecodeIndex)
156 {
157 BranchData result;
158 result.taken.block = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(takenBytecodeIndex));
159 result.notTaken.block = bitwise_cast<BasicBlock*>(static_cast<uintptr_t>(notTakenBytecodeIndex));
160 return result;
161 }
162
163 unsigned takenBytecodeIndex() const { return taken.bytecodeIndex(); }
164 unsigned notTakenBytecodeIndex() const { return notTaken.bytecodeIndex(); }
165
166 BasicBlock*& forCondition(bool condition)
167 {
168 if (condition)
169 return taken.block;
170 return notTaken.block;
171 }
172
173 BranchTarget taken;
174 BranchTarget notTaken;
175};
176
// The SwitchData and associated data structures duplicate the information in
// JumpTable. The DFG may ultimately end up using the JumpTable, though it may
// instead decide to do something different - this is entirely up to the DFG.
// These data structures give the DFG a higher-level semantic description of
// what is going on, which will allow it to make the right decision.
//
// Note that there will never be multiple SwitchCases in SwitchData::cases that
// have the same SwitchCase::value: the bytecode's JumpTables never contain
// duplicates, because a JumpTable maps each value to exactly one target. Many
// values may share a target, so we may have duplicate targets, but never
// duplicate values.
188struct SwitchCase {
189 SwitchCase()
190 {
191 }
192
193 SwitchCase(LazyJSValue value, BasicBlock* target)
194 : value(value)
195 , target(target)
196 {
197 }
198
199 static SwitchCase withBytecodeIndex(LazyJSValue value, unsigned bytecodeIndex)
200 {
201 SwitchCase result;
202 result.value = value;
203 result.target.setBytecodeIndex(bytecodeIndex);
204 return result;
205 }
206
207 LazyJSValue value;
208 BranchTarget target;
209};
210
211struct SwitchData {
212 // Initializes most fields to obviously invalid values. Anyone
213 // constructing this should make sure to initialize everything they
214 // care about manually.
215 SwitchData()
216 : switchTableIndex(UINT_MAX)
217 , kind(static_cast<SwitchKind>(-1))
218 , didUseJumpTable(false)
219 {
220 }
221
222 Vector<SwitchCase> cases;
223 BranchTarget fallThrough;
224 size_t switchTableIndex;
225 SwitchKind kind;
226 bool didUseJumpTable;
227};
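
// A rough sketch of how the structures above get populated (hypothetical names;
// the bytecode parser owns the real construction logic):
//
//     SwitchData* data = ...;
//     data->kind = SwitchImm;
//     data->switchTableIndex = tableIndex;
//     LazyJSValue caseValue = ...; // the value this case matches
//     data->cases.append(SwitchCase::withBytecodeIndex(caseValue, caseBytecodeIndex));
//     data->fallThrough.setBytecodeIndex(defaultBytecodeIndex);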
228
229struct EntrySwitchData {
230 Vector<BasicBlock*> cases;
231};
232
233struct CallVarargsData {
234 int firstVarArgOffset;
235};
236
237struct LoadVarargsData {
    VirtualRegister start; // Local for the first element. This is the first actual argument, not "this".
239 VirtualRegister count; // Local for the count.
240 VirtualRegister machineStart;
241 VirtualRegister machineCount;
242 unsigned offset; // Which array element to start with. Usually this is 0.
243 unsigned mandatoryMinimum; // The number of elements on the stack that must be initialized; if the array is too short then the missing elements must get undefined. Does not include "this".
244 unsigned limit; // Maximum number of elements to load. Includes "this".
245};
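
// A rough example of how the fields above interact (hypothetical numbers): for
// a varargs call into a callee declared with three named parameters,
// mandatoryMinimum would be 3 (any elements the arguments array doesn't supply
// are filled with undefined), offset says where in the arguments array copying
// starts (usually 0), and limit caps how many values are loaded in total,
// counting "this". machineStart/machineCount are the machine-stack counterparts
// of start/count, filled in once stack layout has decided where things live.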
246
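// StackAccessData describes a GetStack/PutStack access: "local" is the
// bytecode-level operand being read or written, "machineLocal" is the concrete
// stack slot it is assigned once stack layout has run, and "format" records how
// the value is flushed there (see FlushFormat and flushedAt() below).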
247struct StackAccessData {
248 StackAccessData()
249 : format(DeadFlush)
250 {
251 }
252
253 StackAccessData(VirtualRegister local, FlushFormat format)
254 : local(local)
255 , format(format)
256 {
257 }
258
259 VirtualRegister local;
260 VirtualRegister machineLocal;
261 FlushFormat format;
262
263 FlushedAt flushedAt() { return FlushedAt(format, machineLocal); }
264};
265
266struct CallDOMGetterData {
267 FunctionPtr<OperationPtrTag> customAccessorGetter;
268 const DOMJIT::GetterSetter* domJIT { nullptr };
269 DOMJIT::CallDOMGetterSnippet* snippet { nullptr };
270 unsigned identifierNumber { 0 };
271};
272
273enum class BucketOwnerType : uint32_t {
274 Map,
275 Set
276};
277
278// === Node ===
279//
280// Node represents a single operation in the data flow graph.
281struct Node {
282 WTF_MAKE_FAST_ALLOCATED;
283public:
284 static const char HashSetTemplateInstantiationString[];
285
286 enum VarArgTag { VarArg };
287
288 Node() { }
289
290 Node(NodeType op, NodeOrigin nodeOrigin, const AdjacencyList& children)
291 : origin(nodeOrigin)
292 , children(children)
293 , m_virtualRegister(VirtualRegister())
294 , m_refCount(1)
295 , m_prediction(SpecNone)
296 , owner(nullptr)
297 {
298 m_misc.replacement = nullptr;
299 setOpAndDefaultFlags(op);
300 }
301
302 // Construct a node with up to 3 children, no immediate value.
303 Node(NodeType op, NodeOrigin nodeOrigin, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
304 : origin(nodeOrigin)
305 , children(AdjacencyList::Fixed, child1, child2, child3)
306 , m_virtualRegister(VirtualRegister())
307 , m_refCount(1)
308 , m_prediction(SpecNone)
309 , owner(nullptr)
310 {
311 m_misc.replacement = nullptr;
312 setOpAndDefaultFlags(op);
313 ASSERT(!(m_flags & NodeHasVarArgs));
314 }
315
    // Construct a node with up to 3 children and a specified result type, no immediate value.
317 Node(NodeFlags result, NodeType op, NodeOrigin nodeOrigin, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
318 : origin(nodeOrigin)
319 , children(AdjacencyList::Fixed, child1, child2, child3)
320 , m_virtualRegister(VirtualRegister())
321 , m_refCount(1)
322 , m_prediction(SpecNone)
323 , owner(nullptr)
324 {
325 m_misc.replacement = nullptr;
326 setOpAndDefaultFlags(op);
327 setResult(result);
328 ASSERT(!(m_flags & NodeHasVarArgs));
329 }
330
331 // Construct a node with up to 3 children and an immediate value.
332 Node(NodeType op, NodeOrigin nodeOrigin, OpInfo imm, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
333 : origin(nodeOrigin)
334 , children(AdjacencyList::Fixed, child1, child2, child3)
335 , m_virtualRegister(VirtualRegister())
336 , m_refCount(1)
337 , m_prediction(SpecNone)
338 , m_opInfo(imm.m_value)
339 , owner(nullptr)
340 {
341 m_misc.replacement = nullptr;
342 setOpAndDefaultFlags(op);
343 ASSERT(!(m_flags & NodeHasVarArgs));
344 }
345
    // Construct a node with up to 3 children, a specified result type, and an immediate value.
347 Node(NodeFlags result, NodeType op, NodeOrigin nodeOrigin, OpInfo imm, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
348 : origin(nodeOrigin)
349 , children(AdjacencyList::Fixed, child1, child2, child3)
350 , m_virtualRegister(VirtualRegister())
351 , m_refCount(1)
352 , m_prediction(SpecNone)
353 , m_opInfo(imm.m_value)
354 , owner(nullptr)
355 {
356 m_misc.replacement = nullptr;
357 setOpAndDefaultFlags(op);
358 setResult(result);
359 ASSERT(!(m_flags & NodeHasVarArgs));
360 }
361
362 // Construct a node with up to 3 children and two immediate values.
363 Node(NodeType op, NodeOrigin nodeOrigin, OpInfo imm1, OpInfo imm2, Edge child1 = Edge(), Edge child2 = Edge(), Edge child3 = Edge())
364 : origin(nodeOrigin)
365 , children(AdjacencyList::Fixed, child1, child2, child3)
366 , m_virtualRegister(VirtualRegister())
367 , m_refCount(1)
368 , m_prediction(SpecNone)
369 , m_opInfo(imm1.m_value)
370 , m_opInfo2(imm2.m_value)
371 , owner(nullptr)
372 {
373 m_misc.replacement = nullptr;
374 setOpAndDefaultFlags(op);
375 ASSERT(!(m_flags & NodeHasVarArgs));
376 }
377
378 // Construct a node with a variable number of children and two immediate values.
379 Node(VarArgTag, NodeType op, NodeOrigin nodeOrigin, OpInfo imm1, OpInfo imm2, unsigned firstChild, unsigned numChildren)
380 : origin(nodeOrigin)
381 , children(AdjacencyList::Variable, firstChild, numChildren)
382 , m_virtualRegister(VirtualRegister())
383 , m_refCount(1)
384 , m_prediction(SpecNone)
385 , m_opInfo(imm1.m_value)
386 , m_opInfo2(imm2.m_value)
387 , owner(nullptr)
388 {
389 m_misc.replacement = nullptr;
390 setOpAndDefaultFlags(op);
391 ASSERT(m_flags & NodeHasVarArgs);
392 }
393
394 NodeType op() const { return static_cast<NodeType>(m_op); }
395 NodeFlags flags() const { return m_flags; }
396
397 unsigned index() const { return m_index; }
398
399 void setOp(NodeType op)
400 {
401 m_op = op;
402 }
403
404 void setFlags(NodeFlags flags)
405 {
406 m_flags = flags;
407 }
408
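    // mergeFlags() and filterFlags() return true only if they actually changed
    // m_flags; phases that iterate to a fixpoint (e.g. flag propagation) can use
    // that to detect when they are done.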
409 bool mergeFlags(NodeFlags flags)
410 {
411 NodeFlags newFlags = m_flags | flags;
412 if (newFlags == m_flags)
413 return false;
414 m_flags = newFlags;
415 return true;
416 }
417
418 bool filterFlags(NodeFlags flags)
419 {
420 NodeFlags newFlags = m_flags & flags;
421 if (newFlags == m_flags)
422 return false;
423 m_flags = newFlags;
424 return true;
425 }
426
427 bool clearFlags(NodeFlags flags)
428 {
429 return filterFlags(~flags);
430 }
431
432 void setResult(NodeFlags result)
433 {
434 ASSERT(!(result & ~NodeResultMask));
435 clearFlags(NodeResultMask);
436 mergeFlags(result);
437 }
438
439 NodeFlags result() const
440 {
441 return flags() & NodeResultMask;
442 }
443
444 void setOpAndDefaultFlags(NodeType op)
445 {
446 m_op = op;
447 m_flags = defaultFlags(op);
448 }
449
450 void remove(Graph&);
451 void removeWithoutChecks();
452
453 void convertToCheckStructure(RegisteredStructureSet* set)
454 {
455 setOpAndDefaultFlags(CheckStructure);
456 m_opInfo = set;
457 }
458
459 void convertToCheckStructureOrEmpty(RegisteredStructureSet* set)
460 {
461 if (SpecCellCheck & SpecEmpty)
462 setOpAndDefaultFlags(CheckStructureOrEmpty);
463 else
464 setOpAndDefaultFlags(CheckStructure);
465 m_opInfo = set;
466 }
467
468 void convertCheckStructureOrEmptyToCheckStructure()
469 {
470 ASSERT(op() == CheckStructureOrEmpty);
471 setOpAndDefaultFlags(CheckStructure);
472 }
473
474 void convertToCheckStructureImmediate(Node* structure)
475 {
476 ASSERT(op() == CheckStructure || op() == CheckStructureOrEmpty);
477 m_op = CheckStructureImmediate;
478 children.setChild1(Edge(structure, CellUse));
479 }
480
481 void replaceWith(Graph&, Node* other);
482 void replaceWithWithoutChecks(Node* other);
483
484 void convertToIdentity();
485 void convertToIdentityOn(Node*);
486
487 bool mustGenerate()
488 {
489 return m_flags & NodeMustGenerate;
490 }
491
492 bool isConstant()
493 {
494 switch (op()) {
495 case JSConstant:
496 case DoubleConstant:
497 case Int52Constant:
498 return true;
499 default:
500 return false;
501 }
502 }
503
504 bool hasConstant()
505 {
506 switch (op()) {
507 case JSConstant:
508 case DoubleConstant:
509 case Int52Constant:
510 return true;
511
512 case PhantomDirectArguments:
513 case PhantomClonedArguments:
514 // These pretend to be the empty value constant for the benefit of the DFG backend, which
515 // otherwise wouldn't take kindly to a node that doesn't compute a value.
516 return true;
517
518 default:
519 return false;
520 }
521 }
522
523 FrozenValue* constant()
524 {
525 ASSERT(hasConstant());
526
527 if (op() == PhantomDirectArguments || op() == PhantomClonedArguments) {
528 // These pretend to be the empty value constant for the benefit of the DFG backend, which
529 // otherwise wouldn't take kindly to a node that doesn't compute a value.
530 return FrozenValue::emptySingleton();
531 }
532
533 return m_opInfo.as<FrozenValue*>();
534 }
535
536 // Don't call this directly - use Graph::convertToConstant() instead!
537 void convertToConstant(FrozenValue* value)
538 {
539 if (hasDoubleResult())
540 m_op = DoubleConstant;
541 else if (hasInt52Result())
542 m_op = Int52Constant;
543 else
544 m_op = JSConstant;
545 m_flags &= ~(NodeMustGenerate | NodeHasVarArgs);
546 m_opInfo = value;
547 children.reset();
548 }
549
550 void convertToLazyJSConstant(Graph&, LazyJSValue);
551
552 void convertToConstantStoragePointer(void* pointer)
553 {
554 ASSERT(op() == GetIndexedPropertyStorage);
555 m_op = ConstantStoragePointer;
556 m_opInfo = pointer;
557 children.reset();
558 }
559
560 void convertToPutStack(StackAccessData* data)
561 {
562 m_op = PutStack;
563 m_flags |= NodeMustGenerate;
564 m_opInfo = data;
565 m_opInfo2 = OpInfoWrapper();
566 }
567
568 void convertToGetStack(StackAccessData* data)
569 {
570 m_op = GetStack;
571 m_flags &= ~NodeMustGenerate;
572 m_opInfo = data;
573 m_opInfo2 = OpInfoWrapper();
574 children.reset();
575 }
576
577 void convertToGetByOffset(StorageAccessData& data, Edge storage, Edge base)
578 {
579 ASSERT(m_op == GetById || m_op == GetByIdFlush || m_op == GetByIdDirect || m_op == GetByIdDirectFlush || m_op == MultiGetByOffset);
580 m_opInfo = &data;
581 children.setChild1(storage);
582 children.setChild2(base);
583 m_op = GetByOffset;
584 m_flags &= ~NodeMustGenerate;
585 }
586
587 void convertToMultiGetByOffset(MultiGetByOffsetData* data)
588 {
589 RELEASE_ASSERT(m_op == GetById || m_op == GetByIdFlush || m_op == GetByIdDirect || m_op == GetByIdDirectFlush);
590 m_opInfo = data;
591 child1().setUseKind(CellUse);
592 m_op = MultiGetByOffset;
593 RELEASE_ASSERT(m_flags & NodeMustGenerate);
594 }
595
596 void convertToPutByOffset(StorageAccessData& data, Edge storage, Edge base)
597 {
598 ASSERT(m_op == PutById || m_op == PutByIdDirect || m_op == PutByIdFlush || m_op == MultiPutByOffset);
599 m_opInfo = &data;
600 children.setChild3(children.child2());
601 children.setChild2(base);
602 children.setChild1(storage);
603 m_op = PutByOffset;
604 }
605
606 void convertToMultiPutByOffset(MultiPutByOffsetData* data)
607 {
608 ASSERT(m_op == PutById || m_op == PutByIdDirect || m_op == PutByIdFlush);
609 m_opInfo = data;
610 m_op = MultiPutByOffset;
611 }
612
613 void convertToPhantomNewObject()
614 {
615 ASSERT(m_op == NewObject || m_op == MaterializeNewObject);
616 m_op = PhantomNewObject;
617 m_flags &= ~NodeHasVarArgs;
618 m_flags |= NodeMustGenerate;
619 m_opInfo = OpInfoWrapper();
620 m_opInfo2 = OpInfoWrapper();
621 children = AdjacencyList();
622 }
623
624 void convertToPhantomNewFunction()
625 {
626 ASSERT(m_op == NewFunction || m_op == NewGeneratorFunction || m_op == NewAsyncFunction || m_op == NewAsyncGeneratorFunction);
627 m_op = PhantomNewFunction;
628 m_flags |= NodeMustGenerate;
629 m_opInfo = OpInfoWrapper();
630 m_opInfo2 = OpInfoWrapper();
631 children = AdjacencyList();
632 }
633
634 void convertToPhantomNewGeneratorFunction()
635 {
636 ASSERT(m_op == NewGeneratorFunction);
637 m_op = PhantomNewGeneratorFunction;
638 m_flags |= NodeMustGenerate;
639 m_opInfo = OpInfoWrapper();
640 m_opInfo2 = OpInfoWrapper();
641 children = AdjacencyList();
642 }
643
644 void convertToPhantomNewAsyncFunction()
645 {
646 ASSERT(m_op == NewAsyncFunction);
647 m_op = PhantomNewAsyncFunction;
648 m_flags |= NodeMustGenerate;
649 m_opInfo = OpInfoWrapper();
650 m_opInfo2 = OpInfoWrapper();
651 children = AdjacencyList();
652 }
653
654 void convertToPhantomNewAsyncGeneratorFunction()
655 {
656 ASSERT(m_op == NewAsyncGeneratorFunction);
657 m_op = PhantomNewAsyncGeneratorFunction;
658 m_flags |= NodeMustGenerate;
659 m_opInfo = OpInfoWrapper();
660 m_opInfo2 = OpInfoWrapper();
661 children = AdjacencyList();
662 }
663
664 void convertToPhantomCreateActivation()
665 {
666 ASSERT(m_op == CreateActivation || m_op == MaterializeCreateActivation);
667 m_op = PhantomCreateActivation;
668 m_flags &= ~NodeHasVarArgs;
669 m_flags |= NodeMustGenerate;
670 m_opInfo = OpInfoWrapper();
671 m_opInfo2 = OpInfoWrapper();
672 children = AdjacencyList();
673 }
674
675 void convertToPhantomNewRegexp()
676 {
677 ASSERT(m_op == NewRegexp);
678 setOpAndDefaultFlags(PhantomNewRegexp);
679 m_opInfo = OpInfoWrapper();
680 m_opInfo2 = OpInfoWrapper();
681 children = AdjacencyList();
682 }
683
684 void convertPhantomToPhantomLocal()
685 {
686 ASSERT(m_op == Phantom && (child1()->op() == Phi || child1()->op() == SetLocal || child1()->op() == SetArgumentDefinitely));
687 m_op = PhantomLocal;
688 m_opInfo = child1()->m_opInfo; // Copy the variableAccessData.
689 children.setChild1(Edge());
690 }
691
692 void convertFlushToPhantomLocal()
693 {
694 ASSERT(m_op == Flush);
695 m_op = PhantomLocal;
696 children = AdjacencyList();
697 }
698
699 void convertToToString()
700 {
701 ASSERT(m_op == ToPrimitive || m_op == StringValueOf);
702 m_op = ToString;
703 }
704
705 void convertToArithNegate()
706 {
707 ASSERT(m_op == ArithAbs && child1().useKind() == Int32Use);
708 m_op = ArithNegate;
709 }
710
711 void convertToCompareEqPtr(FrozenValue* cell, Edge node)
712 {
713 ASSERT(m_op == CompareStrictEq || m_op == SameValue);
714 setOpAndDefaultFlags(CompareEqPtr);
715 children.setChild1(node);
716 children.setChild2(Edge());
717 m_opInfo = cell;
718 }
719
720 void convertToNumberToStringWithValidRadixConstant(int32_t radix)
721 {
722 ASSERT(m_op == NumberToStringWithRadix);
723 ASSERT(2 <= radix && radix <= 36);
724 setOpAndDefaultFlags(NumberToStringWithValidRadixConstant);
725 children.setChild2(Edge());
726 m_opInfo = radix;
727 }
728
729 void convertToGetGlobalThis()
730 {
731 ASSERT(m_op == ToThis);
732 setOpAndDefaultFlags(GetGlobalThis);
733 children.setChild1(Edge());
734 }
735
736 void convertToCallObjectConstructor(FrozenValue* globalObject)
737 {
738 ASSERT(m_op == ToObject);
739 setOpAndDefaultFlags(CallObjectConstructor);
740 m_opInfo = globalObject;
741 }
742
743 void convertToNewStringObject(RegisteredStructure structure)
744 {
745 ASSERT(m_op == CallObjectConstructor || m_op == ToObject);
746 setOpAndDefaultFlags(NewStringObject);
747 m_opInfo = structure;
748 m_opInfo2 = OpInfoWrapper();
749 }
750
751 void convertToNewObject(RegisteredStructure structure)
752 {
753 ASSERT(m_op == CallObjectConstructor || m_op == CreateThis || m_op == ObjectCreate);
754 setOpAndDefaultFlags(NewObject);
755 children.reset();
756 m_opInfo = structure;
757 m_opInfo2 = OpInfoWrapper();
758 }
759
760 void convertToNewArrayBuffer(FrozenValue* immutableButterfly);
761
762 void convertToDirectCall(FrozenValue*);
763
764 void convertToCallDOM(Graph&);
765
766 void convertToRegExpExecNonGlobalOrStickyWithoutChecks(FrozenValue* regExp);
767 void convertToRegExpMatchFastGlobalWithoutChecks(FrozenValue* regExp);
768
769 void convertToSetRegExpObjectLastIndex()
770 {
771 setOp(SetRegExpObjectLastIndex);
772 m_opInfo = false;
773 }
774
775 void convertToInById(unsigned identifierNumber)
776 {
777 ASSERT(m_op == InByVal);
778 setOpAndDefaultFlags(InById);
779 children.setChild2(Edge());
780 m_opInfo = identifierNumber;
781 m_opInfo2 = OpInfoWrapper();
782 }
783
784 JSValue asJSValue()
785 {
786 return constant()->value();
787 }
788
789 bool isInt32Constant()
790 {
791 return isConstant() && constant()->value().isInt32();
792 }
793
794 int32_t asInt32()
795 {
796 return asJSValue().asInt32();
797 }
798
799 uint32_t asUInt32()
800 {
801 return asInt32();
802 }
803
804 bool isDoubleConstant()
805 {
806 return isConstant() && constant()->value().isDouble();
807 }
808
809 bool isNumberConstant()
810 {
811 return isConstant() && constant()->value().isNumber();
812 }
813
814 double asNumber()
815 {
816 return asJSValue().asNumber();
817 }
818
819 bool isAnyIntConstant()
820 {
821 return isConstant() && constant()->value().isAnyInt();
822 }
823
824 int64_t asAnyInt()
825 {
826 return asJSValue().asAnyInt();
827 }
828
829 bool isBooleanConstant()
830 {
831 return isConstant() && constant()->value().isBoolean();
832 }
833
834 bool asBoolean()
835 {
836 return constant()->value().asBoolean();
837 }
838
839 bool isUndefinedOrNullConstant()
840 {
841 return isConstant() && constant()->value().isUndefinedOrNull();
842 }
843
844 bool isCellConstant()
845 {
846 return isConstant() && constant()->value() && constant()->value().isCell();
847 }
848
849 JSCell* asCell()
850 {
851 return constant()->value().asCell();
852 }
853
854 template<typename T>
855 T dynamicCastConstant(VM& vm)
856 {
857 if (!isCellConstant())
858 return nullptr;
859 return jsDynamicCast<T>(vm, asCell());
860 }
861
862 template<typename T>
863 T castConstant(VM& vm)
864 {
865 T result = dynamicCastConstant<T>(vm);
866 RELEASE_ASSERT(result);
867 return result;
868 }
869
870 bool hasLazyJSValue()
871 {
872 return op() == LazyJSConstant;
873 }
874
875 LazyJSValue lazyJSValue()
876 {
877 ASSERT(hasLazyJSValue());
878 return *m_opInfo.as<LazyJSValue*>();
879 }
880
881 String tryGetString(Graph&);
882
883 JSValue initializationValueForActivation() const
884 {
885 ASSERT(op() == CreateActivation);
886 return m_opInfo2.as<FrozenValue*>()->value();
887 }
888
889 bool hasArgumentsChild()
890 {
891 switch (op()) {
892 case GetMyArgumentByVal:
893 case GetMyArgumentByValOutOfBounds:
894 case LoadVarargs:
895 case ForwardVarargs:
896 case CallVarargs:
897 case CallForwardVarargs:
898 case ConstructVarargs:
899 case ConstructForwardVarargs:
900 case TailCallVarargs:
901 case TailCallForwardVarargs:
902 case TailCallVarargsInlinedCaller:
903 case TailCallForwardVarargsInlinedCaller:
904 return true;
905 default:
906 return false;
907 }
908 }
909
910 Edge& argumentsChild()
911 {
912 switch (op()) {
913 case GetMyArgumentByVal:
914 case GetMyArgumentByValOutOfBounds:
915 case LoadVarargs:
916 case ForwardVarargs:
917 return child1();
918 case CallVarargs:
919 case CallForwardVarargs:
920 case ConstructVarargs:
921 case ConstructForwardVarargs:
922 case TailCallVarargs:
923 case TailCallForwardVarargs:
924 case TailCallVarargsInlinedCaller:
925 case TailCallForwardVarargsInlinedCaller:
926 return child3();
927 default:
928 RELEASE_ASSERT_NOT_REACHED();
929 return child1();
930 }
931 }
932
933 bool containsMovHint()
934 {
935 switch (op()) {
936 case MovHint:
937 case ZombieHint:
938 return true;
939 default:
940 return false;
941 }
942 }
943
944 bool hasVariableAccessData(Graph&);
945 bool accessesStack(Graph& graph)
946 {
947 return hasVariableAccessData(graph);
948 }
949
    // This is useful when debugging code where a node that should have a
    // VariableAccessData doesn't have one because it hasn't been initialized yet.
952 VariableAccessData* tryGetVariableAccessData()
953 {
954 VariableAccessData* result = m_opInfo.as<VariableAccessData*>();
955 if (!result)
            return nullptr;
957 return result->find();
958 }
959
960 VariableAccessData* variableAccessData()
961 {
962 return m_opInfo.as<VariableAccessData*>()->find();
963 }
964
965 VirtualRegister local()
966 {
967 return variableAccessData()->local();
968 }
969
970 VirtualRegister machineLocal()
971 {
972 return variableAccessData()->machineLocal();
973 }
974
975 bool hasUnlinkedLocal()
976 {
977 switch (op()) {
978 case ExtractOSREntryLocal:
979 case MovHint:
980 case ZombieHint:
981 case KillStack:
982 return true;
983 default:
984 return false;
985 }
986 }
987
988 VirtualRegister unlinkedLocal()
989 {
990 ASSERT(hasUnlinkedLocal());
991 return VirtualRegister(m_opInfo.as<int32_t>());
992 }
993
994 bool hasStackAccessData()
995 {
996 switch (op()) {
997 case PutStack:
998 case GetStack:
999 return true;
1000 default:
1001 return false;
1002 }
1003 }
1004
1005 StackAccessData* stackAccessData()
1006 {
1007 ASSERT(hasStackAccessData());
1008 return m_opInfo.as<StackAccessData*>();
1009 }
1010
1011 unsigned argumentCountIncludingThis()
1012 {
1013 ASSERT(op() == SetArgumentCountIncludingThis);
1014 return m_opInfo.as<unsigned>();
1015 }
1016
1017 bool hasPhi()
1018 {
1019 return op() == Upsilon;
1020 }
1021
1022 Node* phi()
1023 {
1024 ASSERT(hasPhi());
1025 return m_opInfo.as<Node*>();
1026 }
1027
1028 bool isStoreBarrier()
1029 {
1030 return op() == StoreBarrier || op() == FencedStoreBarrier;
1031 }
1032
1033 bool hasIdentifier()
1034 {
1035 switch (op()) {
1036 case TryGetById:
1037 case GetById:
1038 case GetByIdFlush:
1039 case GetByIdWithThis:
1040 case GetByIdDirect:
1041 case GetByIdDirectFlush:
1042 case PutById:
1043 case PutByIdFlush:
1044 case PutByIdDirect:
1045 case PutByIdWithThis:
1046 case PutGetterById:
1047 case PutSetterById:
1048 case PutGetterSetterById:
1049 case DeleteById:
1050 case InById:
1051 case GetDynamicVar:
1052 case PutDynamicVar:
1053 case ResolveScopeForHoistingFuncDeclInEval:
1054 case ResolveScope:
1055 case ToObject:
1056 return true;
1057 default:
1058 return false;
1059 }
1060 }
1061
1062 unsigned identifierNumber()
1063 {
1064 ASSERT(hasIdentifier());
1065 return m_opInfo.as<unsigned>();
1066 }
1067
1068 bool hasGetPutInfo()
1069 {
1070 switch (op()) {
1071 case GetDynamicVar:
1072 case PutDynamicVar:
1073 return true;
1074 default:
1075 return false;
1076 }
1077 }
1078
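    // For GetDynamicVar/PutDynamicVar the two halves of m_opInfo are shared: the
    // low 32 bits hold the identifier number (see identifierNumber() above) and
    // the high 32 bits hold the GetPutInfo bits extracted here.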
1079 unsigned getPutInfo()
1080 {
1081 ASSERT(hasGetPutInfo());
1082 return static_cast<unsigned>(m_opInfo.as<uint64_t>() >> 32);
1083 }
1084
1085 bool hasAccessorAttributes()
1086 {
1087 switch (op()) {
1088 case PutGetterById:
1089 case PutSetterById:
1090 case PutGetterSetterById:
1091 case PutGetterByVal:
1092 case PutSetterByVal:
1093 return true;
1094 default:
1095 return false;
1096 }
1097 }
1098
1099 int32_t accessorAttributes()
1100 {
1101 ASSERT(hasAccessorAttributes());
1102 switch (op()) {
1103 case PutGetterById:
1104 case PutSetterById:
1105 case PutGetterSetterById:
1106 return m_opInfo2.as<int32_t>();
1107 case PutGetterByVal:
1108 case PutSetterByVal:
1109 return m_opInfo.as<int32_t>();
1110 default:
1111 RELEASE_ASSERT_NOT_REACHED();
1112 return 0;
1113 }
1114 }
1115
1116 bool hasPromotedLocationDescriptor()
1117 {
1118 return op() == PutHint;
1119 }
1120
1121 PromotedLocationDescriptor promotedLocationDescriptor();
1122
    // This corrects the arithmetic node flags, so that irrelevant bits are
    // ignored. In particular, only the ops listed in the check below need to know
    // whether they can speculate on negative zero (for example, ArithMul can
    // produce -0 from (-1) * 0); for all other nodes the NodeBytecodeNeedsNegZero
    // bit is dropped.
    NodeFlags arithNodeFlags()
    {
        NodeFlags result = m_flags & NodeArithFlagsMask;
        if (op() == ArithMul || op() == ArithDiv || op() == ArithMod || op() == ArithNegate || op() == ArithPow || op() == ArithRound || op() == ArithFloor || op() == ArithCeil || op() == ArithTrunc || op() == DoubleAsInt32 || op() == ValueNegate || op() == ValueMul || op() == ValueDiv)
            return result;
        return result & ~NodeBytecodeNeedsNegZero;
    }
1133
1134 bool mayHaveNonIntResult()
1135 {
1136 return m_flags & NodeMayHaveNonIntResult;
1137 }
1138
1139 bool mayHaveDoubleResult()
1140 {
1141 return m_flags & NodeMayHaveDoubleResult;
1142 }
1143
1144 bool mayHaveNonNumericResult()
1145 {
1146 return m_flags & NodeMayHaveNonNumericResult;
1147 }
1148
1149 bool mayHaveBigIntResult()
1150 {
1151 return m_flags & NodeMayHaveBigIntResult;
1152 }
1153
1154 bool hasNewArrayBufferData()
1155 {
1156 return op() == NewArrayBuffer || op() == PhantomNewArrayBuffer;
1157 }
1158
1159 NewArrayBufferData newArrayBufferData()
1160 {
1161 ASSERT(hasNewArrayBufferData());
1162 return m_opInfo2.asNewArrayBufferData();
1163 }
1164
    bool hasVectorLengthHint()
1166 {
1167 switch (op()) {
1168 case NewArray:
1169 case NewArrayBuffer:
1170 case PhantomNewArrayBuffer:
1171 return true;
1172 default:
1173 return false;
1174 }
1175 }
1176
1177 unsigned vectorLengthHint()
1178 {
1179 ASSERT(hasVectorLengthHint());
1180 if (op() == NewArray)
1181 return m_opInfo2.as<unsigned>();
1182 return newArrayBufferData().vectorLengthHint;
1183 }
1184
1185 bool hasIndexingType()
1186 {
1187 switch (op()) {
1188 case NewArray:
1189 case NewArrayWithSize:
1190 case NewArrayBuffer:
1191 case PhantomNewArrayBuffer:
1192 return true;
1193 default:
1194 return false;
1195 }
1196 }
1197
1198 BitVector* bitVector()
1199 {
1200 ASSERT(op() == NewArrayWithSpread || op() == PhantomNewArrayWithSpread);
1201 return m_opInfo.as<BitVector*>();
1202 }
1203
1204 // Return the indexing type that an array allocation *wants* to use. It may end up using a different
1205 // type if we're having a bad time. You can determine the actual indexing type by asking the global
1206 // object:
1207 //
1208 // m_graph.globalObjectFor(node->origin.semantic)->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())
1209 //
    // This will give you a Structure*, and that will have some indexing type that
    // may be different from this one.
1212 IndexingType indexingType()
1213 {
1214 ASSERT(hasIndexingType());
1215 if (op() == NewArrayBuffer || op() == PhantomNewArrayBuffer)
1216 return static_cast<IndexingType>(newArrayBufferData().indexingMode) & IndexingTypeMask;
1217 return static_cast<IndexingType>(m_opInfo.as<uint32_t>());
1218 }
1219
1220 IndexingType indexingMode()
1221 {
1222 ASSERT(hasIndexingType());
1223 if (op() == NewArrayBuffer || op() == PhantomNewArrayBuffer)
1224 return static_cast<IndexingType>(newArrayBufferData().indexingMode);
1225 return static_cast<IndexingType>(m_opInfo.as<uint32_t>());
1226 }
1227
1228 bool hasTypedArrayType()
1229 {
1230 switch (op()) {
1231 case NewTypedArray:
1232 return true;
1233 default:
1234 return false;
1235 }
1236 }
1237
1238 TypedArrayType typedArrayType()
1239 {
1240 ASSERT(hasTypedArrayType());
1241 TypedArrayType result = static_cast<TypedArrayType>(m_opInfo.as<uint32_t>());
1242 ASSERT(isTypedView(result));
1243 return result;
1244 }
1245
1246 bool hasInlineCapacity()
1247 {
1248 return op() == CreateThis;
1249 }
1250
1251 unsigned inlineCapacity()
1252 {
1253 ASSERT(hasInlineCapacity());
1254 return m_opInfo.as<unsigned>();
1255 }
1256
1257 void setIndexingType(IndexingType indexingType)
1258 {
1259 ASSERT(hasIndexingType());
1260 m_opInfo = indexingType;
1261 }
1262
1263 bool hasScopeOffset()
1264 {
1265 return op() == GetClosureVar || op() == PutClosureVar;
1266 }
1267
1268 ScopeOffset scopeOffset()
1269 {
1270 ASSERT(hasScopeOffset());
1271 return ScopeOffset(m_opInfo.as<uint32_t>());
1272 }
1273
1274 bool hasDirectArgumentsOffset()
1275 {
1276 return op() == GetFromArguments || op() == PutToArguments;
1277 }
1278
1279 DirectArgumentsOffset capturedArgumentsOffset()
1280 {
1281 ASSERT(hasDirectArgumentsOffset());
1282 return DirectArgumentsOffset(m_opInfo.as<uint32_t>());
1283 }
1284
1285 bool hasRegisterPointer()
1286 {
1287 return op() == GetGlobalVar || op() == GetGlobalLexicalVariable || op() == PutGlobalVariable;
1288 }
1289
1290 WriteBarrier<Unknown>* variablePointer()
1291 {
1292 return m_opInfo.as<WriteBarrier<Unknown>*>();
1293 }
1294
1295 bool hasCallVarargsData()
1296 {
1297 switch (op()) {
1298 case CallVarargs:
1299 case CallForwardVarargs:
1300 case TailCallVarargs:
1301 case TailCallForwardVarargs:
1302 case TailCallVarargsInlinedCaller:
1303 case TailCallForwardVarargsInlinedCaller:
1304 case ConstructVarargs:
1305 case ConstructForwardVarargs:
1306 return true;
1307 default:
1308 return false;
1309 }
1310 }
1311
1312 CallVarargsData* callVarargsData()
1313 {
1314 ASSERT(hasCallVarargsData());
1315 return m_opInfo.as<CallVarargsData*>();
1316 }
1317
1318 bool hasLoadVarargsData()
1319 {
1320 return op() == LoadVarargs || op() == ForwardVarargs;
1321 }
1322
1323 LoadVarargsData* loadVarargsData()
1324 {
1325 ASSERT(hasLoadVarargsData());
1326 return m_opInfo.as<LoadVarargsData*>();
1327 }
1328
1329 InlineCallFrame* argumentsInlineCallFrame()
1330 {
1331 ASSERT(op() == GetArgumentCountIncludingThis);
1332 return m_opInfo.as<InlineCallFrame*>();
1333 }
1334
1335 bool hasQueriedType()
1336 {
1337 return op() == IsCellWithType;
1338 }
1339
1340 JSType queriedType()
1341 {
1342 static_assert(std::is_same<uint8_t, std::underlying_type<JSType>::type>::value, "Ensure that uint8_t is the underlying type for JSType.");
1343 return static_cast<JSType>(m_opInfo.as<uint32_t>());
1344 }
1345
1346 bool hasSpeculatedTypeForQuery()
1347 {
1348 return op() == IsCellWithType;
1349 }
1350
1351 SpeculatedType speculatedTypeForQuery()
1352 {
1353 return speculationFromJSType(queriedType());
1354 }
1355
1356 bool hasResult()
1357 {
1358 return !!result();
1359 }
1360
1361 bool hasInt32Result()
1362 {
1363 return result() == NodeResultInt32;
1364 }
1365
1366 bool hasInt52Result()
1367 {
1368 return result() == NodeResultInt52;
1369 }
1370
1371 bool hasNumberResult()
1372 {
1373 return result() == NodeResultNumber;
1374 }
1375
1376 bool hasNumberOrAnyIntResult()
1377 {
1378 return hasNumberResult() || hasInt32Result() || hasInt52Result();
1379 }
1380
1381 bool hasNumericResult()
1382 {
1383 switch (op()) {
1384 case ValueSub:
1385 case ValueMul:
1386 case ValueBitAnd:
1387 case ValueBitOr:
1388 case ValueBitXor:
1389 case ValueNegate:
1390 return true;
1391 default:
1392 return false;
1393 }
1394 }
1395
1396 bool hasDoubleResult()
1397 {
1398 return result() == NodeResultDouble;
1399 }
1400
1401 bool hasJSResult()
1402 {
1403 return result() == NodeResultJS;
1404 }
1405
1406 bool hasBooleanResult()
1407 {
1408 return result() == NodeResultBoolean;
1409 }
1410
1411 bool hasStorageResult()
1412 {
1413 return result() == NodeResultStorage;
1414 }
1415
1416 UseKind defaultUseKind()
1417 {
1418 return useKindForResult(result());
1419 }
1420
1421 Edge defaultEdge()
1422 {
1423 return Edge(this, defaultUseKind());
1424 }
1425
1426 bool isJump()
1427 {
1428 return op() == Jump;
1429 }
1430
1431 bool isBranch()
1432 {
1433 return op() == Branch;
1434 }
1435
1436 bool isSwitch() const
1437 {
1438 return op() == Switch;
1439 }
1440
1441 bool isEntrySwitch() const
1442 {
1443 return op() == EntrySwitch;
1444 }
1445
1446 bool isTerminal()
1447 {
1448 switch (op()) {
1449 case Jump:
1450 case Branch:
1451 case Switch:
1452 case EntrySwitch:
1453 case Return:
1454 case TailCall:
1455 case DirectTailCall:
1456 case TailCallVarargs:
1457 case TailCallForwardVarargs:
1458 case Unreachable:
1459 case Throw:
1460 case ThrowStaticError:
1461 return true;
1462 default:
1463 return false;
1464 }
1465 }
1466
1467 bool isFunctionTerminal()
1468 {
1469 if (isTerminal() && !numSuccessors())
1470 return true;
1471
1472 return false;
1473 }
1474
    // As is described in DFGNodeType.h's ForceOSRExit, this is a pseudo-terminal.
    // It means that execution should fall out of the DFG at this point, but
    // execution does continue in the basic block - just in a different compiler.
    // FIXME: This is used for a lightweight reachability decision, but ideally it
    // should be replaced with reachability based on AI (abstract interpretation).
1480 bool isPseudoTerminal()
1481 {
1482 switch (op()) {
1483 case ForceOSRExit:
1484 case CheckBadCell:
1485 return true;
1486 default:
1487 return false;
1488 }
1489 }
1490
1491 unsigned targetBytecodeOffsetDuringParsing()
1492 {
1493 ASSERT(isJump());
1494 return m_opInfo.as<unsigned>();
1495 }
1496
1497 BasicBlock*& targetBlock()
1498 {
1499 ASSERT(isJump());
1500 return *bitwise_cast<BasicBlock**>(&m_opInfo.u.pointer);
1501 }
1502
1503 BranchData* branchData()
1504 {
1505 ASSERT(isBranch());
1506 return m_opInfo.as<BranchData*>();
1507 }
1508
1509 SwitchData* switchData()
1510 {
1511 ASSERT(isSwitch());
1512 return m_opInfo.as<SwitchData*>();
1513 }
1514
1515 EntrySwitchData* entrySwitchData()
1516 {
1517 ASSERT(isEntrySwitch());
1518 return m_opInfo.as<EntrySwitchData*>();
1519 }
1520
1521 Intrinsic intrinsic()
1522 {
1523 RELEASE_ASSERT(op() == CPUIntrinsic);
1524 return m_opInfo.as<Intrinsic>();
1525 }
1526
1527 unsigned numSuccessors()
1528 {
1529 switch (op()) {
1530 case Jump:
1531 return 1;
1532 case Branch:
1533 return 2;
1534 case Switch:
1535 return switchData()->cases.size() + 1;
1536 case EntrySwitch:
1537 return entrySwitchData()->cases.size();
1538 default:
1539 return 0;
1540 }
1541 }
1542
1543 BasicBlock*& successor(unsigned index)
1544 {
1545 if (isSwitch()) {
1546 if (index < switchData()->cases.size())
1547 return switchData()->cases[index].target.block;
1548 RELEASE_ASSERT(index == switchData()->cases.size());
1549 return switchData()->fallThrough.block;
1550 } else if (isEntrySwitch())
1551 return entrySwitchData()->cases[index];
1552
1553 switch (index) {
1554 case 0:
1555 if (isJump())
1556 return targetBlock();
1557 return branchData()->taken.block;
1558 case 1:
1559 return branchData()->notTaken.block;
1560 default:
1561 RELEASE_ASSERT_NOT_REACHED();
1562 return targetBlock();
1563 }
1564 }
1565
1566 class SuccessorsIterable {
1567 public:
1568 SuccessorsIterable()
1569 : m_terminal(nullptr)
1570 {
1571 }
1572
1573 SuccessorsIterable(Node* terminal)
1574 : m_terminal(terminal)
1575 {
1576 }
1577
1578 class iterator {
1579 public:
1580 iterator()
1581 : m_terminal(nullptr)
1582 , m_index(UINT_MAX)
1583 {
1584 }
1585
1586 iterator(Node* terminal, unsigned index)
1587 : m_terminal(terminal)
1588 , m_index(index)
1589 {
1590 }
1591
1592 BasicBlock* operator*()
1593 {
1594 return m_terminal->successor(m_index);
1595 }
1596
1597 iterator& operator++()
1598 {
1599 m_index++;
1600 return *this;
1601 }
1602
1603 bool operator==(const iterator& other) const
1604 {
1605 return m_index == other.m_index;
1606 }
1607
1608 bool operator!=(const iterator& other) const
1609 {
1610 return !(*this == other);
1611 }
1612 private:
1613 Node* m_terminal;
1614 unsigned m_index;
1615 };
1616
1617 iterator begin()
1618 {
1619 return iterator(m_terminal, 0);
1620 }
1621
1622 iterator end()
1623 {
1624 return iterator(m_terminal, m_terminal->numSuccessors());
1625 }
1626
1627 size_t size() const { return m_terminal->numSuccessors(); }
1628 BasicBlock* at(size_t index) const { return m_terminal->successor(index); }
1629 BasicBlock* operator[](size_t index) const { return at(index); }
1630
1631 private:
1632 Node* m_terminal;
1633 };
1634
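    // A minimal usage sketch: successors() lets a caller walk a terminal's
    // outgoing edges uniformly, whether this node is a Jump, Branch, Switch, or
    // EntrySwitch, e.g.
    //
    //     for (BasicBlock* successor : terminal->successors())
    //         worklist.append(successor); // "worklist" is just an illustrative name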
1635 SuccessorsIterable successors()
1636 {
1637 return SuccessorsIterable(this);
1638 }
1639
1640 BasicBlock*& successorForCondition(bool condition)
1641 {
1642 return branchData()->forCondition(condition);
1643 }
1644
1645 bool hasHeapPrediction()
1646 {
1647 switch (op()) {
1648 case ArithAbs:
1649 case ArithRound:
1650 case ArithFloor:
1651 case ArithCeil:
1652 case ArithTrunc:
1653 case GetDirectPname:
1654 case GetById:
1655 case GetByIdFlush:
1656 case GetByIdWithThis:
1657 case GetByIdDirect:
1658 case GetByIdDirectFlush:
1659 case GetPrototypeOf:
1660 case TryGetById:
1661 case GetByVal:
1662 case GetByValWithThis:
1663 case Call:
1664 case DirectCall:
1665 case TailCallInlinedCaller:
1666 case DirectTailCallInlinedCaller:
1667 case Construct:
1668 case DirectConstruct:
1669 case CallVarargs:
1670 case CallEval:
1671 case TailCallVarargsInlinedCaller:
1672 case ConstructVarargs:
1673 case CallForwardVarargs:
1674 case TailCallForwardVarargsInlinedCaller:
1675 case GetByOffset:
1676 case MultiGetByOffset:
1677 case GetClosureVar:
1678 case GetFromArguments:
1679 case GetArgument:
1680 case ArrayPop:
1681 case ArrayPush:
1682 case RegExpExec:
1683 case RegExpExecNonGlobalOrSticky:
1684 case RegExpTest:
1685 case RegExpMatchFast:
1686 case RegExpMatchFastGlobal:
1687 case GetGlobalVar:
1688 case GetGlobalLexicalVariable:
1689 case StringReplace:
1690 case StringReplaceRegExp:
1691 case ToNumber:
1692 case ToObject:
1693 case ValueBitAnd:
1694 case ValueBitOr:
1695 case ValueBitXor:
1696 case ValueBitNot:
1697 case CallObjectConstructor:
1698 case LoadKeyFromMapBucket:
1699 case LoadValueFromMapBucket:
1700 case CallDOMGetter:
1701 case CallDOM:
1702 case ParseInt:
1703 case AtomicsAdd:
1704 case AtomicsAnd:
1705 case AtomicsCompareExchange:
1706 case AtomicsExchange:
1707 case AtomicsLoad:
1708 case AtomicsOr:
1709 case AtomicsStore:
1710 case AtomicsSub:
1711 case AtomicsXor:
1712 case GetDynamicVar:
1713 case ExtractValueFromWeakMapGet:
1714 case ToThis:
1715 case DataViewGetInt:
1716 case DataViewGetFloat:
1717 return true;
1718 default:
1719 return false;
1720 }
1721 }
1722
1723 SpeculatedType getHeapPrediction()
1724 {
1725 ASSERT(hasHeapPrediction());
1726 return m_opInfo2.as<SpeculatedType>();
1727 }
1728
1729 void setHeapPrediction(SpeculatedType prediction)
1730 {
1731 ASSERT(hasHeapPrediction());
1732 m_opInfo2 = prediction;
1733 }
1734
1735 SpeculatedType getForcedPrediction()
1736 {
1737 ASSERT(op() == IdentityWithProfile);
1738 return m_opInfo.as<SpeculatedType>();
1739 }
1740
1741 uint32_t catchOSREntryIndex() const
1742 {
1743 ASSERT(op() == ExtractCatchLocal);
1744 return m_opInfo.as<uint32_t>();
1745 }
1746
1747 SpeculatedType catchLocalPrediction()
1748 {
1749 ASSERT(op() == ExtractCatchLocal);
1750 return m_opInfo2.as<SpeculatedType>();
1751 }
1752
1753 bool hasCellOperand()
1754 {
1755 switch (op()) {
1756 case CheckCell:
1757 case OverridesHasInstance:
1758 case NewFunction:
1759 case NewGeneratorFunction:
1760 case NewAsyncFunction:
1761 case NewAsyncGeneratorFunction:
1762 case CreateActivation:
1763 case MaterializeCreateActivation:
1764 case NewRegexp:
1765 case NewArrayBuffer:
1766 case PhantomNewArrayBuffer:
1767 case CompareEqPtr:
1768 case CallObjectConstructor:
1769 case DirectCall:
1770 case DirectTailCall:
1771 case DirectConstruct:
1772 case DirectTailCallInlinedCaller:
1773 case RegExpExecNonGlobalOrSticky:
1774 case RegExpMatchFastGlobal:
1775 return true;
1776 default:
1777 return false;
1778 }
1779 }
1780
1781 FrozenValue* cellOperand()
1782 {
1783 ASSERT(hasCellOperand());
1784 return m_opInfo.as<FrozenValue*>();
1785 }
1786
1787 template<typename T>
1788 T castOperand()
1789 {
1790 return cellOperand()->cast<T>();
1791 }
1792
1793 void setCellOperand(FrozenValue* value)
1794 {
1795 ASSERT(hasCellOperand());
1796 m_opInfo = value;
1797 }
1798
1799 bool hasWatchpointSet()
1800 {
1801 return op() == NotifyWrite;
1802 }
1803
1804 WatchpointSet* watchpointSet()
1805 {
1806 ASSERT(hasWatchpointSet());
1807 return m_opInfo.as<WatchpointSet*>();
1808 }
1809
1810 bool hasStoragePointer()
1811 {
1812 return op() == ConstantStoragePointer;
1813 }
1814
1815 void* storagePointer()
1816 {
1817 ASSERT(hasStoragePointer());
1818 return m_opInfo.as<void*>();
1819 }
1820
1821 bool hasUidOperand()
1822 {
1823 return op() == CheckStringIdent;
1824 }
1825
1826 UniquedStringImpl* uidOperand()
1827 {
1828 ASSERT(hasUidOperand());
1829 return m_opInfo.as<UniquedStringImpl*>();
1830 }
1831
1832 bool hasTypeInfoOperand()
1833 {
1834 return op() == CheckTypeInfoFlags;
1835 }
1836
1837 unsigned typeInfoOperand()
1838 {
1839 ASSERT(hasTypeInfoOperand() && m_opInfo.as<uint32_t>() <= static_cast<uint32_t>(UCHAR_MAX));
1840 return m_opInfo.as<uint32_t>();
1841 }
1842
1843 bool hasTransition()
1844 {
1845 switch (op()) {
1846 case PutStructure:
1847 case AllocatePropertyStorage:
1848 case ReallocatePropertyStorage:
1849 return true;
1850 default:
1851 return false;
1852 }
1853 }
1854
1855 Transition* transition()
1856 {
1857 ASSERT(hasTransition());
1858 return m_opInfo.as<Transition*>();
1859 }
1860
1861 bool hasStructureSet()
1862 {
1863 switch (op()) {
1864 case CheckStructure:
1865 case CheckStructureOrEmpty:
1866 case CheckStructureImmediate:
1867 case MaterializeNewObject:
1868 return true;
1869 default:
1870 return false;
1871 }
1872 }
1873
1874 const RegisteredStructureSet& structureSet()
1875 {
1876 ASSERT(hasStructureSet());
1877 return *m_opInfo.as<RegisteredStructureSet*>();
1878 }
1879
1880 bool hasStructure()
1881 {
1882 switch (op()) {
1883 case ArrayifyToStructure:
1884 case NewObject:
1885 case NewStringObject:
1886 return true;
1887 default:
1888 return false;
1889 }
1890 }
1891
1892 RegisteredStructure structure()
1893 {
1894 ASSERT(hasStructure());
1895 return m_opInfo.asRegisteredStructure();
1896 }
1897
1898 bool hasStorageAccessData()
1899 {
1900 switch (op()) {
1901 case GetByOffset:
1902 case PutByOffset:
1903 case GetGetterSetterByOffset:
1904 return true;
1905 default:
1906 return false;
1907 }
1908 }
1909
1910 StorageAccessData& storageAccessData()
1911 {
1912 ASSERT(hasStorageAccessData());
1913 return *m_opInfo.as<StorageAccessData*>();
1914 }
1915
1916 bool hasMultiGetByOffsetData()
1917 {
1918 return op() == MultiGetByOffset;
1919 }
1920
1921 MultiGetByOffsetData& multiGetByOffsetData()
1922 {
1923 ASSERT(hasMultiGetByOffsetData());
1924 return *m_opInfo.as<MultiGetByOffsetData*>();
1925 }
1926
1927 bool hasMultiPutByOffsetData()
1928 {
1929 return op() == MultiPutByOffset;
1930 }
1931
1932 MultiPutByOffsetData& multiPutByOffsetData()
1933 {
1934 ASSERT(hasMultiPutByOffsetData());
1935 return *m_opInfo.as<MultiPutByOffsetData*>();
1936 }
1937
1938 bool hasMatchStructureData()
1939 {
1940 return op() == MatchStructure;
1941 }
1942
1943 MatchStructureData& matchStructureData()
1944 {
1945 ASSERT(hasMatchStructureData());
1946 return *m_opInfo.as<MatchStructureData*>();
1947 }
1948
1949 bool hasObjectMaterializationData()
1950 {
1951 switch (op()) {
1952 case MaterializeNewObject:
1953 case MaterializeCreateActivation:
1954 return true;
1955
1956 default:
1957 return false;
1958 }
1959 }
1960
1961 ObjectMaterializationData& objectMaterializationData()
1962 {
1963 ASSERT(hasObjectMaterializationData());
1964 return *m_opInfo2.as<ObjectMaterializationData*>();
1965 }
1966
1967 bool isObjectAllocation()
1968 {
1969 switch (op()) {
1970 case NewObject:
1971 case MaterializeNewObject:
1972 return true;
1973 default:
1974 return false;
1975 }
1976 }
1977
1978 bool isPhantomObjectAllocation()
1979 {
1980 switch (op()) {
1981 case PhantomNewObject:
1982 return true;
1983 default:
1984 return false;
1985 }
1986 }
1987
1988 bool isActivationAllocation()
1989 {
1990 switch (op()) {
1991 case CreateActivation:
1992 case MaterializeCreateActivation:
1993 return true;
1994 default:
1995 return false;
1996 }
1997 }
1998
1999 bool isPhantomActivationAllocation()
2000 {
2001 switch (op()) {
2002 case PhantomCreateActivation:
2003 return true;
2004 default:
2005 return false;
2006 }
2007 }
2008
2009 bool isFunctionAllocation()
2010 {
2011 switch (op()) {
2012 case NewFunction:
2013 case NewGeneratorFunction:
2014 case NewAsyncGeneratorFunction:
2015 case NewAsyncFunction:
2016 return true;
2017 default:
2018 return false;
2019 }
2020 }
2021
2022 bool isPhantomFunctionAllocation()
2023 {
2024 switch (op()) {
2025 case PhantomNewFunction:
2026 case PhantomNewGeneratorFunction:
2027 case PhantomNewAsyncFunction:
2028 case PhantomNewAsyncGeneratorFunction:
2029 return true;
2030 default:
2031 return false;
2032 }
2033 }
2034
2035 bool isPhantomAllocation()
2036 {
2037 switch (op()) {
2038 case PhantomNewObject:
2039 case PhantomDirectArguments:
2040 case PhantomCreateRest:
2041 case PhantomSpread:
2042 case PhantomNewArrayWithSpread:
2043 case PhantomNewArrayBuffer:
2044 case PhantomClonedArguments:
2045 case PhantomNewFunction:
2046 case PhantomNewGeneratorFunction:
2047 case PhantomNewAsyncFunction:
2048 case PhantomNewAsyncGeneratorFunction:
2049 case PhantomCreateActivation:
2050 case PhantomNewRegexp:
2051 return true;
2052 default:
2053 return false;
2054 }
2055 }
2056
2057 bool hasArrayMode()
2058 {
2059 switch (op()) {
2060 case GetIndexedPropertyStorage:
2061 case GetArrayLength:
2062 case GetVectorLength:
2063 case InByVal:
2064 case PutByValDirect:
2065 case PutByVal:
2066 case PutByValAlias:
2067 case GetByVal:
2068 case StringCharAt:
2069 case StringCharCodeAt:
2070 case CheckArray:
2071 case Arrayify:
2072 case ArrayifyToStructure:
2073 case ArrayPush:
2074 case ArrayPop:
2075 case ArrayIndexOf:
2076 case HasIndexedProperty:
2077 case AtomicsAdd:
2078 case AtomicsAnd:
2079 case AtomicsCompareExchange:
2080 case AtomicsExchange:
2081 case AtomicsLoad:
2082 case AtomicsOr:
2083 case AtomicsStore:
2084 case AtomicsSub:
2085 case AtomicsXor:
2086 return true;
2087 default:
2088 return false;
2089 }
2090 }
2091
2092 ArrayMode arrayMode()
2093 {
2094 ASSERT(hasArrayMode());
2095 if (op() == ArrayifyToStructure)
2096 return ArrayMode::fromWord(m_opInfo2.as<uint32_t>());
2097 return ArrayMode::fromWord(m_opInfo.as<uint32_t>());
2098 }
2099
2100 bool setArrayMode(ArrayMode arrayMode)
2101 {
2102 ASSERT(hasArrayMode());
2103 if (this->arrayMode() == arrayMode)
2104 return false;
2105 m_opInfo = arrayMode.asWord();
2106 return true;
2107 }
2108
2109 bool hasArithMode()
2110 {
2111 switch (op()) {
2112 case ArithAbs:
2113 case ArithAdd:
2114 case ArithSub:
2115 case ArithNegate:
2116 case ArithMul:
2117 case ArithDiv:
2118 case ArithMod:
2119 case UInt32ToNumber:
2120 case DoubleAsInt32:
2121 return true;
2122 default:
2123 return false;
2124 }
2125 }
2126
2127 Arith::Mode arithMode()
2128 {
2129 ASSERT(hasArithMode());
2130 return static_cast<Arith::Mode>(m_opInfo.as<uint32_t>());
2131 }
2132
2133 void setArithMode(Arith::Mode mode)
2134 {
2135 m_opInfo = mode;
2136 }
2137
2138 bool hasArithRoundingMode()
2139 {
2140 return op() == ArithRound || op() == ArithFloor || op() == ArithCeil || op() == ArithTrunc;
2141 }
2142
2143 Arith::RoundingMode arithRoundingMode()
2144 {
2145 ASSERT(hasArithRoundingMode());
2146 return static_cast<Arith::RoundingMode>(m_opInfo.as<uint32_t>());
2147 }
2148
2149 void setArithRoundingMode(Arith::RoundingMode mode)
2150 {
2151 ASSERT(hasArithRoundingMode());
2152 m_opInfo = static_cast<uint32_t>(mode);
2153 }
2154
2155 bool hasArithUnaryType()
2156 {
2157 return op() == ArithUnary;
2158 }
2159
2160 Arith::UnaryType arithUnaryType()
2161 {
2162 ASSERT(hasArithUnaryType());
2163 return static_cast<Arith::UnaryType>(m_opInfo.as<uint32_t>());
2164 }
2165
2166 bool hasVirtualRegister()
2167 {
2168 return m_virtualRegister.isValid();
2169 }
2170
2171 VirtualRegister virtualRegister()
2172 {
2173 ASSERT(hasResult());
2174 ASSERT(m_virtualRegister.isValid());
2175 return m_virtualRegister;
2176 }
2177
2178 void setVirtualRegister(VirtualRegister virtualRegister)
2179 {
2180 ASSERT(hasResult());
2181 ASSERT(!m_virtualRegister.isValid());
2182 m_virtualRegister = virtualRegister;
2183 }
2184
2185 bool hasExecutionCounter()
2186 {
2187 return op() == CountExecution;
2188 }
2189
2190 Profiler::ExecutionCounter* executionCounter()
2191 {
2192 return m_opInfo.as<Profiler::ExecutionCounter*>();
2193 }
2194
2195 unsigned entrypointIndex()
2196 {
2197 ASSERT(op() == InitializeEntrypointArguments);
2198 return m_opInfo.as<unsigned>();
2199 }
2200
2201 DataViewData dataViewData()
2202 {
2203 ASSERT(op() == DataViewGetInt || op() == DataViewGetFloat || op() == DataViewSet);
2204 return bitwise_cast<DataViewData>(m_opInfo.as<uint64_t>());
2205 }
2206
2207 bool shouldGenerate()
2208 {
2209 return m_refCount;
2210 }
2211
2212 // Return true if the execution of this Node does not affect our ability to OSR to the FTL.
2213 // FIXME: Isn't this just like checking if the node has effects?
2214 bool isSemanticallySkippable()
2215 {
2216 return op() == CountExecution || op() == InvalidationPoint;
2217 }
2218
2219 unsigned refCount()
2220 {
2221 return m_refCount;
2222 }
2223
2224 unsigned postfixRef()
2225 {
2226 return m_refCount++;
2227 }
2228
2229 unsigned adjustedRefCount()
2230 {
2231 return mustGenerate() ? m_refCount - 1 : m_refCount;
2232 }
2233
2234 void setRefCount(unsigned refCount)
2235 {
2236 m_refCount = refCount;
2237 }
2238
2239 Edge& child1()
2240 {
2241 ASSERT(!(m_flags & NodeHasVarArgs));
2242 return children.child1();
2243 }
2244
2245 // This is useful if you want to do a fast check on the first child
2246 // before also doing a check on the opcode. Use this with care and
2247 // avoid it if possible.
2248 Edge child1Unchecked()
2249 {
2250 return children.child1Unchecked();
2251 }
2252
2253 Edge& child2()
2254 {
2255 ASSERT(!(m_flags & NodeHasVarArgs));
2256 return children.child2();
2257 }
2258
2259 Edge& child3()
2260 {
2261 ASSERT(!(m_flags & NodeHasVarArgs));
2262 return children.child3();
2263 }
2264
2265 unsigned firstChild()
2266 {
2267 ASSERT(m_flags & NodeHasVarArgs);
2268 return children.firstChild();
2269 }
2270
2271 unsigned numChildren()
2272 {
2273 ASSERT(m_flags & NodeHasVarArgs);
2274 return children.numChildren();
2275 }
2276
2277 UseKind binaryUseKind()
2278 {
2279 ASSERT(child1().useKind() == child2().useKind());
2280 return child1().useKind();
2281 }
2282
2283 bool isBinaryUseKind(UseKind left, UseKind right)
2284 {
2285 return child1().useKind() == left && child2().useKind() == right;
2286 }
2287
2288 bool isBinaryUseKind(UseKind useKind)
2289 {
2290 return isBinaryUseKind(useKind, useKind);
2291 }
2292
2293 Edge childFor(UseKind useKind)
2294 {
2295 if (child1().useKind() == useKind)
2296 return child1();
2297 if (child2().useKind() == useKind)
2298 return child2();
2299 if (child3().useKind() == useKind)
2300 return child3();
2301 return Edge();
2302 }
2303
2304 SpeculatedType prediction()
2305 {
2306 return m_prediction;
2307 }
2308
2309 bool predict(SpeculatedType prediction)
2310 {
2311 return mergeSpeculation(m_prediction, prediction);
2312 }
2313
2314 bool shouldSpeculateInt32()
2315 {
2316 return isInt32Speculation(prediction());
2317 }
2318
2319 bool shouldSpeculateNotInt32()
2320 {
2321 return isNotInt32Speculation(prediction());
2322 }
2323
2324 bool sawBooleans()
2325 {
2326 return !!(prediction() & SpecBoolean);
2327 }
2328
2329 bool shouldSpeculateInt32OrBoolean()
2330 {
2331 return isInt32OrBooleanSpeculation(prediction());
2332 }
2333
2334 bool shouldSpeculateInt32ForArithmetic()
2335 {
2336 return isInt32SpeculationForArithmetic(prediction());
2337 }
2338
2339 bool shouldSpeculateInt32OrBooleanForArithmetic()
2340 {
2341 return isInt32OrBooleanSpeculationForArithmetic(prediction());
2342 }
2343
2344 bool shouldSpeculateInt32OrBooleanExpectingDefined()
2345 {
2346 return isInt32OrBooleanSpeculationExpectingDefined(prediction());
2347 }
2348
2349 bool shouldSpeculateInt52()
2350 {
2351 // We have to include SpecInt32Only here for two reasons:
2352 // 1. We diligently write code that first checks if we should speculate Int32.
2353 // For example:
2354 // if (shouldSpeculateInt32()) ...
2355 // else if (shouldSpeculateInt52()) ...
        //    This means it's totally valid to speculate Int52 when we're dealing
2357 // with a type that's the union of Int32 and Int52.
2358 //
2359 // It would be a performance mistake to not include Int32 here because we obviously
2360 // have variables that are the union of Int32 and Int52 values, and it's better
2361 // to speculate Int52 than double in that situation.
2362 //
        // 2. We also write code where we ask if the inputs can be Int52. For example, if
        //    we know via profiling that an Add overflows, we may not emit an Int32 add.
        //    However, we only emit an Int52 add if both inputs can be Int52, and Int32
        //    can trivially become Int52.
2367 //
2368 return enableInt52() && isInt32OrInt52Speculation(prediction());
2369 }
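
    // A fuller sketch of the idiom described in the comment above (illustrative only; the
    // real lowering code lives elsewhere in the DFG). The predicates are consulted in
    // order, so the Int52 path also covers values that are the union of Int32 and Int52:
    //
    //     if (node->shouldSpeculateInt32())
    //         ... // Int32 path
    //     else if (node->shouldSpeculateInt52())
    //         ... // Int52 path
    //     else if (node->shouldSpeculateNumber())
    //         ... // double path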
2370
2371 bool shouldSpeculateDouble()
2372 {
2373 return isDoubleSpeculation(prediction());
2374 }
2375
2376 bool shouldSpeculateDoubleReal()
2377 {
2378 return isDoubleRealSpeculation(prediction());
2379 }
2380
2381 bool shouldSpeculateNumber()
2382 {
2383 return isFullNumberSpeculation(prediction());
2384 }
2385
2386 bool shouldSpeculateNumberOrBoolean()
2387 {
2388 return isFullNumberOrBooleanSpeculation(prediction());
2389 }
2390
2391 bool shouldSpeculateNumberOrBooleanExpectingDefined()
2392 {
2393 return isFullNumberOrBooleanSpeculationExpectingDefined(prediction());
2394 }
2395
2396 bool shouldSpeculateBoolean()
2397 {
2398 return isBooleanSpeculation(prediction());
2399 }
2400
2401 bool shouldSpeculateNotBoolean()
2402 {
2403 return isNotBooleanSpeculation(prediction());
2404 }
2405
2406 bool shouldSpeculateOther()
2407 {
2408 return isOtherSpeculation(prediction());
2409 }
2410
2411 bool shouldSpeculateMisc()
2412 {
2413 return isMiscSpeculation(prediction());
2414 }
2415
2416 bool shouldSpeculateStringIdent()
2417 {
2418 return isStringIdentSpeculation(prediction());
2419 }
2420
2421 bool shouldSpeculateNotStringVar()
2422 {
2423 return isNotStringVarSpeculation(prediction());
2424 }
2425
2426 bool shouldSpeculateString()
2427 {
2428 return isStringSpeculation(prediction());
2429 }
2430
2431 bool shouldSpeculateNotString()
2432 {
2433 return isNotStringSpeculation(prediction());
2434 }
2435
2436 bool shouldSpeculateStringOrOther()
2437 {
2438 return isStringOrOtherSpeculation(prediction());
2439 }
2440
2441 bool shouldSpeculateStringObject()
2442 {
2443 return isStringObjectSpeculation(prediction());
2444 }
2445
2446 bool shouldSpeculateStringOrStringObject()
2447 {
2448 return isStringOrStringObjectSpeculation(prediction());
2449 }
2450
2451 bool shouldSpeculateRegExpObject()
2452 {
2453 return isRegExpObjectSpeculation(prediction());
2454 }
2455
2456 bool shouldSpeculateSymbol()
2457 {
2458 return isSymbolSpeculation(prediction());
2459 }
2460
2461 bool shouldSpeculateBigInt()
2462 {
2463 return isBigIntSpeculation(prediction());
2464 }
2465
2466 bool shouldSpeculateFinalObject()
2467 {
2468 return isFinalObjectSpeculation(prediction());
2469 }
2470
2471 bool shouldSpeculateFinalObjectOrOther()
2472 {
2473 return isFinalObjectOrOtherSpeculation(prediction());
2474 }
2475
2476 bool shouldSpeculateArray()
2477 {
2478 return isArraySpeculation(prediction());
2479 }
2480
2481 bool shouldSpeculateFunction()
2482 {
2483 return isFunctionSpeculation(prediction());
2484 }
2485
2486 bool shouldSpeculateProxyObject()
2487 {
2488 return isProxyObjectSpeculation(prediction());
2489 }
2490
2491 bool shouldSpeculateDerivedArray()
2492 {
2493 return isDerivedArraySpeculation(prediction());
2494 }
2495
2496 bool shouldSpeculateDirectArguments()
2497 {
2498 return isDirectArgumentsSpeculation(prediction());
2499 }
2500
2501 bool shouldSpeculateScopedArguments()
2502 {
2503 return isScopedArgumentsSpeculation(prediction());
2504 }
2505
2506 bool shouldSpeculateInt8Array()
2507 {
2508 return isInt8ArraySpeculation(prediction());
2509 }
2510
2511 bool shouldSpeculateInt16Array()
2512 {
2513 return isInt16ArraySpeculation(prediction());
2514 }
2515
2516 bool shouldSpeculateInt32Array()
2517 {
2518 return isInt32ArraySpeculation(prediction());
2519 }
2520
2521 bool shouldSpeculateUint8Array()
2522 {
2523 return isUint8ArraySpeculation(prediction());
2524 }
2525
2526 bool shouldSpeculateUint8ClampedArray()
2527 {
2528 return isUint8ClampedArraySpeculation(prediction());
2529 }
2530
2531 bool shouldSpeculateUint16Array()
2532 {
2533 return isUint16ArraySpeculation(prediction());
2534 }
2535
2536 bool shouldSpeculateUint32Array()
2537 {
2538 return isUint32ArraySpeculation(prediction());
2539 }
2540
2541 bool shouldSpeculateFloat32Array()
2542 {
2543 return isFloat32ArraySpeculation(prediction());
2544 }
2545
2546 bool shouldSpeculateFloat64Array()
2547 {
2548 return isFloat64ArraySpeculation(prediction());
2549 }
2550
2551 bool shouldSpeculateArrayOrOther()
2552 {
2553 return isArrayOrOtherSpeculation(prediction());
2554 }
2555
2556 bool shouldSpeculateObject()
2557 {
2558 return isObjectSpeculation(prediction());
2559 }
2560
2561 bool shouldSpeculateObjectOrOther()
2562 {
2563 return isObjectOrOtherSpeculation(prediction());
2564 }
2565
2566 bool shouldSpeculateCell()
2567 {
2568 return isCellSpeculation(prediction());
2569 }
2570
2571 bool shouldSpeculateCellOrOther()
2572 {
2573 return isCellOrOtherSpeculation(prediction());
2574 }
2575
2576 bool shouldSpeculateNotCell()
2577 {
2578 return isNotCellSpeculation(prediction());
2579 }
2580
2581 bool shouldSpeculateUntypedForArithmetic()
2582 {
2583 return isUntypedSpeculationForArithmetic(prediction());
2584 }
2585
2586 static bool shouldSpeculateUntypedForArithmetic(Node* op1, Node* op2)
2587 {
2588 return op1->shouldSpeculateUntypedForArithmetic() || op2->shouldSpeculateUntypedForArithmetic();
2589 }
2590
2591 bool shouldSpeculateUntypedForBitOps()
2592 {
2593 return isUntypedSpeculationForBitOps(prediction());
2594 }
2595
2596 static bool shouldSpeculateUntypedForBitOps(Node* op1, Node* op2)
2597 {
2598 return op1->shouldSpeculateUntypedForBitOps() || op2->shouldSpeculateUntypedForBitOps();
2599 }
2600
2601 static bool shouldSpeculateBoolean(Node* op1, Node* op2)
2602 {
2603 return op1->shouldSpeculateBoolean() && op2->shouldSpeculateBoolean();
2604 }
2605
2606 static bool shouldSpeculateInt32(Node* op1, Node* op2)
2607 {
2608 return op1->shouldSpeculateInt32() && op2->shouldSpeculateInt32();
2609 }
2610
2611 static bool shouldSpeculateInt32OrBoolean(Node* op1, Node* op2)
2612 {
2613 return op1->shouldSpeculateInt32OrBoolean()
2614 && op2->shouldSpeculateInt32OrBoolean();
2615 }
2616
2617 static bool shouldSpeculateInt32OrBooleanForArithmetic(Node* op1, Node* op2)
2618 {
2619 return op1->shouldSpeculateInt32OrBooleanForArithmetic()
2620 && op2->shouldSpeculateInt32OrBooleanForArithmetic();
2621 }
2622
2623 static bool shouldSpeculateInt32OrBooleanExpectingDefined(Node* op1, Node* op2)
2624 {
2625 return op1->shouldSpeculateInt32OrBooleanExpectingDefined()
2626 && op2->shouldSpeculateInt32OrBooleanExpectingDefined();
2627 }
2628
2629 static bool shouldSpeculateInt52(Node* op1, Node* op2)
2630 {
2631 return enableInt52() && op1->shouldSpeculateInt52() && op2->shouldSpeculateInt52();
2632 }
2633
2634 static bool shouldSpeculateNumber(Node* op1, Node* op2)
2635 {
2636 return op1->shouldSpeculateNumber() && op2->shouldSpeculateNumber();
2637 }
2638
2639 static bool shouldSpeculateNumberOrBoolean(Node* op1, Node* op2)
2640 {
2641 return op1->shouldSpeculateNumberOrBoolean()
2642 && op2->shouldSpeculateNumberOrBoolean();
2643 }
2644
2645 static bool shouldSpeculateNumberOrBooleanExpectingDefined(Node* op1, Node* op2)
2646 {
2647 return op1->shouldSpeculateNumberOrBooleanExpectingDefined()
2648 && op2->shouldSpeculateNumberOrBooleanExpectingDefined();
2649 }
2650
2651 static bool shouldSpeculateSymbol(Node* op1, Node* op2)
2652 {
2653 return op1->shouldSpeculateSymbol() && op2->shouldSpeculateSymbol();
2654 }
2655
2656 static bool shouldSpeculateBigInt(Node* op1, Node* op2)
2657 {
2658 return op1->shouldSpeculateBigInt() && op2->shouldSpeculateBigInt();
2659 }
2660
2661 static bool shouldSpeculateFinalObject(Node* op1, Node* op2)
2662 {
2663 return op1->shouldSpeculateFinalObject() && op2->shouldSpeculateFinalObject();
2664 }
2665
2666 static bool shouldSpeculateArray(Node* op1, Node* op2)
2667 {
2668 return op1->shouldSpeculateArray() && op2->shouldSpeculateArray();
2669 }
2670
2671 bool canSpeculateInt32(RareCaseProfilingSource source)
2672 {
2673 return nodeCanSpeculateInt32(arithNodeFlags(), source);
2674 }
2675
2676 bool canSpeculateInt52(RareCaseProfilingSource source)
2677 {
2678 return nodeCanSpeculateInt52(arithNodeFlags(), source);
2679 }
2680
2681 RareCaseProfilingSource sourceFor(PredictionPass pass)
2682 {
2683 if (pass == PrimaryPass || child1()->sawBooleans() || (child2() && child2()->sawBooleans()))
2684 return DFGRareCase;
2685 return AllRareCases;
2686 }
2687
2688 bool canSpeculateInt32(PredictionPass pass)
2689 {
2690 return canSpeculateInt32(sourceFor(pass));
2691 }
2692
2693 bool canSpeculateInt52(PredictionPass pass)
2694 {
2695 return canSpeculateInt52(sourceFor(pass));
2696 }
2697
2698 bool hasTypeLocation()
2699 {
2700 return op() == ProfileType;
2701 }
2702
2703 TypeLocation* typeLocation()
2704 {
2705 ASSERT(hasTypeLocation());
2706 return m_opInfo.as<TypeLocation*>();
2707 }
2708
2709 bool hasBasicBlockLocation()
2710 {
2711 return op() == ProfileControlFlow;
2712 }
2713
2714 BasicBlockLocation* basicBlockLocation()
2715 {
2716 ASSERT(hasBasicBlockLocation());
2717 return m_opInfo.as<BasicBlockLocation*>();
2718 }
2719
2720 bool hasCallDOMGetterData() const
2721 {
2722 return op() == CallDOMGetter;
2723 }
2724
2725 CallDOMGetterData* callDOMGetterData()
2726 {
2727 ASSERT(hasCallDOMGetterData());
2728 return m_opInfo.as<CallDOMGetterData*>();
2729 }
2730
2731 bool hasClassInfo() const
2732 {
2733 return op() == CheckSubClass;
2734 }
2735
2736 const ClassInfo* classInfo()
2737 {
2738 return m_opInfo.as<const ClassInfo*>();
2739 }
2740
2741 bool hasSignature() const
2742 {
        // Note that this intentionally does not include TailCall node types.
2744 // CallDOM node types are always converted from Call.
2745 return op() == Call || op() == CallDOM;
2746 }
2747
2748 const DOMJIT::Signature* signature()
2749 {
2750 return m_opInfo.as<const DOMJIT::Signature*>();
2751 }
2752
2753 bool hasInternalMethodType() const
2754 {
2755 return op() == HasIndexedProperty;
2756 }
2757
2758 PropertySlot::InternalMethodType internalMethodType() const
2759 {
2760 ASSERT(hasInternalMethodType());
2761 return static_cast<PropertySlot::InternalMethodType>(m_opInfo2.as<uint32_t>());
2762 }
2763
2764 void setInternalMethodType(PropertySlot::InternalMethodType type)
2765 {
2766 ASSERT(hasInternalMethodType());
2767 m_opInfo2 = static_cast<uint32_t>(type);
2768 }
2769
2770 Node* replacement() const
2771 {
2772 return m_misc.replacement;
2773 }
2774
2775 void setReplacement(Node* replacement)
2776 {
2777 m_misc.replacement = replacement;
2778 }
2779
2780 Epoch epoch() const
2781 {
2782 return Epoch::fromUnsigned(m_misc.epoch);
2783 }
2784
2785 void setEpoch(Epoch epoch)
2786 {
2787 m_misc.epoch = epoch.toUnsigned();
2788 }
2789
2790 bool hasNumberOfArgumentsToSkip()
2791 {
2792 return op() == CreateRest || op() == PhantomCreateRest || op() == GetRestLength || op() == GetMyArgumentByVal || op() == GetMyArgumentByValOutOfBounds;
2793 }
2794
2795 unsigned numberOfArgumentsToSkip()
2796 {
2797 ASSERT(hasNumberOfArgumentsToSkip());
2798 return m_opInfo.as<unsigned>();
2799 }
2800
2801 bool hasArgumentIndex()
2802 {
2803 return op() == GetArgument;
2804 }
2805
2806 unsigned argumentIndex()
2807 {
2808 ASSERT(hasArgumentIndex());
2809 return m_opInfo.as<unsigned>();
2810 }
2811
2812 bool hasBucketOwnerType()
2813 {
2814 return op() == GetMapBucketNext || op() == LoadKeyFromMapBucket || op() == LoadValueFromMapBucket;
2815 }
2816
2817 BucketOwnerType bucketOwnerType()
2818 {
2819 ASSERT(hasBucketOwnerType());
2820 return m_opInfo.as<BucketOwnerType>();
2821 }
2822
2823 bool hasValidRadixConstant()
2824 {
2825 return op() == NumberToStringWithValidRadixConstant;
2826 }
2827
2828 int32_t validRadixConstant()
2829 {
2830 ASSERT(hasValidRadixConstant());
2831 return m_opInfo.as<int32_t>();
2832 }
2833
2834 bool hasIgnoreLastIndexIsWritable()
2835 {
2836 return op() == SetRegExpObjectLastIndex;
2837 }
2838
2839 bool ignoreLastIndexIsWritable()
2840 {
2841 ASSERT(hasIgnoreLastIndexIsWritable());
2842 return m_opInfo.as<uint32_t>();
2843 }
2844
2845 uint32_t errorType()
2846 {
2847 ASSERT(op() == ThrowStaticError);
2848 return m_opInfo.as<uint32_t>();
2849 }
2850
2851 bool hasCallLinkStatus()
2852 {
2853 return op() == FilterCallLinkStatus;
2854 }
2855
2856 CallLinkStatus* callLinkStatus()
2857 {
2858 ASSERT(hasCallLinkStatus());
2859 return m_opInfo.as<CallLinkStatus*>();
2860 }
2861
2862 bool hasGetByIdStatus()
2863 {
2864 return op() == FilterGetByIdStatus;
2865 }
2866
2867 GetByIdStatus* getByIdStatus()
2868 {
2869 ASSERT(hasGetByIdStatus());
2870 return m_opInfo.as<GetByIdStatus*>();
2871 }
2872
2873 bool hasInByIdStatus()
2874 {
2875 return op() == FilterInByIdStatus;
2876 }
2877
2878 InByIdStatus* inByIdStatus()
2879 {
2880 ASSERT(hasInByIdStatus());
2881 return m_opInfo.as<InByIdStatus*>();
2882 }
2883
2884 bool hasPutByIdStatus()
2885 {
2886 return op() == FilterPutByIdStatus;
2887 }
2888
2889 PutByIdStatus* putByIdStatus()
2890 {
2891 ASSERT(hasPutByIdStatus());
2892 return m_opInfo.as<PutByIdStatus*>();
2893 }
2894
2895 void dumpChildren(PrintStream& out)
2896 {
2897 if (!child1())
2898 return;
2899 out.printf("@%u", child1()->index());
2900 if (!child2())
2901 return;
2902 out.printf(", @%u", child2()->index());
2903 if (!child3())
2904 return;
2905 out.printf(", @%u", child3()->index());
2906 }
2907
2908 NodeOrigin origin;
2909
2910 // References to up to 3 children, or links to a variable length set of children.
2911 AdjacencyList children;
2912
2913private:
2914 friend class B3::SparseCollection<Node>;
2915
2916 unsigned m_index { std::numeric_limits<unsigned>::max() };
2917 unsigned m_op : 10; // real type is NodeType
2918 unsigned m_flags : 21;
    // The virtual register number (spill location) associated with this node.
2920 VirtualRegister m_virtualRegister;
2921 // The number of uses of the result of this operation (+1 for 'must generate' nodes, which have side-effects).
2922 unsigned m_refCount;
2923 // The prediction ascribed to this node after propagation.
2924 SpeculatedType m_prediction { SpecNone };
    // Immediate values; accesses are type-checked via the accessors above.
2926 struct OpInfoWrapper {
2927 OpInfoWrapper()
2928 {
2929 u.int64 = 0;
2930 }
2931 OpInfoWrapper(uint32_t intValue)
2932 {
2933 u.int64 = 0;
2934 u.int32 = intValue;
2935 }
2936 OpInfoWrapper(uint64_t intValue)
2937 {
2938 u.int64 = intValue;
2939 }
2940 OpInfoWrapper(void* pointer)
2941 {
2942 u.int64 = 0;
2943 u.pointer = pointer;
2944 }
2945 OpInfoWrapper(const void* constPointer)
2946 {
2947 u.int64 = 0;
2948 u.constPointer = constPointer;
2949 }
2950 OpInfoWrapper(RegisteredStructure structure)
2951 {
2952 u.int64 = 0;
2953 u.pointer = bitwise_cast<void*>(structure);
2954 }
2955 OpInfoWrapper& operator=(uint32_t int32)
2956 {
2957 u.int64 = 0;
2958 u.int32 = int32;
2959 return *this;
2960 }
2961 OpInfoWrapper& operator=(int32_t int32)
2962 {
2963 u.int64 = 0;
2964 u.int32 = int32;
2965 return *this;
2966 }
2967 OpInfoWrapper& operator=(uint64_t int64)
2968 {
2969 u.int64 = int64;
2970 return *this;
2971 }
2972 OpInfoWrapper& operator=(void* pointer)
2973 {
2974 u.int64 = 0;
2975 u.pointer = pointer;
2976 return *this;
2977 }
2978 OpInfoWrapper& operator=(const void* constPointer)
2979 {
2980 u.int64 = 0;
2981 u.constPointer = constPointer;
2982 return *this;
2983 }
2984 OpInfoWrapper& operator=(RegisteredStructure structure)
2985 {
2986 u.int64 = 0;
2987 u.pointer = bitwise_cast<void*>(structure);
2988 return *this;
2989 }
2990 OpInfoWrapper& operator=(NewArrayBufferData newArrayBufferData)
2991 {
2992 u.int64 = bitwise_cast<uint64_t>(newArrayBufferData);
2993 return *this;
2994 }
2995 template <typename T>
2996 ALWAYS_INLINE auto as() const -> typename std::enable_if<std::is_pointer<T>::value && !std::is_const<typename std::remove_pointer<T>::type>::value, T>::type
2997 {
2998 return static_cast<T>(u.pointer);
2999 }
3000 template <typename T>
3001 ALWAYS_INLINE auto as() const -> typename std::enable_if<std::is_pointer<T>::value && std::is_const<typename std::remove_pointer<T>::type>::value, T>::type
3002 {
3003 return static_cast<T>(u.constPointer);
3004 }
3005 template <typename T>
3006 ALWAYS_INLINE auto as() const -> typename std::enable_if<(std::is_integral<T>::value || std::is_enum<T>::value) && sizeof(T) <= 4, T>::type
3007 {
3008 return static_cast<T>(u.int32);
3009 }
3010 template <typename T>
3011 ALWAYS_INLINE auto as() const -> typename std::enable_if<(std::is_integral<T>::value || std::is_enum<T>::value) && sizeof(T) == 8, T>::type
3012 {
3013 return static_cast<T>(u.int64);
3014 }
3015 ALWAYS_INLINE RegisteredStructure asRegisteredStructure() const
3016 {
3017 return bitwise_cast<RegisteredStructure>(u.pointer);
3018 }
3019 ALWAYS_INLINE NewArrayBufferData asNewArrayBufferData() const
3020 {
3021 return bitwise_cast<NewArrayBufferData>(u.int64);
3022 }
3023
3024 union {
3025 uint32_t int32;
3026 uint64_t int64;
3027 void* pointer;
3028 const void* constPointer;
3029 } u;
3030 };
3031 OpInfoWrapper m_opInfo;
3032 OpInfoWrapper m_opInfo2;
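
    // Illustrative only (the values are hypothetical): the as<T>() overloads above dispatch
    // on the requested type, which is how the accessors earlier in this class use m_opInfo:
    //
    //     unsigned identifier = m_opInfo.as<unsigned>();          // <= 4-byte integral/enum overload
    //     uint64_t raw = m_opInfo.as<uint64_t>();                 // 8-byte integral overload
    //     TypeLocation* location = m_opInfo.as<TypeLocation*>();  // non-const pointer overload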
3033
3034 // Miscellaneous data that is usually meaningless, but can hold some analysis results
3035 // if you ask right. For example, if you do Graph::initializeNodeOwners(), Node::owner
3036 // will tell you which basic block a node belongs to. You cannot rely on this persisting
3037 // across transformations unless you do the maintenance work yourself. Other phases use
3038 // Node::replacement, but they do so manually: first you do Graph::clearReplacements()
    // and then you set, and use, replacements yourself. Same thing for epoch.
3040 //
3041 // Bottom line: don't use these fields unless you initialize them yourself, or by
3042 // calling some appropriate methods that initialize them the way you want. Otherwise,
3043 // these fields are meaningless.
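    //
    // A hedged sketch of the replacement protocol described above (the local names are
    // illustrative, not prescribed by this header):
    //
    //     graph.clearReplacements();
    //     // ... when folding oldNode into newNode:
    //     oldNode->setReplacement(newNode);
    //     // ... and when rewriting a use:
    //     if (Node* replacement = candidate->replacement())
    //         candidate = replacement;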
3044private:
3045 union {
3046 Node* replacement;
3047 unsigned epoch;
3048 } m_misc;
3049public:
3050 BasicBlock* owner;
3051};
3052
3053// Uncomment this to log NodeSet operations.
3054// typedef LoggingHashSet<Node::HashSetTemplateInstantiationString, Node*> NodeSet;
3055typedef HashSet<Node*> NodeSet;
3056
3057struct NodeComparator {
3058 template<typename NodePtrType>
3059 bool operator()(NodePtrType a, NodePtrType b) const
3060 {
3061 return a->index() < b->index();
3062 }
3063};
3064
3065template<typename T>
3066CString nodeListDump(const T& nodeList)
3067{
3068 return sortedListDump(nodeList, NodeComparator());
3069}
3070
3071template<typename T>
3072CString nodeMapDump(const T& nodeMap, DumpContext* context = 0)
3073{
3074 Vector<typename T::KeyType> keys;
3075 for (
3076 typename T::const_iterator iter = nodeMap.begin();
3077 iter != nodeMap.end(); ++iter)
3078 keys.append(iter->key);
3079 std::sort(keys.begin(), keys.end(), NodeComparator());
3080 StringPrintStream out;
3081 CommaPrinter comma;
    for (unsigned i = 0; i < keys.size(); ++i)
3083 out.print(comma, keys[i], "=>", inContext(nodeMap.get(keys[i]), context));
3084 return out.toCString();
3085}
3086
3087template<typename T>
3088CString nodeValuePairListDump(const T& nodeValuePairList, DumpContext* context = 0)
3089{
3090 using V = typename T::ValueType;
3091 T sortedList = nodeValuePairList;
3092 std::sort(sortedList.begin(), sortedList.end(), [](const V& a, const V& b) {
3093 return NodeComparator()(a.node, b.node);
3094 });
3095
3096 StringPrintStream out;
3097 CommaPrinter comma;
3098 for (const auto& pair : sortedList)
3099 out.print(comma, pair.node, "=>", inContext(pair.value, context));
3100 return out.toCString();
3101}
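
// Illustrative usage of the dump helpers above (valueMap and roots are assumed locals, e.g.
// a HashMap<Node*, SomeValue> and a Vector<Node*>; dataLog is WTF's logging helper):
//
//     dataLog("values: ", nodeMapDump(valueMap), "\n");
//     dataLog("roots: ", nodeListDump(roots), "\n");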
3102
3103} } // namespace JSC::DFG
3104
3105namespace WTF {
3106
3107void printInternal(PrintStream&, JSC::DFG::SwitchKind);
3108void printInternal(PrintStream&, JSC::DFG::Node*);
3109
3110inline JSC::DFG::Node* inContext(JSC::DFG::Node* node, JSC::DumpContext*) { return node; }
3111
3112template<>
3113struct LoggingHashKeyTraits<JSC::DFG::Node*> {
3114 static void print(PrintStream& out, JSC::DFG::Node* key)
3115 {
3116 out.print("bitwise_cast<::JSC::DFG::Node*>(", RawPointer(key), "lu)");
3117 }
3118};
3119
3120} // namespace WTF
3121
3122using WTF::inContext;
3123
#endif // ENABLE(DFG_JIT)
3125