/*
 * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(DFG_JIT)

#include "AssemblyHelpers.h"
#include "BytecodeLivenessAnalysisInlines.h"
#include "CodeBlock.h"
#include "DFGArgumentPosition.h"
#include "DFGBasicBlock.h"
#include "DFGFrozenValue.h"
#include "DFGNode.h"
#include "DFGPlan.h"
#include "DFGPropertyTypeKey.h"
#include "DFGScannable.h"
#include "FullBytecodeLiveness.h"
#include "MethodOfGettingAValueProfile.h"
#include <wtf/BitVector.h>
#include <wtf/HashMap.h>
#include <wtf/StdLibExtras.h>
#include <wtf/StdUnorderedMap.h>
#include <wtf/Vector.h>

namespace WTF {
template <typename T> class SingleRootGraph;
}

namespace JSC {

class CodeBlock;
class ExecState;

namespace DFG {

class BackwardsCFG;
class BackwardsDominators;
class CFG;
class CPSCFG;
class ControlEquivalenceAnalysis;
template <typename T> class Dominators;
template <typename T> class NaturalLoops;
class FlowIndexing;
template<typename> class FlowMap;

using ArgumentsVector = Vector<Node*, 8>;

using SSACFG = CFG;
using CPSDominators = Dominators<CPSCFG>;
using SSADominators = Dominators<SSACFG>;
using CPSNaturalLoops = NaturalLoops<CPSCFG>;
using SSANaturalLoops = NaturalLoops<SSACFG>;

#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do {            \
        Node* _node = (node);                                           \
        if (_node->flags() & NodeHasVarArgs) {                          \
            for (unsigned _childIdx = _node->firstChild();              \
                _childIdx < _node->firstChild() + _node->numChildren(); \
                _childIdx++) {                                          \
                if (!!(graph).m_varArgChildren[_childIdx])              \
                    thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
            }                                                           \
        } else {                                                        \
            for (unsigned _edgeIndex = 0; _edgeIndex < AdjacencyList::Size; _edgeIndex++) { \
                Edge& _edge = _node->children.child(_edgeIndex);        \
                if (!_edge)                                             \
                    break;                                              \
                thingToDo(_node, _edge);                                \
            }                                                           \
        }                                                               \
    } while (false)
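
// Illustrative sketch (not part of the original header): DFG_NODE_DO_TO_CHILDREN is the
// workhorse behind Graph::doToChildren() below. A pass that wants to count a node's live
// edges, var-args or not, could invoke it directly with any callable taking (Node*, Edge&):
//
//     unsigned liveEdges = 0;
//     auto countEdge = [&] (Node*, Edge& edge) {
//         if (edge)
//             liveEdges++;
//     };
//     DFG_NODE_DO_TO_CHILDREN(graph, node, countEdge);
//
// For var-args nodes this walks graph.m_varArgChildren in the index range
// [firstChild, firstChild + numChildren); otherwise it visits child1..child3 and stops at
// the first empty edge.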

#define DFG_ASSERT(graph, node, assertion, ...) do {                    \
        if (!!(assertion))                                              \
            break;                                                      \
        (graph).logAssertionFailure(                                    \
            (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
        CRASH_WITH_SECURITY_IMPLICATION_AND_INFO(__VA_ARGS__);          \
    } while (false)

#define DFG_CRASH(graph, node, reason, ...) do {                        \
        (graph).logAssertionFailure(                                    \
            (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, (reason)); \
        CRASH_WITH_SECURITY_IMPLICATION_AND_INFO(__VA_ARGS__);          \
    } while (false)
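
// Example (illustrative, not from the original header): phases use these macros to validate
// invariants with crash-time context. Both dump the graph and the offending node via
// logAssertionFailure() before crashing; the trailing arguments are forwarded to
// CRASH_WITH_SECURITY_IMPLICATION_AND_INFO as crash-log payload:
//
//     DFG_ASSERT(m_graph, node, node->child1().useKind() == Int32Use, node->op());
//     DFG_CRASH(m_graph, node, "Unhandled node type");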

struct InlineVariableData {
    InlineCallFrame* inlineCallFrame;
    unsigned argumentPositionStart;
    VariableAccessData* calleeVariable;
};

enum AddSpeculationMode {
    DontSpeculateInt32,
    SpeculateInt32AndTruncateConstants,
    SpeculateInt32
};

//
// === Graph ===
//
// The dataflow graph is an ordered vector of nodes.
// The order may be significant for nodes with side-effects (property accesses, value conversions).
// Nodes that are 'dead' remain in the vector with refCount 0.
class Graph : public virtual Scannable {
public:
    Graph(VM&, Plan&);
    ~Graph();

    void changeChild(Edge& edge, Node* newNode)
    {
        edge.setNode(newNode);
    }

    void changeEdge(Edge& edge, Edge newEdge)
    {
        edge = newEdge;
    }

    void compareAndSwap(Edge& edge, Node* oldNode, Node* newNode)
    {
        if (edge.node() != oldNode)
            return;
        changeChild(edge, newNode);
    }

    void compareAndSwap(Edge& edge, Edge oldEdge, Edge newEdge)
    {
        if (edge != oldEdge)
            return;
        changeEdge(edge, newEdge);
    }

    void performSubstitution(Node* node)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++)
                performSubstitutionForEdge(m_varArgChildren[childIdx]);
        } else {
            performSubstitutionForEdge(node->child1());
            performSubstitutionForEdge(node->child2());
            performSubstitutionForEdge(node->child3());
        }
    }

    void performSubstitutionForEdge(Edge& child)
    {
        // Check if this operand is actually unused.
        if (!child)
            return;

        // Check if there is any replacement.
        Node* replacement = child->replacement();
        if (!replacement)
            return;

        child.setNode(replacement);

        // There is definitely a replacement. Assert that the replacement does not
        // have a replacement.
        ASSERT(!child->replacement());
    }
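
    // A minimal sketch of how the substitution helpers above are typically driven
    // (illustrative, not from the original header). Assuming a pass has pointed oldNode at
    // newNode via Node::setReplacement(), a walk over the blocks rewires every use:
    //
    //     for (BasicBlock* block : graph.blocksInNaturalOrder()) {
    //         for (unsigned i = 0; i < block->size(); ++i)
    //             graph.performSubstitution((*block)[i]);
    //     }
    //
    // Note the ASSERT above: replacements must not chain, so a replacement node must itself
    // be replacement-free.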

    template<typename... Params>
    Node* addNode(Params... params)
    {
        return m_nodes.addNew(params...);
    }

    template<typename... Params>
    Node* addNode(SpeculatedType type, Params... params)
    {
        Node* node = m_nodes.addNew(params...);
        node->predict(type);
        return node;
    }
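
    // Example (illustrative): the variadic parameters are forwarded to a Node constructor,
    // so node creation and type prediction can be fused. A hypothetical sketch, assuming the
    // usual JSConstant encoding of an OpInfo holding a FrozenValue*:
    //
    //     Node* constant = graph.addNode(
    //         SpecInt32Only, JSConstant, origin, OpInfo(graph.freeze(jsNumber(42))));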

    void deleteNode(Node*);
    unsigned maxNodeCount() const { return m_nodes.size(); }
    Node* nodeAt(unsigned index) const { return m_nodes[index]; }
    void packNodeIndices();

    void dethread();

    FrozenValue* freeze(JSValue); // We use weak freezing by default.
    FrozenValue* freezeStrong(JSValue); // Shorthand for freeze(value)->strengthenTo(StrongValue).

    void convertToConstant(Node* node, FrozenValue* value);
    void convertToConstant(Node* node, JSValue value);
    void convertToStrongConstant(Node* node, JSValue value);

    RegisteredStructure registerStructure(Structure* structure)
    {
        StructureRegistrationResult ignored;
        return registerStructure(structure, ignored);
    }
    RegisteredStructure registerStructure(Structure*, StructureRegistrationResult&);
    void registerAndWatchStructureTransition(Structure*);
    void assertIsRegistered(Structure* structure);

    // CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
    void dump(PrintStream& = WTF::dataFile(), DumpContext* = nullptr);

    bool terminalsAreValid();

    enum PhiNodeDumpMode { DumpLivePhisOnly, DumpAllPhis };
    void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext*);
    void dump(PrintStream&, Edge);
    void dump(PrintStream&, const char* prefix, Node*, DumpContext* = nullptr);
    static int amountOfNodeWhiteSpace(Node*);
    static void printNodeWhiteSpace(PrintStream&, Node*);

    // Dump the code origin of the given node as a diff from the code origin of the
    // preceding node. Returns true if anything was printed.
    bool dumpCodeOrigin(PrintStream&, const char* prefix, Node*& previousNode, Node* currentNode, DumpContext*);

    AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32, PredictionPass pass)
    {
        ASSERT(add->op() == ValueAdd || add->op() == ValueSub || add->op() == ArithAdd || add->op() == ArithSub);

        RareCaseProfilingSource source = add->sourceFor(pass);

        Node* left = add->child1().node();
        Node* right = add->child2().node();

        if (left->hasConstant())
            return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, right, left, source);
        if (right->hasConstant())
            return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, left, right, source);

        return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32(source)) ? SpeculateInt32 : DontSpeculateInt32;
    }

    AddSpeculationMode valueAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            add->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            pass);
    }

    AddSpeculationMode arithAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanForArithmetic(),
            add->child2()->shouldSpeculateInt32OrBooleanForArithmetic(),
            pass);
    }

    AddSpeculationMode addSpeculationMode(Node* add, PredictionPass pass)
    {
        if (add->op() == ValueAdd)
            return valueAddSpeculationMode(add, pass);

        return arithAddSpeculationMode(add, pass);
    }

    bool addShouldSpeculateInt32(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(add, pass) != DontSpeculateInt32;
    }

    bool addShouldSpeculateInt52(Node* add)
    {
        if (!enableInt52())
            return false;

        Node* left = add->child1().node();
        Node* right = add->child2().node();

        if (hasExitSite(add, Int52Overflow))
            return false;

        if (Node::shouldSpeculateInt52(left, right))
            return true;

        auto shouldSpeculateInt52ForAdd = [] (Node* node) {
            // When a DoubleConstant node appears, it means that the user explicitly wrote the
            // constant in double form instead of integer form (1.0 instead of 1). In that
            // case, we should honor that decision: treating it as an integer is not appropriate.
            if (node->op() == DoubleConstant)
                return false;
            return isIntAnyFormat(node->prediction());
        };

        // Allow Int52 ArithAdd only when one side of the binary operation should be
        // speculated Int52. This is a somewhat conservative decision, because Double-to-Int52
        // conversion is not cheap: frequent back-and-forth conversions between Double and
        // Int52 hurt performance more than they help. If one side of the operation is already
        // Int52, the cost of constructing the ArithAdd is low, since at most one
        // Double-to-Int52 conversion is required. This recovers some regression in assorted
        // tests while keeping the kraken crypto improvements.
        if (!left->shouldSpeculateInt52() && !right->shouldSpeculateInt52())
            return false;

        auto usesAsNumbers = [] (Node* node) {
            NodeFlags flags = node->flags() & NodeBytecodeBackPropMask;
            if (!flags)
                return false;
            return (flags & (NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex)) == flags;
        };

        // Boxing an Int52 back into a JSValue is also not cheap. Thus, we allow Int52
        // addition only when the result is used as a number.
        if (!usesAsNumbers(add))
            return false;

        return shouldSpeculateInt52ForAdd(left) && shouldSpeculateInt52ForAdd(right);
    }

    bool binaryArithShouldSpeculateInt32(Node* node, PredictionPass pass)
    {
        Node* left = node->child1().node();
        Node* right = node->child2().node();

        return Node::shouldSpeculateInt32OrBooleanForArithmetic(left, right)
            && node->canSpeculateInt32(node->sourceFor(pass));
    }

    bool binaryArithShouldSpeculateInt52(Node* node, PredictionPass pass)
    {
        if (!enableInt52())
            return false;

        Node* left = node->child1().node();
        Node* right = node->child2().node();

        return Node::shouldSpeculateInt52(left, right)
            && node->canSpeculateInt52(pass)
            && !hasExitSite(node, Int52Overflow);
    }

    bool unaryArithShouldSpeculateInt32(Node* node, PredictionPass pass)
    {
        return node->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
            && node->canSpeculateInt32(pass);
    }

    bool unaryArithShouldSpeculateInt52(Node* node, PredictionPass pass)
    {
        if (!enableInt52())
            return false;
        return node->child1()->shouldSpeculateInt52()
            && node->canSpeculateInt52(pass)
            && !hasExitSite(node, Int52Overflow);
    }

    bool canOptimizeStringObjectAccess(const CodeOrigin&);

    bool getRegExpPrototypeProperty(JSObject* regExpPrototype, Structure* regExpPrototypeStructure, UniquedStringImpl* uid, JSValue& returnJSValue);

    bool roundShouldSpeculateInt32(Node* arithRound, PredictionPass pass)
    {
        ASSERT(arithRound->op() == ArithRound || arithRound->op() == ArithFloor || arithRound->op() == ArithCeil || arithRound->op() == ArithTrunc);
        return arithRound->canSpeculateInt32(pass) && !hasExitSite(arithRound->origin.semantic, Overflow) && !hasExitSite(arithRound->origin.semantic, NegativeZero);
    }

    static const char* opName(NodeType);

    RegisteredStructureSet* addStructureSet(const StructureSet& structureSet)
    {
        m_structureSets.append();
        RegisteredStructureSet* result = &m_structureSets.last();

        for (Structure* structure : structureSet)
            result->add(registerStructure(structure));

        return result;
    }

    RegisteredStructureSet* addStructureSet(const RegisteredStructureSet& structureSet)
    {
        m_structureSets.append();
        RegisteredStructureSet* result = &m_structureSets.last();

        for (RegisteredStructure structure : structureSet)
            result->add(structure);

        return result;
    }

    JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
    {
        return m_codeBlock->globalObjectFor(codeOrigin);
    }

    JSObject* globalThisObjectFor(CodeOrigin codeOrigin)
    {
        JSGlobalObject* object = globalObjectFor(codeOrigin);
        return jsCast<JSObject*>(object->methodTable(m_vm)->toThis(object, object->globalExec(), NotStrictMode));
    }

    ScriptExecutable* executableFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_codeBlock->ownerExecutable();

        return inlineCallFrame->baselineCodeBlock->ownerExecutable();
    }

    ScriptExecutable* executableFor(const CodeOrigin& codeOrigin)
    {
        return executableFor(codeOrigin.inlineCallFrame());
    }

    CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock;
        return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    }

    CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, m_profiledBlock);
    }

    bool isStrictModeFor(CodeOrigin codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame())
            return m_codeBlock->isStrictMode();
        return codeOrigin.inlineCallFrame()->isStrictMode();
    }

    ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
    {
        return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
    }

    bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
    {
        return globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid();
    }

    bool hasGlobalExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->unlinkedCodeBlock()->hasExitSite(FrequentExitSite(exitKind));
    }

    bool hasExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->unlinkedCodeBlock()->hasExitSite(FrequentExitSite(codeOrigin.bytecodeIndex(), exitKind));
    }

    bool hasExitSite(Node* node, ExitKind exitKind)
    {
        return hasExitSite(node->origin.semantic, exitKind);
    }

    MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node* currentNode, Node* operandNode);

    BlockIndex numBlocks() const { return m_blocks.size(); }
    BasicBlock* block(BlockIndex blockIndex) const { return m_blocks[blockIndex].get(); }
    BasicBlock* lastBlock() const { return block(numBlocks() - 1); }

    void appendBlock(Ref<BasicBlock>&& basicBlock)
    {
        basicBlock->index = m_blocks.size();
        m_blocks.append(WTFMove(basicBlock));
    }

    void killBlock(BlockIndex blockIndex)
    {
        m_blocks[blockIndex] = nullptr;
    }

    void killBlock(BasicBlock* basicBlock)
    {
        killBlock(basicBlock->index);
    }

    void killBlockAndItsContents(BasicBlock*);

    void killUnreachableBlocks();

    void determineReachability();
    void resetReachability();

    void computeRefCounts();

    unsigned varArgNumChildren(Node* node)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return node->numChildren();
    }

    unsigned numChildren(Node* node)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgNumChildren(node);
        return AdjacencyList::Size;
    }

    template <typename Function = bool(*)(Edge)>
    AdjacencyList copyVarargChildren(Node* node, Function filter = [] (Edge) { return true; })
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        unsigned firstChild = m_varArgChildren.size();
        unsigned numChildren = 0;
        doToChildren(node, [&] (Edge edge) {
            if (filter(edge)) {
                ++numChildren;
                m_varArgChildren.append(edge);
            }
        });

        return AdjacencyList(AdjacencyList::Variable, firstChild, numChildren);
    }
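
    // Illustrative use (not from the original header): rebuilding a var-args node's children
    // while dropping empty edges. The returned AdjacencyList points into m_varArgChildren,
    // so it is typically installed back onto a node:
    //
    //     node->children = graph.copyVarargChildren(node, [] (Edge edge) {
    //         return !!edge;
    //     });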

    Edge& varArgChild(Node* node, unsigned index)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return m_varArgChildren[node->firstChild() + index];
    }

    Edge& child(Node* node, unsigned index)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgChild(node, index);
        return node->children.child(index);
    }

    void voteNode(Node* node, unsigned ballot, float weight = 1)
    {
        switch (node->op()) {
        case ValueToInt32:
        case UInt32ToNumber:
            node = node->child1().node();
            break;
        default:
            break;
        }

        if (node->op() == GetLocal)
            node->variableAccessData()->vote(ballot, weight);
    }

    void voteNode(Edge edge, unsigned ballot, float weight = 1)
    {
        voteNode(edge.node(), ballot, weight);
    }

    void voteChildren(Node* node, unsigned ballot, float weight = 1)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild();
                childIdx < node->firstChild() + node->numChildren();
                childIdx++) {
                if (!!m_varArgChildren[childIdx])
                    voteNode(m_varArgChildren[childIdx], ballot, weight);
            }
            return;
        }

        if (!node->child1())
            return;
        voteNode(node->child1(), ballot, weight);
        if (!node->child2())
            return;
        voteNode(node->child2(), ballot, weight);
        if (!node->child3())
            return;
        voteNode(node->child3(), ballot, weight);
    }
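
    // Example (illustrative): prediction propagation uses these ballots to decide whether a
    // variable is better represented as a double. A sketch, assuming the DoubleBallot values
    // (VoteValue, VoteDouble) from DFGVariableAccessData.h:
    //
    //     if (isDoubleSpeculation(node->prediction()))
    //         graph.voteChildren(node, VoteDouble, weight);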

    template<typename T> // T = Node* or Edge
    void substitute(BasicBlock& block, unsigned startIndexInBlock, T oldThing, T newThing)
    {
        for (unsigned indexInBlock = startIndexInBlock; indexInBlock < block.size(); ++indexInBlock) {
            Node* node = block[indexInBlock];
            if (node->flags() & NodeHasVarArgs) {
                for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); ++childIdx) {
                    if (!!m_varArgChildren[childIdx])
                        compareAndSwap(m_varArgChildren[childIdx], oldThing, newThing);
                }
                continue;
            }
            if (!node->child1())
                continue;
            compareAndSwap(node->children.child1(), oldThing, newThing);
            if (!node->child2())
                continue;
            compareAndSwap(node->children.child2(), oldThing, newThing);
            if (!node->child3())
                continue;
            compareAndSwap(node->children.child3(), oldThing, newThing);
        }
    }

    // Use this if you introduce a new GetLocal and you know that you introduced it *before*
    // any GetLocals in the basic block.
    // FIXME: it may be appropriate, in the future, to generalize this to handle GetLocals
    // introduced anywhere in the basic block.
    void substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal);

    void invalidateCFG();
    void invalidateNodeLiveness();

    void clearFlagsOnAllNodes(NodeFlags);

    void clearReplacements();
    void clearEpochs();
    void initializeNodeOwners();

    BlockList blocksInPreOrder();
    BlockList blocksInPostOrder(bool isSafeToValidate = true);

    class NaturalBlockIterable {
    public:
        NaturalBlockIterable()
            : m_graph(nullptr)
        {
        }

        NaturalBlockIterable(Graph& graph)
            : m_graph(&graph)
        {
        }

        class iterator {
        public:
            iterator()
                : m_graph(nullptr)
                , m_index(0)
            {
            }

            iterator(Graph& graph, BlockIndex index)
                : m_graph(&graph)
                , m_index(findNext(index))
            {
            }

            BasicBlock* operator*()
            {
                return m_graph->block(m_index);
            }

            iterator& operator++()
            {
                m_index = findNext(m_index + 1);
                return *this;
            }

            bool operator==(const iterator& other) const
            {
                return m_index == other.m_index;
            }

            bool operator!=(const iterator& other) const
            {
                return !(*this == other);
            }

        private:
            BlockIndex findNext(BlockIndex index)
            {
                while (index < m_graph->numBlocks() && !m_graph->block(index))
                    index++;
                return index;
            }

            Graph* m_graph;
            BlockIndex m_index;
        };

        iterator begin()
        {
            return iterator(*m_graph, 0);
        }

        iterator end()
        {
            return iterator(*m_graph, m_graph->numBlocks());
        }

    private:
        Graph* m_graph;
    };

    NaturalBlockIterable blocksInNaturalOrder()
    {
        return NaturalBlockIterable(*this);
    }
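
    // Example (illustrative): iterating every live block. The iterator skips indices whose
    // blocks have been killed (null entries in m_blocks):
    //
    //     for (BasicBlock* block : graph.blocksInNaturalOrder())
    //         dataLog("Block #", block->index, " has ", block->size(), " nodes.\n");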

    template<typename ChildFunctor>
    ALWAYS_INLINE void doToChildrenWithNode(Node* node, const ChildFunctor& functor)
    {
        DFG_NODE_DO_TO_CHILDREN(*this, node, functor);
    }

    template<typename ChildFunctor>
    ALWAYS_INLINE void doToChildren(Node* node, const ChildFunctor& functor)
    {
        class ForwardingFunc {
        public:
            ForwardingFunc(const ChildFunctor& functor)
                : m_functor(functor)
            {
            }

            // This is a manually written functor because we want ALWAYS_INLINE.
            ALWAYS_INLINE void operator()(Node*, Edge& edge) const
            {
                m_functor(edge);
            }

        private:
            const ChildFunctor& m_functor;
        };

        doToChildrenWithNode(node, ForwardingFunc(functor));
    }
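
    // Example (illustrative): doToChildren() hides the var-args versus fixed-children
    // distinction, so generic traversals stay short:
    //
    //     unsigned childCount = 0;
    //     graph.doToChildren(node, [&] (Edge) { childCount++; });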

    bool uses(Node* node, Node* child)
    {
        bool result = false;
        doToChildren(node, [&] (Edge edge) { result |= edge == child; });
        return result;
    }

    bool isWatchingHavingABadTimeWatchpoint(Node* node)
    {
        JSGlobalObject* globalObject = globalObjectFor(node->origin.semantic);
        return watchpoints().isWatched(globalObject->havingABadTimeWatchpoint());
    }

    bool isWatchingGlobalObjectWatchpoint(JSGlobalObject* globalObject, InlineWatchpointSet& set)
    {
        if (watchpoints().isWatched(set))
            return true;

        if (set.isStillValid()) {
            // Since the global object owns this watchpoint, we take a weak reference to it.
            // If the global object were deallocated, the watchpoint would never fire. It's
            // unlikely that the global object would be deallocated without this code also
            // being thrown away; still, it's logically sounder to depend weakly on the
            // global object's lifetime.
            freeze(globalObject);
            watchpoints().addLazily(set);
            return true;
        }

        return false;
    }

    bool isWatchingArrayIteratorProtocolWatchpoint(Node* node)
    {
        JSGlobalObject* globalObject = globalObjectFor(node->origin.semantic);
        InlineWatchpointSet& set = globalObject->arrayIteratorProtocolWatchpoint();
        return isWatchingGlobalObjectWatchpoint(globalObject, set);
    }

    bool isWatchingNumberToStringWatchpoint(Node* node)
    {
        JSGlobalObject* globalObject = globalObjectFor(node->origin.semantic);
        InlineWatchpointSet& set = globalObject->numberToStringWatchpoint();
        return isWatchingGlobalObjectWatchpoint(globalObject, set);
    }

    Profiler::Compilation* compilation() { return m_plan.compilation(); }

    DesiredIdentifiers& identifiers() { return m_plan.identifiers(); }
    DesiredWatchpoints& watchpoints() { return m_plan.watchpoints(); }
    DesiredGlobalProperties& globalProperties() { return m_plan.globalProperties(); }

    // Returns false if the key is already invalid or unwatchable. If this is a Presence condition,
    // this also makes it cheap to query if the condition holds. Also makes sure that the GC knows
    // what's going on.
    bool watchCondition(const ObjectPropertyCondition&);
    bool watchConditions(const ObjectPropertyConditionSet&);

    bool watchGlobalProperty(JSGlobalObject*, unsigned identifierNumber);

    // Checks if it's known that loading from the given object at the given offset is fine. This is
    // computed by tracking which conditions we track with watchCondition().
    bool isSafeToLoad(JSObject* base, PropertyOffset);

    // This uses either constant property inference or property type inference to derive a good abstract
    // value for some property accessed with the given abstract value base.
    AbstractValue inferredValueForProperty(
        const AbstractValue& base, PropertyOffset, StructureClobberState);

    FullBytecodeLiveness& livenessFor(CodeBlock*);
    FullBytecodeLiveness& livenessFor(InlineCallFrame*);

    // Quickly query if a single local is live at the given point. This is faster than calling
    // forAllLiveInBytecode() if you will only query one local. But, if you want to know all of the
    // locals live, then calling this for each local is much slower than forAllLiveInBytecode().
    bool isLiveInBytecode(VirtualRegister, CodeOrigin);

    // Quickly get all of the non-argument locals live at the given point. This doesn't give you
    // any arguments because those are all presumed live. You can call forAllLiveInBytecode() to
    // also get the arguments. This is much faster than calling isLiveInBytecode() for each local.
    template<typename Functor>
    void forAllLocalsLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
    {
        // Support for not redundantly reporting arguments. This is necessary because, in the
        // case of a varargs call, only the callee knows that the arguments are live, while in
        // the case of a non-varargs call, both callee and caller see the variables as live.
        VirtualRegister exclusionStart;
        VirtualRegister exclusionEnd;

        CodeOrigin* codeOriginPtr = &codeOrigin;

        for (;;) {
            InlineCallFrame* inlineCallFrame = codeOriginPtr->inlineCallFrame();
            VirtualRegister stackOffset(inlineCallFrame ? inlineCallFrame->stackOffset : 0);

            if (inlineCallFrame) {
                if (inlineCallFrame->isClosureCall)
                    functor(stackOffset + CallFrameSlot::callee);
                if (inlineCallFrame->isVarargs())
                    functor(stackOffset + CallFrameSlot::argumentCount);
            }

            CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame);
            FullBytecodeLiveness& fullLiveness = livenessFor(codeBlock);
            const FastBitVector& liveness = fullLiveness.getLiveness(codeOriginPtr->bytecodeIndex());
            for (unsigned relativeLocal = codeBlock->numCalleeLocals(); relativeLocal--;) {
                VirtualRegister reg = stackOffset + virtualRegisterForLocal(relativeLocal);

                // Don't report if our callee already reported.
                if (reg >= exclusionStart && reg < exclusionEnd)
                    continue;

                if (liveness[relativeLocal])
                    functor(reg);
            }

            if (!inlineCallFrame)
                break;

            // Arguments are always live. This would be redundant if it weren't for our
            // op_call_varargs inlining. See the comment above.
            exclusionStart = stackOffset + CallFrame::argumentOffsetIncludingThis(0);
            exclusionEnd = stackOffset + CallFrame::argumentOffsetIncludingThis(inlineCallFrame->argumentsWithFixup.size());

            // We will always have a "this" argument, and exclusionStart should be a smaller
            // stack offset than exclusionEnd.
            ASSERT(exclusionStart < exclusionEnd);

            for (VirtualRegister reg = exclusionStart; reg < exclusionEnd; reg += 1)
                functor(reg);

            // We need to handle tail callers because we may decide to exit to the return
            // bytecode following the tail call.
            codeOriginPtr = &inlineCallFrame->directCaller;
        }
    }
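
    // Example (illustrative): gathering the live locals at a code origin into a BitVector,
    // much as localsLiveInBytecode() below does:
    //
    //     BitVector live;
    //     live.ensureSize(graph.block(0)->variablesAtHead.numberOfLocals());
    //     graph.forAllLocalsLiveInBytecode(codeOrigin, [&] (VirtualRegister reg) {
    //         live.quickSet(reg.toLocal());
    //     });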

    // Get a BitVector of all of the non-argument locals live right now. This is mostly useful if
    // you want to compare two sets of live locals from two different CodeOrigins.
    BitVector localsLiveInBytecode(CodeOrigin);

    // Tells you all of the arguments and locals live at the given CodeOrigin. This is a small
    // extension to forAllLocalsLiveInBytecode(), since all arguments are always presumed live.
    template<typename Functor>
    void forAllLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
    {
        forAllLocalsLiveInBytecode(codeOrigin, functor);

        // Report all arguments as being live.
        for (unsigned argument = block(0)->variablesAtHead.numberOfArguments(); argument--;)
            functor(virtualRegisterForArgument(argument));
    }

    BytecodeKills& killsFor(CodeBlock*);
    BytecodeKills& killsFor(InlineCallFrame*);

    static unsigned parameterSlotsForArgCount(unsigned);

    unsigned frameRegisterCount();
    unsigned stackPointerOffset();
    unsigned requiredRegisterCountForExit();
    unsigned requiredRegisterCountForExecutionAndExit();

    JSValue tryGetConstantProperty(JSValue base, const RegisteredStructureSet&, PropertyOffset);
    JSValue tryGetConstantProperty(JSValue base, Structure*, PropertyOffset);
    JSValue tryGetConstantProperty(JSValue base, const StructureAbstractValue&, PropertyOffset);
    JSValue tryGetConstantProperty(const AbstractValue&, PropertyOffset);

    JSValue tryGetConstantClosureVar(JSValue base, ScopeOffset);
    JSValue tryGetConstantClosureVar(const AbstractValue&, ScopeOffset);
    JSValue tryGetConstantClosureVar(Node*, ScopeOffset);

    JSArrayBufferView* tryGetFoldableView(JSValue);
    JSArrayBufferView* tryGetFoldableView(JSValue, ArrayMode arrayMode);

    bool canDoFastSpread(Node*, const AbstractValue&);

    void registerFrozenValues();

    void visitChildren(SlotVisitor&) override;

    void logAssertionFailure(
        std::nullptr_t, const char* file, int line, const char* function,
        const char* assertion);
    void logAssertionFailure(
        Node*, const char* file, int line, const char* function,
        const char* assertion);
    void logAssertionFailure(
        BasicBlock*, const char* file, int line, const char* function,
        const char* assertion);

    bool hasDebuggerEnabled() const { return m_hasDebuggerEnabled; }

    CPSDominators& ensureCPSDominators();
    SSADominators& ensureSSADominators();
    CPSNaturalLoops& ensureCPSNaturalLoops();
    SSANaturalLoops& ensureSSANaturalLoops();
    BackwardsCFG& ensureBackwardsCFG();
    BackwardsDominators& ensureBackwardsDominators();
    ControlEquivalenceAnalysis& ensureControlEquivalenceAnalysis();
    CPSCFG& ensureCPSCFG();

    // These functions only make sense to call after bytecode parsing, because they query the
    // m_hasExceptionHandlers boolean, whose value is only fully determined after bytecode
    // parsing.
    bool willCatchExceptionInMachineFrame(CodeOrigin codeOrigin)
    {
        CodeOrigin ignored;
        HandlerInfo* ignored2;
        return willCatchExceptionInMachineFrame(codeOrigin, ignored, ignored2);
    }
    bool willCatchExceptionInMachineFrame(CodeOrigin, CodeOrigin& opCatchOriginOut, HandlerInfo*& catchHandlerOut);

    bool needsScopeRegister() const { return m_hasDebuggerEnabled || m_codeBlock->usesEval(); }
    bool needsFlushedThis() const { return m_codeBlock->usesEval(); }

    void clearCPSCFGData();

    bool isRoot(BasicBlock* block) const
    {
        ASSERT_WITH_MESSAGE(!m_isInSSAConversion, "This is not written to work during SSA conversion.");

        if (m_form == SSA) {
            ASSERT(m_roots.size() == 1);
            ASSERT(m_roots.contains(this->block(0)));
            return block == this->block(0);
        }

        if (m_roots.size() <= 4) {
            bool result = m_roots.contains(block);
            ASSERT(result == m_rootToArguments.contains(block));
            return result;
        }
        bool result = m_rootToArguments.contains(block);
        ASSERT(result == m_roots.contains(block));
        return result;
    }

    VM& m_vm;
    Plan& m_plan;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;

    Vector<RefPtr<BasicBlock>, 8> m_blocks;
    Vector<BasicBlock*, 1> m_roots;
    Vector<Edge, 16> m_varArgChildren;

    HashMap<EncodedJSValue, FrozenValue*, EncodedJSValueHash, EncodedJSValueHashTraits> m_frozenValueMap;
    Bag<FrozenValue> m_frozenValues;

    Vector<uint32_t> m_uint32ValuesInUse;

    Bag<StorageAccessData> m_storageAccessData;

    // In CPS, this is all of the SetArgumentDefinitely nodes for the arguments in the machine code block
    // that survived DCE. All of them except maybe "this" will survive DCE, because of the Flush
    // nodes. In SSA, this has no meaning. It's empty.
    HashMap<BasicBlock*, ArgumentsVector> m_rootToArguments;

    // In SSA, this is the argument speculation that we've locked in for an entrypoint block.
    //
    // We must speculate on the argument types at each entrypoint even if operations involving
    // arguments get killed. For example:
    //
    //     function foo(x) {
    //         var tmp = x + 1;
    //     }
    //
    // Assume that x is always int during profiling. The ArithAdd for "x + 1" will be dead and will
    // have a proven check for the edge to "x". So, we will not insert a Check node and we will
    // kill the GetStack for "x". But, we must do the int check in the prologue, because that's
    // the thing we used to allow DCE of ArithAdd. Otherwise the add could be impure:
    //
    //     var o = {
    //         valueOf: function() { do side effects; }
    //     };
    //     foo(o);
    //
    // If we DCE the ArithAdd and we remove the int check on x, then this won't do the side
    // effects.
    //
    // By convention, entrypoint index 0 is used for the CodeBlock's op_enter entrypoint.
    // So m_argumentFormats[0] are the argument formats for the normal call entrypoint.
    Vector<Vector<FlushFormat>> m_argumentFormats;

    SegmentedVector<VariableAccessData, 16> m_variableAccessData;
    SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
    Bag<Transition> m_transitions;
    Bag<BranchData> m_branchData;
    Bag<SwitchData> m_switchData;
    Bag<MultiGetByOffsetData> m_multiGetByOffsetData;
    Bag<MultiPutByOffsetData> m_multiPutByOffsetData;
    Bag<MatchStructureData> m_matchStructureData;
    Bag<ObjectMaterializationData> m_objectMaterializationData;
    Bag<CallVarargsData> m_callVarargsData;
    Bag<LoadVarargsData> m_loadVarargsData;
    Bag<StackAccessData> m_stackAccessData;
    Bag<LazyJSValue> m_lazyJSValues;
    Bag<CallDOMGetterData> m_callDOMGetterData;
    Bag<BitVector> m_bitVectors;
    Vector<InlineVariableData, 4> m_inlineVariableData;
    HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>> m_bytecodeLiveness;
    HashMap<CodeBlock*, std::unique_ptr<BytecodeKills>> m_bytecodeKills;
    HashSet<std::pair<JSObject*, PropertyOffset>> m_safeToLoad;
    Vector<Ref<Snippet>> m_domJITSnippets;
    std::unique_ptr<CPSDominators> m_cpsDominators;
    std::unique_ptr<SSADominators> m_ssaDominators;
    std::unique_ptr<CPSNaturalLoops> m_cpsNaturalLoops;
    std::unique_ptr<SSANaturalLoops> m_ssaNaturalLoops;
    std::unique_ptr<SSACFG> m_ssaCFG;
    std::unique_ptr<CPSCFG> m_cpsCFG;
    std::unique_ptr<BackwardsCFG> m_backwardsCFG;
    std::unique_ptr<BackwardsDominators> m_backwardsDominators;
    std::unique_ptr<ControlEquivalenceAnalysis> m_controlEquivalenceAnalysis;
    unsigned m_localVars;
    unsigned m_nextMachineLocal;
    unsigned m_parameterSlots;

    // This is the number of logical entrypoints that we're compiling. This is only used
    // in SSA. Each EntrySwitch node must have m_numberOfEntrypoints cases. Note that this is
    // not the same as m_roots.size(): m_roots.size() represents the number of roots in
    // the CFG. In SSA, m_roots.size() == 1 even if we're compiling more than one entrypoint.
    unsigned m_numberOfEntrypoints { UINT_MAX };

    // This maps an entrypoint index to a particular op_catch bytecode offset. By convention,
    // it'll never have zero as a key because we use zero to mean the op_enter entrypoint.
    HashMap<unsigned, unsigned> m_entrypointIndexToCatchBytecodeOffset;

    HashSet<String> m_localStrings;
    HashMap<const StringImpl*, String> m_copiedStrings;

#if USE(JSVALUE32_64)
    StdUnorderedMap<int64_t, double*> m_doubleConstantsMap;
    std::unique_ptr<Bag<double>> m_doubleConstants;
#endif

    OptimizationFixpointState m_fixpointState;
    StructureRegistrationState m_structureRegistrationState;
    GraphForm m_form;
    UnificationState m_unificationState;
    PlanStage m_planStage { PlanStage::Initial };
    RefCountState m_refCountState;
    bool m_hasDebuggerEnabled;
    bool m_hasExceptionHandlers { false };
    bool m_isInSSAConversion { false };
    Optional<uint32_t> m_maxLocalsForCatchOSREntry;
    std::unique_ptr<FlowIndexing> m_indexingCache;
    std::unique_ptr<FlowMap<AbstractValue>> m_abstractValuesCache;
    Bag<EntrySwitchData> m_entrySwitchData;

    RegisteredStructure stringStructure;
    RegisteredStructure symbolStructure;

private:
    bool isStringPrototypeMethodSane(JSGlobalObject*, UniquedStringImpl*);

    void handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock*, BasicBlock* successor);

    AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* operand, Node* immediate, RareCaseProfilingSource source)
    {
        ASSERT(immediate->hasConstant());

        JSValue immediateValue = immediate->asJSValue();
        if (!immediateValue.isNumber() && !immediateValue.isBoolean())
            return DontSpeculateInt32;

        if (!variableShouldSpeculateInt32)
            return DontSpeculateInt32;

        // Integer constants can be typed Double if they are written like a double in the source code (e.g. 42.0).
        // In that case, we stay conservative unless the other operand was explicitly typed as integer.
        NodeFlags operandResultType = operand->result();
        if (operandResultType != NodeResultInt32 && immediateValue.isDouble())
            return DontSpeculateInt32;

        if (immediateValue.isBoolean() || jsNumber(immediateValue.asNumber()).isInt32())
            return add->canSpeculateInt32(source) ? SpeculateInt32 : DontSpeculateInt32;

        double doubleImmediate = immediateValue.asDouble();
        const double twoToThe48 = 281474976710656.0;
        if (doubleImmediate < -twoToThe48 || doubleImmediate > twoToThe48)
            return DontSpeculateInt32;

        return bytecodeCanTruncateInteger(add->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants : DontSpeculateInt32;
    }

    B3::SparseCollection<Node> m_nodes;
    SegmentedVector<RegisteredStructureSet, 16> m_structureSets;
};

} } // namespace JSC::DFG

#endif