/*
 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(DFG_JIT)

#include "AssemblyHelpers.h"
#include "BytecodeLivenessAnalysisInlines.h"
#include "CodeBlock.h"
#include "DFGArgumentPosition.h"
#include "DFGBasicBlock.h"
#include "DFGFrozenValue.h"
#include "DFGNode.h"
#include "DFGPlan.h"
#include "DFGPropertyTypeKey.h"
#include "DFGScannable.h"
#include "FullBytecodeLiveness.h"
#include "MethodOfGettingAValueProfile.h"
#include <wtf/BitVector.h>
#include <wtf/HashMap.h>
#include <wtf/Vector.h>
#include <wtf/StdLibExtras.h>
#include <wtf/StdUnorderedMap.h>

namespace WTF {
template <typename T> class SingleRootGraph;
}

namespace JSC {

class CodeBlock;
class CallFrame;

namespace DFG {

class BackwardsCFG;
class BackwardsDominators;
class CFG;
class CPSCFG;
class ControlEquivalenceAnalysis;
template <typename T> class Dominators;
template <typename T> class NaturalLoops;
class FlowIndexing;
template<typename> class FlowMap;

using ArgumentsVector = Vector<Node*, 8>;

using SSACFG = CFG;
using CPSDominators = Dominators<CPSCFG>;
using SSADominators = Dominators<SSACFG>;
using CPSNaturalLoops = NaturalLoops<CPSCFG>;
using SSANaturalLoops = NaturalLoops<SSACFG>;

#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do { \
        Node* _node = (node); \
        if (_node->flags() & NodeHasVarArgs) { \
            for (unsigned _childIdx = _node->firstChild(); \
                _childIdx < _node->firstChild() + _node->numChildren(); \
                _childIdx++) { \
                if (!!(graph).m_varArgChildren[_childIdx]) \
                    thingToDo(_node, (graph).m_varArgChildren[_childIdx]); \
            } \
        } else { \
            for (unsigned _edgeIndex = 0; _edgeIndex < AdjacencyList::Size; _edgeIndex++) { \
                Edge& _edge = _node->children.child(_edgeIndex); \
                if (!_edge) \
                    break; \
                thingToDo(_node, _edge); \
            } \
        } \
    } while (false)
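
// Usage sketch (illustrative, not from this header): the third argument is a
// callable invoked as thingToDo(Node*, Edge&), for example a lambda that
// inspects or mutates each live child edge of a node. Most callers go through
// doToChildrenWithNode()/doToChildren() declared below instead of using the
// macro directly.
//
//     DFG_NODE_DO_TO_CHILDREN(graph, node, [&] (Node*, Edge& edge) {
//         dataLog("child: ", edge, "\n");
//     });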

#define DFG_ASSERT(graph, node, assertion, ...) do { \
        if (!!(assertion)) \
            break; \
        (graph).logAssertionFailure( \
            (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
        CRASH_WITH_SECURITY_IMPLICATION_AND_INFO(__VA_ARGS__); \
    } while (false)

#define DFG_CRASH(graph, node, reason, ...) do { \
        (graph).logAssertionFailure( \
            (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, (reason)); \
        CRASH_WITH_SECURITY_IMPLICATION_AND_INFO(__VA_ARGS__); \
    } while (false)
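
// Both macros are meant to be used from Graph methods or DFG phases that have
// a Graph in scope. Illustrative (hypothetical) call sites:
//
//     DFG_ASSERT(*this, node, node->child1().useKind() == Int32Use);
//     DFG_CRASH(*this, node, "Unexpected node during lowering");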

struct InlineVariableData {
    InlineCallFrame* inlineCallFrame;
    unsigned argumentPositionStart;
    VariableAccessData* calleeVariable;
};

enum AddSpeculationMode {
    DontSpeculateInt32,
    SpeculateInt32AndTruncateConstants,
    SpeculateInt32
};

struct Prefix {
    enum NoHeaderTag { NoHeader };

    Prefix() { }

    Prefix(const char* prefixStr, NoHeaderTag tag = NoHeader)
        : prefixStr(prefixStr)
        , noHeader(tag == NoHeader)
    { }

    Prefix(NoHeaderTag)
        : noHeader(true)
    { }

    void dump(PrintStream& out) const;

    void clearBlockIndex() { blockIndex = -1; }
    void clearNodeIndex() { nodeIndex = -1; }

    void enable() { m_enabled = true; }
    void disable() { m_enabled = false; }

    int32_t phaseNumber { -1 };
    int32_t blockIndex { -1 };
    int32_t nodeIndex { -1 };
    const char* prefixStr { nullptr };
    bool noHeader { false };

    static constexpr const char* noString = nullptr;

private:
    bool m_enabled { true };
};

//
// === Graph ===
//
// The order may be significant for nodes with side-effects (property accesses, value conversions).
// Nodes that are 'dead' remain in the vector with refCount 0.
class Graph : public virtual Scannable {
public:
    Graph(VM&, Plan&);
    ~Graph();

    void changeChild(Edge& edge, Node* newNode)
    {
        edge.setNode(newNode);
    }

    void changeEdge(Edge& edge, Edge newEdge)
    {
        edge = newEdge;
    }

    void compareAndSwap(Edge& edge, Node* oldNode, Node* newNode)
    {
        if (edge.node() != oldNode)
            return;
        changeChild(edge, newNode);
    }

    void compareAndSwap(Edge& edge, Edge oldEdge, Edge newEdge)
    {
        if (edge != oldEdge)
            return;
        changeEdge(edge, newEdge);
    }

    void performSubstitution(Node* node)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++)
                performSubstitutionForEdge(m_varArgChildren[childIdx]);
        } else {
            performSubstitutionForEdge(node->child1());
            performSubstitutionForEdge(node->child2());
            performSubstitutionForEdge(node->child3());
        }
    }

    void performSubstitutionForEdge(Edge& child)
    {
        // Check if this operand is actually unused.
        if (!child)
            return;

        // Check if there is any replacement.
        Node* replacement = child->replacement();
        if (!replacement)
            return;

        child.setNode(replacement);

        // There is definitely a replacement. Assert that the replacement does not
        // have a replacement.
        ASSERT(!child->replacement());
    }

    template<typename... Params>
    Node* addNode(Params... params)
    {
        return m_nodes.addNew(params...);
    }

    template<typename... Params>
    Node* addNode(SpeculatedType type, Params... params)
    {
        Node* node = m_nodes.addNew(params...);
        node->predict(type);
        return node;
    }

    void deleteNode(Node*);
    unsigned maxNodeCount() const { return m_nodes.size(); }
    Node* nodeAt(unsigned index) const { return m_nodes[index]; }
    void packNodeIndices();

    void dethread();

    FrozenValue* freeze(JSValue); // We use weak freezing by default.
    FrozenValue* freezeStrong(JSValue); // Shorthand for freeze(value)->strengthenTo(StrongValue).

    void convertToConstant(Node* node, FrozenValue* value);
    void convertToConstant(Node* node, JSValue value);
    void convertToStrongConstant(Node* node, JSValue value);

    RegisteredStructure registerStructure(Structure* structure)
    {
        StructureRegistrationResult ignored;
        return registerStructure(structure, ignored);
    }
    RegisteredStructure registerStructure(Structure*, StructureRegistrationResult&);
    void registerAndWatchStructureTransition(Structure*);
    void assertIsRegistered(Structure* structure);

    // CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
    void dump(PrintStream& = WTF::dataFile(), DumpContext* = 0);

    bool terminalsAreValid();

    enum PhiNodeDumpMode { DumpLivePhisOnly, DumpAllPhis };
    void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext*);
    void dump(PrintStream&, Edge);
    void dump(PrintStream&, const char* prefix, Node*, DumpContext* = 0);
    static int amountOfNodeWhiteSpace(Node*);
    static void printNodeWhiteSpace(PrintStream&, Node*);

    // Dump the code origin of the given node as a diff from the code origin of the
    // preceding node. Returns true if anything was printed.
    bool dumpCodeOrigin(PrintStream&, const char* prefix, Node*& previousNode, Node* currentNode, DumpContext*);

    AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32, PredictionPass pass)
    {
        ASSERT(add->op() == ValueAdd || add->op() == ValueSub || add->op() == ArithAdd || add->op() == ArithSub);

        RareCaseProfilingSource source = add->sourceFor(pass);

        Node* left = add->child1().node();
        Node* right = add->child2().node();

        if (left->hasConstant())
            return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, right, left, source);
        if (right->hasConstant())
            return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, left, right, source);

        return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32(source)) ? SpeculateInt32 : DontSpeculateInt32;
    }

    AddSpeculationMode valueAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            add->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            pass);
    }

    AddSpeculationMode arithAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanForArithmetic(),
            add->child2()->shouldSpeculateInt32OrBooleanForArithmetic(),
            pass);
    }

    AddSpeculationMode addSpeculationMode(Node* add, PredictionPass pass)
    {
        if (add->op() == ValueAdd)
            return valueAddSpeculationMode(add, pass);

        return arithAddSpeculationMode(add, pass);
    }

    bool addShouldSpeculateInt32(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(add, pass) != DontSpeculateInt32;
    }

    bool addShouldSpeculateInt52(Node* add)
    {
        if (!enableInt52())
            return false;

        Node* left = add->child1().node();
        Node* right = add->child2().node();

        if (hasExitSite(add, Int52Overflow))
            return false;

        if (Node::shouldSpeculateInt52(left, right))
            return true;

        auto shouldSpeculateInt52ForAdd = [] (Node* node) {
            // When a DoubleConstant node appears, it means the user explicitly wrote the constant
            // in double form (1.0 instead of 1). In that case we should honor that decision:
            // treating it as an integer is not appropriate.
            if (node->op() == DoubleConstant)
                return false;
            return isIntAnyFormat(node->prediction());
        };

        // Allow an Int52 ArithAdd only when at least one side of the binary operation should already
        // be speculated Int52. This is a somewhat conservative decision, because Double to Int52
        // conversion is not cheap: frequent back-and-forth conversions between Double and Int52 hurt
        // performance. If one side of the operation is already Int52, constructing the Int52 ArithAdd
        // becomes cheap, since at most one Double to Int52 conversion is required.
        // This recovers some regression in assorted tests while keeping the kraken crypto improvements.
        if (!left->shouldSpeculateInt52() && !right->shouldSpeculateInt52())
            return false;

        auto usesAsNumbers = [](Node* node) {
            NodeFlags flags = node->flags() & NodeBytecodeBackPropMask;
            if (!flags)
                return false;
            return (flags & (NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex)) == flags;
        };

        // Wrapping Int52 to Value is also not so cheap. Thus, we allow Int52 addition only when the node is used as number.
        if (!usesAsNumbers(add))
            return false;

        return shouldSpeculateInt52ForAdd(left) && shouldSpeculateInt52ForAdd(right);
    }

    bool binaryArithShouldSpeculateInt32(Node* node, PredictionPass pass)
    {
        Node* left = node->child1().node();
        Node* right = node->child2().node();

        return Node::shouldSpeculateInt32OrBooleanForArithmetic(left, right)
            && node->canSpeculateInt32(node->sourceFor(pass));
    }

    bool binaryArithShouldSpeculateInt52(Node* node, PredictionPass pass)
    {
        if (!enableInt52())
            return false;

        Node* left = node->child1().node();
        Node* right = node->child2().node();

        return Node::shouldSpeculateInt52(left, right)
            && node->canSpeculateInt52(pass)
            && !hasExitSite(node, Int52Overflow);
    }

    bool unaryArithShouldSpeculateInt32(Node* node, PredictionPass pass)
    {
        return node->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
            && node->canSpeculateInt32(pass);
    }

    bool unaryArithShouldSpeculateInt52(Node* node, PredictionPass pass)
    {
        if (!enableInt52())
            return false;
        return node->child1()->shouldSpeculateInt52()
            && node->canSpeculateInt52(pass)
            && !hasExitSite(node, Int52Overflow);
    }

    bool canOptimizeStringObjectAccess(const CodeOrigin&);

    bool getRegExpPrototypeProperty(JSObject* regExpPrototype, Structure* regExpPrototypeStructure, UniquedStringImpl* uid, JSValue& returnJSValue);

    bool roundShouldSpeculateInt32(Node* arithRound, PredictionPass pass)
    {
        ASSERT(arithRound->op() == ArithRound || arithRound->op() == ArithFloor || arithRound->op() == ArithCeil || arithRound->op() == ArithTrunc);
        return arithRound->canSpeculateInt32(pass) && !hasExitSite(arithRound->origin.semantic, Overflow) && !hasExitSite(arithRound->origin.semantic, NegativeZero);
    }

    static const char* opName(NodeType);

    RegisteredStructureSet* addStructureSet(const StructureSet& structureSet)
    {
        m_structureSets.append();
        RegisteredStructureSet* result = &m_structureSets.last();

        for (Structure* structure : structureSet)
            result->add(registerStructure(structure));

        return result;
    }

    RegisteredStructureSet* addStructureSet(const RegisteredStructureSet& structureSet)
    {
        m_structureSets.append();
        RegisteredStructureSet* result = &m_structureSets.last();

        for (RegisteredStructure structure : structureSet)
            result->add(structure);

        return result;
    }

    JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
    {
        return m_codeBlock->globalObjectFor(codeOrigin);
    }

    JSObject* globalThisObjectFor(CodeOrigin codeOrigin)
    {
        JSGlobalObject* object = globalObjectFor(codeOrigin);
        return jsCast<JSObject*>(object->methodTable(m_vm)->toThis(object, object, NotStrictMode));
    }

    ScriptExecutable* executableFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_codeBlock->ownerExecutable();

        return inlineCallFrame->baselineCodeBlock->ownerExecutable();
    }

    ScriptExecutable* executableFor(const CodeOrigin& codeOrigin)
    {
        return executableFor(codeOrigin.inlineCallFrame());
    }

    CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock;
        return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    }

    CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, m_profiledBlock);
    }

    bool isStrictModeFor(CodeOrigin codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame())
            return m_codeBlock->isStrictMode();
        return codeOrigin.inlineCallFrame()->isStrictMode();
    }

    ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
    {
        return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
    }

    bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
    {
        return globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid();
    }

    bool hasGlobalExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->unlinkedCodeBlock()->hasExitSite(FrequentExitSite(exitKind));
    }

    bool hasExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->unlinkedCodeBlock()->hasExitSite(FrequentExitSite(codeOrigin.bytecodeIndex(), exitKind));
    }

    bool hasExitSite(Node* node, ExitKind exitKind)
    {
        return hasExitSite(node->origin.semantic, exitKind);
    }

    MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node* currentNode, Node* operandNode);

    BlockIndex numBlocks() const { return m_blocks.size(); }
    BasicBlock* block(BlockIndex blockIndex) const { return m_blocks[blockIndex].get(); }
    BasicBlock* lastBlock() const { return block(numBlocks() - 1); }

    void appendBlock(Ref<BasicBlock>&& basicBlock)
    {
        basicBlock->index = m_blocks.size();
        m_blocks.append(WTFMove(basicBlock));
    }

    void killBlock(BlockIndex blockIndex)
    {
        m_blocks[blockIndex] = nullptr;
    }

    void killBlock(BasicBlock* basicBlock)
    {
        killBlock(basicBlock->index);
    }

    void killBlockAndItsContents(BasicBlock*);

    void killUnreachableBlocks();

    void determineReachability();
    void resetReachability();

    void computeRefCounts();

    unsigned varArgNumChildren(Node* node)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return node->numChildren();
    }

    unsigned numChildren(Node* node)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgNumChildren(node);
        return AdjacencyList::Size;
    }

    template <typename Function = bool(*)(Edge)>
    AdjacencyList copyVarargChildren(Node* node, Function filter = [] (Edge) { return true; })
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        unsigned firstChild = m_varArgChildren.size();
        unsigned numChildren = 0;
        doToChildren(node, [&] (Edge edge) {
            if (filter(edge)) {
                ++numChildren;
                m_varArgChildren.append(edge);
            }
        });

        return AdjacencyList(AdjacencyList::Variable, firstChild, numChildren);
    }
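
    // Illustrative sketch (hypothetical call site and set): rebuild a varargs
    // node's adjacency list while dropping children a pass no longer wants.
    //
    //     node->children = copyVarargChildren(node, [&] (Edge edge) {
    //         return !edgesToRemove.contains(edge.node());
    //     });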

    Edge& varArgChild(Node* node, unsigned index)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return m_varArgChildren[node->firstChild() + index];
    }

    Edge& child(Node* node, unsigned index)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgChild(node, index);
        return node->children.child(index);
    }

    void voteNode(Node* node, unsigned ballot, float weight = 1)
    {
        switch (node->op()) {
        case ValueToInt32:
        case UInt32ToNumber:
            node = node->child1().node();
            break;
        default:
            break;
        }

        if (node->op() == GetLocal)
            node->variableAccessData()->vote(ballot, weight);
    }

    void voteNode(Edge edge, unsigned ballot, float weight = 1)
    {
        voteNode(edge.node(), ballot, weight);
    }

    void voteChildren(Node* node, unsigned ballot, float weight = 1)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild();
                childIdx < node->firstChild() + node->numChildren();
                childIdx++) {
                if (!!m_varArgChildren[childIdx])
                    voteNode(m_varArgChildren[childIdx], ballot, weight);
            }
            return;
        }

        if (!node->child1())
            return;
        voteNode(node->child1(), ballot, weight);
        if (!node->child2())
            return;
        voteNode(node->child2(), ballot, weight);
        if (!node->child3())
            return;
        voteNode(node->child3(), ballot, weight);
    }

    template<typename T> // T = Node* or Edge
    void substitute(BasicBlock& block, unsigned startIndexInBlock, T oldThing, T newThing)
    {
        for (unsigned indexInBlock = startIndexInBlock; indexInBlock < block.size(); ++indexInBlock) {
            Node* node = block[indexInBlock];
            if (node->flags() & NodeHasVarArgs) {
                for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); ++childIdx) {
                    if (!!m_varArgChildren[childIdx])
                        compareAndSwap(m_varArgChildren[childIdx], oldThing, newThing);
                }
                continue;
            }
            if (!node->child1())
                continue;
            compareAndSwap(node->children.child1(), oldThing, newThing);
            if (!node->child2())
                continue;
            compareAndSwap(node->children.child2(), oldThing, newThing);
            if (!node->child3())
                continue;
            compareAndSwap(node->children.child3(), oldThing, newThing);
        }
    }

    // Use this if you introduce a new GetLocal and you know that you introduced it *before*
    // any GetLocals in the basic block.
    // FIXME: it may be appropriate, in the future, to generalize this to handle GetLocals
    // introduced anywhere in the basic block.
    void substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal);

    void invalidateCFG();
    void invalidateNodeLiveness();

    void clearFlagsOnAllNodes(NodeFlags);

    void clearReplacements();
    void clearEpochs();
    void initializeNodeOwners();

    BlockList blocksInPreOrder();
    BlockList blocksInPostOrder(bool isSafeToValidate = true);

    class NaturalBlockIterable {
    public:
        NaturalBlockIterable()
            : m_graph(nullptr)
        {
        }

        NaturalBlockIterable(Graph& graph)
            : m_graph(&graph)
        {
        }

        class iterator {
        public:
            iterator()
                : m_graph(nullptr)
                , m_index(0)
            {
            }

            iterator(Graph& graph, BlockIndex index)
                : m_graph(&graph)
                , m_index(findNext(index))
            {
            }

            BasicBlock* operator*()
            {
                return m_graph->block(m_index);
            }

            iterator& operator++()
            {
                m_index = findNext(m_index + 1);
                return *this;
            }

            bool operator==(const iterator& other) const
            {
                return m_index == other.m_index;
            }

            bool operator!=(const iterator& other) const
            {
                return !(*this == other);
            }

        private:
            BlockIndex findNext(BlockIndex index)
            {
                while (index < m_graph->numBlocks() && !m_graph->block(index))
                    index++;
                return index;
            }

            Graph* m_graph;
            BlockIndex m_index;
        };

        iterator begin()
        {
            return iterator(*m_graph, 0);
        }

        iterator end()
        {
            return iterator(*m_graph, m_graph->numBlocks());
        }

    private:
        Graph* m_graph;
    };

    NaturalBlockIterable blocksInNaturalOrder()
    {
        return NaturalBlockIterable(*this);
    }
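
    // Illustrative: iterate every block that still exists, transparently
    // skipping slots in m_blocks that were nulled out by killBlock().
    //
    //     for (BasicBlock* block : blocksInNaturalOrder())
    //         dataLog("Block #", block->index, "\n");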

    template<typename ChildFunctor>
    ALWAYS_INLINE void doToChildrenWithNode(Node* node, const ChildFunctor& functor)
    {
        DFG_NODE_DO_TO_CHILDREN(*this, node, functor);
    }

    template<typename ChildFunctor>
    ALWAYS_INLINE void doToChildren(Node* node, const ChildFunctor& functor)
    {
        class ForwardingFunc {
        public:
            ForwardingFunc(const ChildFunctor& functor)
                : m_functor(functor)
            {
            }

            // This is a manually written func because we want ALWAYS_INLINE.
            ALWAYS_INLINE void operator()(Node*, Edge& edge) const
            {
                m_functor(edge);
            }

        private:
            const ChildFunctor& m_functor;
        };

        doToChildrenWithNode(node, ForwardingFunc(functor));
    }
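
    // Illustrative: doToChildren() hands each live Edge of a node to the
    // functor (uses() below is a real example). For instance, counting a
    // node's children:
    //
    //     unsigned numEdges = 0;
    //     doToChildren(node, [&] (Edge) { numEdges++; });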

    bool uses(Node* node, Node* child)
    {
        bool result = false;
        doToChildren(node, [&] (Edge edge) { result |= edge == child; });
        return result;
    }

    bool isWatchingHavingABadTimeWatchpoint(Node* node)
    {
        JSGlobalObject* globalObject = globalObjectFor(node->origin.semantic);
        return watchpoints().isWatched(globalObject->havingABadTimeWatchpoint());
    }

    bool isWatchingGlobalObjectWatchpoint(JSGlobalObject* globalObject, InlineWatchpointSet& set)
    {
        if (watchpoints().isWatched(set))
            return true;

        if (set.isStillValid()) {
            // Since the global object owns this watchpoint, we make ourselves hold a weak pointer to it.
            // If the global object were deallocated, it wouldn't fire the watchpoint. It's unlikely the
            // global object would get deallocated without this code ever getting thrown away; still,
            // it's more logically sound to depend weakly on the global object's lifetime.
            freeze(globalObject);
            watchpoints().addLazily(set);
            return true;
        }

        return false;
    }

    bool isWatchingArrayIteratorProtocolWatchpoint(Node* node)
    {
        JSGlobalObject* globalObject = globalObjectFor(node->origin.semantic);
        InlineWatchpointSet& set = globalObject->arrayIteratorProtocolWatchpointSet();
        return isWatchingGlobalObjectWatchpoint(globalObject, set);
    }

    bool isWatchingNumberToStringWatchpoint(Node* node)
    {
        JSGlobalObject* globalObject = globalObjectFor(node->origin.semantic);
        InlineWatchpointSet& set = globalObject->numberToStringWatchpointSet();
        return isWatchingGlobalObjectWatchpoint(globalObject, set);
    }

    Profiler::Compilation* compilation() { return m_plan.compilation(); }

    DesiredIdentifiers& identifiers() { return m_plan.identifiers(); }
    DesiredWatchpoints& watchpoints() { return m_plan.watchpoints(); }
    DesiredGlobalProperties& globalProperties() { return m_plan.globalProperties(); }

    // Returns false if the key is already invalid or unwatchable. If this is a Presence condition,
    // this also makes it cheap to query if the condition holds. Also makes sure that the GC knows
    // what's going on.
    bool watchCondition(const ObjectPropertyCondition&);
    bool watchConditions(const ObjectPropertyConditionSet&);

    bool watchGlobalProperty(JSGlobalObject*, unsigned identifierNumber);

    // Checks if it's known that loading from the given object at the given offset is fine. This is
    // computed by tracking which conditions we track with watchCondition().
    bool isSafeToLoad(JSObject* base, PropertyOffset);

    // This uses either constant property inference or property type inference to derive a good abstract
    // value for some property accessed with the given abstract value base.
    AbstractValue inferredValueForProperty(
        const AbstractValue& base, PropertyOffset, StructureClobberState);

    FullBytecodeLiveness& livenessFor(CodeBlock*);
    FullBytecodeLiveness& livenessFor(InlineCallFrame*);

    // Quickly query if a single local is live at the given point. This is faster than calling
    // forAllLiveInBytecode() if you will only query one local. But, if you want to know all of the
    // locals live, then calling this for each local is much slower than forAllLiveInBytecode().
    bool isLiveInBytecode(VirtualRegister, CodeOrigin);

    // Quickly get all of the non-argument locals live at the given point. This doesn't give you
    // any arguments because those are all presumed live. You can call forAllLiveInBytecode() to
    // also get the arguments. This is much faster than calling isLiveInBytecode() for each local.
    template<typename Functor>
    void forAllLocalsLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
    {
        // Support for not redundantly reporting arguments. Necessary because, in the case of a varargs
        // call, only the callee knows that the arguments are live, while in the case of a non-varargs
        // call, both the callee and the caller will see the variables as live.
        VirtualRegister exclusionStart;
        VirtualRegister exclusionEnd;

        CodeOrigin* codeOriginPtr = &codeOrigin;

        for (;;) {
            InlineCallFrame* inlineCallFrame = codeOriginPtr->inlineCallFrame();
            VirtualRegister stackOffset(inlineCallFrame ? inlineCallFrame->stackOffset : 0);

            if (inlineCallFrame) {
                if (inlineCallFrame->isClosureCall)
                    functor(stackOffset + CallFrameSlot::callee);
                if (inlineCallFrame->isVarargs())
                    functor(stackOffset + CallFrameSlot::argumentCount);
            }

            CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame);
            FullBytecodeLiveness& fullLiveness = livenessFor(codeBlock);
            const FastBitVector& liveness = fullLiveness.getLiveness(codeOriginPtr->bytecodeIndex());
            for (unsigned relativeLocal = codeBlock->numCalleeLocals(); relativeLocal--;) {
                VirtualRegister reg = stackOffset + virtualRegisterForLocal(relativeLocal);

                // Don't report if our callee already reported.
                if (reg >= exclusionStart && reg < exclusionEnd)
                    continue;

                if (liveness[relativeLocal])
                    functor(reg);
            }

            if (!inlineCallFrame)
                break;

            // Arguments are always live. This would be redundant if it weren't for our
            // op_call_varargs inlining. See the comment above.
            exclusionStart = stackOffset + CallFrame::argumentOffsetIncludingThis(0);
            exclusionEnd = stackOffset + CallFrame::argumentOffsetIncludingThis(inlineCallFrame->argumentsWithFixup.size());

            // We will always have a "this" argument and exclusionStart should be a smaller stack
            // offset than exclusionEnd.
            ASSERT(exclusionStart < exclusionEnd);

            for (VirtualRegister reg = exclusionStart; reg < exclusionEnd; reg += 1)
                functor(reg);

            // We need to handle tail callers because we may decide to exit to the
            // return bytecode following the tail call.
            codeOriginPtr = &inlineCallFrame->directCaller;
        }
    }
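
    // Illustrative sketch of consuming the walker above; this is roughly what
    // localsLiveInBytecode() below computes:
    //
    //     BitVector live;
    //     forAllLocalsLiveInBytecode(codeOrigin, [&] (VirtualRegister reg) {
    //         live.set(reg.toLocal());
    //     });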

    // Get a BitVector of all of the non-argument locals live right now. This is mostly useful if
    // you want to compare two sets of live locals from two different CodeOrigins.
    BitVector localsLiveInBytecode(CodeOrigin);

    // Tells you all of the arguments and locals live at the given CodeOrigin. This is a small
    // extension to forAllLocalsLiveInBytecode(), since all arguments are always presumed live.
    template<typename Functor>
    void forAllLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
    {
        forAllLocalsLiveInBytecode(codeOrigin, functor);

        // Report all arguments as being live.
        for (unsigned argument = block(0)->variablesAtHead.numberOfArguments(); argument--;)
            functor(virtualRegisterForArgument(argument));
    }

    BytecodeKills& killsFor(CodeBlock*);
    BytecodeKills& killsFor(InlineCallFrame*);

    static unsigned parameterSlotsForArgCount(unsigned);

    unsigned frameRegisterCount();
    unsigned stackPointerOffset();
    unsigned requiredRegisterCountForExit();
    unsigned requiredRegisterCountForExecutionAndExit();

    JSValue tryGetConstantProperty(JSValue base, const RegisteredStructureSet&, PropertyOffset);
    JSValue tryGetConstantProperty(JSValue base, Structure*, PropertyOffset);
    JSValue tryGetConstantProperty(JSValue base, const StructureAbstractValue&, PropertyOffset);
    JSValue tryGetConstantProperty(const AbstractValue&, PropertyOffset);

    JSValue tryGetConstantClosureVar(JSValue base, ScopeOffset);
    JSValue tryGetConstantClosureVar(const AbstractValue&, ScopeOffset);
    JSValue tryGetConstantClosureVar(Node*, ScopeOffset);

    JSArrayBufferView* tryGetFoldableView(JSValue);
    JSArrayBufferView* tryGetFoldableView(JSValue, ArrayMode arrayMode);

    bool canDoFastSpread(Node*, const AbstractValue&);

    void registerFrozenValues();

    void visitChildren(SlotVisitor&) override;

    void logAssertionFailure(
        std::nullptr_t, const char* file, int line, const char* function,
        const char* assertion);
    void logAssertionFailure(
        Node*, const char* file, int line, const char* function,
        const char* assertion);
    void logAssertionFailure(
        BasicBlock*, const char* file, int line, const char* function,
        const char* assertion);

    bool hasDebuggerEnabled() const { return m_hasDebuggerEnabled; }

    CPSDominators& ensureCPSDominators();
    SSADominators& ensureSSADominators();
    CPSNaturalLoops& ensureCPSNaturalLoops();
    SSANaturalLoops& ensureSSANaturalLoops();
    BackwardsCFG& ensureBackwardsCFG();
    BackwardsDominators& ensureBackwardsDominators();
    ControlEquivalenceAnalysis& ensureControlEquivalenceAnalysis();
    CPSCFG& ensureCPSCFG();

    // These functions only make sense to call after bytecode parsing, because
    // they query the m_hasExceptionHandlers boolean, whose value is only fully
    // determined after bytecode parsing.
    bool willCatchExceptionInMachineFrame(CodeOrigin codeOrigin)
    {
        CodeOrigin ignored;
        HandlerInfo* ignored2;
        return willCatchExceptionInMachineFrame(codeOrigin, ignored, ignored2);
    }
    bool willCatchExceptionInMachineFrame(CodeOrigin, CodeOrigin& opCatchOriginOut, HandlerInfo*& catchHandlerOut);

    bool needsScopeRegister() const { return m_hasDebuggerEnabled || m_codeBlock->usesEval(); }
    bool needsFlushedThis() const { return m_codeBlock->usesEval(); }

    void clearCPSCFGData();

    bool isRoot(BasicBlock* block) const
    {
        ASSERT_WITH_MESSAGE(!m_isInSSAConversion, "This is not written to work during SSA conversion.");

        if (m_form == SSA) {
            ASSERT(m_roots.size() == 1);
            ASSERT(m_roots.contains(this->block(0)));
            return block == this->block(0);
        }

        if (m_roots.size() <= 4) {
            bool result = m_roots.contains(block);
            ASSERT(result == m_rootToArguments.contains(block));
            return result;
        }
        bool result = m_rootToArguments.contains(block);
        ASSERT(result == m_roots.contains(block));
        return result;
    }

    Prefix& prefix() { return m_prefix; }
    void nextPhase() { m_prefix.phaseNumber++; }

    VM& m_vm;
    Plan& m_plan;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;

    Vector<RefPtr<BasicBlock>, 8> m_blocks;
    Vector<BasicBlock*, 1> m_roots;
    Vector<Edge, 16> m_varArgChildren;

    HashMap<EncodedJSValue, FrozenValue*, EncodedJSValueHash, EncodedJSValueHashTraits> m_frozenValueMap;
    Bag<FrozenValue> m_frozenValues;

    Vector<uint32_t> m_uint32ValuesInUse;

    Bag<StorageAccessData> m_storageAccessData;

    // In CPS, this is all of the SetArgumentDefinitely nodes for the arguments in the machine code block
    // that survived DCE. All of them except maybe "this" will survive DCE, because of the Flush
    // nodes. In SSA, this has no meaning. It's empty.
    HashMap<BasicBlock*, ArgumentsVector> m_rootToArguments;

    // In SSA, this is the argument speculation that we've locked in for an entrypoint block.
    //
    // We must speculate on the argument types at each entrypoint even if operations involving
    // arguments get killed. For example:
    //
    //     function foo(x) {
    //         var tmp = x + 1;
    //     }
    //
    // Assume that x is always int during profiling. The ArithAdd for "x + 1" will be dead and will
    // have a proven check for the edge to "x". So, we will not insert a Check node and we will
    // kill the GetStack for "x". But we must do the int check in the prologue, because that's the
    // thing we used to allow DCE of ArithAdd. Otherwise the add could be impure:
    //
    //     var o = {
    //         valueOf: function() { do side effects; }
    //     };
    //     foo(o);
    //
    // If we DCE the ArithAdd and we remove the int check on x, then this won't do the side
    // effects.
    //
    // By convention, entrypoint index 0 is used for the CodeBlock's op_enter entrypoint.
    // So argumentFormats[0] are the argument formats for the normal call entrypoint.
    Vector<Vector<FlushFormat>> m_argumentFormats;

    SegmentedVector<VariableAccessData, 16> m_variableAccessData;
    SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
    Bag<Transition> m_transitions;
    Bag<BranchData> m_branchData;
    Bag<SwitchData> m_switchData;
    Bag<MultiGetByOffsetData> m_multiGetByOffsetData;
    Bag<MultiPutByOffsetData> m_multiPutByOffsetData;
    Bag<MatchStructureData> m_matchStructureData;
    Bag<ObjectMaterializationData> m_objectMaterializationData;
    Bag<CallVarargsData> m_callVarargsData;
    Bag<LoadVarargsData> m_loadVarargsData;
    Bag<StackAccessData> m_stackAccessData;
    Bag<LazyJSValue> m_lazyJSValues;
    Bag<CallDOMGetterData> m_callDOMGetterData;
    Bag<BitVector> m_bitVectors;
    Vector<InlineVariableData, 4> m_inlineVariableData;
    HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>> m_bytecodeLiveness;
    HashMap<CodeBlock*, std::unique_ptr<BytecodeKills>> m_bytecodeKills;
    HashSet<std::pair<JSObject*, PropertyOffset>> m_safeToLoad;
    Vector<Ref<Snippet>> m_domJITSnippets;
    std::unique_ptr<CPSDominators> m_cpsDominators;
    std::unique_ptr<SSADominators> m_ssaDominators;
    std::unique_ptr<CPSNaturalLoops> m_cpsNaturalLoops;
    std::unique_ptr<SSANaturalLoops> m_ssaNaturalLoops;
    std::unique_ptr<SSACFG> m_ssaCFG;
    std::unique_ptr<CPSCFG> m_cpsCFG;
    std::unique_ptr<BackwardsCFG> m_backwardsCFG;
    std::unique_ptr<BackwardsDominators> m_backwardsDominators;
    std::unique_ptr<ControlEquivalenceAnalysis> m_controlEquivalenceAnalysis;
    unsigned m_localVars;
    unsigned m_nextMachineLocal;
    unsigned m_parameterSlots;

    // This is the number of logical entrypoints that we're compiling. This is only used
    // in SSA. Each EntrySwitch node must have m_numberOfEntrypoints cases. Note, this is
    // not the same as m_roots.size(). m_roots.size() represents the number of roots in
    // the CFG. In SSA, m_roots.size() == 1 even if we're compiling more than one entrypoint.
    unsigned m_numberOfEntrypoints { UINT_MAX };

    // This maps an entrypoint index to a particular op_catch bytecode offset. By convention,
    // it'll never have zero as a key because we use zero to mean the op_enter entrypoint.
    HashMap<unsigned, BytecodeIndex> m_entrypointIndexToCatchBytecodeIndex;

    HashSet<String> m_localStrings;
    HashMap<const StringImpl*, String> m_copiedStrings;

#if USE(JSVALUE32_64)
    StdUnorderedMap<int64_t, double*> m_doubleConstantsMap;
    std::unique_ptr<Bag<double>> m_doubleConstants;
#endif

    OptimizationFixpointState m_fixpointState;
    StructureRegistrationState m_structureRegistrationState;
    GraphForm m_form;
    UnificationState m_unificationState;
    PlanStage m_planStage { PlanStage::Initial };
    RefCountState m_refCountState;
    bool m_hasDebuggerEnabled;
    bool m_hasExceptionHandlers { false };
    bool m_isInSSAConversion { false };
    Optional<uint32_t> m_maxLocalsForCatchOSREntry;
    std::unique_ptr<FlowIndexing> m_indexingCache;
    std::unique_ptr<FlowMap<AbstractValue>> m_abstractValuesCache;
    Bag<EntrySwitchData> m_entrySwitchData;

    RegisteredStructure stringStructure;
    RegisteredStructure symbolStructure;

    HashSet<Node*> m_slowGetByVal;

private:
    bool isStringPrototypeMethodSane(JSGlobalObject*, UniquedStringImpl*);

    void handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock*, BasicBlock* successor);

    AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* operand, Node* immediate, RareCaseProfilingSource source)
    {
        ASSERT(immediate->hasConstant());

        JSValue immediateValue = immediate->asJSValue();
        if (!immediateValue.isNumber() && !immediateValue.isBoolean())
            return DontSpeculateInt32;

        if (!variableShouldSpeculateInt32)
            return DontSpeculateInt32;

        // Integer constants can be typed Double if they are written like a double in the source code (e.g. 42.0).
        // In that case, we stay conservative unless the other operand was explicitly typed as integer.
        NodeFlags operandResultType = operand->result();
        if (operandResultType != NodeResultInt32 && immediateValue.isDouble())
            return DontSpeculateInt32;

        if (immediateValue.isBoolean() || jsNumber(immediateValue.asNumber()).isInt32())
            return add->canSpeculateInt32(source) ? SpeculateInt32 : DontSpeculateInt32;

        double doubleImmediate = immediateValue.asDouble();
        const double twoToThe48 = 281474976710656.0;
        if (doubleImmediate < -twoToThe48 || doubleImmediate > twoToThe48)
            return DontSpeculateInt32;

        return bytecodeCanTruncateInteger(add->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants : DontSpeculateInt32;
    }

    B3::SparseCollection<Node> m_nodes;
    SegmentedVector<RegisteredStructureSet, 16> m_structureSets;
    Prefix m_prefix;
};

} } // namespace JSC::DFG

#endif