/*
 * Copyright (C) 2015-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGArgumentsEliminationPhase.h"

#if ENABLE(DFG_JIT)

#include "ArrayPrototype.h"
#include "BytecodeLivenessAnalysisInlines.h"
#include "ClonedArguments.h"
#include "DFGArgumentsUtilities.h"
#include "DFGBasicBlockInlines.h"
#include "DFGBlockMapInlines.h"
#include "DFGClobberize.h"
#include "DFGCombinedLiveness.h"
#include "DFGForAllKills.h"
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGPhase.h"
#include "JSCInlines.h"
#include <wtf/HashSet.h>
#include <wtf/ListDump.h>
#include <wtf/RecursableLambda.h>

namespace JSC { namespace DFG {

namespace {

namespace DFGArgumentsEliminationPhaseInternal {
static const bool verbose = false;
}

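// This phase turns arguments-like allocations (DirectArguments, ClonedArguments, rest
// parameters, and arrays built by spreading them) into phantom allocations when they
// don't escape, so that uses of them can read the stack directly. As an illustrative
// example (not from the original sources), in
//
//     function foo() { return bar.apply(null, arguments); }
//
// the arguments object never needs to be materialized: the varargs call can forward
// the caller's stack slots.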
class ArgumentsEliminationPhase : public Phase {
public:
    ArgumentsEliminationPhase(Graph& graph)
        : Phase(graph, "arguments elimination")
    {
    }
    
    bool run()
    {
        // For now this phase only works on SSA. This could be changed; we could have a block-local
        // version over LoadStore.
        DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA);
        
        if (DFGArgumentsEliminationPhaseInternal::verbose) {
            dataLog("Graph before arguments elimination:\n");
            m_graph.dump();
        }
        
        identifyCandidates();
        if (m_candidates.isEmpty())
            return false;
        
        eliminateCandidatesThatEscape();
        if (m_candidates.isEmpty())
            return false;
        
        eliminateCandidatesThatInterfere();
        if (m_candidates.isEmpty())
            return false;
        
        transform();
        
        return true;
    }
    
private:
    // Just finds nodes that we know how to work with.
    void identifyCandidates()
    {
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            for (Node* node : *block) {
                switch (node->op()) {
                case CreateDirectArguments:
                case CreateClonedArguments:
                    m_candidates.add(node);
                    break;
                    
                case CreateRest:
                    if (m_graph.isWatchingHavingABadTimeWatchpoint(node)) {
                        // If we're watching the HavingABadTime watchpoint it means that we will be invalidated
                        // when it fires (it may or may not have actually fired yet). We don't try to eliminate
                        // this allocation when we're not watching the watchpoint because it could entail calling
                        // indexed accessors (and probably more crazy things) on out-of-bounds accesses to the
                        // rest parameter. It's also much easier to reason about this way.
                        m_candidates.add(node);
                    }
                    break;
                    
                case Spread:
                    if (m_graph.isWatchingHavingABadTimeWatchpoint(node)) {
                        // We check ArrayUse here because ArrayUse indicates that the iterator
                        // protocol for Arrays is non-observable by user code (e.g., it hasn't
                        // been changed).
                        if (node->child1().useKind() == ArrayUse) {
                            if ((node->child1()->op() == CreateRest || node->child1()->op() == NewArrayBuffer) && m_candidates.contains(node->child1().node()))
                                m_candidates.add(node);
                        }
                    }
                    break;
                    
                case NewArrayWithSpread: {
                    if (m_graph.isWatchingHavingABadTimeWatchpoint(node)) {
                        BitVector* bitVector = node->bitVector();
                        // We only allow Spreads of CreateRest or NewArrayBuffer nodes for now.
                        bool isOK = true;
                        for (unsigned i = 0; i < node->numChildren(); i++) {
                            if (bitVector->get(i)) {
                                Node* child = m_graph.varArgChild(node, i).node();
                                isOK = child->op() == Spread && (child->child1()->op() == CreateRest || child->child1()->op() == NewArrayBuffer) && m_candidates.contains(child);
                                if (!isOK)
                                    break;
                            }
                        }
                        
                        if (!isOK)
                            break;
                        
                        m_candidates.add(node);
                    }
                    break;
                }
                    
                case NewArrayBuffer: {
                    if (m_graph.isWatchingHavingABadTimeWatchpoint(node) && !hasAnyArrayStorage(node->indexingMode()))
                        m_candidates.add(node);
                    break;
                }
                    
                case CreateScopedArguments:
                    // FIXME: We could handle this if it wasn't for the fact that scoped arguments are
                    // always stored into the activation.
                    // https://bugs.webkit.org/show_bug.cgi?id=143072 and
                    // https://bugs.webkit.org/show_bug.cgi?id=143073
                    break;
                    
                default:
                    break;
                }
                if (node->isPseudoTerminal())
                    break;
            }
        }
        
        if (DFGArgumentsEliminationPhaseInternal::verbose)
            dataLog("Candidates: ", listDump(m_candidates), "\n");
    }
    
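    // A Spread is only a useful candidate while the allocation it spreads remains a
    // candidate, and a NewArrayWithSpread is only useful while all of its spread
    // children remain candidates. This checks that invariant.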
    bool isStillValidCandidate(Node* candidate)
    {
        switch (candidate->op()) {
        case Spread:
            return m_candidates.contains(candidate->child1().node());
            
        case NewArrayWithSpread: {
            BitVector* bitVector = candidate->bitVector();
            for (unsigned i = 0; i < candidate->numChildren(); i++) {
                if (bitVector->get(i)) {
                    if (!m_candidates.contains(m_graph.varArgChild(candidate, i).node()))
                        return false;
                }
            }
            return true;
        }
            
        default:
            return true;
        }
        
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }
    
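    // Removing one candidate can invalidate other candidates that spread from it, so
    // we iterate removal to a fixpoint.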
    void removeInvalidCandidates()
    {
        bool changed;
        do {
            changed = false;
            Vector<Node*, 1> toRemove;
            
            for (Node* candidate : m_candidates) {
                if (!isStillValidCandidate(candidate))
                    toRemove.append(candidate);
            }
            
            if (toRemove.size()) {
                changed = true;
                for (Node* node : toRemove)
                    m_candidates.remove(node);
            }
            
        } while (changed);
    }
    
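    // Removes a candidate, then prunes any candidates that were only valid because the
    // removed one was a candidate.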
    void transitivelyRemoveCandidate(Node* node, Node* source = nullptr)
    {
        bool removed = m_candidates.remove(node);
        if (removed && DFGArgumentsEliminationPhaseInternal::verbose && source)
            dataLog("eliminating candidate: ", node, " because it escapes from: ", source, "\n");
        
        if (removed)
            removeInvalidCandidates();
    }
    
    // Look for escaping sites, and remove from the candidates set if we see an escape.
    void eliminateCandidatesThatEscape()
    {
        auto escape = [&] (Edge edge, Node* source) {
            if (!edge)
                return;
            transitivelyRemoveCandidate(edge.node(), source);
        };
        
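        // An array access keeps the allocation as a candidate only if it's an in-bounds
        // access of a matching allocation, or an out-of-bounds access where the relevant
        // prototype chains are sane and watchpointed; anything else counts as an escape.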
        auto escapeBasedOnArrayMode = [&] (ArrayMode mode, Edge edge, Node* source) {
            switch (mode.type()) {
            case Array::DirectArguments: {
                if (edge->op() != CreateDirectArguments) {
                    escape(edge, source);
                    break;
                }
                
                // Everything is fine if we're doing an in-bounds access.
                if (mode.isInBounds())
                    break;
                
                // If we're out-of-bounds then we proceed only if the prototype chain
                // for the allocation is sane (i.e. doesn't have indexed properties).
                JSGlobalObject* globalObject = m_graph.globalObjectFor(edge->origin.semantic);
                Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(m_graph.m_vm);
                if (objectPrototypeStructure->transitionWatchpointSetIsStillValid()
                    && globalObject->objectPrototypeIsSane()) {
                    m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
                    break;
                }
                escape(edge, source);
                break;
            }
            
            case Array::Contiguous: {
                if (edge->op() != CreateClonedArguments && edge->op() != CreateRest) {
                    escape(edge, source);
                    break;
                }
                
                // Everything is fine if we're doing an in-bounds access.
                if (mode.isInBounds())
                    break;
                
                // If we're out-of-bounds then we proceed only if the prototype chain
                // for the allocation is sane (i.e. doesn't have indexed properties).
                JSGlobalObject* globalObject = m_graph.globalObjectFor(edge->origin.semantic);
                Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(m_graph.m_vm);
                if (edge->op() == CreateRest) {
                    Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(m_graph.m_vm);
                    if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
                        && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
                        && globalObject->arrayPrototypeChainIsSane()) {
                        m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
                        m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
                        break;
                    }
                } else {
                    if (objectPrototypeStructure->transitionWatchpointSetIsStillValid()
                        && globalObject->objectPrototypeIsSane()) {
                        m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
                        break;
                    }
                }
                escape(edge, source);
                break;
            }
            
            case Array::ForceExit:
                break;
            
            default:
                escape(edge, source);
                break;
            }
        };

        removeInvalidCandidates();
        
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                switch (node->op()) {
                case GetFromArguments:
                    break;
                    
                case GetByVal:
                    escapeBasedOnArrayMode(node->arrayMode(), m_graph.varArgChild(node, 0), node);
                    escape(m_graph.varArgChild(node, 1), node);
                    escape(m_graph.varArgChild(node, 2), node);
                    break;
                    
                case GetArrayLength:
                    escape(node->child2(), node);
                    // Computing the length of a NewArrayWithSpread can require some additions.
                    // These additions can overflow if the array is sufficiently enormous, and in that case we will need to exit.
                    if ((node->child1()->op() == NewArrayWithSpread) && !node->origin.exitOK)
                        escape(node->child1(), node);
                    break;
                    
                case NewArrayWithSpread: {
                    BitVector* bitVector = node->bitVector();
                    bool isWatchingHavingABadTimeWatchpoint = m_graph.isWatchingHavingABadTimeWatchpoint(node);
                    for (unsigned i = 0; i < node->numChildren(); i++) {
                        Edge child = m_graph.varArgChild(node, i);
                        bool dontEscape;
                        if (bitVector->get(i)) {
                            dontEscape = child->op() == Spread
                                && child->child1().useKind() == ArrayUse
                                && (child->child1()->op() == CreateRest || child->child1()->op() == NewArrayBuffer)
                                && isWatchingHavingABadTimeWatchpoint;
                        } else
                            dontEscape = false;
                        
                        if (!dontEscape)
                            escape(child, node);
                    }
                    
                    break;
                }
                    
                case Spread: {
                    bool isOK = node->child1().useKind() == ArrayUse && (node->child1()->op() == CreateRest || node->child1()->op() == NewArrayBuffer);
                    if (!isOK)
                        escape(node->child1(), node);
                    break;
                }
                    
                case NewArrayBuffer:
                    break;
                    
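                // A LoadVarargs at a non-zero offset over a spread-created allocation would
                // have to skip elements in a way the forwarding transform below doesn't
                // model, so we treat that as an escape.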
                case LoadVarargs:
                    if (node->loadVarargsData()->offset && (node->child1()->op() == NewArrayWithSpread || node->child1()->op() == Spread || node->child1()->op() == NewArrayBuffer))
                        escape(node->child1(), node);
                    break;
                    
                case CallVarargs:
                case ConstructVarargs:
                case TailCallVarargs:
                case TailCallVarargsInlinedCaller:
                    escape(node->child1(), node);
                    escape(node->child2(), node);
                    if (node->callVarargsData()->firstVarArgOffset && (node->child3()->op() == NewArrayWithSpread || node->child3()->op() == Spread || node->child3()->op() == NewArrayBuffer))
                        escape(node->child3(), node);
                    break;

                case Check:
                case CheckVarargs:
                    m_graph.doToChildren(
                        node,
                        [&] (Edge edge) {
                            if (edge.willNotHaveCheck())
                                return;
                            
                            if (alreadyChecked(edge.useKind(), SpecObject))
                                return;
                            
                            escape(edge, node);
                        });
                    break;
                    
                case MovHint:
                case PutHint:
                    break;
                    
                case GetButterfly:
                    // This barely works. The danger is that the GetButterfly is used by something that
                    // does something escaping to a candidate. Fortunately, the only butterfly-using ops
                    // that we exempt here also use the candidate directly. If there ever was a
                    // butterfly-using op that we wanted to exempt, then we'd have to look at the
                    // butterfly's child and check if it's a candidate.
                    break;
                    
                case FilterGetByIdStatus:
                case FilterPutByIdStatus:
                case FilterCallLinkStatus:
                case FilterInByIdStatus:
                    break;
                    
                case CheckArray:
                    escapeBasedOnArrayMode(node->arrayMode(), node->child1(), node);
                    break;
                    
                case CheckStructureOrEmpty:
                case CheckStructure: {
                    Node* target = node->child1().node();
                    if (!m_candidates.contains(target))
                        break;
                    
                    Structure* structure = nullptr;
                    JSGlobalObject* globalObject = m_graph.globalObjectFor(target->origin.semantic);
                    switch (target->op()) {
                    case CreateDirectArguments:
                        structure = globalObject->directArgumentsStructure();
                        break;
                    case CreateClonedArguments:
                        structure = globalObject->clonedArgumentsStructure();
                        break;
                    case CreateRest:
                        ASSERT(m_graph.isWatchingHavingABadTimeWatchpoint(target));
                        structure = globalObject->restParameterStructure();
                        break;
                    case NewArrayWithSpread:
                        ASSERT(m_graph.isWatchingHavingABadTimeWatchpoint(target));
                        structure = globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous);
                        break;
                    case NewArrayBuffer: {
                        ASSERT(m_graph.isWatchingHavingABadTimeWatchpoint(target));
                        IndexingType indexingMode = target->indexingMode();
                        ASSERT(!hasAnyArrayStorage(indexingMode));
                        structure = globalObject->originalArrayStructureForIndexingType(indexingMode);
                        break;
                    }
                    default:
                        RELEASE_ASSERT_NOT_REACHED();
                    }
                    ASSERT(structure);
                    
                    if (!node->structureSet().contains(m_graph.registerStructure(structure)))
                        escape(node->child1(), node);
                    break;
                }
                    
                // FIXME: We should be able to handle GetById/GetByOffset on callee.
                // https://bugs.webkit.org/show_bug.cgi?id=143075
                    
                case GetByOffset:
                    if (node->child2()->op() == CreateClonedArguments && node->storageAccessData().offset == clonedArgumentsLengthPropertyOffset)
                        break;
                    FALLTHROUGH;
                default:
                    m_graph.doToChildren(node, [&] (Edge edge) { return escape(edge, node); });
                    break;
                }
                if (node->isPseudoTerminal())
                    break;
            }
        }
        
        if (DFGArgumentsEliminationPhaseInternal::verbose)
            dataLog("After escape analysis: ", listDump(m_candidates), "\n");
    }

    // Anywhere that a candidate is live (in bytecode or in DFG), check if there is a chance of
    // interference between the stack area that the arguments object copies from and the arguments
    // object's payload. Conservatively this means that the stack region doesn't get stored to.
    void eliminateCandidatesThatInterfere()
    {
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_graph.initializeNodeOwners();
        CombinedLiveness combinedLiveness(m_graph);
        
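        // First compute, per block, the set of stack operands the block writes. A write
        // to the stack region that an arguments object would copy from is exactly the
        // interference we're looking for.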
        BlockMap<Operands<bool>> clobberedByBlock(m_graph);
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            Operands<bool>& clobberedByThisBlock = clobberedByBlock[block];
            clobberedByThisBlock = Operands<bool>(OperandsLike, m_graph.block(0)->variablesAtHead);
            for (Node* node : *block) {
                clobberize(
                    m_graph, node, NoOpClobberize(),
                    [&] (AbstractHeap heap) {
                        if (heap.kind() != Stack) {
                            ASSERT(!heap.overlaps(Stack));
                            return;
                        }
                        ASSERT(!heap.payload().isTop());
                        VirtualRegister reg(heap.payload().value32());
                        // The register may not point to an argument or local, for example if we are looking at SetArgumentCountIncludingThis.
                        if (!reg.isHeader())
                            clobberedByThisBlock.operand(reg) = true;
                    },
                    NoOpClobberize());
            }
        }
        
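        // For each candidate, collect the inline call frames that it (or anything it
        // transitively spreads from) reads arguments out of, so we know which stack
        // slots to check for clobbers.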
        using InlineCallFrames = HashSet<InlineCallFrame*, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>>;
        using InlineCallFramesForCandidates = HashMap<Node*, InlineCallFrames>;
        InlineCallFramesForCandidates inlineCallFramesForCandidate;
        auto forEachDependentNode = recursableLambda([&](auto self, Node* node, const auto& functor) -> void {
            functor(node);
            
            if (node->op() == Spread) {
                self(node->child1().node(), functor);
                return;
            }
            
            if (node->op() == NewArrayWithSpread) {
                BitVector* bitVector = node->bitVector();
                for (unsigned i = node->numChildren(); i--; ) {
                    if (bitVector->get(i))
                        self(m_graph.varArgChild(node, i).node(), functor);
                }
                return;
            }
        });
        for (Node* candidate : m_candidates) {
            auto& set = inlineCallFramesForCandidate.add(candidate, InlineCallFrames()).iterator->value;
            forEachDependentNode(candidate, [&](Node* dependent) {
                set.add(dependent->origin.semantic.inlineCallFrame());
            });
        }
        
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            // Stop if we've already removed all candidates.
            if (m_candidates.isEmpty())
                return;
            
            // Ignore blocks that don't write to the stack.
            bool writesToStack = false;
            for (unsigned i = clobberedByBlock[block].size(); i--;) {
                if (clobberedByBlock[block][i]) {
                    writesToStack = true;
                    break;
                }
            }
            if (!writesToStack)
                continue;
            
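            // At each point where a candidate dies, check whether any store earlier in
            // this block could have clobbered the stack slots that the candidate would
            // recover its contents from.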
            forAllKillsInBlock(
                m_graph, combinedLiveness, block,
                [&] (unsigned nodeIndex, Node* candidate) {
                    if (!m_candidates.contains(candidate))
                        return;
                    
                    for (InlineCallFrame* inlineCallFrame : inlineCallFramesForCandidate.get(candidate)) {
                        // Check if this block has any clobbers that affect this candidate. This is a fairly
                        // fast check.
                        bool isClobberedByBlock = false;
                        Operands<bool>& clobberedByThisBlock = clobberedByBlock[block];
                        
                        if (inlineCallFrame) {
                            if (inlineCallFrame->isVarargs()) {
                                isClobberedByBlock |= clobberedByThisBlock.operand(
                                    inlineCallFrame->stackOffset + CallFrameSlot::argumentCount);
                            }
                            
                            if (!isClobberedByBlock || inlineCallFrame->isClosureCall) {
                                isClobberedByBlock |= clobberedByThisBlock.operand(
                                    inlineCallFrame->stackOffset + CallFrameSlot::callee);
                            }
                            
                            if (!isClobberedByBlock) {
                                for (unsigned i = 0; i < inlineCallFrame->argumentCountIncludingThis - 1; ++i) {
                                    VirtualRegister reg =
                                        VirtualRegister(inlineCallFrame->stackOffset) +
                                        CallFrame::argumentOffset(i);
                                    if (clobberedByThisBlock.operand(reg)) {
                                        isClobberedByBlock = true;
                                        break;
                                    }
                                }
                            }
                        } else {
                            // We don't include the ArgumentCount or Callee in this case because we can be
                            // damn sure that this won't be clobbered.
                            for (unsigned i = 1; i < static_cast<unsigned>(codeBlock()->numParameters()); ++i) {
                                if (clobberedByThisBlock.argument(i)) {
                                    isClobberedByBlock = true;
                                    break;
                                }
                            }
                        }
                        
                        if (!isClobberedByBlock)
                            continue;
                        
                        // Check if we can immediately eliminate this candidate. If the block has a clobber
                        // for this arguments allocation, and we'd have to examine every node in the block,
                        // then we can just eliminate the candidate.
                        if (nodeIndex == block->size() && candidate->owner != block) {
                            if (DFGArgumentsEliminationPhaseInternal::verbose)
                                dataLog("eliminating candidate: ", candidate, " because it is clobbered by something in ", *block, "\n");
                            transitivelyRemoveCandidate(candidate);
                            return;
                        }

                        // This loop considers all nodes up to the nodeIndex, excluding the nodeIndex.
                        while (nodeIndex--) {
                            Node* node = block->at(nodeIndex);
                            if (node == candidate)
                                break;
                            
                            bool found = false;
                            clobberize(
                                m_graph, node, NoOpClobberize(),
                                [&] (AbstractHeap heap) {
                                    if (heap.kind() == Stack && !heap.payload().isTop()) {
                                        if (argumentsInvolveStackSlot(inlineCallFrame, VirtualRegister(heap.payload().value32())))
                                            found = true;
                                        return;
                                    }
                                    if (heap.overlaps(Stack))
                                        found = true;
                                },
                                NoOpClobberize());
                            
                            if (found) {
                                if (DFGArgumentsEliminationPhaseInternal::verbose)
                                    dataLog("eliminating candidate: ", candidate, " because it is clobbered by ", block->at(nodeIndex), "\n");
                                transitivelyRemoveCandidate(candidate);
                                return;
                            }
                        }
                    }
                });
        }

        // Q: How do we handle OSR exit with a live PhantomArguments at a point where the inline call
        // frame is dead? A: Naively we could say that PhantomArguments must escape the stack slots. But
        // that would break PutStack sinking, which in turn would break object allocation sinking, in
        // cases where we have a varargs call to an otherwise pure method. So, we need something smarter.
        // For the outermost arguments, we just have a PhantomArguments that magically knows that it
        // should load the arguments from the call frame. For the inline arguments, we have the heap map
        // in the availability map track each possible inline argument as a promoted heap location. If the
        // PutStacks for those arguments aren't sunk, those heap locations will map to very trivial
        // availabilities (they will be flush availabilities). But if sinking happens then those
        // availabilities may become whatever. OSR exit should be able to handle this quite naturally,
        // since those availabilities speak of the stack before the optimizing compiler stack frame is
        // torn down.
        
        if (DFGArgumentsEliminationPhaseInternal::verbose)
            dataLog("After interference analysis: ", listDump(m_candidates), "\n");
    }
    
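    // Convert the surviving candidates to their phantom forms and rewrite their users
    // to fetch what they need directly from the stack.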
    void transform()
    {
        bool modifiedCFG = false;
        
        InsertionSet insertionSet(m_graph);
        
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            Node* pseudoTerminal = nullptr;
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                
                auto getArrayLength = [&] (Node* candidate) -> Node* {
                    return emitCodeToGetArgumentsArrayLength(
                        insertionSet, candidate, nodeIndex, node->origin);
                };
                
                auto isEliminatedAllocation = [&] (Node* candidate) -> bool {
                    if (!m_candidates.contains(candidate))
                        return false;
                    // We traverse in such a way that we are guaranteed to see a def before a use.
                    // Therefore, we should have already transformed the allocation before the use
                    // of an allocation.
                    ASSERT(candidate->op() == PhantomCreateRest || candidate->op() == PhantomDirectArguments || candidate->op() == PhantomClonedArguments
                        || candidate->op() == PhantomSpread || candidate->op() == PhantomNewArrayWithSpread || candidate->op() == PhantomNewArrayBuffer);
                    return true;
                };
                
                switch (node->op()) {
                case CreateDirectArguments:
                    if (!m_candidates.contains(node))
                        break;
                    
                    node->setOpAndDefaultFlags(PhantomDirectArguments);
                    break;
                    
                case CreateRest:
                    if (!m_candidates.contains(node))
                        break;
                    
                    ASSERT(node->origin.exitOK);
                    ASSERT(node->child1().useKind() == Int32Use);
                    insertionSet.insertNode(
                        nodeIndex, SpecNone, Check, node->origin,
                        node->child1());
                    
                    node->setOpAndDefaultFlags(PhantomCreateRest);
                    // We don't need this parameter for OSR exit, we can find out all the information
                    // we need via the static parameter count and the dynamic argument count.
                    node->child1() = Edge();
                    break;
                    
                case CreateClonedArguments:
                    if (!m_candidates.contains(node))
                        break;
                    
                    node->setOpAndDefaultFlags(PhantomClonedArguments);
                    break;
                    
                case Spread:
                    if (!m_candidates.contains(node))
                        break;
                    
                    node->setOpAndDefaultFlags(PhantomSpread);
                    break;
                    
                case NewArrayWithSpread:
                    if (!m_candidates.contains(node))
                        break;
                    
                    node->setOpAndDefaultFlags(PhantomNewArrayWithSpread);
                    break;
                    
                case NewArrayBuffer:
                    if (!m_candidates.contains(node))
                        break;
                    
                    node->setOpAndDefaultFlags(PhantomNewArrayBuffer);
                    node->child1() = Edge(insertionSet.insertConstant(nodeIndex, node->origin, node->cellOperand()));
                    break;
                    
                case GetFromArguments: {
                    Node* candidate = node->child1().node();
                    if (!isEliminatedAllocation(candidate))
                        break;
                    
                    DFG_ASSERT(
                        m_graph, node, node->child1()->op() == PhantomDirectArguments, node->child1()->op());
                    VirtualRegister reg =
                        virtualRegisterForArgument(node->capturedArgumentsOffset().offset() + 1) +
                        node->origin.semantic.stackOffset();
                    StackAccessData* data = m_graph.m_stackAccessData.add(reg, FlushedJSValue);
                    node->convertToGetStack(data);
                    break;
                }
                    
                case GetByOffset: {
                    Node* candidate = node->child2().node();
                    if (!isEliminatedAllocation(candidate))
                        break;
                    
                    ASSERT(candidate->op() == PhantomClonedArguments);
                    ASSERT(node->storageAccessData().offset == clonedArgumentsLengthPropertyOffset);
                    
                    // Meh, this is kind of hackish - we use an Identity so that we can reuse the
                    // getArrayLength() helper.
                    node->convertToIdentityOn(getArrayLength(candidate));
                    break;
                }
                    
                case GetArrayLength: {
                    Node* candidate = node->child1().node();
                    if (!isEliminatedAllocation(candidate))
                        break;
                    
                    node->convertToIdentityOn(getArrayLength(candidate));
                    break;
                }
                    
                case GetByVal: {
                    // FIXME: For ClonedArguments, we would have already done a separate bounds check.
                    // This code will cause us to have two bounds checks - the original one that we
                    // already factored out in SSALoweringPhase, and the new one we insert here, which is
                    // often implicitly part of GetMyArgumentByVal. B3 will probably eliminate the
                    // second bounds check, but still - that's just silly.
                    // https://bugs.webkit.org/show_bug.cgi?id=143076
                    
                    Node* candidate = m_graph.varArgChild(node, 0).node();
                    if (!isEliminatedAllocation(candidate))
                        break;
                    
                    unsigned numberOfArgumentsToSkip = 0;
                    if (candidate->op() == PhantomCreateRest)
                        numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
                    
                    Node* result = nullptr;
                    if (m_graph.varArgChild(node, 1)->isInt32Constant()) {
                        unsigned index = m_graph.varArgChild(node, 1)->asUInt32();
                        InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
                        index += numberOfArgumentsToSkip;
                        
                        bool safeToGetStack = index >= numberOfArgumentsToSkip;
                        if (inlineCallFrame)
                            safeToGetStack &= index < inlineCallFrame->argumentCountIncludingThis - 1;
                        else {
                            safeToGetStack &=
                                index < static_cast<unsigned>(codeBlock()->numParameters()) - 1;
                        }
                        if (safeToGetStack) {
                            StackAccessData* data;
                            VirtualRegister arg = virtualRegisterForArgument(index + 1);
                            if (inlineCallFrame)
                                arg += inlineCallFrame->stackOffset;
                            data = m_graph.m_stackAccessData.add(arg, FlushedJSValue);
                            
                            Node* check = nullptr;
                            if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
                                check = insertionSet.insertNode(
                                    nodeIndex, SpecNone, CheckInBounds, node->origin,
                                    m_graph.varArgChild(node, 1), Edge(getArrayLength(candidate), Int32Use));
                            }
                            
                            result = insertionSet.insertNode(
                                nodeIndex, node->prediction(), GetStack, node->origin, OpInfo(data), Edge(check, UntypedUse));
                        }
                    }
                    
                    if (!result) {
                        NodeType op;
                        if (node->arrayMode().isInBounds())
                            op = GetMyArgumentByVal;
                        else
                            op = GetMyArgumentByValOutOfBounds;
                        result = insertionSet.insertNode(
                            nodeIndex, node->prediction(), op, node->origin, OpInfo(numberOfArgumentsToSkip),
                            m_graph.varArgChild(node, 0), m_graph.varArgChild(node, 1));
                    }
                    
                    // Need to do this because we may have a data format conversion here.
                    node->convertToIdentityOn(result);
                    break;
                }
                    
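                // When the contents of the eliminated allocation are statically known - for
                // example (illustrative) forwarding a rest array via bar(...rest) from a
                // known inlined call site - the LoadVarargs can be replaced with explicit
                // MovHint/PutStack stores of each argument; otherwise it becomes
                // ForwardVarargs.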
                case LoadVarargs: {
                    Node* candidate = node->child1().node();
                    if (!isEliminatedAllocation(candidate))
                        break;
                    
                    // LoadVarargs can exit, so it better be exitOK.
                    DFG_ASSERT(m_graph, node, node->origin.exitOK);
                    bool canExit = true;
                    LoadVarargsData* varargsData = node->loadVarargsData();
                    
                    auto storeArgumentCountIncludingThis = [&] (unsigned argumentCountIncludingThis) {
                        Node* argumentCountIncludingThisNode = insertionSet.insertConstant(
                            nodeIndex, node->origin.withExitOK(canExit),
                            jsNumber(argumentCountIncludingThis));
                        insertionSet.insertNode(
                            nodeIndex, SpecNone, MovHint, node->origin.takeValidExit(canExit),
                            OpInfo(varargsData->count.offset()), Edge(argumentCountIncludingThisNode));
                        insertionSet.insertNode(
                            nodeIndex, SpecNone, PutStack, node->origin.withExitOK(canExit),
                            OpInfo(m_graph.m_stackAccessData.add(varargsData->count, FlushedInt32)),
                            Edge(argumentCountIncludingThisNode, KnownInt32Use));
                    };
                    
                    auto storeValue = [&] (Node* value, unsigned storeIndex) {
                        VirtualRegister reg = varargsData->start + storeIndex;
                        StackAccessData* data =
                            m_graph.m_stackAccessData.add(reg, FlushedJSValue);
                        
                        insertionSet.insertNode(
                            nodeIndex, SpecNone, MovHint, node->origin.takeValidExit(canExit),
                            OpInfo(reg.offset()), Edge(value));
                        insertionSet.insertNode(
                            nodeIndex, SpecNone, PutStack, node->origin.withExitOK(canExit),
                            OpInfo(data), Edge(value));
                    };
                    
                    if (candidate->op() == PhantomNewArrayWithSpread || candidate->op() == PhantomNewArrayBuffer || candidate->op() == PhantomSpread) {
                        auto canConvertToStaticLoadStores = recursableLambda([&] (auto self, Node* candidate) -> bool {
                            if (candidate->op() == PhantomSpread)
                                return self(candidate->child1().node());
                            
                            if (candidate->op() == PhantomNewArrayWithSpread) {
                                BitVector* bitVector = candidate->bitVector();
                                for (unsigned i = 0; i < candidate->numChildren(); i++) {
                                    if (bitVector->get(i)) {
                                        if (!self(m_graph.varArgChild(candidate, i).node()))
                                            return false;
                                    }
                                }
                                return true;
                            }
                            
                            // PhantomNewArrayBuffer only contains constants, so a LoadVarargs from it can always be converted to static loads and stores.
                            if (candidate->op() == PhantomNewArrayBuffer)
                                return true;
                            
                            ASSERT(candidate->op() == PhantomCreateRest);
                            InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
                            return inlineCallFrame && !inlineCallFrame->isVarargs();
                        });
                        
                        if (canConvertToStaticLoadStores(candidate)) {
                            auto countNumberOfArguments = recursableLambda([&](auto self, Node* candidate) -> unsigned {
                                if (candidate->op() == PhantomSpread)
                                    return self(candidate->child1().node());
                                
                                if (candidate->op() == PhantomNewArrayWithSpread) {
                                    BitVector* bitVector = candidate->bitVector();
                                    unsigned result = 0;
                                    for (unsigned i = 0; i < candidate->numChildren(); i++) {
                                        if (bitVector->get(i))
                                            result += self(m_graph.varArgChild(candidate, i).node());
                                        else
                                            ++result;
                                    }
                                    return result;
                                }
                                
                                if (candidate->op() == PhantomNewArrayBuffer)
                                    return candidate->castOperand<JSImmutableButterfly*>()->length();
                                
                                ASSERT(candidate->op() == PhantomCreateRest);
                                unsigned numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
                                InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
                                unsigned frameArgumentCount = inlineCallFrame->argumentCountIncludingThis - 1;
                                if (frameArgumentCount >= numberOfArgumentsToSkip)
                                    return frameArgumentCount - numberOfArgumentsToSkip;
                                return 0;
                            });
                            
                            unsigned argumentCountIncludingThis = 1 + countNumberOfArguments(candidate); // |this|
                            if (argumentCountIncludingThis <= varargsData->limit) {
                                storeArgumentCountIncludingThis(argumentCountIncludingThis);
                                
                                DFG_ASSERT(m_graph, node, varargsData->limit - 1 >= varargsData->mandatoryMinimum, varargsData->limit, varargsData->mandatoryMinimum);
                                // Define our limit to exclude "this", since that's a bit easier to reason about.
                                unsigned limit = varargsData->limit - 1;
                                
                                auto forwardNode = recursableLambda([&](auto self, Node* candidate, unsigned storeIndex) -> unsigned {
                                    if (candidate->op() == PhantomSpread)
                                        return self(candidate->child1().node(), storeIndex);
                                    
                                    if (candidate->op() == PhantomNewArrayWithSpread) {
                                        BitVector* bitVector = candidate->bitVector();
                                        for (unsigned i = 0; i < candidate->numChildren(); i++) {
                                            if (bitVector->get(i))
                                                storeIndex = self(m_graph.varArgChild(candidate, i).node(), storeIndex);
                                            else {
                                                Node* value = m_graph.varArgChild(candidate, i).node();
                                                storeValue(value, storeIndex++);
                                            }
                                        }
                                        return storeIndex;
                                    }
                                    
                                    if (candidate->op() == PhantomNewArrayBuffer) {
                                        auto* array = candidate->castOperand<JSImmutableButterfly*>();
                                        for (unsigned index = 0; index < array->length(); ++index) {
                                            JSValue constant;
                                            if (candidate->indexingType() == ArrayWithDouble)
                                                constant = jsDoubleNumber(array->get(index).asNumber());
                                            else
                                                constant = array->get(index);
                                            Node* value = insertionSet.insertConstant(nodeIndex, node->origin.withExitOK(canExit), constant);
                                            storeValue(value, storeIndex++);
                                        }
                                        return storeIndex;
                                    }
                                    
                                    ASSERT(candidate->op() == PhantomCreateRest);
                                    unsigned numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
                                    InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
                                    unsigned frameArgumentCount = inlineCallFrame->argumentCountIncludingThis - 1;
                                    for (unsigned loadIndex = numberOfArgumentsToSkip; loadIndex < frameArgumentCount; ++loadIndex) {
                                        VirtualRegister reg = virtualRegisterForArgument(loadIndex + 1) + inlineCallFrame->stackOffset;
                                        StackAccessData* data = m_graph.m_stackAccessData.add(reg, FlushedJSValue);
                                        Node* value = insertionSet.insertNode(
                                            nodeIndex, SpecNone, GetStack, node->origin.withExitOK(canExit),
                                            OpInfo(data));
                                        storeValue(value, storeIndex++);
                                    }
                                    return storeIndex;
                                });
                                
                                unsigned storeIndex = forwardNode(candidate, 0);
                                RELEASE_ASSERT(storeIndex <= limit);
                                Node* undefined = nullptr;
                                for (; storeIndex < limit; ++storeIndex) {
                                    if (!undefined) {
                                        undefined = insertionSet.insertConstant(
                                            nodeIndex, node->origin.withExitOK(canExit), jsUndefined());
                                    }
                                    storeValue(undefined, storeIndex);
                                }
                                
                                node->remove(m_graph);
                                node->origin.exitOK = canExit;
                                break;
                            }
                        }
                    } else {
                        unsigned numberOfArgumentsToSkip = 0;
                        if (candidate->op() == PhantomCreateRest)
                            numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
                        varargsData->offset += numberOfArgumentsToSkip;
                        
                        InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
                        
                        if (inlineCallFrame
                            && !inlineCallFrame->isVarargs()) {
                            
                            unsigned argumentCountIncludingThis = inlineCallFrame->argumentCountIncludingThis;
                            if (argumentCountIncludingThis > varargsData->offset)
                                argumentCountIncludingThis -= varargsData->offset;
                            else
                                argumentCountIncludingThis = 1;
                            RELEASE_ASSERT(argumentCountIncludingThis >= 1);
                            
                            if (argumentCountIncludingThis <= varargsData->limit) {
                                
                                storeArgumentCountIncludingThis(argumentCountIncludingThis);
                                
                                DFG_ASSERT(m_graph, node, varargsData->limit - 1 >= varargsData->mandatoryMinimum, varargsData->limit, varargsData->mandatoryMinimum);
                                // Define our limit to exclude "this", since that's a bit easier to reason about.
                                unsigned limit = varargsData->limit - 1;
                                Node* undefined = nullptr;
                                for (unsigned storeIndex = 0; storeIndex < limit; ++storeIndex) {
                                    // First determine if we have an element we can load, and load it if
                                    // possible.
                                    
                                    Node* value = nullptr;
                                    unsigned loadIndex = storeIndex + varargsData->offset;
                                    
                                    if (loadIndex + 1 < inlineCallFrame->argumentCountIncludingThis) {
                                        VirtualRegister reg = virtualRegisterForArgument(loadIndex + 1) + inlineCallFrame->stackOffset;
                                        StackAccessData* data = m_graph.m_stackAccessData.add(
                                            reg, FlushedJSValue);
                                        
                                        value = insertionSet.insertNode(
                                            nodeIndex, SpecNone, GetStack, node->origin.withExitOK(canExit),
                                            OpInfo(data));
                                    } else {
                                        // FIXME: We shouldn't have to store anything if
                                        // storeIndex >= varargsData->mandatoryMinimum, but we will still
                                        // have GetStacks in that range. So if we don't do the stores, we'll
                                        // have degenerate IR: we'll have GetStacks of something that didn't
                                        // have PutStacks.
                                        // https://bugs.webkit.org/show_bug.cgi?id=147434
                                        
                                        if (!undefined) {
                                            undefined = insertionSet.insertConstant(
                                                nodeIndex, node->origin.withExitOK(canExit), jsUndefined());
                                        }
                                        value = undefined;
                                    }
                                    
                                    // Now that we have a value, store it.
                                    storeValue(value, storeIndex);
                                }
                                
                                node->remove(m_graph);
                                node->origin.exitOK = canExit;
                                break;
                            }
                        }
                    }
                    
                    node->setOpAndDefaultFlags(ForwardVarargs);
                    break;
                }
                    
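                // A varargs call over an eliminated allocation either becomes an ordinary
                // call with an explicit argument list (when the argument count is
                // statically known) or becomes the ForwardVarargs flavor of the call,
                // which reads the arguments directly from the stack.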
                case CallVarargs:
                case ConstructVarargs:
                case TailCallVarargs:
                case TailCallVarargsInlinedCaller: {
                    Node* candidate = node->child3().node();
                    if (!isEliminatedAllocation(candidate))
                        break;
                    
                    auto convertToStaticArgumentCountCall = [&] (const Vector<Node*>& arguments) {
                        unsigned firstChild = m_graph.m_varArgChildren.size();
                        m_graph.m_varArgChildren.append(node->child1());
                        m_graph.m_varArgChildren.append(node->child2());
                        for (Node* argument : arguments)
                            m_graph.m_varArgChildren.append(Edge(argument));
                        switch (node->op()) {
                        case CallVarargs:
                            node->setOpAndDefaultFlags(Call);
                            break;
                        case ConstructVarargs:
                            node->setOpAndDefaultFlags(Construct);
                            break;
                        case TailCallVarargs:
                            node->setOpAndDefaultFlags(TailCall);
                            break;
                        case TailCallVarargsInlinedCaller:
                            node->setOpAndDefaultFlags(TailCallInlinedCaller);
                            break;
                        default:
                            RELEASE_ASSERT_NOT_REACHED();
                        }
                        node->children = AdjacencyList(
                            AdjacencyList::Variable,
                            firstChild, m_graph.m_varArgChildren.size() - firstChild);
                    };
                    
                    auto convertToForwardsCall = [&] () {
                        switch (node->op()) {
                        case CallVarargs:
                            node->setOpAndDefaultFlags(CallForwardVarargs);
                            break;
                        case ConstructVarargs:
                            node->setOpAndDefaultFlags(ConstructForwardVarargs);
                            break;
                        case TailCallVarargs:
                            node->setOpAndDefaultFlags(TailCallForwardVarargs);
                            break;
                        case TailCallVarargsInlinedCaller:
                            node->setOpAndDefaultFlags(TailCallForwardVarargsInlinedCaller);
                            break;
                        default:
                            RELEASE_ASSERT_NOT_REACHED();
                        }
                    };
                    
                    if (candidate->op() == PhantomNewArrayWithSpread || candidate->op() == PhantomNewArrayBuffer || candidate->op() == PhantomSpread) {
                        auto canTransformToStaticArgumentCountCall = recursableLambda([&](auto self, Node* candidate) -> bool {
                            if (candidate->op() == PhantomSpread)
                                return self(candidate->child1().node());
                            
                            if (candidate->op() == PhantomNewArrayWithSpread) {
                                BitVector* bitVector = candidate->bitVector();
                                for (unsigned i = 0; i < candidate->numChildren(); i++) {
                                    if (bitVector->get(i)) {
                                        Node* spread = m_graph.varArgChild(candidate, i).node();
                                        if (!self(spread))
                                            return false;
                                    }
                                }
                                return true;
                            }
                            
                            // PhantomNewArrayBuffer only contains constants, so it can always be converted to a static argument count call.
                            if (candidate->op() == PhantomNewArrayBuffer)
                                return true;
                            
                            ASSERT(candidate->op() == PhantomCreateRest);
                            InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
                            return inlineCallFrame && !inlineCallFrame->isVarargs();
                        });
                        
                        if (canTransformToStaticArgumentCountCall(candidate)) {
                            Vector<Node*> arguments;
                            auto appendNode = recursableLambda([&](auto self, Node* candidate) -> void {
                                if (candidate->op() == PhantomSpread) {
                                    self(candidate->child1().node());
                                    return;
                                }
                                
                                if (candidate->op() == PhantomNewArrayWithSpread) {
                                    BitVector* bitVector = candidate->bitVector();
                                    for (unsigned i = 0; i < candidate->numChildren(); i++) {
                                        Node* child = m_graph.varArgChild(candidate, i).node();
                                        if (bitVector->get(i))
                                            self(child);
                                        else
                                            arguments.append(child);
                                    }
                                    return;
                                }
                                
                                if (candidate->op() == PhantomNewArrayBuffer) {
                                    bool canExit = true;
                                    auto* array = candidate->castOperand<JSImmutableButterfly*>();
                                    for (unsigned index = 0; index < array->length(); ++index) {
                                        JSValue constant;
                                        if (candidate->indexingType() == ArrayWithDouble)
                                            constant = jsDoubleNumber(array->get(index).asNumber());
                                        else
                                            constant = array->get(index);
                                        arguments.append(insertionSet.insertConstant(nodeIndex, node->origin.withExitOK(canExit), constant));
                                    }
                                    return;
                                }
                                
                                ASSERT(candidate->op() == PhantomCreateRest);
                                InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
                                unsigned numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
                                for (unsigned i = 1 + numberOfArgumentsToSkip; i < inlineCallFrame->argumentCountIncludingThis; ++i) {
                                    StackAccessData* data = m_graph.m_stackAccessData.add(
                                        virtualRegisterForArgument(i) + inlineCallFrame->stackOffset,
                                        FlushedJSValue);
                                    
                                    Node* value = insertionSet.insertNode(
                                        nodeIndex, SpecNone, GetStack, node->origin, OpInfo(data));
                                    
                                    arguments.append(value);
                                }
                            });
                            
                            appendNode(candidate);
                            convertToStaticArgumentCountCall(arguments);
                        } else
                            convertToForwardsCall();
                    } else {
                        unsigned numberOfArgumentsToSkip = 0;
                        if (candidate->op() == PhantomCreateRest)
                            numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
                        CallVarargsData* varargsData = node->callVarargsData();
                        varargsData->firstVarArgOffset += numberOfArgumentsToSkip;
                        
                        InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
                        if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
                            Vector<Node*> arguments;
                            for (unsigned i = 1 + varargsData->firstVarArgOffset; i < inlineCallFrame->argumentCountIncludingThis; ++i) {
                                StackAccessData* data = m_graph.m_stackAccessData.add(
                                    virtualRegisterForArgument(i) + inlineCallFrame->stackOffset,
                                    FlushedJSValue);
                                
                                Node* value = insertionSet.insertNode(
                                    nodeIndex, SpecNone, GetStack, node->origin, OpInfo(data));
                                
                                arguments.append(value);
                            }
                            
                            convertToStaticArgumentCountCall(arguments);
                        } else
                            convertToForwardsCall();
                    }
                    
                    break;
                }
                    
                case CheckArray:
                case GetButterfly:
                case FilterGetByIdStatus:
                case FilterPutByIdStatus:
                case FilterCallLinkStatus:
                case FilterInByIdStatus: {
                    if (!isEliminatedAllocation(node->child1().node()))
                        break;
                    node->remove(m_graph);
                    break;
                }
                    
                case CheckStructureOrEmpty:
                case CheckStructure:
                    if (!isEliminatedAllocation(node->child1().node()))
                        break;
                    node->child1() = Edge(); // Remove the cell check since we've proven it's not needed and FTL lowering might botch this.
                    node->remove(m_graph);
                    break;
                    
                default:
                    break;
                }
                
                if (node->isPseudoTerminal()) {
                    pseudoTerminal = node;
                    break;
                }
            }
            
            insertionSet.execute(block);
            
            if (pseudoTerminal) {
                for (unsigned i = 0; i < block->size(); ++i) {
                    Node* node = block->at(i);
                    if (node != pseudoTerminal)
                        continue;
                    block->resize(i + 1);
                    block->append(m_graph.addNode(SpecNone, Unreachable, node->origin));
                    modifiedCFG = true;
                    break;
                }
            }
        }
        
        if (modifiedCFG) {
            m_graph.invalidateCFG();
            m_graph.resetReachability();
            m_graph.killUnreachableBlocks();
        }
    }
    
    HashSet<Node*> m_candidates;
};

} // anonymous namespace

bool performArgumentsElimination(Graph& graph)
{
    return runPhase<ArgumentsEliminationPhase>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)