/*
 * Copyright (C) 2015-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGArgumentsEliminationPhase.h"

#if ENABLE(DFG_JIT)

#include "ArrayPrototype.h"
#include "BytecodeLivenessAnalysisInlines.h"
#include "ClonedArguments.h"
#include "DFGArgumentsUtilities.h"
#include "DFGBasicBlockInlines.h"
#include "DFGBlockMapInlines.h"
#include "DFGClobberize.h"
#include "DFGCombinedLiveness.h"
#include "DFGForAllKills.h"
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGPhase.h"
#include "JSCInlines.h"
#include <wtf/HashSet.h>
#include <wtf/ListDump.h>
#include <wtf/RecursableLambda.h>

namespace JSC { namespace DFG {

namespace {

namespace DFGArgumentsEliminationPhaseInternal {
static constexpr bool verbose = false;
}

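// A high-level overview, with a hedged sketch of the kind of JavaScript this phase
// targets (illustrative only, not an exhaustive or normative list):
//
//     function foo() { return arguments.length; }     // CreateDirectArguments
//     function bar(...rest) { return baz(...rest); }  // CreateRest feeding Spread
//
// If such an allocation never escapes, and the stack slots it would copy from are not
// clobbered anywhere it is live, we turn it into a Phantom* node and rewrite each use
// (GetByVal, GetArrayLength, LoadVarargs, CallVarargs, ...) to read the stack directly.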
class ArgumentsEliminationPhase : public Phase {
public:
    ArgumentsEliminationPhase(Graph& graph)
        : Phase(graph, "arguments elimination")
    {
    }

    bool run()
    {
        // For now this phase only works on SSA. This could be changed; we could have a block-local
        // version over LoadStore.
        DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA);

        if (DFGArgumentsEliminationPhaseInternal::verbose) {
            dataLog("Graph before arguments elimination:\n");
            m_graph.dump();
        }

        identifyCandidates();
        if (m_candidates.isEmpty())
            return false;

        eliminateCandidatesThatEscape();
        if (m_candidates.isEmpty())
            return false;

        eliminateCandidatesThatInterfere();
        if (m_candidates.isEmpty())
            return false;

        transform();

        return true;
    }

private:
    // Just finds nodes that we know how to work with.
    void identifyCandidates()
    {
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            for (Node* node : *block) {
                switch (node->op()) {
                case CreateDirectArguments:
                case CreateClonedArguments:
                    m_candidates.add(node);
                    break;

                case CreateRest:
                    if (m_graph.isWatchingHavingABadTimeWatchpoint(node)) {
                        // If we're watching the HavingABadTime watchpoint it means that we will be invalidated
                        // when it fires (it may or may not have actually fired yet). We don't try to eliminate
                        // this allocation when we're not watching the watchpoint because it could entail calling
                        // indexed accessors (and probably more crazy things) on out-of-bounds accesses to the
                        // rest parameter. It's also much easier to reason about this way.
                        m_candidates.add(node);
                    }
                    break;

                case Spread:
                    if (m_graph.isWatchingHavingABadTimeWatchpoint(node)) {
                        // We check ArrayUse here because ArrayUse indicates that the iterator
                        // protocol for Arrays is non-observable by user code (e.g., it hasn't
                        // been changed).
                        if (node->child1().useKind() == ArrayUse) {
                            if ((node->child1()->op() == CreateRest || node->child1()->op() == NewArrayBuffer) && m_candidates.contains(node->child1().node()))
                                m_candidates.add(node);
                        }
                    }
                    break;

                case NewArrayWithSpread: {
                    if (m_graph.isWatchingHavingABadTimeWatchpoint(node)) {
                        BitVector* bitVector = node->bitVector();
                        // We only allow for Spreads to be of CreateRest or NewArrayBuffer nodes for now.
                        bool isOK = true;
                        for (unsigned i = 0; i < node->numChildren(); i++) {
                            if (bitVector->get(i)) {
                                Node* child = m_graph.varArgChild(node, i).node();
                                isOK = child->op() == Spread && (child->child1()->op() == CreateRest || child->child1()->op() == NewArrayBuffer) && m_candidates.contains(child);
                                if (!isOK)
                                    break;
                            }
                        }

                        if (!isOK)
                            break;

                        m_candidates.add(node);
                    }
                    break;
                }

                case NewArrayBuffer: {
                    if (m_graph.isWatchingHavingABadTimeWatchpoint(node) && !hasAnyArrayStorage(node->indexingMode()))
                        m_candidates.add(node);
                    break;
                }

                case CreateScopedArguments:
                    // FIXME: We could handle this if it wasn't for the fact that scoped arguments are
                    // always stored into the activation.
                    // https://bugs.webkit.org/show_bug.cgi?id=143072 and
                    // https://bugs.webkit.org/show_bug.cgi?id=143073
                    break;

                default:
                    break;
                }
                if (node->isPseudoTerminal())
                    break;
            }
        }

        if (DFGArgumentsEliminationPhaseInternal::verbose)
            dataLog("Candidates: ", listDump(m_candidates), "\n");
    }

    bool isStillValidCandidate(Node* candidate)
    {
        switch (candidate->op()) {
        case Spread:
            return m_candidates.contains(candidate->child1().node());

        case NewArrayWithSpread: {
            BitVector* bitVector = candidate->bitVector();
            for (unsigned i = 0; i < candidate->numChildren(); i++) {
                if (bitVector->get(i)) {
                    if (!m_candidates.contains(m_graph.varArgChild(candidate, i).node()))
                        return false;
                }
            }
            return true;
        }

        default:
            return true;
        }

        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }

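    // Candidates form a dependency chain (a Spread depends on its CreateRest or
    // NewArrayBuffer, a NewArrayWithSpread on its Spreads), so removing one candidate
    // can invalidate others. Rather than ordering removals, we iterate to a fixed point.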
    void removeInvalidCandidates()
    {
        bool changed;
        do {
            changed = false;
            Vector<Node*, 1> toRemove;

            for (Node* candidate : m_candidates) {
                if (!isStillValidCandidate(candidate))
                    toRemove.append(candidate);
            }

            if (toRemove.size()) {
                changed = true;
                for (Node* node : toRemove)
                    m_candidates.remove(node);
            }

        } while (changed);
    }

    void transitivelyRemoveCandidate(Node* node, Node* source = nullptr)
    {
        bool removed = m_candidates.remove(node);
        if (removed && DFGArgumentsEliminationPhaseInternal::verbose && source)
            dataLog("eliminating candidate: ", node, " because it escapes from: ", source, "\n");

        if (removed)
            removeInvalidCandidates();
    }

    // Look for escaping sites, and remove from the candidates set if we see an escape.
    void eliminateCandidatesThatEscape()
    {
        auto escape = [&] (Edge edge, Node* source) {
            if (!edge)
                return;
            transitivelyRemoveCandidate(edge.node(), source);
        };

        auto escapeBasedOnArrayMode = [&] (ArrayMode mode, Edge edge, Node* source) {
            switch (mode.type()) {
            case Array::DirectArguments: {
                if (edge->op() != CreateDirectArguments) {
                    escape(edge, source);
                    break;
                }

                // Everything is fine if we're doing an in-bounds access.
                if (mode.isInBounds())
                    break;

                // If we're out-of-bounds then we proceed only if the prototype chain
                // for the allocation is sane (i.e. doesn't have indexed properties).
                JSGlobalObject* globalObject = m_graph.globalObjectFor(edge->origin.semantic);
                Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(m_graph.m_vm);
                if (objectPrototypeStructure->transitionWatchpointSetIsStillValid()
                    && globalObject->objectPrototypeIsSane()) {
                    m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
                    break;
                }
                escape(edge, source);
                break;
            }

            case Array::Contiguous: {
                if (edge->op() != CreateClonedArguments && edge->op() != CreateRest) {
                    escape(edge, source);
                    break;
                }

                // Everything is fine if we're doing an in-bounds access.
                if (mode.isInBounds())
                    break;

                // If we're out-of-bounds then we proceed only if the prototype chain
                // for the allocation is sane (i.e. doesn't have indexed properties).
                JSGlobalObject* globalObject = m_graph.globalObjectFor(edge->origin.semantic);
                Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(m_graph.m_vm);
                if (edge->op() == CreateRest) {
                    Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(m_graph.m_vm);
                    if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
                        && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
                        && globalObject->arrayPrototypeChainIsSane()) {
                        m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
                        m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
                        break;
                    }
                } else {
                    if (objectPrototypeStructure->transitionWatchpointSetIsStillValid()
                        && globalObject->objectPrototypeIsSane()) {
                        m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
                        break;
                    }
                }
                escape(edge, source);
                break;
            }

            case Array::ForceExit:
                break;

            default:
                escape(edge, source);
                break;
            }
        };

        removeInvalidCandidates();

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                switch (node->op()) {
                case GetFromArguments:
                    break;

                case GetByVal:
                    escapeBasedOnArrayMode(node->arrayMode(), m_graph.varArgChild(node, 0), node);
                    escape(m_graph.varArgChild(node, 1), node);
                    escape(m_graph.varArgChild(node, 2), node);
                    break;

                case GetArrayLength:
                    escape(node->child2(), node);
                    // Computing the length of a NewArrayWithSpread can require some additions.
                    // These additions can overflow if the array is sufficiently enormous, and in that case we will need to exit.
                    if ((node->child1()->op() == NewArrayWithSpread) && !node->origin.exitOK)
                        escape(node->child1(), node);
                    break;

                case NewArrayWithSpread: {
                    BitVector* bitVector = node->bitVector();
                    bool isWatchingHavingABadTimeWatchpoint = m_graph.isWatchingHavingABadTimeWatchpoint(node);
                    for (unsigned i = 0; i < node->numChildren(); i++) {
                        Edge child = m_graph.varArgChild(node, i);
                        bool dontEscape;
                        if (bitVector->get(i)) {
                            dontEscape = child->op() == Spread
                                && child->child1().useKind() == ArrayUse
                                && (child->child1()->op() == CreateRest || child->child1()->op() == NewArrayBuffer)
                                && isWatchingHavingABadTimeWatchpoint;
                        } else
                            dontEscape = false;

                        if (!dontEscape)
                            escape(child, node);
                    }

                    break;
                }

                case Spread: {
                    bool isOK = node->child1().useKind() == ArrayUse && (node->child1()->op() == CreateRest || node->child1()->op() == NewArrayBuffer);
                    if (!isOK)
                        escape(node->child1(), node);
                    break;
                }

                case NewArrayBuffer:
                    break;

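                // Note: a LoadVarargs with a nonzero offset asks us to skip leading entries.
                // For the spread family we have no forwarding story for that offset, so we
                // conservatively treat it as an escape; for plain rest/arguments candidates,
                // transform() honors the offset when it rewrites the node.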
                case LoadVarargs:
                    if (node->loadVarargsData()->offset && (node->child1()->op() == NewArrayWithSpread || node->child1()->op() == Spread || node->child1()->op() == NewArrayBuffer))
                        escape(node->child1(), node);
                    break;

                case CallVarargs:
                case ConstructVarargs:
                case TailCallVarargs:
                case TailCallVarargsInlinedCaller:
                    escape(node->child1(), node);
                    escape(node->child2(), node);
                    if (node->callVarargsData()->firstVarArgOffset && (node->child3()->op() == NewArrayWithSpread || node->child3()->op() == Spread || node->child3()->op() == NewArrayBuffer))
                        escape(node->child3(), node);
                    break;

                case Check:
                case CheckVarargs:
                    m_graph.doToChildren(
                        node,
                        [&] (Edge edge) {
                            if (edge.willNotHaveCheck())
                                return;

                            if (alreadyChecked(edge.useKind(), SpecObject))
                                return;

                            escape(edge, node);
                        });
                    break;

                case MovHint:
                case PutHint:
                    break;

                case GetButterfly:
                    // This barely works. The danger is that the GetButterfly is used by something that
                    // does something escaping to a candidate. Fortunately, the only butterfly-using ops
                    // that we exempt here also use the candidate directly. If there ever was a
                    // butterfly-using op that we wanted to exempt, then we'd have to look at the
                    // butterfly's child and check if it's a candidate.
                    break;

                case FilterGetByStatus:
                case FilterPutByIdStatus:
                case FilterCallLinkStatus:
                case FilterInByIdStatus:
                    break;

                case CheckArray:
                    escapeBasedOnArrayMode(node->arrayMode(), node->child1(), node);
                    break;

                case CheckStructureOrEmpty:
                case CheckStructure: {
                    Node* target = node->child1().node();
                    if (!m_candidates.contains(target))
                        break;

                    Structure* structure = nullptr;
                    JSGlobalObject* globalObject = m_graph.globalObjectFor(target->origin.semantic);
                    switch (target->op()) {
                    case CreateDirectArguments:
                        structure = globalObject->directArgumentsStructure();
                        break;
                    case CreateClonedArguments:
                        structure = globalObject->clonedArgumentsStructure();
                        break;
                    case CreateRest:
                        ASSERT(m_graph.isWatchingHavingABadTimeWatchpoint(target));
                        structure = globalObject->restParameterStructure();
                        break;
                    case NewArrayWithSpread:
                        ASSERT(m_graph.isWatchingHavingABadTimeWatchpoint(target));
                        structure = globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous);
                        break;
                    case NewArrayBuffer: {
                        ASSERT(m_graph.isWatchingHavingABadTimeWatchpoint(target));
                        IndexingType indexingMode = target->indexingMode();
                        ASSERT(!hasAnyArrayStorage(indexingMode));
                        structure = globalObject->originalArrayStructureForIndexingType(indexingMode);
                        break;
                    }
                    default:
                        RELEASE_ASSERT_NOT_REACHED();
                    }
                    ASSERT(structure);

                    if (!node->structureSet().contains(m_graph.registerStructure(structure)))
                        escape(node->child1(), node);
                    break;
                }

                // FIXME: We should be able to handle GetById/GetByOffset on callee.
                // https://bugs.webkit.org/show_bug.cgi?id=143075

                case GetByOffset:
                    if (node->child2()->op() == CreateClonedArguments && node->storageAccessData().offset == clonedArgumentsLengthPropertyOffset)
                        break;
                    FALLTHROUGH;
                default:
                    m_graph.doToChildren(node, [&] (Edge edge) { return escape(edge, node); });
                    break;
                }
                if (node->isPseudoTerminal())
                    break;
            }
        }

        if (DFGArgumentsEliminationPhaseInternal::verbose)
            dataLog("After escape analysis: ", listDump(m_candidates), "\n");
    }

    // Anywhere that a candidate is live (in bytecode or in DFG), check if there is a chance of
    // interference between the stack area that the arguments object copies from and the arguments
    // object's payload. Conservatively this means that the stack region doesn't get stored to.
    void eliminateCandidatesThatInterfere()
    {
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_graph.initializeNodeOwners();
        CombinedLiveness combinedLiveness(m_graph);

        BlockMap<Operands<bool>> clobberedByBlock(m_graph);
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            Operands<bool>& clobberedByThisBlock = clobberedByBlock[block];
            clobberedByThisBlock = Operands<bool>(OperandsLike, m_graph.block(0)->variablesAtHead);
            for (Node* node : *block) {
                clobberize(
                    m_graph, node, NoOpClobberize(),
                    [&] (AbstractHeap heap) {
                        if (heap.kind() != Stack) {
                            ASSERT(!heap.overlaps(Stack));
                            return;
                        }
                        ASSERT(!heap.payload().isTop());
                        VirtualRegister reg(heap.payload().value32());
                        // The register may not point to an argument or local, for example if we are looking at SetArgumentCountIncludingThis.
                        if (!reg.isHeader())
                            clobberedByThisBlock.operand(reg) = true;
                    },
                    NoOpClobberize());
            }
        }

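        // clobberedByBlock now maps each block to the set of stack operands it may write.
        // It is a coarse per-block summary; the node-by-node scan below is only done for
        // blocks that both clobber the stack and kill a live candidate.
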
        using InlineCallFrames = HashSet<InlineCallFrame*, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>>;
        using InlineCallFramesForCandidates = HashMap<Node*, InlineCallFrames>;
        InlineCallFramesForCandidates inlineCallFramesForCandidate;
        auto forEachDependentNode = recursableLambda([&](auto self, Node* node, const auto& functor) -> void {
            functor(node);

            if (node->op() == Spread) {
                self(node->child1().node(), functor);
                return;
            }

            if (node->op() == NewArrayWithSpread) {
                BitVector* bitVector = node->bitVector();
                for (unsigned i = node->numChildren(); i--; ) {
                    if (bitVector->get(i))
                        self(m_graph.varArgChild(node, i).node(), functor);
                }
                return;
            }
        });
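
        // A composite candidate (Spread, NewArrayWithSpread) reads the stack of every frame
        // that its transitive children were allocated in, so interference has to be checked
        // against each of those inline call frames, not just the candidate's own frame.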
        for (Node* candidate : m_candidates) {
            auto& set = inlineCallFramesForCandidate.add(candidate, InlineCallFrames()).iterator->value;
            forEachDependentNode(candidate, [&](Node* dependent) {
                set.add(dependent->origin.semantic.inlineCallFrame());
            });
        }

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            // Stop if we've already removed all candidates.
            if (m_candidates.isEmpty())
                return;

            // Ignore blocks that don't write to the stack.
            bool writesToStack = false;
            for (unsigned i = clobberedByBlock[block].size(); i--;) {
                if (clobberedByBlock[block][i]) {
                    writesToStack = true;
                    break;
                }
            }
            if (!writesToStack)
                continue;

            forAllKillsInBlock(
                m_graph, combinedLiveness, block,
                [&] (unsigned nodeIndex, Node* candidate) {
                    if (!m_candidates.contains(candidate))
                        return;

                    for (InlineCallFrame* inlineCallFrame : inlineCallFramesForCandidate.get(candidate)) {
                        // Check if this block has any clobbers that affect this candidate. This is a fairly
                        // fast check.
                        bool isClobberedByBlock = false;
                        Operands<bool>& clobberedByThisBlock = clobberedByBlock[block];

                        if (inlineCallFrame) {
                            if (inlineCallFrame->isVarargs()) {
                                isClobberedByBlock |= clobberedByThisBlock.operand(
                                    inlineCallFrame->stackOffset + CallFrameSlot::argumentCount);
                            }

                            if (!isClobberedByBlock || inlineCallFrame->isClosureCall) {
                                isClobberedByBlock |= clobberedByThisBlock.operand(
                                    inlineCallFrame->stackOffset + CallFrameSlot::callee);
                            }

                            if (!isClobberedByBlock) {
                                for (unsigned i = 0; i < inlineCallFrame->argumentCountIncludingThis - 1; ++i) {
                                    VirtualRegister reg =
                                        VirtualRegister(inlineCallFrame->stackOffset) +
                                        CallFrame::argumentOffset(i);
                                    if (clobberedByThisBlock.operand(reg)) {
                                        isClobberedByBlock = true;
                                        break;
                                    }
                                }
                            }
                        } else {
                            // We don't include the ArgumentCount or Callee in this case because we can be
                            // damn sure that this won't be clobbered.
                            for (unsigned i = 1; i < static_cast<unsigned>(codeBlock()->numParameters()); ++i) {
                                if (clobberedByThisBlock.argument(i)) {
                                    isClobberedByBlock = true;
                                    break;
                                }
                            }
                        }

                        if (!isClobberedByBlock)
                            continue;

                        // Check if we can immediately eliminate this candidate. If the block has a clobber
                        // for this arguments allocation, and we'd have to examine every node in the block,
                        // then we can just eliminate the candidate.
                        if (nodeIndex == block->size() && candidate->owner != block) {
                            if (DFGArgumentsEliminationPhaseInternal::verbose)
                                dataLog("eliminating candidate: ", candidate, " because it is clobbered by: ", block->at(nodeIndex - 1), "\n");
                            transitivelyRemoveCandidate(candidate);
                            return;
                        }

                        // This loop considers all nodes up to the nodeIndex, excluding the nodeIndex.
                        //
                        // Note: nodeIndex here has a double meaning. Before entering this
                        // while loop, it refers to the remaining number of nodes that have
                        // yet to be processed. Inside the loop, it refers to the index
                        // of the current node to process (after we decrement it).
                        //
                        // If the remaining number of nodes is 0, we should not decrement nodeIndex.
                        // Hence, we must only decrement nodeIndex inside the while loop instead of
                        // in its condition statement. Note that this while loop is embedded in an
                        // outer for loop. If we decrement nodeIndex in the condition statement, a
                        // nodeIndex of 0 will become UINT_MAX, and the outer loop will wrongly
                        // treat this as there being UINT_MAX remaining nodes to process.
                        while (nodeIndex) {
                            --nodeIndex;
                            Node* node = block->at(nodeIndex);
                            if (node == candidate)
                                break;

                            bool found = false;
                            clobberize(
                                m_graph, node, NoOpClobberize(),
                                [&] (AbstractHeap heap) {
                                    if (heap.kind() == Stack && !heap.payload().isTop()) {
                                        if (argumentsInvolveStackSlot(inlineCallFrame, VirtualRegister(heap.payload().value32())))
                                            found = true;
                                        return;
                                    }
                                    if (heap.overlaps(Stack))
                                        found = true;
                                },
                                NoOpClobberize());

                            if (found) {
                                if (DFGArgumentsEliminationPhaseInternal::verbose)
                                    dataLog("eliminating candidate: ", candidate, " because it is clobbered by ", block->at(nodeIndex), "\n");
                                transitivelyRemoveCandidate(candidate);
                                return;
                            }
                        }
                    }
                });
        }

        // Q: How do we handle OSR exit with a live PhantomArguments at a point where the inline call
        // frame is dead? A: Naively we could say that PhantomArguments must escape the stack slots. But
        // that would break PutStack sinking, which in turn would break object allocation sinking, in
        // cases where we have a varargs call to an otherwise pure method. So, we need something smarter.
        // For the outermost arguments, we just have a PhantomArguments that magically knows that it
        // should load the arguments from the call frame. For the inline arguments, we have the heap map
        // in the availability map track each possible inline argument as a promoted heap location. If the
        // PutStacks for those arguments aren't sunk, those heap locations will map to very trivial
        // availabilities (they will be flush availabilities). But if sinking happens then those
        // availabilities may become whatever. OSR exit should be able to handle this quite naturally,
        // since those availabilities speak of the stack before the optimizing compiler stack frame is
        // torn down.

        if (DFGArgumentsEliminationPhaseInternal::verbose)
            dataLog("After interference analysis: ", listDump(m_candidates), "\n");
    }

    void transform()
    {
        bool modifiedCFG = false;

        InsertionSet insertionSet(m_graph);

        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            Node* pseudoTerminal = nullptr;
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                auto getArrayLength = [&] (Node* candidate) -> Node* {
                    return emitCodeToGetArgumentsArrayLength(
                        insertionSet, candidate, nodeIndex, node->origin);
                };

                auto isEliminatedAllocation = [&] (Node* candidate) -> bool {
                    if (!m_candidates.contains(candidate))
                        return false;
                    // We traverse in such a way that we are guaranteed to see a def before a use.
                    // Therefore, we should have already transformed the allocation before the use
                    // of an allocation.
                    ASSERT(candidate->op() == PhantomCreateRest || candidate->op() == PhantomDirectArguments || candidate->op() == PhantomClonedArguments
                        || candidate->op() == PhantomSpread || candidate->op() == PhantomNewArrayWithSpread || candidate->op() == PhantomNewArrayBuffer);
                    return true;
                };

                switch (node->op()) {
                case CreateDirectArguments:
                    if (!m_candidates.contains(node))
                        break;

                    node->setOpAndDefaultFlags(PhantomDirectArguments);
                    break;

                case CreateRest:
                    if (!m_candidates.contains(node))
                        break;

                    ASSERT(node->origin.exitOK);
                    ASSERT(node->child1().useKind() == Int32Use);
                    insertionSet.insertNode(
                        nodeIndex, SpecNone, Check, node->origin,
                        node->child1());

                    node->setOpAndDefaultFlags(PhantomCreateRest);
                    // We don't need this parameter for OSR exit, we can find out all the information
                    // we need via the static parameter count and the dynamic argument count.
                    node->child1() = Edge();
                    break;

                case CreateClonedArguments:
                    if (!m_candidates.contains(node))
                        break;

                    node->setOpAndDefaultFlags(PhantomClonedArguments);
                    break;

                case Spread:
                    if (!m_candidates.contains(node))
                        break;

                    node->setOpAndDefaultFlags(PhantomSpread);
                    break;

                case NewArrayWithSpread:
                    if (!m_candidates.contains(node))
                        break;

                    node->setOpAndDefaultFlags(PhantomNewArrayWithSpread);
                    break;

                case NewArrayBuffer:
                    if (!m_candidates.contains(node))
                        break;

                    node->setOpAndDefaultFlags(PhantomNewArrayBuffer);
                    node->child1() = Edge(insertionSet.insertConstant(nodeIndex, node->origin, node->cellOperand()));
                    break;

                case GetFromArguments: {
                    Node* candidate = node->child1().node();
                    if (!isEliminatedAllocation(candidate))
                        break;

                    DFG_ASSERT(
                        m_graph, node, node->child1()->op() == PhantomDirectArguments, node->child1()->op());
                    VirtualRegister reg =
                        virtualRegisterForArgument(node->capturedArgumentsOffset().offset() + 1) +
                        node->origin.semantic.stackOffset();
                    StackAccessData* data = m_graph.m_stackAccessData.add(reg, FlushedJSValue);
                    node->convertToGetStack(data);
                    break;
                }

                case GetByOffset: {
                    Node* candidate = node->child2().node();
                    if (!isEliminatedAllocation(candidate))
                        break;

                    ASSERT(candidate->op() == PhantomClonedArguments);
                    ASSERT(node->storageAccessData().offset == clonedArgumentsLengthPropertyOffset);

                    // Meh, this is kind of hackish - we use an Identity so that we can reuse the
                    // getArrayLength() helper.
                    node->convertToIdentityOn(getArrayLength(candidate));
                    break;
                }

                case GetArrayLength: {
                    Node* candidate = node->child1().node();
                    if (!isEliminatedAllocation(candidate))
                        break;

                    node->convertToIdentityOn(getArrayLength(candidate));
                    break;
                }

                case GetByVal: {
                    // FIXME: For ClonedArguments, we would have already done a separate bounds check.
                    // This code will cause us to have two bounds checks - the original one that we
                    // already factored out in SSALoweringPhase, and the new one we insert here, which is
                    // often implicitly part of GetMyArgumentByVal. B3 will probably eliminate the
                    // second bounds check, but still - that's just silly.
                    // https://bugs.webkit.org/show_bug.cgi?id=143076

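                    // If the index is a constant that provably lies within the frame's actual
                    // arguments, we can read the stack slot directly (plus a CheckInBounds when
                    // the frame's argument count is dynamic). Otherwise we fall back to
                    // GetMyArgumentByVal / GetMyArgumentByValOutOfBounds, which look the
                    // argument up dynamically.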
                    Node* candidate = m_graph.varArgChild(node, 0).node();
                    if (!isEliminatedAllocation(candidate))
                        break;

                    unsigned numberOfArgumentsToSkip = 0;
                    if (candidate->op() == PhantomCreateRest)
                        numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();

                    Node* result = nullptr;
                    if (m_graph.varArgChild(node, 1)->isInt32Constant()) {
                        unsigned index = m_graph.varArgChild(node, 1)->asUInt32();
                        InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
                        index += numberOfArgumentsToSkip;

                        bool safeToGetStack = index >= numberOfArgumentsToSkip;
                        if (inlineCallFrame)
                            safeToGetStack &= index < inlineCallFrame->argumentCountIncludingThis - 1;
                        else {
                            safeToGetStack &=
                                index < static_cast<unsigned>(codeBlock()->numParameters()) - 1;
                        }
                        if (safeToGetStack) {
                            StackAccessData* data;
                            VirtualRegister arg = virtualRegisterForArgument(index + 1);
                            if (inlineCallFrame)
                                arg += inlineCallFrame->stackOffset;
                            data = m_graph.m_stackAccessData.add(arg, FlushedJSValue);

                            Node* check = nullptr;
                            if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
                                check = insertionSet.insertNode(
                                    nodeIndex, SpecNone, CheckInBounds, node->origin,
                                    m_graph.varArgChild(node, 1), Edge(getArrayLength(candidate), Int32Use));
                            }

                            result = insertionSet.insertNode(
                                nodeIndex, node->prediction(), GetStack, node->origin, OpInfo(data), Edge(check, UntypedUse));
                        }
                    }

                    if (!result) {
                        NodeType op;
                        if (node->arrayMode().isInBounds())
                            op = GetMyArgumentByVal;
                        else
                            op = GetMyArgumentByValOutOfBounds;
                        result = insertionSet.insertNode(
                            nodeIndex, node->prediction(), op, node->origin, OpInfo(numberOfArgumentsToSkip),
                            m_graph.varArgChild(node, 0), m_graph.varArgChild(node, 1));
                    }

                    // Need to do this because we may have a data format conversion here.
                    node->convertToIdentityOn(result);
                    break;
                }

                case LoadVarargs: {
                    Node* candidate = node->child1().node();
                    if (!isEliminatedAllocation(candidate))
                        break;

                    // LoadVarargs can exit, so it better be exitOK.
                    DFG_ASSERT(m_graph, node, node->origin.exitOK);
                    bool canExit = true;
                    LoadVarargsData* varargsData = node->loadVarargsData();

                    auto storeArgumentCountIncludingThis = [&] (unsigned argumentCountIncludingThis) {
                        Node* argumentCountIncludingThisNode = insertionSet.insertConstant(
                            nodeIndex, node->origin.withExitOK(canExit),
                            jsNumber(argumentCountIncludingThis));
                        insertionSet.insertNode(
                            nodeIndex, SpecNone, KillStack, node->origin.takeValidExit(canExit),
                            OpInfo(varargsData->count.offset()));
                        insertionSet.insertNode(
                            nodeIndex, SpecNone, MovHint, node->origin.takeValidExit(canExit),
                            OpInfo(varargsData->count.offset()), Edge(argumentCountIncludingThisNode));
                        insertionSet.insertNode(
                            nodeIndex, SpecNone, PutStack, node->origin.withExitOK(canExit),
                            OpInfo(m_graph.m_stackAccessData.add(varargsData->count, FlushedInt32)),
                            Edge(argumentCountIncludingThisNode, KnownInt32Use));
                    };

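                    // Each forwarded slot gets the canonical KillStack / MovHint / PutStack
                    // sequence: KillStack ends the slot's previous OSR availability, MovHint
                    // records the new value for exit, and PutStack performs the actual store.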
                    auto storeValue = [&] (Node* value, unsigned storeIndex) {
                        VirtualRegister reg = varargsData->start + storeIndex;
                        StackAccessData* data =
                            m_graph.m_stackAccessData.add(reg, FlushedJSValue);

                        insertionSet.insertNode(
                            nodeIndex, SpecNone, KillStack, node->origin.takeValidExit(canExit), OpInfo(reg.offset()));
                        insertionSet.insertNode(
                            nodeIndex, SpecNone, MovHint, node->origin.takeValidExit(canExit),
                            OpInfo(reg.offset()), Edge(value));
                        insertionSet.insertNode(
                            nodeIndex, SpecNone, PutStack, node->origin.withExitOK(canExit),
                            OpInfo(data), Edge(value));
                    };

                    if (candidate->op() == PhantomNewArrayWithSpread || candidate->op() == PhantomNewArrayBuffer || candidate->op() == PhantomSpread) {
                        auto canConvertToStaticLoadStores = recursableLambda([&] (auto self, Node* candidate) -> bool {
                            if (candidate->op() == PhantomSpread)
                                return self(candidate->child1().node());

                            if (candidate->op() == PhantomNewArrayWithSpread) {
                                BitVector* bitVector = candidate->bitVector();
                                for (unsigned i = 0; i < candidate->numChildren(); i++) {
                                    if (bitVector->get(i)) {
                                        if (!self(m_graph.varArgChild(candidate, i).node()))
                                            return false;
                                    }
                                }
                                return true;
                            }

                            // PhantomNewArrayBuffer only contains constants. It can always convert LoadVarargs to static load stores.
                            if (candidate->op() == PhantomNewArrayBuffer)
                                return true;

                            ASSERT(candidate->op() == PhantomCreateRest);
                            InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
                            return inlineCallFrame && !inlineCallFrame->isVarargs();
                        });

                        if (canConvertToStaticLoadStores(candidate)) {
                            auto countNumberOfArguments = recursableLambda([&](auto self, Node* candidate) -> unsigned {
                                if (candidate->op() == PhantomSpread)
                                    return self(candidate->child1().node());

                                if (candidate->op() == PhantomNewArrayWithSpread) {
                                    BitVector* bitVector = candidate->bitVector();
                                    unsigned result = 0;
                                    for (unsigned i = 0; i < candidate->numChildren(); i++) {
                                        if (bitVector->get(i))
                                            result += self(m_graph.varArgChild(candidate, i).node());
                                        else
                                            ++result;
                                    }
                                    return result;
                                }

                                if (candidate->op() == PhantomNewArrayBuffer)
                                    return candidate->castOperand<JSImmutableButterfly*>()->length();

                                ASSERT(candidate->op() == PhantomCreateRest);
                                unsigned numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
                                InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
                                unsigned frameArgumentCount = inlineCallFrame->argumentCountIncludingThis - 1;
                                if (frameArgumentCount >= numberOfArgumentsToSkip)
                                    return frameArgumentCount - numberOfArgumentsToSkip;
                                return 0;
                            });

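                            // With the forwarded argument count known statically, we can emit
                            // straight-line loads and stores: first the flattened values, then
                            // undefined padding up to the LoadVarargs' declared limit.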
                            unsigned argumentCountIncludingThis = 1 + countNumberOfArguments(candidate); // |this|
                            if (argumentCountIncludingThis <= varargsData->limit) {
                                storeArgumentCountIncludingThis(argumentCountIncludingThis);

                                DFG_ASSERT(m_graph, node, varargsData->limit - 1 >= varargsData->mandatoryMinimum, varargsData->limit, varargsData->mandatoryMinimum);
                                // Define our limit to exclude "this", since that's a bit easier to reason about.
                                unsigned limit = varargsData->limit - 1;

                                auto forwardNode = recursableLambda([&](auto self, Node* candidate, unsigned storeIndex) -> unsigned {
                                    if (candidate->op() == PhantomSpread)
                                        return self(candidate->child1().node(), storeIndex);

                                    if (candidate->op() == PhantomNewArrayWithSpread) {
                                        BitVector* bitVector = candidate->bitVector();
                                        for (unsigned i = 0; i < candidate->numChildren(); i++) {
                                            if (bitVector->get(i))
                                                storeIndex = self(m_graph.varArgChild(candidate, i).node(), storeIndex);
                                            else {
                                                Node* value = m_graph.varArgChild(candidate, i).node();
                                                storeValue(value, storeIndex++);
                                            }
                                        }
                                        return storeIndex;
                                    }

                                    if (candidate->op() == PhantomNewArrayBuffer) {
                                        auto* array = candidate->castOperand<JSImmutableButterfly*>();
                                        for (unsigned index = 0; index < array->length(); ++index) {
                                            JSValue constant;
                                            if (candidate->indexingType() == ArrayWithDouble)
                                                constant = jsDoubleNumber(array->get(index).asNumber());
                                            else
                                                constant = array->get(index);
                                            Node* value = insertionSet.insertConstant(nodeIndex, node->origin.withExitOK(canExit), constant);
                                            storeValue(value, storeIndex++);
                                        }
                                        return storeIndex;
                                    }

                                    ASSERT(candidate->op() == PhantomCreateRest);
                                    unsigned numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
                                    InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
                                    unsigned frameArgumentCount = inlineCallFrame->argumentCountIncludingThis - 1;
                                    for (unsigned loadIndex = numberOfArgumentsToSkip; loadIndex < frameArgumentCount; ++loadIndex) {
                                        VirtualRegister reg = virtualRegisterForArgument(loadIndex + 1) + inlineCallFrame->stackOffset;
                                        StackAccessData* data = m_graph.m_stackAccessData.add(reg, FlushedJSValue);
                                        Node* value = insertionSet.insertNode(
                                            nodeIndex, SpecNone, GetStack, node->origin.withExitOK(canExit),
                                            OpInfo(data));
                                        storeValue(value, storeIndex++);
                                    }
                                    return storeIndex;
                                });

                                unsigned storeIndex = forwardNode(candidate, 0);
                                RELEASE_ASSERT(storeIndex <= limit);
                                Node* undefined = nullptr;
                                for (; storeIndex < limit; ++storeIndex) {
                                    if (!undefined) {
                                        undefined = insertionSet.insertConstant(
                                            nodeIndex, node->origin.withExitOK(canExit), jsUndefined());
                                    }
                                    storeValue(undefined, storeIndex);
                                }

                                node->remove(m_graph);
                                node->origin.exitOK = canExit;
                                break;
                            }
                        }
                    } else {
                        unsigned numberOfArgumentsToSkip = 0;
                        if (candidate->op() == PhantomCreateRest)
                            numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
                        varargsData->offset += numberOfArgumentsToSkip;

                        InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();

                        if (inlineCallFrame
                            && !inlineCallFrame->isVarargs()) {

                            unsigned argumentCountIncludingThis = inlineCallFrame->argumentCountIncludingThis;
                            if (argumentCountIncludingThis > varargsData->offset)
                                argumentCountIncludingThis -= varargsData->offset;
                            else
                                argumentCountIncludingThis = 1;
                            RELEASE_ASSERT(argumentCountIncludingThis >= 1);

                            if (argumentCountIncludingThis <= varargsData->limit) {

                                storeArgumentCountIncludingThis(argumentCountIncludingThis);

                                DFG_ASSERT(m_graph, node, varargsData->limit - 1 >= varargsData->mandatoryMinimum, varargsData->limit, varargsData->mandatoryMinimum);
                                // Define our limit to exclude "this", since that's a bit easier to reason about.
                                unsigned limit = varargsData->limit - 1;
                                Node* undefined = nullptr;
                                for (unsigned storeIndex = 0; storeIndex < limit; ++storeIndex) {
                                    // First determine if we have an element we can load, and load it if
                                    // possible.

                                    Node* value = nullptr;
                                    unsigned loadIndex = storeIndex + varargsData->offset;

                                    if (loadIndex + 1 < inlineCallFrame->argumentCountIncludingThis) {
                                        VirtualRegister reg = virtualRegisterForArgument(loadIndex + 1) + inlineCallFrame->stackOffset;
                                        StackAccessData* data = m_graph.m_stackAccessData.add(
                                            reg, FlushedJSValue);

                                        value = insertionSet.insertNode(
                                            nodeIndex, SpecNone, GetStack, node->origin.withExitOK(canExit),
                                            OpInfo(data));
                                    } else {
                                        // FIXME: We shouldn't have to store anything if
                                        // storeIndex >= varargsData->mandatoryMinimum, but we will still
                                        // have GetStacks in that range. So if we don't do the stores, we'll
                                        // have degenerate IR: we'll have GetStacks of something that didn't
                                        // have PutStacks.
                                        // https://bugs.webkit.org/show_bug.cgi?id=147434

                                        if (!undefined) {
                                            undefined = insertionSet.insertConstant(
                                                nodeIndex, node->origin.withExitOK(canExit), jsUndefined());
                                        }
                                        value = undefined;
                                    }

                                    // Now that we have a value, store it.
                                    storeValue(value, storeIndex);
                                }

                                node->remove(m_graph);
                                node->origin.exitOK = canExit;
                                break;
                            }
                        }
                    }

                    node->setOpAndDefaultFlags(ForwardVarargs);
                    break;
                }

                case CallVarargs:
                case ConstructVarargs:
                case TailCallVarargs:
                case TailCallVarargsInlinedCaller: {
                    Node* candidate = node->child3().node();
                    if (!isEliminatedAllocation(candidate))
                        break;

                    auto convertToStaticArgumentCountCall = [&] (const Vector<Node*>& arguments) {
                        unsigned firstChild = m_graph.m_varArgChildren.size();
                        m_graph.m_varArgChildren.append(node->child1());
                        m_graph.m_varArgChildren.append(node->child2());
                        for (Node* argument : arguments)
                            m_graph.m_varArgChildren.append(Edge(argument));
                        switch (node->op()) {
                        case CallVarargs:
                            node->setOpAndDefaultFlags(Call);
                            break;
                        case ConstructVarargs:
                            node->setOpAndDefaultFlags(Construct);
                            break;
                        case TailCallVarargs:
                            node->setOpAndDefaultFlags(TailCall);
                            break;
                        case TailCallVarargsInlinedCaller:
                            node->setOpAndDefaultFlags(TailCallInlinedCaller);
                            break;
                        default:
                            RELEASE_ASSERT_NOT_REACHED();
                        }
                        node->children = AdjacencyList(
                            AdjacencyList::Variable,
                            firstChild, m_graph.m_varArgChildren.size() - firstChild);
                    };

                    auto convertToForwardsCall = [&] () {
                        switch (node->op()) {
                        case CallVarargs:
                            node->setOpAndDefaultFlags(CallForwardVarargs);
                            break;
                        case ConstructVarargs:
                            node->setOpAndDefaultFlags(ConstructForwardVarargs);
                            break;
                        case TailCallVarargs:
                            node->setOpAndDefaultFlags(TailCallForwardVarargs);
                            break;
                        case TailCallVarargsInlinedCaller:
                            node->setOpAndDefaultFlags(TailCallForwardVarargsInlinedCaller);
                            break;
                        default:
                            RELEASE_ASSERT_NOT_REACHED();
                        }
                    };

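                    // In source terms this turns, e.g., foo(...rest) into a direct call
                    // foo(rest0, rest1, ...) when the forwarded frame's argument count is
                    // static, and into a forwarding call that copies the arguments at run
                    // time otherwise. (A hedged illustration of the intent, not the exact
                    // bytecode-level shape.)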
                    if (candidate->op() == PhantomNewArrayWithSpread || candidate->op() == PhantomNewArrayBuffer || candidate->op() == PhantomSpread) {
                        auto canTransformToStaticArgumentCountCall = recursableLambda([&](auto self, Node* candidate) -> bool {
                            if (candidate->op() == PhantomSpread)
                                return self(candidate->child1().node());

                            if (candidate->op() == PhantomNewArrayWithSpread) {
                                BitVector* bitVector = candidate->bitVector();
                                for (unsigned i = 0; i < candidate->numChildren(); i++) {
                                    if (bitVector->get(i)) {
                                        Node* spread = m_graph.varArgChild(candidate, i).node();
                                        if (!self(spread))
                                            return false;
                                    }
                                }
                                return true;
                            }

                            // PhantomNewArrayBuffer only contains constants, so it can always be turned into a call with a static argument count.
                            if (candidate->op() == PhantomNewArrayBuffer)
                                return true;

                            ASSERT(candidate->op() == PhantomCreateRest);
                            InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
                            return inlineCallFrame && !inlineCallFrame->isVarargs();
                        });

                        if (canTransformToStaticArgumentCountCall(candidate)) {
                            Vector<Node*> arguments;
                            auto appendNode = recursableLambda([&](auto self, Node* candidate) -> void {
                                if (candidate->op() == PhantomSpread) {
                                    self(candidate->child1().node());
                                    return;
                                }

                                if (candidate->op() == PhantomNewArrayWithSpread) {
                                    BitVector* bitVector = candidate->bitVector();
                                    for (unsigned i = 0; i < candidate->numChildren(); i++) {
                                        Node* child = m_graph.varArgChild(candidate, i).node();
                                        if (bitVector->get(i))
                                            self(child);
                                        else
                                            arguments.append(child);
                                    }
                                    return;
                                }

                                if (candidate->op() == PhantomNewArrayBuffer) {
                                    bool canExit = true;
                                    auto* array = candidate->castOperand<JSImmutableButterfly*>();
                                    for (unsigned index = 0; index < array->length(); ++index) {
                                        JSValue constant;
                                        if (candidate->indexingType() == ArrayWithDouble)
                                            constant = jsDoubleNumber(array->get(index).asNumber());
                                        else
                                            constant = array->get(index);
                                        arguments.append(insertionSet.insertConstant(nodeIndex, node->origin.withExitOK(canExit), constant));
                                    }
                                    return;
                                }

                                ASSERT(candidate->op() == PhantomCreateRest);
                                InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
                                unsigned numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
                                for (unsigned i = 1 + numberOfArgumentsToSkip; i < inlineCallFrame->argumentCountIncludingThis; ++i) {
                                    StackAccessData* data = m_graph.m_stackAccessData.add(
                                        virtualRegisterForArgument(i) + inlineCallFrame->stackOffset,
                                        FlushedJSValue);

                                    Node* value = insertionSet.insertNode(
                                        nodeIndex, SpecNone, GetStack, node->origin, OpInfo(data));

                                    arguments.append(value);
                                }
                            });

                            appendNode(candidate);
                            convertToStaticArgumentCountCall(arguments);
                        } else
                            convertToForwardsCall();
                    } else {
                        unsigned numberOfArgumentsToSkip = 0;
                        if (candidate->op() == PhantomCreateRest)
                            numberOfArgumentsToSkip = candidate->numberOfArgumentsToSkip();
                        CallVarargsData* varargsData = node->callVarargsData();
                        varargsData->firstVarArgOffset += numberOfArgumentsToSkip;

                        InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame();
                        if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
                            Vector<Node*> arguments;
                            for (unsigned i = 1 + varargsData->firstVarArgOffset; i < inlineCallFrame->argumentCountIncludingThis; ++i) {
                                StackAccessData* data = m_graph.m_stackAccessData.add(
                                    virtualRegisterForArgument(i) + inlineCallFrame->stackOffset,
                                    FlushedJSValue);

                                Node* value = insertionSet.insertNode(
                                    nodeIndex, SpecNone, GetStack, node->origin, OpInfo(data));

                                arguments.append(value);
                            }

                            convertToStaticArgumentCountCall(arguments);
                        } else
                            convertToForwardsCall();
                    }

                    break;
                }

                case CheckArray:
                case GetButterfly:
                case FilterGetByStatus:
                case FilterPutByIdStatus:
                case FilterCallLinkStatus:
                case FilterInByIdStatus: {
                    if (!isEliminatedAllocation(node->child1().node()))
                        break;
                    node->remove(m_graph);
                    break;
                }

                case CheckStructureOrEmpty:
                case CheckStructure:
                    if (!isEliminatedAllocation(node->child1().node()))
                        break;
                    node->child1() = Edge(); // Remove the cell check since we've proven it's not needed and FTL lowering might botch this.
                    node->remove(m_graph);
                    break;

                default:
                    break;
                }

                if (node->isPseudoTerminal()) {
                    pseudoTerminal = node;
                    break;
                }
            }

            insertionSet.execute(block);

            if (pseudoTerminal) {
                for (unsigned i = 0; i < block->size(); ++i) {
                    Node* node = block->at(i);
                    if (node != pseudoTerminal)
                        continue;
                    block->resize(i + 1);
                    block->append(m_graph.addNode(SpecNone, Unreachable, node->origin));
                    modifiedCFG = true;
                    break;
                }
            }
        }

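        // Truncating a block after a pseudo-terminal can leave successor blocks
        // unreachable, so the CFG has to be invalidated and reachability recomputed.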
        if (modifiedCFG) {
            m_graph.invalidateCFG();
            m_graph.resetReachability();
            m_graph.killUnreachableBlocks();
        }
    }

    HashSet<Node*> m_candidates;
};

} // anonymous namespace

bool performArgumentsElimination(Graph& graph)
{
    return runPhase<ArgumentsEliminationPhase>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)