1/*
2 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "DFGByteCodeParser.h"
28
29#if ENABLE(DFG_JIT)
30
31#include "ArithProfile.h"
32#include "ArrayConstructor.h"
33#include "BasicBlockLocation.h"
34#include "BuiltinNames.h"
35#include "BytecodeStructs.h"
36#include "CallLinkStatus.h"
37#include "CodeBlock.h"
38#include "CodeBlockWithJITType.h"
39#include "CommonSlowPaths.h"
40#include "DFGAbstractHeap.h"
41#include "DFGArrayMode.h"
42#include "DFGCFG.h"
43#include "DFGCapabilities.h"
44#include "DFGClobberize.h"
45#include "DFGClobbersExitState.h"
46#include "DFGGraph.h"
47#include "DFGJITCode.h"
48#include "FunctionCodeBlock.h"
49#include "GetByIdStatus.h"
50#include "Heap.h"
51#include "InByIdStatus.h"
52#include "InstanceOfStatus.h"
53#include "JSCInlines.h"
54#include "JSFixedArray.h"
55#include "JSImmutableButterfly.h"
56#include "JSModuleEnvironment.h"
57#include "JSModuleNamespaceObject.h"
58#include "NumberConstructor.h"
59#include "ObjectConstructor.h"
60#include "OpcodeInlines.h"
61#include "PreciseJumpTargets.h"
62#include "PutByIdFlags.h"
63#include "PutByIdStatus.h"
64#include "RegExpPrototype.h"
65#include "StackAlignment.h"
66#include "StringConstructor.h"
67#include "StructureStubInfo.h"
68#include "SymbolConstructor.h"
69#include "Watchdog.h"
70#include <wtf/CommaPrinter.h>
71#include <wtf/HashMap.h>
72#include <wtf/MathExtras.h>
73#include <wtf/SetForScope.h>
74#include <wtf/StdLibExtras.h>
75
76namespace JSC { namespace DFG {
77
78namespace DFGByteCodeParserInternal {
79#ifdef NDEBUG
80static const bool verbose = false;
81#else
82static const bool verbose = true;
83#endif
84} // namespace DFGByteCodeParserInternal
85
86#define VERBOSE_LOG(...) do { \
87if (DFGByteCodeParserInternal::verbose && Options::verboseDFGBytecodeParsing()) \
88dataLog(__VA_ARGS__); \
89} while (false)
90
91// === ByteCodeParser ===
92//
93// This class is used to compile the dataflow graph from a CodeBlock.
94class ByteCodeParser {
95public:
96 ByteCodeParser(Graph& graph)
97 : m_vm(&graph.m_vm)
98 , m_codeBlock(graph.m_codeBlock)
99 , m_profiledBlock(graph.m_profiledBlock)
100 , m_graph(graph)
101 , m_currentBlock(0)
102 , m_currentIndex(0)
103 , m_constantUndefined(graph.freeze(jsUndefined()))
104 , m_constantNull(graph.freeze(jsNull()))
105 , m_constantNaN(graph.freeze(jsNumber(PNaN)))
106 , m_constantOne(graph.freeze(jsNumber(1)))
107 , m_numArguments(m_codeBlock->numParameters())
108 , m_numLocals(m_codeBlock->numCalleeLocals())
109 , m_parameterSlots(0)
110 , m_numPassedVarArgs(0)
111 , m_inlineStackTop(0)
112 , m_currentInstruction(0)
113 , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
114 {
115 ASSERT(m_profiledBlock);
116 }
117
118 // Parse a full CodeBlock of bytecode.
119 void parse();
120
121private:
122 struct InlineStackEntry;
123
124 // Just parse from m_currentIndex to the end of the current CodeBlock.
125 void parseCodeBlock();
126
127 void ensureLocals(unsigned newNumLocals)
128 {
129 VERBOSE_LOG(" ensureLocals: trying to raise m_numLocals from ", m_numLocals, " to ", newNumLocals, "\n");
130 if (newNumLocals <= m_numLocals)
131 return;
132 m_numLocals = newNumLocals;
133 for (size_t i = 0; i < m_graph.numBlocks(); ++i)
134 m_graph.block(i)->ensureLocals(newNumLocals);
135 }
136
137 // Helper for min and max.
138 template<typename ChecksFunctor>
139 bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
140
141 void refineStatically(CallLinkStatus&, Node* callTarget);
142 // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin,
143 // or they can be untargetable, with bytecodeBegin==UINT_MAX, to be managed manually and not by the linkBlock machinery.
144 // This is used most notably when doing polyvariant inlining (it requires a fair bit of control-flow with no bytecode analog).
145 // It is also used when doing an early return from an inlined callee: it is easier to fix the bytecode index later on if needed
146 // than to move the right index all the way to the treatment of op_ret.
147 BasicBlock* allocateTargetableBlock(unsigned bytecodeIndex);
148 BasicBlock* allocateUntargetableBlock();
149 // An untargetable block can be given a bytecodeIndex to be later managed by linkBlock, but only once, and it can never go in the other direction
150 void makeBlockTargetable(BasicBlock*, unsigned bytecodeIndex);
151 void addJumpTo(BasicBlock*);
152 void addJumpTo(unsigned bytecodeIndex);
153 // Handle calls. This resolves issues surrounding inlining and intrinsics.
154 enum Terminality { Terminal, NonTerminal };
155 Terminality handleCall(
156 VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
157 Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus,
158 SpeculatedType prediction);
159 template<typename CallOp>
160 Terminality handleCall(const Instruction* pc, NodeType op, CallMode);
161 template<typename CallOp>
162 Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode);
163 void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgumnt);
164 void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
165 Node* getArgumentCount();
166 template<typename ChecksFunctor>
167 bool handleRecursiveTailCall(Node* callTargetNode, CallVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded);
168 unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
169 // Handle inlining. Return true if it succeeded, false if we need to plant a call.
170 bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind);
171 unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind);
172 enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing };
173 CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
174 CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
175 template<typename ChecksFunctor>
176 void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
177 // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
178 template<typename ChecksFunctor>
179 bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
180 template<typename ChecksFunctor>
181 bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
182 template<typename ChecksFunctor>
183 bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
184 template<typename ChecksFunctor>
185 bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
186 template<typename ChecksFunctor>
187 bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
188 Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
189 Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType = GetByOffset);
190 bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
191 bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus);
192
193 template<typename Bytecode>
194 void handlePutByVal(Bytecode, unsigned instructionSize);
195 template <typename Bytecode>
196 void handlePutAccessorById(NodeType, Bytecode);
197 template <typename Bytecode>
198 void handlePutAccessorByVal(NodeType, Bytecode);
199 template <typename Bytecode>
200 void handleNewFunc(NodeType, Bytecode);
201 template <typename Bytecode>
202 void handleNewFuncExp(NodeType, Bytecode);
203
204 // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
205 // check the validity of the condition, but it may return a null one if it encounters a contradiction.
206 ObjectPropertyCondition presenceLike(
207 JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
208
209 // Attempt to watch the presence of a property. It will watch that the property is present in the same
210 // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
211 // Returns true if this all works out.
212 bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
213 void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);
214
215 // Works with both GetByIdVariant and the setter form of PutByIdVariant.
216 template<typename VariantType>
217 Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);
218
219 Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);
220
221 template<typename Op>
222 void parseGetById(const Instruction*);
223 void handleGetById(
224 VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
225 void emitPutById(
226 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
227 void handlePutById(
228 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
229 bool isDirect, unsigned intructionSize);
230
231 // Either register a watchpoint or emit a check for this condition. Returns false if the
232 // condition no longer holds, and therefore no reasonable check can be emitted.
233 bool check(const ObjectPropertyCondition&);
234
235 GetByOffsetMethod promoteToConstant(GetByOffsetMethod);
236
237 // Either register a watchpoint or emit a check for this condition. It must be a Presence
238 // condition. It will attempt to promote a Presence condition to an Equivalence condition.
239 // Emits code for the loaded value that the condition guards, and returns a node containing
240 // the loaded value. Returns null if the condition no longer holds.
241 GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
242 Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
243 Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);
244
245 // Calls check() for each condition in the set: that is, it either emits checks or registers
246 // watchpoints (or a combination of the two) to make the conditions hold. If any of those
247 // conditions are no longer checkable, returns false.
248 bool check(const ObjectPropertyConditionSet&);
249
250 // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
251 // base. Does a combination of watchpoint registration and check emission to guard the
252 // conditions, and emits code to load the value from the slot base. Returns a node containing
253 // the loaded value. Returns null if any of the conditions were no longer checkable.
254 GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
255 Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);
256
257 void prepareToParseBlock();
258 void clearCaches();
259
260 // Parse a single basic block of bytecode instructions.
261 void parseBlock(unsigned limit);
262 // Link block successors.
263 void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
264 void linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
265
266 VariableAccessData* newVariableAccessData(VirtualRegister operand)
267 {
268 ASSERT(!operand.isConstant());
269
270 m_graph.m_variableAccessData.append(VariableAccessData(operand));
271 return &m_graph.m_variableAccessData.last();
272 }
273
274 // Get/Set the operands/result of a bytecode instruction.
275 Node* getDirect(VirtualRegister operand)
276 {
277 ASSERT(!operand.isConstant());
278
279 // Is this an argument?
280 if (operand.isArgument())
281 return getArgument(operand);
282
283 // Must be a local.
284 return getLocal(operand);
285 }
286
287 Node* get(VirtualRegister operand)
288 {
289 if (operand.isConstant()) {
290 unsigned constantIndex = operand.toConstantIndex();
291 unsigned oldSize = m_constants.size();
292 if (constantIndex >= oldSize || !m_constants[constantIndex]) {
293 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
294 JSValue value = codeBlock.getConstant(operand.offset());
295 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
296 if (constantIndex >= oldSize) {
297 m_constants.grow(constantIndex + 1);
298 for (unsigned i = oldSize; i < m_constants.size(); ++i)
299 m_constants[i] = nullptr;
300 }
301
302 Node* constantNode = nullptr;
303 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
304 constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
305 else
306 constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
307 m_constants[constantIndex] = constantNode;
308 }
309 ASSERT(m_constants[constantIndex]);
310 return m_constants[constantIndex];
311 }
312
313 if (inlineCallFrame()) {
314 if (!inlineCallFrame()->isClosureCall) {
315 JSFunction* callee = inlineCallFrame()->calleeConstant();
316 if (operand.offset() == CallFrameSlot::callee)
317 return weakJSConstant(callee);
318 }
319 } else if (operand.offset() == CallFrameSlot::callee) {
320 // We have to do some constant-folding here because this enables CreateThis folding. Note
321 // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
322 // case if the function is a singleton then we already know it.
323 if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(*m_vm, m_codeBlock->ownerExecutable())) {
324 if (JSFunction* function = executable->singleton().inferredValue()) {
325 m_graph.watchpoints().addLazily(executable);
326 return weakJSConstant(function);
327 }
328 }
329 return addToGraph(GetCallee);
330 }
331
332 return getDirect(m_inlineStackTop->remapOperand(operand));
333 }
334
335 enum SetMode {
336 // A normal set which follows a two-phase commit that spans code origins. During
337 // the current code origin it issues a MovHint, and at the start of the next
338 // code origin there will be a SetLocal. If the local needs flushing, the second
339 // SetLocal will be preceded with a Flush.
340 NormalSet,
341
342 // A set where the SetLocal happens immediately and there is still a Flush. This
343 // is relevant when assigning to a local in tricky situations for the delayed
344 // SetLocal logic but where we know that we have not performed any side effects
345 // within this code origin. This is a safe replacement for NormalSet anytime we
346 // know that we have not yet performed side effects in this code origin.
347 ImmediateSetWithFlush,
348
349 // A set where the SetLocal happens immediately and we do not Flush it even if
350 // this is a local that is marked as needing it. This is relevant when
351 // initializing locals at the top of a function.
352 ImmediateNakedSet
353 };
354 Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
355 {
356 addToGraph(MovHint, OpInfo(operand.offset()), value);
357
358 // We can't exit anymore because our OSR exit state has changed.
359 m_exitOK = false;
360
361 DelayedSetLocal delayed(currentCodeOrigin(), operand, value, setMode);
362
363 if (setMode == NormalSet) {
364 m_setLocalQueue.append(delayed);
365 return nullptr;
366 }
367
368 return delayed.execute(this);
369 }
370
371 void processSetLocalQueue()
372 {
373 for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
374 m_setLocalQueue[i].execute(this);
375 m_setLocalQueue.shrink(0);
376 }
377
378 Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
379 {
380 return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
381 }
382
383 Node* injectLazyOperandSpeculation(Node* node)
384 {
385 ASSERT(node->op() == GetLocal);
386 ASSERT(node->origin.semantic.bytecodeIndex() == m_currentIndex);
387 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
388 LazyOperandValueProfileKey key(m_currentIndex, node->local());
389 SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
390 node->variableAccessData()->predict(prediction);
391 return node;
392 }
393
394 // Used in implementing get/set, above, where the operand is a local variable.
395 Node* getLocal(VirtualRegister operand)
396 {
397 unsigned local = operand.toLocal();
398
399 Node* node = m_currentBlock->variablesAtTail.local(local);
400
401 // This has two goals: 1) link together variable access datas, and 2)
402 // try to avoid creating redundant GetLocals. (1) is required for
403 // correctness - no other phase will ensure that block-local variable
404 // access data unification is done correctly. (2) is purely opportunistic
405 // and is meant as an compile-time optimization only.
406
407 VariableAccessData* variable;
408
409 if (node) {
410 variable = node->variableAccessData();
411
412 switch (node->op()) {
413 case GetLocal:
414 return node;
415 case SetLocal:
416 return node->child1().node();
417 default:
418 break;
419 }
420 } else
421 variable = newVariableAccessData(operand);
422
423 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
424 m_currentBlock->variablesAtTail.local(local) = node;
425 return node;
426 }
427 Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
428 {
429 SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
430
431 unsigned local = operand.toLocal();
432
433 if (setMode != ImmediateNakedSet) {
434 ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
435 if (argumentPosition)
436 flushDirect(operand, argumentPosition);
437 else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
438 flush(operand);
439 }
440
441 VariableAccessData* variableAccessData = newVariableAccessData(operand);
442 variableAccessData->mergeStructureCheckHoistingFailed(
443 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
444 variableAccessData->mergeCheckArrayHoistingFailed(
445 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
446 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
447 m_currentBlock->variablesAtTail.local(local) = node;
448 return node;
449 }
450
451 // Used in implementing get/set, above, where the operand is an argument.
452 Node* getArgument(VirtualRegister operand)
453 {
454 unsigned argument = operand.toArgument();
455 ASSERT(argument < m_numArguments);
456
457 Node* node = m_currentBlock->variablesAtTail.argument(argument);
458
459 VariableAccessData* variable;
460
461 if (node) {
462 variable = node->variableAccessData();
463
464 switch (node->op()) {
465 case GetLocal:
466 return node;
467 case SetLocal:
468 return node->child1().node();
469 default:
470 break;
471 }
472 } else
473 variable = newVariableAccessData(operand);
474
475 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
476 m_currentBlock->variablesAtTail.argument(argument) = node;
477 return node;
478 }
479 Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
480 {
481 SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
482
483 unsigned argument = operand.toArgument();
484 ASSERT(argument < m_numArguments);
485
486 VariableAccessData* variableAccessData = newVariableAccessData(operand);
487
488 // Always flush arguments, except for 'this'. If 'this' is created by us,
489 // then make sure that it's never unboxed.
490 if (argument || m_graph.needsFlushedThis()) {
491 if (setMode != ImmediateNakedSet)
492 flushDirect(operand);
493 }
494
495 if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
496 variableAccessData->mergeShouldNeverUnbox(true);
497
498 variableAccessData->mergeStructureCheckHoistingFailed(
499 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
500 variableAccessData->mergeCheckArrayHoistingFailed(
501 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
502 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
503 m_currentBlock->variablesAtTail.argument(argument) = node;
504 return node;
505 }
506
507 ArgumentPosition* findArgumentPositionForArgument(int argument)
508 {
509 InlineStackEntry* stack = m_inlineStackTop;
510 while (stack->m_inlineCallFrame)
511 stack = stack->m_caller;
512 return stack->m_argumentPositions[argument];
513 }
514
515 ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
516 {
517 for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
518 InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
519 if (!inlineCallFrame)
520 break;
521 if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + CallFrame::headerSizeInRegisters))
522 continue;
523 if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->argumentsWithFixup.size()))
524 continue;
525 int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
526 return stack->m_argumentPositions[argument];
527 }
528 return 0;
529 }
530
531 ArgumentPosition* findArgumentPosition(VirtualRegister operand)
532 {
533 if (operand.isArgument())
534 return findArgumentPositionForArgument(operand.toArgument());
535 return findArgumentPositionForLocal(operand);
536 }
537
538 template<typename AddFlushDirectFunc>
539 void flushImpl(InlineCallFrame* inlineCallFrame, const AddFlushDirectFunc& addFlushDirect)
540 {
541 int numArguments;
542 if (inlineCallFrame) {
543 ASSERT(!m_graph.hasDebuggerEnabled());
544 numArguments = inlineCallFrame->argumentsWithFixup.size();
545 if (inlineCallFrame->isClosureCall)
546 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::callee)));
547 if (inlineCallFrame->isVarargs())
548 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::argumentCount)));
549 } else
550 numArguments = m_graph.baselineCodeBlockFor(inlineCallFrame)->numParameters();
551
552 for (unsigned argument = numArguments; argument--;)
553 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForArgument(argument)));
554
555 if (m_graph.needsScopeRegister())
556 addFlushDirect(nullptr, m_graph.m_codeBlock->scopeRegister());
557 }
558
559 template<typename AddFlushDirectFunc, typename AddPhantomLocalDirectFunc>
560 void flushForTerminalImpl(CodeOrigin origin, const AddFlushDirectFunc& addFlushDirect, const AddPhantomLocalDirectFunc& addPhantomLocalDirect)
561 {
562 origin.walkUpInlineStack(
563 [&] (CodeOrigin origin) {
564 unsigned bytecodeIndex = origin.bytecodeIndex();
565 InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
566 flushImpl(inlineCallFrame, addFlushDirect);
567
568 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
569 FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
570 const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
571
572 for (unsigned local = codeBlock->numCalleeLocals(); local--;) {
573 if (livenessAtBytecode[local])
574 addPhantomLocalDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForLocal(local)));
575 }
576 });
577 }
578
579 void flush(VirtualRegister operand)
580 {
581 flushDirect(m_inlineStackTop->remapOperand(operand));
582 }
583
584 void flushDirect(VirtualRegister operand)
585 {
586 flushDirect(operand, findArgumentPosition(operand));
587 }
588
589 void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
590 {
591 addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
592 }
593
594 template<NodeType nodeType>
595 void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
596 {
597 ASSERT(!operand.isConstant());
598
599 Node* node = m_currentBlock->variablesAtTail.operand(operand);
600
601 VariableAccessData* variable;
602
603 if (node)
604 variable = node->variableAccessData();
605 else
606 variable = newVariableAccessData(operand);
607
608 node = addToGraph(nodeType, OpInfo(variable));
609 m_currentBlock->variablesAtTail.operand(operand) = node;
610 if (argumentPosition)
611 argumentPosition->addVariable(variable);
612 }
613
614 void phantomLocalDirect(VirtualRegister operand)
615 {
616 addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
617 }
618
619 void flush(InlineStackEntry* inlineStackEntry)
620 {
621 auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
622 flushImpl(inlineStackEntry->m_inlineCallFrame, addFlushDirect);
623 }
624
625 void flushForTerminal()
626 {
627 auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
628 auto addPhantomLocalDirect = [&] (InlineCallFrame*, VirtualRegister reg) { phantomLocalDirect(reg); };
629 flushForTerminalImpl(currentCodeOrigin(), addFlushDirect, addPhantomLocalDirect);
630 }
631
632 void flushForReturn()
633 {
634 flush(m_inlineStackTop);
635 }
636
637 void flushIfTerminal(SwitchData& data)
638 {
639 if (data.fallThrough.bytecodeIndex() > m_currentIndex)
640 return;
641
642 for (unsigned i = data.cases.size(); i--;) {
643 if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
644 return;
645 }
646
647 flushForTerminal();
648 }
649
650 // Assumes that the constant should be strongly marked.
651 Node* jsConstant(JSValue constantValue)
652 {
653 return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
654 }
655
656 Node* weakJSConstant(JSValue constantValue)
657 {
658 return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
659 }
660
661 // Helper functions to get/set the this value.
662 Node* getThis()
663 {
664 return get(m_inlineStackTop->m_codeBlock->thisRegister());
665 }
666
667 void setThis(Node* value)
668 {
669 set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
670 }
671
672 InlineCallFrame* inlineCallFrame()
673 {
674 return m_inlineStackTop->m_inlineCallFrame;
675 }
676
677 bool allInlineFramesAreTailCalls()
678 {
679 return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
680 }
681
682 CodeOrigin currentCodeOrigin()
683 {
684 return CodeOrigin(m_currentIndex, inlineCallFrame());
685 }
686
687 NodeOrigin currentNodeOrigin()
688 {
689 CodeOrigin semantic;
690 CodeOrigin forExit;
691
692 if (m_currentSemanticOrigin.isSet())
693 semantic = m_currentSemanticOrigin;
694 else
695 semantic = currentCodeOrigin();
696
697 forExit = currentCodeOrigin();
698
699 return NodeOrigin(semantic, forExit, m_exitOK);
700 }
701
702 BranchData* branchData(unsigned taken, unsigned notTaken)
703 {
704 // We assume that branches originating from bytecode always have a fall-through. We
705 // use this assumption to avoid checking for the creation of terminal blocks.
706 ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
707 BranchData* data = m_graph.m_branchData.add();
708 *data = BranchData::withBytecodeIndices(taken, notTaken);
709 return data;
710 }
711
712 Node* addToGraph(Node* node)
713 {
714 VERBOSE_LOG(" appended ", node, " ", Graph::opName(node->op()), "\n");
715
716 m_hasAnyForceOSRExits |= (node->op() == ForceOSRExit);
717
718 m_currentBlock->append(node);
719 if (clobbersExitState(m_graph, node))
720 m_exitOK = false;
721 return node;
722 }
723
724 Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
725 {
726 Node* result = m_graph.addNode(
727 op, currentNodeOrigin(), Edge(child1), Edge(child2),
728 Edge(child3));
729 return addToGraph(result);
730 }
731 Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
732 {
733 Node* result = m_graph.addNode(
734 op, currentNodeOrigin(), child1, child2, child3);
735 return addToGraph(result);
736 }
737 Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
738 {
739 Node* result = m_graph.addNode(
740 op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
741 Edge(child3));
742 return addToGraph(result);
743 }
744 Node* addToGraph(NodeType op, OpInfo info, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
745 {
746 Node* result = m_graph.addNode(op, currentNodeOrigin(), info, child1, child2, child3);
747 return addToGraph(result);
748 }
749 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
750 {
751 Node* result = m_graph.addNode(
752 op, currentNodeOrigin(), info1, info2,
753 Edge(child1), Edge(child2), Edge(child3));
754 return addToGraph(result);
755 }
756 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
757 {
758 Node* result = m_graph.addNode(
759 op, currentNodeOrigin(), info1, info2, child1, child2, child3);
760 return addToGraph(result);
761 }
762
763 Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2 = OpInfo())
764 {
765 Node* result = m_graph.addNode(
766 Node::VarArg, op, currentNodeOrigin(), info1, info2,
767 m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
768 addToGraph(result);
769
770 m_numPassedVarArgs = 0;
771
772 return result;
773 }
774
775 void addVarArgChild(Node* child)
776 {
777 m_graph.m_varArgChildren.append(Edge(child));
778 m_numPassedVarArgs++;
779 }
780
781 void addVarArgChild(Edge child)
782 {
783 m_graph.m_varArgChildren.append(child);
784 m_numPassedVarArgs++;
785 }
786
787 Node* addCallWithoutSettingResult(
788 NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
789 OpInfo prediction)
790 {
791 addVarArgChild(callee);
792 size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);
793
794 if (parameterSlots > m_parameterSlots)
795 m_parameterSlots = parameterSlots;
796
797 for (int i = 0; i < argCount; ++i)
798 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
799
800 return addToGraph(Node::VarArg, op, opInfo, prediction);
801 }
802
803 Node* addCall(
804 VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
805 SpeculatedType prediction)
806 {
807 if (op == TailCall) {
808 if (allInlineFramesAreTailCalls())
809 return addCallWithoutSettingResult(op, OpInfo(signature), callee, argCount, registerOffset, OpInfo());
810 op = TailCallInlinedCaller;
811 }
812
813
814 Node* call = addCallWithoutSettingResult(
815 op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
816 if (result.isValid())
817 set(result, call);
818 return call;
819 }
820
821 Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
822 {
823 // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
824 // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
825 // object's structure as soon as we make it a weakJSCosntant.
826 Node* objectNode = weakJSConstant(object);
827 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
828 return objectNode;
829 }
830
831 SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
832 {
833 auto getValueProfilePredictionFromForCodeBlockAndBytecodeOffset = [&] (CodeBlock* codeBlock, const CodeOrigin& codeOrigin)
834 {
835 SpeculatedType prediction;
836 {
837 ConcurrentJSLocker locker(codeBlock->m_lock);
838 prediction = codeBlock->valueProfilePredictionForBytecodeOffset(locker, codeOrigin.bytecodeIndex());
839 }
840 auto* fuzzerAgent = m_vm->fuzzerAgent();
841 if (UNLIKELY(fuzzerAgent))
842 return fuzzerAgent->getPrediction(codeBlock, codeOrigin, prediction) & SpecBytecodeTop;
843 return prediction;
844 };
845
846 SpeculatedType prediction = getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(m_inlineStackTop->m_profiledBlock, CodeOrigin(bytecodeIndex, inlineCallFrame()));
847 if (prediction != SpecNone)
848 return prediction;
849
850 // If we have no information about the values this
851 // node generates, we check if by any chance it is
852 // a tail call opcode. In that case, we walk up the
853 // inline frames to find a call higher in the call
854 // chain and use its prediction. If we only have
855 // inlined tail call frames, we use SpecFullTop
856 // to avoid a spurious OSR exit.
857 auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex);
858 OpcodeID opcodeID = instruction->opcodeID();
859
860 switch (opcodeID) {
861 case op_tail_call:
862 case op_tail_call_varargs:
863 case op_tail_call_forward_arguments: {
864 // Things should be more permissive to us returning BOTTOM instead of TOP here.
865 // Currently, this will cause us to Force OSR exit. This is bad because returning
866 // TOP will cause anything that transitively touches this speculated type to
867 // also become TOP during prediction propagation.
868 // https://bugs.webkit.org/show_bug.cgi?id=164337
869 if (!inlineCallFrame())
870 return SpecFullTop;
871
872 CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
873 if (!codeOrigin)
874 return SpecFullTop;
875
876 InlineStackEntry* stack = m_inlineStackTop;
877 while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame())
878 stack = stack->m_caller;
879
880 return getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(stack->m_profiledBlock, *codeOrigin);
881 }
882
883 default:
884 return SpecNone;
885 }
886
887 RELEASE_ASSERT_NOT_REACHED();
888 return SpecNone;
889 }
890
891 SpeculatedType getPrediction(unsigned bytecodeIndex)
892 {
893 SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
894
895 if (prediction == SpecNone) {
896 // We have no information about what values this node generates. Give up
897 // on executing this code, since we're likely to do more damage than good.
898 addToGraph(ForceOSRExit);
899 }
900
901 return prediction;
902 }
903
904 SpeculatedType getPredictionWithoutOSRExit()
905 {
906 return getPredictionWithoutOSRExit(m_currentIndex);
907 }
908
909 SpeculatedType getPrediction()
910 {
911 return getPrediction(m_currentIndex);
912 }
913
914 ArrayMode getArrayMode(Array::Action action)
915 {
916 CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock;
917 ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeOffset(m_currentInstruction));
918 return getArrayMode(*profile, action);
919 }
920
921 ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action)
922 {
923 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
924 profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
925 bool makeSafe = profile.outOfBounds(locker);
926 return ArrayMode::fromObserved(locker, &profile, action, makeSafe);
927 }
928
929 Node* makeSafe(Node* node)
930 {
931 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
932 node->mergeFlags(NodeMayOverflowInt32InDFG);
933 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
934 node->mergeFlags(NodeMayNegZeroInDFG);
935
936 if (!isX86() && (node->op() == ArithMod || node->op() == ValueMod))
937 return node;
938
939 {
940 ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
941 if (arithProfile) {
942 switch (node->op()) {
943 case ArithAdd:
944 case ArithSub:
945 case ValueAdd:
946 if (arithProfile->didObserveDouble())
947 node->mergeFlags(NodeMayHaveDoubleResult);
948 if (arithProfile->didObserveNonNumeric())
949 node->mergeFlags(NodeMayHaveNonNumericResult);
950 if (arithProfile->didObserveBigInt())
951 node->mergeFlags(NodeMayHaveBigIntResult);
952 break;
953
954 case ValueMul:
955 case ArithMul: {
956 if (arithProfile->didObserveInt52Overflow())
957 node->mergeFlags(NodeMayOverflowInt52);
958 if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
959 node->mergeFlags(NodeMayOverflowInt32InBaseline);
960 if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
961 node->mergeFlags(NodeMayNegZeroInBaseline);
962 if (arithProfile->didObserveDouble())
963 node->mergeFlags(NodeMayHaveDoubleResult);
964 if (arithProfile->didObserveNonNumeric())
965 node->mergeFlags(NodeMayHaveNonNumericResult);
966 if (arithProfile->didObserveBigInt())
967 node->mergeFlags(NodeMayHaveBigIntResult);
968 break;
969 }
970 case ValueNegate:
971 case ArithNegate: {
972 if (arithProfile->lhsObservedType().sawNumber() || arithProfile->didObserveDouble())
973 node->mergeFlags(NodeMayHaveDoubleResult);
974 if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
975 node->mergeFlags(NodeMayNegZeroInBaseline);
976 if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
977 node->mergeFlags(NodeMayOverflowInt32InBaseline);
978 if (arithProfile->didObserveNonNumeric())
979 node->mergeFlags(NodeMayHaveNonNumericResult);
980 if (arithProfile->didObserveBigInt())
981 node->mergeFlags(NodeMayHaveBigIntResult);
982 break;
983 }
984
985 default:
986 break;
987 }
988 }
989 }
990
991 if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
992 switch (node->op()) {
993 case UInt32ToNumber:
994 case ArithAdd:
995 case ArithSub:
996 case ValueAdd:
997 case ValueMod:
998 case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
999 node->mergeFlags(NodeMayOverflowInt32InBaseline);
1000 break;
1001
1002 default:
1003 break;
1004 }
1005 }
1006
1007 return node;
1008 }
1009
1010 Node* makeDivSafe(Node* node)
1011 {
1012 ASSERT(node->op() == ArithDiv || node->op() == ValueDiv);
1013
1014 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1015 node->mergeFlags(NodeMayOverflowInt32InDFG);
1016 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
1017 node->mergeFlags(NodeMayNegZeroInDFG);
1018
1019 // The main slow case counter for op_div in the old JIT counts only when
1020 // the operands are not numbers. We don't care about that since we already
1021 // have speculations in place that take care of that separately. We only
1022 // care about when the outcome of the division is not an integer, which
1023 // is what the special fast case counter tells us.
1024
1025 if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
1026 return node;
1027
1028 // FIXME: It might be possible to make this more granular.
1029 node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
1030
1031 ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
1032 if (arithProfile->didObserveBigInt())
1033 node->mergeFlags(NodeMayHaveBigIntResult);
1034
1035 return node;
1036 }
1037
1038 void noticeArgumentsUse()
1039 {
1040 // All of the arguments in this function need to be formatted as JSValues because we will
1041 // load from them in a random-access fashion and we don't want to have to switch on
1042 // format.
1043
1044 for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
1045 argument->mergeShouldNeverUnbox(true);
1046 }
1047
1048 bool needsDynamicLookup(ResolveType, OpcodeID);
1049
1050 VM* m_vm;
1051 CodeBlock* m_codeBlock;
1052 CodeBlock* m_profiledBlock;
1053 Graph& m_graph;
1054
1055 // The current block being generated.
1056 BasicBlock* m_currentBlock;
1057 // The bytecode index of the current instruction being generated.
1058 unsigned m_currentIndex;
1059 // The semantic origin of the current node if different from the current Index.
1060 CodeOrigin m_currentSemanticOrigin;
1061 // True if it's OK to OSR exit right now.
1062 bool m_exitOK { false };
1063
1064 FrozenValue* m_constantUndefined;
1065 FrozenValue* m_constantNull;
1066 FrozenValue* m_constantNaN;
1067 FrozenValue* m_constantOne;
1068 Vector<Node*, 16> m_constants;
1069
1070 HashMap<InlineCallFrame*, Vector<ArgumentPosition*>, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> m_inlineCallFrameToArgumentPositions;
1071
1072 // The number of arguments passed to the function.
1073 unsigned m_numArguments;
1074 // The number of locals (vars + temporaries) used in the function.
1075 unsigned m_numLocals;
1076 // The number of slots (in units of sizeof(Register)) that we need to
1077 // preallocate for arguments to outgoing calls from this frame. This
1078 // number includes the CallFrame slots that we initialize for the callee
1079 // (but not the callee-initialized CallerFrame and ReturnPC slots).
1080 // This number is 0 if and only if this function is a leaf.
1081 unsigned m_parameterSlots;
1082 // The number of var args passed to the next var arg node.
1083 unsigned m_numPassedVarArgs;
1084
1085 struct InlineStackEntry {
1086 ByteCodeParser* m_byteCodeParser;
1087
1088 CodeBlock* m_codeBlock;
1089 CodeBlock* m_profiledBlock;
1090 InlineCallFrame* m_inlineCallFrame;
1091
1092 ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
1093
1094 QueryableExitProfile m_exitProfile;
1095
1096 // Remapping of identifier and constant numbers from the code block being
1097 // inlined (inline callee) to the code block that we're inlining into
1098 // (the machine code block, which is the transitive, though not necessarily
1099 // direct, caller).
1100 Vector<unsigned> m_identifierRemap;
1101 Vector<unsigned> m_switchRemap;
1102
1103 // These are blocks whose terminal is a Jump, Branch or Switch, and whose target has not yet been linked.
1104 // Their terminal instead refers to a bytecode index, and the right BB can be found in m_blockLinkingTargets.
1105 Vector<BasicBlock*> m_unlinkedBlocks;
1106
1107 // Potential block linking targets. Must be sorted by bytecodeBegin, and
1108 // cannot have two blocks that have the same bytecodeBegin.
1109 Vector<BasicBlock*> m_blockLinkingTargets;
1110
1111 // Optional: a continuation block for returns to jump to. It is set by early returns if it does not exist.
1112 BasicBlock* m_continuationBlock;
1113
1114 VirtualRegister m_returnValue;
1115
1116 // Speculations about variable types collected from the profiled code block,
1117 // which are based on OSR exit profiles that past DFG compilations of this
1118 // code block had gathered.
1119 LazyOperandValueProfileParser m_lazyOperands;
1120
1121 ICStatusMap m_baselineMap;
1122 ICStatusContext m_optimizedContext;
1123
1124 // Pointers to the argument position trackers for this slice of code.
1125 Vector<ArgumentPosition*> m_argumentPositions;
1126
1127 InlineStackEntry* m_caller;
1128
1129 InlineStackEntry(
1130 ByteCodeParser*,
1131 CodeBlock*,
1132 CodeBlock* profiledBlock,
1133 JSFunction* callee, // Null if this is a closure call.
1134 VirtualRegister returnValueVR,
1135 VirtualRegister inlineCallFrameStart,
1136 int argumentCountIncludingThis,
1137 InlineCallFrame::Kind,
1138 BasicBlock* continuationBlock);
1139
1140 ~InlineStackEntry();
1141
1142 VirtualRegister remapOperand(VirtualRegister operand) const
1143 {
1144 if (!m_inlineCallFrame)
1145 return operand;
1146
1147 ASSERT(!operand.isConstant());
1148
1149 return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
1150 }
1151 };
1152
1153 InlineStackEntry* m_inlineStackTop;
1154
1155 ICStatusContextStack m_icContextStack;
1156
1157 struct DelayedSetLocal {
1158 CodeOrigin m_origin;
1159 VirtualRegister m_operand;
1160 Node* m_value;
1161 SetMode m_setMode;
1162
1163 DelayedSetLocal() { }
1164 DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value, SetMode setMode)
1165 : m_origin(origin)
1166 , m_operand(operand)
1167 , m_value(value)
1168 , m_setMode(setMode)
1169 {
1170 RELEASE_ASSERT(operand.isValid());
1171 }
1172
1173 Node* execute(ByteCodeParser* parser)
1174 {
1175 if (m_operand.isArgument())
1176 return parser->setArgument(m_origin, m_operand, m_value, m_setMode);
1177 return parser->setLocal(m_origin, m_operand, m_value, m_setMode);
1178 }
1179 };
1180
1181 Vector<DelayedSetLocal, 2> m_setLocalQueue;
1182
1183 const Instruction* m_currentInstruction;
1184 bool m_hasDebuggerEnabled;
1185 bool m_hasAnyForceOSRExits { false };
1186};
1187
1188BasicBlock* ByteCodeParser::allocateTargetableBlock(unsigned bytecodeIndex)
1189{
1190 ASSERT(bytecodeIndex != UINT_MAX);
1191 Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
1192 BasicBlock* blockPtr = block.ptr();
1193 // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1194 if (m_inlineStackTop->m_blockLinkingTargets.size())
1195 ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1196 m_inlineStackTop->m_blockLinkingTargets.append(blockPtr);
1197 m_graph.appendBlock(WTFMove(block));
1198 return blockPtr;
1199}
1200
1201BasicBlock* ByteCodeParser::allocateUntargetableBlock()
1202{
1203 Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
1204 BasicBlock* blockPtr = block.ptr();
1205 m_graph.appendBlock(WTFMove(block));
1206 return blockPtr;
1207}
1208
1209void ByteCodeParser::makeBlockTargetable(BasicBlock* block, unsigned bytecodeIndex)
1210{
1211 RELEASE_ASSERT(block->bytecodeBegin == UINT_MAX);
1212 block->bytecodeBegin = bytecodeIndex;
1213 // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1214 if (m_inlineStackTop->m_blockLinkingTargets.size())
1215 ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1216 m_inlineStackTop->m_blockLinkingTargets.append(block);
1217}
1218
1219void ByteCodeParser::addJumpTo(BasicBlock* block)
1220{
1221 ASSERT(!m_currentBlock->terminal());
1222 Node* jumpNode = addToGraph(Jump);
1223 jumpNode->targetBlock() = block;
1224 m_currentBlock->didLink();
1225}
1226
1227void ByteCodeParser::addJumpTo(unsigned bytecodeIndex)
1228{
1229 ASSERT(!m_currentBlock->terminal());
1230 addToGraph(Jump, OpInfo(bytecodeIndex));
1231 m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock);
1232}
1233
1234template<typename CallOp>
1235ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode)
1236{
1237 auto bytecode = pc->as<CallOp>();
1238 Node* callTarget = get(bytecode.m_callee);
1239 int registerOffset = -static_cast<int>(bytecode.m_argv);
1240
1241 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1242 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1243 m_inlineStackTop->m_baselineMap, m_icContextStack);
1244
1245 InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode);
1246
1247 return handleCall(bytecode.m_dst, op, kind, pc->size(), callTarget,
1248 bytecode.m_argc, registerOffset, callLinkStatus, getPrediction());
1249}
1250
1251void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
1252{
1253 if (callTarget->isCellConstant())
1254 callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1255}
1256
1257ByteCodeParser::Terminality ByteCodeParser::handleCall(
1258 VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1259 Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1260 CallLinkStatus callLinkStatus, SpeculatedType prediction)
1261{
1262 ASSERT(registerOffset <= 0);
1263
1264 refineStatically(callLinkStatus, callTarget);
1265
1266 VERBOSE_LOG(" Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1267
1268 // If we have profiling information about this call, and it did not behave too polymorphically,
1269 // we may be able to inline it, or in the case of recursive tail calls turn it into a jump.
1270 if (callLinkStatus.canOptimize()) {
1271 addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1272
1273 VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
1274 auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument,
1275 argumentCountIncludingThis, m_currentIndex + instructionSize, op, kind, prediction);
1276 if (optimizationResult == CallOptimizationResult::OptimizedToJump)
1277 return Terminal;
1278 if (optimizationResult == CallOptimizationResult::Inlined) {
1279 if (UNLIKELY(m_graph.compilation()))
1280 m_graph.compilation()->noticeInlinedCall();
1281 return NonTerminal;
1282 }
1283 }
1284
1285 Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1286 ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
1287 return callNode->op() == TailCall ? Terminal : NonTerminal;
1288}
1289
1290template<typename CallOp>
1291ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode)
1292{
1293 auto bytecode = pc->as<CallOp>();
1294 int firstFreeReg = bytecode.m_firstFree.offset();
1295 int firstVarArgOffset = bytecode.m_firstVarArg;
1296
1297 SpeculatedType prediction = getPrediction();
1298
1299 Node* callTarget = get(bytecode.m_callee);
1300
1301 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1302 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1303 m_inlineStackTop->m_baselineMap, m_icContextStack);
1304 refineStatically(callLinkStatus, callTarget);
1305
1306 VERBOSE_LOG(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1307
1308 if (callLinkStatus.canOptimize()) {
1309 addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1310
1311 if (handleVarargsInlining(callTarget, bytecode.m_dst,
1312 callLinkStatus, firstFreeReg, bytecode.m_thisValue, bytecode.m_arguments,
1313 firstVarArgOffset, op,
1314 InlineCallFrame::varargsKindFor(callMode))) {
1315 if (UNLIKELY(m_graph.compilation()))
1316 m_graph.compilation()->noticeInlinedCall();
1317 return NonTerminal;
1318 }
1319 }
1320
1321 CallVarargsData* data = m_graph.m_callVarargsData.add();
1322 data->firstVarArgOffset = firstVarArgOffset;
1323
1324 Node* thisChild = get(bytecode.m_thisValue);
1325 Node* argumentsChild = nullptr;
1326 if (op != TailCallForwardVarargs)
1327 argumentsChild = get(bytecode.m_arguments);
1328
1329 if (op == TailCallVarargs || op == TailCallForwardVarargs) {
1330 if (allInlineFramesAreTailCalls()) {
1331 addToGraph(op, OpInfo(data), OpInfo(), callTarget, thisChild, argumentsChild);
1332 return Terminal;
1333 }
1334 op = op == TailCallVarargs ? TailCallVarargsInlinedCaller : TailCallForwardVarargsInlinedCaller;
1335 }
1336
1337 Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
1338 if (bytecode.m_dst.isValid())
1339 set(bytecode.m_dst, call);
1340 return NonTerminal;
1341}
1342
1343void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1344{
1345 Node* thisArgument;
1346 if (thisArgumentReg.isValid())
1347 thisArgument = get(thisArgumentReg);
1348 else
1349 thisArgument = nullptr;
1350
1351 JSCell* calleeCell;
1352 Node* callTargetForCheck;
1353 if (callee.isClosureCall()) {
1354 calleeCell = callee.executable();
1355 callTargetForCheck = addToGraph(GetExecutable, callTarget);
1356 } else {
1357 calleeCell = callee.nonExecutableCallee();
1358 callTargetForCheck = callTarget;
1359 }
1360
1361 ASSERT(calleeCell);
1362 addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck);
1363 if (thisArgument)
1364 addToGraph(Phantom, thisArgument);
1365}
1366
1367Node* ByteCodeParser::getArgumentCount()
1368{
1369 Node* argumentCount;
1370 if (m_inlineStackTop->m_inlineCallFrame && !m_inlineStackTop->m_inlineCallFrame->isVarargs())
1371 argumentCount = jsConstant(m_graph.freeze(jsNumber(m_inlineStackTop->m_inlineCallFrame->argumentCountIncludingThis))->value());
1372 else
1373 argumentCount = addToGraph(GetArgumentCountIncludingThis, OpInfo(m_inlineStackTop->m_inlineCallFrame), OpInfo(SpecInt32Only));
1374 return argumentCount;
1375}
1376
1377void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1378{
1379 for (int i = 0; i < argumentCountIncludingThis; ++i)
1380 addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1381}
1382
1383template<typename ChecksFunctor>
1384bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant callVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded)
1385{
1386 if (UNLIKELY(!Options::optimizeRecursiveTailCalls()))
1387 return false;
1388
1389 auto targetExecutable = callVariant.executable();
1390 InlineStackEntry* stackEntry = m_inlineStackTop;
1391 do {
1392 if (targetExecutable != stackEntry->executable())
1393 continue;
1394 VERBOSE_LOG(" We found a recursive tail call, trying to optimize it into a jump.\n");
1395
1396 if (auto* callFrame = stackEntry->m_inlineCallFrame) {
1397 // Some code may statically use the argument count from the InlineCallFrame, so it would be invalid to loop back if it does not match.
1398 // We "continue" instead of returning false in case another stack entry further on the stack has the right number of arguments.
1399 if (argumentCountIncludingThis != static_cast<int>(callFrame->argumentCountIncludingThis))
1400 continue;
1401 } else {
1402 // We are in the machine code entry (i.e. the original caller).
1403 // If we have more arguments than the number of parameters to the function, it is not clear where we could put them on the stack.
1404 if (argumentCountIncludingThis > m_codeBlock->numParameters())
1405 return false;
1406 }
1407
1408 // If an InlineCallFrame is not a closure, it was optimized using a constant callee.
1409 // Check whether this is the same callee that we are trying to inline here.
1410 if (stackEntry->m_inlineCallFrame && !stackEntry->m_inlineCallFrame->isClosureCall) {
1411 if (stackEntry->m_inlineCallFrame->calleeConstant() != callVariant.function())
1412 continue;
1413 }
1414
1415 // We must add a check that the profiling information was correct and that the target of this call is what we thought.
1416 emitFunctionCheckIfNeeded();
1417 // We flush everything, as if we were in the backedge of a loop (see treatment of op_jmp in parseBlock).
1418 flushForTerminal();
1419
1420 // We must set the callee to the right value
1421 if (stackEntry->m_inlineCallFrame) {
1422 if (stackEntry->m_inlineCallFrame->isClosureCall)
1423 setDirect(stackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)), callTargetNode, NormalSet);
1424 } else
1425 addToGraph(SetCallee, callTargetNode);
1426
1427 // We must set the arguments to the right values
1428 if (!stackEntry->m_inlineCallFrame)
1429 addToGraph(SetArgumentCountIncludingThis, OpInfo(argumentCountIncludingThis));
1430 int argIndex = 0;
1431 for (; argIndex < argumentCountIncludingThis; ++argIndex) {
1432 Node* value = get(virtualRegisterForArgument(argIndex, registerOffset));
1433 setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), value, NormalSet);
1434 }
1435 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1436 for (; argIndex < stackEntry->m_codeBlock->numParameters(); ++argIndex)
1437 setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), undefined, NormalSet);
1438
1439 // We must repeat the work of op_enter here as we will jump right after it.
1440 // We jump right after it and not before it, because of some invariant saying that a CFG root cannot have predecessors in the IR.
1441 for (int i = 0; i < stackEntry->m_codeBlock->numVars(); ++i)
1442 setDirect(stackEntry->remapOperand(virtualRegisterForLocal(i)), undefined, NormalSet);
1443
1444 // We want to emit the SetLocals with an exit origin that points to the place we are jumping to.
1445 unsigned oldIndex = m_currentIndex;
1446 auto oldStackTop = m_inlineStackTop;
1447 m_inlineStackTop = stackEntry;
1448 m_currentIndex = opcodeLengths[op_enter];
1449 m_exitOK = true;
1450 processSetLocalQueue();
1451 m_currentIndex = oldIndex;
1452 m_inlineStackTop = oldStackTop;
1453 m_exitOK = false;
1454
1455 BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), opcodeLengths[op_enter], getBytecodeBeginForBlock);
1456 RELEASE_ASSERT(entryBlockPtr);
1457 addJumpTo(*entryBlockPtr);
1458 return true;
1459 // It would be unsound to jump over a non-tail call: the "tail" call is not really a tail call in that case.
1460 } while (stackEntry->m_inlineCallFrame && stackEntry->m_inlineCallFrame->kind == InlineCallFrame::TailCall && (stackEntry = stackEntry->m_caller));
1461
1462 // The tail call was not recursive
1463 return false;
1464}
1465
1466unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, InlineCallFrame::Kind kind)
1467{
1468 CallMode callMode = InlineCallFrame::callModeFor(kind);
1469 CodeSpecializationKind specializationKind = specializationKindFor(callMode);
1470 VERBOSE_LOG("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1471
1472 if (m_hasDebuggerEnabled) {
1473 VERBOSE_LOG(" Failing because the debugger is in use.\n");
1474 return UINT_MAX;
1475 }
1476
1477 FunctionExecutable* executable = callee.functionExecutable();
1478 if (!executable) {
1479 VERBOSE_LOG(" Failing because there is no function executable.\n");
1480 return UINT_MAX;
1481 }
1482
1483 // Do we have a code block, and does the code block's size match the heuristics/requirements for
1484 // being an inline candidate? We might not have a code block (1) if the code was thrown away,
1485 // (2) if we simply hadn't actually made this call yet, or (3) if the code is a builtin function and
1486 // the specialization kind is construct. In the first two cases, we could still theoretically attempt
1487 // to inline it if we had a static proof of what was being called; this might happen for example
1488 // if you call a global function, where watchpointing gives us static information. Overall,
1489 // it's a rare case because we expect that any hot callees would have already been compiled.
1490 CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind);
1491 if (!codeBlock) {
1492 VERBOSE_LOG(" Failing because no code block available.\n");
1493 return UINT_MAX;
1494 }
1495
1496 if (!Options::useArityFixupInlining()) {
1497 if (codeBlock->numParameters() > argumentCountIncludingThis) {
1498 VERBOSE_LOG(" Failing because of arity mismatch.\n");
1499 return UINT_MAX;
1500 }
1501 }
1502
1503 CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1504 codeBlock, specializationKind, callee.isClosureCall());
1505 VERBOSE_LOG(" Call mode: ", callMode, "\n");
1506 VERBOSE_LOG(" Is closure call: ", callee.isClosureCall(), "\n");
1507 VERBOSE_LOG(" Capability level: ", capabilityLevel, "\n");
1508 VERBOSE_LOG(" Might inline function: ", mightInlineFunctionFor(codeBlock, specializationKind), "\n");
1509 VERBOSE_LOG(" Might compile function: ", mightCompileFunctionFor(codeBlock, specializationKind), "\n");
1510 VERBOSE_LOG(" Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1511 VERBOSE_LOG(" Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
1512 if (!canInline(capabilityLevel)) {
1513 VERBOSE_LOG(" Failing because the function is not inlineable.\n");
1514 return UINT_MAX;
1515 }
1516
1517 // Check if the caller is already too large. We do this check here because that's just
1518 // where we happen to also have the callee's code block, and we want that for the
1519 // purpose of unsetting SABI (the callee's shouldAlwaysBeInlined bit).
1520 if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1521 codeBlock->m_shouldAlwaysBeInlined = false;
1522 VERBOSE_LOG(" Failing because the caller is too large.\n");
1523 return UINT_MAX;
1524 }
1525
1526 // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1527 // this function.
1528 // https://bugs.webkit.org/show_bug.cgi?id=127627
1529
1530 // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
1531 // functions have very low fidelity profiling, and presumably they weren't very hot if they
1532 // haven't gotten to Baseline yet. Consider not inlining these functions.
1533 // https://bugs.webkit.org/show_bug.cgi?id=145503
1534
1535 // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1536 // too many levels? If either of these are detected, then don't inline. We adjust our
1537 // heuristics if we are dealing with a function that cannot otherwise be compiled.
1538
1539 unsigned depth = 0;
1540 unsigned recursion = 0;
1541
1542 for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1543 ++depth;
1544 if (depth >= Options::maximumInliningDepth()) {
1545 VERBOSE_LOG(" Failing because depth exceeded.\n");
1546 return UINT_MAX;
1547 }
1548
1549 if (entry->executable() == executable) {
1550 ++recursion;
1551 if (recursion >= Options::maximumInliningRecursion()) {
1552 VERBOSE_LOG(" Failing because recursion detected.\n");
1553 return UINT_MAX;
1554 }
1555 }
1556 }
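// For example (hypothetical option values): with maximumInliningDepth() == 5, an inline stack
// that is already five entries deep refuses to inline another level; with
// maximumInliningRecursion() == 2, a candidate whose executable already appears twice on the
// inline stack is rejected.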
1557
1558 VERBOSE_LOG(" Inlining should be possible.\n");
1559
1560 // It might be possible to inline.
1561 return codeBlock->bytecodeCost();
1562}
1563
1564template<typename ChecksFunctor>
1565void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)
1566{
1567 const Instruction* savedCurrentInstruction = m_currentInstruction;
1568 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1569
1570 ASSERT(inliningCost(callee, argumentCountIncludingThis, kind) != UINT_MAX);
1571
1572 CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1573 insertChecks(codeBlock);
1574
1575 // FIXME: Don't flush constants!
1576
1577 // arityFixupCount and numberOfStackPaddingSlots are different. While arityFixupCount does not take stack alignment into account,
1578 // numberOfStackPaddingSlots does. Consider the following case:
1579 //
1580 // before: [ ... ][arg0][header]
1581 // after: [ ... ][ext ][arg1][arg0][header]
1582 //
1583 // In the above case, arityFixupCount is 1. But numberOfStackPaddingSlots is 2 because the stack needs to be aligned.
1584 // We insert extra slots to align the stack.
1585 int arityFixupCount = std::max<int>(codeBlock->numParameters() - argumentCountIncludingThis, 0);
1586 int numberOfStackPaddingSlots = CommonSlowPaths::numberOfStackPaddingSlots(codeBlock, argumentCountIncludingThis);
1587 ASSERT(!(numberOfStackPaddingSlots % stackAlignmentRegisters()));
1588 int registerOffsetAfterFixup = registerOffset - numberOfStackPaddingSlots;
1589
1590 int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffsetAfterFixup)).offset() + CallFrame::headerSizeInRegisters;
1591
1592 ensureLocals(
1593 VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1594 CallFrame::headerSizeInRegisters + codeBlock->numCalleeLocals());
1595
1596 size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1597
1598 if (result.isValid())
1599 result = m_inlineStackTop->remapOperand(result);
1600
1601 VariableAccessData* calleeVariable = nullptr;
1602 if (callee.isClosureCall()) {
1603 Node* calleeSet = set(
1604 VirtualRegister(registerOffsetAfterFixup + CallFrameSlot::callee), callTargetNode, ImmediateNakedSet);
1605
1606 calleeVariable = calleeSet->variableAccessData();
1607 calleeVariable->mergeShouldNeverUnbox(true);
1608 }
1609
1610 InlineStackEntry* callerStackTop = m_inlineStackTop;
1611 InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result,
1612 (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock);
1613
1614 // This is where the actual inlining really happens.
1615 unsigned oldIndex = m_currentIndex;
1616 m_currentIndex = 0;
1617
1618 switch (kind) {
1619 case InlineCallFrame::GetterCall:
1620 case InlineCallFrame::SetterCall: {
1621 // When inlining getter and setter calls, we set up a stack frame that does not appear in the bytecode.
1622 // Because inlining can switch on the executable, we could have a graph like this.
1623 //
1624 // BB#0
1625 // ...
1626 // 30: GetSetter
1627 // 31: MovHint(loc10)
1628 // 32: SetLocal(loc10)
1629 // 33: MovHint(loc9)
1630 // 34: SetLocal(loc9)
1631 // ...
1632 // 37: GetExecutable(@30)
1633 // ...
1634 // 41: Switch(@37)
1635 //
1636 // BB#2
1637 // 42: GetLocal(loc12, bc#7 of caller)
1638 // ...
1639 // --> callee: loc9 and loc10 are arguments of callee.
1640 // ...
1641 // <HERE, exit to callee, loc9 and loc10 are required in the bytecode>
1642 //
1643 // When we prune OSR availability at the beginning of BB#2 (bc#7 in the caller), we prune loc9 and loc10's liveness because the caller does not actually have loc9 and loc10.
1644 // However, when we begin executing the callee, we need OSR exit to be aware of where it can recover the arguments to the setter, loc9 and loc10. The MovHints in the inlined
1645 // callee make it so that if we exit at <HERE>, we can recover loc9 and loc10.
1646 for (int index = 0; index < argumentCountIncludingThis; ++index) {
1647 VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
1648 Node* value = getDirect(argumentToGet);
1649 addToGraph(MovHint, OpInfo(argumentToGet.offset()), value);
1650 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToGet, value, ImmediateNakedSet });
1651 }
1652 break;
1653 }
1654 default:
1655 break;
1656 }
1657
1658 if (arityFixupCount) {
1659 // Note: we do arity fixup in two phases:
1660 // 1. We get all the values we need and MovHint them to the expected locals.
1661 // 2. We SetLocal them after that. This way, if we exit, the callee's
1662 // frame is already set up. If any SetLocal exits, we have a valid exit state.
1663 // This is required because if we didn't do this in two phases, we might exit in
1664 // the middle of arity fixup from the callee's CodeOrigin. This is unsound because the code we
1665 // exit to does not perform arity fixup, so the remaining necessary fixups would never be executed.
1666 // For example, consider if we need to pad two args:
1667 // [arg3][arg2][arg1][arg0]
1668 // [fix ][fix ][arg3][arg2][arg1][arg0]
1669 // We memcpy starting from arg0 in the direction of arg3. If we were to exit at a type check
1670 // for arg3's SetLocal in the callee's CodeOrigin, we'd exit with a frame like so:
1671 // [arg3][arg2][arg1][arg2][arg1][arg0]
1672 // Since we do not perform arity fixup in the callee, this is the frame used by the callee.
1673 // And the callee would then just end up thinking its arguments are:
1674 // [fix ][fix ][arg3][arg2][arg1][arg0]
1675 // which is incorrect.
1676
1677 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1678 // The stack needs to be aligned due to the JS calling convention. Thus, we have a hole if the count of arguments is not aligned.
1679 // We call this hole an "extra slot". Consider the following case, where the number of arguments is 2. If this argument
1680 // count does not fulfill the stack alignment requirement, we have already inserted extra slots.
1681 //
1682 // before: [ ... ][ext ][arg1][arg0][header]
1683 //
1684 // In the above case, one extra slot is inserted. If the code's parameter count is 3, we will fix up the arguments.
1685 // At that time, we can simply use these extra slots, so the fixed-up stack is the following.
1686 //
1687 // before: [ ... ][ext ][arg1][arg0][header]
1688 // after: [ ... ][arg2][arg1][arg0][header]
1689 //
1690 // In such cases, we do not need to move frames.
1691 if (registerOffsetAfterFixup != registerOffset) {
1692 for (int index = 0; index < argumentCountIncludingThis; ++index) {
1693 VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
1694 Node* value = getDirect(argumentToGet);
1695 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(index));
1696 addToGraph(MovHint, OpInfo(argumentToSet.offset()), value);
1697 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, value, ImmediateNakedSet });
1698 }
1699 }
1700 for (int index = 0; index < arityFixupCount; ++index) {
1701 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(argumentCountIncludingThis + index));
1702 addToGraph(MovHint, OpInfo(argumentToSet.offset()), undefined);
1703 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, undefined, ImmediateNakedSet });
1704 }
1705
1706 // At this point, it's OK to OSR exit because we finished setting up
1707 // our callee's frame. We emit an ExitOK below.
1708 }
1709
1710 // At this point, it's again OK to OSR exit.
1711 m_exitOK = true;
1712 addToGraph(ExitOK);
1713
1714 processSetLocalQueue();
1715
1716 InlineVariableData inlineVariableData;
1717 inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1718 inlineVariableData.argumentPositionStart = argumentPositionStart;
1719 inlineVariableData.calleeVariable = 0;
1720
1721 RELEASE_ASSERT(
1722 m_inlineStackTop->m_inlineCallFrame->isClosureCall
1723 == callee.isClosureCall());
1724 if (callee.isClosureCall()) {
1725 RELEASE_ASSERT(calleeVariable);
1726 inlineVariableData.calleeVariable = calleeVariable;
1727 }
1728
1729 m_graph.m_inlineVariableData.append(inlineVariableData);
1730
1731 parseCodeBlock();
1732 clearCaches(); // Reset our state now that we're back to the outer code.
1733
1734 m_currentIndex = oldIndex;
1735 m_exitOK = false;
1736
1737 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1738
1739 // Most functions have at least one op_ret and thus set up the continuation block.
1740 // In some rare cases, a function ends in op_unreachable, forcing us to allocate a new continuationBlock here.
1741 if (inlineStackEntry.m_continuationBlock)
1742 m_currentBlock = inlineStackEntry.m_continuationBlock;
1743 else
1744 m_currentBlock = allocateUntargetableBlock();
1745 ASSERT(!m_currentBlock->terminal());
1746
1747 prepareToParseBlock();
1748 m_currentInstruction = savedCurrentInstruction;
1749}
1750
1751ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
1752{
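// Descriptive note (added for clarity): this tries, in order, (1) turning a recursive tail call
// into a jump, (2) handling a constant InternalFunction, (3) handling an intrinsic, (4) handling
// a DOMJIT signature, and finally (5) ordinary inlining of the callee's bytecode if its cost
// fits within the remaining inliningBalance.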
1753 VERBOSE_LOG(" Considering callee ", callee, "\n");
1754
1755 bool didInsertChecks = false;
1756 auto insertChecksWithAccounting = [&] () {
1757 if (needsToCheckCallee)
1758 emitFunctionChecks(callee, callTargetNode, thisArgument);
1759 didInsertChecks = true;
1760 };
1761
1762 if (kind == InlineCallFrame::TailCall && ByteCodeParser::handleRecursiveTailCall(callTargetNode, callee, registerOffset, argumentCountIncludingThis, insertChecksWithAccounting)) {
1763 RELEASE_ASSERT(didInsertChecks);
1764 return CallOptimizationResult::OptimizedToJump;
1765 }
1766 RELEASE_ASSERT(!didInsertChecks);
1767
1768 if (!inliningBalance)
1769 return CallOptimizationResult::DidNothing;
1770
1771 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1772
1773 auto endSpecialCase = [&] () {
1774 RELEASE_ASSERT(didInsertChecks);
1775 addToGraph(Phantom, callTargetNode);
1776 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1777 inliningBalance--;
1778 if (continuationBlock) {
1779 m_currentIndex = nextOffset;
1780 m_exitOK = true;
1781 processSetLocalQueue();
1782 addJumpTo(continuationBlock);
1783 }
1784 };
1785
1786 if (InternalFunction* function = callee.internalFunction()) {
1787 if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
1788 endSpecialCase();
1789 return CallOptimizationResult::Inlined;
1790 }
1791 RELEASE_ASSERT(!didInsertChecks);
1792 return CallOptimizationResult::DidNothing;
1793 }
1794
1795 Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1796 if (intrinsic != NoIntrinsic) {
1797 if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1798 endSpecialCase();
1799 return CallOptimizationResult::Inlined;
1800 }
1801 RELEASE_ASSERT(!didInsertChecks);
1802 // We might still try to inline the Intrinsic because it might be a builtin JS function.
1803 }
1804
1805 if (Options::useDOMJIT()) {
1806 if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) {
1807 if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1808 endSpecialCase();
1809 return CallOptimizationResult::Inlined;
1810 }
1811 RELEASE_ASSERT(!didInsertChecks);
1812 }
1813 }
1814
1815 unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, kind);
1816 if (myInliningCost > inliningBalance)
1817 return CallOptimizationResult::DidNothing;
1818
1819 auto insertCheck = [&] (CodeBlock*) {
1820 if (needsToCheckCallee)
1821 emitFunctionChecks(callee, callTargetNode, thisArgument);
1822 };
1823 inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck);
1824 inliningBalance -= myInliningCost;
1825 return CallOptimizationResult::Inlined;
1826}
1827
1828bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result,
1829 const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument,
1830 VirtualRegister argumentsArgument, unsigned argumentsOffset,
1831 NodeType callOp, InlineCallFrame::Kind kind)
1832{
1833 VERBOSE_LOG("Handling inlining (Varargs)...\nStack: ", currentCodeOrigin(), "\n");
1834 if (callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1835 VERBOSE_LOG("Bailing inlining: too many arguments for varargs inlining.\n");
1836 return false;
1837 }
1838 if (callLinkStatus.couldTakeSlowPath() || callLinkStatus.size() != 1) {
1839 VERBOSE_LOG("Bailing inlining: polymorphic inlining is not yet supported for varargs.\n");
1840 return false;
1841 }
1842
1843 CallVariant callVariant = callLinkStatus[0];
1844
1845 unsigned mandatoryMinimum;
1846 if (FunctionExecutable* functionExecutable = callVariant.functionExecutable())
1847 mandatoryMinimum = functionExecutable->parameterCount();
1848 else
1849 mandatoryMinimum = 0;
1850
1851 // includes "this"
1852 unsigned maxNumArguments = std::max(callLinkStatus.maxNumArguments(), mandatoryMinimum + 1);
1853
1854 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1855 if (inliningCost(callVariant, maxNumArguments, kind) > getInliningBalance(callLinkStatus, specializationKind)) {
1856 VERBOSE_LOG("Bailing inlining: inlining cost too high.\n");
1857 return false;
1858 }
1859
1860 int registerOffset = firstFreeReg + 1;
1861 registerOffset -= maxNumArguments; // includes "this"
1862 registerOffset -= CallFrame::headerSizeInRegisters;
1863 registerOffset = -WTF::roundUpToMultipleOf(stackAlignmentRegisters(), -registerOffset);
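// Worked example (illustrative numbers only): with firstFreeReg == -10, maxNumArguments == 3,
// CallFrame::headerSizeInRegisters == 5 and stackAlignmentRegisters() == 2, this computes
// -9 - 3 - 5 == -17, which the rounding step pushes down to -18 so the new frame stays
// stack-aligned.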
1864
1865 Vector<VirtualRegister> setArgumentMaybes;
1866
1867 auto insertChecks = [&] (CodeBlock* codeBlock) {
1868 emitFunctionChecks(callVariant, callTargetNode, thisArgument);
1869
1870 int remappedRegisterOffset =
1871 m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1872
1873 ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1874
1875 int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
1876 int remappedArgumentStart = m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1877
1878 LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1879 data->start = VirtualRegister(remappedArgumentStart + 1);
1880 data->count = VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount);
1881 data->offset = argumentsOffset;
1882 data->limit = maxNumArguments;
1883 data->mandatoryMinimum = mandatoryMinimum;
1884
1885 if (callOp == TailCallForwardVarargs)
1886 addToGraph(ForwardVarargs, OpInfo(data));
1887 else
1888 addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1889
1890 // LoadVarargs may OSR exit. Hence, we need to keep callTargetNode, thisArgument,
1891 // and argumentsArgument alive for the baseline JIT. However, we only need a Phantom for
1892 // callTargetNode because the other 2 are still in use and alive at this point.
1893 addToGraph(Phantom, callTargetNode);
1894
1895 // In DFG IR before SSA, we cannot insert control flow between the
1896 // LoadVarargs and the last SetArgumentDefinitely. This isn't a problem once we get to DFG
1897 // SSA. Fortunately, we also have other reasons for not inserting control flow
1898 // before SSA.
1899
1900 VariableAccessData* countVariable = newVariableAccessData(VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
1901 // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1902 // matter very much, since our use of a SetArgumentDefinitely and Flushes for this local slot is
1903 // mostly just a formality.
1904 countVariable->predict(SpecInt32Only);
1905 countVariable->mergeIsProfitableToUnbox(true);
1906 Node* setArgumentCount = addToGraph(SetArgumentDefinitely, OpInfo(countVariable));
1907 m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1908
1909 set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1910 unsigned numSetArguments = 0;
1911 for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1912 VariableAccessData* variable = newVariableAccessData(VirtualRegister(remappedArgumentStart + argument));
1913 variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1914
1915 // For a while it had been my intention to do things like this inside the
1916 // prediction injection phase. But in this case it's really best to do it here,
1917 // because it's here that we have access to the variable access datas for the
1918 // inlining we're about to do.
1919 //
1920 // Something else that's interesting here is that we'd really love to get
1921 // predictions from the arguments loaded at the callsite, rather than the
1922 // arguments received inside the callee. But that probably won't matter for most
1923 // calls.
1924 if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1925 ConcurrentJSLocker locker(codeBlock->m_lock);
1926 ValueProfile& profile = codeBlock->valueProfileForArgument(argument);
1927 variable->predict(profile.computeUpdatedPrediction(locker));
1928 }
1929
1930 Node* setArgument = addToGraph(numSetArguments >= mandatoryMinimum ? SetArgumentMaybe : SetArgumentDefinitely, OpInfo(variable));
1931 if (numSetArguments >= mandatoryMinimum && Options::useMaximalFlushInsertionPhase())
1932 setArgumentMaybes.append(variable->local());
1933 m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1934 ++numSetArguments;
1935 }
1936 };
1937
1938 // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1939 // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1940 // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1941 // and there are no callsite value profiles and native functions won't have callee value profiles for
1942 // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1943 // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1944 // calling LoadVarargs twice.
1945 inlineCall(callTargetNode, result, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks);
1946
1947 for (VirtualRegister reg : setArgumentMaybes)
1948 setDirect(reg, jsConstant(jsUndefined()), ImmediateNakedSet);
1949
1950 VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n");
1951 return true;
1952}
1953
1954unsigned ByteCodeParser::getInliningBalance(const CallLinkStatus& callLinkStatus, CodeSpecializationKind specializationKind)
1955{
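// Descriptive note (added for clarity): the returned balance is an inlining budget expressed in
// bytecode-cost units. handleCallVariant() subtracts each successfully inlined candidate's
// inliningCost() from it, so all variants of a polymorphic call site share a single budget.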
1956 unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateBytecodeCost();
1957 if (specializationKind == CodeForConstruct)
1958 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateBytecodeCost());
1959 if (callLinkStatus.isClosureCall())
1960 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateBytecodeCost());
1961 return inliningBalance;
1962}
1963
1964ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
1965 Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus,
1966 int registerOffset, VirtualRegister thisArgument,
1967 int argumentCountIncludingThis,
1968 unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1969{
1970 VERBOSE_LOG("Handling inlining...\nStack: ", currentCodeOrigin(), "\n");
1971
1972 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1973 unsigned inliningBalance = getInliningBalance(callLinkStatus, specializationKind);
1974
1975 // First check if we can avoid creating control flow. Our inliner does some CFG
1976 // simplification on the fly and this helps reduce compile times, but we can only leverage
1977 // this in cases where we don't need control flow diamonds to check the callee.
1978 if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1979 return handleCallVariant(
1980 callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument,
1981 argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true);
1982 }
1983
1984 // We need to create some kind of switch over callee. For now we only do this if we believe that
1985 // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1986 // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1987 // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1988 // we could improve that aspect of this by doing polymorphic inlining while still getting the
1989 // profiling benefit.
1990 if (!m_graph.m_plan.isFTL() || !Options::usePolymorphicCallInlining()) {
1991 VERBOSE_LOG("Bailing inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
1992 return CallOptimizationResult::DidNothing;
1993 }
1994
1995 // If the claim is that this did not originate from a stub, then we don't want to emit a switch
1996 // statement. Whenever the non-stub profiling says that it could take slow path, it really means that
1997 // it has no idea.
1998 if (!Options::usePolymorphicCallInliningForNonStubStatus()
1999 && !callLinkStatus.isBasedOnStub()) {
2000 VERBOSE_LOG("Bailing inlining (non-stub polymorphism).\nStack: ", currentCodeOrigin(), "\n");
2001 return CallOptimizationResult::DidNothing;
2002 }
2003
2004 bool allAreClosureCalls = true;
2005 bool allAreDirectCalls = true;
2006 for (unsigned i = callLinkStatus.size(); i--;) {
2007 if (callLinkStatus[i].isClosureCall())
2008 allAreDirectCalls = false;
2009 else
2010 allAreClosureCalls = false;
2011 }
2012
2013 Node* thingToSwitchOn;
2014 if (allAreDirectCalls)
2015 thingToSwitchOn = callTargetNode;
2016 else if (allAreClosureCalls)
2017 thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
2018 else {
2019 // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
2020 // where it would be beneficial. It might be best to handle these cases as if all calls were
2021 // closure calls.
2022 // https://bugs.webkit.org/show_bug.cgi?id=136020
2023 VERBOSE_LOG("Bailing inlining (mix).\nStack: ", currentCodeOrigin(), "\n");
2024 return CallOptimizationResult::DidNothing;
2025 }
2026
2027 VERBOSE_LOG("Doing hard inlining...\nStack: ", currentCodeOrigin(), "\n");
2028
2029 // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
2030 // store the callee so that it will be accessible to all of the blocks we're about to create. We
2031 // get away with doing an immediate-set here because we wouldn't have performed any side effects
2032 // yet.
2033 VERBOSE_LOG("Register offset: ", registerOffset);
2034 VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee);
2035 calleeReg = m_inlineStackTop->remapOperand(calleeReg);
2036 VERBOSE_LOG("Callee is going to be ", calleeReg, "\n");
2037 setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
2038
2039 // It's OK to exit right now, even though we set some locals. That's because those locals are not
2040 // user-visible.
2041 m_exitOK = true;
2042 addToGraph(ExitOK);
2043
2044 SwitchData& data = *m_graph.m_switchData.add();
2045 data.kind = SwitchCell;
2046 addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
2047 m_currentBlock->didLink();
2048
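// Illustrative shape of what we build here (descriptive, not from the original comments): for a
// call site profiled with two callees f and g, the graph becomes roughly
//
//     Switch(callee, or GetExecutable(callee) for closure calls)
//       case f: inlined body of f -> Jump #continuation
//       case g: inlined body of g -> Jump #continuation
//       fallthrough: slow-path Call (or CheckBadCell if the slow path is provably impossible)
//     #continuation: ...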
2049 BasicBlock* continuationBlock = allocateUntargetableBlock();
2050 VERBOSE_LOG("Adding untargetable block ", RawPointer(continuationBlock), " (continuation)\n");
2051
2052 // We may force this true if we give up on inlining any of the edges.
2053 bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
2054
2055 VERBOSE_LOG("About to loop over functions at ", currentCodeOrigin(), ".\n");
2056
2057 unsigned oldOffset = m_currentIndex;
2058 for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
2059 m_currentIndex = oldOffset;
2060 BasicBlock* calleeEntryBlock = allocateUntargetableBlock();
2061 m_currentBlock = calleeEntryBlock;
2062 prepareToParseBlock();
2063
2064 // At the top of each switch case, we can exit.
2065 m_exitOK = true;
2066
2067 Node* myCallTargetNode = getDirect(calleeReg);
2068
2069 auto inliningResult = handleCallVariant(
2070 myCallTargetNode, result, callLinkStatus[i], registerOffset,
2071 thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction,
2072 inliningBalance, continuationBlock, false);
2073
2074 if (inliningResult == CallOptimizationResult::DidNothing) {
2075 // That failed so we let the block die. Nothing interesting should have been added to
2076 // the block. We also give up on inlining any of the (less frequent) callees.
2077 ASSERT(m_graph.m_blocks.last() == m_currentBlock);
2078 m_graph.killBlockAndItsContents(m_currentBlock);
2079 m_graph.m_blocks.removeLast();
2080 VERBOSE_LOG("Inlining of a poly call failed, we will have to go through a slow path\n");
2081
2082 // The fact that inlining failed means we need a slow path.
2083 couldTakeSlowPath = true;
2084 break;
2085 }
2086
2087 JSCell* thingToCaseOn;
2088 if (allAreDirectCalls)
2089 thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
2090 else {
2091 ASSERT(allAreClosureCalls);
2092 thingToCaseOn = callLinkStatus[i].executable();
2093 }
2094 data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), calleeEntryBlock));
2095 VERBOSE_LOG("Finished optimizing ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
2096 }
2097
2098 // Slow path block
2099 m_currentBlock = allocateUntargetableBlock();
2100 m_currentIndex = oldOffset;
2101 m_exitOK = true;
2102 data.fallThrough = BranchTarget(m_currentBlock);
2103 prepareToParseBlock();
2104 Node* myCallTargetNode = getDirect(calleeReg);
2105 if (couldTakeSlowPath) {
2106 addCall(
2107 result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
2108 registerOffset, prediction);
2109 VERBOSE_LOG("We added a call in the slow path\n");
2110 } else {
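// Descriptive note (added for clarity): profiling claims the slow path is impossible, so
// reaching this fall-through block means the callee matched none of the cases above.
// CheckBadCell forces an exit if that ever happens at run time, and BottomValue stands in
// for the result that can never actually be produced.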
2111 addToGraph(CheckBadCell);
2112 addToGraph(Phantom, myCallTargetNode);
2113 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
2114
2115 set(result, addToGraph(BottomValue));
2116 VERBOSE_LOG("couldTakeSlowPath was false\n");
2117 }
2118
2119 m_currentIndex = nextOffset;
2120 m_exitOK = true; // Origin changed, so it's fine to exit again.
2121 processSetLocalQueue();
2122
2123 if (Node* terminal = m_currentBlock->terminal())
2124 ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
2125 else {
2126 addJumpTo(continuationBlock);
2127 }
2128
2129 prepareToParseBlock();
2130
2131 m_currentIndex = oldOffset;
2132 m_currentBlock = continuationBlock;
2133 m_exitOK = true;
2134
2135 VERBOSE_LOG("Done inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
2136 return CallOptimizationResult::Inlined;
2137}
2138
2139template<typename ChecksFunctor>
2140bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
2141{
2142 ASSERT(op == ArithMin || op == ArithMax);
2143
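// For reference (illustrative): the cases below mirror the JS semantics of Math.min/Math.max:
// with no arguments they return +Infinity / -Infinity respectively, with one argument they
// return ToNumber(x) (hence the NumberUse Phantom), and with two arguments we emit
// ArithMin/ArithMax directly.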
2144 if (argumentCountIncludingThis == 1) {
2145 insertChecks();
2146 double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
2147 set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit)))));
2148 return true;
2149 }
2150
2151 if (argumentCountIncludingThis == 2) {
2152 insertChecks();
2153 Node* resultNode = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
2154 addToGraph(Phantom, Edge(resultNode, NumberUse));
2155 set(result, resultNode);
2156 return true;
2157 }
2158
2159 if (argumentCountIncludingThis == 3) {
2160 insertChecks();
2161 set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2162 return true;
2163 }
2164
2165 // Don't handle three or more actual arguments (argumentCountIncludingThis >= 4) for now.
2166 return false;
2167}
2168
2169template<typename ChecksFunctor>
2170bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2171{
2172 VERBOSE_LOG(" The intrinsic is ", intrinsic, "\n");
2173
2174 if (!isOpcodeShape<OpCallShape>(m_currentInstruction))
2175 return false;
2176
2177 // It so happens that the code below doesn't handle the invalid result case. We could fix that, but
2178 // it would only benefit intrinsics called as setters, like if you do:
2179 //
2180 // o.__defineSetter__("foo", Math.pow)
2181 //
2182 // Which is extremely amusing, but probably not worth optimizing.
2183 if (!result.isValid())
2184 return false;
2185
2186 bool didSetResult = false;
2187 auto setResult = [&] (Node* node) {
2188 RELEASE_ASSERT(!didSetResult);
2189 set(result, node);
2190 didSetResult = true;
2191 };
2192
2193 auto inlineIntrinsic = [&] {
2194 switch (intrinsic) {
2195
2196 // Intrinsic Functions:
2197
2198 case AbsIntrinsic: {
2199 if (argumentCountIncludingThis == 1) { // Math.abs()
2200 insertChecks();
2201 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2202 return true;
2203 }
2204
2205 if (!MacroAssembler::supportsFloatingPointAbs())
2206 return false;
2207
2208 insertChecks();
2209 Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
2210 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
2211 node->mergeFlags(NodeMayOverflowInt32InDFG);
2212 setResult(node);
2213 return true;
2214 }
2215
2216 case MinIntrinsic:
2217 case MaxIntrinsic:
2218 if (handleMinMax(result, intrinsic == MinIntrinsic ? ArithMin : ArithMax, registerOffset, argumentCountIncludingThis, insertChecks)) {
2219 didSetResult = true;
2220 return true;
2221 }
2222 return false;
2223
2224#define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2225 case capitalizedName##Intrinsic:
2226 FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2227#undef DFG_ARITH_UNARY
2228 {
2229 if (argumentCountIncludingThis == 1) {
2230 insertChecks();
2231 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2232 return true;
2233 }
2234 Arith::UnaryType type = Arith::UnaryType::Sin;
2235 switch (intrinsic) {
2236#define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2237 case capitalizedName##Intrinsic: \
2238 type = Arith::UnaryType::capitalizedName; \
2239 break;
2240 FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2241#undef DFG_ARITH_UNARY
2242 default:
2243 RELEASE_ASSERT_NOT_REACHED();
2244 }
2245 insertChecks();
2246 setResult(addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
2247 return true;
2248 }
2249
2250 case FRoundIntrinsic:
2251 case SqrtIntrinsic: {
2252 if (argumentCountIncludingThis == 1) {
2253 insertChecks();
2254 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2255 return true;
2256 }
2257
2258 NodeType nodeType = Unreachable;
2259 switch (intrinsic) {
2260 case FRoundIntrinsic:
2261 nodeType = ArithFRound;
2262 break;
2263 case SqrtIntrinsic:
2264 nodeType = ArithSqrt;
2265 break;
2266 default:
2267 RELEASE_ASSERT_NOT_REACHED();
2268 }
2269 insertChecks();
2270 setResult(addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
2271 return true;
2272 }
2273
2274 case PowIntrinsic: {
2275 if (argumentCountIncludingThis < 3) {
2276 // Math.pow() and Math.pow(x) return NaN.
2277 insertChecks();
2278 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2279 return true;
2280 }
2281 insertChecks();
2282 VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
2283 VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
2284 setResult(addToGraph(ArithPow, get(xOperand), get(yOperand)));
2285 return true;
2286 }
2287
2288 case ArrayPushIntrinsic: {
2289#if USE(JSVALUE32_64)
2290 if (isX86()) {
2291 if (argumentCountIncludingThis > 2)
2292 return false;
2293 }
2294#endif
2295
2296 if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
2297 return false;
2298
2299 ArrayMode arrayMode = getArrayMode(Array::Write);
2300 if (!arrayMode.isJSArray())
2301 return false;
2302 switch (arrayMode.type()) {
2303 case Array::Int32:
2304 case Array::Double:
2305 case Array::Contiguous:
2306 case Array::ArrayStorage: {
2307 insertChecks();
2308
2309 addVarArgChild(nullptr); // For storage.
2310 for (int i = 0; i < argumentCountIncludingThis; ++i)
2311 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2312 Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
2313 setResult(arrayPush);
2314 return true;
2315 }
2316
2317 default:
2318 return false;
2319 }
2320 }
2321
2322 case ArraySliceIntrinsic: {
2323#if USE(JSVALUE32_64)
2324 if (isX86()) {
2325 // There aren't enough registers for this to be done easily.
2326 return false;
2327 }
2328#endif
2329 if (argumentCountIncludingThis < 1)
2330 return false;
2331
2332 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2333 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
2334 return false;
2335
2336 ArrayMode arrayMode = getArrayMode(Array::Read);
2337 if (!arrayMode.isJSArray())
2338 return false;
2339
2340 if (!arrayMode.isJSArrayWithOriginalStructure())
2341 return false;
2342
2343 switch (arrayMode.type()) {
2344 case Array::Double:
2345 case Array::Int32:
2346 case Array::Contiguous: {
2347 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2348
2349 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2350 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2351
2352 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2353 // https://bugs.webkit.org/show_bug.cgi?id=173171
2354 if (globalObject->arraySpeciesWatchpoint().state() == IsWatched
2355 && globalObject->havingABadTimeWatchpoint()->isStillValid()
2356 && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2357 && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2358 && globalObject->arrayPrototypeChainIsSane()) {
2359
2360 m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpoint());
2361 m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
2362 m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2363 m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2364
2365 insertChecks();
2366
2367 Node* array = get(virtualRegisterForArgument(0, registerOffset));
2368 // We do a few things here to prove that we aren't skipping doing side-effects in an observable way:
2369 // 1. We ensure that the "constructor" property hasn't been changed (because the observable
2370 // effects of slice require that we perform a Get(array, "constructor") and we can skip
2371 // that if we're an original array structure). (We can relax this in the future by using
2372 // TryGetById and CheckCell.)
2373 //
2374 // 2. We check that the array we're calling slice on has the same global object as the lexical
2375 // global object that this code is running in. This requirement is necessary because we set up the
2376 // watchpoints above on the lexical global object. This means that code that calls slice on
2377 // arrays produced by other global objects won't get this optimization. We could relax this
2378 // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
2379 // we generate instead of registering it as a watchpoint that would invalidate the compilation.
2380 //
2381 // 3. By proving we're an original array structure, we guarantee that the incoming array
2382 // isn't a subclass of Array.
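// Illustrative example (not from the original comments) of what these checks guard against:
// code such as
//
//     class Sorted extends Array { }
//     new Sorted(1, 2, 3).slice(1);
//
// or a page that replaces Array.prototype.constructor or Symbol.species must not take this
// fast path, because the species/constructor lookup would then be observable.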
2383
2384 StructureSet structureSet;
2385 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
2386 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
2387 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
2388 structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32));
2389 structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
2390 structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble));
2391 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
2392
2393 addVarArgChild(array);
2394 if (argumentCountIncludingThis >= 2)
2395 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
2396 if (argumentCountIncludingThis >= 3)
2397 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
2398 addVarArgChild(addToGraph(GetButterfly, array));
2399
2400 Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
2401 setResult(arraySlice);
2402 return true;
2403 }
2404
2405 return false;
2406 }
2407 default:
2408 return false;
2409 }
2410
2411 RELEASE_ASSERT_NOT_REACHED();
2412 return false;
2413 }
2414
2415 case ArrayIndexOfIntrinsic: {
2416 if (argumentCountIncludingThis < 2)
2417 return false;
2418
2419 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
2420 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2421 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2422 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2423 return false;
2424
2425 ArrayMode arrayMode = getArrayMode(Array::Read);
2426 if (!arrayMode.isJSArray())
2427 return false;
2428
2429 if (!arrayMode.isJSArrayWithOriginalStructure())
2430 return false;
2431
2432 // We do not want to convert arrays into one type just to perform indexOf.
2433 if (arrayMode.doesConversion())
2434 return false;
2435
2436 switch (arrayMode.type()) {
2437 case Array::Double:
2438 case Array::Int32:
2439 case Array::Contiguous: {
2440 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2441
2442 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2443 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2444
2445 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2446 // https://bugs.webkit.org/show_bug.cgi?id=173171
2447 if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2448 && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2449 && globalObject->arrayPrototypeChainIsSane()) {
2450
2451 m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2452 m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2453
2454 insertChecks();
2455
2456 Node* array = get(virtualRegisterForArgument(0, registerOffset));
2457 addVarArgChild(array);
2458 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
2459 if (argumentCountIncludingThis >= 3)
2460 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
2461 addVarArgChild(nullptr);
2462
2463 Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
2464 setResult(node);
2465 return true;
2466 }
2467
2468 return false;
2469 }
2470 default:
2471 return false;
2472 }
2473
2474 RELEASE_ASSERT_NOT_REACHED();
2475 return false;
2476
2477 }
2478
2479 case ArrayPopIntrinsic: {
2480 if (argumentCountIncludingThis != 1)
2481 return false;
2482
2483 ArrayMode arrayMode = getArrayMode(Array::Write);
2484 if (!arrayMode.isJSArray())
2485 return false;
2486 switch (arrayMode.type()) {
2487 case Array::Int32:
2488 case Array::Double:
2489 case Array::Contiguous:
2490 case Array::ArrayStorage: {
2491 insertChecks();
2492 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
2493 setResult(arrayPop);
2494 return true;
2495 }
2496
2497 default:
2498 return false;
2499 }
2500 }
2501
2502 case AtomicsAddIntrinsic:
2503 case AtomicsAndIntrinsic:
2504 case AtomicsCompareExchangeIntrinsic:
2505 case AtomicsExchangeIntrinsic:
2506 case AtomicsIsLockFreeIntrinsic:
2507 case AtomicsLoadIntrinsic:
2508 case AtomicsOrIntrinsic:
2509 case AtomicsStoreIntrinsic:
2510 case AtomicsSubIntrinsic:
2511 case AtomicsXorIntrinsic: {
2512 if (!is64Bit())
2513 return false;
2514
2515 NodeType op = LastNodeType;
2516 Array::Action action = Array::Write;
2517 unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
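// For example (illustrative): Atomics.add(typedArray, index, value) and the other
// read-modify-write operations take three actual arguments, Atomics.load(typedArray, index)
// takes two, Atomics.compareExchange(typedArray, index, expected, replacement) takes four,
// and Atomics.isLockFree(size) takes one and gets no backing-store child.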
2518 switch (intrinsic) {
2519 case AtomicsAddIntrinsic:
2520 op = AtomicsAdd;
2521 numArgs = 3;
2522 break;
2523 case AtomicsAndIntrinsic:
2524 op = AtomicsAnd;
2525 numArgs = 3;
2526 break;
2527 case AtomicsCompareExchangeIntrinsic:
2528 op = AtomicsCompareExchange;
2529 numArgs = 4;
2530 break;
2531 case AtomicsExchangeIntrinsic:
2532 op = AtomicsExchange;
2533 numArgs = 3;
2534 break;
2535 case AtomicsIsLockFreeIntrinsic:
2536 // This gets no backing store, but we need no special logic for this since this also does
2537 // not need varargs.
2538 op = AtomicsIsLockFree;
2539 numArgs = 1;
2540 break;
2541 case AtomicsLoadIntrinsic:
2542 op = AtomicsLoad;
2543 numArgs = 2;
2544 action = Array::Read;
2545 break;
2546 case AtomicsOrIntrinsic:
2547 op = AtomicsOr;
2548 numArgs = 3;
2549 break;
2550 case AtomicsStoreIntrinsic:
2551 op = AtomicsStore;
2552 numArgs = 3;
2553 break;
2554 case AtomicsSubIntrinsic:
2555 op = AtomicsSub;
2556 numArgs = 3;
2557 break;
2558 case AtomicsXorIntrinsic:
2559 op = AtomicsXor;
2560 numArgs = 3;
2561 break;
2562 default:
2563 RELEASE_ASSERT_NOT_REACHED();
2564 break;
2565 }
2566
2567 if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
2568 return false;
2569
2570 insertChecks();
2571
2572 Vector<Node*, 3> args;
2573 for (unsigned i = 0; i < numArgs; ++i)
2574 args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
2575
2576 Node* resultNode;
2577 if (numArgs + 1 <= 3) {
2578 while (args.size() < 3)
2579 args.append(nullptr);
2580 resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
2581 } else {
2582 for (Node* node : args)
2583 addVarArgChild(node);
2584 addVarArgChild(nullptr);
2585 resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));
2586 }
2587
2588 setResult(resultNode);
2589 return true;
2590 }
2591
2592 case ParseIntIntrinsic: {
2593 if (argumentCountIncludingThis < 2)
2594 return false;
2595
2596 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2597 return false;
2598
2599 insertChecks();
2600 VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
2601 Node* parseInt;
2602 if (argumentCountIncludingThis == 2)
2603 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
2604 else {
2605 ASSERT(argumentCountIncludingThis > 2);
2606 VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
2607 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
2608 }
2609 setResult(parseInt);
2610 return true;
2611 }
2612
2613 case CharCodeAtIntrinsic: {
2614 if (argumentCountIncludingThis != 2)
2615 return false;
2616
2617 insertChecks();
2618 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2619 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2620 Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2621
2622 setResult(charCode);
2623 return true;
2624 }
2625
2626 case CharAtIntrinsic: {
2627 if (argumentCountIncludingThis != 2)
2628 return false;
2629
2630 insertChecks();
2631 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2632 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2633 Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2634
2635 setResult(charCode);
2636 return true;
2637 }
2638 case Clz32Intrinsic: {
2639 insertChecks();
2640 if (argumentCountIncludingThis == 1)
2641 setResult(addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2642 else {
2643 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2644 setResult(addToGraph(ArithClz32, operand));
2645 }
2646 return true;
2647 }
2648 case FromCharCodeIntrinsic: {
2649 if (argumentCountIncludingThis != 2)
2650 return false;
2651
2652 insertChecks();
2653 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2654 Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2655
2656 setResult(charCode);
2657
2658 return true;
2659 }
2660
2661 case RegExpExecIntrinsic: {
2662 if (argumentCountIncludingThis != 2)
2663 return false;
2664
2665 insertChecks();
2666 Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2667 setResult(regExpExec);
2668
2669 return true;
2670 }
2671
2672 case RegExpTestIntrinsic:
2673 case RegExpTestFastIntrinsic: {
2674 if (argumentCountIncludingThis != 2)
2675 return false;
2676
2677 if (intrinsic == RegExpTestIntrinsic) {
2678 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2679 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2680 return false;
2681
2682 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2683 Structure* regExpStructure = globalObject->regExpStructure();
2684 m_graph.registerStructure(regExpStructure);
2685 ASSERT(regExpStructure->storedPrototype().isObject());
2686 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2687
2688 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2689 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2690
2691 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2692 JSValue currentProperty;
2693 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2694 return false;
2695
2696 return currentProperty == primordialProperty;
2697 };
2698
2699 // Check that RegExp.exec is still the primordial RegExp.prototype.exec
2700 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2701 return false;
2702
2703 // Check that regExpObject is actually a RegExp object.
2704 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2705 addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
2706
2707 // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
2708 UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
2709 unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
2710 Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
2711 FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
2712 addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
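// Illustrative note (added for clarity): if, after this compilation, a page replaces
// RegExp.prototype.exec, e.g.
//
//     RegExp.prototype.exec = function (s) { /* ... */ };
//
// or shadows "exec" on this particular RegExp object, the TryGetById/CheckCell sequence above
// will observe a different function and we will OSR exit instead of running the inlined fast path.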
2713 }
2714
2715 insertChecks();
2716 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2717 Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
2718 setResult(regExpExec);
2719
2720 return true;
2721 }
2722
2723 case RegExpMatchFastIntrinsic: {
2724 RELEASE_ASSERT(argumentCountIncludingThis == 2);
2725
2726 insertChecks();
2727 Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2728 setResult(regExpMatch);
2729 return true;
2730 }
2731
2732 case ObjectCreateIntrinsic: {
2733 if (argumentCountIncludingThis != 2)
2734 return false;
2735
2736 insertChecks();
2737 setResult(addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));
2738 return true;
2739 }
2740
2741 case ObjectGetPrototypeOfIntrinsic: {
2742 if (argumentCountIncludingThis != 2)
2743 return false;
2744
2745 insertChecks();
2746 setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2747 return true;
2748 }
2749
2750 case ObjectIsIntrinsic: {
2751 if (argumentCountIncludingThis < 3)
2752 return false;
2753
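        // Object.is implements SameValue, which differs from === only for NaN (Object.is(NaN, NaN) is
        // true) and for signed zeros (Object.is(0, -0) is false).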
2754 insertChecks();
2755 setResult(addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2756 return true;
2757 }
2758
2759 case ObjectKeysIntrinsic: {
2760 if (argumentCountIncludingThis < 2)
2761 return false;
2762
2763 insertChecks();
2764 setResult(addToGraph(ObjectKeys, get(virtualRegisterForArgument(1, registerOffset))));
2765 return true;
2766 }
2767
2768 case ReflectGetPrototypeOfIntrinsic: {
2769 if (argumentCountIncludingThis != 2)
2770 return false;
2771
2772 insertChecks();
2773 setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
2774 return true;
2775 }
2776
2777 case IsTypedArrayViewIntrinsic: {
2778 ASSERT(argumentCountIncludingThis == 2);
2779
2780 insertChecks();
2781 setResult(addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2782 return true;
2783 }
2784
2785 case StringPrototypeValueOfIntrinsic: {
2786 insertChecks();
2787 Node* value = get(virtualRegisterForArgument(0, registerOffset));
2788 setResult(addToGraph(StringValueOf, value));
2789 return true;
2790 }
2791
2792 case StringPrototypeReplaceIntrinsic: {
2793 if (argumentCountIncludingThis != 3)
2794 return false;
2795
2796 // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object.
2797 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2798 return false;
2799
2800 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2801 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2802 return false;
2803
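        // String.prototype.replace dispatches through searchValue[Symbol.replace], and the built-in
        // RegExp.prototype[Symbol.replace] consults the global and unicode flags and calls exec. The
        // checks below verify that all of those are still primordial, so StringReplace can safely take
        // the RegExp fast path.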
2804 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2805 Structure* regExpStructure = globalObject->regExpStructure();
2806 m_graph.registerStructure(regExpStructure);
2807 ASSERT(regExpStructure->storedPrototype().isObject());
2808 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2809
2810 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2811 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2812
2813 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2814 JSValue currentProperty;
2815 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2816 return false;
2817
2818 return currentProperty == primordialProperty;
2819 };
2820
2821 // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
2822 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2823 return false;
2824
2825 // Check that searchRegExp.global is still the primordial RegExp.prototype.global
2826 if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
2827 return false;
2828
2829 // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
2830 if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
2831 return false;
2832
2833        // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
2834 if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
2835 return false;
2836
2837 insertChecks();
2838
2839 Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2840 setResult(resultNode);
2841 return true;
2842 }
2843
2844 case StringPrototypeReplaceRegExpIntrinsic: {
2845 if (argumentCountIncludingThis != 3)
2846 return false;
2847
2848 insertChecks();
2849 Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2850 setResult(resultNode);
2851 return true;
2852 }
2853
2854 case RoundIntrinsic:
2855 case FloorIntrinsic:
2856 case CeilIntrinsic:
2857 case TruncIntrinsic: {
2858 if (argumentCountIncludingThis == 1) {
2859 insertChecks();
2860 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2861 return true;
2862 }
2863 insertChecks();
2864 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2865 NodeType op;
2866 if (intrinsic == RoundIntrinsic)
2867 op = ArithRound;
2868 else if (intrinsic == FloorIntrinsic)
2869 op = ArithFloor;
2870 else if (intrinsic == CeilIntrinsic)
2871 op = ArithCeil;
2872 else {
2873 ASSERT(intrinsic == TruncIntrinsic);
2874 op = ArithTrunc;
2875 }
2876 Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
2877 setResult(roundNode);
2878 return true;
2879 }
2880 case IMulIntrinsic: {
2881 if (argumentCountIncludingThis != 3)
2882 return false;
2883 insertChecks();
2884 VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2885 VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2886 Node* left = get(leftOperand);
2887 Node* right = get(rightOperand);
2888 setResult(addToGraph(ArithIMul, left, right));
2889 return true;
2890 }
2891
2892 case RandomIntrinsic: {
2893 if (argumentCountIncludingThis != 1)
2894 return false;
2895 insertChecks();
2896 setResult(addToGraph(ArithRandom));
2897 return true;
2898 }
2899
2900 case DFGTrueIntrinsic: {
2901 insertChecks();
2902 setResult(jsConstant(jsBoolean(true)));
2903 return true;
2904 }
2905
2906 case FTLTrueIntrinsic: {
2907 insertChecks();
2908 setResult(jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
2909 return true;
2910 }
2911
2912 case OSRExitIntrinsic: {
2913 insertChecks();
2914 addToGraph(ForceOSRExit);
2915 setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2916 return true;
2917 }
2918
2919 case IsFinalTierIntrinsic: {
2920 insertChecks();
2921 setResult(jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
2922 return true;
2923 }
2924
2925 case SetInt32HeapPredictionIntrinsic: {
2926 insertChecks();
2927 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2928 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2929 if (node->hasHeapPrediction())
2930 node->setHeapPrediction(SpecInt32Only);
2931 }
2932 setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2933 return true;
2934 }
2935
2936 case CheckInt32Intrinsic: {
2937 insertChecks();
2938 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2939 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2940 addToGraph(Phantom, Edge(node, Int32Use));
2941 }
2942 setResult(jsConstant(jsBoolean(true)));
2943 return true;
2944 }
2945
2946 case FiatInt52Intrinsic: {
2947 if (argumentCountIncludingThis != 2)
2948 return false;
2949 insertChecks();
2950 VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2951 if (enableInt52())
2952 setResult(addToGraph(FiatInt52, get(operand)));
2953 else
2954 setResult(get(operand));
2955 return true;
2956 }
2957
2958 case JSMapGetIntrinsic: {
2959 if (argumentCountIncludingThis != 2)
2960 return false;
2961
2962 insertChecks();
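        // Map.prototype.get lowers to: NormalizeMapKey (so keys that compare equal under SameValueZero
        // hash identically), MapHash, GetMapBucket (which yields either the matching bucket or the VM's
        // sentinel bucket), and LoadValueFromMapBucket, which produces undefined for the sentinel,
        // matching get()'s miss behavior.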
2963 Node* map = get(virtualRegisterForArgument(0, registerOffset));
2964 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2965 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2966 Node* hash = addToGraph(MapHash, normalizedKey);
2967 Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
2968 Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
2969 setResult(resultNode);
2970 return true;
2971 }
2972
2973 case JSSetHasIntrinsic:
2974 case JSMapHasIntrinsic: {
2975 if (argumentCountIncludingThis != 2)
2976 return false;
2977
2978 insertChecks();
2979 Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
2980 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2981 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2982 Node* hash = addToGraph(MapHash, normalizedKey);
2983 UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
2984 Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
2985 JSCell* sentinel = nullptr;
2986 if (intrinsic == JSMapHasIntrinsic)
2987 sentinel = m_vm->sentinelMapBucket();
2988 else
2989 sentinel = m_vm->sentinelSetBucket();
2990
2991 FrozenValue* frozenPointer = m_graph.freeze(sentinel);
2992 Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
2993 Node* resultNode = addToGraph(LogicalNot, invertedResult);
2994 setResult(resultNode);
2995 return true;
2996 }
2997
2998 case JSSetAddIntrinsic: {
2999 if (argumentCountIncludingThis != 2)
3000 return false;
3001
3002 insertChecks();
3003 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3004 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3005 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3006 Node* hash = addToGraph(MapHash, normalizedKey);
3007 addToGraph(SetAdd, base, normalizedKey, hash);
3008 setResult(base);
3009 return true;
3010 }
3011
3012 case JSMapSetIntrinsic: {
3013 if (argumentCountIncludingThis != 3)
3014 return false;
3015
3016 insertChecks();
3017 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3018 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3019 Node* value = get(virtualRegisterForArgument(2, registerOffset));
3020
3021 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3022 Node* hash = addToGraph(MapHash, normalizedKey);
3023
3024 addVarArgChild(base);
3025 addVarArgChild(normalizedKey);
3026 addVarArgChild(value);
3027 addVarArgChild(hash);
3028 addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
3029 setResult(base);
3030 return true;
3031 }
3032
3033 case JSSetBucketHeadIntrinsic:
3034 case JSMapBucketHeadIntrinsic: {
3035 ASSERT(argumentCountIncludingThis == 2);
3036
3037 insertChecks();
3038 Node* map = get(virtualRegisterForArgument(1, registerOffset));
3039 UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
3040 Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind));
3041 setResult(resultNode);
3042 return true;
3043 }
3044
3045 case JSSetBucketNextIntrinsic:
3046 case JSMapBucketNextIntrinsic: {
3047 ASSERT(argumentCountIncludingThis == 2);
3048
3049 insertChecks();
3050 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3051 BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3052 Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
3053 setResult(resultNode);
3054 return true;
3055 }
3056
3057 case JSSetBucketKeyIntrinsic:
3058 case JSMapBucketKeyIntrinsic: {
3059 ASSERT(argumentCountIncludingThis == 2);
3060
3061 insertChecks();
3062 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3063 BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3064 Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
3065 setResult(resultNode);
3066 return true;
3067 }
3068
3069 case JSMapBucketValueIntrinsic: {
3070 ASSERT(argumentCountIncludingThis == 2);
3071
3072 insertChecks();
3073 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3074 Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
3075 setResult(resultNode);
3076 return true;
3077 }
3078
3079 case JSWeakMapGetIntrinsic: {
3080 if (argumentCountIncludingThis != 2)
3081 return false;
3082
3083 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3084 return false;
3085
3086 insertChecks();
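        // WeakMap keys must be objects, hence the ObjectUse check on the key. WeakMapGet produces the
        // stored value, or the empty JSValue on a miss; ExtractValueFromWeakMapGet then maps empty to
        // undefined (the has() intrinsics below instead test the holder with IsEmpty).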
3087 Node* map = get(virtualRegisterForArgument(0, registerOffset));
3088 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3089 addToGraph(Check, Edge(key, ObjectUse));
3090 Node* hash = addToGraph(MapHash, key);
3091 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3092 Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);
3093
3094 setResult(resultNode);
3095 return true;
3096 }
3097
3098 case JSWeakMapHasIntrinsic: {
3099 if (argumentCountIncludingThis != 2)
3100 return false;
3101
3102 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3103 return false;
3104
3105 insertChecks();
3106 Node* map = get(virtualRegisterForArgument(0, registerOffset));
3107 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3108 addToGraph(Check, Edge(key, ObjectUse));
3109 Node* hash = addToGraph(MapHash, key);
3110 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3111 Node* invertedResult = addToGraph(IsEmpty, holder);
3112 Node* resultNode = addToGraph(LogicalNot, invertedResult);
3113
3114 setResult(resultNode);
3115 return true;
3116 }
3117
3118 case JSWeakSetHasIntrinsic: {
3119 if (argumentCountIncludingThis != 2)
3120 return false;
3121
3122 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3123 return false;
3124
3125 insertChecks();
3126 Node* map = get(virtualRegisterForArgument(0, registerOffset));
3127 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3128 addToGraph(Check, Edge(key, ObjectUse));
3129 Node* hash = addToGraph(MapHash, key);
3130 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3131 Node* invertedResult = addToGraph(IsEmpty, holder);
3132 Node* resultNode = addToGraph(LogicalNot, invertedResult);
3133
3134 setResult(resultNode);
3135 return true;
3136 }
3137
3138 case JSWeakSetAddIntrinsic: {
3139 if (argumentCountIncludingThis != 2)
3140 return false;
3141
3142 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3143 return false;
3144
3145 insertChecks();
3146 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3147 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3148 addToGraph(Check, Edge(key, ObjectUse));
3149 Node* hash = addToGraph(MapHash, key);
3150 addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3151 setResult(base);
3152 return true;
3153 }
3154
3155 case JSWeakMapSetIntrinsic: {
3156 if (argumentCountIncludingThis != 3)
3157 return false;
3158
3159 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3160 return false;
3161
3162 insertChecks();
3163 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3164 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3165 Node* value = get(virtualRegisterForArgument(2, registerOffset));
3166
3167 addToGraph(Check, Edge(key, ObjectUse));
3168 Node* hash = addToGraph(MapHash, key);
3169
3170 addVarArgChild(Edge(base, WeakMapObjectUse));
3171 addVarArgChild(Edge(key, ObjectUse));
3172 addVarArgChild(Edge(value));
3173 addVarArgChild(Edge(hash, Int32Use));
3174 addToGraph(Node::VarArg, WeakMapSet, OpInfo(0), OpInfo(0));
3175 setResult(base);
3176 return true;
3177 }
3178
3179 case DataViewGetInt8:
3180 case DataViewGetUint8:
3181 case DataViewGetInt16:
3182 case DataViewGetUint16:
3183 case DataViewGetInt32:
3184 case DataViewGetUint32:
3185 case DataViewGetFloat32:
3186 case DataViewGetFloat64: {
3187 if (!is64Bit())
3188 return false;
3189
3190 // To inline data view accesses, we assume the architecture we're running on:
3191 // - Is little endian.
3192 // - Allows unaligned loads/stores without crashing.
3193
3194 if (argumentCountIncludingThis < 2)
3195 return false;
3196 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3197 return false;
3198
3199 insertChecks();
3200
3201 uint8_t byteSize;
3202 NodeType op = DataViewGetInt;
3203 bool isSigned = false;
3204 switch (intrinsic) {
3205 case DataViewGetInt8:
3206 isSigned = true;
3207 FALLTHROUGH;
3208 case DataViewGetUint8:
3209 byteSize = 1;
3210 break;
3211
3212 case DataViewGetInt16:
3213 isSigned = true;
3214 FALLTHROUGH;
3215 case DataViewGetUint16:
3216 byteSize = 2;
3217 break;
3218
3219 case DataViewGetInt32:
3220 isSigned = true;
3221 FALLTHROUGH;
3222 case DataViewGetUint32:
3223 byteSize = 4;
3224 break;
3225
3226 case DataViewGetFloat32:
3227 byteSize = 4;
3228 op = DataViewGetFloat;
3229 break;
3230 case DataViewGetFloat64:
3231 byteSize = 8;
3232 op = DataViewGetFloat;
3233 break;
3234 default:
3235 RELEASE_ASSERT_NOT_REACHED();
3236 }
3237
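        // Work out the endianness statically when we can: one-byte accesses ignore it, a missing
        // littleEndian argument means big-endian, and a constant argument lets us bake the answer into
        // the node and drop the child; otherwise we keep the child and decide at runtime.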
3238 TriState isLittleEndian = MixedTriState;
3239 Node* littleEndianChild = nullptr;
3240 if (byteSize > 1) {
3241 if (argumentCountIncludingThis < 3)
3242 isLittleEndian = FalseTriState;
3243 else {
3244 littleEndianChild = get(virtualRegisterForArgument(2, registerOffset));
3245 if (littleEndianChild->hasConstant()) {
3246 JSValue constant = littleEndianChild->constant()->value();
3247 isLittleEndian = constant.pureToBoolean();
3248 if (isLittleEndian != MixedTriState)
3249 littleEndianChild = nullptr;
3250 } else
3251 isLittleEndian = MixedTriState;
3252 }
3253 }
3254
3255 DataViewData data { };
3256 data.isLittleEndian = isLittleEndian;
3257 data.isSigned = isSigned;
3258 data.byteSize = byteSize;
3259
3260 setResult(
3261 addToGraph(op, OpInfo(data.asQuadWord), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), littleEndianChild));
3262 return true;
3263 }
3264
3265 case DataViewSetInt8:
3266 case DataViewSetUint8:
3267 case DataViewSetInt16:
3268 case DataViewSetUint16:
3269 case DataViewSetInt32:
3270 case DataViewSetUint32:
3271 case DataViewSetFloat32:
3272 case DataViewSetFloat64: {
3273 if (!is64Bit())
3274 return false;
3275
3276 if (argumentCountIncludingThis < 3)
3277 return false;
3278
3279 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3280 return false;
3281
3282 insertChecks();
3283
3284 uint8_t byteSize;
3285 bool isFloatingPoint = false;
3286 bool isSigned = false;
3287 switch (intrinsic) {
3288 case DataViewSetInt8:
3289 isSigned = true;
3290 FALLTHROUGH;
3291 case DataViewSetUint8:
3292 byteSize = 1;
3293 break;
3294
3295 case DataViewSetInt16:
3296 isSigned = true;
3297 FALLTHROUGH;
3298 case DataViewSetUint16:
3299 byteSize = 2;
3300 break;
3301
3302 case DataViewSetInt32:
3303 isSigned = true;
3304 FALLTHROUGH;
3305 case DataViewSetUint32:
3306 byteSize = 4;
3307 break;
3308
3309 case DataViewSetFloat32:
3310 isFloatingPoint = true;
3311 byteSize = 4;
3312 break;
3313 case DataViewSetFloat64:
3314 isFloatingPoint = true;
3315 byteSize = 8;
3316 break;
3317 default:
3318 RELEASE_ASSERT_NOT_REACHED();
3319 }
3320
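        // Same endianness logic as the get case above, except the optional littleEndian flag is the
        // fourth argument (e.g. setUint16(byteOffset, value, littleEndian)).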
3321 TriState isLittleEndian = MixedTriState;
3322 Node* littleEndianChild = nullptr;
3323 if (byteSize > 1) {
3324 if (argumentCountIncludingThis < 4)
3325 isLittleEndian = FalseTriState;
3326 else {
3327 littleEndianChild = get(virtualRegisterForArgument(3, registerOffset));
3328 if (littleEndianChild->hasConstant()) {
3329 JSValue constant = littleEndianChild->constant()->value();
3330 isLittleEndian = constant.pureToBoolean();
3331 if (isLittleEndian != MixedTriState)
3332 littleEndianChild = nullptr;
3333 } else
3334 isLittleEndian = MixedTriState;
3335 }
3336 }
3337
3338 DataViewData data { };
3339 data.isLittleEndian = isLittleEndian;
3340 data.isSigned = isSigned;
3341 data.byteSize = byteSize;
3342 data.isFloatingPoint = isFloatingPoint;
3343
3344 addVarArgChild(get(virtualRegisterForArgument(0, registerOffset)));
3345 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset)));
3346 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset)));
3347 addVarArgChild(littleEndianChild);
3348
3349 addToGraph(Node::VarArg, DataViewSet, OpInfo(data.asQuadWord), OpInfo());
3350 setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
3351 return true;
3352 }
3353
3354 case HasOwnPropertyIntrinsic: {
3355 if (argumentCountIncludingThis != 2)
3356 return false;
3357
3358        // This can be racy; that's fine. We know that once we observe that this cache has been
3359        // created, it will never be destroyed until the VM is destroyed. It's unlikely that
3360        // we'd ever get to the point where we inline this as an intrinsic without the
3361        // cache being created; however, it's possible if we always throw exceptions inside
3362        // hasOwnProperty.
3363 if (!m_vm->hasOwnPropertyCache())
3364 return false;
3365
3366 insertChecks();
3367 Node* object = get(virtualRegisterForArgument(0, registerOffset));
3368 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3369 Node* resultNode = addToGraph(HasOwnProperty, object, key);
3370 setResult(resultNode);
3371 return true;
3372 }
3373
3374 case StringPrototypeSliceIntrinsic: {
3375 if (argumentCountIncludingThis < 2)
3376 return false;
3377
3378 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3379 return false;
3380
3381 insertChecks();
3382 Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3383 Node* start = get(virtualRegisterForArgument(1, registerOffset));
3384 Node* end = nullptr;
3385 if (argumentCountIncludingThis > 2)
3386 end = get(virtualRegisterForArgument(2, registerOffset));
3387 Node* resultNode = addToGraph(StringSlice, thisString, start, end);
3388 setResult(resultNode);
3389 return true;
3390 }
3391
3392 case StringPrototypeToLowerCaseIntrinsic: {
3393 if (argumentCountIncludingThis != 1)
3394 return false;
3395
3396 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3397 return false;
3398
3399 insertChecks();
3400 Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3401 Node* resultNode = addToGraph(ToLowerCase, thisString);
3402 setResult(resultNode);
3403 return true;
3404 }
3405
3406 case NumberPrototypeToStringIntrinsic: {
3407 if (argumentCountIncludingThis != 1 && argumentCountIncludingThis != 2)
3408 return false;
3409
3410 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3411 return false;
3412
3413 insertChecks();
3414 Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset));
3415 if (argumentCountIncludingThis == 1) {
3416 Node* resultNode = addToGraph(ToString, thisNumber);
3417 setResult(resultNode);
3418 } else {
3419 Node* radix = get(virtualRegisterForArgument(1, registerOffset));
3420 Node* resultNode = addToGraph(NumberToStringWithRadix, thisNumber, radix);
3421 setResult(resultNode);
3422 }
3423 return true;
3424 }
3425
3426 case NumberIsIntegerIntrinsic: {
3427 if (argumentCountIncludingThis < 2)
3428 return false;
3429
3430 insertChecks();
3431 Node* input = get(virtualRegisterForArgument(1, registerOffset));
3432 Node* resultNode = addToGraph(NumberIsInteger, input);
3433 setResult(resultNode);
3434 return true;
3435 }
3436
3437 case CPUMfenceIntrinsic:
3438 case CPURdtscIntrinsic:
3439 case CPUCpuidIntrinsic:
3440 case CPUPauseIntrinsic: {
3441#if CPU(X86_64)
3442 if (!m_graph.m_plan.isFTL())
3443 return false;
3444 insertChecks();
3445 setResult(addToGraph(CPUIntrinsic, OpInfo(intrinsic), OpInfo()));
3446 return true;
3447#else
3448 return false;
3449#endif
3450 }
3451
3452 default:
3453 return false;
3454 }
3455 };
3456
3457 if (inlineIntrinsic()) {
3458 RELEASE_ASSERT(didSetResult);
3459 return true;
3460 }
3461
3462 return false;
3463}
3464
3465template<typename ChecksFunctor>
3466bool ByteCodeParser::handleDOMJITCall(Node* callTarget, VirtualRegister result, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
3467{
3468 if (argumentCountIncludingThis != static_cast<int>(1 + signature->argumentCount))
3469 return false;
3470 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3471 return false;
3472
3473    // FIXME: Currently, we only support functions whose argument count is at most 2.
3474    // Eventually, we should extend this. But possibly, 2 or 3 arguments can cover typical use cases.
3475 // https://bugs.webkit.org/show_bug.cgi?id=164346
3476 ASSERT_WITH_MESSAGE(argumentCountIncludingThis <= JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS, "Currently CallDOM does not support an arbitrary length arguments.");
3477
3478 insertChecks();
3479 addCall(result, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction);
3480 return true;
3481}
3482
3483
3484template<typename ChecksFunctor>
3485bool ByteCodeParser::handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks)
3486{
3487 switch (variant.intrinsic()) {
3488 case TypedArrayByteLengthIntrinsic: {
3489 insertChecks();
3490
3491 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3492 Array::Type arrayType = toArrayType(type);
3493 size_t logSize = logElementSize(type);
3494
3495 variant.structureSet().forEach([&] (Structure* structure) {
3496 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3497 ASSERT(logSize == logElementSize(curType));
3498 arrayType = refineTypedArrayType(arrayType, curType);
3499 ASSERT(arrayType != Array::Generic);
3500 });
3501
3502 Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode);
3503
3504 if (!logSize) {
3505 set(result, lengthNode);
3506 return true;
3507 }
3508
3509 // We can use a BitLShift here because typed arrays will never have a byteLength
3510 // that overflows int32.
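        // For example, a Float64Array has logSize == 3, so byteLength == length << 3.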
3511 Node* shiftNode = jsConstant(jsNumber(logSize));
3512 set(result, addToGraph(BitLShift, lengthNode, shiftNode));
3513
3514 return true;
3515 }
3516
3517 case TypedArrayLengthIntrinsic: {
3518 insertChecks();
3519
3520 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3521 Array::Type arrayType = toArrayType(type);
3522
3523 variant.structureSet().forEach([&] (Structure* structure) {
3524 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3525 arrayType = refineTypedArrayType(arrayType, curType);
3526 ASSERT(arrayType != Array::Generic);
3527 });
3528
3529 set(result, addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
3530
3531 return true;
3532
3533 }
3534
3535 case TypedArrayByteOffsetIntrinsic: {
3536 insertChecks();
3537
3538 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3539 Array::Type arrayType = toArrayType(type);
3540
3541 variant.structureSet().forEach([&] (Structure* structure) {
3542 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3543 arrayType = refineTypedArrayType(arrayType, curType);
3544 ASSERT(arrayType != Array::Generic);
3545 });
3546
3547 set(result, addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
3548
3549 return true;
3550 }
3551
3552 case UnderscoreProtoIntrinsic: {
3553 insertChecks();
3554
3555 bool canFold = !variant.structureSet().isEmpty();
3556 JSValue prototype;
3557 variant.structureSet().forEach([&] (Structure* structure) {
3558 auto getPrototypeMethod = structure->classInfo()->methodTable.getPrototype;
3559 MethodTable::GetPrototypeFunctionPtr defaultGetPrototype = JSObject::getPrototype;
3560 if (getPrototypeMethod != defaultGetPrototype) {
3561 canFold = false;
3562 return;
3563 }
3564
3565 if (structure->hasPolyProto()) {
3566 canFold = false;
3567 return;
3568 }
3569 if (!prototype)
3570 prototype = structure->storedPrototype();
3571 else if (prototype != structure->storedPrototype())
3572 canFold = false;
3573 });
3574
3575        // OK, only one prototype was found, so we perform constant folding here.
3576        // This information is important for a super constructor call to get a constant new.target.
3577 if (prototype && canFold) {
3578 set(result, weakJSConstant(prototype));
3579 return true;
3580 }
3581
3582 set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), thisNode));
3583 return true;
3584 }
3585
3586 default:
3587 return false;
3588 }
3589 RELEASE_ASSERT_NOT_REACHED();
3590}
3591
3592static void blessCallDOMGetter(Node* node)
3593{
3594 DOMJIT::CallDOMGetterSnippet* snippet = node->callDOMGetterData()->snippet;
3595 if (snippet && !snippet->effect.mustGenerate())
3596 node->clearFlags(NodeMustGenerate);
3597}
3598
3599bool ByteCodeParser::handleDOMJITGetter(VirtualRegister result, const GetByIdVariant& variant, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction)
3600{
3601 if (!variant.domAttribute())
3602 return false;
3603
3604 auto domAttribute = variant.domAttribute().value();
3605
3606    // We do not need to actually look up the CustomGetterSetter here. Checking Structures or registering watchpoints is enough,
3607    // since replacing a CustomGetterSetter always incurs a Structure transition.
3608 if (!check(variant.conditionSet()))
3609 return false;
3610 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), thisNode);
3611
3612    // We do not need to emit a CheckCell here. When the custom accessor is replaced with a different one, a Structure transition occurs.
3613 addToGraph(CheckSubClass, OpInfo(domAttribute.classInfo), thisNode);
3614
3615 bool wasSeenInJIT = true;
3616 addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), GetByIdStatus(GetByIdStatus::Custom, wasSeenInJIT, variant))), thisNode);
3617
3618 CallDOMGetterData* callDOMGetterData = m_graph.m_callDOMGetterData.add();
3619 callDOMGetterData->customAccessorGetter = variant.customAccessorGetter();
3620 ASSERT(callDOMGetterData->customAccessorGetter);
3621
3622 if (const auto* domJIT = domAttribute.domJIT) {
3623 callDOMGetterData->domJIT = domJIT;
3624 Ref<DOMJIT::CallDOMGetterSnippet> snippet = domJIT->compiler()();
3625 callDOMGetterData->snippet = snippet.ptr();
3626 m_graph.m_domJITSnippets.append(WTFMove(snippet));
3627 }
3628 DOMJIT::CallDOMGetterSnippet* callDOMGetterSnippet = callDOMGetterData->snippet;
3629 callDOMGetterData->identifierNumber = identifierNumber;
3630
3631 Node* callDOMGetterNode = nullptr;
3632    // The GlobalObject of thisNode is always used to create a DOMWrapper.
3633 if (callDOMGetterSnippet && callDOMGetterSnippet->requireGlobalObject) {
3634 Node* globalObject = addToGraph(GetGlobalObject, thisNode);
3635 callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode, globalObject);
3636 } else
3637 callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode);
3638 blessCallDOMGetter(callDOMGetterNode);
3639 set(result, callDOMGetterNode);
3640 return true;
3641}
3642
3643bool ByteCodeParser::handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType prediction, Node* base, GetByIdStatus getById)
3644{
3645 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
3646 return false;
3647 addToGraph(CheckCell, OpInfo(m_graph.freeze(getById.moduleNamespaceObject())), Edge(base, CellUse));
3648
3649 addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), getById)), base);
3650
3651 // Ideally we wouldn't have to do this Phantom. But:
3652 //
3653 // For the constant case: we must do it because otherwise we would have no way of knowing
3654 // that the scope is live at OSR here.
3655 //
3656 // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation
3657 // won't be able to handle an Undefined scope.
3658 addToGraph(Phantom, base);
3659
3660 // Constant folding in the bytecode parser is important for performance. This may not
3661 // have executed yet. If it hasn't, then we won't have a prediction. Lacking a
3662 // prediction, we'd otherwise think that it has to exit. Then when it did execute, we
3663 // would recompile. But if we can fold it here, we avoid the exit.
3664 m_graph.freeze(getById.moduleEnvironment());
3665 if (JSValue value = m_graph.tryGetConstantClosureVar(getById.moduleEnvironment(), getById.scopeOffset())) {
3666 set(result, weakJSConstant(value));
3667 return true;
3668 }
3669 set(result, addToGraph(GetClosureVar, OpInfo(getById.scopeOffset().offset()), OpInfo(prediction), weakJSConstant(getById.moduleEnvironment())));
3670 return true;
3671}
3672
3673template<typename ChecksFunctor>
3674bool ByteCodeParser::handleTypedArrayConstructor(
3675 VirtualRegister result, InternalFunction* function, int registerOffset,
3676 int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks)
3677{
3678 if (!isTypedView(type))
3679 return false;
3680
3681 if (function->classInfo() != constructorClassInfoForType(type))
3682 return false;
3683
3684 if (function->globalObject(*m_vm) != m_inlineStackTop->m_codeBlock->globalObject())
3685 return false;
3686
3687 // We only have an intrinsic for the case where you say:
3688 //
3689 // new FooArray(blah);
3690 //
3691 // Of course, 'blah' could be any of the following:
3692 //
3693 // - Integer, indicating that you want to allocate an array of that length.
3694 // This is the thing we're hoping for, and what we can actually do meaningful
3695 // optimizations for.
3696 //
3697 // - Array buffer, indicating that you want to create a view onto that _entire_
3698 // buffer.
3699 //
3700 // - Non-buffer object, indicating that you want to create a copy of that
3701 // object by pretending that it quacks like an array.
3702 //
3703 // - Anything else, indicating that you want to have an exception thrown at
3704 // you.
3705 //
3706 // The intrinsic, NewTypedArray, will behave as if it could do any of these
3707 // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
3708 // predicted Int32, then we lock it in as a normal typed array allocation.
3709 // Otherwise, NewTypedArray turns into a totally opaque function call that
3710 // may clobber the world - by virtue of it accessing properties on what could
3711 // be an object.
3712 //
3713 // Note that although the generic form of NewTypedArray sounds sort of awful,
3714 // it is actually quite likely to be more efficient than a fully generic
3715 // Construct. So, we might want to think about making NewTypedArray variadic,
3716 // or else making Construct not super slow.
3717
3718 if (argumentCountIncludingThis != 2)
3719 return false;
3720
3721 if (!function->globalObject(*m_vm)->typedArrayStructureConcurrently(type))
3722 return false;
3723
3724 insertChecks();
3725 set(result,
3726 addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
3727 return true;
3728}
3729
3730template<typename ChecksFunctor>
3731bool ByteCodeParser::handleConstantInternalFunction(
3732 Node* callTargetNode, VirtualRegister result, InternalFunction* function, int registerOffset,
3733 int argumentCountIncludingThis, CodeSpecializationKind kind, SpeculatedType prediction, const ChecksFunctor& insertChecks)
3734{
3735 VERBOSE_LOG(" Handling constant internal function ", JSValue(function), "\n");
3736
3737 // It so happens that the code below assumes that the result operand is valid. It's extremely
3738 // unlikely that the result operand would be invalid - you'd have to call this via a setter call.
3739 if (!result.isValid())
3740 return false;
3741
3742 if (kind == CodeForConstruct) {
3743 Node* newTargetNode = get(virtualRegisterForArgument(0, registerOffset));
3744 // We cannot handle the case where new.target != callee (i.e. a construct from a super call) because we
3745 // don't know what the prototype of the constructed object will be.
3746 // FIXME: If we have inlined super calls up to the call site, however, we should be able to figure out the structure. https://bugs.webkit.org/show_bug.cgi?id=152700
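        // For example, in `class A extends Array { constructor() { super(); } }`, the super() call invokes
        // Array with new.target === A, and the resulting object's prototype comes from A.prototype.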
3747 if (newTargetNode != callTargetNode)
3748 return false;
3749 }
3750
3751 if (function->classInfo() == ArrayConstructor::info()) {
3752 if (function->globalObject(*m_vm) != m_inlineStackTop->m_codeBlock->globalObject())
3753 return false;
3754
3755 insertChecks();
3756 if (argumentCountIncludingThis == 2) {
3757 set(result,
3758 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
3759 return true;
3760 }
3761
3762 for (int i = 1; i < argumentCountIncludingThis; ++i)
3763 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
3764 set(result,
3765 addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(argumentCountIncludingThis - 1)));
3766 return true;
3767 }
3768
3769 if (function->classInfo() == NumberConstructor::info()) {
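        // Number() called as a function coerces via ToNumber and yields +0 with no arguments. A
        // `new Number(x)` construct would allocate a wrapper object, which we don't model here, so we
        // only inline the call case.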
3770 if (kind == CodeForConstruct)
3771 return false;
3772
3773 insertChecks();
3774 if (argumentCountIncludingThis <= 1)
3775 set(result, jsConstant(jsNumber(0)));
3776 else
3777 set(result, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
3778
3779 return true;
3780 }
3781
3782 if (function->classInfo() == StringConstructor::info()) {
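        // String() yields the empty string with no arguments and otherwise converts its argument to a
        // string (CallStringConstructor, unlike ToString, tolerates Symbol arguments, matching String(sym)).
        // For `new String(x)` we additionally wrap the result in a String object via NewStringObject.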
3783 insertChecks();
3784
3785 Node* resultNode;
3786
3787 if (argumentCountIncludingThis <= 1)
3788 resultNode = jsConstant(m_vm->smallStrings.emptyString());
3789 else
3790 resultNode = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset)));
3791
3792 if (kind == CodeForConstruct)
3793 resultNode = addToGraph(NewStringObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->stringObjectStructure())), resultNode);
3794
3795 set(result, resultNode);
3796 return true;
3797 }
3798
3799 if (function->classInfo() == SymbolConstructor::info() && kind == CodeForCall) {
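        // Symbol(description) stringifies its argument when one is given; `new Symbol()` throws, which is
        // why we only handle CodeForCall here.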
3800 insertChecks();
3801
3802 Node* resultNode;
3803
3804 if (argumentCountIncludingThis <= 1)
3805 resultNode = addToGraph(NewSymbol);
3806 else
3807 resultNode = addToGraph(NewSymbol, addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset))));
3808
3809 set(result, resultNode);
3810 return true;
3811 }
3812
3813 // FIXME: This should handle construction as well. https://bugs.webkit.org/show_bug.cgi?id=155591
3814 if (function->classInfo() == ObjectConstructor::info() && kind == CodeForCall) {
3815 insertChecks();
3816
3817 Node* resultNode;
3818 if (argumentCountIncludingThis <= 1)
3819 resultNode = addToGraph(NewObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->objectStructureForObjectConstructor())));
3820 else
3821 resultNode = addToGraph(CallObjectConstructor, OpInfo(m_graph.freeze(function->globalObject(*m_vm))), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)));
3822 set(result, resultNode);
3823 return true;
3824 }
3825
3826 for (unsigned typeIndex = 0; typeIndex < NumberOfTypedArrayTypes; ++typeIndex) {
3827 bool handled = handleTypedArrayConstructor(
3828 result, function, registerOffset, argumentCountIncludingThis,
3829 indexToTypedArrayType(typeIndex), insertChecks);
3830 if (handled)
3831 return true;
3832 }
3833
3834 return false;
3835}
3836
3837Node* ByteCodeParser::handleGetByOffset(
3838 SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset, NodeType op)
3839{
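    // Inline property offsets live directly in the JSObject cell, so the object itself serves as the
    // property storage; out-of-line offsets live in the butterfly, which we have to load first.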
3840 Node* propertyStorage;
3841 if (isInlineOffset(offset))
3842 propertyStorage = base;
3843 else
3844 propertyStorage = addToGraph(GetButterfly, base);
3845
3846 StorageAccessData* data = m_graph.m_storageAccessData.add();
3847 data->offset = offset;
3848 data->identifierNumber = identifierNumber;
3849
3850 Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
3851
3852 return getByOffset;
3853}
3854
3855Node* ByteCodeParser::handlePutByOffset(
3856 Node* base, unsigned identifier, PropertyOffset offset,
3857 Node* value)
3858{
3859 Node* propertyStorage;
3860 if (isInlineOffset(offset))
3861 propertyStorage = base;
3862 else
3863 propertyStorage = addToGraph(GetButterfly, base);
3864
3865 StorageAccessData* data = m_graph.m_storageAccessData.add();
3866 data->offset = offset;
3867 data->identifierNumber = identifier;
3868
3869 Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
3870
3871 return result;
3872}
3873
3874bool ByteCodeParser::check(const ObjectPropertyCondition& condition)
3875{
3876 if (!condition)
3877 return false;
3878
3879 if (m_graph.watchCondition(condition))
3880 return true;
3881
3882 Structure* structure = condition.object()->structure(*m_vm);
3883 if (!condition.structureEnsuresValidity(structure))
3884 return false;
3885
3886 addToGraph(
3887 CheckStructure,
3888 OpInfo(m_graph.addStructureSet(structure)),
3889 weakJSConstant(condition.object()));
3890 return true;
3891}
3892
3893GetByOffsetMethod ByteCodeParser::promoteToConstant(GetByOffsetMethod method)
3894{
3895 if (method.kind() == GetByOffsetMethod::LoadFromPrototype
3896 && method.prototype()->structure()->dfgShouldWatch()) {
3897 if (JSValue constant = m_graph.tryGetConstantProperty(method.prototype()->value(), method.prototype()->structure(), method.offset()))
3898 return GetByOffsetMethod::constant(m_graph.freeze(constant));
3899 }
3900
3901 return method;
3902}
3903
3904bool ByteCodeParser::needsDynamicLookup(ResolveType type, OpcodeID opcode)
3905{
3906 ASSERT(opcode == op_resolve_scope || opcode == op_get_from_scope || opcode == op_put_to_scope);
3907
3908 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
3909 if (needsVarInjectionChecks(type) && globalObject->varInjectionWatchpoint()->hasBeenInvalidated())
3910 return true;
3911
3912 switch (type) {
3913 case GlobalProperty:
3914 case GlobalVar:
3915 case GlobalLexicalVar:
3916 case ClosureVar:
3917 case LocalClosureVar:
3918 case ModuleVar:
3919 return false;
3920
3921 case UnresolvedProperty:
3922 case UnresolvedPropertyWithVarInjectionChecks: {
3923        // The heuristic for UnresolvedProperty scope accesses is that we will ForceOSRExit if we
3924        // haven't exited from this access before, to let the baseline JIT try to better
3925        // cache the access. If we've already exited from this operation, it's unlikely that
3926 // the baseline will come up with a better ResolveType and instead we will compile
3927 // this as a dynamic scope access.
3928
3929 // We only track our heuristic through resolve_scope since resolve_scope will
3930 // dominate unresolved gets/puts on that scope.
3931 if (opcode != op_resolve_scope)
3932 return true;
3933
3934 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, InadequateCoverage)) {
3935 // We've already exited so give up on getting better ResolveType information.
3936 return true;
3937 }
3938
3939 // We have not exited yet, so let's have the baseline get better ResolveType information for us.
3940 // This type of code is often seen when we tier up in a loop but haven't executed the part
3941 // of a function that comes after the loop.
3942 return false;
3943 }
3944
3945 case Dynamic:
3946 return true;
3947
3948 case GlobalPropertyWithVarInjectionChecks:
3949 case GlobalVarWithVarInjectionChecks:
3950 case GlobalLexicalVarWithVarInjectionChecks:
3951 case ClosureVarWithVarInjectionChecks:
3952 return false;
3953 }
3954
3955 ASSERT_NOT_REACHED();
3956 return false;
3957}
3958
3959GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyCondition& condition)
3960{
3961 VERBOSE_LOG("Planning a load: ", condition, "\n");
3962
3963 // We might promote this to Equivalence, and a later DFG pass might also do such promotion
3964 // even if we fail, but for simplicity this cannot be asked to load an equivalence condition.
3965 // None of the clients of this method will request a load of an Equivalence condition anyway,
3966 // and supporting it would complicate the heuristics below.
3967 RELEASE_ASSERT(condition.kind() == PropertyCondition::Presence);
3968
3969 // Here's the ranking of how to handle this, from most preferred to least preferred:
3970 //
3971 // 1) Watchpoint on an equivalence condition and return a constant node for the loaded value.
3972 // No other code is emitted, and the structure of the base object is never registered.
3973 // Hence this results in zero code and we won't jettison this compilation if the object
3974 // transitions, even if the structure is watchable right now.
3975 //
3976 // 2) Need to emit a load, and the current structure of the base is going to be watched by the
3977 // DFG anyway (i.e. dfgShouldWatch). Watch the structure and emit the load. Don't watch the
3978 // condition, since the act of turning the base into a constant in IR will cause the DFG to
3979 // watch the structure anyway and doing so would subsume watching the condition.
3980 //
3981 // 3) Need to emit a load, and the current structure of the base is watchable but not by the
3982 // DFG (i.e. transitionWatchpointSetIsStillValid() and !dfgShouldWatchIfPossible()). Watch
3983 // the condition, and emit a load.
3984 //
3985 // 4) Need to emit a load, and the current structure of the base is not watchable. Emit a
3986 // structure check, and emit a load.
3987 //
3988 // 5) The condition does not hold. Give up and return null.
3989
3990 // First, try to promote Presence to Equivalence. We do this before doing anything else
3991 // because it's the most profitable. Also, there are cases where the presence is watchable but
3992 // we don't want to watch it unless it became an equivalence (see the relationship between
3993 // (1), (2), and (3) above).
3994 ObjectPropertyCondition equivalenceCondition = condition.attemptToMakeEquivalenceWithoutBarrier(*m_vm);
3995 if (m_graph.watchCondition(equivalenceCondition))
3996 return GetByOffsetMethod::constant(m_graph.freeze(equivalenceCondition.requiredValue()));
3997
3998 // At this point, we'll have to materialize the condition's base as a constant in DFG IR. Once
3999 // we do this, the frozen value will have its own idea of what the structure is. Use that from
4000 // now on just because it's less confusing.
4001 FrozenValue* base = m_graph.freeze(condition.object());
4002 Structure* structure = base->structure();
4003
4004 // Check if the structure that we've registered makes the condition hold. If not, just give
4005 // up. This is case (5) above.
4006 if (!condition.structureEnsuresValidity(structure))
4007 return GetByOffsetMethod();
4008
4009 // If the structure is watched by the DFG already, then just use this fact to emit the load.
4010 // This is case (2) above.
4011 if (structure->dfgShouldWatch())
4012 return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
4013
4014 // If we can watch the condition right now, then we can emit the load after watching it. This
4015 // is case (3) above.
4016 if (m_graph.watchCondition(condition))
4017 return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
4018
4019 // We can't watch anything but we know that the current structure satisfies the condition. So,
4020 // check for that structure and then emit the load.
4021 addToGraph(
4022 CheckStructure,
4023 OpInfo(m_graph.addStructureSet(structure)),
4024 addToGraph(JSConstant, OpInfo(base)));
4025 return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
4026}
4027
4028Node* ByteCodeParser::load(
4029 SpeculatedType prediction, unsigned identifierNumber, const GetByOffsetMethod& method,
4030 NodeType op)
4031{
4032 switch (method.kind()) {
4033 case GetByOffsetMethod::Invalid:
4034 return nullptr;
4035 case GetByOffsetMethod::Constant:
4036 return addToGraph(JSConstant, OpInfo(method.constant()));
4037 case GetByOffsetMethod::LoadFromPrototype: {
4038 Node* baseNode = addToGraph(JSConstant, OpInfo(method.prototype()));
4039 return handleGetByOffset(
4040 prediction, baseNode, identifierNumber, method.offset(), op);
4041 }
4042 case GetByOffsetMethod::Load:
4043 // Will never see this from planLoad().
4044 RELEASE_ASSERT_NOT_REACHED();
4045 return nullptr;
4046 }
4047
4048 RELEASE_ASSERT_NOT_REACHED();
4049 return nullptr;
4050}
4051
4052Node* ByteCodeParser::load(
4053 SpeculatedType prediction, const ObjectPropertyCondition& condition, NodeType op)
4054{
4055 GetByOffsetMethod method = planLoad(condition);
4056 return load(prediction, m_graph.identifiers().ensure(condition.uid()), method, op);
4057}
4058
4059bool ByteCodeParser::check(const ObjectPropertyConditionSet& conditionSet)
4060{
4061 for (const ObjectPropertyCondition& condition : conditionSet) {
4062 if (!check(condition))
4063 return false;
4064 }
4065 return true;
4066}
4067
4068GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyConditionSet& conditionSet)
4069{
4070 VERBOSE_LOG("conditionSet = ", conditionSet, "\n");
4071
4072 GetByOffsetMethod result;
4073 for (const ObjectPropertyCondition& condition : conditionSet) {
4074 switch (condition.kind()) {
4075 case PropertyCondition::Presence:
4076 RELEASE_ASSERT(!result); // Should only see exactly one of these.
4077 result = planLoad(condition);
4078 if (!result)
4079 return GetByOffsetMethod();
4080 break;
4081 default:
4082 if (!check(condition))
4083 return GetByOffsetMethod();
4084 break;
4085 }
4086 }
4087 if (!result) {
4088        // We have an unset property.
4089 ASSERT(!conditionSet.numberOfConditionsWithKind(PropertyCondition::Presence));
4090 return GetByOffsetMethod::constant(m_constantUndefined);
4091 }
4092 return result;
4093}
4094
4095Node* ByteCodeParser::load(
4096 SpeculatedType prediction, const ObjectPropertyConditionSet& conditionSet, NodeType op)
4097{
4098 GetByOffsetMethod method = planLoad(conditionSet);
4099 return load(
4100 prediction,
4101 m_graph.identifiers().ensure(conditionSet.slotBaseCondition().uid()),
4102 method, op);
4103}
4104
4105ObjectPropertyCondition ByteCodeParser::presenceLike(
4106 JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
4107{
4108 if (set.isEmpty())
4109 return ObjectPropertyCondition();
4110 unsigned attributes;
4111 PropertyOffset firstOffset = set[0]->getConcurrently(uid, attributes);
4112 if (firstOffset != offset)
4113 return ObjectPropertyCondition();
4114 for (unsigned i = 1; i < set.size(); ++i) {
4115 unsigned otherAttributes;
4116 PropertyOffset otherOffset = set[i]->getConcurrently(uid, otherAttributes);
4117 if (otherOffset != offset || otherAttributes != attributes)
4118 return ObjectPropertyCondition();
4119 }
4120 return ObjectPropertyCondition::presenceWithoutBarrier(knownBase, uid, offset, attributes);
4121}
4122
4123bool ByteCodeParser::checkPresenceLike(
4124 JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
4125{
4126 return check(presenceLike(knownBase, uid, offset, set));
4127}
4128
4129void ByteCodeParser::checkPresenceLike(
4130 Node* base, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
4131{
4132 if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>(*m_vm)) {
4133 if (checkPresenceLike(knownBase, uid, offset, set))
4134 return;
4135 }
4136
4137 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(set)), base);
4138}
4139
4140template<typename VariantType>
4141Node* ByteCodeParser::load(
4142 SpeculatedType prediction, Node* base, unsigned identifierNumber, const VariantType& variant)
4143{
4144 // Make sure backwards propagation knows that we've used base.
4145 addToGraph(Phantom, base);
4146
4147 bool needStructureCheck = true;
4148
4149 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
4150
4151 if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>(*m_vm)) {
4152 // Try to optimize away the structure check. Note that it's not worth doing anything about this
4153 // if the base's structure is watched.
4154 Structure* structure = base->constant()->structure();
4155 if (!structure->dfgShouldWatch()) {
4156 if (!variant.conditionSet().isEmpty()) {
4157 // This means that we're loading from a prototype or we have a property miss. We expect
4158 // the base not to have the property. We can only use ObjectPropertyCondition if all of
4159 // the structures in the variant.structureSet() agree on the prototype (it would be
4160 // hilariously rare if they didn't). Note that we are relying on structureSet() having
4161 // at least one element. That will always be true here because of how GetByIdStatus/PutByIdStatus work.
4162
4163 // FIXME: right now, if we have an OPCS, we have mono proto. However, this will
4164 // need to be changed in the future once we have a hybrid data structure for
4165 // poly proto:
4166 // https://bugs.webkit.org/show_bug.cgi?id=177339
4167 JSObject* prototype = variant.structureSet()[0]->storedPrototypeObject();
4168 bool allAgree = true;
4169 for (unsigned i = 1; i < variant.structureSet().size(); ++i) {
4170 if (variant.structureSet()[i]->storedPrototypeObject() != prototype) {
4171 allAgree = false;
4172 break;
4173 }
4174 }
4175 if (allAgree) {
4176 ObjectPropertyCondition condition = ObjectPropertyCondition::absenceWithoutBarrier(
4177 knownBase, uid, prototype);
4178 if (check(condition))
4179 needStructureCheck = false;
4180 }
4181 } else {
4182 // This means we're loading directly from base. We can avoid all of the code that follows
4183 // if we can prove that the property is a constant. Otherwise, we try to prove that the
4184 // property is watchably present, in which case we get rid of the structure check.
4185
4186 ObjectPropertyCondition presenceCondition =
4187 presenceLike(knownBase, uid, variant.offset(), variant.structureSet());
4188 if (presenceCondition) {
4189 ObjectPropertyCondition equivalenceCondition =
4190 presenceCondition.attemptToMakeEquivalenceWithoutBarrier(*m_vm);
4191 if (m_graph.watchCondition(equivalenceCondition))
4192 return weakJSConstant(equivalenceCondition.requiredValue());
4193
4194 if (check(presenceCondition))
4195 needStructureCheck = false;
4196 }
4197 }
4198 }
4199 }
4200
4201 if (needStructureCheck)
4202 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
4203
4204 if (variant.isPropertyUnset()) {
4205 if (m_graph.watchConditions(variant.conditionSet()))
4206 return jsConstant(jsUndefined());
4207 return nullptr;
4208 }
4209
4210 SpeculatedType loadPrediction;
4211 NodeType loadOp;
4212 if (variant.callLinkStatus() || variant.intrinsic() != NoIntrinsic) {
4213 loadPrediction = SpecCellOther;
4214 loadOp = GetGetterSetterByOffset;
4215 } else {
4216 loadPrediction = prediction;
4217 loadOp = GetByOffset;
4218 }
4219
4220 Node* loadedValue;
4221 if (!variant.conditionSet().isEmpty())
4222 loadedValue = load(loadPrediction, variant.conditionSet(), loadOp);
4223 else {
4224 if (needStructureCheck && base->hasConstant()) {
4225 // We did emit a structure check. That means that we have an opportunity to do constant folding
4226 // here, since we didn't do it above.
4227 JSValue constant = m_graph.tryGetConstantProperty(
4228 base->asJSValue(), *m_graph.addStructureSet(variant.structureSet()), variant.offset());
4229 if (constant)
4230 return weakJSConstant(constant);
4231 }
4232
4233 loadedValue = handleGetByOffset(
4234 loadPrediction, base, identifierNumber, variant.offset(), loadOp);
4235 }
4236
4237 return loadedValue;
4238}
4239
4240Node* ByteCodeParser::store(Node* base, unsigned identifier, const PutByIdVariant& variant, Node* value)
4241{
4242 RELEASE_ASSERT(variant.kind() == PutByIdVariant::Replace);
4243
4244 checkPresenceLike(base, m_graph.identifiers()[identifier], variant.offset(), variant.structure());
4245 return handlePutByOffset(base, identifier, variant.offset(), value);
4246}
4247
4248void ByteCodeParser::handleGetById(
4249 VirtualRegister destination, SpeculatedType prediction, Node* base, unsigned identifierNumber,
4250 GetByIdStatus getByIdStatus, AccessType type, unsigned instructionSize)
4251{
4252 // Attempt to reduce the set of things in the GetByIdStatus.
4253 if (base->op() == NewObject) {
4254 bool ok = true;
4255 for (unsigned i = m_currentBlock->size(); i--;) {
4256 Node* node = m_currentBlock->at(i);
4257 if (node == base)
4258 break;
4259 if (writesOverlap(m_graph, node, JSCell_structureID)) {
4260 ok = false;
4261 break;
4262 }
4263 }
4264 if (ok)
4265 getByIdStatus.filter(base->structure().get());
4266 }
4267
4268 NodeType getById;
4269 if (type == AccessType::Get)
4270 getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
4271 else if (type == AccessType::TryGet)
4272 getById = TryGetById;
4273 else
4274 getById = getByIdStatus.makesCalls() ? GetByIdDirectFlush : GetByIdDirect;
4275
4276 if (getById != TryGetById && getByIdStatus.isModuleNamespace()) {
4277 if (handleModuleNamespaceLoad(destination, prediction, base, getByIdStatus)) {
4278 if (UNLIKELY(m_graph.compilation()))
4279 m_graph.compilation()->noticeInlinedGetById();
4280 return;
4281 }
4282 }
4283
4284    // Special path for custom accessors, since a custom accessor's offset does not have any meaning.
4285    // So this is completely different from the Simple case. But we have a chance to optimize it when we use DOMJIT.
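    // For illustration (hypothetical example, not part of the original comment): a DOM attribute
    // getter such as `node.firstChild` reports itself as a Custom access carrying a domAttribute().
    // Rather than calling the C++ getter through a generic IC, handleDOMJITGetter() can emit a node
    // that the DOMJIT lowers to a direct load, guarded by the usual structure/type checks.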
4286 if (Options::useDOMJIT() && getByIdStatus.isCustom()) {
4287 ASSERT(getByIdStatus.numVariants() == 1);
4288 ASSERT(!getByIdStatus.makesCalls());
4289 GetByIdVariant variant = getByIdStatus[0];
4290 ASSERT(variant.domAttribute());
4291 if (handleDOMJITGetter(destination, variant, base, identifierNumber, prediction)) {
4292 if (UNLIKELY(m_graph.compilation()))
4293 m_graph.compilation()->noticeInlinedGetById();
4294 return;
4295 }
4296 }
4297
4298 ASSERT(type == AccessType::Get || type == AccessType::GetDirect || !getByIdStatus.makesCalls());
4299 if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::useAccessInlining()) {
4300 set(destination,
4301 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
4302 return;
4303 }
4304
4305 // FIXME: If we use the GetByIdStatus for anything then we should record it and insert a node
4306 // after everything else (like the GetByOffset or whatever) that will filter the recorded
4307 // GetByIdStatus. That means that the constant folder also needs to do the same!
4308
4309 if (getByIdStatus.numVariants() > 1) {
4310 if (getByIdStatus.makesCalls() || !m_graph.m_plan.isFTL()
4311 || !Options::usePolymorphicAccessInlining()
4312 || getByIdStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) {
4313 set(destination,
4314 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
4315 return;
4316 }
4317
4318 addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), getByIdStatus)), base);
4319
4320 Vector<MultiGetByOffsetCase, 2> cases;
4321
4322        // 1) Emit prototype structure checks for all chains. This may not be optimal if some
4323        //    rarely executed case in the chain requires a lot of checks and those checks are
4324        //    not watchpointable.
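        // Rough sketch of the shape being built here (hypothetical structures and offsets): if
        // `o.x` was profiled with structure S1 (x at inline offset 0) and structure S2 (x at
        // out-of-line offset 2), we end up with two MultiGetByOffsetCase entries, one per structure
        // set, each describing how to load x in that case. A variant that instead needs a prototype
        // chain walk gets its GetByOffsetMethod from planLoad() below.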
4325 for (const GetByIdVariant& variant : getByIdStatus.variants()) {
4326 if (variant.intrinsic() != NoIntrinsic) {
4327 set(destination,
4328 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
4329 return;
4330 }
4331
4332 if (variant.conditionSet().isEmpty()) {
4333 cases.append(
4334 MultiGetByOffsetCase(
4335 *m_graph.addStructureSet(variant.structureSet()),
4336 GetByOffsetMethod::load(variant.offset())));
4337 continue;
4338 }
4339
4340 GetByOffsetMethod method = planLoad(variant.conditionSet());
4341 if (!method) {
4342 set(destination,
4343 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
4344 return;
4345 }
4346
4347 cases.append(MultiGetByOffsetCase(*m_graph.addStructureSet(variant.structureSet()), method));
4348 }
4349
4350 if (UNLIKELY(m_graph.compilation()))
4351 m_graph.compilation()->noticeInlinedGetById();
4352
4353 // 2) Emit a MultiGetByOffset
4354 MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
4355 data->cases = cases;
4356 data->identifierNumber = identifierNumber;
4357 set(destination,
4358 addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
4359 return;
4360 }
4361
4362 addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), getByIdStatus)), base);
4363
4364 ASSERT(getByIdStatus.numVariants() == 1);
4365 GetByIdVariant variant = getByIdStatus[0];
4366
4367 Node* loadedValue = load(prediction, base, identifierNumber, variant);
4368 if (!loadedValue) {
4369 set(destination,
4370 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
4371 return;
4372 }
4373
4374 if (UNLIKELY(m_graph.compilation()))
4375 m_graph.compilation()->noticeInlinedGetById();
4376
4377 ASSERT(type == AccessType::Get || type == AccessType::GetDirect || !variant.callLinkStatus());
4378 if (!variant.callLinkStatus() && variant.intrinsic() == NoIntrinsic) {
4379 set(destination, loadedValue);
4380 return;
4381 }
4382
4383 Node* getter = addToGraph(GetGetter, loadedValue);
4384
4385 if (handleIntrinsicGetter(destination, prediction, variant, base,
4386 [&] () {
4387 addToGraph(CheckCell, OpInfo(m_graph.freeze(variant.intrinsicFunction())), getter);
4388 })) {
4389 addToGraph(Phantom, base);
4390 return;
4391 }
4392
4393 ASSERT(variant.intrinsic() == NoIntrinsic);
4394
4395 // Make a call. We don't try to get fancy with using the smallest operand number because
4396 // the stack layout phase should compress the stack anyway.
4397
4398 unsigned numberOfParameters = 0;
4399 numberOfParameters++; // The 'this' argument.
4400 numberOfParameters++; // True return PC.
4401
4402 // Start with a register offset that corresponds to the last in-use register.
4403 int registerOffset = virtualRegisterForLocal(
4404 m_inlineStackTop->m_profiledBlock->numCalleeLocals() - 1).offset();
4405 registerOffset -= numberOfParameters;
4406 registerOffset -= CallFrame::headerSizeInRegisters;
4407
4408 // Get the alignment right.
4409 registerOffset = -WTF::roundUpToMultipleOf(
4410 stackAlignmentRegisters(),
4411 -registerOffset);
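    // Worked example with made-up numbers (assuming CallFrame::headerSizeInRegisters == 5 and
    // stackAlignmentRegisters() == 2, which is typical for 64-bit builds but is only an assumption
    // here): with numCalleeLocals == 10 the last local lives at offset -10, so
    //     registerOffset = -10 - 2 (parameters) - 5 (header) = -17,
    // and rounding the magnitude up to a multiple of 2 yields registerOffset = -18.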
4412
4413 ensureLocals(
4414 m_inlineStackTop->remapOperand(
4415 VirtualRegister(registerOffset)).toLocal());
4416
4417 // Issue SetLocals. This has two effects:
4418 // 1) That's how handleCall() sees the arguments.
4419 // 2) If we inline then this ensures that the arguments are flushed so that if you use
4420 // the dreaded arguments object on the getter, the right things happen. Well, sort of -
4421 // since we only really care about 'this' in this case. But we're not going to take that
4422 // shortcut.
4423 set(virtualRegisterForArgument(0, registerOffset), base, ImmediateNakedSet);
4424
4425 // We've set some locals, but they are not user-visible. It's still OK to exit from here.
4426 m_exitOK = true;
4427 addToGraph(ExitOK);
4428
4429 handleCall(
4430 destination, Call, InlineCallFrame::GetterCall, instructionSize,
4431 getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
4432}
4433
4434void ByteCodeParser::emitPutById(
4435 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
4436{
4437 if (isDirect)
4438 addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
4439 else
4440 addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
4441}
4442
4443void ByteCodeParser::handlePutById(
4444 Node* base, unsigned identifierNumber, Node* value,
4445 const PutByIdStatus& putByIdStatus, bool isDirect, unsigned instructionSize)
4446{
4447 if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::useAccessInlining()) {
4448 if (!putByIdStatus.isSet())
4449 addToGraph(ForceOSRExit);
4450 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4451 return;
4452 }
4453
4454 if (putByIdStatus.numVariants() > 1) {
4455 if (!m_graph.m_plan.isFTL() || putByIdStatus.makesCalls()
4456 || !Options::usePolymorphicAccessInlining()
4457 || putByIdStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) {
4458 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4459 return;
4460 }
4461
4462 if (!isDirect) {
4463 for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
4464 if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
4465 continue;
4466 if (!check(putByIdStatus[variantIndex].conditionSet())) {
4467 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4468 return;
4469 }
4470 }
4471 }
4472
4473 if (UNLIKELY(m_graph.compilation()))
4474 m_graph.compilation()->noticeInlinedPutById();
4475
4476 addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
4477
4478 for (const PutByIdVariant& variant : putByIdStatus.variants()) {
4479 for (Structure* structure : variant.oldStructure())
4480 m_graph.registerStructure(structure);
4481 if (variant.kind() == PutByIdVariant::Transition)
4482 m_graph.registerStructure(variant.newStructure());
4483 }
4484
4485 MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
4486 data->variants = putByIdStatus.variants();
4487 data->identifierNumber = identifierNumber;
4488 addToGraph(MultiPutByOffset, OpInfo(data), base, value);
4489 return;
4490 }
4491
4492 ASSERT(putByIdStatus.numVariants() == 1);
4493 const PutByIdVariant& variant = putByIdStatus[0];
4494
4495 switch (variant.kind()) {
4496 case PutByIdVariant::Replace: {
4497 addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
4498
4499 store(base, identifierNumber, variant, value);
4500 if (UNLIKELY(m_graph.compilation()))
4501 m_graph.compilation()->noticeInlinedPutById();
4502 return;
4503 }
4504
4505 case PutByIdVariant::Transition: {
4506 addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
4507
4508 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
4509 if (!check(variant.conditionSet())) {
4510 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4511 return;
4512 }
4513
4514 ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
4515
4516 Node* propertyStorage;
4517 Transition* transition = m_graph.m_transitions.add(
4518 m_graph.registerStructure(variant.oldStructureForTransition()), m_graph.registerStructure(variant.newStructure()));
4519
4520 if (variant.reallocatesStorage()) {
4521
4522 // If we're growing the property storage then it must be because we're
4523 // storing into the out-of-line storage.
4524 ASSERT(!isInlineOffset(variant.offset()));
4525
4526 if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
4527 propertyStorage = addToGraph(
4528 AllocatePropertyStorage, OpInfo(transition), base);
4529 } else {
4530 propertyStorage = addToGraph(
4531 ReallocatePropertyStorage, OpInfo(transition),
4532 base, addToGraph(GetButterfly, base));
4533 }
4534 } else {
4535 if (isInlineOffset(variant.offset()))
4536 propertyStorage = base;
4537 else
4538 propertyStorage = addToGraph(GetButterfly, base);
4539 }
4540
4541 StorageAccessData* data = m_graph.m_storageAccessData.add();
4542 data->offset = variant.offset();
4543 data->identifierNumber = identifierNumber;
4544
4545 // NOTE: We could GC at this point because someone could insert an operation that GCs.
4546 // That's fine because:
4547 // - Things already in the structure will get scanned because we haven't messed with
4548 // the object yet.
4549        //   - The value we are about to store is going to be kept live by OSR exit handling. So
4550 // if the GC does a conservative scan here it will see the new value.
4551
4552 addToGraph(
4553 PutByOffset,
4554 OpInfo(data),
4555 propertyStorage,
4556 base,
4557 value);
4558
4559 if (variant.reallocatesStorage())
4560 addToGraph(NukeStructureAndSetButterfly, base, propertyStorage);
4561
4562 // FIXME: PutStructure goes last until we fix either
4563 // https://bugs.webkit.org/show_bug.cgi?id=142921 or
4564 // https://bugs.webkit.org/show_bug.cgi?id=142924.
4565 addToGraph(PutStructure, OpInfo(transition), base);
4566
4567 if (UNLIKELY(m_graph.compilation()))
4568 m_graph.compilation()->noticeInlinedPutById();
4569 return;
4570 }
4571
4572 case PutByIdVariant::Setter: {
4573 addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
4574
4575 Node* loadedValue = load(SpecCellOther, base, identifierNumber, variant);
4576 if (!loadedValue) {
4577 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4578 return;
4579 }
4580
4581 Node* setter = addToGraph(GetSetter, loadedValue);
4582
4583 // Make a call. We don't try to get fancy with using the smallest operand number because
4584 // the stack layout phase should compress the stack anyway.
4585
4586 unsigned numberOfParameters = 0;
4587 numberOfParameters++; // The 'this' argument.
4588 numberOfParameters++; // The new value.
4589 numberOfParameters++; // True return PC.
4590
4591 // Start with a register offset that corresponds to the last in-use register.
4592 int registerOffset = virtualRegisterForLocal(
4593 m_inlineStackTop->m_profiledBlock->numCalleeLocals() - 1).offset();
4594 registerOffset -= numberOfParameters;
4595 registerOffset -= CallFrame::headerSizeInRegisters;
4596
4597 // Get the alignment right.
4598 registerOffset = -WTF::roundUpToMultipleOf(
4599 stackAlignmentRegisters(),
4600 -registerOffset);
4601
4602 ensureLocals(
4603 m_inlineStackTop->remapOperand(
4604 VirtualRegister(registerOffset)).toLocal());
4605
4606 set(virtualRegisterForArgument(0, registerOffset), base, ImmediateNakedSet);
4607 set(virtualRegisterForArgument(1, registerOffset), value, ImmediateNakedSet);
4608
4609 // We've set some locals, but they are not user-visible. It's still OK to exit from here.
4610 m_exitOK = true;
4611 addToGraph(ExitOK);
4612
4613 handleCall(
4614 VirtualRegister(), Call, InlineCallFrame::SetterCall,
4615 instructionSize, setter, numberOfParameters - 1, registerOffset,
4616 *variant.callLinkStatus(), SpecOther);
4617 return;
4618 }
4619
4620 default: {
4621 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4622 return;
4623 } }
4624}
4625
4626void ByteCodeParser::prepareToParseBlock()
4627{
4628 clearCaches();
4629 ASSERT(m_setLocalQueue.isEmpty());
4630}
4631
4632void ByteCodeParser::clearCaches()
4633{
4634 m_constants.shrink(0);
4635}
4636
4637template<typename Op>
4638void ByteCodeParser::parseGetById(const Instruction* currentInstruction)
4639{
4640 auto bytecode = currentInstruction->as<Op>();
4641 SpeculatedType prediction = getPrediction();
4642
4643 Node* base = get(bytecode.m_base);
4644 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
4645
4646 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
4647 GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
4648 m_inlineStackTop->m_profiledBlock,
4649 m_inlineStackTop->m_baselineMap, m_icContextStack,
4650 currentCodeOrigin(), uid);
4651
4652 AccessType type = AccessType::Get;
4653 unsigned opcodeLength = currentInstruction->size();
4654 if (Op::opcodeID == op_try_get_by_id)
4655 type = AccessType::TryGet;
4656 else if (Op::opcodeID == op_get_by_id_direct)
4657 type = AccessType::GetDirect;
4658
4659 handleGetById(
4660 bytecode.m_dst, prediction, base, identifierNumber, getByIdStatus, type, opcodeLength);
4661
4662}
4663
4664static uint64_t makeDynamicVarOpInfo(unsigned identifierNumber, unsigned getPutInfo)
4665{
4666 static_assert(sizeof(identifierNumber) == 4,
4667 "We cannot fit identifierNumber into the high bits of m_opInfo");
4668 return static_cast<uint64_t>(identifierNumber) | (static_cast<uint64_t>(getPutInfo) << 32);
4669}
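// For illustration: the identifier number occupies the low 32 bits and getPutInfo the high 32 bits,
// so e.g. makeDynamicVarOpInfo(7, 3) == 0x0000000300000007. The consumer of m_opInfo can recover
// both halves with a shift and a mask.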
4670
4671// The idiom:
4672// if (true) { ...; goto label; } else label: continue
4673// Allows using NEXT_OPCODE as a statement, even in unbraced if+else, while containing a `continue`.
4674// The more common idiom:
4675// do { ...; } while (false)
4676// Doesn't allow using `continue`.
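// A minimal sketch of the problem being avoided (illustrative, not real parser code): the macro
// body ends in `continue`, which must restart parseBlock()'s enclosing `while (true)` loop.
//
//     do { m_currentIndex += currentInstruction->size(); continue; } while (false)
//
// would not work: that `continue` jumps to the do/while's own (false) condition, so control simply
// falls out of the macro instead of restarting the outer loop. The if/goto/else form below keeps
// the trailing `continue` outside any inner loop while still expanding to a single statement.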
4677#define NEXT_OPCODE(name) \
4678 if (true) { \
4679 m_currentIndex += currentInstruction->size(); \
4680 goto WTF_CONCAT(NEXT_OPCODE_, __LINE__); /* Need a unique label: usable more than once per function. */ \
4681 } else \
4682 WTF_CONCAT(NEXT_OPCODE_, __LINE__): \
4683 continue
4684
4685#define LAST_OPCODE_LINKED(name) do { \
4686 m_currentIndex += currentInstruction->size(); \
4687 m_exitOK = false; \
4688 return; \
4689 } while (false)
4690
4691#define LAST_OPCODE(name) \
4692 do { \
4693 if (m_currentBlock->terminal()) { \
4694 switch (m_currentBlock->terminal()->op()) { \
4695 case Jump: \
4696 case Branch: \
4697 case Switch: \
4698 ASSERT(!m_currentBlock->isLinked); \
4699 m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock); \
4700 break;\
4701 default: break; \
4702 } \
4703 } \
4704 LAST_OPCODE_LINKED(name); \
4705 } while (false)
4706
4707void ByteCodeParser::parseBlock(unsigned limit)
4708{
4709 auto& instructions = m_inlineStackTop->m_codeBlock->instructions();
4710 unsigned blockBegin = m_currentIndex;
4711
4712 // If we are the first basic block, introduce markers for arguments. This allows
4713 // us to track if a use of an argument may use the actual argument passed, as
4714 // opposed to using a value we set explicitly.
4715 if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
4716 auto addResult = m_graph.m_rootToArguments.add(m_currentBlock, ArgumentsVector());
4717 RELEASE_ASSERT(addResult.isNewEntry);
4718 ArgumentsVector& entrypointArguments = addResult.iterator->value;
4719 entrypointArguments.resize(m_numArguments);
4720
4721 // We will emit SetArgumentDefinitely nodes. They don't exit, but we're at the top of an op_enter so
4722 // exitOK = true.
4723 m_exitOK = true;
4724 for (unsigned argument = 0; argument < m_numArguments; ++argument) {
4725 VariableAccessData* variable = newVariableAccessData(
4726 virtualRegisterForArgument(argument));
4727 variable->mergeStructureCheckHoistingFailed(
4728 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
4729 variable->mergeCheckArrayHoistingFailed(
4730 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
4731
4732 Node* setArgument = addToGraph(SetArgumentDefinitely, OpInfo(variable));
4733 entrypointArguments[argument] = setArgument;
4734 m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
4735 }
4736 }
4737
4738 CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
4739
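    // Note on the lambda below (an assumption inferred from how outOfLineJumpOffset() is used, not
    // stated in the original source): a stored target of 0 means the jump distance did not fit in
    // the instruction's narrow operand, and the real offset lives in the code block's out-of-line
    // jump table keyed by the current instruction; jumpTarget() hides that detail from the opcode
    // cases further down.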
4740 auto jumpTarget = [&](int target) {
4741 if (target)
4742 return target;
4743 return codeBlock->outOfLineJumpOffset(m_currentInstruction);
4744 };
4745
4746 while (true) {
4747 // We're staring at a new bytecode instruction. So we once again have a place that we can exit
4748 // to.
4749 m_exitOK = true;
4750
4751 processSetLocalQueue();
4752
4753 // Don't extend over jump destinations.
4754 if (m_currentIndex == limit) {
4755 // Ordinarily we want to plant a jump. But refuse to do this if the block is
4756 // empty. This is a special case for inlining, which might otherwise create
4757 // some empty blocks in some cases. When parseBlock() returns with an empty
4758 // block, it will get repurposed instead of creating a new one. Note that this
4759 // logic relies on every bytecode resulting in one or more nodes, which would
4760 // be true anyway except for op_loop_hint, which emits a Phantom to force this
4761 // to be true.
4762
4763 if (!m_currentBlock->isEmpty())
4764 addJumpTo(m_currentIndex);
4765 return;
4766 }
4767
4768 // Switch on the current bytecode opcode.
4769 const Instruction* currentInstruction = instructions.at(m_currentIndex).ptr();
4770 m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
4771 OpcodeID opcodeID = currentInstruction->opcodeID();
4772
4773 VERBOSE_LOG(" parsing ", currentCodeOrigin(), ": ", opcodeID, "\n");
4774
4775 if (UNLIKELY(m_graph.compilation())) {
4776 addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
4777 Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
4778 }
4779
4780 switch (opcodeID) {
4781
4782 // === Function entry opcodes ===
4783
4784 case op_enter: {
4785 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
4786 // Initialize all locals to undefined.
4787 for (int i = 0; i < m_inlineStackTop->m_codeBlock->numVars(); ++i)
4788 set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
4789
4790 NEXT_OPCODE(op_enter);
4791 }
4792
4793 case op_to_this: {
4794 Node* op1 = getThis();
4795 auto& metadata = currentInstruction->as<OpToThis>().metadata(codeBlock);
4796 StructureID cachedStructureID = metadata.m_cachedStructureID;
4797 Structure* cachedStructure = nullptr;
4798 if (cachedStructureID)
4799 cachedStructure = m_vm->heap.structureIDTable().get(cachedStructureID);
4800 if (metadata.m_toThisStatus != ToThisOK
4801 || !cachedStructure
4802 || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
4803 || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
4804 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
4805 || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
4806 setThis(addToGraph(ToThis, OpInfo(), OpInfo(getPrediction()), op1));
4807 } else {
4808 addToGraph(
4809 CheckStructure,
4810 OpInfo(m_graph.addStructureSet(cachedStructure)),
4811 op1);
4812 }
4813 NEXT_OPCODE(op_to_this);
4814 }
4815
4816 case op_create_this: {
4817 auto bytecode = currentInstruction->as<OpCreateThis>();
4818 Node* callee = get(VirtualRegister(bytecode.m_callee));
4819
4820 JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm);
4821 if (!function) {
4822 JSCell* cachedFunction = bytecode.metadata(codeBlock).m_cachedCallee.unvalidatedGet();
4823 if (cachedFunction
4824 && cachedFunction != JSCell::seenMultipleCalleeObjects()
4825 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
4826 ASSERT(cachedFunction->inherits<JSFunction>(*m_vm));
4827
4828 FrozenValue* frozen = m_graph.freeze(cachedFunction);
4829 addToGraph(CheckCell, OpInfo(frozen), callee);
4830
4831 function = static_cast<JSFunction*>(cachedFunction);
4832 }
4833 }
4834
4835 bool alreadyEmitted = false;
4836 if (function) {
4837 if (FunctionRareData* rareData = function->rareData()) {
4838 if (rareData->allocationProfileWatchpointSet().isStillValid()) {
4839 Structure* structure = rareData->objectAllocationStructure();
4840 JSObject* prototype = rareData->objectAllocationPrototype();
4841 if (structure
4842 && (structure->hasMonoProto() || prototype)
4843 && rareData->allocationProfileWatchpointSet().isStillValid()) {
4844
4845 m_graph.freeze(rareData);
4846 m_graph.watchpoints().addLazily(rareData->allocationProfileWatchpointSet());
4847
4848 // The callee is still live up to this point.
4849 addToGraph(Phantom, callee);
4850 Node* object = addToGraph(NewObject, OpInfo(m_graph.registerStructure(structure)));
4851 if (structure->hasPolyProto()) {
4852 StorageAccessData* data = m_graph.m_storageAccessData.add();
4853 data->offset = knownPolyProtoOffset;
4854 data->identifierNumber = m_graph.identifiers().ensure(m_graph.m_vm.propertyNames->builtinNames().polyProtoName().impl());
4855 ASSERT(isInlineOffset(knownPolyProtoOffset));
4856 addToGraph(PutByOffset, OpInfo(data), object, object, weakJSConstant(prototype));
4857 }
4858 set(VirtualRegister(bytecode.m_dst), object);
4859 alreadyEmitted = true;
4860 }
4861 }
4862 }
4863 }
4864 if (!alreadyEmitted) {
4865 set(VirtualRegister(bytecode.m_dst),
4866 addToGraph(CreateThis, OpInfo(bytecode.m_inlineCapacity), callee));
4867 }
4868 NEXT_OPCODE(op_create_this);
4869 }
4870
4871 case op_new_object: {
4872 auto bytecode = currentInstruction->as<OpNewObject>();
4873 set(bytecode.m_dst,
4874 addToGraph(NewObject,
4875 OpInfo(m_graph.registerStructure(bytecode.metadata(codeBlock).m_objectAllocationProfile.structure()))));
4876 NEXT_OPCODE(op_new_object);
4877 }
4878
4879 case op_new_array: {
4880 auto bytecode = currentInstruction->as<OpNewArray>();
4881 int startOperand = bytecode.m_argv.offset();
4882 int numOperands = bytecode.m_argc;
4883 ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).m_arrayAllocationProfile;
4884 for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
4885 addVarArgChild(get(VirtualRegister(operandIdx)));
4886 unsigned vectorLengthHint = std::max<unsigned>(profile.vectorLengthHintConcurrently(), numOperands);
4887 set(bytecode.m_dst, addToGraph(Node::VarArg, NewArray, OpInfo(profile.selectIndexingTypeConcurrently()), OpInfo(vectorLengthHint)));
4888 NEXT_OPCODE(op_new_array);
4889 }
4890
4891 case op_new_array_with_spread: {
4892 auto bytecode = currentInstruction->as<OpNewArrayWithSpread>();
4893 int startOperand = bytecode.m_argv.offset();
4894 int numOperands = bytecode.m_argc;
4895 const BitVector& bitVector = m_inlineStackTop->m_profiledBlock->unlinkedCodeBlock()->bitVector(bytecode.m_bitVector);
4896 for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
4897 addVarArgChild(get(VirtualRegister(operandIdx)));
4898
4899 BitVector* copy = m_graph.m_bitVectors.add(bitVector);
4900 ASSERT(*copy == bitVector);
4901
4902 set(bytecode.m_dst,
4903 addToGraph(Node::VarArg, NewArrayWithSpread, OpInfo(copy)));
4904 NEXT_OPCODE(op_new_array_with_spread);
4905 }
4906
4907 case op_spread: {
4908 auto bytecode = currentInstruction->as<OpSpread>();
4909 set(bytecode.m_dst,
4910 addToGraph(Spread, get(bytecode.m_argument)));
4911 NEXT_OPCODE(op_spread);
4912 }
4913
4914 case op_new_array_with_size: {
4915 auto bytecode = currentInstruction->as<OpNewArrayWithSize>();
4916 ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).m_arrayAllocationProfile;
4917 set(bytecode.m_dst, addToGraph(NewArrayWithSize, OpInfo(profile.selectIndexingTypeConcurrently()), get(bytecode.m_length)));
4918 NEXT_OPCODE(op_new_array_with_size);
4919 }
4920
4921 case op_new_array_buffer: {
4922 auto bytecode = currentInstruction->as<OpNewArrayBuffer>();
4923 // Unfortunately, we can't allocate a new JSImmutableButterfly if the profile tells us new information because we
4924 // cannot allocate from compilation threads.
4925 WTF::loadLoadFence();
4926 FrozenValue* frozen = get(VirtualRegister(bytecode.m_immutableButterfly))->constant();
4927 WTF::loadLoadFence();
4928 JSImmutableButterfly* immutableButterfly = frozen->cast<JSImmutableButterfly*>();
4929 NewArrayBufferData data { };
4930 data.indexingMode = immutableButterfly->indexingMode();
4931 data.vectorLengthHint = immutableButterfly->toButterfly()->vectorLength();
4932
4933 set(VirtualRegister(bytecode.m_dst), addToGraph(NewArrayBuffer, OpInfo(frozen), OpInfo(data.asQuadWord)));
4934 NEXT_OPCODE(op_new_array_buffer);
4935 }
4936
4937 case op_new_regexp: {
4938 auto bytecode = currentInstruction->as<OpNewRegexp>();
4939 ASSERT(bytecode.m_regexp.isConstant());
4940 FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_regexp.offset()));
4941 set(bytecode.m_dst, addToGraph(NewRegexp, OpInfo(frozenRegExp), jsConstant(jsNumber(0))));
4942 NEXT_OPCODE(op_new_regexp);
4943 }
4944
4945 case op_get_rest_length: {
4946 auto bytecode = currentInstruction->as<OpGetRestLength>();
4947 InlineCallFrame* inlineCallFrame = this->inlineCallFrame();
4948 Node* length;
4949 if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
4950 unsigned argumentsLength = inlineCallFrame->argumentCountIncludingThis - 1;
4951 JSValue restLength;
4952 if (argumentsLength <= bytecode.m_numParametersToSkip)
4953 restLength = jsNumber(0);
4954 else
4955 restLength = jsNumber(argumentsLength - bytecode.m_numParametersToSkip);
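                // Worked example (hypothetical call site): inlining `function f(a, ...rest) {}` at
                // a call `f(1, 2, 3)` gives argumentCountIncludingThis == 4, so argumentsLength == 3;
                // with m_numParametersToSkip == 1 (the named parameter `a`), restLength becomes 2.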
4956
4957 length = jsConstant(restLength);
4958 } else
4959 length = addToGraph(GetRestLength, OpInfo(bytecode.m_numParametersToSkip));
4960 set(bytecode.m_dst, length);
4961 NEXT_OPCODE(op_get_rest_length);
4962 }
4963
4964 case op_create_rest: {
4965 auto bytecode = currentInstruction->as<OpCreateRest>();
4966 noticeArgumentsUse();
4967 Node* arrayLength = get(bytecode.m_arraySize);
4968 set(bytecode.m_dst,
4969 addToGraph(CreateRest, OpInfo(bytecode.m_numParametersToSkip), arrayLength));
4970 NEXT_OPCODE(op_create_rest);
4971 }
4972
4973 // === Bitwise operations ===
4974
4975 case op_bitnot: {
4976 auto bytecode = currentInstruction->as<OpBitnot>();
4977 SpeculatedType prediction = getPrediction();
4978 Node* op1 = get(bytecode.m_operand);
4979 if (op1->hasNumberOrAnyIntResult())
4980 set(bytecode.m_dst, addToGraph(ArithBitNot, op1));
4981 else
4982 set(bytecode.m_dst, addToGraph(ValueBitNot, OpInfo(), OpInfo(prediction), op1));
4983 NEXT_OPCODE(op_bitnot);
4984 }
4985
4986 case op_bitand: {
4987 auto bytecode = currentInstruction->as<OpBitand>();
4988 SpeculatedType prediction = getPrediction();
4989 Node* op1 = get(bytecode.m_lhs);
4990 Node* op2 = get(bytecode.m_rhs);
4991 if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
4992 set(bytecode.m_dst, addToGraph(ArithBitAnd, op1, op2));
4993 else
4994 set(bytecode.m_dst, addToGraph(ValueBitAnd, OpInfo(), OpInfo(prediction), op1, op2));
4995 NEXT_OPCODE(op_bitand);
4996 }
4997
4998 case op_bitor: {
4999 auto bytecode = currentInstruction->as<OpBitor>();
5000 SpeculatedType prediction = getPrediction();
5001 Node* op1 = get(bytecode.m_lhs);
5002 Node* op2 = get(bytecode.m_rhs);
5003 if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
5004 set(bytecode.m_dst, addToGraph(ArithBitOr, op1, op2));
5005 else
5006 set(bytecode.m_dst, addToGraph(ValueBitOr, OpInfo(), OpInfo(prediction), op1, op2));
5007 NEXT_OPCODE(op_bitor);
5008 }
5009
5010 case op_bitxor: {
5011 auto bytecode = currentInstruction->as<OpBitxor>();
5012 SpeculatedType prediction = getPrediction();
5013 Node* op1 = get(bytecode.m_lhs);
5014 Node* op2 = get(bytecode.m_rhs);
5015 if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
5016 set(bytecode.m_dst, addToGraph(ArithBitXor, op1, op2));
5017 else
5018 set(bytecode.m_dst, addToGraph(ValueBitXor, OpInfo(), OpInfo(prediction), op1, op2));
5019 NEXT_OPCODE(op_bitxor);
5020 }
5021
5022 case op_rshift: {
5023 auto bytecode = currentInstruction->as<OpRshift>();
5024 Node* op1 = get(bytecode.m_lhs);
5025 Node* op2 = get(bytecode.m_rhs);
5026 set(bytecode.m_dst, addToGraph(BitRShift, op1, op2));
5027 NEXT_OPCODE(op_rshift);
5028 }
5029
5030 case op_lshift: {
5031 auto bytecode = currentInstruction->as<OpLshift>();
5032 Node* op1 = get(bytecode.m_lhs);
5033 Node* op2 = get(bytecode.m_rhs);
5034 set(bytecode.m_dst, addToGraph(BitLShift, op1, op2));
5035 NEXT_OPCODE(op_lshift);
5036 }
5037
5038 case op_urshift: {
5039 auto bytecode = currentInstruction->as<OpUrshift>();
5040 Node* op1 = get(bytecode.m_lhs);
5041 Node* op2 = get(bytecode.m_rhs);
5042 set(bytecode.m_dst, addToGraph(BitURShift, op1, op2));
5043 NEXT_OPCODE(op_urshift);
5044 }
5045
5046 case op_unsigned: {
5047 auto bytecode = currentInstruction->as<OpUnsigned>();
5048 set(bytecode.m_dst, makeSafe(addToGraph(UInt32ToNumber, get(bytecode.m_operand))));
5049 NEXT_OPCODE(op_unsigned);
5050 }
5051
5052 // === Increment/Decrement opcodes ===
5053
5054 case op_inc: {
5055 auto bytecode = currentInstruction->as<OpInc>();
5056 Node* op = get(bytecode.m_srcDst);
5057 set(bytecode.m_srcDst, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
5058 NEXT_OPCODE(op_inc);
5059 }
5060
5061 case op_dec: {
5062 auto bytecode = currentInstruction->as<OpDec>();
5063 Node* op = get(bytecode.m_srcDst);
5064 set(bytecode.m_srcDst, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
5065 NEXT_OPCODE(op_dec);
5066 }
5067
5068 // === Arithmetic operations ===
5069
5070 case op_add: {
5071 auto bytecode = currentInstruction->as<OpAdd>();
5072 Node* op1 = get(bytecode.m_lhs);
5073 Node* op2 = get(bytecode.m_rhs);
5074 if (op1->hasNumberResult() && op2->hasNumberResult())
5075 set(bytecode.m_dst, makeSafe(addToGraph(ArithAdd, op1, op2)));
5076 else
5077 set(bytecode.m_dst, makeSafe(addToGraph(ValueAdd, op1, op2)));
5078 NEXT_OPCODE(op_add);
5079 }
5080
5081 case op_sub: {
5082 auto bytecode = currentInstruction->as<OpSub>();
5083 Node* op1 = get(bytecode.m_lhs);
5084 Node* op2 = get(bytecode.m_rhs);
5085 if (op1->hasNumberResult() && op2->hasNumberResult())
5086 set(bytecode.m_dst, makeSafe(addToGraph(ArithSub, op1, op2)));
5087 else
5088 set(bytecode.m_dst, makeSafe(addToGraph(ValueSub, op1, op2)));
5089 NEXT_OPCODE(op_sub);
5090 }
5091
5092 case op_negate: {
5093 auto bytecode = currentInstruction->as<OpNegate>();
5094 Node* op1 = get(bytecode.m_operand);
5095 if (op1->hasNumberResult())
5096 set(bytecode.m_dst, makeSafe(addToGraph(ArithNegate, op1)));
5097 else
5098 set(bytecode.m_dst, makeSafe(addToGraph(ValueNegate, op1)));
5099 NEXT_OPCODE(op_negate);
5100 }
5101
5102 case op_mul: {
5103 // Multiply requires that the inputs are not truncated, unfortunately.
5104 auto bytecode = currentInstruction->as<OpMul>();
5105 Node* op1 = get(bytecode.m_lhs);
5106 Node* op2 = get(bytecode.m_rhs);
5107 if (op1->hasNumberResult() && op2->hasNumberResult())
5108 set(bytecode.m_dst, makeSafe(addToGraph(ArithMul, op1, op2)));
5109 else
5110 set(bytecode.m_dst, makeSafe(addToGraph(ValueMul, op1, op2)));
5111 NEXT_OPCODE(op_mul);
5112 }
5113
5114 case op_mod: {
5115 auto bytecode = currentInstruction->as<OpMod>();
5116 Node* op1 = get(bytecode.m_lhs);
5117 Node* op2 = get(bytecode.m_rhs);
5118 if (op1->hasNumberResult() && op2->hasNumberResult())
5119 set(bytecode.m_dst, makeSafe(addToGraph(ArithMod, op1, op2)));
5120 else
5121 set(bytecode.m_dst, makeSafe(addToGraph(ValueMod, op1, op2)));
5122 NEXT_OPCODE(op_mod);
5123 }
5124
5125 case op_pow: {
5126 auto bytecode = currentInstruction->as<OpPow>();
5127 Node* op1 = get(bytecode.m_lhs);
5128 Node* op2 = get(bytecode.m_rhs);
5129 if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
5130 set(bytecode.m_dst, addToGraph(ArithPow, op1, op2));
5131 else
5132 set(bytecode.m_dst, addToGraph(ValuePow, op1, op2));
5133 NEXT_OPCODE(op_pow);
5134 }
5135
5136 case op_div: {
5137 auto bytecode = currentInstruction->as<OpDiv>();
5138 Node* op1 = get(bytecode.m_lhs);
5139 Node* op2 = get(bytecode.m_rhs);
5140 if (op1->hasNumberResult() && op2->hasNumberResult())
5141 set(bytecode.m_dst, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
5142 else
5143 set(bytecode.m_dst, makeDivSafe(addToGraph(ValueDiv, op1, op2)));
5144 NEXT_OPCODE(op_div);
5145 }
5146
5147 // === Misc operations ===
5148
5149 case op_debug: {
5150 // This is a nop in the DFG/FTL because when we set a breakpoint in the debugger,
5151            // we will jettison all optimized CodeBlocks that contain the breakpoint.
5152 addToGraph(Check); // We add a nop here so that basic block linking doesn't break.
5153 NEXT_OPCODE(op_debug);
5154 }
5155
5156 case op_mov: {
5157 auto bytecode = currentInstruction->as<OpMov>();
5158 Node* op = get(bytecode.m_src);
5159 set(bytecode.m_dst, op);
5160 NEXT_OPCODE(op_mov);
5161 }
5162
5163 case op_check_tdz: {
5164 auto bytecode = currentInstruction->as<OpCheckTdz>();
5165 addToGraph(CheckNotEmpty, get(bytecode.m_targetVirtualRegister));
5166 NEXT_OPCODE(op_check_tdz);
5167 }
5168
5169 case op_overrides_has_instance: {
5170 auto bytecode = currentInstruction->as<OpOverridesHasInstance>();
5171 JSFunction* defaultHasInstanceSymbolFunction = m_inlineStackTop->m_codeBlock->globalObjectFor(currentCodeOrigin())->functionProtoHasInstanceSymbolFunction();
5172
5173 Node* constructor = get(VirtualRegister(bytecode.m_constructor));
5174 Node* hasInstanceValue = get(VirtualRegister(bytecode.m_hasInstanceValue));
5175
5176 set(VirtualRegister(bytecode.m_dst), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue));
5177 NEXT_OPCODE(op_overrides_has_instance);
5178 }
5179
5180 case op_identity_with_profile: {
5181 auto bytecode = currentInstruction->as<OpIdentityWithProfile>();
5182 Node* srcDst = get(bytecode.m_srcDst);
5183 SpeculatedType speculation = static_cast<SpeculatedType>(bytecode.m_topProfile) << 32 | static_cast<SpeculatedType>(bytecode.m_bottomProfile);
5184 set(bytecode.m_srcDst, addToGraph(IdentityWithProfile, OpInfo(speculation), srcDst));
5185 NEXT_OPCODE(op_identity_with_profile);
5186 }
5187
5188 case op_instanceof: {
5189 auto bytecode = currentInstruction->as<OpInstanceof>();
5190
5191 InstanceOfStatus status = InstanceOfStatus::computeFor(
5192 m_inlineStackTop->m_profiledBlock, m_inlineStackTop->m_baselineMap,
5193 m_currentIndex);
5194
5195 Node* value = get(bytecode.m_value);
5196 Node* prototype = get(bytecode.m_prototype);
5197
5198 // Only inline it if it's Simple with a commonPrototype; bottom/top or variable
5199 // prototypes both get handled by the IC. This makes sense for bottom (unprofiled)
5200 // instanceof ICs because the profit of this optimization is fairly low. So, in the
5201            // absence of any information, it's better to avoid making this the cause of a
5202 // recompilation.
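            // Hypothetical example of the fast path below: for `x instanceof C`, if every profiled
            // execution saw the same C.prototype object P, we CheckCell the prototype operand
            // against P and then use MatchStructure on `x`, where each structure observed for `x`
            // is recorded along with whether it was a hit (P on its prototype chain) or a miss.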
5203 if (JSObject* commonPrototype = status.commonPrototype()) {
5204 addToGraph(CheckCell, OpInfo(m_graph.freeze(commonPrototype)), prototype);
5205
5206 bool allOK = true;
5207 MatchStructureData* data = m_graph.m_matchStructureData.add();
5208 for (const InstanceOfVariant& variant : status.variants()) {
5209 if (!check(variant.conditionSet())) {
5210 allOK = false;
5211 break;
5212 }
5213 for (Structure* structure : variant.structureSet()) {
5214 MatchStructureVariant matchVariant;
5215 matchVariant.structure = m_graph.registerStructure(structure);
5216 matchVariant.result = variant.isHit();
5217
5218 data->variants.append(WTFMove(matchVariant));
5219 }
5220 }
5221
5222 if (allOK) {
5223 Node* match = addToGraph(MatchStructure, OpInfo(data), value);
5224 set(bytecode.m_dst, match);
5225 NEXT_OPCODE(op_instanceof);
5226 }
5227 }
5228
5229 set(bytecode.m_dst, addToGraph(InstanceOf, value, prototype));
5230 NEXT_OPCODE(op_instanceof);
5231 }
5232
5233 case op_instanceof_custom: {
5234 auto bytecode = currentInstruction->as<OpInstanceofCustom>();
5235 Node* value = get(bytecode.m_value);
5236 Node* constructor = get(bytecode.m_constructor);
5237 Node* hasInstanceValue = get(bytecode.m_hasInstanceValue);
5238 set(bytecode.m_dst, addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue));
5239 NEXT_OPCODE(op_instanceof_custom);
5240 }
5241 case op_is_empty: {
5242 auto bytecode = currentInstruction->as<OpIsEmpty>();
5243 Node* value = get(bytecode.m_operand);
5244 set(bytecode.m_dst, addToGraph(IsEmpty, value));
5245 NEXT_OPCODE(op_is_empty);
5246 }
5247 case op_is_undefined: {
5248 auto bytecode = currentInstruction->as<OpIsUndefined>();
5249 Node* value = get(bytecode.m_operand);
5250 set(bytecode.m_dst, addToGraph(IsUndefined, value));
5251 NEXT_OPCODE(op_is_undefined);
5252 }
5253 case op_is_undefined_or_null: {
5254 auto bytecode = currentInstruction->as<OpIsUndefinedOrNull>();
5255 Node* value = get(bytecode.m_operand);
5256 set(bytecode.m_dst, addToGraph(IsUndefinedOrNull, value));
5257 NEXT_OPCODE(op_is_undefined_or_null);
5258 }
5259
5260 case op_is_boolean: {
5261 auto bytecode = currentInstruction->as<OpIsBoolean>();
5262 Node* value = get(bytecode.m_operand);
5263 set(bytecode.m_dst, addToGraph(IsBoolean, value));
5264 NEXT_OPCODE(op_is_boolean);
5265 }
5266
5267 case op_is_number: {
5268 auto bytecode = currentInstruction->as<OpIsNumber>();
5269 Node* value = get(bytecode.m_operand);
5270 set(bytecode.m_dst, addToGraph(IsNumber, value));
5271 NEXT_OPCODE(op_is_number);
5272 }
5273
5274 case op_is_cell_with_type: {
5275 auto bytecode = currentInstruction->as<OpIsCellWithType>();
5276 Node* value = get(bytecode.m_operand);
5277 set(bytecode.m_dst, addToGraph(IsCellWithType, OpInfo(bytecode.m_type), value));
5278 NEXT_OPCODE(op_is_cell_with_type);
5279 }
5280
5281 case op_is_object: {
5282 auto bytecode = currentInstruction->as<OpIsObject>();
5283 Node* value = get(bytecode.m_operand);
5284 set(bytecode.m_dst, addToGraph(IsObject, value));
5285 NEXT_OPCODE(op_is_object);
5286 }
5287
5288 case op_is_object_or_null: {
5289 auto bytecode = currentInstruction->as<OpIsObjectOrNull>();
5290 Node* value = get(bytecode.m_operand);
5291 set(bytecode.m_dst, addToGraph(IsObjectOrNull, value));
5292 NEXT_OPCODE(op_is_object_or_null);
5293 }
5294
5295 case op_is_function: {
5296 auto bytecode = currentInstruction->as<OpIsFunction>();
5297 Node* value = get(bytecode.m_operand);
5298 set(bytecode.m_dst, addToGraph(IsFunction, value));
5299 NEXT_OPCODE(op_is_function);
5300 }
5301
5302 case op_not: {
5303 auto bytecode = currentInstruction->as<OpNot>();
5304 Node* value = get(bytecode.m_operand);
5305 set(bytecode.m_dst, addToGraph(LogicalNot, value));
5306 NEXT_OPCODE(op_not);
5307 }
5308
5309 case op_to_primitive: {
5310 auto bytecode = currentInstruction->as<OpToPrimitive>();
5311 Node* value = get(bytecode.m_src);
5312 set(bytecode.m_dst, addToGraph(ToPrimitive, value));
5313 NEXT_OPCODE(op_to_primitive);
5314 }
5315
5316 case op_strcat: {
5317 auto bytecode = currentInstruction->as<OpStrcat>();
5318 int startOperand = bytecode.m_src.offset();
5319 int numOperands = bytecode.m_count;
5320#if CPU(X86)
5321 // X86 doesn't have enough registers to compile MakeRope with three arguments. The
5322 // StrCat we emit here may be turned into a MakeRope. Rather than try to be clever,
5323 // we just make StrCat dumber on this processor.
5324 const unsigned maxArguments = 2;
5325#else
5326 const unsigned maxArguments = 3;
5327#endif
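            // Sketch of the folding loop below (illustrative operand names): with five operands
            // a..e and maxArguments == 3, the first three fill the buffer, the fourth forces an
            // intermediate node, and we end up emitting StrCat(StrCat(a, b, c), d, e).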
5328 Node* operands[AdjacencyList::Size];
5329 unsigned indexInOperands = 0;
5330 for (unsigned i = 0; i < AdjacencyList::Size; ++i)
5331 operands[i] = 0;
5332 for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
5333 if (indexInOperands == maxArguments) {
5334 operands[0] = addToGraph(StrCat, operands[0], operands[1], operands[2]);
5335 for (unsigned i = 1; i < AdjacencyList::Size; ++i)
5336 operands[i] = 0;
5337 indexInOperands = 1;
5338 }
5339
5340 ASSERT(indexInOperands < AdjacencyList::Size);
5341 ASSERT(indexInOperands < maxArguments);
5342 operands[indexInOperands++] = get(VirtualRegister(startOperand - operandIdx));
5343 }
5344 set(bytecode.m_dst, addToGraph(StrCat, operands[0], operands[1], operands[2]));
5345 NEXT_OPCODE(op_strcat);
5346 }
5347
5348 case op_less: {
5349 auto bytecode = currentInstruction->as<OpLess>();
5350 Node* op1 = get(bytecode.m_lhs);
5351 Node* op2 = get(bytecode.m_rhs);
5352 set(bytecode.m_dst, addToGraph(CompareLess, op1, op2));
5353 NEXT_OPCODE(op_less);
5354 }
5355
5356 case op_lesseq: {
5357 auto bytecode = currentInstruction->as<OpLesseq>();
5358 Node* op1 = get(bytecode.m_lhs);
5359 Node* op2 = get(bytecode.m_rhs);
5360 set(bytecode.m_dst, addToGraph(CompareLessEq, op1, op2));
5361 NEXT_OPCODE(op_lesseq);
5362 }
5363
5364 case op_greater: {
5365 auto bytecode = currentInstruction->as<OpGreater>();
5366 Node* op1 = get(bytecode.m_lhs);
5367 Node* op2 = get(bytecode.m_rhs);
5368 set(bytecode.m_dst, addToGraph(CompareGreater, op1, op2));
5369 NEXT_OPCODE(op_greater);
5370 }
5371
5372 case op_greatereq: {
5373 auto bytecode = currentInstruction->as<OpGreatereq>();
5374 Node* op1 = get(bytecode.m_lhs);
5375 Node* op2 = get(bytecode.m_rhs);
5376 set(bytecode.m_dst, addToGraph(CompareGreaterEq, op1, op2));
5377 NEXT_OPCODE(op_greatereq);
5378 }
5379
5380 case op_below: {
5381 auto bytecode = currentInstruction->as<OpBelow>();
5382 Node* op1 = get(bytecode.m_lhs);
5383 Node* op2 = get(bytecode.m_rhs);
5384 set(bytecode.m_dst, addToGraph(CompareBelow, op1, op2));
5385 NEXT_OPCODE(op_below);
5386 }
5387
5388 case op_beloweq: {
5389 auto bytecode = currentInstruction->as<OpBeloweq>();
5390 Node* op1 = get(bytecode.m_lhs);
5391 Node* op2 = get(bytecode.m_rhs);
5392 set(bytecode.m_dst, addToGraph(CompareBelowEq, op1, op2));
5393 NEXT_OPCODE(op_beloweq);
5394 }
5395
5396 case op_eq: {
5397 auto bytecode = currentInstruction->as<OpEq>();
5398 Node* op1 = get(bytecode.m_lhs);
5399 Node* op2 = get(bytecode.m_rhs);
5400 set(bytecode.m_dst, addToGraph(CompareEq, op1, op2));
5401 NEXT_OPCODE(op_eq);
5402 }
5403
5404 case op_eq_null: {
5405 auto bytecode = currentInstruction->as<OpEqNull>();
5406 Node* value = get(bytecode.m_operand);
5407 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
5408 set(bytecode.m_dst, addToGraph(CompareEq, value, nullConstant));
5409 NEXT_OPCODE(op_eq_null);
5410 }
5411
5412 case op_stricteq: {
5413 auto bytecode = currentInstruction->as<OpStricteq>();
5414 Node* op1 = get(bytecode.m_lhs);
5415 Node* op2 = get(bytecode.m_rhs);
5416 set(bytecode.m_dst, addToGraph(CompareStrictEq, op1, op2));
5417 NEXT_OPCODE(op_stricteq);
5418 }
5419
5420 case op_neq: {
5421 auto bytecode = currentInstruction->as<OpNeq>();
5422 Node* op1 = get(bytecode.m_lhs);
5423 Node* op2 = get(bytecode.m_rhs);
5424 set(bytecode.m_dst, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
5425 NEXT_OPCODE(op_neq);
5426 }
5427
5428 case op_neq_null: {
5429 auto bytecode = currentInstruction->as<OpNeqNull>();
5430 Node* value = get(bytecode.m_operand);
5431 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
5432 set(bytecode.m_dst, addToGraph(LogicalNot, addToGraph(CompareEq, value, nullConstant)));
5433 NEXT_OPCODE(op_neq_null);
5434 }
5435
5436 case op_nstricteq: {
5437 auto bytecode = currentInstruction->as<OpNstricteq>();
5438 Node* op1 = get(bytecode.m_lhs);
5439 Node* op2 = get(bytecode.m_rhs);
5440 Node* invertedResult;
5441 invertedResult = addToGraph(CompareStrictEq, op1, op2);
5442 set(bytecode.m_dst, addToGraph(LogicalNot, invertedResult));
5443 NEXT_OPCODE(op_nstricteq);
5444 }
5445
5446 // === Property access operations ===
5447
5448 case op_get_by_val: {
5449 auto bytecode = currentInstruction->as<OpGetByVal>();
5450 SpeculatedType prediction = getPredictionWithoutOSRExit();
5451
5452 Node* base = get(bytecode.m_base);
5453 Node* property = get(bytecode.m_property);
5454 bool compiledAsGetById = false;
5455 GetByIdStatus getByIdStatus;
5456 unsigned identifierNumber = 0;
5457 {
5458 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
5459 ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex())).byValInfo;
5460 // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null.
5461                // In that case, we have no information.
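                // Illustrative scenario for the check below: if the baseline ByVal IC only ever saw
                // one property key for `base[key]` (say the string "length", or one particular
                // Symbol), it records that identifier. We can then pin the key with CheckStringIdent
                // (or CheckCell for a Symbol) and compile the access exactly like a get_by_id.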
5462 if (byValInfo
5463 && byValInfo->stubInfo
5464 && !byValInfo->tookSlowPath
5465 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent)
5466 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)
5467 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
5468 compiledAsGetById = true;
5469 identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl());
5470 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
5471
5472 if (Symbol* symbol = byValInfo->cachedSymbol.get()) {
5473 FrozenValue* frozen = m_graph.freezeStrong(symbol);
5474 addToGraph(CheckCell, OpInfo(frozen), property);
5475 } else {
5476 ASSERT(!uid->isSymbol());
5477 addToGraph(CheckStringIdent, OpInfo(uid), property);
5478 }
5479
5480 getByIdStatus = GetByIdStatus::computeForStubInfo(
5481 locker, m_inlineStackTop->m_profiledBlock,
5482 byValInfo->stubInfo, currentCodeOrigin(), uid);
5483 }
5484 }
5485
5486 if (compiledAsGetById)
5487 handleGetById(bytecode.m_dst, prediction, base, identifierNumber, getByIdStatus, AccessType::Get, currentInstruction->size());
5488 else {
5489 ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read);
5490 // FIXME: We could consider making this not vararg, since it only uses three child
5491 // slots.
5492 // https://bugs.webkit.org/show_bug.cgi?id=184192
5493 addVarArgChild(base);
5494 addVarArgChild(property);
5495 addVarArgChild(0); // Leave room for property storage.
5496 Node* getByVal = addToGraph(Node::VarArg, GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction));
5497 m_exitOK = false; // GetByVal must be treated as if it clobbers exit state, since FixupPhase may make it generic.
5498 set(bytecode.m_dst, getByVal);
5499 }
5500
5501 NEXT_OPCODE(op_get_by_val);
5502 }
5503
5504 case op_get_by_val_with_this: {
5505 auto bytecode = currentInstruction->as<OpGetByValWithThis>();
5506 SpeculatedType prediction = getPrediction();
5507
5508 Node* base = get(bytecode.m_base);
5509 Node* thisValue = get(bytecode.m_thisValue);
5510 Node* property = get(bytecode.m_property);
5511 Node* getByValWithThis = addToGraph(GetByValWithThis, OpInfo(), OpInfo(prediction), base, thisValue, property);
5512 set(bytecode.m_dst, getByValWithThis);
5513
5514 NEXT_OPCODE(op_get_by_val_with_this);
5515 }
5516
5517 case op_put_by_val_direct:
5518 handlePutByVal(currentInstruction->as<OpPutByValDirect>(), currentInstruction->size());
5519 NEXT_OPCODE(op_put_by_val_direct);
5520
5521 case op_put_by_val: {
5522 handlePutByVal(currentInstruction->as<OpPutByVal>(), currentInstruction->size());
5523 NEXT_OPCODE(op_put_by_val);
5524 }
5525
5526 case op_put_by_val_with_this: {
5527 auto bytecode = currentInstruction->as<OpPutByValWithThis>();
5528 Node* base = get(bytecode.m_base);
5529 Node* thisValue = get(bytecode.m_thisValue);
5530 Node* property = get(bytecode.m_property);
5531 Node* value = get(bytecode.m_value);
5532
5533 addVarArgChild(base);
5534 addVarArgChild(thisValue);
5535 addVarArgChild(property);
5536 addVarArgChild(value);
5537 addToGraph(Node::VarArg, PutByValWithThis, OpInfo(0), OpInfo(0));
5538
5539 NEXT_OPCODE(op_put_by_val_with_this);
5540 }
5541
5542 case op_define_data_property: {
5543 auto bytecode = currentInstruction->as<OpDefineDataProperty>();
5544 Node* base = get(bytecode.m_base);
5545 Node* property = get(bytecode.m_property);
5546 Node* value = get(bytecode.m_value);
5547 Node* attributes = get(bytecode.m_attributes);
5548
5549 addVarArgChild(base);
5550 addVarArgChild(property);
5551 addVarArgChild(value);
5552 addVarArgChild(attributes);
5553 addToGraph(Node::VarArg, DefineDataProperty, OpInfo(0), OpInfo(0));
5554
5555 NEXT_OPCODE(op_define_data_property);
5556 }
5557
5558 case op_define_accessor_property: {
5559 auto bytecode = currentInstruction->as<OpDefineAccessorProperty>();
5560 Node* base = get(bytecode.m_base);
5561 Node* property = get(bytecode.m_property);
5562 Node* getter = get(bytecode.m_getter);
5563 Node* setter = get(bytecode.m_setter);
5564 Node* attributes = get(bytecode.m_attributes);
5565
5566 addVarArgChild(base);
5567 addVarArgChild(property);
5568 addVarArgChild(getter);
5569 addVarArgChild(setter);
5570 addVarArgChild(attributes);
5571 addToGraph(Node::VarArg, DefineAccessorProperty, OpInfo(0), OpInfo(0));
5572
5573 NEXT_OPCODE(op_define_accessor_property);
5574 }
5575
5576 case op_get_by_id_direct: {
5577 parseGetById<OpGetByIdDirect>(currentInstruction);
5578 NEXT_OPCODE(op_get_by_id_direct);
5579 }
5580 case op_try_get_by_id: {
5581 parseGetById<OpTryGetById>(currentInstruction);
5582 NEXT_OPCODE(op_try_get_by_id);
5583 }
5584 case op_get_by_id: {
5585 parseGetById<OpGetById>(currentInstruction);
5586 NEXT_OPCODE(op_get_by_id);
5587 }
5588 case op_get_by_id_with_this: {
5589 SpeculatedType prediction = getPrediction();
5590
5591 auto bytecode = currentInstruction->as<OpGetByIdWithThis>();
5592 Node* base = get(bytecode.m_base);
5593 Node* thisValue = get(bytecode.m_thisValue);
5594 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
5595
5596 set(bytecode.m_dst,
5597 addToGraph(GetByIdWithThis, OpInfo(identifierNumber), OpInfo(prediction), base, thisValue));
5598
5599 NEXT_OPCODE(op_get_by_id_with_this);
5600 }
5601 case op_put_by_id: {
5602 auto bytecode = currentInstruction->as<OpPutById>();
5603 Node* value = get(bytecode.m_value);
5604 Node* base = get(bytecode.m_base);
5605 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
5606 bool direct = !!(bytecode.m_flags & PutByIdIsDirect);
5607
5608 PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
5609 m_inlineStackTop->m_profiledBlock,
5610 m_inlineStackTop->m_baselineMap, m_icContextStack,
5611 currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
5612
5613 handlePutById(base, identifierNumber, value, putByIdStatus, direct, currentInstruction->size());
5614 NEXT_OPCODE(op_put_by_id);
5615 }
5616
5617 case op_put_by_id_with_this: {
5618 auto bytecode = currentInstruction->as<OpPutByIdWithThis>();
5619 Node* base = get(bytecode.m_base);
5620 Node* thisValue = get(bytecode.m_thisValue);
5621 Node* value = get(bytecode.m_value);
5622 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
5623
5624 addToGraph(PutByIdWithThis, OpInfo(identifierNumber), base, thisValue, value);
5625 NEXT_OPCODE(op_put_by_id_with_this);
5626 }
5627
5628 case op_put_getter_by_id:
5629 handlePutAccessorById(PutGetterById, currentInstruction->as<OpPutGetterById>());
5630 NEXT_OPCODE(op_put_getter_by_id);
5631 case op_put_setter_by_id: {
5632 handlePutAccessorById(PutSetterById, currentInstruction->as<OpPutSetterById>());
5633 NEXT_OPCODE(op_put_setter_by_id);
5634 }
5635
5636 case op_put_getter_setter_by_id: {
5637 auto bytecode = currentInstruction->as<OpPutGetterSetterById>();
5638 Node* base = get(bytecode.m_base);
5639 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
5640 Node* getter = get(bytecode.m_getter);
5641 Node* setter = get(bytecode.m_setter);
5642 addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo(bytecode.m_attributes), base, getter, setter);
5643 NEXT_OPCODE(op_put_getter_setter_by_id);
5644 }
5645
5646 case op_put_getter_by_val:
5647 handlePutAccessorByVal(PutGetterByVal, currentInstruction->as<OpPutGetterByVal>());
5648 NEXT_OPCODE(op_put_getter_by_val);
5649 case op_put_setter_by_val: {
5650 handlePutAccessorByVal(PutSetterByVal, currentInstruction->as<OpPutSetterByVal>());
5651 NEXT_OPCODE(op_put_setter_by_val);
5652 }
5653
5654 case op_del_by_id: {
5655 auto bytecode = currentInstruction->as<OpDelById>();
5656 Node* base = get(bytecode.m_base);
5657 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
5658 set(bytecode.m_dst, addToGraph(DeleteById, OpInfo(identifierNumber), base));
5659 NEXT_OPCODE(op_del_by_id);
5660 }
5661
5662 case op_del_by_val: {
5663 auto bytecode = currentInstruction->as<OpDelByVal>();
5664 Node* base = get(bytecode.m_base);
5665 Node* key = get(bytecode.m_property);
5666 set(bytecode.m_dst, addToGraph(DeleteByVal, base, key));
5667 NEXT_OPCODE(op_del_by_val);
5668 }
5669
5670 case op_profile_type: {
5671 auto bytecode = currentInstruction->as<OpProfileType>();
5672 auto& metadata = bytecode.metadata(codeBlock);
5673 Node* valueToProfile = get(bytecode.m_targetVirtualRegister);
5674 addToGraph(ProfileType, OpInfo(metadata.m_typeLocation), valueToProfile);
5675 NEXT_OPCODE(op_profile_type);
5676 }
5677
5678 case op_profile_control_flow: {
5679 auto bytecode = currentInstruction->as<OpProfileControlFlow>();
5680 BasicBlockLocation* basicBlockLocation = bytecode.metadata(codeBlock).m_basicBlockLocation;
5681 addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation));
5682 NEXT_OPCODE(op_profile_control_flow);
5683 }
5684
5685 // === Block terminators. ===
5686
5687 case op_jmp: {
5688 ASSERT(!m_currentBlock->terminal());
5689 auto bytecode = currentInstruction->as<OpJmp>();
5690 int relativeOffset = jumpTarget(bytecode.m_targetLabel);
5691 addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
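            // A jump that is not strictly forward may be a loop back edge, so conservatively flush
            // locals for the (possibly already parsed) target block.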
5692 if (relativeOffset <= 0)
5693 flushForTerminal();
5694 LAST_OPCODE(op_jmp);
5695 }
5696
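        // Note on the conditional jumps below: branchData() takes the "taken" bytecode index first and
        // the "not taken" index second, where "taken" means the condition node evaluated to true. The
        // positive forms (op_jtrue, op_jless, ...) pass the jump target as the taken index, while the
        // negated forms (op_jfalse, op_jnless, ...) reuse the same comparison node and simply swap the
        // two indices.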
5697 case op_jtrue: {
5698 auto bytecode = currentInstruction->as<OpJtrue>();
5699 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5700 Node* condition = get(bytecode.m_condition);
5701 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5702 LAST_OPCODE(op_jtrue);
5703 }
5704
5705 case op_jfalse: {
5706 auto bytecode = currentInstruction->as<OpJfalse>();
5707 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5708 Node* condition = get(bytecode.m_condition);
5709 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5710 LAST_OPCODE(op_jfalse);
5711 }
5712
5713 case op_jeq_null: {
5714 auto bytecode = currentInstruction->as<OpJeqNull>();
5715 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5716 Node* value = get(bytecode.m_value);
5717 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
5718 Node* condition = addToGraph(CompareEq, value, nullConstant);
5719 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5720 LAST_OPCODE(op_jeq_null);
5721 }
5722
5723 case op_jneq_null: {
5724 auto bytecode = currentInstruction->as<OpJneqNull>();
5725 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5726 Node* value = get(bytecode.m_value);
5727 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
5728 Node* condition = addToGraph(CompareEq, value, nullConstant);
5729 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5730 LAST_OPCODE(op_jneq_null);
5731 }
5732
5733 case op_jless: {
5734 auto bytecode = currentInstruction->as<OpJless>();
5735 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5736 Node* op1 = get(bytecode.m_lhs);
5737 Node* op2 = get(bytecode.m_rhs);
5738 Node* condition = addToGraph(CompareLess, op1, op2);
5739 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5740 LAST_OPCODE(op_jless);
5741 }
5742
5743 case op_jlesseq: {
5744 auto bytecode = currentInstruction->as<OpJlesseq>();
5745 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5746 Node* op1 = get(bytecode.m_lhs);
5747 Node* op2 = get(bytecode.m_rhs);
5748 Node* condition = addToGraph(CompareLessEq, op1, op2);
5749 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5750 LAST_OPCODE(op_jlesseq);
5751 }
5752
5753 case op_jgreater: {
5754 auto bytecode = currentInstruction->as<OpJgreater>();
5755 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5756 Node* op1 = get(bytecode.m_lhs);
5757 Node* op2 = get(bytecode.m_rhs);
5758 Node* condition = addToGraph(CompareGreater, op1, op2);
5759 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5760 LAST_OPCODE(op_jgreater);
5761 }
5762
5763 case op_jgreatereq: {
5764 auto bytecode = currentInstruction->as<OpJgreatereq>();
5765 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5766 Node* op1 = get(bytecode.m_lhs);
5767 Node* op2 = get(bytecode.m_rhs);
5768 Node* condition = addToGraph(CompareGreaterEq, op1, op2);
5769 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5770 LAST_OPCODE(op_jgreatereq);
5771 }
5772
5773 case op_jeq: {
5774 auto bytecode = currentInstruction->as<OpJeq>();
5775 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5776 Node* op1 = get(bytecode.m_lhs);
5777 Node* op2 = get(bytecode.m_rhs);
5778 Node* condition = addToGraph(CompareEq, op1, op2);
5779 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5780 LAST_OPCODE(op_jeq);
5781 }
5782
5783 case op_jstricteq: {
5784 auto bytecode = currentInstruction->as<OpJstricteq>();
5785 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5786 Node* op1 = get(bytecode.m_lhs);
5787 Node* op2 = get(bytecode.m_rhs);
5788 Node* condition = addToGraph(CompareStrictEq, op1, op2);
5789 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5790 LAST_OPCODE(op_jstricteq);
5791 }
5792
5793 case op_jnless: {
5794 auto bytecode = currentInstruction->as<OpJnless>();
5795 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5796 Node* op1 = get(bytecode.m_lhs);
5797 Node* op2 = get(bytecode.m_rhs);
5798 Node* condition = addToGraph(CompareLess, op1, op2);
5799 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5800 LAST_OPCODE(op_jnless);
5801 }
5802
5803 case op_jnlesseq: {
5804 auto bytecode = currentInstruction->as<OpJnlesseq>();
5805 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5806 Node* op1 = get(bytecode.m_lhs);
5807 Node* op2 = get(bytecode.m_rhs);
5808 Node* condition = addToGraph(CompareLessEq, op1, op2);
5809 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5810 LAST_OPCODE(op_jnlesseq);
5811 }
5812
5813 case op_jngreater: {
5814 auto bytecode = currentInstruction->as<OpJngreater>();
5815 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5816 Node* op1 = get(bytecode.m_lhs);
5817 Node* op2 = get(bytecode.m_rhs);
5818 Node* condition = addToGraph(CompareGreater, op1, op2);
5819 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5820 LAST_OPCODE(op_jngreater);
5821 }
5822
5823 case op_jngreatereq: {
5824 auto bytecode = currentInstruction->as<OpJngreatereq>();
5825 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5826 Node* op1 = get(bytecode.m_lhs);
5827 Node* op2 = get(bytecode.m_rhs);
5828 Node* condition = addToGraph(CompareGreaterEq, op1, op2);
5829 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5830 LAST_OPCODE(op_jngreatereq);
5831 }
5832
5833 case op_jneq: {
5834 auto bytecode = currentInstruction->as<OpJneq>();
5835 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5836 Node* op1 = get(bytecode.m_lhs);
5837 Node* op2 = get(bytecode.m_rhs);
5838 Node* condition = addToGraph(CompareEq, op1, op2);
5839 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5840 LAST_OPCODE(op_jneq);
5841 }
5842
5843 case op_jnstricteq: {
5844 auto bytecode = currentInstruction->as<OpJnstricteq>();
5845 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5846 Node* op1 = get(bytecode.m_lhs);
5847 Node* op2 = get(bytecode.m_rhs);
5848 Node* condition = addToGraph(CompareStrictEq, op1, op2);
5849 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5850 LAST_OPCODE(op_jnstricteq);
5851 }
5852
5853 case op_jbelow: {
5854 auto bytecode = currentInstruction->as<OpJbelow>();
5855 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5856 Node* op1 = get(bytecode.m_lhs);
5857 Node* op2 = get(bytecode.m_rhs);
5858 Node* condition = addToGraph(CompareBelow, op1, op2);
5859 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5860 LAST_OPCODE(op_jbelow);
5861 }
5862
5863 case op_jbeloweq: {
5864 auto bytecode = currentInstruction->as<OpJbeloweq>();
5865 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5866 Node* op1 = get(bytecode.m_lhs);
5867 Node* op2 = get(bytecode.m_rhs);
5868 Node* condition = addToGraph(CompareBelowEq, op1, op2);
5869 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5870 LAST_OPCODE(op_jbeloweq);
5871 }
5872
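        // The switch opcodes below each lower to a single Switch node. Every populated entry in the
        // baseline jump table becomes a SwitchCase; empty entries and entries that would branch to the
        // default target are simply left to the fallThrough edge.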
5873 case op_switch_imm: {
5874 auto bytecode = currentInstruction->as<OpSwitchImm>();
5875 SwitchData& data = *m_graph.m_switchData.add();
5876 data.kind = SwitchImm;
5877 data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex];
5878 data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
5879 SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
5880 for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
5881 if (!table.branchOffsets[i])
5882 continue;
5883 unsigned target = m_currentIndex + table.branchOffsets[i];
5884 if (target == data.fallThrough.bytecodeIndex())
5885 continue;
5886 data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
5887 }
5888 addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee));
5889 flushIfTerminal(data);
5890 LAST_OPCODE(op_switch_imm);
5891 }
5892
5893 case op_switch_char: {
5894 auto bytecode = currentInstruction->as<OpSwitchChar>();
5895 SwitchData& data = *m_graph.m_switchData.add();
5896 data.kind = SwitchChar;
5897 data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex];
5898 data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
5899 SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
5900 for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
5901 if (!table.branchOffsets[i])
5902 continue;
5903 unsigned target = m_currentIndex + table.branchOffsets[i];
5904 if (target == data.fallThrough.bytecodeIndex())
5905 continue;
5906 data.cases.append(
5907 SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
5908 }
5909 addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee));
5910 flushIfTerminal(data);
5911 LAST_OPCODE(op_switch_char);
5912 }
5913
5914 case op_switch_string: {
5915 auto bytecode = currentInstruction->as<OpSwitchString>();
5916 SwitchData& data = *m_graph.m_switchData.add();
5917 data.kind = SwitchString;
5918 data.switchTableIndex = bytecode.m_tableIndex;
5919 data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
5920 StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
5921 StringJumpTable::StringOffsetTable::iterator iter;
5922 StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
5923 for (iter = table.offsetTable.begin(); iter != end; ++iter) {
5924 unsigned target = m_currentIndex + iter->value.branchOffset;
5925 if (target == data.fallThrough.bytecodeIndex())
5926 continue;
5927 data.cases.append(
5928 SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
5929 }
5930 addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee));
5931 flushIfTerminal(data);
5932 LAST_OPCODE(op_switch_string);
5933 }
5934
5935 case op_ret: {
5936 auto bytecode = currentInstruction->as<OpRet>();
5937 ASSERT(!m_currentBlock->terminal());
5938 if (!inlineCallFrame()) {
                // Simple case: we are not inlined, so we just produce a Return.
5940 addToGraph(Return, get(bytecode.m_value));
5941 flushForReturn();
5942 LAST_OPCODE(op_ret);
5943 }
5944
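            // Inlined case: instead of emitting a Return node, write the result into the caller's
            // return-value register (if there is one) and route control to the continuation block.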
5945 flushForReturn();
5946 if (m_inlineStackTop->m_returnValue.isValid())
5947 setDirect(m_inlineStackTop->m_returnValue, get(bytecode.m_value), ImmediateSetWithFlush);
5948
5949 if (!m_inlineStackTop->m_continuationBlock && m_currentIndex + currentInstruction->size() != m_inlineStackTop->m_codeBlock->instructions().size()) {
                // This is an early return from an inlined function and we do not have a continuation block, so we must allocate one.
                // It is untargetable because we do not know the appropriate bytecode index.
                // If this block turns out to be a jump target, parseCodeBlock will fix its bytecodeIndex before putting it in m_blockLinkingTargets.
5953 m_inlineStackTop->m_continuationBlock = allocateUntargetableBlock();
5954 }
5955
5956 if (m_inlineStackTop->m_continuationBlock)
5957 addJumpTo(m_inlineStackTop->m_continuationBlock);
5958 else {
                // We are returning from an inlined function and do not need to jump anywhere, so we just keep the current block.
5960 m_inlineStackTop->m_continuationBlock = m_currentBlock;
5961 }
5962 LAST_OPCODE_LINKED(op_ret);
5963 }
5964 case op_end:
5965 ASSERT(!inlineCallFrame());
5966 addToGraph(Return, get(currentInstruction->as<OpEnd>().m_value));
5967 flushForReturn();
5968 LAST_OPCODE(op_end);
5969
5970 case op_throw:
5971 addToGraph(Throw, get(currentInstruction->as<OpThrow>().m_value));
5972 flushForTerminal();
5973 LAST_OPCODE(op_throw);
5974
5975 case op_throw_static_error: {
5976 auto bytecode = currentInstruction->as<OpThrowStaticError>();
5977 addToGraph(ThrowStaticError, OpInfo(bytecode.m_errorType), get(bytecode.m_message));
5978 flushForTerminal();
5979 LAST_OPCODE(op_throw_static_error);
5980 }
5981
5982 case op_catch: {
5983 auto bytecode = currentInstruction->as<OpCatch>();
5984 m_graph.m_hasExceptionHandlers = true;
5985
5986 if (inlineCallFrame()) {
5987 // We can't do OSR entry into an inlined frame.
5988 NEXT_OPCODE(op_catch);
5989 }
5990
5991 if (m_graph.m_plan.mode() == FTLForOSREntryMode) {
5992 NEXT_OPCODE(op_catch);
5993 }
5994
            RELEASE_ASSERT(!m_currentBlock->size() || (m_graph.compilation() && m_currentBlock->size() == 1 && m_currentBlock->at(0)->op() == CountExecution));
5996
5997 ValueProfileAndOperandBuffer* buffer = bytecode.metadata(codeBlock).m_buffer;
5998
5999 if (!buffer) {
6000 NEXT_OPCODE(op_catch); // This catch has yet to execute. Note: this load can be racy with the main thread.
6001 }
6002
6003 // We're now committed to compiling this as an entrypoint.
6004 m_currentBlock->isCatchEntrypoint = true;
6005 m_graph.m_roots.append(m_currentBlock);
6006
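            // The rest of this case reconstructs the state an exception handler entry needs: argument
            // predictions come from the profile buffer, and live locals are materialized with
            // ExtractCatchLocal nodes so that OSR entry at this op_catch can restore them.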
6007 Vector<SpeculatedType> argumentPredictions(m_numArguments);
6008 Vector<SpeculatedType> localPredictions;
6009 HashSet<unsigned, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> seenArguments;
6010
6011 {
6012 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
6013
                buffer->forEach([&] (ValueProfileAndOperand& profile) {
6015 VirtualRegister operand(profile.m_operand);
6016 SpeculatedType prediction = profile.computeUpdatedPrediction(locker);
6017 if (operand.isLocal())
6018 localPredictions.append(prediction);
6019 else {
6020 RELEASE_ASSERT(operand.isArgument());
6021 RELEASE_ASSERT(static_cast<uint32_t>(operand.toArgument()) < argumentPredictions.size());
6022 if (validationEnabled())
6023 seenArguments.add(operand.toArgument());
6024 argumentPredictions[operand.toArgument()] = prediction;
6025 }
6026 });
6027
6028 if (validationEnabled()) {
6029 for (unsigned argument = 0; argument < m_numArguments; ++argument)
6030 RELEASE_ASSERT(seenArguments.contains(argument));
6031 }
6032 }
6033
6034 Vector<std::pair<VirtualRegister, Node*>> localsToSet;
6035 localsToSet.reserveInitialCapacity(buffer->m_size); // Note: This will reserve more than the number of locals we see below because the buffer includes arguments.
6036
6037 // We're not allowed to exit here since we would not properly recover values.
6038 // We first need to bootstrap the catch entrypoint state.
6039 m_exitOK = false;
6040
6041 unsigned numberOfLocals = 0;
6042 buffer->forEach([&] (ValueProfileAndOperand& profile) {
6043 VirtualRegister operand(profile.m_operand);
6044 if (operand.isArgument())
6045 return;
6046 ASSERT(operand.isLocal());
                Node* value = addToGraph(ExtractCatchLocal, OpInfo(numberOfLocals), OpInfo(localPredictions[numberOfLocals]));
6048 ++numberOfLocals;
6049 addToGraph(MovHint, OpInfo(profile.m_operand), value);
6050 localsToSet.uncheckedAppend(std::make_pair(operand, value));
6051 });
6052 if (numberOfLocals)
6053 addToGraph(ClearCatchLocals);
6054
6055 if (!m_graph.m_maxLocalsForCatchOSREntry)
6056 m_graph.m_maxLocalsForCatchOSREntry = 0;
6057 m_graph.m_maxLocalsForCatchOSREntry = std::max(numberOfLocals, *m_graph.m_maxLocalsForCatchOSREntry);
6058
6059 // We could not exit before this point in the program because we would not know how to do value
6060 // recovery for live locals. The above IR sets up the necessary state so we can recover values
6061 // during OSR exit.
6062 //
6063 // The nodes that follow here all exit to the following bytecode instruction, not
6064 // the op_catch. Exiting to op_catch is reserved for when an exception is thrown.
            // The SetArgumentDefinitely nodes that follow may exit because we may hoist type checks
            // to them. The SetLocal nodes that follow may exit because we may choose
6067 // a flush format that speculates on the type of the local.
6068 m_exitOK = true;
6069 addToGraph(ExitOK);
6070
6071 {
6072 auto addResult = m_graph.m_rootToArguments.add(m_currentBlock, ArgumentsVector());
6073 RELEASE_ASSERT(addResult.isNewEntry);
6074 ArgumentsVector& entrypointArguments = addResult.iterator->value;
6075 entrypointArguments.resize(m_numArguments);
6076
6077 unsigned exitBytecodeIndex = m_currentIndex + currentInstruction->size();
6078
6079 for (unsigned argument = 0; argument < argumentPredictions.size(); ++argument) {
6080 VariableAccessData* variable = newVariableAccessData(virtualRegisterForArgument(argument));
6081 variable->predict(argumentPredictions[argument]);
6082
6083 variable->mergeStructureCheckHoistingFailed(
6084 m_inlineStackTop->m_exitProfile.hasExitSite(exitBytecodeIndex, BadCache));
6085 variable->mergeCheckArrayHoistingFailed(
6086 m_inlineStackTop->m_exitProfile.hasExitSite(exitBytecodeIndex, BadIndexingType));
6087
6088 Node* setArgument = addToGraph(SetArgumentDefinitely, OpInfo(variable));
                setArgument->origin.forExit = CodeOrigin(exitBytecodeIndex, setArgument->origin.forExit.inlineCallFrame());
6090 m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
6091 entrypointArguments[argument] = setArgument;
6092 }
6093 }
6094
6095 for (const std::pair<VirtualRegister, Node*>& pair : localsToSet) {
6096 DelayedSetLocal delayed { currentCodeOrigin(), pair.first, pair.second, ImmediateNakedSet };
6097 m_setLocalQueue.append(delayed);
6098 }
6099
6100 NEXT_OPCODE(op_catch);
6101 }
6102
6103 case op_call:
6104 handleCall<OpCall>(currentInstruction, Call, CallMode::Regular);
            ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
6106 NEXT_OPCODE(op_call);
6107
6108 case op_tail_call: {
6109 flushForReturn();
6110 Terminality terminality = handleCall<OpTailCall>(currentInstruction, TailCall, CallMode::Tail);
            ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
6112 // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function.
6113 // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean
6114 // things up.
6115 if (terminality == NonTerminal)
6116 NEXT_OPCODE(op_tail_call);
6117 else
6118 LAST_OPCODE_LINKED(op_tail_call);
6119 // We use LAST_OPCODE_LINKED instead of LAST_OPCODE because if the tail call was optimized, it may now be a jump to a bytecode index in a different InlineStackEntry.
6120 }
6121
6122 case op_construct:
6123 handleCall<OpConstruct>(currentInstruction, Construct, CallMode::Construct);
            ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
6125 NEXT_OPCODE(op_construct);
6126
6127 case op_call_varargs: {
6128 handleVarargsCall<OpCallVarargs>(currentInstruction, CallVarargs, CallMode::Regular);
            ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
6130 NEXT_OPCODE(op_call_varargs);
6131 }
6132
6133 case op_tail_call_varargs: {
6134 flushForReturn();
6135 Terminality terminality = handleVarargsCall<OpTailCallVarargs>(currentInstruction, TailCallVarargs, CallMode::Tail);
            ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
6137 // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function.
6138 // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean
6139 // things up.
6140 if (terminality == NonTerminal)
6141 NEXT_OPCODE(op_tail_call_varargs);
6142 else
6143 LAST_OPCODE(op_tail_call_varargs);
6144 }
6145
6146 case op_tail_call_forward_arguments: {
6147 // We need to make sure that we don't unbox our arguments here since that won't be
6148 // done by the arguments object creation node as that node may not exist.
6149 noticeArgumentsUse();
6150 flushForReturn();
6151 Terminality terminality = handleVarargsCall<OpTailCallForwardArguments>(currentInstruction, TailCallForwardVarargs, CallMode::Tail);
            ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
6153 // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function.
6154 // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean
6155 // things up.
6156 if (terminality == NonTerminal)
6157 NEXT_OPCODE(op_tail_call_forward_arguments);
6158 else
6159 LAST_OPCODE(op_tail_call_forward_arguments);
6160 }
6161
6162 case op_construct_varargs: {
6163 handleVarargsCall<OpConstructVarargs>(currentInstruction, ConstructVarargs, CallMode::Construct);
            ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
6165 NEXT_OPCODE(op_construct_varargs);
6166 }
6167
6168 case op_call_eval: {
6169 auto bytecode = currentInstruction->as<OpCallEval>();
6170 int registerOffset = -bytecode.m_argv;
6171 addCall(bytecode.m_dst, CallEval, nullptr, get(bytecode.m_callee), bytecode.m_argc, registerOffset, getPrediction());
6172 NEXT_OPCODE(op_call_eval);
6173 }
6174
6175 case op_jneq_ptr: {
6176 auto bytecode = currentInstruction->as<OpJneqPtr>();
6177 Special::Pointer specialPointer = bytecode.m_specialPointer;
6178 ASSERT(pointerIsCell(specialPointer));
6179 JSCell* actualPointer = static_cast<JSCell*>(
6180 actualPointerFor(m_inlineStackTop->m_codeBlock, specialPointer));
6181 FrozenValue* frozenPointer = m_graph.freeze(actualPointer);
6182 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
6183 Node* child = get(bytecode.m_value);
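            // If profiling has never observed this jump being taken, speculate that the value is always
            // the expected cell: emit a CheckCell, which OSR exits on mismatch, instead of a real branch.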
6184 if (bytecode.metadata(codeBlock).m_hasJumped) {
6185 Node* condition = addToGraph(CompareEqPtr, OpInfo(frozenPointer), child);
                addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
6187 LAST_OPCODE(op_jneq_ptr);
6188 }
6189 addToGraph(CheckCell, OpInfo(frozenPointer), child);
6190 NEXT_OPCODE(op_jneq_ptr);
6191 }
6192
6193 case op_resolve_scope: {
6194 auto bytecode = currentInstruction->as<OpResolveScope>();
6195 auto& metadata = bytecode.metadata(codeBlock);
6196
6197 ResolveType resolveType;
6198 unsigned depth;
6199 JSScope* constantScope = nullptr;
6200 JSCell* lexicalEnvironment = nullptr;
6201 SymbolTable* symbolTable = nullptr;
6202 {
6203 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
6204 resolveType = metadata.m_resolveType;
6205 depth = metadata.m_localScopeDepth;
6206 switch (resolveType) {
6207 case GlobalProperty:
6208 case GlobalVar:
6209 case GlobalPropertyWithVarInjectionChecks:
6210 case GlobalVarWithVarInjectionChecks:
6211 case GlobalLexicalVar:
6212 case GlobalLexicalVarWithVarInjectionChecks:
6213 constantScope = metadata.m_constantScope.get();
6214 break;
6215 case ModuleVar:
6216 lexicalEnvironment = metadata.m_lexicalEnvironment.get();
6217 break;
6218 case LocalClosureVar:
6219 case ClosureVar:
6220 case ClosureVarWithVarInjectionChecks:
6221 symbolTable = metadata.m_symbolTable.get();
6222 break;
6223 default:
6224 break;
6225 }
6226 }
6227
6228 if (needsDynamicLookup(resolveType, op_resolve_scope)) {
6229 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var];
6230 set(bytecode.m_dst, addToGraph(ResolveScope, OpInfo(identifierNumber), get(bytecode.m_scope)));
6231 NEXT_OPCODE(op_resolve_scope);
6232 }
6233
6234 // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
6235 if (needsVarInjectionChecks(resolveType))
6236 m_graph.watchpoints().addLazily(m_inlineStackTop->m_codeBlock->globalObject()->varInjectionWatchpoint());
6237
            // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment. This case should be removed once that is fixed.
6239 // https://bugs.webkit.org/show_bug.cgi?id=193347
6240 if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) {
6241 if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks) {
6242 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
6243 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var];
6244 if (!m_graph.watchGlobalProperty(globalObject, identifierNumber))
6245 addToGraph(ForceOSRExit);
6246 }
6247 }
6248
6249 switch (resolveType) {
6250 case GlobalProperty:
6251 case GlobalVar:
6252 case GlobalPropertyWithVarInjectionChecks:
6253 case GlobalVarWithVarInjectionChecks:
6254 case GlobalLexicalVar:
6255 case GlobalLexicalVarWithVarInjectionChecks: {
6256 RELEASE_ASSERT(constantScope);
            RELEASE_ASSERT(constantScope == JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock));
6258 set(bytecode.m_dst, weakJSConstant(constantScope));
6259 addToGraph(Phantom, get(bytecode.m_scope));
6260 break;
6261 }
6262 case ModuleVar: {
            // Since the value of the "scope" virtual register is not used by LLInt / baseline op_resolve_scope with ModuleVar,
            // we do not need to keep it alive with a Phantom node.
            // The module environment is already strongly referenced by the CodeBlock.
6266 set(bytecode.m_dst, weakJSConstant(lexicalEnvironment));
6267 break;
6268 }
6269 case LocalClosureVar:
6270 case ClosureVar:
6271 case ClosureVarWithVarInjectionChecks: {
6272 Node* localBase = get(bytecode.m_scope);
6273 addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope.
6274
6275 // We have various forms of constant folding here. This is necessary to avoid
6276 // spurious recompiles in dead-but-foldable code.
6277
6278 if (symbolTable) {
6279 if (JSScope* scope = symbolTable->singleton().inferredValue()) {
6280 m_graph.watchpoints().addLazily(symbolTable);
6281 set(bytecode.m_dst, weakJSConstant(scope));
6282 break;
6283 }
6284 }
6285 if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>(*m_vm)) {
6286 for (unsigned n = depth; n--;)
6287 scope = scope->next();
6288 set(bytecode.m_dst, weakJSConstant(scope));
6289 break;
6290 }
6291 for (unsigned n = depth; n--;)
6292 localBase = addToGraph(SkipScope, localBase);
6293 set(bytecode.m_dst, localBase);
6294 break;
6295 }
6296 case UnresolvedProperty:
6297 case UnresolvedPropertyWithVarInjectionChecks: {
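            // The baseline code has not resolved this scope access yet, so we cannot know what it
            // resolves to; force an OSR exit. The null constant merely keeps the graph well-formed,
            // since DFG execution never continues past the ForceOSRExit.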
6298 addToGraph(Phantom, get(bytecode.m_scope));
6299 addToGraph(ForceOSRExit);
6300 set(bytecode.m_dst, addToGraph(JSConstant, OpInfo(m_constantNull)));
6301 break;
6302 }
6303 case Dynamic:
6304 RELEASE_ASSERT_NOT_REACHED();
6305 break;
6306 }
6307 NEXT_OPCODE(op_resolve_scope);
6308 }
6309 case op_resolve_scope_for_hoisting_func_decl_in_eval: {
6310 auto bytecode = currentInstruction->as<OpResolveScopeForHoistingFuncDeclInEval>();
6311 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
6312 set(bytecode.m_dst, addToGraph(ResolveScopeForHoistingFuncDeclInEval, OpInfo(identifierNumber), get(bytecode.m_scope)));
6313
6314 NEXT_OPCODE(op_resolve_scope_for_hoisting_func_decl_in_eval);
6315 }
6316
6317 case op_get_from_scope: {
6318 auto bytecode = currentInstruction->as<OpGetFromScope>();
6319 auto& metadata = bytecode.metadata(codeBlock);
6320 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var];
6321 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
6322
6323 ResolveType resolveType;
6324 GetPutInfo getPutInfo(0);
6325 Structure* structure = 0;
6326 WatchpointSet* watchpoints = 0;
6327 uintptr_t operand;
6328 {
6329 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
6330 getPutInfo = metadata.m_getPutInfo;
6331 resolveType = getPutInfo.resolveType();
6332 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
6333 watchpoints = metadata.m_watchpointSet;
6334 else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks)
6335 structure = metadata.m_structure.get();
6336 operand = metadata.m_operand;
6337 }
6338
6339 if (needsDynamicLookup(resolveType, op_get_from_scope)) {
6340 uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, getPutInfo.operand());
6341 SpeculatedType prediction = getPrediction();
6342 set(bytecode.m_dst,
6343 addToGraph(GetDynamicVar, OpInfo(opInfo1), OpInfo(prediction), get(bytecode.m_scope)));
6344 NEXT_OPCODE(op_get_from_scope);
6345 }
6346
            UNUSED_PARAM(watchpoints); // We will use this in the future. For now we set it as a way of documenting what index 5 is in GlobalVar mode.
6348
6349 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
6350
6351 switch (resolveType) {
6352 case GlobalProperty:
6353 case GlobalPropertyWithVarInjectionChecks: {
            // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment. This case should be removed once that is fixed.
6355 // https://bugs.webkit.org/show_bug.cgi?id=193347
6356 if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) {
6357 if (!m_graph.watchGlobalProperty(globalObject, identifierNumber))
6358 addToGraph(ForceOSRExit);
6359 }
6360
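            // A global property access behaves like a get_by_id on the global object. If the profiled
            // status is a single simple variant with one structure, inline the load against a weak
            // constant of the global object; anything else goes through GetByIdFlush.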
6361 SpeculatedType prediction = getPrediction();
6362
6363 GetByIdStatus status = GetByIdStatus::computeFor(structure, uid);
6364 if (status.state() != GetByIdStatus::Simple
6365 || status.numVariants() != 1
6366 || status[0].structureSet().size() != 1) {
6367 set(bytecode.m_dst, addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(bytecode.m_scope)));
6368 break;
6369 }
6370
6371 Node* base = weakJSConstant(globalObject);
6372 Node* result = load(prediction, base, identifierNumber, status[0]);
6373 addToGraph(Phantom, get(bytecode.m_scope));
6374 set(bytecode.m_dst, result);
6375 break;
6376 }
6377 case GlobalVar:
6378 case GlobalVarWithVarInjectionChecks:
6379 case GlobalLexicalVar:
6380 case GlobalLexicalVarWithVarInjectionChecks: {
6381 addToGraph(Phantom, get(bytecode.m_scope));
6382 WatchpointSet* watchpointSet;
6383 ScopeOffset offset;
            JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock));
6385 {
6386 ConcurrentJSLocker locker(scopeObject->symbolTable()->m_lock);
6387 SymbolTableEntry entry = scopeObject->symbolTable()->get(locker, uid);
6388 watchpointSet = entry.watchpointSet();
6389 offset = entry.scopeOffset();
6390 }
6391 if (watchpointSet && watchpointSet->state() == IsWatched) {
6392 // This has a fun concurrency story. There is the possibility of a race in two
6393 // directions:
6394 //
6395 // We see that the set IsWatched, but in the meantime it gets invalidated: this is
6396 // fine because if we saw that it IsWatched then we add a watchpoint. If it gets
6397 // invalidated, then this compilation is invalidated. Note that in the meantime we
6398 // may load an absurd value from the global object. It's fine to load an absurd
6399 // value if the compilation is invalidated anyway.
6400 //
6401 // We see that the set IsWatched, but the value isn't yet initialized: this isn't
6402 // possible because of the ordering of operations.
6403 //
6404 // Here's how we order operations:
6405 //
6406 // Main thread stores to the global object: always store a value first, and only
6407 // after that do we touch the watchpoint set. There is a fence in the touch, that
6408 // ensures that the store to the global object always happens before the touch on the
6409 // set.
6410 //
6411 // Compilation thread: always first load the state of the watchpoint set, and then
6412 // load the value. The WatchpointSet::state() method does fences for us to ensure
6413 // that the load of the state happens before our load of the value.
6414 //
6415 // Finalizing compilation: this happens on the main thread and synchronously checks
6416 // validity of all watchpoint sets.
6417 //
6418 // We will only perform optimizations if the load of the state yields IsWatched. That
6419 // means that at least one store would have happened to initialize the original value
6420 // of the variable (that is, the value we'd like to constant fold to). There may be
6421 // other stores that happen after that, but those stores will invalidate the
6422 // watchpoint set and also the compilation.
6423
                // Note that we need to use the operand, which is a direct pointer to the global,
6425 // rather than looking up the global by doing variableAt(offset). That's because the
6426 // internal data structures of JSSegmentedVariableObject are not thread-safe even
6427 // though accessing the global itself is. The segmentation involves a vector spine
6428 // that resizes with malloc/free, so if new globals unrelated to the one we are
6429 // reading are added, we might access freed memory if we do variableAt().
6430 WriteBarrier<Unknown>* pointer = bitwise_cast<WriteBarrier<Unknown>*>(operand);
6431
6432 ASSERT(scopeObject->findVariableIndex(pointer) == offset);
6433
6434 JSValue value = pointer->get();
6435 if (value) {
6436 m_graph.watchpoints().addLazily(watchpointSet);
6437 set(bytecode.m_dst, weakJSConstant(value));
6438 break;
6439 }
6440 }
6441
6442 SpeculatedType prediction = getPrediction();
6443 NodeType nodeType;
6444 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
6445 nodeType = GetGlobalVar;
6446 else
6447 nodeType = GetGlobalLexicalVariable;
6448 Node* value = addToGraph(nodeType, OpInfo(operand), OpInfo(prediction));
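            // A global lexical variable may still be in its temporal dead zone, in which case its slot
            // holds the empty value, so guard the read with CheckNotEmpty. Plain global vars are
            // initialized to undefined up front and can never be empty.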
6449 if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
6450 addToGraph(CheckNotEmpty, value);
6451 set(bytecode.m_dst, value);
6452 break;
6453 }
6454 case LocalClosureVar:
6455 case ClosureVar:
6456 case ClosureVarWithVarInjectionChecks: {
6457 Node* scopeNode = get(bytecode.m_scope);
6458
6459 // Ideally we wouldn't have to do this Phantom. But:
6460 //
6461 // For the constant case: we must do it because otherwise we would have no way of knowing
6462 // that the scope is live at OSR here.
6463 //
6464 // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation
6465 // won't be able to handle an Undefined scope.
6466 addToGraph(Phantom, scopeNode);
6467
6468 // Constant folding in the bytecode parser is important for performance. This may not
6469 // have executed yet. If it hasn't, then we won't have a prediction. Lacking a
6470 // prediction, we'd otherwise think that it has to exit. Then when it did execute, we
6471 // would recompile. But if we can fold it here, we avoid the exit.
6472 if (JSValue value = m_graph.tryGetConstantClosureVar(scopeNode, ScopeOffset(operand))) {
6473 set(bytecode.m_dst, weakJSConstant(value));
6474 break;
6475 }
6476 SpeculatedType prediction = getPrediction();
6477 set(bytecode.m_dst,
6478 addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), scopeNode));
6479 break;
6480 }
6481 case UnresolvedProperty:
6482 case UnresolvedPropertyWithVarInjectionChecks:
6483 case ModuleVar:
6484 case Dynamic:
6485 RELEASE_ASSERT_NOT_REACHED();
6486 break;
6487 }
6488 NEXT_OPCODE(op_get_from_scope);
6489 }
6490
6491 case op_put_to_scope: {
6492 auto bytecode = currentInstruction->as<OpPutToScope>();
6493 auto& metadata = bytecode.metadata(codeBlock);
6494 unsigned identifierNumber = bytecode.m_var;
6495 if (identifierNumber != UINT_MAX)
6496 identifierNumber = m_inlineStackTop->m_identifierRemap[identifierNumber];
6497 UniquedStringImpl* uid;
6498 if (identifierNumber != UINT_MAX)
6499 uid = m_graph.identifiers()[identifierNumber];
6500 else
6501 uid = nullptr;
6502
6503 ResolveType resolveType;
6504 GetPutInfo getPutInfo(0);
6505 Structure* structure = nullptr;
6506 WatchpointSet* watchpoints = nullptr;
6507 uintptr_t operand;
6508 {
6509 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
6510 getPutInfo = metadata.m_getPutInfo;
6511 resolveType = getPutInfo.resolveType();
            if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
6513 watchpoints = metadata.m_watchpointSet;
6514 else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks)
6515 structure = metadata.m_structure.get();
6516 operand = metadata.m_operand;
6517 }
6518
6519 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
6520
6521 if (needsDynamicLookup(resolveType, op_put_to_scope)) {
6522 ASSERT(identifierNumber != UINT_MAX);
6523 uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, getPutInfo.operand());
6524 addToGraph(PutDynamicVar, OpInfo(opInfo1), OpInfo(), get(bytecode.m_scope), get(bytecode.m_value));
6525 NEXT_OPCODE(op_put_to_scope);
6526 }
6527
6528 switch (resolveType) {
6529 case GlobalProperty:
6530 case GlobalPropertyWithVarInjectionChecks: {
            // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment. This case should be removed once that is fixed.
6532 // https://bugs.webkit.org/show_bug.cgi?id=193347
6533 if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) {
6534 if (!m_graph.watchGlobalProperty(globalObject, identifierNumber))
6535 addToGraph(ForceOSRExit);
6536 }
6537
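            // Symmetrically to op_get_from_scope, a global property store is lowered like a put_by_id
            // on the global object: only a single simple Replace variant is inlined; everything else
            // uses a generic PutById.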
6538 PutByIdStatus status;
6539 if (uid)
6540 status = PutByIdStatus::computeFor(globalObject, structure, uid, false);
6541 else
6542 status = PutByIdStatus(PutByIdStatus::TakesSlowPath);
            if (status.numVariants() != 1
6544 || status[0].kind() != PutByIdVariant::Replace
6545 || status[0].structure().size() != 1) {
6546 addToGraph(PutById, OpInfo(identifierNumber), get(bytecode.m_scope), get(bytecode.m_value));
6547 break;
6548 }
6549 Node* base = weakJSConstant(globalObject);
6550 store(base, identifierNumber, status[0], get(bytecode.m_value));
6551 // Keep scope alive until after put.
6552 addToGraph(Phantom, get(bytecode.m_scope));
6553 break;
6554 }
6555 case GlobalLexicalVar:
6556 case GlobalLexicalVarWithVarInjectionChecks:
6557 case GlobalVar:
6558 case GlobalVarWithVarInjectionChecks: {
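            // A store that is not the initializing store of a global lexical variable must first observe
            // that the binding has been initialized; reading it back and applying CheckNotEmpty enforces
            // the temporal dead zone.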
            if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {
6560 SpeculatedType prediction = SpecEmpty;
6561 Node* value = addToGraph(GetGlobalLexicalVariable, OpInfo(operand), OpInfo(prediction));
6562 addToGraph(CheckNotEmpty, value);
6563 }
6564
            JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock));
6566 if (watchpoints) {
6567 SymbolTableEntry entry = scopeObject->symbolTable()->get(uid);
6568 ASSERT_UNUSED(entry, watchpoints == entry.watchpointSet());
6569 }
6570 Node* valueNode = get(bytecode.m_value);
            addToGraph(PutGlobalVariable, OpInfo(operand), weakJSConstant(scopeObject), valueNode);
6572 if (watchpoints && watchpoints->state() != IsInvalidated) {
6573 // Must happen after the store. See comment for GetGlobalVar.
6574 addToGraph(NotifyWrite, OpInfo(watchpoints));
6575 }
6576 // Keep scope alive until after put.
6577 addToGraph(Phantom, get(bytecode.m_scope));
6578 break;
6579 }
6580 case LocalClosureVar:
6581 case ClosureVar:
6582 case ClosureVarWithVarInjectionChecks: {
6583 Node* scopeNode = get(bytecode.m_scope);
6584 Node* valueNode = get(bytecode.m_value);
6585
6586 addToGraph(PutClosureVar, OpInfo(operand), scopeNode, valueNode);
6587
6588 if (watchpoints && watchpoints->state() != IsInvalidated) {
6589 // Must happen after the store. See comment for GetGlobalVar.
6590 addToGraph(NotifyWrite, OpInfo(watchpoints));
6591 }
6592 break;
6593 }
6594
6595 case ModuleVar:
            // There is no need to keep the "scope" and "value" register values alive with Phantom nodes here
            // because they are not used by LLInt / baseline op_put_to_scope with ModuleVar.
6598 addToGraph(ForceOSRExit);
6599 break;
6600
6601 case Dynamic:
6602 case UnresolvedProperty:
6603 case UnresolvedPropertyWithVarInjectionChecks:
6604 RELEASE_ASSERT_NOT_REACHED();
6605 break;
6606 }
6607 NEXT_OPCODE(op_put_to_scope);
6608 }
6609
6610 case op_loop_hint: {
6611 // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
6612 // OSR can only happen at basic block boundaries. Assert that these two statements
6613 // are compatible.
6614 RELEASE_ASSERT(m_currentIndex == blockBegin);
6615
6616 // We never do OSR into an inlined code block. That could not happen, since OSR
6617 // looks up the code block that is the replacement for the baseline JIT code
6618 // block. Hence, machine code block = true code block = not inline code block.
6619 if (!m_inlineStackTop->m_caller)
6620 m_currentBlock->isOSRTarget = true;
6621
6622 addToGraph(LoopHint);
6623 NEXT_OPCODE(op_loop_hint);
6624 }
6625
6626 case op_check_traps: {
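            // When polling traps are enabled we emit an explicit CheckTraps node; otherwise traps are
            // delivered by invalidating the machine code, so an InvalidationPoint suffices.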
6627 addToGraph(Options::usePollingTraps() ? CheckTraps : InvalidationPoint);
6628 NEXT_OPCODE(op_check_traps);
6629 }
6630
6631 case op_nop: {
6632 addToGraph(Check); // We add a nop here so that basic block linking doesn't break.
6633 NEXT_OPCODE(op_nop);
6634 }
6635
6636 case op_super_sampler_begin: {
6637 addToGraph(SuperSamplerBegin);
6638 NEXT_OPCODE(op_super_sampler_begin);
6639 }
6640
6641 case op_super_sampler_end: {
6642 addToGraph(SuperSamplerEnd);
6643 NEXT_OPCODE(op_super_sampler_end);
6644 }
6645
6646 case op_create_lexical_environment: {
6647 auto bytecode = currentInstruction->as<OpCreateLexicalEnvironment>();
6648 ASSERT(bytecode.m_symbolTable.isConstant() && bytecode.m_initialValue.isConstant());
6649 FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_symbolTable.offset()));
6650 FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_initialValue.offset()));
6651 Node* scope = get(bytecode.m_scope);
6652 Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), OpInfo(initialValue), scope);
6653 set(bytecode.m_dst, lexicalEnvironment);
6654 NEXT_OPCODE(op_create_lexical_environment);
6655 }
6656
6657 case op_push_with_scope: {
6658 auto bytecode = currentInstruction->as<OpPushWithScope>();
6659 Node* currentScope = get(bytecode.m_currentScope);
6660 Node* object = get(bytecode.m_newScope);
6661 set(bytecode.m_dst, addToGraph(PushWithScope, currentScope, object));
6662 NEXT_OPCODE(op_push_with_scope);
6663 }
6664
6665 case op_get_parent_scope: {
6666 auto bytecode = currentInstruction->as<OpGetParentScope>();
6667 Node* currentScope = get(bytecode.m_scope);
6668 Node* newScope = addToGraph(SkipScope, currentScope);
6669 set(bytecode.m_dst, newScope);
6670 addToGraph(Phantom, currentScope);
6671 NEXT_OPCODE(op_get_parent_scope);
6672 }
6673
6674 case op_get_scope: {
6675 // Help the later stages a bit by doing some small constant folding here. Note that this
6676 // only helps for the first basic block. It's extremely important not to constant fold
6677 // loads from the scope register later, as that would prevent the DFG from tracking the
6678 // bytecode-level liveness of the scope register.
6679 auto bytecode = currentInstruction->as<OpGetScope>();
6680 Node* callee = get(VirtualRegister(CallFrameSlot::callee));
6681 Node* result;
6682 if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm))
6683 result = weakJSConstant(function->scope());
6684 else
6685 result = addToGraph(GetScope, callee);
6686 set(bytecode.m_dst, result);
6687 NEXT_OPCODE(op_get_scope);
6688 }
6689
6690 case op_argument_count: {
6691 auto bytecode = currentInstruction->as<OpArgumentCount>();
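            // getArgumentCount() includes |this| while op_argument_count does not, hence the subtraction
            // of one. Arith::Unchecked is safe because the count is always at least one.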
6692 Node* sub = addToGraph(ArithSub, OpInfo(Arith::Unchecked), OpInfo(SpecInt32Only), getArgumentCount(), addToGraph(JSConstant, OpInfo(m_constantOne)));
6693 set(bytecode.m_dst, sub);
6694 NEXT_OPCODE(op_argument_count);
6695 }
6696
6697 case op_create_direct_arguments: {
6698 auto bytecode = currentInstruction->as<OpCreateDirectArguments>();
6699 noticeArgumentsUse();
6700 Node* createArguments = addToGraph(CreateDirectArguments);
6701 set(bytecode.m_dst, createArguments);
6702 NEXT_OPCODE(op_create_direct_arguments);
6703 }
6704
6705 case op_create_scoped_arguments: {
6706 auto bytecode = currentInstruction->as<OpCreateScopedArguments>();
6707 noticeArgumentsUse();
6708 Node* createArguments = addToGraph(CreateScopedArguments, get(bytecode.m_scope));
6709 set(bytecode.m_dst, createArguments);
6710 NEXT_OPCODE(op_create_scoped_arguments);
6711 }
6712
6713 case op_create_cloned_arguments: {
6714 auto bytecode = currentInstruction->as<OpCreateClonedArguments>();
6715 noticeArgumentsUse();
6716 Node* createArguments = addToGraph(CreateClonedArguments);
6717 set(bytecode.m_dst, createArguments);
6718 NEXT_OPCODE(op_create_cloned_arguments);
6719 }
6720
6721 case op_get_from_arguments: {
6722 auto bytecode = currentInstruction->as<OpGetFromArguments>();
6723 set(bytecode.m_dst,
6724 addToGraph(
6725 GetFromArguments,
6726 OpInfo(bytecode.m_index),
6727 OpInfo(getPrediction()),
6728 get(bytecode.m_arguments)));
6729 NEXT_OPCODE(op_get_from_arguments);
6730 }
6731
6732 case op_put_to_arguments: {
6733 auto bytecode = currentInstruction->as<OpPutToArguments>();
6734 addToGraph(
6735 PutToArguments,
6736 OpInfo(bytecode.m_index),
6737 get(bytecode.m_arguments),
6738 get(bytecode.m_value));
6739 NEXT_OPCODE(op_put_to_arguments);
6740 }
6741
6742 case op_get_argument: {
6743 auto bytecode = currentInstruction->as<OpGetArgument>();
6744 InlineCallFrame* inlineCallFrame = this->inlineCallFrame();
6745 Node* argument;
6746 int32_t argumentIndexIncludingThis = bytecode.m_index;
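            // When we are inlined at a non-varargs call site the arguments are statically known: read the
            // argument register directly, or fold to undefined if the index is out of range. Otherwise
            // the argument count is only known at runtime, so emit a GetArgument node.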
6747 if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
6748 int32_t argumentCountIncludingThisWithFixup = inlineCallFrame->argumentsWithFixup.size();
6749 if (argumentIndexIncludingThis < argumentCountIncludingThisWithFixup)
6750 argument = get(virtualRegisterForArgument(argumentIndexIncludingThis));
6751 else
6752 argument = addToGraph(JSConstant, OpInfo(m_constantUndefined));
6753 } else
6754 argument = addToGraph(GetArgument, OpInfo(argumentIndexIncludingThis), OpInfo(getPrediction()));
6755 set(bytecode.m_dst, argument);
6756 NEXT_OPCODE(op_get_argument);
6757 }
6758 case op_new_async_generator_func:
6759 handleNewFunc(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFunc>());
6760 NEXT_OPCODE(op_new_async_generator_func);
6761 case op_new_func:
6762 handleNewFunc(NewFunction, currentInstruction->as<OpNewFunc>());
6763 NEXT_OPCODE(op_new_func);
6764 case op_new_generator_func:
6765 handleNewFunc(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFunc>());
6766 NEXT_OPCODE(op_new_generator_func);
6767 case op_new_async_func:
6768 handleNewFunc(NewAsyncFunction, currentInstruction->as<OpNewAsyncFunc>());
6769 NEXT_OPCODE(op_new_async_func);
6770
6771 case op_new_func_exp:
6772 handleNewFuncExp(NewFunction, currentInstruction->as<OpNewFuncExp>());
6773 NEXT_OPCODE(op_new_func_exp);
6774 case op_new_generator_func_exp:
6775 handleNewFuncExp(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFuncExp>());
6776 NEXT_OPCODE(op_new_generator_func_exp);
6777 case op_new_async_generator_func_exp:
6778 handleNewFuncExp(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFuncExp>());
6779 NEXT_OPCODE(op_new_async_generator_func_exp);
6780 case op_new_async_func_exp:
6781 handleNewFuncExp(NewAsyncFunction, currentInstruction->as<OpNewAsyncFuncExp>());
6782 NEXT_OPCODE(op_new_async_func_exp);
6783
6784 case op_set_function_name: {
6785 auto bytecode = currentInstruction->as<OpSetFunctionName>();
6786 Node* func = get(bytecode.m_function);
6787 Node* name = get(bytecode.m_name);
6788 addToGraph(SetFunctionName, func, name);
6789 NEXT_OPCODE(op_set_function_name);
6790 }
6791
6792 case op_typeof: {
6793 auto bytecode = currentInstruction->as<OpTypeof>();
6794 set(bytecode.m_dst, addToGraph(TypeOf, get(bytecode.m_value)));
6795 NEXT_OPCODE(op_typeof);
6796 }
6797
6798 case op_to_number: {
6799 auto bytecode = currentInstruction->as<OpToNumber>();
6800 SpeculatedType prediction = getPrediction();
6801 Node* value = get(bytecode.m_operand);
6802 set(bytecode.m_dst, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), value));
6803 NEXT_OPCODE(op_to_number);
6804 }
6805
6806 case op_to_string: {
6807 auto bytecode = currentInstruction->as<OpToString>();
6808 Node* value = get(bytecode.m_operand);
6809 set(bytecode.m_dst, addToGraph(ToString, value));
6810 NEXT_OPCODE(op_to_string);
6811 }
6812
6813 case op_to_object: {
6814 auto bytecode = currentInstruction->as<OpToObject>();
6815 SpeculatedType prediction = getPrediction();
6816 Node* value = get(bytecode.m_operand);
6817 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_message];
6818 set(bytecode.m_dst, addToGraph(ToObject, OpInfo(identifierNumber), OpInfo(prediction), value));
6819 NEXT_OPCODE(op_to_object);
6820 }
6821
6822 case op_in_by_val: {
6823 auto bytecode = currentInstruction->as<OpInByVal>();
6824 ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read);
6825 set(bytecode.m_dst, addToGraph(InByVal, OpInfo(arrayMode.asWord()), get(bytecode.m_base), get(bytecode.m_property)));
6826 NEXT_OPCODE(op_in_by_val);
6827 }
6828
6829 case op_in_by_id: {
6830 auto bytecode = currentInstruction->as<OpInById>();
6831 Node* base = get(bytecode.m_base);
6832 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
6833 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
6834
6835 InByIdStatus status = InByIdStatus::computeFor(
6836 m_inlineStackTop->m_profiledBlock,
6837 m_inlineStackTop->m_baselineMap, m_icContextStack,
6838 currentCodeOrigin(), uid);
6839
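            // If the status is simple and every variant's condition set can be checked here, the "in"
            // test reduces to a MatchStructure over the relevant structures; otherwise fall back to the
            // generic InById node below.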
6840 if (status.isSimple()) {
6841 bool allOK = true;
6842 MatchStructureData* data = m_graph.m_matchStructureData.add();
6843 for (const InByIdVariant& variant : status.variants()) {
6844 if (!check(variant.conditionSet())) {
6845 allOK = false;
6846 break;
6847 }
6848 for (Structure* structure : variant.structureSet()) {
6849 MatchStructureVariant matchVariant;
6850 matchVariant.structure = m_graph.registerStructure(structure);
6851 matchVariant.result = variant.isHit();
6852
6853 data->variants.append(WTFMove(matchVariant));
6854 }
6855 }
6856
6857 if (allOK) {
6858 addToGraph(FilterInByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addInByIdStatus(currentCodeOrigin(), status)), base);
6859
6860 Node* match = addToGraph(MatchStructure, OpInfo(data), base);
6861 set(bytecode.m_dst, match);
6862 NEXT_OPCODE(op_in_by_id);
6863 }
6864 }
6865
6866 set(bytecode.m_dst, addToGraph(InById, OpInfo(identifierNumber), base));
6867 NEXT_OPCODE(op_in_by_id);
6868 }
6869
6870 case op_get_enumerable_length: {
6871 auto bytecode = currentInstruction->as<OpGetEnumerableLength>();
6872 set(bytecode.m_dst, addToGraph(GetEnumerableLength, get(bytecode.m_base)));
6873 NEXT_OPCODE(op_get_enumerable_length);
6874 }
6875
6876 case op_has_generic_property: {
6877 auto bytecode = currentInstruction->as<OpHasGenericProperty>();
6878 set(bytecode.m_dst, addToGraph(HasGenericProperty, get(bytecode.m_base), get(bytecode.m_property)));
6879 NEXT_OPCODE(op_has_generic_property);
6880 }
6881
6882 case op_has_structure_property: {
6883 auto bytecode = currentInstruction->as<OpHasStructureProperty>();
6884 set(bytecode.m_dst, addToGraph(HasStructureProperty,
6885 get(bytecode.m_base),
6886 get(bytecode.m_property),
6887 get(bytecode.m_enumerator)));
6888 NEXT_OPCODE(op_has_structure_property);
6889 }
6890
6891 case op_has_indexed_property: {
6892 auto bytecode = currentInstruction->as<OpHasIndexedProperty>();
6893 Node* base = get(bytecode.m_base);
6894 ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read);
6895 Node* property = get(bytecode.m_property);
6896 addVarArgChild(base);
6897 addVarArgChild(property);
6898 addVarArgChild(nullptr);
            Node* hasIterableProperty = addToGraph(Node::VarArg, HasIndexedProperty, OpInfo(arrayMode.asWord()), OpInfo(static_cast<uint32_t>(PropertySlot::InternalMethodType::GetOwnProperty)));
6900 m_exitOK = false; // HasIndexedProperty must be treated as if it clobbers exit state, since FixupPhase may make it generic.
6901 set(bytecode.m_dst, hasIterableProperty);
6902 NEXT_OPCODE(op_has_indexed_property);
6903 }
6904
6905 case op_get_direct_pname: {
6906 auto bytecode = currentInstruction->as<OpGetDirectPname>();
6907 SpeculatedType prediction = getPredictionWithoutOSRExit();
6908
6909 Node* base = get(bytecode.m_base);
6910 Node* property = get(bytecode.m_property);
6911 Node* index = get(bytecode.m_index);
6912 Node* enumerator = get(bytecode.m_enumerator);
6913
6914 addVarArgChild(base);
6915 addVarArgChild(property);
6916 addVarArgChild(index);
6917 addVarArgChild(enumerator);
6918 set(bytecode.m_dst, addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction)));
6919
6920 NEXT_OPCODE(op_get_direct_pname);
6921 }
6922
6923 case op_get_property_enumerator: {
6924 auto bytecode = currentInstruction->as<OpGetPropertyEnumerator>();
6925 set(bytecode.m_dst, addToGraph(GetPropertyEnumerator, get(bytecode.m_base)));
6926 NEXT_OPCODE(op_get_property_enumerator);
6927 }
6928
6929 case op_enumerator_structure_pname: {
6930 auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>();
6931 set(bytecode.m_dst, addToGraph(GetEnumeratorStructurePname,
6932 get(bytecode.m_enumerator),
6933 get(bytecode.m_index)));
6934 NEXT_OPCODE(op_enumerator_structure_pname);
6935 }
6936
6937 case op_enumerator_generic_pname: {
6938 auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>();
6939 set(bytecode.m_dst, addToGraph(GetEnumeratorGenericPname,
6940 get(bytecode.m_enumerator),
6941 get(bytecode.m_index)));
6942 NEXT_OPCODE(op_enumerator_generic_pname);
6943 }
6944
6945 case op_to_index_string: {
6946 auto bytecode = currentInstruction->as<OpToIndexString>();
6947 set(bytecode.m_dst, addToGraph(ToIndexString, get(bytecode.m_index)));
6948 NEXT_OPCODE(op_to_index_string);
6949 }
6950
6951 case op_log_shadow_chicken_prologue: {
6952 auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>();
6953 if (!m_inlineStackTop->m_inlineCallFrame)
6954 addToGraph(LogShadowChickenPrologue, get(bytecode.m_scope));
6955 NEXT_OPCODE(op_log_shadow_chicken_prologue);
6956 }
6957
6958 case op_log_shadow_chicken_tail: {
6959 auto bytecode = currentInstruction->as<OpLogShadowChickenTail>();
6960 if (!m_inlineStackTop->m_inlineCallFrame) {
6961 // FIXME: The right solution for inlining is to elide these whenever the tail call
6962 // ends up being inlined.
6963 // https://bugs.webkit.org/show_bug.cgi?id=155686
6964 addToGraph(LogShadowChickenTail, get(bytecode.m_thisValue), get(bytecode.m_scope));
6965 }
6966 NEXT_OPCODE(op_log_shadow_chicken_tail);
6967 }
6968
6969 case op_unreachable: {
6970 flushForTerminal();
6971 addToGraph(Unreachable);
6972 LAST_OPCODE(op_unreachable);
6973 }
6974
6975 default:
6976 // Parse failed! This should not happen because the capabilities checker
6977 // should have caught it.
6978 RELEASE_ASSERT_NOT_REACHED();
6979 return;
6980 }
6981 }
6982}
6983
6984void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleTargets)
6985{
6986 ASSERT(!block->isLinked);
6987 ASSERT(!block->isEmpty());
6988 Node* node = block->terminal();
6989 ASSERT(node->isTerminal());
6990
6991 switch (node->op()) {
6992 case Jump:
6993 node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing());
6994 break;
6995
6996 case Branch: {
6997 BranchData* data = node->branchData();
6998 data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex());
6999 data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex());
7000 break;
7001 }
7002
7003 case Switch: {
7004 SwitchData* data = node->switchData();
        for (unsigned i = data->cases.size(); i--;)
7006 data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex());
7007 data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex());
7008 break;
7009 }
7010
7011 default:
7012 RELEASE_ASSERT_NOT_REACHED();
7013 }
7014
7015 VERBOSE_LOG("Marking ", RawPointer(block), " as linked (actually did linking)\n");
7016 block->didLink();
7017}
7018
7019void ByteCodeParser::linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets)
7020{
7021 for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
7022 VERBOSE_LOG("Attempting to link ", RawPointer(unlinkedBlocks[i]), "\n");
7023 linkBlock(unlinkedBlocks[i], possibleTargets);
7024 }
7025}
7026
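// Sets up the parsing context for one code block: it captures exit and IC profiling from the
// profiled block, remaps identifiers and switch jump tables into the machine code block's
// tables, and, for inlined calls, allocates and configures the InlineCallFrame. The entry
// pushes itself onto the parser's inline stack; the destructor below pops it again.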
7027ByteCodeParser::InlineStackEntry::InlineStackEntry(
7028 ByteCodeParser* byteCodeParser,
7029 CodeBlock* codeBlock,
7030 CodeBlock* profiledBlock,
7031 JSFunction* callee, // Null if this is a closure call.
7032 VirtualRegister returnValueVR,
7033 VirtualRegister inlineCallFrameStart,
7034 int argumentCountIncludingThis,
7035 InlineCallFrame::Kind kind,
7036 BasicBlock* continuationBlock)
7037 : m_byteCodeParser(byteCodeParser)
7038 , m_codeBlock(codeBlock)
7039 , m_profiledBlock(profiledBlock)
7040 , m_continuationBlock(continuationBlock)
7041 , m_returnValue(returnValueVR)
7042 , m_caller(byteCodeParser->m_inlineStackTop)
7043{
7044 {
7045 m_exitProfile.initialize(m_profiledBlock->unlinkedCodeBlock());
7046
7047 ConcurrentJSLocker locker(m_profiledBlock->m_lock);
7048 m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles(locker));
7049
        // We do this while holding the lock because we want to encourage StructureStubInfos
        // to potentially be added to operations, and because the profiled block could be in the
        // middle of LLInt->JIT tier-up, in which case the infos would be getting added right now.
7053 if (m_profiledBlock->hasBaselineJITProfiling())
7054 m_profiledBlock->getICStatusMap(locker, m_baselineMap);
7055 }
7056
7057 CodeBlock* optimizedBlock = m_profiledBlock->replacement();
7058 m_optimizedContext.optimizedCodeBlock = optimizedBlock;
7059 if (Options::usePolyvariantDevirtualization() && optimizedBlock) {
7060 ConcurrentJSLocker locker(optimizedBlock->m_lock);
7061 optimizedBlock->getICStatusMap(locker, m_optimizedContext.map);
7062 }
7063 byteCodeParser->m_icContextStack.append(&m_optimizedContext);
7064
7065 int argumentCountIncludingThisWithFixup = std::max<int>(argumentCountIncludingThis, codeBlock->numParameters());
7066
7067 if (m_caller) {
7068 // Inline case.
7069 ASSERT(codeBlock != byteCodeParser->m_codeBlock);
7070 ASSERT(inlineCallFrameStart.isValid());
7071
7072 m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames()->add();
7073 m_optimizedContext.inlineCallFrame = m_inlineCallFrame;
7074
7075 // The owner is the machine code block, and we already have a barrier on that when the
7076 // plan finishes.
7077 m_inlineCallFrame->baselineCodeBlock.setWithoutWriteBarrier(codeBlock->baselineVersion());
7078 m_inlineCallFrame->setStackOffset(inlineCallFrameStart.offset() - CallFrame::headerSizeInRegisters);
7079 m_inlineCallFrame->argumentCountIncludingThis = argumentCountIncludingThis;
7080 if (callee) {
7081 m_inlineCallFrame->calleeRecovery = ValueRecovery::constant(callee);
7082 m_inlineCallFrame->isClosureCall = false;
7083 } else
7084 m_inlineCallFrame->isClosureCall = true;
7085 m_inlineCallFrame->directCaller = byteCodeParser->currentCodeOrigin();
7086 m_inlineCallFrame->argumentsWithFixup.resizeToFit(argumentCountIncludingThisWithFixup); // Set the number of arguments including this, but don't configure the value recoveries, yet.
7087 m_inlineCallFrame->kind = kind;
7088
7089 m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
7090 m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
7091
7092 for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
7093 UniquedStringImpl* rep = codeBlock->identifier(i).impl();
7094 unsigned index = byteCodeParser->m_graph.identifiers().ensure(rep);
7095 m_identifierRemap[i] = index;
7096 }
7097 for (unsigned i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i) {
7098 m_switchRemap[i] = byteCodeParser->m_codeBlock->numberOfSwitchJumpTables();
7099 byteCodeParser->m_codeBlock->addSwitchJumpTable() = codeBlock->switchJumpTable(i);
7100 }
7101 } else {
7102 // Machine code block case.
7103 ASSERT(codeBlock == byteCodeParser->m_codeBlock);
7104 ASSERT(!callee);
7105 ASSERT(!returnValueVR.isValid());
7106 ASSERT(!inlineCallFrameStart.isValid());
7107
        m_inlineCallFrame = nullptr;
7109
7110 m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
7111 m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
7112 for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
7113 m_identifierRemap[i] = i;
7114 for (size_t i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i)
7115 m_switchRemap[i] = i;
7116 }
7117
7118 m_argumentPositions.resize(argumentCountIncludingThisWithFixup);
7119 for (int i = 0; i < argumentCountIncludingThisWithFixup; ++i) {
7120 byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
7121 ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
7122 m_argumentPositions[i] = argumentPosition;
7123 }
7124 byteCodeParser->m_inlineCallFrameToArgumentPositions.add(m_inlineCallFrame, m_argumentPositions);
7125
7126 byteCodeParser->m_inlineStackTop = this;
7127}
7128
7129ByteCodeParser::InlineStackEntry::~InlineStackEntry()
7130{
7131 m_byteCodeParser->m_inlineStackTop = m_caller;
7132 RELEASE_ASSERT(m_byteCodeParser->m_icContextStack.last() == &m_optimizedContext);
7133 m_byteCodeParser->m_icContextStack.removeLast();
7134}
7135
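// Parses the code block at the top of the inline stack: the instruction stream is cut at the
// precise jump targets, and each region is parsed into basic blocks by parseBlock().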
7136void ByteCodeParser::parseCodeBlock()
7137{
7138 clearCaches();
7139
7140 CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
7141
7142 if (UNLIKELY(m_graph.compilation())) {
7143 m_graph.compilation()->addProfiledBytecodes(
7144 *m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock);
7145 }
7146
7147 if (UNLIKELY(Options::dumpSourceAtDFGTime())) {
7148 Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback()->ensureDeferredSourceDump();
7149 if (inlineCallFrame()) {
7150 DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITType::DFGJIT, inlineCallFrame()->directCaller.bytecodeIndex());
7151 deferredSourceDump.append(dump);
7152 } else
7153 deferredSourceDump.append(DeferredSourceDump(codeBlock->baselineVersion()));
7154 }
7155
7156 if (Options::dumpBytecodeAtDFGTime()) {
7157 dataLog("Parsing ", *codeBlock);
7158 if (inlineCallFrame()) {
7159 dataLog(
7160 " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITType::DFGJIT),
7161 " ", inlineCallFrame()->directCaller);
7162 }
7163 dataLog(
7164 ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
7165 codeBlock->baselineVersion()->dumpBytecode();
7166 }
7167
7168 Vector<InstructionStream::Offset, 32> jumpTargets;
7169 computePreciseJumpTargets(codeBlock, jumpTargets);
7170 if (Options::dumpBytecodeAtDFGTime()) {
7171 dataLog("Jump targets: ");
7172 CommaPrinter comma;
7173 for (unsigned i = 0; i < jumpTargets.size(); ++i)
7174 dataLog(comma, jumpTargets[i]);
7175 dataLog("\n");
7176 }
7177
7178 for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= jumpTargets.size(); ++jumpTargetIndex) {
        // The maximum bytecode offset to go into the current basic block is either the next jump target or the end of the instructions.
7180 unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size();
7181 ASSERT(m_currentIndex < limit);
7182
7183 // Loop until we reach the current limit (i.e. next jump target).
7184 do {
7185 // There may already be a currentBlock in two cases:
7186 // - we may have just entered the loop for the first time
7187 // - we may have just returned from an inlined callee that had some early returns and
7188 // so allocated a continuation block, and the instruction after the call is a jump target.
7189 // In both cases, we want to keep using it.
7190 if (!m_currentBlock) {
7191 m_currentBlock = allocateTargetableBlock(m_currentIndex);
7192
7193 // The first block is definitely an OSR target.
7194 if (m_graph.numBlocks() == 1) {
7195 m_currentBlock->isOSRTarget = true;
7196 m_graph.m_roots.append(m_currentBlock);
7197 }
7198 prepareToParseBlock();
7199 }
7200
7201 parseBlock(limit);
7202
7203 // We should not have gone beyond the limit.
7204 ASSERT(m_currentIndex <= limit);
7205
7206 if (m_currentBlock->isEmpty()) {
                // This case only happens if the last instruction was an inlined call that either had early
                // returns or was polymorphic (thereby creating an empty continuation block), and we then
                // hit the limit before putting anything into that continuation block.
7210 ASSERT(m_currentIndex == limit);
7211 makeBlockTargetable(m_currentBlock, m_currentIndex);
7212 } else {
7213 ASSERT(m_currentBlock->terminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()));
7214 m_currentBlock = nullptr;
7215 }
7216 } while (m_currentIndex < limit);
7217 }
7218
7219 // Should have reached the end of the instructions.
7220 ASSERT(m_currentIndex == codeBlock->instructions().size());
7221
7222 VERBOSE_LOG("Done parsing ", *codeBlock, " (fell off end)\n");
7223}
7224
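// Lowers put_by_val / put_by_val_direct. If the baseline ByValInfo cached a single property
// identifier and we have not exited here on a bad identifier, type, or cell, the store is
// compiled as a PutById guarded by a CheckCell or CheckStringIdent on the property; otherwise
// we emit a generic PutByVal / PutByValDirect node.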
7225template <typename Bytecode>
7226void ByteCodeParser::handlePutByVal(Bytecode bytecode, unsigned instructionSize)
7227{
7228 Node* base = get(bytecode.m_base);
7229 Node* property = get(bytecode.m_property);
7230 Node* value = get(bytecode.m_value);
7231 bool isDirect = Bytecode::opcodeID == op_put_by_val_direct;
7232 bool compiledAsPutById = false;
7233 {
7234 unsigned identifierNumber = std::numeric_limits<unsigned>::max();
7235 PutByIdStatus putByIdStatus;
7236 {
7237 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
7238 ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex())).byValInfo;
            // FIXME: When the bytecode has not been compiled by the baseline JIT, byValInfo is null,
            // so there is no profiling information to use here.
7241 if (byValInfo
7242 && byValInfo->stubInfo
7243 && !byValInfo->tookSlowPath
7244 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent)
7245 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)
7246 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
7247 compiledAsPutById = true;
7248 identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl());
7249 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
7250
7251 if (Symbol* symbol = byValInfo->cachedSymbol.get()) {
7252 FrozenValue* frozen = m_graph.freezeStrong(symbol);
7253 addToGraph(CheckCell, OpInfo(frozen), property);
7254 } else {
7255 ASSERT(!uid->isSymbol());
7256 addToGraph(CheckStringIdent, OpInfo(uid), property);
7257 }
7258
7259 putByIdStatus = PutByIdStatus::computeForStubInfo(
7260 locker, m_inlineStackTop->m_profiledBlock,
7261 byValInfo->stubInfo, currentCodeOrigin(), uid);
7262
7263 }
7264 }
7265
7266 if (compiledAsPutById)
7267 handlePutById(base, identifierNumber, value, putByIdStatus, isDirect, instructionSize);
7268 }
7269
7270 if (!compiledAsPutById) {
7271 ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_inlineStackTop->m_codeBlock).m_arrayProfile, Array::Write);
7272
7273 addVarArgChild(base);
7274 addVarArgChild(property);
7275 addVarArgChild(value);
        addVarArgChild(nullptr); // Leave room for property storage.
        addVarArgChild(nullptr); // Leave room for length.
7278 addToGraph(Node::VarArg, isDirect ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
7279 m_exitOK = false; // PutByVal and PutByValDirect must be treated as if they clobber exit state, since FixupPhase may make them generic.
7280 }
7281}
7282
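// Shared lowering for the put-accessor-by-id bytecodes: emits a single node carrying the
// remapped identifier number and the accessor attributes.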
7283template <typename Bytecode>
7284void ByteCodeParser::handlePutAccessorById(NodeType op, Bytecode bytecode)
7285{
7286 Node* base = get(bytecode.m_base);
7287 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
7288 Node* accessor = get(bytecode.m_accessor);
7289 addToGraph(op, OpInfo(identifierNumber), OpInfo(bytecode.m_attributes), base, accessor);
7290}
7291
7292template <typename Bytecode>
7293void ByteCodeParser::handlePutAccessorByVal(NodeType op, Bytecode bytecode)
7294{
7295 Node* base = get(bytecode.m_base);
7296 Node* subscript = get(bytecode.m_property);
7297 Node* accessor = get(bytecode.m_accessor);
7298 addToGraph(op, OpInfo(bytecode.m_attributes), base, subscript, accessor);
7299}
7300
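// Shared lowering for the new-function-declaration bytecodes: freezes the FunctionExecutable
// and emits the requested node against the current scope.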
7301template <typename Bytecode>
7302void ByteCodeParser::handleNewFunc(NodeType op, Bytecode bytecode)
7303{
7304 FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(bytecode.m_functionDecl);
7305 FrozenValue* frozen = m_graph.freezeStrong(decl);
7306 Node* scope = get(bytecode.m_scope);
7307 set(bytecode.m_dst, addToGraph(op, OpInfo(frozen), scope));
7308 // Ideally we wouldn't have to do this Phantom. But:
7309 //
7310 // For the constant case: we must do it because otherwise we would have no way of knowing
7311 // that the scope is live at OSR here.
7312 //
7313 // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation
7314 // won't be able to handle an Undefined scope.
7315 addToGraph(Phantom, scope);
7316}
7317
7318template <typename Bytecode>
7319void ByteCodeParser::handleNewFuncExp(NodeType op, Bytecode bytecode)
7320{
7321 FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(bytecode.m_functionDecl);
7322 FrozenValue* frozen = m_graph.freezeStrong(expr);
7323 Node* scope = get(bytecode.m_scope);
7324 set(bytecode.m_dst, addToGraph(op, OpInfo(frozen), scope));
7325 // Ideally we wouldn't have to do this Phantom. But:
7326 //
7327 // For the constant case: we must do it because otherwise we would have no way of knowing
7328 // that the scope is live at OSR here.
7329 //
7330 // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation
7331 // won't be able to handle an Undefined scope.
7332 addToGraph(Phantom, scope);
7333}
7334
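// Entry point of the parser: parses the machine code block (recursively descending into any
// inlined callees), links the resulting blocks, and then runs the ForceOSRExit cleanup and
// reachability passes below.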
7335void ByteCodeParser::parse()
7336{
7337 // Set during construction.
7338 ASSERT(!m_currentIndex);
7339
7340 VERBOSE_LOG("Parsing ", *m_codeBlock, "\n");
7341
7342 InlineStackEntry inlineStackEntry(
        this, m_codeBlock, m_profiledBlock, nullptr, VirtualRegister(), VirtualRegister(),
7344 m_codeBlock->numParameters(), InlineCallFrame::Call, nullptr);
7345
7346 parseCodeBlock();
7347 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
7348
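    // If any ForceOSRExit nodes were planted, nothing after such a node in its block can execute.
    // The pass below preserves the liveness that OSR exit needs (via Flush and PhantomLocal),
    // keeps data flow edges to earlier nodes alive with Phantoms, and then replaces the remainder
    // of the block with Unreachable.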
7349 if (m_hasAnyForceOSRExits) {
7350 BlockSet blocksToIgnore;
7351 for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
7352 if (block->isOSRTarget && block->bytecodeBegin == m_graph.m_plan.osrEntryBytecodeIndex()) {
7353 blocksToIgnore.add(block);
7354 break;
7355 }
7356 }
7357
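        // Grow blocksToIgnore to a fixed point: a block is also ignored if any of its successors
        // is ignored, so it ends up containing every block that can reach the OSR entry target.
        // Those blocks are skipped by the trimming below.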
7358 {
7359 bool isSafeToValidate = false;
7360 auto postOrder = m_graph.blocksInPostOrder(isSafeToValidate); // This algorithm doesn't rely on the predecessors list, which is not yet built.
7361 bool changed;
7362 do {
7363 changed = false;
7364 for (BasicBlock* block : postOrder) {
7365 for (BasicBlock* successor : block->successors()) {
7366 if (blocksToIgnore.contains(successor)) {
7367 changed |= blocksToIgnore.add(block);
7368 break;
7369 }
7370 }
7371 }
7372 } while (changed);
7373 }
7374
7375 InsertionSet insertionSet(m_graph);
7376 Operands<VariableAccessData*> mapping(OperandsLike, m_graph.block(0)->variablesAtHead);
7377
7378 for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
7379 if (blocksToIgnore.contains(block))
7380 continue;
7381
7382 mapping.fill(nullptr);
7383 if (validationEnabled()) {
7384 // Verify that it's correct to fill mapping with nullptr.
7385 for (unsigned i = 0; i < block->variablesAtHead.size(); ++i) {
7386 Node* node = block->variablesAtHead.at(i);
7387 RELEASE_ASSERT(!node);
7388 }
7389 }
7390
7391 for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
7392 {
7393 Node* node = block->at(nodeIndex);
7394
7395 if (node->hasVariableAccessData(m_graph))
7396 mapping.operand(node->local()) = node->variableAccessData();
7397
7398 if (node->op() != ForceOSRExit)
7399 continue;
7400 }
7401
7402 NodeOrigin origin = block->at(nodeIndex)->origin;
7403 RELEASE_ASSERT(origin.exitOK);
7404
7405 ++nodeIndex;
7406
7407 {
7408 if (validationEnabled()) {
                        // This verifies that we don't need to change any of the successors' predecessor
                        // lists after planting the Unreachable below. At this point in the bytecode
                        // parser, we haven't linked up the predecessor lists yet.
7412 for (BasicBlock* successor : block->successors())
7413 RELEASE_ASSERT(successor->predecessors.isEmpty());
7414 }
7415
7416 auto insertLivenessPreservingOp = [&] (InlineCallFrame* inlineCallFrame, NodeType op, VirtualRegister operand) {
7417 VariableAccessData* variable = mapping.operand(operand);
7418 if (!variable) {
7419 variable = newVariableAccessData(operand);
7420 mapping.operand(operand) = variable;
7421 }
7422
7423 VirtualRegister argument = operand - (inlineCallFrame ? inlineCallFrame->stackOffset : 0);
7424 if (argument.isArgument() && !argument.isHeader()) {
7425 const Vector<ArgumentPosition*>& arguments = m_inlineCallFrameToArgumentPositions.get(inlineCallFrame);
7426 arguments[argument.toArgument()]->addVariable(variable);
7427 }
7428 insertionSet.insertNode(nodeIndex, SpecNone, op, origin, OpInfo(variable));
7429 };
7430 auto addFlushDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) {
7431 insertLivenessPreservingOp(inlineCallFrame, Flush, operand);
7432 };
7433 auto addPhantomLocalDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) {
7434 insertLivenessPreservingOp(inlineCallFrame, PhantomLocal, operand);
7435 };
7436 flushForTerminalImpl(origin.semantic, addFlushDirect, addPhantomLocalDirect);
7437 }
7438
7439 while (true) {
7440 RELEASE_ASSERT(nodeIndex < block->size());
7441
7442 Node* node = block->at(nodeIndex);
7443
7444 node->origin = origin;
7445 m_graph.doToChildren(node, [&] (Edge edge) {
7446 // We only need to keep data flow edges to nodes defined prior to the ForceOSRExit. The reason
7447 // for this is we rely on backwards propagation being able to see the "full" bytecode. To model
7448 // this, we preserve uses of a node in a generic way so that backwards propagation can reason
7449 // about them. Therefore, we can't remove uses of a node which is defined before the ForceOSRExit
7450 // even when we're at a point in the program after the ForceOSRExit, because that would break backwards
7451 // propagation's analysis over the uses of a node. However, we don't need this same preservation for
                        // nodes defined after ForceOSRExit, as we've already exited before those defs.
7453 if (edge->hasResult())
7454 insertionSet.insertNode(nodeIndex, SpecNone, Phantom, origin, Edge(edge.node(), UntypedUse));
7455 });
7456
7457 bool isTerminal = node->isTerminal();
7458
7459 node->removeWithoutChecks();
7460
7461 if (isTerminal) {
7462 insertionSet.insertNode(nodeIndex, SpecNone, Unreachable, origin);
7463 break;
7464 }
7465
7466 ++nodeIndex;
7467 }
7468
7469 insertionSet.execute(block);
7470
7471 auto nodeAndIndex = block->findTerminal();
7472 RELEASE_ASSERT(nodeAndIndex.node->op() == Unreachable);
7473 block->resize(nodeAndIndex.index + 1);
7474 break;
7475 }
7476 }
7477 } else if (validationEnabled()) {
7478 // Ensure our bookkeeping for ForceOSRExit nodes is working.
7479 for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
7480 for (Node* node : *block)
7481 RELEASE_ASSERT(node->op() != ForceOSRExit);
7482 }
7483 }
7484
7485 m_graph.determineReachability();
7486 m_graph.killUnreachableBlocks();
7487
7488 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
7489 BasicBlock* block = m_graph.block(blockIndex);
7490 if (!block)
7491 continue;
7492 ASSERT(block->variablesAtHead.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
7493 ASSERT(block->variablesAtHead.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
7494 ASSERT(block->variablesAtTail.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
7495 ASSERT(block->variablesAtTail.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
7496 }
7497
7498 m_graph.m_localVars = m_numLocals;
7499 m_graph.m_parameterSlots = m_parameterSlots;
7500}
7501
7502void parse(Graph& graph)
7503{
7504 ByteCodeParser(graph).parse();
7505}
7506
7507} } // namespace JSC::DFG
7508
7509#endif
7510