1/*
2 * Copyright (C) 2013-2019 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "FTLLowerDFGToB3.h"
28
29#if ENABLE(FTL_JIT)
30
31#include "AirCode.h"
32#include "AirGenerationContext.h"
33#include "AllowMacroScratchRegisterUsage.h"
34#include "AllowMacroScratchRegisterUsageIf.h"
35#include "AtomicsObject.h"
36#include "B3CheckValue.h"
37#include "B3FenceValue.h"
38#include "B3PatchpointValue.h"
39#include "B3SlotBaseValue.h"
40#include "B3StackmapGenerationParams.h"
41#include "B3ValueInlines.h"
42#include "CallFrameShuffler.h"
43#include "CodeBlockWithJITType.h"
44#include "DFGAbstractInterpreterInlines.h"
45#include "DFGCapabilities.h"
46#include "DFGDoesGC.h"
47#include "DFGDominators.h"
48#include "DFGInPlaceAbstractState.h"
49#include "DFGLivenessAnalysisPhase.h"
50#include "DFGMayExit.h"
51#include "DFGOSRAvailabilityAnalysisPhase.h"
52#include "DFGOSRExitFuzz.h"
53#include "DirectArguments.h"
54#include "FTLAbstractHeapRepository.h"
55#include "FTLAvailableRecovery.h"
56#include "FTLExceptionTarget.h"
57#include "FTLForOSREntryJITCode.h"
58#include "FTLFormattedValue.h"
59#include "FTLLazySlowPathCall.h"
60#include "FTLLoweredNodeValue.h"
61#include "FTLOperations.h"
62#include "FTLOutput.h"
63#include "FTLPatchpointExceptionHandle.h"
64#include "FTLSnippetParams.h"
65#include "FTLThunks.h"
66#include "FTLWeightedTarget.h"
67#include "JITAddGenerator.h"
68#include "JITBitAndGenerator.h"
69#include "JITBitOrGenerator.h"
70#include "JITBitXorGenerator.h"
71#include "JITDivGenerator.h"
72#include "JITInlineCacheGenerator.h"
73#include "JITLeftShiftGenerator.h"
74#include "JITMathIC.h"
75#include "JITMulGenerator.h"
76#include "JITRightShiftGenerator.h"
77#include "JITSubGenerator.h"
78#include "JSAsyncFunction.h"
79#include "JSAsyncGeneratorFunction.h"
80#include "JSCInlines.h"
81#include "JSGeneratorFunction.h"
82#include "JSImmutableButterfly.h"
83#include "JSLexicalEnvironment.h"
84#include "JSMap.h"
85#include "OperandsInlines.h"
86#include "ProbeContext.h"
87#include "RegExpObject.h"
88#include "ScopedArguments.h"
89#include "ScopedArgumentsTable.h"
90#include "ScratchRegisterAllocator.h"
91#include "SetupVarargsFrame.h"
92#include "ShadowChicken.h"
93#include "StructureStubInfo.h"
94#include "SuperSampler.h"
95#include "ThunkGenerators.h"
96#include "VirtualRegister.h"
97#include "Watchdog.h"
98#include <atomic>
99#include <wtf/Box.h>
100#include <wtf/Gigacage.h>
101#include <wtf/RecursableLambda.h>
102#include <wtf/StdUnorderedSet.h>
103
// Locally redefine RELEASE_ASSERT so that a failed assertion in this file always
// reports the failing expression, file, line, and function via
// WTFReportAssertionFailure before crashing — independent of however the stock
// macro happens to be configured for this build.
#undef RELEASE_ASSERT
#define RELEASE_ASSERT(assertion) do { \
    if (!(assertion)) { \
        WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
        CRASH(); \
    } \
} while (0)
111
112namespace JSC { namespace FTL {
113
114using namespace B3;
115using namespace DFG;
116
117namespace {
118
119std::atomic<int> compileCounter;
120
121#if !ASSERT_DISABLED
122NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
123 CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
124{
125 dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
126 if (nodeIndex != UINT_MAX)
127 dataLog(", node @", nodeIndex);
128 dataLog(".\n");
129 CRASH();
130}
131#endif
132
// Using this instead of typeCheck() helps to reduce the load on B3, by creating
// significantly less dead code.
// If the abstract interpreter already proves that the edge can only carry the
// types being passed through, the check (and all of its B3 IR) is skipped
// entirely via the early break.
#define FTL_TYPE_CHECK_WITH_EXIT_KIND(exitKind, lowValue, highValue, typesPassedThrough, failCondition) do { \
        FormattedValue _ftc_lowValue = (lowValue); \
        Edge _ftc_highValue = (highValue); \
        SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
        if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
            break; \
        typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition), exitKind); \
    } while (false)

// Common case: a failed type check OSR-exits with the BadType exit kind.
#define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) \
    FTL_TYPE_CHECK_WITH_EXIT_KIND(BadType, lowValue, highValue, typesPassedThrough, failCondition)
146
147class LowerDFGToB3 {
148 WTF_MAKE_NONCOPYABLE(LowerDFGToB3);
149public:
150 LowerDFGToB3(State& state)
151 : m_graph(state.graph)
152 , m_ftlState(state)
153 , m_out(state)
154 , m_proc(*state.proc)
155 , m_availabilityCalculator(m_graph)
156 , m_state(state.graph)
157 , m_interpreter(state.graph, m_state)
158 , m_indexMaskingMode(Options::enableSpectreMitigations() ? IndexMaskingEnabled : IndexMaskingDisabled)
159 {
160 if (Options::validateAbstractInterpreterState()) {
161 performLivenessAnalysis(m_graph);
162
163 // We only use node liveness here, not combined liveness, as we only track
164 // AI state for live nodes.
165 for (DFG::BasicBlock* block : m_graph.blocksInNaturalOrder()) {
166 NodeSet live;
167
168 for (NodeFlowProjection node : block->ssa->liveAtTail) {
169 if (node.kind() == NodeFlowProjection::Primary)
170 live.addVoid(node.node());
171 }
172
173 for (unsigned i = block->size(); i--; ) {
174 Node* node = block->at(i);
175 live.remove(node);
176 m_graph.doToChildren(node, [&] (Edge child) {
177 live.addVoid(child.node());
178 });
179 m_liveInToNode.add(node, live);
180 }
181 }
182 }
183 }
184
    // Entry point of the lowering pass: sets up the procedure's entrypoints and
    // prologues, emits the stack-overflow and argument-speculation checks, then
    // lowers every reachable DFG block, and finally decorates and reorders the
    // resulting B3 IR.
    void lower()
    {
        State* state = &m_ftlState;

        // Give the compilation a unique name when verbose compilation is on.
        CString name;
        if (verboseCompilationEnabled()) {
            name = toCString(
                "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
                "_", codeBlock()->hash());
        } else
            name = "jsBody";

        {
            m_proc.setNumEntrypoints(m_graph.m_numberOfEntrypoints);
            CodeBlock* codeBlock = m_graph.m_codeBlock;

            // Shared prologue for catch entrypoints: allocate the frame
            // (optionally zeroing it), save the callee-save registers, and store
            // the CodeBlock into the call frame header.
            Ref<B3::Air::PrologueGenerator> catchPrologueGenerator = createSharedTask<B3::Air::PrologueGeneratorFunction>(
                [codeBlock] (CCallHelpers& jit, B3::Air::Code& code) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);
                    jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
                    if (Options::zeroStackFrame())
                        jit.clearStackFrame(GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister, GPRInfo::regT0, code.frameSize());

                    jit.emitSave(code.calleeSaveRegisterAtOffsetList());
                    jit.emitPutToCallFrameHeader(codeBlock, CallFrameSlot::codeBlock);
                });

            // Entrypoint 0 is the normal entry and keeps the default prologue;
            // every catch entrypoint gets the custom one above.
            for (unsigned catchEntrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeOffset.keys()) {
                RELEASE_ASSERT(catchEntrypointIndex != 0);
                m_proc.code().setPrologueForEntrypoint(catchEntrypointIndex, catchPrologueGenerator.copyRef());
            }

            if (m_graph.m_maxLocalsForCatchOSREntry) {
                uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
                m_ftlState.jitCode->common.catchOSREntryBuffer = m_graph.m_vm.scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
            }
        }

        m_graph.ensureSSADominators();

        if (verboseCompilationEnabled())
            dataLog("Function ready, beginning lowering.\n");

        m_out.initialize(m_heaps);

        // We use prologue frequency for all of the initialization code.
        m_out.setFrequency(1);

        bool hasMultipleEntrypoints = m_graph.m_numberOfEntrypoints > 1;

        LBasicBlock prologue = m_out.newBlock();
        LBasicBlock callEntrypointArgumentSpeculations = hasMultipleEntrypoints ? m_out.newBlock() : nullptr;
        m_handleExceptions = m_out.newBlock();

        // Create one B3 block per non-null DFG block, carrying over its
        // execution frequency.
        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            m_highBlock = m_graph.block(blockIndex);
            if (!m_highBlock)
                continue;
            m_out.setFrequency(m_highBlock->executionCount);
            m_blocks.add(m_highBlock, m_out.newBlock());
        }

        // Back to prologue frequency for any blocks that get sneakily created in the initialization code.
        m_out.setFrequency(1);

        m_out.appendTo(prologue, hasMultipleEntrypoints ? callEntrypointArgumentSpeculations : m_handleExceptions);
        m_out.initializeConstants(m_proc, prologue);
        createPhiVariables();

        // Reserve a locked stack area with room for one JSValue per machine
        // local; m_captured addresses the high end of that area.
        size_t sizeOfCaptured = sizeof(JSValue) * m_graph.m_nextMachineLocal;
        B3::SlotBaseValue* capturedBase = m_out.lockedStackSlot(sizeOfCaptured);
        m_captured = m_out.add(capturedBase, m_out.constIntPtr(sizeOfCaptured));
        state->capturedValue = capturedBase->slot();

        auto preOrder = m_graph.blocksInPreOrder();

        m_callFrame = m_out.framePointer();
        m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
        m_tagMask = m_out.constInt64(TagMask);

        // Make sure that B3 knows that we really care about the mask registers. This forces the
        // constants to be materialized in registers.
        m_proc.addFastConstant(m_tagTypeNumber->key());
        m_proc.addFastConstant(m_tagMask->key());

        // We don't want the CodeBlock to have a weak pointer to itself because
        // that would cause it to always get collected.
        m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), addressFor(CallFrameSlot::codeBlock));

        VM* vm = &this->vm();

        // Stack Overflow Check.
        unsigned exitFrameSize = m_graph.requiredRegisterCountForExit() * sizeof(Register);
        MacroAssembler::AbsoluteAddress addressOfStackLimit(vm->addressOfSoftStackLimit());
        PatchpointValue* stackOverflowHandler = m_out.patchpoint(Void);
        CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(0));
        stackOverflowHandler->appendSomeRegister(m_callFrame);
        stackOverflowHandler->clobber(RegisterSet::macroScratchRegisters());
        stackOverflowHandler->numGPScratchRegisters = 1;
        stackOverflowHandler->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                GPRReg fp = params[0].gpr();
                GPRReg scratch = params.gpScratch(0);

                // The check must cover the larger of the FTL frame and the frame
                // that OSR exit would need to materialize.
                unsigned ftlFrameSize = params.proc().frameSize();
                unsigned maxFrameSize = std::max(exitFrameSize, ftlFrameSize);

                jit.addPtr(MacroAssembler::TrustedImm32(-maxFrameSize), fp, scratch);
                MacroAssembler::JumpList stackOverflow;
                // If the frame is larger than the reserved zone, fp - maxFrameSize
                // can wrap around; treat wraparound (scratch > fp) as an overflow.
                if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
                    stackOverflow.append(jit.branchPtr(MacroAssembler::Above, scratch, fp));
                stackOverflow.append(jit.branchPtr(MacroAssembler::Above, addressOfStackLimit, scratch));

                // The overflow path itself is emitted out-of-line.
                params.addLatePath([=] (CCallHelpers& jit) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);

                    stackOverflow.link(&jit);

                    // FIXME: We would not have to do this if the stack check was part of the Air
                    // prologue. Then, we would know that there is no way for the callee-saves to
                    // get clobbered.
                    // https://bugs.webkit.org/show_bug.cgi?id=172456
                    jit.emitRestore(params.proc().calleeSaveRegisterAtOffsetList());

                    jit.store32(
                        MacroAssembler::TrustedImm32(callSiteIndex.bits()),
                        CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
                    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);

                    // Throw the stack-overflow error, then hand control to the
                    // caller frame's exception handler. The two call targets are
                    // linked in the link task below.
                    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
                    jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::argumentGPR1);
                    CCallHelpers::Call throwCall = jit.call(OperationPtrTag);

                    jit.move(CCallHelpers::TrustedImmPtr(vm), GPRInfo::argumentGPR0);
                    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
                    CCallHelpers::Call lookupExceptionHandlerCall = jit.call(OperationPtrTag);
                    jit.jumpToExceptionHandler(*vm);

                    jit.addLinkTask(
                        [=] (LinkBuffer& linkBuffer) {
                            linkBuffer.link(throwCall, FunctionPtr<OperationPtrTag>(operationThrowStackOverflowError));
                            linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr<OperationPtrTag>(lookupExceptionHandlerFromCallerFrame));
                        });
                });
            });

        LBasicBlock firstDFGBasicBlock = lowBlock(m_graph.block(0));

        {
            if (hasMultipleEntrypoints) {
                Vector<LBasicBlock> successors(m_graph.m_numberOfEntrypoints);
                successors[0] = callEntrypointArgumentSpeculations;
                for (unsigned i = 1; i < m_graph.m_numberOfEntrypoints; ++i) {
                    // Currently, the only other entrypoint is an op_catch entrypoint.
                    // We do OSR entry at op_catch, and we prove argument formats before
                    // jumping to FTL code, so we don't need to check argument types here
                    // for these entrypoints.
                    successors[i] = firstDFGBasicBlock;
                }

                m_out.entrySwitch(successors);
                m_out.appendTo(callEntrypointArgumentSpeculations, m_handleExceptions);
            }

            m_node = nullptr;
            m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);

            // Check Arguments. First, record where each argument is available.
            availabilityMap().clear();
            availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
            for (unsigned i = codeBlock()->numParameters(); i--;) {
                availabilityMap().m_locals.argument(i) =
                    Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
            }

            // Speculate on each argument according to the format proved for the
            // main entrypoint (m_argumentFormats[0]); a mismatch OSR-exits with
            // BadType.
            for (unsigned i = codeBlock()->numParameters(); i--;) {
                MethodOfGettingAValueProfile profile(&m_graph.m_profiledBlock->valueProfileForArgument(i));
                VirtualRegister operand = virtualRegisterForArgument(i);
                LValue jsValue = m_out.load64(addressFor(operand));

                switch (m_graph.m_argumentFormats[0][i]) {
                case FlushedInt32:
                    speculate(BadType, jsValueValue(jsValue), profile, isNotInt32(jsValue));
                    break;
                case FlushedBoolean:
                    speculate(BadType, jsValueValue(jsValue), profile, isNotBoolean(jsValue));
                    break;
                case FlushedCell:
                    speculate(BadType, jsValueValue(jsValue), profile, isNotCell(jsValue));
                    break;
                case FlushedJSValue:
                    break;
                default:
                    DFG_CRASH(m_graph, nullptr, "Bad flush format for argument");
                    break;
                }
            }
            m_out.jump(firstDFGBasicBlock);
        }


        // The shared exception-handling block: a patchpoint that jumps to the
        // out-of-line exception handler label recorded in the FTL state.
        m_out.appendTo(m_handleExceptions, firstDFGBasicBlock);
        Box<CCallHelpers::Label> exceptionHandler = state->exceptionHandler;
        m_out.patchpoint(Void)->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
                CCallHelpers::Jump jump = jit.jump();
                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        linkBuffer.link(jump, linkBuffer.locationOf<ExceptionHandlerPtrTag>(*exceptionHandler));
                    });
            });
        m_out.unreachable();

        for (DFG::BasicBlock* block : preOrder)
            compileBlock(block);

        // Make sure everything is decorated. This does a bunch of deferred decorating. This has
        // to happen last because our abstract heaps are generated lazily. They have to be
        // generated lazily because we have an infinite number of numbered, indexed, and
        // absolute heaps. We only become aware of the ones we actually mention while lowering.
        m_heaps.computeRangesAndDecorateInstructions();

        // We create all Phi's up front, but we may then decide not to compile the basic block
        // that would have contained one of them. So this creates orphans, which triggers B3
        // validation failures. Calling this fixes the issue.
        //
        // Note that you should avoid the temptation to make this call conditional upon
        // validation being enabled. B3 makes no guarantees of any kind of correctness when
        // dealing with IR that would have failed validation. For example, it would be valid to
        // write a B3 phase that so aggressively assumes the lack of orphans that it would crash
        // if any orphans were around. We might even have such phases already.
        m_proc.deleteOrphans();

        // We put the blocks into the B3 procedure in a super weird order. Now we reorder them.
        m_out.applyBlockOrder();
    }
422
423private:
424
425 void createPhiVariables()
426 {
427 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
428 DFG::BasicBlock* block = m_graph.block(blockIndex);
429 if (!block)
430 continue;
431 for (unsigned nodeIndex = block->size(); nodeIndex--;) {
432 Node* node = block->at(nodeIndex);
433 if (node->op() != DFG::Phi)
434 continue;
435 LType type;
436 switch (node->flags() & NodeResultMask) {
437 case NodeResultDouble:
438 type = Double;
439 break;
440 case NodeResultInt32:
441 type = Int32;
442 break;
443 case NodeResultInt52:
444 type = Int64;
445 break;
446 case NodeResultBoolean:
447 type = Int32;
448 break;
449 case NodeResultJS:
450 type = Int64;
451 break;
452 default:
453 DFG_CRASH(m_graph, node, "Bad Phi node result type");
454 break;
455 }
456 m_phis.add(node, m_proc.add<Value>(B3::Phi, type, Origin(node)));
457 }
458 }
459 }
460
461 void compileBlock(DFG::BasicBlock* block)
462 {
463 if (!block)
464 return;
465
466 if (verboseCompilationEnabled())
467 dataLog("Compiling block ", *block, "\n");
468
469 m_highBlock = block;
470
471 // Make sure that any blocks created while lowering code in the high block have the frequency of
472 // the high block. This is appropriate because B3 doesn't need precise frequencies. It just needs
473 // something roughly approximate for things like register allocation.
474 m_out.setFrequency(m_highBlock->executionCount);
475
476 LBasicBlock lowBlock = m_blocks.get(m_highBlock);
477
478 m_nextHighBlock = 0;
479 for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
480 m_nextHighBlock = m_graph.block(nextBlockIndex);
481 if (m_nextHighBlock)
482 break;
483 }
484 m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0;
485
486 // All of this effort to find the next block gives us the ability to keep the
487 // generated IR in roughly program order. This ought not affect the performance
488 // of the generated code (since we expect B3 to reorder things) but it will
489 // make IR dumps easier to read.
490 m_out.appendTo(lowBlock, m_nextLowBlock);
491
492 if (Options::ftlCrashes())
493 m_out.trap();
494
495 if (!m_highBlock->cfaHasVisited) {
496 if (verboseCompilationEnabled())
497 dataLog("Bailing because CFA didn't reach.\n");
498 crash(m_highBlock, nullptr);
499 return;
500 }
501
502 m_aiCheckedNodes.clear();
503
504 m_availabilityCalculator.beginBlock(m_highBlock);
505
506 m_state.reset();
507 m_state.beginBasicBlock(m_highBlock);
508
509 for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) {
510 if (!compileNode(m_nodeIndex))
511 break;
512 }
513 }
514
515 void safelyInvalidateAfterTermination()
516 {
517 if (verboseCompilationEnabled())
518 dataLog("Bailing.\n");
519 crash();
520
521 // Invalidate dominated blocks. Under normal circumstances we would expect
522 // them to be invalidated already. But you can have the CFA become more
523 // precise over time because the structures of objects change on the main
524 // thread. Failing to do this would result in weird crashes due to a value
525 // being used but not defined. Race conditions FTW!
526 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
527 DFG::BasicBlock* target = m_graph.block(blockIndex);
528 if (!target)
529 continue;
530 if (m_graph.m_ssaDominators->dominates(m_highBlock, target)) {
531 if (verboseCompilationEnabled())
532 dataLog("Block ", *target, " will bail also.\n");
533 target->cfaHasVisited = false;
534 }
535 }
536 }
537
    // Debug aid (enabled via Options::validateAbstractInterpreterState()):
    // before lowering `node`, emit runtime probes checking that the concrete
    // value of every node live into `node` is consistent with the abstract
    // interpreter's claim about it. On mismatch the probe dumps the graph and
    // crashes.
    void validateAIState(Node* node)
    {
        // Dump the graph once and cache it; it is printed if validation fails.
        if (!m_graphDump) {
            StringPrintStream out;
            m_graph.dump(out);
            m_graphDump = out.toString();
        }

        // Bookkeeping/constant ops: nothing to validate at these.
        switch (node->op()) {
        case MovHint:
        case ZombieHint:
        case JSConstant:
        case LazyJSConstant:
        case DoubleConstant:
        case Int52Constant:
        case GetStack:
        case PutStack:
        case KillStack:
        case ExitOK:
            return;
        default:
            break;
        }

        // Before we execute node.
        NodeSet& live = m_liveInToNode.find(node)->value;
        unsigned highParentIndex = node->index();
        {
            // Deterministically sample which nodes get validated: hash the node
            // index and compare against the configured probability threshold.
            uint64_t hash = WTF::intHash(highParentIndex);
            if (hash >= static_cast<uint64_t>((static_cast<double>(std::numeric_limits<unsigned>::max()) + 1) * Options::validateAbstractInterpreterStateProbability()))
                return;
        }

        // NOTE: the loop variable deliberately shadows the `node` parameter;
        // from here on we validate each live node in turn.
        for (Node* node : live) {
            if (node->isPhantomAllocation())
                continue;

            // CheckInBounds results are deliberately excluded from validation.
            if (node->op() == CheckInBounds)
                continue;

            AbstractValue value = m_interpreter.forNode(node);
            {
                // Skip nodes already validated with the same abstract value —
                // except when the value may be a cell (SpecCell), which is
                // re-checked even if its abstract value is unchanged.
                auto iter = m_aiCheckedNodes.find(node);
                if (iter != m_aiCheckedNodes.end()) {
                    AbstractValue checkedValue = iter->value;
                    if (checkedValue == value) {
                        if (!(value.m_type & SpecCell))
                            continue;
                    }
                }
                m_aiCheckedNodes.set(node, value);
            }

            // Materialize the node's value in a form matching its result kind.
            // Nodes without a JS/double/Int52 result are skipped.
            FlushFormat flushFormat;
            LValue input;
            if (node->hasJSResult()) {
                input = lowJSValue(Edge(node, UntypedUse));
                flushFormat = FlushedJSValue;
            } else if (node->hasDoubleResult()) {
                input = lowDouble(Edge(node, DoubleRepUse));
                flushFormat = FlushedDouble;
            } else if (node->hasInt52Result()) {
                input = strictInt52ToJSValue(lowStrictInt52(Edge(node, Int52RepUse)));
                flushFormat = FlushedInt52;
            } else
                continue;

            unsigned highChildIndex = node->index();

            String graphDump = m_graphDump;

            // Emit a patchpoint whose generator installs a probe inspecting the
            // register that holds the value at execution time.
            PatchpointValue* patchpoint = m_out.patchpoint(Void);
            patchpoint->effects = Effects::none();
            patchpoint->effects.writesLocalState = true;
            patchpoint->appendSomeRegister(input);
            patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                GPRReg reg = InvalidGPRReg;
                FPRReg fpReg = InvalidFPRReg;
                if (flushFormat == FlushedDouble)
                    fpReg = params[0].fpr();
                else
                    reg = params[0].gpr();
                jit.probe([=] (Probe::Context& context) {
                    JSValue input;
                    double doubleInput;

                    // Print the failing node pair, the AI's expectation, the
                    // observed value, and the cached graph dump, then crash.
                    auto dumpAndCrash = [&] {
                        dataLogLn("Validation failed at node: @", highParentIndex);
                        dataLogLn("Failed validating live value: @", highChildIndex);
                        dataLogLn();
                        dataLogLn("Expected AI value = ", value);
                        if (flushFormat != FlushedDouble)
                            dataLogLn("Unexpected value = ", input);
                        else
                            dataLogLn("Unexpected double value = ", doubleInput);
                        dataLogLn();
                        dataLogLn(graphDump);
                        CRASH();
                    };

                    if (flushFormat == FlushedDouble) {
                        // Classify the runtime double (number, impure NaN, or
                        // pure NaN) and verify AI allowed that speculated type.
                        doubleInput = context.fpr(fpReg);
                        SpeculatedType type;
                        if (!std::isnan(doubleInput))
                            type = speculationFromValue(jsDoubleNumber(doubleInput));
                        else if (isImpureNaN(doubleInput))
                            type = SpecDoubleImpureNaN;
                        else
                            type = SpecDoublePureNaN;

                        if (!value.couldBeType(type))
                            dumpAndCrash();
                    } else {
                        // JSValue and Int52 results are checked via the OSR
                        // entry validation helper; Int52 is first rebased to a
                        // double-number JSValue.
                        input = JSValue::decode(context.gpr(reg));
                        if (flushFormat == FlushedInt52) {
                            RELEASE_ASSERT(input.isAnyInt());
                            input = jsDoubleNumber(input.asAnyInt());
                        }
                        if (!value.validateOSREntryValue(input, flushFormat))
                            dumpAndCrash();
                    }

                });
            });
        }
    }
664
665 bool compileNode(unsigned nodeIndex)
666 {
667 if (!m_state.isValid()) {
668 safelyInvalidateAfterTermination();
669 return false;
670 }
671
672 m_node = m_highBlock->at(nodeIndex);
673 m_origin = m_node->origin;
674 m_out.setOrigin(m_node);
675
676 if (verboseCompilationEnabled())
677 dataLog("Lowering ", m_node, "\n");
678
679 m_interpreter.startExecuting();
680 m_interpreter.executeKnownEdgeTypes(m_node);
681
682 if (Options::validateAbstractInterpreterState())
683 validateAIState(m_node);
684
685 if (validateDFGDoesGC) {
686 bool expectDoesGC = doesGC(m_graph, m_node);
687 m_out.store(m_out.constBool(expectDoesGC), m_out.absolute(vm().heap.addressOfExpectDoesGC()));
688 }
689
690 switch (m_node->op()) {
691 case DFG::Upsilon:
692 compileUpsilon();
693 break;
694 case DFG::Phi:
695 compilePhi();
696 break;
697 case JSConstant:
698 break;
699 case DoubleConstant:
700 compileDoubleConstant();
701 break;
702 case Int52Constant:
703 compileInt52Constant();
704 break;
705 case LazyJSConstant:
706 compileLazyJSConstant();
707 break;
708 case DoubleRep:
709 compileDoubleRep();
710 break;
711 case DoubleAsInt32:
712 compileDoubleAsInt32();
713 break;
714 case DFG::ValueRep:
715 compileValueRep();
716 break;
717 case Int52Rep:
718 compileInt52Rep();
719 break;
720 case ValueToInt32:
721 compileValueToInt32();
722 break;
723 case BooleanToNumber:
724 compileBooleanToNumber();
725 break;
726 case ExtractOSREntryLocal:
727 compileExtractOSREntryLocal();
728 break;
729 case ExtractCatchLocal:
730 compileExtractCatchLocal();
731 break;
732 case ClearCatchLocals:
733 compileClearCatchLocals();
734 break;
735 case GetStack:
736 compileGetStack();
737 break;
738 case PutStack:
739 compilePutStack();
740 break;
741 case DFG::Check:
742 case CheckVarargs:
743 compileNoOp();
744 break;
745 case ToObject:
746 case CallObjectConstructor:
747 compileToObjectOrCallObjectConstructor();
748 break;
749 case ToThis:
750 compileToThis();
751 break;
752 case ValueNegate:
753 compileValueNegate();
754 break;
755 case ValueAdd:
756 compileValueAdd();
757 break;
758 case ValueSub:
759 compileValueSub();
760 break;
761 case ValueMul:
762 compileValueMul();
763 break;
764 case StrCat:
765 compileStrCat();
766 break;
767 case ArithAdd:
768 case ArithSub:
769 compileArithAddOrSub();
770 break;
771 case ArithClz32:
772 compileArithClz32();
773 break;
774 case ArithMul:
775 compileArithMul();
776 break;
777 case ValueDiv:
778 compileValueDiv();
779 break;
780 case ArithDiv:
781 compileArithDiv();
782 break;
783 case ValueMod:
784 compileValueMod();
785 break;
786 case ArithMod:
787 compileArithMod();
788 break;
789 case ArithMin:
790 case ArithMax:
791 compileArithMinOrMax();
792 break;
793 case ArithAbs:
794 compileArithAbs();
795 break;
796 case ValuePow:
797 compileValuePow();
798 break;
799 case ArithPow:
800 compileArithPow();
801 break;
802 case ArithRandom:
803 compileArithRandom();
804 break;
805 case ArithRound:
806 compileArithRound();
807 break;
808 case ArithFloor:
809 compileArithFloor();
810 break;
811 case ArithCeil:
812 compileArithCeil();
813 break;
814 case ArithTrunc:
815 compileArithTrunc();
816 break;
817 case ArithSqrt:
818 compileArithSqrt();
819 break;
820 case ArithFRound:
821 compileArithFRound();
822 break;
823 case ArithNegate:
824 compileArithNegate();
825 break;
826 case ArithUnary:
827 compileArithUnary();
828 break;
829 case ValueBitNot:
830 compileValueBitNot();
831 break;
832 case ArithBitNot:
833 compileArithBitNot();
834 break;
835 case ValueBitAnd:
836 compileValueBitAnd();
837 break;
838 case ArithBitAnd:
839 compileArithBitAnd();
840 break;
841 case ValueBitOr:
842 compileValueBitOr();
843 break;
844 case ArithBitOr:
845 compileArithBitOr();
846 break;
847 case ArithBitXor:
848 compileArithBitXor();
849 break;
850 case ValueBitXor:
851 compileValueBitXor();
852 break;
853 case BitRShift:
854 compileBitRShift();
855 break;
856 case BitLShift:
857 compileBitLShift();
858 break;
859 case BitURShift:
860 compileBitURShift();
861 break;
862 case UInt32ToNumber:
863 compileUInt32ToNumber();
864 break;
865 case CheckStructure:
866 compileCheckStructure();
867 break;
868 case CheckStructureOrEmpty:
869 compileCheckStructureOrEmpty();
870 break;
871 case CheckCell:
872 compileCheckCell();
873 break;
874 case CheckNotEmpty:
875 compileCheckNotEmpty();
876 break;
877 case AssertNotEmpty:
878 compileAssertNotEmpty();
879 break;
880 case CheckBadCell:
881 compileCheckBadCell();
882 break;
883 case CheckStringIdent:
884 compileCheckStringIdent();
885 break;
886 case GetExecutable:
887 compileGetExecutable();
888 break;
889 case Arrayify:
890 case ArrayifyToStructure:
891 compileArrayify();
892 break;
893 case PutStructure:
894 compilePutStructure();
895 break;
896 case TryGetById:
897 compileGetById(AccessType::TryGet);
898 break;
899 case GetById:
900 case GetByIdFlush:
901 compileGetById(AccessType::Get);
902 break;
903 case GetByIdWithThis:
904 compileGetByIdWithThis();
905 break;
906 case GetByIdDirect:
907 case GetByIdDirectFlush:
908 compileGetById(AccessType::GetDirect);
909 break;
910 case InById:
911 compileInById();
912 break;
913 case InByVal:
914 compileInByVal();
915 break;
916 case HasOwnProperty:
917 compileHasOwnProperty();
918 break;
919 case PutById:
920 case PutByIdDirect:
921 case PutByIdFlush:
922 compilePutById();
923 break;
924 case PutByIdWithThis:
925 compilePutByIdWithThis();
926 break;
927 case PutGetterById:
928 case PutSetterById:
929 compilePutAccessorById();
930 break;
931 case PutGetterSetterById:
932 compilePutGetterSetterById();
933 break;
934 case PutGetterByVal:
935 case PutSetterByVal:
936 compilePutAccessorByVal();
937 break;
938 case DeleteById:
939 compileDeleteById();
940 break;
941 case DeleteByVal:
942 compileDeleteByVal();
943 break;
944 case GetButterfly:
945 compileGetButterfly();
946 break;
947 case ConstantStoragePointer:
948 compileConstantStoragePointer();
949 break;
950 case GetIndexedPropertyStorage:
951 compileGetIndexedPropertyStorage();
952 break;
953 case CheckArray:
954 compileCheckArray();
955 break;
956 case GetArrayLength:
957 compileGetArrayLength();
958 break;
959 case GetVectorLength:
960 compileGetVectorLength();
961 break;
962 case CheckInBounds:
963 compileCheckInBounds();
964 break;
965 case GetByVal:
966 compileGetByVal();
967 break;
968 case GetMyArgumentByVal:
969 case GetMyArgumentByValOutOfBounds:
970 compileGetMyArgumentByVal();
971 break;
972 case GetByValWithThis:
973 compileGetByValWithThis();
974 break;
975 case PutByVal:
976 case PutByValAlias:
977 case PutByValDirect:
978 compilePutByVal();
979 break;
980 case PutByValWithThis:
981 compilePutByValWithThis();
982 break;
983 case AtomicsAdd:
984 case AtomicsAnd:
985 case AtomicsCompareExchange:
986 case AtomicsExchange:
987 case AtomicsLoad:
988 case AtomicsOr:
989 case AtomicsStore:
990 case AtomicsSub:
991 case AtomicsXor:
992 compileAtomicsReadModifyWrite();
993 break;
994 case AtomicsIsLockFree:
995 compileAtomicsIsLockFree();
996 break;
997 case DefineDataProperty:
998 compileDefineDataProperty();
999 break;
1000 case DefineAccessorProperty:
1001 compileDefineAccessorProperty();
1002 break;
1003 case ArrayPush:
1004 compileArrayPush();
1005 break;
1006 case ArrayPop:
1007 compileArrayPop();
1008 break;
1009 case ArraySlice:
1010 compileArraySlice();
1011 break;
1012 case ArrayIndexOf:
1013 compileArrayIndexOf();
1014 break;
1015 case CreateActivation:
1016 compileCreateActivation();
1017 break;
1018 case PushWithScope:
1019 compilePushWithScope();
1020 break;
1021 case NewFunction:
1022 case NewGeneratorFunction:
1023 case NewAsyncGeneratorFunction:
1024 case NewAsyncFunction:
1025 compileNewFunction();
1026 break;
1027 case CreateDirectArguments:
1028 compileCreateDirectArguments();
1029 break;
1030 case CreateScopedArguments:
1031 compileCreateScopedArguments();
1032 break;
1033 case CreateClonedArguments:
1034 compileCreateClonedArguments();
1035 break;
1036 case ObjectCreate:
1037 compileObjectCreate();
1038 break;
1039 case ObjectKeys:
1040 compileObjectKeys();
1041 break;
1042 case NewObject:
1043 compileNewObject();
1044 break;
1045 case NewStringObject:
1046 compileNewStringObject();
1047 break;
1048 case NewSymbol:
1049 compileNewSymbol();
1050 break;
1051 case NewArray:
1052 compileNewArray();
1053 break;
1054 case NewArrayWithSpread:
1055 compileNewArrayWithSpread();
1056 break;
1057 case CreateThis:
1058 compileCreateThis();
1059 break;
1060 case Spread:
1061 compileSpread();
1062 break;
1063 case NewArrayBuffer:
1064 compileNewArrayBuffer();
1065 break;
1066 case NewArrayWithSize:
1067 compileNewArrayWithSize();
1068 break;
1069 case NewTypedArray:
1070 compileNewTypedArray();
1071 break;
1072 case GetTypedArrayByteOffset:
1073 compileGetTypedArrayByteOffset();
1074 break;
1075 case GetPrototypeOf:
1076 compileGetPrototypeOf();
1077 break;
1078 case AllocatePropertyStorage:
1079 compileAllocatePropertyStorage();
1080 break;
1081 case ReallocatePropertyStorage:
1082 compileReallocatePropertyStorage();
1083 break;
1084 case NukeStructureAndSetButterfly:
1085 compileNukeStructureAndSetButterfly();
1086 break;
1087 case ToNumber:
1088 compileToNumber();
1089 break;
1090 case ToString:
1091 case CallStringConstructor:
1092 case StringValueOf:
1093 compileToStringOrCallStringConstructorOrStringValueOf();
1094 break;
1095 case ToPrimitive:
1096 compileToPrimitive();
1097 break;
1098 case MakeRope:
1099 compileMakeRope();
1100 break;
1101 case StringCharAt:
1102 compileStringCharAt();
1103 break;
1104 case StringCharCodeAt:
1105 compileStringCharCodeAt();
1106 break;
1107 case StringFromCharCode:
1108 compileStringFromCharCode();
1109 break;
1110 case GetByOffset:
1111 case GetGetterSetterByOffset:
1112 compileGetByOffset();
1113 break;
1114 case GetGetter:
1115 compileGetGetter();
1116 break;
1117 case GetSetter:
1118 compileGetSetter();
1119 break;
1120 case MultiGetByOffset:
1121 compileMultiGetByOffset();
1122 break;
1123 case PutByOffset:
1124 compilePutByOffset();
1125 break;
1126 case MultiPutByOffset:
1127 compileMultiPutByOffset();
1128 break;
1129 case MatchStructure:
1130 compileMatchStructure();
1131 break;
1132 case GetGlobalVar:
1133 case GetGlobalLexicalVariable:
1134 compileGetGlobalVariable();
1135 break;
1136 case PutGlobalVariable:
1137 compilePutGlobalVariable();
1138 break;
1139 case NotifyWrite:
1140 compileNotifyWrite();
1141 break;
1142 case GetCallee:
1143 compileGetCallee();
1144 break;
1145 case SetCallee:
1146 compileSetCallee();
1147 break;
1148 case GetArgumentCountIncludingThis:
1149 compileGetArgumentCountIncludingThis();
1150 break;
1151 case SetArgumentCountIncludingThis:
1152 compileSetArgumentCountIncludingThis();
1153 break;
1154 case GetScope:
1155 compileGetScope();
1156 break;
1157 case SkipScope:
1158 compileSkipScope();
1159 break;
1160 case GetGlobalObject:
1161 compileGetGlobalObject();
1162 break;
1163 case GetGlobalThis:
1164 compileGetGlobalThis();
1165 break;
1166 case GetClosureVar:
1167 compileGetClosureVar();
1168 break;
1169 case PutClosureVar:
1170 compilePutClosureVar();
1171 break;
1172 case GetFromArguments:
1173 compileGetFromArguments();
1174 break;
1175 case PutToArguments:
1176 compilePutToArguments();
1177 break;
1178 case GetArgument:
1179 compileGetArgument();
1180 break;
1181 case CompareEq:
1182 compileCompareEq();
1183 break;
1184 case CompareStrictEq:
1185 compileCompareStrictEq();
1186 break;
1187 case CompareLess:
1188 compileCompareLess();
1189 break;
1190 case CompareLessEq:
1191 compileCompareLessEq();
1192 break;
1193 case CompareGreater:
1194 compileCompareGreater();
1195 break;
1196 case CompareGreaterEq:
1197 compileCompareGreaterEq();
1198 break;
1199 case CompareBelow:
1200 compileCompareBelow();
1201 break;
1202 case CompareBelowEq:
1203 compileCompareBelowEq();
1204 break;
1205 case CompareEqPtr:
1206 compileCompareEqPtr();
1207 break;
1208 case SameValue:
1209 compileSameValue();
1210 break;
1211 case LogicalNot:
1212 compileLogicalNot();
1213 break;
1214 case Call:
1215 case TailCallInlinedCaller:
1216 case Construct:
1217 compileCallOrConstruct();
1218 break;
1219 case DirectCall:
1220 case DirectTailCallInlinedCaller:
1221 case DirectConstruct:
1222 case DirectTailCall:
1223 compileDirectCallOrConstruct();
1224 break;
1225 case TailCall:
1226 compileTailCall();
1227 break;
1228 case CallVarargs:
1229 case CallForwardVarargs:
1230 case TailCallVarargs:
1231 case TailCallVarargsInlinedCaller:
1232 case TailCallForwardVarargs:
1233 case TailCallForwardVarargsInlinedCaller:
1234 case ConstructVarargs:
1235 case ConstructForwardVarargs:
1236 compileCallOrConstructVarargs();
1237 break;
1238 case CallEval:
1239 compileCallEval();
1240 break;
1241 case LoadVarargs:
1242 compileLoadVarargs();
1243 break;
1244 case ForwardVarargs:
1245 compileForwardVarargs();
1246 break;
1247 case DFG::Jump:
1248 compileJump();
1249 break;
1250 case DFG::Branch:
1251 compileBranch();
1252 break;
1253 case DFG::Switch:
1254 compileSwitch();
1255 break;
1256 case DFG::EntrySwitch:
1257 compileEntrySwitch();
1258 break;
1259 case DFG::Return:
1260 compileReturn();
1261 break;
1262 case ForceOSRExit:
1263 compileForceOSRExit();
1264 break;
1265 case CPUIntrinsic:
1266#if CPU(X86_64)
1267 compileCPUIntrinsic();
1268#else
1269 RELEASE_ASSERT_NOT_REACHED();
1270#endif
1271 break;
1272 case Throw:
1273 compileThrow();
1274 break;
1275 case ThrowStaticError:
1276 compileThrowStaticError();
1277 break;
1278 case InvalidationPoint:
1279 compileInvalidationPoint();
1280 break;
1281 case IsEmpty:
1282 compileIsEmpty();
1283 break;
1284 case IsUndefined:
1285 compileIsUndefined();
1286 break;
1287 case IsUndefinedOrNull:
1288 compileIsUndefinedOrNull();
1289 break;
1290 case IsBoolean:
1291 compileIsBoolean();
1292 break;
1293 case IsNumber:
1294 compileIsNumber();
1295 break;
1296 case NumberIsInteger:
1297 compileNumberIsInteger();
1298 break;
1299 case IsCellWithType:
1300 compileIsCellWithType();
1301 break;
1302 case MapHash:
1303 compileMapHash();
1304 break;
1305 case NormalizeMapKey:
1306 compileNormalizeMapKey();
1307 break;
1308 case GetMapBucket:
1309 compileGetMapBucket();
1310 break;
1311 case GetMapBucketHead:
1312 compileGetMapBucketHead();
1313 break;
1314 case GetMapBucketNext:
1315 compileGetMapBucketNext();
1316 break;
1317 case LoadKeyFromMapBucket:
1318 compileLoadKeyFromMapBucket();
1319 break;
1320 case LoadValueFromMapBucket:
1321 compileLoadValueFromMapBucket();
1322 break;
1323 case ExtractValueFromWeakMapGet:
1324 compileExtractValueFromWeakMapGet();
1325 break;
1326 case SetAdd:
1327 compileSetAdd();
1328 break;
1329 case MapSet:
1330 compileMapSet();
1331 break;
1332 case WeakMapGet:
1333 compileWeakMapGet();
1334 break;
1335 case WeakSetAdd:
1336 compileWeakSetAdd();
1337 break;
1338 case WeakMapSet:
1339 compileWeakMapSet();
1340 break;
1341 case IsObject:
1342 compileIsObject();
1343 break;
1344 case IsObjectOrNull:
1345 compileIsObjectOrNull();
1346 break;
1347 case IsFunction:
1348 compileIsFunction();
1349 break;
1350 case IsTypedArrayView:
1351 compileIsTypedArrayView();
1352 break;
1353 case ParseInt:
1354 compileParseInt();
1355 break;
1356 case TypeOf:
1357 compileTypeOf();
1358 break;
1359 case CheckTypeInfoFlags:
1360 compileCheckTypeInfoFlags();
1361 break;
1362 case OverridesHasInstance:
1363 compileOverridesHasInstance();
1364 break;
1365 case InstanceOf:
1366 compileInstanceOf();
1367 break;
1368 case InstanceOfCustom:
1369 compileInstanceOfCustom();
1370 break;
1371 case CountExecution:
1372 compileCountExecution();
1373 break;
1374 case SuperSamplerBegin:
1375 compileSuperSamplerBegin();
1376 break;
1377 case SuperSamplerEnd:
1378 compileSuperSamplerEnd();
1379 break;
1380 case StoreBarrier:
1381 case FencedStoreBarrier:
1382 compileStoreBarrier();
1383 break;
1384 case HasIndexedProperty:
1385 compileHasIndexedProperty();
1386 break;
1387 case HasGenericProperty:
1388 compileHasGenericProperty();
1389 break;
1390 case HasStructureProperty:
1391 compileHasStructureProperty();
1392 break;
1393 case GetDirectPname:
1394 compileGetDirectPname();
1395 break;
1396 case GetEnumerableLength:
1397 compileGetEnumerableLength();
1398 break;
1399 case GetPropertyEnumerator:
1400 compileGetPropertyEnumerator();
1401 break;
1402 case GetEnumeratorStructurePname:
1403 compileGetEnumeratorStructurePname();
1404 break;
1405 case GetEnumeratorGenericPname:
1406 compileGetEnumeratorGenericPname();
1407 break;
1408 case ToIndexString:
1409 compileToIndexString();
1410 break;
1411 case CheckStructureImmediate:
1412 compileCheckStructureImmediate();
1413 break;
1414 case MaterializeNewObject:
1415 compileMaterializeNewObject();
1416 break;
1417 case MaterializeCreateActivation:
1418 compileMaterializeCreateActivation();
1419 break;
1420 case CheckTraps:
1421 compileCheckTraps();
1422 break;
1423 case CreateRest:
1424 compileCreateRest();
1425 break;
1426 case GetRestLength:
1427 compileGetRestLength();
1428 break;
1429 case RegExpExec:
1430 compileRegExpExec();
1431 break;
1432 case RegExpExecNonGlobalOrSticky:
1433 compileRegExpExecNonGlobalOrSticky();
1434 break;
1435 case RegExpTest:
1436 compileRegExpTest();
1437 break;
1438 case RegExpMatchFast:
1439 compileRegExpMatchFast();
1440 break;
1441 case RegExpMatchFastGlobal:
1442 compileRegExpMatchFastGlobal();
1443 break;
1444 case NewRegexp:
1445 compileNewRegexp();
1446 break;
1447 case SetFunctionName:
1448 compileSetFunctionName();
1449 break;
1450 case StringReplace:
1451 case StringReplaceRegExp:
1452 compileStringReplace();
1453 break;
1454 case GetRegExpObjectLastIndex:
1455 compileGetRegExpObjectLastIndex();
1456 break;
1457 case SetRegExpObjectLastIndex:
1458 compileSetRegExpObjectLastIndex();
1459 break;
1460 case LogShadowChickenPrologue:
1461 compileLogShadowChickenPrologue();
1462 break;
1463 case LogShadowChickenTail:
1464 compileLogShadowChickenTail();
1465 break;
1466 case RecordRegExpCachedResult:
1467 compileRecordRegExpCachedResult();
1468 break;
1469 case ResolveScopeForHoistingFuncDeclInEval:
1470 compileResolveScopeForHoistingFuncDeclInEval();
1471 break;
1472 case ResolveScope:
1473 compileResolveScope();
1474 break;
1475 case GetDynamicVar:
1476 compileGetDynamicVar();
1477 break;
1478 case PutDynamicVar:
1479 compilePutDynamicVar();
1480 break;
1481 case Unreachable:
1482 compileUnreachable();
1483 break;
1484 case StringSlice:
1485 compileStringSlice();
1486 break;
1487 case ToLowerCase:
1488 compileToLowerCase();
1489 break;
1490 case NumberToStringWithRadix:
1491 compileNumberToStringWithRadix();
1492 break;
1493 case NumberToStringWithValidRadixConstant:
1494 compileNumberToStringWithValidRadixConstant();
1495 break;
1496 case CheckSubClass:
1497 compileCheckSubClass();
1498 break;
1499 case CallDOM:
1500 compileCallDOM();
1501 break;
1502 case CallDOMGetter:
1503 compileCallDOMGetter();
1504 break;
1505 case FilterCallLinkStatus:
1506 case FilterGetByIdStatus:
1507 case FilterPutByIdStatus:
1508 case FilterInByIdStatus:
1509 compileFilterICStatus();
1510 break;
1511 case DataViewGetInt:
1512 case DataViewGetFloat:
1513 compileDataViewGet();
1514 break;
1515 case DataViewSet:
1516 compileDataViewSet();
1517 break;
1518
1519 case PhantomLocal:
1520 case LoopHint:
1521 case MovHint:
1522 case ZombieHint:
1523 case ExitOK:
1524 case PhantomNewObject:
1525 case PhantomNewFunction:
1526 case PhantomNewGeneratorFunction:
1527 case PhantomNewAsyncGeneratorFunction:
1528 case PhantomNewAsyncFunction:
1529 case PhantomCreateActivation:
1530 case PhantomDirectArguments:
1531 case PhantomCreateRest:
1532 case PhantomSpread:
1533 case PhantomNewArrayWithSpread:
1534 case PhantomNewArrayBuffer:
1535 case PhantomClonedArguments:
1536 case PhantomNewRegexp:
1537 case PutHint:
1538 case BottomValue:
1539 case KillStack:
1540 case InitializeEntrypointArguments:
1541 break;
1542 default:
1543 DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
1544 break;
1545 }
1546
1547 if (m_node->isTerminal())
1548 return false;
1549
1550 if (!m_state.isValid()) {
1551 safelyInvalidateAfterTermination();
1552 return false;
1553 }
1554
1555 m_availabilityCalculator.executeNode(m_node);
1556 m_interpreter.executeEffects(nodeIndex);
1557
1558 return true;
1559 }
1560
1561 void compileUpsilon()
1562 {
1563 LValue upsilonValue = nullptr;
1564 switch (m_node->child1().useKind()) {
1565 case DoubleRepUse:
1566 upsilonValue = lowDouble(m_node->child1());
1567 break;
1568 case Int32Use:
1569 case KnownInt32Use:
1570 upsilonValue = lowInt32(m_node->child1());
1571 break;
1572 case Int52RepUse:
1573 upsilonValue = lowInt52(m_node->child1());
1574 break;
1575 case BooleanUse:
1576 case KnownBooleanUse:
1577 upsilonValue = lowBoolean(m_node->child1());
1578 break;
1579 case CellUse:
1580 case KnownCellUse:
1581 upsilonValue = lowCell(m_node->child1());
1582 break;
1583 case UntypedUse:
1584 upsilonValue = lowJSValue(m_node->child1());
1585 break;
1586 default:
1587 DFG_CRASH(m_graph, m_node, "Bad use kind");
1588 break;
1589 }
1590 ValueFromBlock upsilon = m_out.anchor(upsilonValue);
1591 LValue phiNode = m_phis.get(m_node->phi());
1592 m_out.addIncomingToPhi(phiNode, upsilon);
1593 }
1594
1595 void compilePhi()
1596 {
1597 LValue phi = m_phis.get(m_node);
1598 m_out.m_block->append(phi);
1599
1600 switch (m_node->flags() & NodeResultMask) {
1601 case NodeResultDouble:
1602 setDouble(phi);
1603 break;
1604 case NodeResultInt32:
1605 setInt32(phi);
1606 break;
1607 case NodeResultInt52:
1608 setInt52(phi);
1609 break;
1610 case NodeResultBoolean:
1611 setBoolean(phi);
1612 break;
1613 case NodeResultJS:
1614 setJSValue(phi);
1615 break;
1616 default:
1617 DFG_CRASH(m_graph, m_node, "Bad result type");
1618 break;
1619 }
1620 }
1621
1622 void compileDoubleConstant()
1623 {
1624 setDouble(m_out.constDouble(m_node->asNumber()));
1625 }
1626
1627 void compileInt52Constant()
1628 {
1629 int64_t value = m_node->asAnyInt();
1630
1631 setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
1632 setStrictInt52(m_out.constInt64(value));
1633 }
1634
1635 void compileLazyJSConstant()
1636 {
1637 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1638 LazyJSValue value = m_node->lazyJSValue();
1639 patchpoint->setGenerator(
1640 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1641 value.emit(jit, JSValueRegs(params[0].gpr()));
1642 });
1643 patchpoint->effects = Effects::none();
1644 setJSValue(patchpoint);
1645 }
1646
    void compileDoubleRep()
    {
        // Produce an unboxed double from child1, dispatching on use kind.
        switch (m_node->child1().useKind()) {
        case RealNumberUse: {
            LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);

            LValue doubleValue = unboxDouble(value);

            LBasicBlock intCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // If unboxing produced a non-NaN double, the operand really was a
            // boxed double and we are done. A NaN result sends us to the int
            // path, where a type check rejects anything that is not an int32.
            ValueFromBlock fastResult = m_out.anchor(doubleValue);
            m_out.branch(
                m_out.doubleEqual(doubleValue, doubleValue),
                usually(continuation), rarely(intCase));

            LBasicBlock lastNext = m_out.appendTo(intCase, continuation);

            // Speculation check: OSR exit unless the value is an int32.
            FTL_TYPE_CHECK(
                jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
                isNotInt32(value, provenType(m_node->child1()) & ~SpecDoubleReal));
            ValueFromBlock slowResult = m_out.anchor(m_out.intToDouble(unboxInt32(value)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);

            setDouble(m_out.phi(Double, fastResult, slowResult));
            return;
        }

        case NotCellUse:
        case NumberUse: {
            // NotCellUse additionally converts undefined, null and booleans;
            // NumberUse speculates that the operand is already a number.
            bool shouldConvertNonNumber = m_node->child1().useKind() == NotCellUse;

            LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);

            LBasicBlock intCase = m_out.newBlock();
            LBasicBlock doubleTesting = m_out.newBlock();
            LBasicBlock doubleCase = m_out.newBlock();
            LBasicBlock nonDoubleCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isNotInt32(value, provenType(m_node->child1())),
                unsure(doubleTesting), unsure(intCase));

            LBasicBlock lastNext = m_out.appendTo(intCase, doubleTesting);

            // int32 operand: convert it to a double directly.
            ValueFromBlock intToDouble = m_out.anchor(
                m_out.intToDouble(unboxInt32(value)));
            m_out.jump(continuation);

            m_out.appendTo(doubleTesting, doubleCase);
            LValue valueIsNumber = isNumber(value, provenType(m_node->child1()));
            m_out.branch(valueIsNumber, usually(doubleCase), rarely(nonDoubleCase));

            // Boxed double operand: unbox it.
            m_out.appendTo(doubleCase, nonDoubleCase);
            ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(value));
            m_out.jump(continuation);

            if (shouldConvertNonNumber) {
                // Chain of tag compares: undefined -> NaN, null -> 0,
                // true -> 1, false -> 0. Any other non-number (i.e. a cell)
                // fails the final type check and OSR exits.
                LBasicBlock undefinedCase = m_out.newBlock();
                LBasicBlock testNullCase = m_out.newBlock();
                LBasicBlock nullCase = m_out.newBlock();
                LBasicBlock testBooleanTrueCase = m_out.newBlock();
                LBasicBlock convertBooleanTrueCase = m_out.newBlock();
                LBasicBlock convertBooleanFalseCase = m_out.newBlock();

                m_out.appendTo(nonDoubleCase, undefinedCase);
                LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(ValueUndefined));
                m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase));

                m_out.appendTo(undefinedCase, testNullCase);
                ValueFromBlock convertedUndefined = m_out.anchor(m_out.constDouble(PNaN));
                m_out.jump(continuation);

                m_out.appendTo(testNullCase, nullCase);
                LValue valueIsNull = m_out.equal(value, m_out.constInt64(ValueNull));
                m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase));

                m_out.appendTo(nullCase, testBooleanTrueCase);
                ValueFromBlock convertedNull = m_out.anchor(m_out.constDouble(0));
                m_out.jump(continuation);

                m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase);
                LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(ValueTrue));
                m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase));

                m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase);
                ValueFromBlock convertedTrue = m_out.anchor(m_out.constDouble(1));
                m_out.jump(continuation);

                m_out.appendTo(convertBooleanFalseCase, continuation);

                // If it isn't false either, only a cell remains; OSR exit.
                LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(ValueFalse));
                FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCellCheck, valueIsNotBooleanFalse);
                ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setDouble(m_out.phi(Double, intToDouble, unboxedDouble, convertedUndefined, convertedNull, convertedTrue, convertedFalse));
                return;
            }
            // NumberUse: reaching here means the operand was not a number,
            // which the type check turns into an unconditional OSR exit.
            m_out.appendTo(nonDoubleCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecBytecodeNumber, m_out.booleanTrue);
            m_out.unreachable();

            m_out.appendTo(continuation, lastNext);

            setDouble(m_out.phi(Double, intToDouble, unboxedDouble));
            return;
        }

        case Int52RepUse: {
            setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1())));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
        }
    }
1769
1770 void compileDoubleAsInt32()
1771 {
1772 LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode()));
1773 setInt32(integerValue);
1774 }
1775
1776 void compileValueRep()
1777 {
1778 switch (m_node->child1().useKind()) {
1779 case DoubleRepUse: {
1780 LValue value = lowDouble(m_node->child1());
1781
1782 if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) {
1783 value = m_out.select(
1784 m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN));
1785 }
1786
1787 setJSValue(boxDouble(value));
1788 return;
1789 }
1790
1791 case Int52RepUse: {
1792 setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1())));
1793 return;
1794 }
1795
1796 default:
1797 DFG_CRASH(m_graph, m_node, "Bad use kind");
1798 }
1799 }
1800
1801 void compileInt52Rep()
1802 {
1803 switch (m_node->child1().useKind()) {
1804 case Int32Use:
1805 setStrictInt52(m_out.signExt32To64(lowInt32(m_node->child1())));
1806 return;
1807
1808 case AnyIntUse:
1809 setStrictInt52(
1810 jsValueToStrictInt52(
1811 m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1812 return;
1813
1814 case DoubleRepAnyIntUse:
1815 setStrictInt52(
1816 doubleToStrictInt52(
1817 m_node->child1(), lowDouble(m_node->child1())));
1818 return;
1819
1820 default:
1821 RELEASE_ASSERT_NOT_REACHED();
1822 }
1823 }
1824
1825 void compileValueToInt32()
1826 {
1827 switch (m_node->child1().useKind()) {
1828 case Int52RepUse:
1829 setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
1830 break;
1831
1832 case DoubleRepUse:
1833 setInt32(doubleToInt32(lowDouble(m_node->child1())));
1834 break;
1835
1836 case NumberUse:
1837 case NotCellUse: {
1838 LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
1839 if (isValid(value)) {
1840 setInt32(value.value());
1841 break;
1842 }
1843
1844 value = m_jsValueValues.get(m_node->child1().node());
1845 if (isValid(value)) {
1846 setInt32(numberOrNotCellToInt32(m_node->child1(), value.value()));
1847 break;
1848 }
1849
1850 // We'll basically just get here for constants. But it's good to have this
1851 // catch-all since we often add new representations into the mix.
1852 setInt32(
1853 numberOrNotCellToInt32(
1854 m_node->child1(),
1855 lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1856 break;
1857 }
1858
1859 default:
1860 DFG_CRASH(m_graph, m_node, "Bad use kind");
1861 break;
1862 }
1863 }
1864
    void compileBooleanToNumber()
    {
        switch (m_node->child1().useKind()) {
        case BooleanUse: {
            // A speculated boolean becomes 0 or 1 as an int32.
            setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), Int32));
            return;
        }

        case UntypedUse: {
            LValue value = lowJSValue(m_node->child1());

            // If the value is provably a boolean or a bool-like int32, the
            // numeric result is just its low bit — no control flow needed.
            // Note this path produces an int32 result, unlike the general
            // path below which produces a JSValue.
            if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) {
                setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One));
                return;
            }

            LBasicBlock booleanCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Non-booleans pass through unchanged as a JSValue.
            ValueFromBlock notBooleanResult = m_out.anchor(value);
            m_out.branch(
                isBoolean(value, provenType(m_node->child1())),
                unsure(booleanCase), unsure(continuation));

            // Booleans are unboxed to 0/1 and re-boxed as an int32 JSValue.
            LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
            ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
                m_out.zeroExt(unboxBoolean(value), Int64), m_tagTypeNumber));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, booleanResult, notBooleanResult));
            return;
        }

        default:
            RELEASE_ASSERT_NOT_REACHED();
            return;
        }
    }
1904
1905 void compileExtractOSREntryLocal()
1906 {
1907 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
1908 m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
1909 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
1910 }
1911
1912 void compileExtractCatchLocal()
1913 {
1914 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_ftlState.jitCode->common.catchOSREntryBuffer->dataBuffer());
1915 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->catchOSREntryIndex())));
1916 }
1917
1918 void compileClearCatchLocals()
1919 {
1920 ScratchBuffer* scratchBuffer = m_ftlState.jitCode->common.catchOSREntryBuffer;
1921 ASSERT(scratchBuffer);
1922 m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));
1923 }
1924
1925 void compileGetStack()
1926 {
1927 StackAccessData* data = m_node->stackAccessData();
1928 AbstractValue& value = m_state.operand(data->local);
1929
1930 DFG_ASSERT(m_graph, m_node, isConcrete(data->format), data->format);
1931
1932 switch (data->format) {
1933 case FlushedDouble:
1934 setDouble(m_out.loadDouble(addressFor(data->machineLocal)));
1935 break;
1936 case FlushedInt52:
1937 setInt52(m_out.load64(addressFor(data->machineLocal)));
1938 break;
1939 default:
1940 if (isInt32Speculation(value.m_type))
1941 setInt32(m_out.load32(payloadFor(data->machineLocal)));
1942 else
1943 setJSValue(m_out.load64(addressFor(data->machineLocal)));
1944 break;
1945 }
1946 }
1947
1948 void compilePutStack()
1949 {
1950 StackAccessData* data = m_node->stackAccessData();
1951 switch (data->format) {
1952 case FlushedJSValue: {
1953 LValue value = lowJSValue(m_node->child1());
1954 m_out.store64(value, addressFor(data->machineLocal));
1955 break;
1956 }
1957
1958 case FlushedDouble: {
1959 LValue value = lowDouble(m_node->child1());
1960 m_out.storeDouble(value, addressFor(data->machineLocal));
1961 break;
1962 }
1963
1964 case FlushedInt32: {
1965 LValue value = lowInt32(m_node->child1());
1966 m_out.store32(value, payloadFor(data->machineLocal));
1967 break;
1968 }
1969
1970 case FlushedInt52: {
1971 LValue value = lowInt52(m_node->child1());
1972 m_out.store64(value, addressFor(data->machineLocal));
1973 break;
1974 }
1975
1976 case FlushedCell: {
1977 LValue value = lowCell(m_node->child1());
1978 m_out.store64(value, addressFor(data->machineLocal));
1979 break;
1980 }
1981
1982 case FlushedBoolean: {
1983 speculateBoolean(m_node->child1());
1984 m_out.store64(
1985 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1986 addressFor(data->machineLocal));
1987 break;
1988 }
1989
1990 default:
1991 DFG_CRASH(m_graph, m_node, "Bad flush format");
1992 break;
1993 }
1994 }
1995
    void compileNoOp()
    {
        // Emits no code of its own, but still performs any speculation
        // checks the node's children require.
        DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
    }
2000
    void compileToObjectOrCallObjectConstructor()
    {
        // Implements both ToObject and CallObjectConstructor: objects pass
        // through unchanged on the fast path; everything else goes to a
        // runtime call. The two ops differ only in which runtime function
        // handles the slow path and which extra arguments it takes.
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Non-cells can never be objects; go straight to the slow path.
        m_out.branch(isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
        ValueFromBlock fastResult = m_out.anchor(value);
        m_out.branch(isObject(value), usually(continuation), rarely(slowCase));

        m_out.appendTo(slowCase, continuation);

        ValueFromBlock slowResult;
        if (m_node->op() == ToObject) {
            // ToObject passes the global object plus an identifier from the
            // node to the runtime. NOTE(review): the identifier presumably
            // feeds error reporting — confirm against operationToObject.
            auto* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
            slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationToObject), m_callFrame, weakPointer(globalObject), value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
        } else
            slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationCallObjectConstructor), m_callFrame, frozenPointer(m_node->cellOperand()), value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
    }
2028
    void compileToThis()
    {
        // ToThis: a cell whose type-info flags do not include OverridesToThis
        // is its own |this| and passes through on the fast path. Non-cells
        // and overriding cells call into the runtime, using the strict or
        // sloppy variant depending on the code's strictness.
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
        ValueFromBlock fastResult = m_out.anchor(value);
        // Fast path only when the OverridesToThis bit is clear.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
                m_out.constInt32(OverridesToThis)),
            usually(continuation), rarely(slowCase));

        m_out.appendTo(slowCase, continuation);
        J_JITOperation_EJ function;
        if (m_graph.isStrictModeFor(m_node->origin.semantic))
            function = operationToThisStrict;
        else
            function = operationToThis;
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int64, m_out.operation(function), m_callFrame, value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
    }
2061
2062 void compileValueAdd()
2063 {
2064 if (m_node->isBinaryUseKind(BigIntUse)) {
2065 LValue left = lowBigInt(m_node->child1());
2066 LValue right = lowBigInt(m_node->child2());
2067
2068 LValue result = vmCall(pointerType(), m_out.operation(operationAddBigInt), m_callFrame, left, right);
2069 setJSValue(result);
2070 return;
2071 }
2072
2073 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2074 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2075 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2076 auto repatchingFunction = operationValueAddOptimize;
2077 auto nonRepatchingFunction = operationValueAdd;
2078 compileBinaryMathIC<JITAddGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2079 }
2080
2081 void compileValueSub()
2082 {
2083 if (m_node->isBinaryUseKind(BigIntUse)) {
2084 LValue left = lowBigInt(m_node->child1());
2085 LValue right = lowBigInt(m_node->child2());
2086
2087 LValue result = vmCall(pointerType(), m_out.operation(operationSubBigInt), m_callFrame, left, right);
2088 setJSValue(result);
2089 return;
2090 }
2091
2092 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2093 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2094 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2095 auto repatchingFunction = operationValueSubOptimize;
2096 auto nonRepatchingFunction = operationValueSub;
2097 compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2098 }
2099
2100 void compileValueMul()
2101 {
2102 if (m_node->isBinaryUseKind(BigIntUse)) {
2103 LValue left = lowBigInt(m_node->child1());
2104 LValue right = lowBigInt(m_node->child2());
2105
2106 LValue result = vmCall(Int64, m_out.operation(operationMulBigInt), m_callFrame, left, right);
2107 setJSValue(result);
2108 return;
2109 }
2110
2111 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2112 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2113 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2114 auto repatchingFunction = operationValueMulOptimize;
2115 auto nonRepatchingFunction = operationValueMul;
2116 compileBinaryMathIC<JITMulGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2117 }
2118
    template <typename Generator, typename Func1, typename Func2,
        typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
    void compileUnaryMathIC(ArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
    {
        // Lowers a unary arithmetic node through a math IC: a patchpoint whose
        // fast path is emitted inline by Generator and whose slow path calls
        // into the VM. repatchingFunction is used when the IC may repatch
        // itself based on what it observes; nonRepatchingFunction is the plain
        // fallback.
        Node* node = m_node;

        LValue operand = lowJSValue(node->child1());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(operand);
        // Keep the tag registers pinned across the patchpoint so the emitted
        // snippet can box/unbox JSValues.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        // Captured by value into the generator lambda; the FTL state outlives
        // code generation.
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

#if ENABLE(MATH_IC_STATS)
                auto inlineStart = jit.label();
#endif

                Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
                JITUnaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile);
                // params[0] is the result register pair, params[1] the operand.
                mathIC->m_generator = Generator(JSValueRegs(params[0].gpr()), JSValueRegs(params[1].gpr()), params.gpScratch(0));

                bool shouldEmitProfiling = false;
                bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);

                if (generatedInline) {
                    ASSERT(!mathICGenerationState->slowPathJumps.empty());
                    auto done = jit.label();
                    // Emit the slow path out of line, after the main body.
                    params.addLatePath([=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);
                        mathICGenerationState->slowPathJumps.link(&jit);
                        mathICGenerationState->slowPathStart = jit.label();
#if ENABLE(MATH_IC_STATS)
                        auto slowPathStart = jit.label();
#endif

                        if (mathICGenerationState->shouldSlowPathRepatch) {
                            // Pass the IC itself so the operation can repatch it.
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                                repatchingFunction, params[0].gpr(), params[1].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
                            mathICGenerationState->slowPathCall = call.call();
                        } else {
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
                                exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr());
                            mathICGenerationState->slowPathCall = call.call();
                        }
                        jit.jump().linkTo(done, &jit);

                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
                        });

#if ENABLE(MATH_IC_STATS)
                        auto slowPathEnd = jit.label();
                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
                            mathIC->m_generatedCodeSize += size;
                        });
#endif
                    });
                } else {
                    // The IC declined to emit an inline fast path; call the
                    // non-repatching operation unconditionally.
                    callOperation(
                        *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                        nonRepatchingFunction, params[0].gpr(), params[1].gpr());
                }

#if ENABLE(MATH_IC_STATS)
                auto inlineEnd = jit.label();
                jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                    size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
                    mathIC->m_generatedCodeSize += size;
                });
#endif
            });

        setJSValue(patchpoint);
    }
2204
    template <typename Generator, typename Func1, typename Func2,
        typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
    void compileBinaryMathIC(ArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
    {
        // Binary counterpart of compileUnaryMathIC: lowers a two-operand
        // arithmetic node through a math IC patchpoint with an inline fast
        // path and a VM slow path (repatching or not).
        Node* node = m_node;

        LValue left = lowJSValue(node->child1());
        LValue right = lowJSValue(node->child2());

        // Give the snippet generator the statically known result types of the
        // operands so it can specialize its fast path.
        SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
        SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(left);
        patchpoint->appendSomeRegister(right);
        // Keep the tag registers pinned across the patchpoint so the emitted
        // snippet can box/unbox JSValues.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->numFPScratchRegisters = 2;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        // Captured by value into the generator lambda; the FTL state outlives
        // code generation.
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);


                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

#if ENABLE(MATH_IC_STATS)
                auto inlineStart = jit.label();
#endif

                Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
                JITBinaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile);
                // params[0] is the result, params[1]/params[2] the left/right
                // operands; the snippet also gets two FP scratch registers.
                mathIC->m_generator = Generator(leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.fpScratch(0),
                    params.fpScratch(1), params.gpScratch(0), InvalidFPRReg);

                bool shouldEmitProfiling = false;
                bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);

                if (generatedInline) {
                    ASSERT(!mathICGenerationState->slowPathJumps.empty());
                    auto done = jit.label();
                    // Emit the slow path out of line, after the main body.
                    params.addLatePath([=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);
                        mathICGenerationState->slowPathJumps.link(&jit);
                        mathICGenerationState->slowPathStart = jit.label();
#if ENABLE(MATH_IC_STATS)
                        auto slowPathStart = jit.label();
#endif

                        if (mathICGenerationState->shouldSlowPathRepatch) {
                            // Pass the IC itself so the operation can repatch it.
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                                repatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
                            mathICGenerationState->slowPathCall = call.call();
                        } else {
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
                                exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
                            mathICGenerationState->slowPathCall = call.call();
                        }
                        jit.jump().linkTo(done, &jit);

                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
                        });

#if ENABLE(MATH_IC_STATS)
                        auto slowPathEnd = jit.label();
                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
                            mathIC->m_generatedCodeSize += size;
                        });
#endif
                    });
                } else {
                    // The IC declined to emit an inline fast path; call the
                    // non-repatching operation unconditionally.
                    callOperation(
                        *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                        nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
                }

#if ENABLE(MATH_IC_STATS)
                auto inlineEnd = jit.label();
                jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                    size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
                    mathIC->m_generatedCodeSize += size;
                });
#endif
            });

        setJSValue(patchpoint);
    }
2300
2301 void compileStrCat()
2302 {
2303 LValue result;
2304 if (m_node->child3()) {
2305 result = vmCall(
2306 Int64, m_out.operation(operationStrCat3), m_callFrame,
2307 lowJSValue(m_node->child1(), ManualOperandSpeculation),
2308 lowJSValue(m_node->child2(), ManualOperandSpeculation),
2309 lowJSValue(m_node->child3(), ManualOperandSpeculation));
2310 } else {
2311 result = vmCall(
2312 Int64, m_out.operation(operationStrCat2), m_callFrame,
2313 lowJSValue(m_node->child1(), ManualOperandSpeculation),
2314 lowJSValue(m_node->child2(), ManualOperandSpeculation));
2315 }
2316 setJSValue(result);
2317 }
2318
    void compileArithAddOrSub()
    {
        // Handles both ArithAdd and ArithSub; all cases below key off isSub so
        // the two opcodes share one lowering.
        bool isSub = m_node->op() == ArithSub;
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());

            // Unchecked mode: plain wrapping 32-bit add/sub.
            if (!shouldCheckOverflow(m_node->arithMode())) {
                setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right));
                break;
            }

            // Checked mode: use B3's checked add/sub and OSR exit on overflow.
            CheckValue* result =
                isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
            blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
            setInt32(result);
            break;
        }

        case Int52RepUse: {
            // If both operands are provably int32-sized, an add/sub cannot
            // overflow the 52-bit range, so no overflow check is needed.
            if (!abstractValue(m_node->child1()).couldBeType(SpecNonInt32AsInt52)
                && !abstractValue(m_node->child2()).couldBeType(SpecNonInt32AsInt52)) {
                Int52Kind kind;
                // Consume child1 in whichever Int52 format it already has and
                // force child2 into the same format.
                LValue left = lowWhicheverInt52(m_node->child1(), kind);
                LValue right = lowInt52(m_node->child2(), kind);
                setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
                break;
            }

            LValue left = lowInt52(m_node->child1());
            LValue right = lowInt52(m_node->child2());
            CheckValue* result =
                isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
            blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
            setInt52(result);
            break;
        }

        case DoubleRepUse: {
            // Doubles never need overflow checks.
            LValue C1 = lowDouble(m_node->child1());
            LValue C2 = lowDouble(m_node->child2());

            setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
            break;
        }

        case UntypedUse: {
            // Only ArithSub is expected here with UntypedUse; an untyped
            // ArithAdd reaching this point is a compiler bug.
            if (!isSub) {
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                break;
            }

            // Untyped subtraction goes through the binary math IC, fed by the
            // baseline code block's arithmetic profile for this bytecode.
            CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
            unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
            ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
            auto repatchingFunction = operationValueSubOptimize;
            auto nonRepatchingFunction = operationValueSub;
            compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2386
2387 void compileArithClz32()
2388 {
2389 if (m_node->child1().useKind() == Int32Use || m_node->child1().useKind() == KnownInt32Use) {
2390 LValue operand = lowInt32(m_node->child1());
2391 setInt32(m_out.ctlz32(operand));
2392 return;
2393 }
2394 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2395 LValue argument = lowJSValue(m_node->child1());
2396 LValue result = vmCall(Int32, m_out.operation(operationArithClz32), m_callFrame, argument);
2397 setInt32(result);
2398 }
2399
    void compileArithMul()
    {
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());

            LValue result;

            // Unchecked mode gets a plain wrapping multiply; checked mode uses
            // B3's checked multiply and OSR exits on overflow.
            if (!shouldCheckOverflow(m_node->arithMode()))
                result = m_out.mul(left, right);
            else {
                CheckValue* speculation = m_out.speculateMul(left, right);
                blessSpeculation(speculation, Overflow, noValue(), nullptr, m_origin);
                result = speculation;
            }

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // A zero product with a negative operand means the true result
                // was -0, which int32 cannot represent, so OSR exit.
                LBasicBlock slowCase = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.notZero32(result), usually(continuation), rarely(slowCase));

                LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int32Zero));
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int32Zero));
                m_out.jump(continuation);
                m_out.appendTo(continuation, lastNext);
            }

            setInt32(result);
            break;
        }

        case Int52RepUse: {
            // Consume child1 in whichever Int52 format it already has and
            // child2 in the opposite format — presumably so the shifts in the
            // two representations compose correctly for multiplication
            // (see Int52Kind); confirm against lowInt52's contract.
            Int52Kind kind;
            LValue left = lowWhicheverInt52(m_node->child1(), kind);
            LValue right = lowInt52(m_node->child2(), opposite(kind));

            // Int52 multiply is always overflow-checked.
            CheckValue* result = m_out.speculateMul(left, right);
            blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // Same -0 check as the int32 case, on 64-bit values.
                LBasicBlock slowCase = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.notZero64(result), usually(continuation), rarely(slowCase));

                LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int64Zero));
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int64Zero));
                m_out.jump(continuation);
                m_out.appendTo(continuation, lastNext);
            }

            setInt52(result);
            break;
        }

        case DoubleRepUse: {
            // Doubles represent -0 and overflow natively; no checks needed.
            setDouble(
                m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2472
2473 void compileValueDiv()
2474 {
2475 if (m_node->isBinaryUseKind(BigIntUse)) {
2476 LValue left = lowBigInt(m_node->child1());
2477 LValue right = lowBigInt(m_node->child2());
2478
2479 LValue result = vmCall(pointerType(), m_out.operation(operationDivBigInt), m_callFrame, left, right);
2480 setJSValue(result);
2481 return;
2482 }
2483
2484 emitBinarySnippet<JITDivGenerator, NeedScratchFPR>(operationValueDiv);
2485 }
2486
    void compileArithDiv()
    {
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue numerator = lowInt32(m_node->child1());
            LValue denominator = lowInt32(m_node->child2());

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // 0 / negative is -0, which int32 cannot represent: if the
                // numerator is zero, OSR exit when the denominator is negative.
                LBasicBlock zeroNumerator = m_out.newBlock();
                LBasicBlock numeratorContinuation = m_out.newBlock();

                m_out.branch(
                    m_out.isZero32(numerator),
                    rarely(zeroNumerator), usually(numeratorContinuation));

                LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);

                speculate(
                    NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero));

                m_out.jump(numeratorContinuation);

                m_out.appendTo(numeratorContinuation, innerLastNext);
            }

            if (shouldCheckOverflow(m_node->arithMode())) {
                LBasicBlock unsafeDenominator = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                // Unsigned (denominator + 1) > 1 is false exactly when the
                // denominator is 0 or -1, the two values that can make a
                // hardware divide trap (div by zero, INT_MIN / -1).
                LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
                m_out.branch(
                    m_out.above(adjustedDenominator, m_out.int32One),
                    usually(continuation), rarely(unsafeDenominator));

                LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
                // INT32_MIN, spelled to avoid a literal-overflow warning.
                LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
                speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
                speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                LValue result = m_out.div(numerator, denominator);
                // In checked mode the division must be exact: if multiplying
                // back doesn't reproduce the numerator there was a remainder,
                // so OSR exit.
                speculate(
                    Overflow, noValue(), 0,
                    m_out.notEqual(m_out.mul(result, denominator), numerator));
                setInt32(result);
            } else
                // Unchecked ("chill") division never traps and truncates.
                setInt32(m_out.chillDiv(numerator, denominator));

            break;
        }

        case DoubleRepUse: {
            // IEEE double division handles all the edge cases natively.
            setDouble(m_out.doubleDiv(
                lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2550
2551 void compileValueMod()
2552 {
2553 if (m_node->binaryUseKind() == BigIntUse) {
2554 LValue left = lowBigInt(m_node->child1());
2555 LValue right = lowBigInt(m_node->child2());
2556
2557 LValue result = vmCall(pointerType(), m_out.operation(operationModBigInt), m_callFrame, left, right);
2558 setJSValue(result);
2559 return;
2560 }
2561
2562 DFG_ASSERT(m_graph, m_node, m_node->binaryUseKind() == UntypedUse, m_node->binaryUseKind());
2563 LValue left = lowJSValue(m_node->child1());
2564 LValue right = lowJSValue(m_node->child2());
2565 LValue result = vmCall(Int64, m_out.operation(operationValueMod), m_callFrame, left, right);
2566 setJSValue(result);
2567 }
2568
    void compileArithMod()
    {
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue numerator = lowInt32(m_node->child1());
            LValue denominator = lowInt32(m_node->child2());

            LValue remainder;
            if (shouldCheckOverflow(m_node->arithMode())) {
                LBasicBlock unsafeDenominator = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                // Unsigned (denominator + 1) > 1 is false exactly when the
                // denominator is 0 or -1, the two values that can make a
                // hardware divide trap (mod by zero, INT_MIN % -1).
                LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
                m_out.branch(
                    m_out.above(adjustedDenominator, m_out.int32One),
                    usually(continuation), rarely(unsafeDenominator));

                LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
                // INT32_MIN, spelled to avoid a literal-overflow warning.
                LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
                speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
                speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                LValue result = m_out.mod(numerator, denominator);
                remainder = result;
            } else
                // Unchecked ("chill") mod never traps.
                remainder = m_out.chillMod(numerator, denominator);

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // A zero remainder with a negative numerator means the true
                // result was -0, which int32 cannot represent, so OSR exit.
                LBasicBlock negativeNumerator = m_out.newBlock();
                LBasicBlock numeratorContinuation = m_out.newBlock();

                m_out.branch(
                    m_out.lessThan(numerator, m_out.int32Zero),
                    unsure(negativeNumerator), unsure(numeratorContinuation));

                LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation);

                speculate(NegativeZero, noValue(), 0, m_out.isZero32(remainder));

                m_out.jump(numeratorContinuation);

                m_out.appendTo(numeratorContinuation, innerLastNext);
            }

            setInt32(remainder);
            break;
        }

        case DoubleRepUse: {
            // Doubles go through fmod-style double modulo; no checks needed.
            setDouble(
                m_out.doubleMod(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2630
    void compileArithMinOrMax()
    {
        // Handles both ArithMin and ArithMax, keyed off m_node->op().
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            // Integers have no NaN/-0 concerns: a single compare-and-select.
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());

            setInt32(
                m_out.select(
                    m_node->op() == ArithMin
                        ? m_out.lessThan(left, right)
                        : m_out.lessThan(right, left),
                    left, right));
            break;
        }

        case DoubleRepUse: {
            LValue left = lowDouble(m_node->child1());
            LValue right = lowDouble(m_node->child2());

            LBasicBlock notLessThan = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            Vector<ValueFromBlock, 2> results;

            // Fast path: left strictly wins the comparison, so the result is
            // left. This branch is false for ties, NaN, and when right wins.
            results.append(m_out.anchor(left));
            m_out.branch(
                m_node->op() == ArithMin
                    ? m_out.doubleLessThan(left, right)
                    : m_out.doubleGreaterThan(left, right),
                unsure(continuation), unsure(notLessThan));

            // Otherwise pick right when the reverse (non-strict) comparison
            // holds; if it doesn't, the operands were unordered (NaN), and the
            // select yields PNaN, matching Math.min/max NaN propagation.
            LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation);
            results.append(m_out.anchor(m_out.select(
                m_node->op() == ArithMin
                    ? m_out.doubleGreaterThanOrEqual(left, right)
                    : m_out.doubleLessThanOrEqual(left, right),
                right, m_out.constDouble(PNaN))));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setDouble(m_out.phi(Double, results));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2681
    void compileArithAbs()
    {
        switch (m_node->child1().useKind()) {
        case Int32Use: {
            LValue value = lowInt32(m_node->child1());

            // Branchless abs: mask is 0 for non-negative values and -1 (all
            // ones) for negative ones, so (value + mask) ^ mask negates
            // negative values and leaves non-negative values unchanged.
            LValue mask = m_out.aShr(value, m_out.constInt32(31));
            LValue result = m_out.bitXor(mask, m_out.add(mask, value));

            // abs(INT32_MIN) overflows back to a negative value; OSR exit in
            // checked mode.
            if (shouldCheckOverflow(m_node->arithMode()))
                speculate(Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));

            setInt32(result);
            break;
        }

        case DoubleRepUse: {
            // Doubles have a dedicated abs; it cannot overflow.
            setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
            break;
        }

        default: {
            // Untyped operands call the VM; the result is always a double.
            DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
            LValue argument = lowJSValue(m_node->child1());
            LValue result = vmCall(Double, m_out.operation(operationArithAbs), m_callFrame, argument);
            setDouble(result);
            break;
        }
        }
    }
2712
2713 void compileArithUnary()
2714 {
2715 if (m_node->child1().useKind() == DoubleRepUse) {
2716 setDouble(m_out.doubleUnary(m_node->arithUnaryType(), lowDouble(m_node->child1())));
2717 return;
2718 }
2719 LValue argument = lowJSValue(m_node->child1());
2720 LValue result = vmCall(Double, m_out.operation(DFG::arithUnaryOperation(m_node->arithUnaryType())), m_callFrame, argument);
2721 setDouble(result);
2722 }
2723
2724 void compileValuePow()
2725 {
2726 if (m_node->isBinaryUseKind(BigIntUse)) {
2727 LValue base = lowBigInt(m_node->child1());
2728 LValue exponent = lowBigInt(m_node->child2());
2729
2730 LValue result = vmCall(pointerType(), m_out.operation(operationPowBigInt), m_callFrame, base, exponent);
2731 setJSValue(result);
2732 return;
2733 }
2734
2735 LValue base = lowJSValue(m_node->child1());
2736 LValue exponent = lowJSValue(m_node->child2());
2737 LValue result = vmCall(Int64, m_out.operation(operationValuePow), m_callFrame, base, exponent);
2738 setJSValue(result);
2739 }
2740
    void compileArithPow()
    {
        // Fast path: a small non-negative integer exponent uses powi directly.
        // Otherwise we emit a chain of special cases required by Math.pow
        // semantics (NaN rules, |x| == 1 with infinite y, y == +/-0.5 via
        // sqrt) before falling back to a full pow call.
        if (m_node->child2().useKind() == Int32Use)
            setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
        else {
            LValue base = lowDouble(m_node->child1());
            LValue exponent = lowDouble(m_node->child2());

            LBasicBlock integerExponentIsSmallBlock = m_out.newBlock();
            LBasicBlock integerExponentPowBlock = m_out.newBlock();
            LBasicBlock doubleExponentPowBlockEntry = m_out.newBlock();
            LBasicBlock nanExceptionBaseIsOne = m_out.newBlock();
            LBasicBlock nanExceptionExponentIsInfinity = m_out.newBlock();
            LBasicBlock testExponentIsOneHalf = m_out.newBlock();
            LBasicBlock handleBaseZeroExponentIsOneHalf = m_out.newBlock();
            LBasicBlock handleInfinityForExponentIsOneHalf = m_out.newBlock();
            LBasicBlock exponentIsOneHalfNormal = m_out.newBlock();
            LBasicBlock exponentIsOneHalfInfinity = m_out.newBlock();
            LBasicBlock testExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock testBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock handleBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock handleInfinityForExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock exponentIsNegativeOneHalfNormal = m_out.newBlock();
            LBasicBlock exponentIsNegativeOneHalfInfinity = m_out.newBlock();
            LBasicBlock powBlock = m_out.newBlock();
            LBasicBlock nanExceptionResultIsNaN = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // If the exponent round-trips through int32 unchanged, it is an
            // integer and may qualify for the powi fast path.
            LValue integerExponent = m_out.doubleToInt(exponent);
            LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
            LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
            m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));

            // powi is only used for exponents up to maxExponentForIntegerMathPow.
            LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
            LValue integerExponentBelowMax = m_out.belowOrEqual(integerExponent, m_out.constInt32(maxExponentForIntegerMathPow));
            m_out.branch(integerExponentBelowMax, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));

            m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
            ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
            m_out.jump(continuation);

            // If y is NaN, the result is NaN.
            m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionBaseIsOne);
            LValue exponentIsNaN;
            if (provenType(m_node->child2()) & SpecDoubleNaN)
                exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
            else
                exponentIsNaN = m_out.booleanFalse;
            m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionBaseIsOne));

            // If abs(x) is 1 and y is +infinity, the result is NaN.
            // If abs(x) is 1 and y is -infinity, the result is NaN.

            // Test if base == 1.
            m_out.appendTo(nanExceptionBaseIsOne, nanExceptionExponentIsInfinity);
            LValue absoluteBase = m_out.doubleAbs(base);
            LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
            m_out.branch(absoluteBaseIsOne, rarely(nanExceptionExponentIsInfinity), usually(testExponentIsOneHalf));

            // Test if abs(y) == Infinity.
            m_out.appendTo(nanExceptionExponentIsInfinity, testExponentIsOneHalf);
            LValue absoluteExponent = m_out.doubleAbs(exponent);
            LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionResultIsNaN), usually(testExponentIsOneHalf));

            // If y == 0.5 or y == -0.5, handle it through SQRT.
            // We must be careful with -0 and -Infinity.

            // Test if y == 0.5
            m_out.appendTo(testExponentIsOneHalf, handleBaseZeroExponentIsOneHalf);
            LValue exponentIsOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(0.5));
            m_out.branch(exponentIsOneHalf, rarely(handleBaseZeroExponentIsOneHalf), usually(testExponentIsNegativeOneHalf));

            // Handle x == -0.
            m_out.appendTo(handleBaseZeroExponentIsOneHalf, handleInfinityForExponentIsOneHalf);
            LValue baseIsZeroExponentIsOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
            ValueFromBlock zeroResultExponentIsOneHalf = m_out.anchor(m_out.doubleZero);
            m_out.branch(baseIsZeroExponentIsOneHalf, rarely(continuation), usually(handleInfinityForExponentIsOneHalf));

            // Test if abs(x) == Infinity.
            m_out.appendTo(handleInfinityForExponentIsOneHalf, exponentIsOneHalfNormal);
            LValue absoluteBaseIsInfinityOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.branch(absoluteBaseIsInfinityOneHalf, rarely(exponentIsOneHalfInfinity), usually(exponentIsOneHalfNormal));

            // The exponent is 0.5, the base is finite or NaN, we can use SQRT.
            m_out.appendTo(exponentIsOneHalfNormal, exponentIsOneHalfInfinity);
            ValueFromBlock sqrtResult = m_out.anchor(m_out.doubleSqrt(base));
            m_out.jump(continuation);

            // The exponent is 0.5, the base is infinite, the result is always infinite.
            m_out.appendTo(exponentIsOneHalfInfinity, testExponentIsNegativeOneHalf);
            ValueFromBlock sqrtInfinityResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.jump(continuation);

            // Test if y == -0.5
            m_out.appendTo(testExponentIsNegativeOneHalf, testBaseZeroExponentIsNegativeOneHalf);
            LValue exponentIsNegativeOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(-0.5));
            m_out.branch(exponentIsNegativeOneHalf, rarely(testBaseZeroExponentIsNegativeOneHalf), usually(powBlock));

            // Handle x == -0.
            m_out.appendTo(testBaseZeroExponentIsNegativeOneHalf, handleBaseZeroExponentIsNegativeOneHalf);
            LValue baseIsZeroExponentIsNegativeOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
            m_out.branch(baseIsZeroExponentIsNegativeOneHalf, rarely(handleBaseZeroExponentIsNegativeOneHalf), usually(handleInfinityForExponentIsNegativeOneHalf));

            m_out.appendTo(handleBaseZeroExponentIsNegativeOneHalf, handleInfinityForExponentIsNegativeOneHalf);
            ValueFromBlock oneOverSqrtZeroResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.jump(continuation);

            // Test if abs(x) == Infinity.
            m_out.appendTo(handleInfinityForExponentIsNegativeOneHalf, exponentIsNegativeOneHalfNormal);
            LValue absoluteBaseIsInfinityNegativeOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.branch(absoluteBaseIsInfinityNegativeOneHalf, rarely(exponentIsNegativeOneHalfInfinity), usually(exponentIsNegativeOneHalfNormal));

            // The exponent is -0.5, the base is finite or NaN, we can use 1/SQRT.
            m_out.appendTo(exponentIsNegativeOneHalfNormal, exponentIsNegativeOneHalfInfinity);
            LValue sqrtBase = m_out.doubleSqrt(base);
            ValueFromBlock oneOverSqrtResult = m_out.anchor(m_out.div(m_out.constDouble(1.), sqrtBase));
            m_out.jump(continuation);

            // The exponent is -0.5, the base is infinite, the result is always zero.
            m_out.appendTo(exponentIsNegativeOneHalfInfinity, powBlock);
            ValueFromBlock oneOverSqrtInfinityResult = m_out.anchor(m_out.doubleZero);
            m_out.jump(continuation);

            // General case: call the full pow implementation.
            m_out.appendTo(powBlock, nanExceptionResultIsNaN);
            ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent));
            m_out.jump(continuation);

            m_out.appendTo(nanExceptionResultIsNaN, continuation);
            ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
            m_out.jump(continuation);

            // Merge all the anchored results into one phi.
            m_out.appendTo(continuation, lastNext);
            setDouble(m_out.phi(Double, powDoubleIntResult, zeroResultExponentIsOneHalf, sqrtResult, sqrtInfinityResult, oneOverSqrtZeroResult, oneOverSqrtResult, oneOverSqrtInfinityResult, powResult, pureNan));
        }
    }
2877
    void compileArithRandom()
    {
        // Lowers ArithRandom by inlining the global object's WeakRandom state
        // update (an xorshift-style generator) and converting the low 53 bits
        // of the 64-bit result into a double in [0, 1).
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        // Inlined WeakRandom::advance().
        // uint64_t x = m_low;
        void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
        LValue low = m_out.load64(m_out.absolute(lowAddress));
        // uint64_t y = m_high;
        void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
        LValue high = m_out.load64(m_out.absolute(highAddress));
        // m_low = y;
        m_out.store64(high, m_out.absolute(lowAddress));

        // x ^= x << 23;
        LValue phase1 = m_out.bitXor(m_out.shl(low, m_out.constInt64(23)), low);

        // x ^= x >> 17;
        LValue phase2 = m_out.bitXor(m_out.lShr(phase1, m_out.constInt64(17)), phase1);

        // x ^= y ^ (y >> 26);
        LValue phase3 = m_out.bitXor(m_out.bitXor(high, m_out.lShr(high, m_out.constInt64(26))), phase2);

        // m_high = x;
        m_out.store64(phase3, m_out.absolute(highAddress));

        // return x + y;
        LValue random64 = m_out.add(phase3, high);

        // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation.
        LValue random53 = m_out.bitAnd(random64, m_out.constInt64((1ULL << 53) - 1));

        LValue double53Integer = m_out.intToDouble(random53);

        // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
        // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)).
        static const double scale = 1.0 / (1ULL << 53);

        // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer.
        // It just reduces the exp part of the given 53bit double integer.
        // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.)
        // Now we get 53bit precision random double value in [0, 1).
        LValue result = m_out.doubleMul(double53Integer, m_out.constDouble(scale));

        setDouble(result);
    }
2924
    void compileArithRound()
    {
        if (m_node->child1().useKind() == DoubleRepUse) {
            LValue result = nullptr;
            if (producesInteger(m_node->arithRoundingMode()) && !shouldCheckNegativeZero(m_node->arithRoundingMode())) {
                // Fast path: floor(x + 0.5). Only valid when we don't have to
                // distinguish -0 in the result.
                LValue value = lowDouble(m_node->child1());
                result = m_out.doubleFloor(m_out.doubleAdd(value, m_out.constDouble(0.5)));
            } else {
                // General path: start from ceil(x) and step down by one when
                // the fractional distance ceil(x) - x exceeds 0.5. This rounds
                // ties upward while preserving the sign of zero.
                LBasicBlock realPartIsMoreThanHalf = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                LValue value = lowDouble(m_node->child1());
                LValue integerValue = m_out.doubleCeil(value);
                ValueFromBlock integerValueResult = m_out.anchor(integerValue);

                LValue realPart = m_out.doubleSub(integerValue, value);

                // OrUnordered: NaN input takes the subtract branch, and
                // NaN - 1 is still NaN, so NaN propagates either way.
                m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation));

                LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation);
                LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
                ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
                m_out.jump(continuation);
                m_out.appendTo(continuation, lastNext);

                result = m_out.phi(Double, integerValueResult, integerValueRoundedDownResult);
            }

            // When the rounding mode produces an int32, convert the double
            // result (with a -0 check if the mode requires one).
            if (producesInteger(m_node->arithRoundingMode())) {
                LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode()));
                setInt32(integerValue);
            } else
                setDouble(result);
            return;
        }

        // Untyped operands call the VM and produce a boxed JSValue.
        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
        LValue argument = lowJSValue(m_node->child1());
        setJSValue(vmCall(Int64, m_out.operation(operationArithRound), m_callFrame, argument));
    }
2965
2966 void compileArithFloor()
2967 {
2968 if (m_node->child1().useKind() == DoubleRepUse) {
2969 LValue value = lowDouble(m_node->child1());
2970 LValue integerValue = m_out.doubleFloor(value);
2971 if (producesInteger(m_node->arithRoundingMode()))
2972 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2973 else
2974 setDouble(integerValue);
2975 return;
2976 }
2977 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2978 LValue argument = lowJSValue(m_node->child1());
2979 setJSValue(vmCall(Int64, m_out.operation(operationArithFloor), m_callFrame, argument));
2980 }
2981
2982 void compileArithCeil()
2983 {
2984 if (m_node->child1().useKind() == DoubleRepUse) {
2985 LValue value = lowDouble(m_node->child1());
2986 LValue integerValue = m_out.doubleCeil(value);
2987 if (producesInteger(m_node->arithRoundingMode()))
2988 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2989 else
2990 setDouble(integerValue);
2991 return;
2992 }
2993 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2994 LValue argument = lowJSValue(m_node->child1());
2995 setJSValue(vmCall(Int64, m_out.operation(operationArithCeil), m_callFrame, argument));
2996 }
2997
2998 void compileArithTrunc()
2999 {
3000 if (m_node->child1().useKind() == DoubleRepUse) {
3001 LValue value = lowDouble(m_node->child1());
3002 LValue result = m_out.doubleTrunc(value);
3003 if (producesInteger(m_node->arithRoundingMode()))
3004 setInt32(convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode())));
3005 else
3006 setDouble(result);
3007 return;
3008 }
3009 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
3010 LValue argument = lowJSValue(m_node->child1());
3011 setJSValue(vmCall(Int64, m_out.operation(operationArithTrunc), m_callFrame, argument));
3012 }
3013
3014 void compileArithSqrt()
3015 {
3016 if (m_node->child1().useKind() == DoubleRepUse) {
3017 setDouble(m_out.doubleSqrt(lowDouble(m_node->child1())));
3018 return;
3019 }
3020 LValue argument = lowJSValue(m_node->child1());
3021 LValue result = vmCall(Double, m_out.operation(operationArithSqrt), m_callFrame, argument);
3022 setDouble(result);
3023 }
3024
3025 void compileArithFRound()
3026 {
3027 if (m_node->child1().useKind() == DoubleRepUse) {
3028 setDouble(m_out.fround(lowDouble(m_node->child1())));
3029 return;
3030 }
3031 LValue argument = lowJSValue(m_node->child1());
3032 LValue result = vmCall(Double, m_out.operation(operationArithFRound), m_callFrame, argument);
3033 setDouble(result);
3034 }
3035
3036 void compileValueNegate()
3037 {
3038 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
3039 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
3040 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
3041 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
3042 auto repatchingFunction = operationArithNegateOptimize;
3043 auto nonRepatchingFunction = operationArithNegate;
3044 compileUnaryMathIC<JITNegGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
3045 }
3046
    // Lowers ArithNegate for Int32Use, Int52RepUse and DoubleRepUse, inserting
    // overflow and negative-zero speculation checks as required by the arith mode.
    void compileArithNegate()
    {
        switch (m_node->child1().useKind()) {
        case Int32Use: {
            LValue value = lowInt32(m_node->child1());

            LValue result;
            if (!shouldCheckOverflow(m_node->arithMode()))
                result = m_out.neg(value);
            else if (!shouldCheckNegativeZero(m_node->arithMode())) {
                // Overflow check only: emit a checked 0 - value subtraction.
                CheckValue* check = m_out.speculateSub(m_out.int32Zero, value);
                blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
                result = check;
            } else {
                // Both checks needed. (value & 0x7fffffff) == 0 holds exactly
                // for 0 and INT32_MIN — the two inputs whose negation produces
                // -0 or overflows — so one test covers both exit conditions.
                speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
                result = m_out.neg(value);
            }

            setInt32(result);
            break;
        }

        case Int52RepUse: {
            if (!abstractValue(m_node->child1()).couldBeType(SpecNonInt32AsInt52)) {
                // The value is known to fit in int32, so negation cannot
                // overflow the int52 range; only -0 may need a check.
                Int52Kind kind;
                LValue value = lowWhicheverInt52(m_node->child1(), kind);
                LValue result = m_out.neg(value);
                if (shouldCheckNegativeZero(m_node->arithMode()))
                    speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
                setInt52(result, kind);
                break;
            }

            // Full int52 range: use a checked subtraction to catch overflow.
            LValue value = lowInt52(m_node->child1());
            CheckValue* result = m_out.speculateSub(m_out.int64Zero, value);
            blessSpeculation(result, Int52Overflow, noValue(), nullptr, m_origin);
            if (shouldCheckNegativeZero(m_node->arithMode()))
                speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
            setInt52(result);
            break;
        }

        case DoubleRepUse: {
            // Double negation is a sign-bit flip; no checks are necessary.
            setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
3099
3100 void compileValueBitNot()
3101 {
3102 if (m_node->child1().useKind() == BigIntUse) {
3103 LValue operand = lowBigInt(m_node->child1());
3104 LValue result = vmCall(pointerType(), m_out.operation(operationBitNotBigInt), m_callFrame, operand);
3105 setJSValue(result);
3106 return;
3107 }
3108
3109 LValue operand = lowJSValue(m_node->child1());
3110 LValue result = vmCall(Int64, m_out.operation(operationValueBitNot), m_callFrame, operand);
3111 setJSValue(result);
3112 }
3113
3114 void compileArithBitNot()
3115 {
3116 setInt32(m_out.bitNot(lowInt32(m_node->child1())));
3117 }
3118
3119 void compileValueBitAnd()
3120 {
3121 if (m_node->isBinaryUseKind(BigIntUse)) {
3122 LValue left = lowBigInt(m_node->child1());
3123 LValue right = lowBigInt(m_node->child2());
3124
3125 LValue result = vmCall(pointerType(), m_out.operation(operationBitAndBigInt), m_callFrame, left, right);
3126 setJSValue(result);
3127 return;
3128 }
3129
3130 emitBinaryBitOpSnippet<JITBitAndGenerator>(operationValueBitAnd);
3131 }
3132
3133 void compileArithBitAnd()
3134 {
3135 setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3136 }
3137
3138 void compileValueBitOr()
3139 {
3140 if (m_node->isBinaryUseKind(BigIntUse)) {
3141 LValue left = lowBigInt(m_node->child1());
3142 LValue right = lowBigInt(m_node->child2());
3143
3144 LValue result = vmCall(pointerType(), m_out.operation(operationBitOrBigInt), m_callFrame, left, right);
3145 setJSValue(result);
3146 return;
3147 }
3148
3149 emitBinaryBitOpSnippet<JITBitOrGenerator>(operationValueBitOr);
3150 }
3151
3152 void compileArithBitOr()
3153 {
3154 setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3155 }
3156
3157 void compileValueBitXor()
3158 {
3159 if (m_node->isBinaryUseKind(BigIntUse)) {
3160 LValue left = lowBigInt(m_node->child1());
3161 LValue right = lowBigInt(m_node->child2());
3162
3163 LValue result = vmCall(pointerType(), m_out.operation(operationBitXorBigInt), m_callFrame, left, right);
3164 setJSValue(result);
3165 return;
3166 }
3167
3168 emitBinaryBitOpSnippet<JITBitXorGenerator>(operationValueBitXor);
3169 }
3170
3171 void compileArithBitXor()
3172 {
3173 setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3174 }
3175
3176 void compileBitRShift()
3177 {
3178 if (m_node->isBinaryUseKind(UntypedUse)) {
3179 emitRightShiftSnippet(JITRightShiftGenerator::SignedShift);
3180 return;
3181 }
3182 setInt32(m_out.aShr(
3183 lowInt32(m_node->child1()),
3184 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
3185 }
3186
3187 void compileBitLShift()
3188 {
3189 if (m_node->isBinaryUseKind(UntypedUse)) {
3190 emitBinaryBitOpSnippet<JITLeftShiftGenerator>(operationValueBitLShift);
3191 return;
3192 }
3193 setInt32(m_out.shl(
3194 lowInt32(m_node->child1()),
3195 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
3196 }
3197
3198 void compileBitURShift()
3199 {
3200 if (m_node->isBinaryUseKind(UntypedUse)) {
3201 emitRightShiftSnippet(JITRightShiftGenerator::UnsignedShift);
3202 return;
3203 }
3204 setInt32(m_out.lShr(
3205 lowInt32(m_node->child1()),
3206 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
3207 }
3208
3209 void compileUInt32ToNumber()
3210 {
3211 LValue value = lowInt32(m_node->child1());
3212
3213 if (doesOverflow(m_node->arithMode())) {
3214 setStrictInt52(m_out.zeroExtPtr(value));
3215 return;
3216 }
3217
3218 speculate(Overflow, noValue(), 0, m_out.lessThan(value, m_out.int32Zero));
3219 setInt32(value);
3220 }
3221
    // Lowers CheckStructure: speculates that child1's structure ID belongs to
    // the node's structure set. Exits with BadConstantCache when the child is a
    // constant (the cache was built against a constant), otherwise BadCache.
    void compileCheckStructure()
    {
        ExitKind exitKind;
        if (m_node->child1()->hasConstant())
            exitKind = BadConstantCache;
        else
            exitKind = BadCache;

        switch (m_node->child1().useKind()) {
        case CellUse:
        case KnownCellUse: {
            LValue cell = lowCell(m_node->child1());

            checkStructure(
                m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
                exitKind, m_node->structureSet(),
                [&] (RegisteredStructure structure) {
                    return weakStructureID(structure);
                });
            return;
        }

        case CellOrOtherUse: {
            LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));

            // Cells get the structure-set check...
            LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
            checkStructure(
                m_out.load32(value, m_heaps.JSCell_structureID), jsValueValue(value),
                exitKind, m_node->structureSet(),
                [&] (RegisteredStructure structure) {
                    return weakStructureID(structure);
                });
            m_out.jump(continuation);

            // ...while non-cells only need to pass the "other" type check.
            m_out.appendTo(notCellCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecCell | SpecOther, isNotOther(value));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return;
        }
    }
3276
    // Like compileCheckStructure, but the input may also be the empty value
    // (an all-zero bit pattern), which bypasses the structure check entirely.
    // The branch around the check is only emitted when abstract interpretation
    // says the empty value is actually possible here.
    void compileCheckStructureOrEmpty()
    {
        ExitKind exitKind;
        if (m_node->child1()->hasConstant())
            exitKind = BadConstantCache;
        else
            exitKind = BadCache;

        LValue cell = lowCell(m_node->child1());
        bool maySeeEmptyValue = m_interpreter.forNode(m_node->child1()).m_type & SpecEmpty;
        LBasicBlock notEmpty;
        LBasicBlock continuation;
        LBasicBlock lastNext;
        if (maySeeEmptyValue) {
            notEmpty = m_out.newBlock();
            continuation = m_out.newBlock();
            // Empty value (zero) skips straight to the continuation.
            m_out.branch(m_out.isZero64(cell), unsure(continuation), unsure(notEmpty));
            lastNext = m_out.appendTo(notEmpty, continuation);
        }

        checkStructure(
            m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
            exitKind, m_node->structureSet(),
            [&] (RegisteredStructure structure) {
                return weakStructureID(structure);
            });

        if (maySeeEmptyValue) {
            m_out.jump(continuation);
            m_out.appendTo(continuation, lastNext);
        }
    }
3309
3310 void compileCheckCell()
3311 {
3312 LValue cell = lowCell(m_node->child1());
3313
3314 speculate(
3315 BadCell, jsValueValue(cell), m_node->child1().node(),
3316 m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell())));
3317 }
3318
    // CheckBadCell is emitted when the cell check is known to always fail, so
    // lowering unconditionally terminates with a BadCell exit.
    void compileCheckBadCell()
    {
        terminate(BadCell);
    }
3323
3324 void compileCheckNotEmpty()
3325 {
3326 speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1())));
3327 }
3328
    // Validation-only assertion that child1 is not the empty value. Emits a
    // patchpoint whose generated code hits a breakpoint if the value is empty;
    // compiles to nothing when validation is disabled.
    void compileAssertNotEmpty()
    {
        if (!validationEnabled())
            return;

        LValue val = lowJSValue(m_node->child1());
        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->appendSomeRegister(val);
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                GPRReg input = params[0].gpr();
                // Fall through to the breakpoint only when the value is empty.
                CCallHelpers::Jump done = jit.branchIfNotEmpty(input);
                jit.breakpoint();
                done.link(&jit);
            });
    }
3346
3347 void compileCheckStringIdent()
3348 {
3349 UniquedStringImpl* uid = m_node->uidOperand();
3350 LValue stringImpl = lowStringIdent(m_node->child1());
3351 speculate(BadIdent, noValue(), nullptr, m_out.notEqual(stringImpl, m_out.constIntPtr(uid)));
3352 }
3353
3354 void compileGetExecutable()
3355 {
3356 LValue cell = lowCell(m_node->child1());
3357 speculateFunction(m_node->child1(), cell);
3358 setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
3359 }
3360
    // Lowers Arrayify / ArrayifyToStructure: if the object does not already
    // have the expected indexing shape (or, for ArrayifyToStructure, the exact
    // structure), call the runtime to convert its storage, then re-check and
    // exit if the conversion still did not produce the expected shape.
    void compileArrayify()
    {
        LValue cell = lowCell(m_node->child1());
        LValue property = !!m_node->child2() ? lowInt32(m_node->child2()) : 0;

        LBasicBlock unexpectedStructure = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Shared predicate used both before and after the runtime call.
        auto isUnexpectedArray = [&] (LValue cell) {
            if (m_node->op() == Arrayify)
                return m_out.logicalNot(isArrayTypeForArrayify(cell, m_node->arrayMode()));

            ASSERT(m_node->op() == ArrayifyToStructure);
            return m_out.notEqual(m_out.load32(cell, m_heaps.JSCell_structureID), weakStructureID(m_node->structure()));
        };

        m_out.branch(isUnexpectedArray(cell), rarely(unexpectedStructure), usually(continuation));

        LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);

        if (property) {
            // For dense indexing shapes, refuse to arrayify when the triggering
            // index is large enough to force a sparse array.
            switch (m_node->arrayMode().type()) {
            case Array::Int32:
            case Array::Double:
            case Array::Contiguous:
                speculate(
                    Uncountable, noValue(), 0,
                    m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)));
                break;
            default:
                break;
            }
        }

        // Ask the runtime to convert the object's storage to the target shape.
        switch (m_node->arrayMode().type()) {
        case Array::Int32:
            vmCall(Void, m_out.operation(operationEnsureInt32), m_callFrame, cell);
            break;
        case Array::Double:
            vmCall(Void, m_out.operation(operationEnsureDouble), m_callFrame, cell);
            break;
        case Array::Contiguous:
            vmCall(Void, m_out.operation(operationEnsureContiguous), m_callFrame, cell);
            break;
        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage:
            vmCall(Void, m_out.operation(operationEnsureArrayStorage), m_callFrame, cell);
            break;
        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            break;
        }

        // The conversion may still fail to produce the expected shape; exit then.
        speculate(BadIndexingType, jsValueValue(cell), 0, isUnexpectedArray(cell));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
3419
    // Lowers PutStructure: stores the transition's new structure ID into the
    // cell. The compile plan is notified first so the transition is accounted
    // for by the GC/watchpoint machinery.
    void compilePutStructure()
    {
        m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);

        RegisteredStructure oldStructure = m_node->transition()->previous;
        RegisteredStructure newStructure = m_node->transition()->next;
        // This lowering only rewrites the structure ID, so the transition must
        // not change any other cell-header-relevant properties.
        ASSERT_UNUSED(oldStructure, oldStructure->indexingMode() == newStructure->indexingMode());
        ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
        ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());

        LValue cell = lowCell(m_node->child1());
        m_out.store32(
            weakStructureID(newStructure),
            cell, m_heaps.JSCell_structureID);
    }
3435
    // Lowers GetById-family nodes (Get, TryGet, GetDirect). Cells go straight
    // to the inline-cache path; untyped values branch on cell-ness, using the
    // IC for cells and a generic runtime call for non-cells.
    void compileGetById(AccessType type)
    {
        ASSERT(type == AccessType::Get || type == AccessType::TryGet || type == AccessType::GetDirect);
        switch (m_node->child1().useKind()) {
        case CellUse: {
            setJSValue(getById(lowCell(m_node->child1()), type));
            return;
        }

        case UntypedUse: {
            // This is pretty weird, since we duplicate the slow path both here and in the
            // code generated by the IC. We should investigate making this less bad.
            // https://bugs.webkit.org/show_bug.cgi?id=127830
            LValue value = lowJSValue(m_node->child1());

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
            ValueFromBlock cellResult = m_out.anchor(getById(value, type));
            m_out.jump(continuation);

            // Pick the generic slow-path function that matches the access type.
            J_JITOperation_EJI getByIdFunction = appropriateGenericGetByIdFunction(type);

            m_out.appendTo(notCellCase, continuation);
            ValueFromBlock notCellResult = m_out.anchor(vmCall(
                Int64, m_out.operation(getByIdFunction),
                m_callFrame, value,
                m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, cellResult, notCellResult));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return;
        }
    }
3481
    // Lowers GetByIdWithThis. When both base and |this| are known cells, use
    // the IC path directly; otherwise branch so that only the cell/cell case
    // hits the IC, with every other combination taking the generic call.
    void compileGetByIdWithThis()
    {
        if (m_node->child1().useKind() == CellUse && m_node->child2().useKind() == CellUse)
            setJSValue(getByIdWithThis(lowCell(m_node->child1()), lowCell(m_node->child2())));
        else {
            LValue base = lowJSValue(m_node->child1());
            LValue thisValue = lowJSValue(m_node->child2());

            LBasicBlock baseCellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock thisValueCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(base, provenType(m_node->child1())), unsure(baseCellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(baseCellCase, thisValueCellCase);

            m_out.branch(
                isCell(thisValue, provenType(m_node->child2())), unsure(thisValueCellCase), unsure(notCellCase));

            // Both operands are cells: fast IC path.
            m_out.appendTo(thisValueCellCase, notCellCase);
            ValueFromBlock cellResult = m_out.anchor(getByIdWithThis(base, thisValue));
            m_out.jump(continuation);

            // At least one operand is not a cell: generic runtime call.
            m_out.appendTo(notCellCase, continuation);
            ValueFromBlock notCellResult = m_out.anchor(vmCall(
                Int64, m_out.operation(operationGetByIdWithThisGeneric),
                m_callFrame, base, thisValue,
                m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, cellResult, notCellResult));
        }

    }
3519
3520 void compileGetByValWithThis()
3521 {
3522 LValue base = lowJSValue(m_node->child1());
3523 LValue thisValue = lowJSValue(m_node->child2());
3524 LValue subscript = lowJSValue(m_node->child3());
3525
3526 LValue result = vmCall(Int64, m_out.operation(operationGetByValWithThis), m_callFrame, base, thisValue, subscript);
3527 setJSValue(result);
3528 }
3529
3530 void compilePutByIdWithThis()
3531 {
3532 LValue base = lowJSValue(m_node->child1());
3533 LValue thisValue = lowJSValue(m_node->child2());
3534 LValue value = lowJSValue(m_node->child3());
3535
3536 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByIdWithThisStrict : operationPutByIdWithThis),
3537 m_callFrame, base, thisValue, value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()]));
3538 }
3539
3540 void compilePutByValWithThis()
3541 {
3542 LValue base = lowJSValue(m_graph.varArgChild(m_node, 0));
3543 LValue thisValue = lowJSValue(m_graph.varArgChild(m_node, 1));
3544 LValue property = lowJSValue(m_graph.varArgChild(m_node, 2));
3545 LValue value = lowJSValue(m_graph.varArgChild(m_node, 3));
3546
3547 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis),
3548 m_callFrame, base, thisValue, property, value);
3549 }
3550
    // Lowers the Atomics read-modify-write family (add/and/or/xor/sub,
    // exchange, compareExchange, load, store). If the array mode could not be
    // pinned down (no storage edge), everything goes through a generic runtime
    // call; otherwise we emit the corresponding B3 atomic instruction directly
    // on the typed array's storage.
    void compileAtomicsReadModifyWrite()
    {
        TypedArrayType type = m_node->arrayMode().typedArrayType();
        unsigned numExtraArgs = numExtraAtomicsArgs(m_node->op());
        Edge baseEdge = m_graph.child(m_node, 0);
        Edge indexEdge = m_graph.child(m_node, 1);
        Edge argEdges[maxNumExtraAtomicsArgs];
        for (unsigned i = numExtraArgs; i--;)
            argEdges[i] = m_graph.child(m_node, 2 + i);
        Edge storageEdge = m_graph.child(m_node, 2 + numExtraArgs);

        // Maps the node's opcode to the matching generic slow-path operation.
        auto operation = [&] () -> LValue {
            switch (m_node->op()) {
            case AtomicsAdd:
                return m_out.operation(operationAtomicsAdd);
            case AtomicsAnd:
                return m_out.operation(operationAtomicsAnd);
            case AtomicsCompareExchange:
                return m_out.operation(operationAtomicsCompareExchange);
            case AtomicsExchange:
                return m_out.operation(operationAtomicsExchange);
            case AtomicsLoad:
                return m_out.operation(operationAtomicsLoad);
            case AtomicsOr:
                return m_out.operation(operationAtomicsOr);
            case AtomicsStore:
                return m_out.operation(operationAtomicsStore);
            case AtomicsSub:
                return m_out.operation(operationAtomicsSub);
            case AtomicsXor:
                return m_out.operation(operationAtomicsXor);
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
        };

        // Generic path: pass base, index and arguments as JSValues.
        if (!storageEdge) {
            Vector<LValue> args;
            args.append(m_callFrame);
            args.append(lowJSValue(baseEdge));
            args.append(lowJSValue(indexEdge));
            for (unsigned i = 0; i < numExtraArgs; ++i)
                args.append(lowJSValue(argEdges[i]));
            LValue result = vmCall(Int64, operation(), args);
            setJSValue(result);
            return;
        }

        // Fast path: operate directly on the typed array storage.
        LValue index = lowInt32(indexEdge);
        LValue args[2];
        for (unsigned i = numExtraArgs; i--;)
            args[i] = getIntTypedArrayStoreOperand(argEdges[i]);
        LValue storage = lowStorage(storageEdge);

        TypedPointer pointer = pointerIntoTypedArray(storage, index, type);
        Width width = widthForBytes(elementSize(type));

        LValue atomicValue;
        LValue result;

        // For sub-word signed element types, mask the loaded value down to the
        // element's width before it is consumed as a load result.
        auto sanitizeResult = [&] (LValue value) -> LValue {
            if (isSigned(type)) {
                switch (elementSize(type)) {
                case 1:
                    value = m_out.bitAnd(value, m_out.constInt32(0xff));
                    break;
                case 2:
                    value = m_out.bitAnd(value, m_out.constInt32(0xffff));
                    break;
                case 4:
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                    break;
                }
            }
            return value;
        };

        switch (m_node->op()) {
        case AtomicsAdd:
            atomicValue = m_out.atomicXchgAdd(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsAnd:
            atomicValue = m_out.atomicXchgAnd(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsCompareExchange:
            atomicValue = m_out.atomicStrongCAS(args[0], args[1], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsExchange:
            atomicValue = m_out.atomicXchg(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsLoad:
            // An atomic load is expressed as an exchange-add of zero so that it
            // participates in the same serialization as the other atomics.
            atomicValue = m_out.atomicXchgAdd(m_out.int32Zero, pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsOr:
            atomicValue = m_out.atomicXchgOr(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsStore:
            // Atomics.store returns the stored value, not the previous one.
            atomicValue = m_out.atomicXchg(args[0], pointer, width);
            result = args[0];
            break;
        case AtomicsSub:
            atomicValue = m_out.atomicXchgSub(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsXor:
            atomicValue = m_out.atomicXchgXor(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        // Signify that the state against which the atomic operations are serialized is confined to just
        // the typed array storage, since that's as precise of an abstraction as we can have of shared
        // array buffer storage.
        m_heaps.decorateFencedAccess(&m_heaps.typedArrayProperties, atomicValue);

        setIntTypedArrayLoadResult(result, type);
    }
3679
    // Lowers AtomicsIsLockFree. For an Int32 argument we answer inline: element
    // sizes of 1, 2 and 4 bytes are lock-free, everything else is not. Other
    // use kinds defer to the generic runtime operation.
    void compileAtomicsIsLockFree()
    {
        if (m_node->child1().useKind() != Int32Use) {
            setJSValue(vmCall(Int64, m_out.operation(operationAtomicsIsLockFree), m_callFrame, lowJSValue(m_node->child1())));
            return;
        }

        LValue bytes = lowInt32(m_node->child1());

        LBasicBlock trueCase = m_out.newBlock();
        LBasicBlock falseCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(trueCase);

        // Sizes 1, 2 and 4 all route to the true block; the switch default is false.
        Vector<SwitchCase> cases;
        cases.append(SwitchCase(m_out.constInt32(1), trueCase, Weight()));
        cases.append(SwitchCase(m_out.constInt32(2), trueCase, Weight()));
        cases.append(SwitchCase(m_out.constInt32(4), trueCase, Weight()));
        m_out.switchInstruction(bytes, cases, falseCase, Weight());

        m_out.appendTo(trueCase, falseCase);
        ValueFromBlock trueValue = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);
        m_out.appendTo(falseCase, continuation);
        ValueFromBlock falseValue = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, trueValue, falseValue));
    }
3711
    // Lowers DefineDataProperty (Object.defineProperty data descriptors).
    // Always a runtime call; the property edge's use kind selects the variant
    // that takes the property key in its most specific form.
    void compileDefineDataProperty()
    {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
        LValue attributes = lowInt32(m_graph.varArgChild(m_node, 3));
        Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
        switch (propertyEdge.useKind()) {
        case StringUse: {
            LValue property = lowString(propertyEdge);
            vmCall(Void, m_out.operation(operationDefineDataPropertyString), m_callFrame, base, property, value, attributes);
            break;
        }
        case StringIdentUse: {
            LValue property = lowStringIdent(propertyEdge);
            vmCall(Void, m_out.operation(operationDefineDataPropertyStringIdent), m_callFrame, base, property, value, attributes);
            break;
        }
        case SymbolUse: {
            LValue property = lowSymbol(propertyEdge);
            vmCall(Void, m_out.operation(operationDefineDataPropertySymbol), m_callFrame, base, property, value, attributes);
            break;
        }
        case UntypedUse: {
            LValue property = lowJSValue(propertyEdge);
            vmCall(Void, m_out.operation(operationDefineDataProperty), m_callFrame, base, property, value, attributes);
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
3743
    // Lowers DefineAccessorProperty (Object.defineProperty accessor
    // descriptors). Always a runtime call; the property edge's use kind selects
    // the variant that takes the property key in its most specific form.
    void compileDefineAccessorProperty()
    {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue getter = lowCell(m_graph.varArgChild(m_node, 2));
        LValue setter = lowCell(m_graph.varArgChild(m_node, 3));
        LValue attributes = lowInt32(m_graph.varArgChild(m_node, 4));
        Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
        switch (propertyEdge.useKind()) {
        case StringUse: {
            LValue property = lowString(propertyEdge);
            vmCall(Void, m_out.operation(operationDefineAccessorPropertyString), m_callFrame, base, property, getter, setter, attributes);
            break;
        }
        case StringIdentUse: {
            LValue property = lowStringIdent(propertyEdge);
            vmCall(Void, m_out.operation(operationDefineAccessorPropertyStringIdent), m_callFrame, base, property, getter, setter, attributes);
            break;
        }
        case SymbolUse: {
            LValue property = lowSymbol(propertyEdge);
            vmCall(Void, m_out.operation(operationDefineAccessorPropertySymbol), m_callFrame, base, property, getter, setter, attributes);
            break;
        }
        case UntypedUse: {
            LValue property = lowJSValue(propertyEdge);
            vmCall(Void, m_out.operation(operationDefineAccessorProperty), m_callFrame, base, property, getter, setter, attributes);
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
3776
    // Lowers PutById / PutByIdDirect via an inline cache. Emits a patchpoint
    // whose generator builds a JITPutByIdGenerator fast path plus a late-bound
    // slow path that calls the generator's slow-path function.
    void compilePutById()
    {
        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == CellUse, m_node->child1().useKind());

        Node* node = m_node;
        LValue base = lowCell(node->child1());
        LValue value = lowJSValue(node->child2());
        auto uid = m_graph.identifiers()[node->identifierNumber()];

        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->appendSomeRegister(base);
        patchpoint->appendSomeRegister(value);
        // The IC-generated code relies on the tag registers holding their
        // canonical values.
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        // FIXME: If this is a PutByIdFlush, we might want to late-clobber volatile registers.
        // https://bugs.webkit.org/show_bug.cgi?id=152848

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        ECMAMode ecmaMode = m_graph.executableFor(node->origin.semantic)->ecmaMode();

        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // JS setter call ICs generated by the PutById IC will need this.
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                auto generator = Box<JITPutByIdGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), GPRInfo::patchpointScratchRegister, ecmaMode,
                    node->op() == PutByIdDirect ? Direct : NotDirect);

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                // The slow path is emitted out-of-line and jumps back to `done`.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), generator->slowPathFunction(), InvalidGPRReg,
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                            params[0].gpr(), CCallHelpers::TrustedImmPtr(uid)).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });
    }
3846
3847 void compileGetButterfly()
3848 {
3849 LValue butterfly = m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSObject_butterfly);
3850 setStorage(butterfly);
3851 }
3852
3853 void compileConstantStoragePointer()
3854 {
3855 setStorage(m_out.constIntPtr(m_node->storagePointer()));
3856 }
3857
    void compileGetIndexedPropertyStorage()
    {
        // Produces the raw element-storage pointer for the base cell. Two cases
        // are handled here: strings (character data) and typed-array views
        // (the vector pointer); other array modes never reach this node.
        LValue cell = lowCell(m_node->child1());

        if (m_node->arrayMode().type() == Array::String) {
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Fast path: optimistically load the string's value (impl) pointer.
            LValue fastResultValue = m_out.loadPtr(cell, m_heaps.JSString_value);
            ValueFromBlock fastResult = m_out.anchor(fastResultValue);

            // Ropes have no flat character buffer yet; take the slow path to
            // resolve them. Ropes are expected to be rare here.
            m_out.branch(isRopeString(cell, m_node->child1()), rarely(slowPath), usually(continuation));

            LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);

            // Slow path: call out to flatten the rope, yielding the impl pointer.
            ValueFromBlock slowResult = m_out.anchor(
                vmCall(pointerType(), m_out.operation(operationResolveRope), m_callFrame, cell));

            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);

            // Either way we now hold an impl pointer; the storage is its
            // character data.
            setStorage(m_out.loadPtr(m_out.phi(pointerType(), fastResult, slowResult), m_heaps.StringImpl_data));
            return;
        }

        // Otherwise this must be a typed-array view: storage is the (caged)
        // backing vector pointer.
        DFG_ASSERT(m_graph, m_node, isTypedView(m_node->arrayMode().typedArrayType()), m_node->arrayMode().typedArrayType());
        LValue vector = m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector);
        setStorage(caged(Gigacage::Primitive, vector, cell));
    }
3888
3889 void compileCheckArray()
3890 {
3891 Edge edge = m_node->child1();
3892 LValue cell = lowCell(edge);
3893
3894 if (m_node->arrayMode().alreadyChecked(m_graph, m_node, abstractValue(edge)))
3895 return;
3896
3897 speculate(
3898 BadIndexingType, jsValueValue(cell), 0,
3899 m_out.logicalNot(isArrayTypeForCheckArray(cell, m_node->arrayMode())));
3900 }
3901
    void compileGetTypedArrayByteOffset()
    {
        // Computes the byte offset of a typed-array view into its backing
        // ArrayBuffer. Three outcomes feed the final phi:
        //   - non-wasteful view: offset is 0;
        //   - wasteful view with a null vector: the null (zero) vector pointer
        //     itself is the result, i.e. offset 0;
        //   - wasteful view with a real vector: vector - buffer data pointer.
        LValue basePtr = lowCell(m_node->child1());

        LBasicBlock simpleCase = m_out.newBlock();
        LBasicBlock wastefulCase = m_out.newBlock();
        LBasicBlock notNull = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue mode = m_out.load32(basePtr, m_heaps.JSArrayBufferView_mode);
        m_out.branch(
            m_out.notEqual(mode, m_out.constInt32(WastefulTypedArray)),
            unsure(simpleCase), unsure(wastefulCase));

        LBasicBlock lastNext = m_out.appendTo(simpleCase, wastefulCase);

        // Non-wasteful views sit at the start of their storage: offset 0.
        ValueFromBlock simpleOut = m_out.anchor(m_out.constIntPtr(0));

        m_out.jump(continuation);

        m_out.appendTo(wastefulCase, notNull);

        // If the vector is null, fall straight through to the continuation;
        // the anchored (null) vector value then serves as a zero result.
        LValue vector = m_out.loadPtr(basePtr, m_heaps.JSArrayBufferView_vector);
        ValueFromBlock nullVectorOut = m_out.anchor(vector);
        m_out.branch(vector, unsure(notNull), unsure(continuation));

        m_out.appendTo(notNull, continuation);

        // Reach the ArrayBuffer through the butterfly, caging each pointer as
        // we load it.
        LValue butterflyPtr = caged(Gigacage::JSValue, m_out.loadPtr(basePtr, m_heaps.JSObject_butterfly), basePtr);
        LValue arrayBufferPtr = m_out.loadPtr(butterflyPtr, m_heaps.Butterfly_arrayBuffer);

        LValue vectorPtr = caged(Gigacage::Primitive, vector, basePtr);

        // FIXME: This needs caging.
        // https://bugs.webkit.org/show_bug.cgi?id=175515
        LValue dataPtr = m_out.loadPtr(arrayBufferPtr, m_heaps.ArrayBuffer_data);
        // Strip the pointer tag before doing arithmetic on the data pointer.
        dataPtr = removeArrayPtrTag(dataPtr);

        // The byte offset is the distance from the buffer's data to the
        // view's vector.
        ValueFromBlock wastefulOut = m_out.anchor(m_out.sub(vectorPtr, dataPtr));

        m_out.jump(continuation);
        m_out.appendTo(continuation, lastNext);

        setInt32(m_out.castToInt32(m_out.phi(pointerType(), simpleOut, nullVectorOut, wastefulOut)));
    }
3947
    void compileGetPrototypeOf()
    {
        // Lowers GetPrototypeOf. For proven array/function/final-object uses we
        // read the prototype inline (from the Structure, or from inline storage
        // for poly-proto objects); other use kinds call out to the runtime.
        switch (m_node->child1().useKind()) {
        case ArrayUse:
        case FunctionUse:
        case FinalObjectUse: {
            LValue object = lowCell(m_node->child1());
            // Emit the type check matching the use kind.
            switch (m_node->child1().useKind()) {
            case ArrayUse:
                speculateArray(m_node->child1(), object);
                break;
            case FunctionUse:
                speculateFunction(m_node->child1(), object);
                break;
            case FinalObjectUse:
                speculateFinalObject(m_node->child1(), object);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            LValue structure = loadStructure(object);

            // If abstract interpretation proved the value is an object with a
            // finite structure set, we may know statically whether every
            // structure is mono-proto or poly-proto and skip the runtime branch.
            AbstractValue& value = m_state.forNode(m_node->child1());
            if ((value.m_type && !(value.m_type & ~SpecObject)) && value.m_structure.isFinite()) {
                bool hasPolyProto = false;
                bool hasMonoProto = false;
                value.m_structure.forEach([&] (RegisteredStructure structure) {
                    if (structure->hasPolyProto())
                        hasPolyProto = true;
                    else
                        hasMonoProto = true;
                });

                // All mono-proto: prototype lives on the Structure.
                if (hasMonoProto && !hasPolyProto) {
                    setJSValue(m_out.load64(structure, m_heaps.Structure_prototype));
                    return;
                }

                // All poly-proto: prototype lives in the object's inline
                // storage at the known poly-proto offset.
                if (hasPolyProto && !hasMonoProto) {
                    setJSValue(m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), object, m_out.constInt64(knownPolyProtoOffset), ScaleEight, JSObject::offsetOfInlineStorage())));
                    return;
                }
            }

            // Mixed or unknown: branch at runtime. A zero prototype slot on
            // the Structure indicates poly-proto, so load from inline storage
            // in that case.
            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock loadPolyProto = m_out.newBlock();

            LValue prototypeBits = m_out.load64(structure, m_heaps.Structure_prototype);
            ValueFromBlock directPrototype = m_out.anchor(prototypeBits);
            m_out.branch(m_out.isZero64(prototypeBits), unsure(loadPolyProto), unsure(continuation));

            LBasicBlock lastNext = m_out.appendTo(loadPolyProto, continuation);
            ValueFromBlock polyProto = m_out.anchor(
                m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), object, m_out.constInt64(knownPolyProtoOffset), ScaleEight, JSObject::offsetOfInlineStorage())));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, directPrototype, polyProto));
            return;
        }
        case ObjectUse: {
            // Known object but not a shape we handle inline: runtime call.
            setJSValue(vmCall(Int64, m_out.operation(operationGetPrototypeOfObject), m_callFrame, lowObject(m_node->child1())));
            return;
        }
        default: {
            // Fully generic value: runtime call handles coercion/errors.
            setJSValue(vmCall(Int64, m_out.operation(operationGetPrototypeOf), m_callFrame, lowJSValue(m_node->child1())));
            return;
        }
        }
    }
4020
    void compileGetArrayLength()
    {
        // Lowers GetArrayLength per array mode: where the length is stored
        // depends on the base's storage shape.
        switch (m_node->arrayMode().type()) {
        case Array::Undecided:
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous: {
            // Contiguous shapes: length is the butterfly's public length.
            setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.Butterfly_publicLength));
            return;
        }

        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage: {
            // ArrayStorage's public length may be out of int32 range; OSR-exit
            // if it reads back negative.
            LValue length = m_out.load32(lowStorage(m_node->child2()), m_heaps.ArrayStorage_publicLength);
            speculate(Uncountable, noValue(), nullptr, m_out.lessThan(length, m_out.int32Zero));
            setInt32(length);
            return;
        }

        case Array::String: {
            LValue string = lowCell(m_node->child1());

            LBasicBlock ropePath = m_out.newBlock();
            LBasicBlock nonRopePath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Ropes keep their length on the JSRopeString itself; flat strings
            // keep it on the StringImpl.
            m_out.branch(isRopeString(string, m_node->child1()), rarely(ropePath), usually(nonRopePath));

            LBasicBlock lastNext = m_out.appendTo(ropePath, nonRopePath);
            ValueFromBlock ropeLength = m_out.anchor(m_out.load32NonNegative(string, m_heaps.JSRopeString_length));
            m_out.jump(continuation);

            m_out.appendTo(nonRopePath, continuation);
            ValueFromBlock nonRopeLength = m_out.anchor(m_out.load32NonNegative(m_out.loadPtr(string, m_heaps.JSString_value), m_heaps.StringImpl_length));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setInt32(m_out.phi(Int32, ropeLength, nonRopeLength));
            return;
        }

        case Array::DirectArguments: {
            // Only valid while no arguments have been mapped/overridden;
            // otherwise OSR-exit with ExoticObjectMode.
            LValue arguments = lowCell(m_node->child1());
            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.notNull(m_out.loadPtr(arguments, m_heaps.DirectArguments_mappedArguments)));
            setInt32(m_out.load32NonNegative(arguments, m_heaps.DirectArguments_length));
            return;
        }

        case Array::ScopedArguments: {
            // Only valid while length/things have not been overridden;
            // otherwise OSR-exit with ExoticObjectMode.
            LValue arguments = lowCell(m_node->child1());
            LValue storage = m_out.loadPtr(arguments, m_heaps.ScopedArguments_storage);
            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.notZero32(m_out.load8ZeroExt32(storage, m_heaps.ScopedArguments_Storage_overrodeThings)));
            setInt32(m_out.load32NonNegative(storage, m_heaps.ScopedArguments_Storage_totalLength));
            return;
        }

        default:
            // Typed-array views keep their length directly on the view.
            if (m_node->arrayMode().isSomeTypedArrayView()) {
                setInt32(
                    m_out.load32NonNegative(lowCell(m_node->child1()), m_heaps.JSArrayBufferView_length));
                return;
            }

            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        }
    }
4092
4093 void compileGetVectorLength()
4094 {
4095 switch (m_node->arrayMode().type()) {
4096 case Array::ArrayStorage:
4097 case Array::SlowPutArrayStorage:
4098 setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.ArrayStorage_vectorLength));
4099 return;
4100 default:
4101 return;
4102 }
4103 }
4104
4105 void compileCheckInBounds()
4106 {
4107 speculate(
4108 OutOfBounds, noValue(), 0,
4109 m_out.aboveOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
4110
4111 // Even though we claim to have JSValue result, no user of us should
4112 // depend on our value. Users of this node just need to maintain that
4113 // we dominate them.
4114 }
4115
    void compileGetByVal()
    {
        // Lowers GetByVal. Each array mode gets its own fast path; paths that
        // can miss (holes, out-of-bounds) fall back to a runtime call.
        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Contiguous: {
            LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

            IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
                m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;

            LValue base = lowCell(m_graph.varArgChild(m_node, 0));

            if (m_node->arrayMode().isInBounds()) {
                // In-bounds is guaranteed; only holes (encoded as zero) need
                // handling: sane-chain loads turn them into undefined,
                // otherwise we OSR-exit.
                LValue result = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
                LValue isHole = m_out.isZero64(result);
                if (m_node->arrayMode().isSaneChain()) {
                    DFG_ASSERT(
                        m_graph, m_node, m_node->arrayMode().type() == Array::Contiguous, m_node->arrayMode().type());
                    result = m_out.select(
                        isHole, m_out.constInt64(JSValue::encode(jsUndefined())), result);
                } else
                    speculate(LoadFromHole, noValue(), 0, isHole);
                setJSValue(result);
                return;
            }

            // Bounds not guaranteed: fast path for in-bounds non-hole loads,
            // slow call for out-of-bounds or hole.
            LBasicBlock fastCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                m_out.aboveOrEqual(
                    index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
                rarely(slowCase), usually(fastCase));

            LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);

            LValue fastResultValue = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
            ValueFromBlock fastResult = m_out.anchor(fastResultValue);
            // Zero encodes a hole; those also go to the slow path.
            m_out.branch(
                m_out.isZero64(fastResultValue), rarely(slowCase), usually(continuation));

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        case Array::Double: {
            LValue base = lowCell(m_graph.varArgChild(m_node, 0));
            LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

            IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;

            if (m_node->arrayMode().isInBounds()) {
                LValue result = m_out.loadDouble(
                    baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));

                // Double-array holes read back as NaN; unless the chain is
                // sane, a NaN result means a hole, so OSR-exit on it.
                if (!m_node->arrayMode().isSaneChain()) {
                    speculate(
                        LoadFromHole, noValue(), 0,
                        m_out.doubleNotEqualOrUnordered(result, result));
                }
                setDouble(result);
                break;
            }

            // Out-of-bounds possible: in-bounds NaN-free loads get boxed;
            // anything else goes through the runtime call.
            LBasicBlock inBounds = m_out.newBlock();
            LBasicBlock boxPath = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                m_out.aboveOrEqual(
                    index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
                rarely(slowCase), usually(inBounds));

            LBasicBlock lastNext = m_out.appendTo(inBounds, boxPath);
            LValue doubleValue = m_out.loadDouble(
                baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
            m_out.branch(
                m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue),
                rarely(slowCase), usually(boxPath));

            m_out.appendTo(boxPath, slowCase);
            ValueFromBlock fastResult = m_out.anchor(boxDouble(doubleValue));
            m_out.jump(continuation);

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        case Array::Undecided: {
            // Undecided storage has no elements: any non-negative index yields
            // undefined; negative indices OSR-exit.
            LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

            speculate(OutOfBounds, noValue(), m_node, m_out.lessThan(index, m_out.int32Zero));
            setJSValue(m_out.constInt64(ValueUndefined));
            return;
        }

        case Array::DirectArguments: {
            LValue base = lowCell(m_graph.varArgChild(m_node, 0));
            LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

            // Bail if any arguments have been mapped/overridden.
            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.notNull(m_out.loadPtr(base, m_heaps.DirectArguments_mappedArguments)));

            LValue length = m_out.load32NonNegative(base, m_heaps.DirectArguments_length);
            auto isOutOfBounds = m_out.aboveOrEqual(index, length);
            if (m_node->arrayMode().isInBounds()) {
                speculate(OutOfBounds, noValue(), nullptr, isOutOfBounds);
                TypedPointer address = m_out.baseIndex(
                    m_heaps.DirectArguments_storage, base, m_out.zeroExtPtr(index));
                setJSValue(m_out.load64(address));
                return;
            }

            // Out-of-bounds possible: runtime call on the miss path.
            LBasicBlock inBounds = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(isOutOfBounds, rarely(slowCase), usually(inBounds));

            LBasicBlock lastNext = m_out.appendTo(inBounds, slowCase);
            TypedPointer address = m_out.baseIndex(
                m_heaps.DirectArguments_storage,
                base,
                m_out.zeroExt(index, pointerType()));
            ValueFromBlock fastResult = m_out.anchor(m_out.load64(address));
            m_out.jump(continuation);

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        case Array::ScopedArguments: {
            LValue base = lowCell(m_graph.varArgChild(m_node, 0));
            LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

            LValue storage = m_out.loadPtr(base, m_heaps.ScopedArguments_storage);
            LValue totalLength = m_out.load32NonNegative(
                storage, m_heaps.ScopedArguments_Storage_totalLength);
            // Out-of-range access on scoped arguments bails out entirely.
            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.aboveOrEqual(index, totalLength));

            LValue table = m_out.loadPtr(base, m_heaps.ScopedArguments_table);
            LValue namedLength = m_out.load32(table, m_heaps.ScopedArgumentsTable_length);

            // Named arguments live in the scope via the table's scope offsets;
            // overflow arguments live in the storage's overflow area.
            LBasicBlock namedCase = m_out.newBlock();
            LBasicBlock overflowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                m_out.aboveOrEqual(index, namedLength), unsure(overflowCase), unsure(namedCase));

            LBasicBlock lastNext = m_out.appendTo(namedCase, overflowCase);

            LValue scope = m_out.loadPtr(base, m_heaps.ScopedArguments_scope);
            LValue arguments = m_out.loadPtr(table, m_heaps.ScopedArgumentsTable_arguments);

            TypedPointer address = m_out.baseIndex(
                m_heaps.scopedArgumentsTableArguments, arguments, m_out.zeroExtPtr(index));
            LValue scopeOffset = m_out.load32(address);

            // An invalid scope offset means the slot isn't in the scope; bail.
            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.equal(scopeOffset, m_out.constInt32(ScopeOffset::invalidOffset)));

            address = m_out.baseIndex(
                m_heaps.JSLexicalEnvironment_variables, scope, m_out.zeroExtPtr(scopeOffset));
            ValueFromBlock namedResult = m_out.anchor(m_out.load64(address));
            m_out.jump(continuation);

            m_out.appendTo(overflowCase, continuation);

            address = m_out.baseIndex(
                m_heaps.ScopedArguments_Storage_storage, storage,
                m_out.zeroExtPtr(m_out.sub(index, namedLength)));
            LValue overflowValue = m_out.load64(address);
            // Zero encodes an empty overflow slot; bail on it.
            speculate(ExoticObjectMode, noValue(), nullptr, m_out.isZero64(overflowValue));
            ValueFromBlock overflowResult = m_out.anchor(overflowValue);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);

            LValue result = m_out.phi(Int64, namedResult, overflowResult);
            // Mask the result by the bounds check; presumably a mitigation
            // against speculative out-of-bounds reads — see preciseIndexMask32.
            result = preciseIndexMask32(result, index, totalLength);

            setJSValue(result);
            return;
        }

        case Array::Generic: {
            // Generic path: pick the most specific runtime operation the
            // proven use kinds allow.
            if (m_graph.varArgChild(m_node, 0).useKind() == ObjectUse) {
                if (m_graph.varArgChild(m_node, 1).useKind() == StringUse) {
                    setJSValue(vmCall(
                        Int64, m_out.operation(operationGetByValObjectString), m_callFrame,
                        lowObject(m_graph.varArgChild(m_node, 0)), lowString(m_graph.varArgChild(m_node, 1))));
                    return;
                }

                if (m_graph.varArgChild(m_node, 1).useKind() == SymbolUse) {
                    setJSValue(vmCall(
                        Int64, m_out.operation(operationGetByValObjectSymbol), m_callFrame,
                        lowObject(m_graph.varArgChild(m_node, 0)), lowSymbol(m_graph.varArgChild(m_node, 1))));
                    return;
                }
            }
            setJSValue(vmCall(
                Int64, m_out.operation(operationGetByVal), m_callFrame,
                lowJSValue(m_graph.varArgChild(m_node, 0)), lowJSValue(m_graph.varArgChild(m_node, 1))));
            return;
        }

        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage: {
            LValue base = lowCell(m_graph.varArgChild(m_node, 0));
            LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

            IndexedAbstractHeap& heap = m_heaps.ArrayStorage_vector;

            if (m_node->arrayMode().isInBounds()) {
                // In-bounds guaranteed; holes (zero) still OSR-exit.
                LValue result = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
                speculate(LoadFromHole, noValue(), 0, m_out.isZero64(result));
                setJSValue(result);
                break;
            }

            // Bounds checked against the vector length; holes and
            // out-of-bounds go to the runtime call.
            LBasicBlock inBounds = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                m_out.aboveOrEqual(index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength)),
                rarely(slowCase), usually(inBounds));

            LBasicBlock lastNext = m_out.appendTo(inBounds, slowCase);
            LValue result = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
            ValueFromBlock fastResult = m_out.anchor(result);
            m_out.branch(
                m_out.isZero64(result),
                rarely(slowCase), usually(continuation));

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        case Array::String: {
            // String indexing shares the StringCharAt lowering.
            compileStringCharAt();
            return;
        }

        case Array::Int8Array:
        case Array::Int16Array:
        case Array::Int32Array:
        case Array::Uint8Array:
        case Array::Uint8ClampedArray:
        case Array::Uint16Array:
        case Array::Uint32Array:
        case Array::Float32Array:
        case Array::Float64Array: {
            LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

            TypedArrayType type = m_node->arrayMode().typedArrayType();
            ASSERT(isTypedView(type));
            {
                TypedPointer pointer = pointerIntoTypedArray(storage, index, type);

                // Integer element types: load, then let the helper decide how
                // to represent/speculate on the result.
                if (isInt(type)) {
                    LValue result = loadFromIntTypedArray(pointer, type);
                    bool canSpeculate = true;
                    setIntTypedArrayLoadResult(result, type, canSpeculate);
                    return;
                }

                ASSERT(isFloat(type));

                // Float element types: widen Float32 to double; Float64 loads
                // directly.
                LValue result;
                switch (type) {
                case TypeFloat32:
                    result = m_out.floatToDouble(m_out.loadFloat(pointer));
                    break;
                case TypeFloat64:
                    result = m_out.loadDouble(pointer);
                    break;
                default:
                    DFG_CRASH(m_graph, m_node, "Bad typed array type");
                }

                setDouble(result);
                return;
            }
        }

        case Array::AnyTypedArray:
        case Array::ForceExit:
        case Array::SelectUsingArguments:
        case Array::SelectUsingPredictions:
        case Array::Unprofiled:
            // These modes should have been resolved before FTL lowering.
            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        }
    }
4447
    void compileGetMyArgumentByVal()
    {
        // Loads argument[index] of the (possibly inlined) caller frame.
        // GetMyArgumentByValOutOfBounds yields undefined on a miss; the plain
        // variant OSR-exits instead.
        InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();

        LValue originalIndex = lowInt32(m_node->child2());

        // Argument count is a compile-time constant for non-varargs inline
        // frames; otherwise it's loaded from the frame's count register.
        LValue numberOfArgsIncludingThis;
        if (inlineCallFrame && !inlineCallFrame->isVarargs())
            numberOfArgsIncludingThis = m_out.constInt32(inlineCallFrame->argumentCountIncludingThis);
        else {
            VirtualRegister argumentCountRegister = AssemblyHelpers::argumentCount(inlineCallFrame);
            numberOfArgsIncludingThis = m_out.load32(payloadFor(argumentCountRegister));
        }

        LValue numberOfArgs = m_out.sub(numberOfArgsIncludingThis, m_out.int32One);
        LValue indexToCheck = originalIndex;
        LValue numberOfArgumentsToSkip = m_out.int32Zero;
        // Skipped leading arguments shift the effective index; the add is
        // overflow-checked and OSR-exits on overflow.
        if (m_node->numberOfArgumentsToSkip()) {
            numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
            CheckValue* check = m_out.speculateAdd(indexToCheck, numberOfArgumentsToSkip);
            blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
            indexToCheck = check;
        }

        // Out of bounds if the shifted index reaches the argument count or
        // wrapped below the skip count.
        LValue isOutOfBounds = m_out.bitOr(m_out.aboveOrEqual(indexToCheck, numberOfArgs), m_out.below(indexToCheck, numberOfArgumentsToSkip));
        LBasicBlock continuation = nullptr;
        LBasicBlock lastNext = nullptr;
        ValueFromBlock slowResult;
        if (m_node->op() == GetMyArgumentByValOutOfBounds) {
            // Out-of-bounds variant: anchor undefined and branch around the
            // in-bounds load.
            LBasicBlock normalCase = m_out.newBlock();
            continuation = m_out.newBlock();

            slowResult = m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined())));
            m_out.branch(isOutOfBounds, unsure(continuation), unsure(normalCase));

            lastNext = m_out.appendTo(normalCase, continuation);
        } else
            speculate(OutOfBounds, noValue(), nullptr, isOutOfBounds);

        // +1 converts from argument index to slot index past `this`.
        LValue index = m_out.add(indexToCheck, m_out.int32One);

        // Base of the argument slots: for an inline frame, the fixed-up first
        // argument's register (only if it has any arguments beyond `this`);
        // for the machine frame, argument 0's address.
        TypedPointer base;
        if (inlineCallFrame) {
            if (inlineCallFrame->argumentCountIncludingThis > 1)
                base = addressFor(inlineCallFrame->argumentsWithFixup[0].virtualRegister());
        } else
            base = addressFor(virtualRegisterForArgument(0));

        LValue result;
        if (base) {
            LValue pointer = m_out.baseIndex(
                base.value(), m_out.zeroExt(index, pointerType()), ScaleEight);
            result = m_out.load64(TypedPointer(m_heaps.variables.atAnyIndex(), pointer));
            // Mask the loaded value by the bounds check; presumably a
            // speculative-execution mitigation — see preciseIndexMask32.
            result = preciseIndexMask32(result, indexToCheck, numberOfArgs);
        } else
            result = m_out.constInt64(JSValue::encode(jsUndefined()));

        if (m_node->op() == GetMyArgumentByValOutOfBounds) {
            ValueFromBlock normalResult = m_out.anchor(result);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            result = m_out.phi(Int64, slowResult, normalResult);
        }

        setJSValue(result);
    }
4515
4516 void compilePutByVal()
4517 {
4518 Edge child1 = m_graph.varArgChild(m_node, 0);
4519 Edge child2 = m_graph.varArgChild(m_node, 1);
4520 Edge child3 = m_graph.varArgChild(m_node, 2);
4521 Edge child4 = m_graph.varArgChild(m_node, 3);
4522 Edge child5 = m_graph.varArgChild(m_node, 4);
4523
4524 ArrayMode arrayMode = m_node->arrayMode().modeForPut();
4525 switch (arrayMode.type()) {
4526 case Array::Generic: {
4527 if (child1.useKind() == CellUse) {
4528 V_JITOperation_ECCJ operation = nullptr;
4529 if (child2.useKind() == StringUse) {
4530 if (m_node->op() == PutByValDirect) {
4531 if (m_graph.isStrictModeFor(m_node->origin.semantic))
4532 operation = operationPutByValDirectCellStringStrict;
4533 else
4534 operation = operationPutByValDirectCellStringNonStrict;
4535 } else {
4536 if (m_graph.isStrictModeFor(m_node->origin.semantic))
4537 operation = operationPutByValCellStringStrict;
4538 else
4539 operation = operationPutByValCellStringNonStrict;
4540 }
4541 vmCall(Void, m_out.operation(operation), m_callFrame, lowCell(child1), lowString(child2), lowJSValue(child3));
4542 return;
4543 }
4544
4545 if (child2.useKind() == SymbolUse) {
4546 if (m_node->op() == PutByValDirect) {
4547 if (m_graph.isStrictModeFor(m_node->origin.semantic))
4548 operation = operationPutByValDirectCellSymbolStrict;
4549 else
4550 operation = operationPutByValDirectCellSymbolNonStrict;
4551 } else {
4552 if (m_graph.isStrictModeFor(m_node->origin.semantic))
4553 operation = operationPutByValCellSymbolStrict;
4554 else
4555 operation = operationPutByValCellSymbolNonStrict;
4556 }
4557 vmCall(Void, m_out.operation(operation), m_callFrame, lowCell(child1), lowSymbol(child2), lowJSValue(child3));
4558 return;
4559 }
4560 }
4561
4562 V_JITOperation_EJJJ operation;
4563 if (m_node->op() == PutByValDirect) {
4564 if (m_graph.isStrictModeFor(m_node->origin.semantic))
4565 operation = operationPutByValDirectStrict;
4566 else
4567 operation = operationPutByValDirectNonStrict;
4568 } else {
4569 if (m_graph.isStrictModeFor(m_node->origin.semantic))
4570 operation = operationPutByValStrict;
4571 else
4572 operation = operationPutByValNonStrict;
4573 }
4574
4575 vmCall(
4576 Void, m_out.operation(operation), m_callFrame,
4577 lowJSValue(child1), lowJSValue(child2), lowJSValue(child3));
4578 return;
4579 }
4580
4581 default:
4582 break;
4583 }
4584
4585 LValue base = lowCell(child1);
4586 LValue index = lowInt32(child2);
4587 LValue storage = lowStorage(child4);
4588
4589 switch (arrayMode.type()) {
4590 case Array::Int32:
4591 case Array::Double:
4592 case Array::Contiguous: {
4593 LBasicBlock continuation = m_out.newBlock();
4594 LBasicBlock outerLastNext = m_out.appendTo(m_out.m_block, continuation);
4595
4596 switch (arrayMode.type()) {
4597 case Array::Int32:
4598 case Array::Contiguous: {
4599 LValue value = lowJSValue(child3, ManualOperandSpeculation);
4600
4601 if (arrayMode.type() == Array::Int32)
4602 FTL_TYPE_CHECK(jsValueValue(value), child3, SpecInt32Only, isNotInt32(value));
4603
4604 TypedPointer elementPointer = m_out.baseIndex(
4605 arrayMode.type() == Array::Int32 ?
4606 m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties,
4607 storage, m_out.zeroExtPtr(index), provenValue(child2));
4608
4609 if (m_node->op() == PutByValAlias) {
4610 m_out.store64(value, elementPointer);
4611 break;
4612 }
4613
4614 contiguousPutByValOutOfBounds(
4615 m_graph.isStrictModeFor(m_node->origin.semantic)
4616 ? (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsStrict)
4617 : (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsNonStrict : operationPutByValBeyondArrayBoundsNonStrict),
4618 base, storage, index, value, continuation);
4619
4620 m_out.store64(value, elementPointer);
4621 break;
4622 }
4623
4624 case Array::Double: {
4625 LValue value = lowDouble(child3);
4626
4627 FTL_TYPE_CHECK(
4628 doubleValue(value), child3, SpecDoubleReal,
4629 m_out.doubleNotEqualOrUnordered(value, value));
4630
4631 TypedPointer elementPointer = m_out.baseIndex(
4632 m_heaps.indexedDoubleProperties, storage, m_out.zeroExtPtr(index),
4633 provenValue(child2));
4634
4635 if (m_node->op() == PutByValAlias) {
4636 m_out.storeDouble(value, elementPointer);
4637 break;
4638 }
4639
4640 contiguousPutByValOutOfBounds(
4641 m_graph.isStrictModeFor(m_node->origin.semantic)
4642 ? (m_node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsStrict)
4643 : (m_node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsNonStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict),
4644 base, storage, index, value, continuation);
4645
4646 m_out.storeDouble(value, elementPointer);
4647 break;
4648 }
4649
4650 default:
4651 DFG_CRASH(m_graph, m_node, "Bad array type");
4652 }
4653
4654 m_out.jump(continuation);
4655 m_out.appendTo(continuation, outerLastNext);
4656 return;
4657 }
4658
4659 case Array::ArrayStorage:
4660 case Array::SlowPutArrayStorage: {
4661 LValue value = lowJSValue(child3);
4662
4663 TypedPointer elementPointer = m_out.baseIndex(
4664 m_heaps.ArrayStorage_vector, storage, m_out.zeroExtPtr(index),
4665 provenValue(child2));
4666
4667 if (m_node->op() == PutByValAlias) {
4668 m_out.store64(value, elementPointer);
4669 return;
4670 }
4671
4672 if (arrayMode.isInBounds()) {
4673 speculate(StoreToHole, noValue(), 0, m_out.isZero64(m_out.load64(elementPointer)));
4674 m_out.store64(value, elementPointer);
4675 return;
4676 }
4677
4678 LValue isOutOfBounds = m_out.aboveOrEqual(
4679 index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength));
4680
4681 auto slowPathFunction = m_graph.isStrictModeFor(m_node->origin.semantic)
4682 ? (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsStrict)
4683 : (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsNonStrict : operationPutByValBeyondArrayBoundsNonStrict);
4684 if (!arrayMode.isOutOfBounds()) {
4685 speculate(OutOfBounds, noValue(), 0, isOutOfBounds);
4686 isOutOfBounds = m_out.booleanFalse;
4687 }
4688
4689 LBasicBlock inBoundCase = m_out.newBlock();
4690 LBasicBlock slowCase = m_out.newBlock();
4691 LBasicBlock holeCase = m_out.newBlock();
4692 LBasicBlock doStoreCase = m_out.newBlock();
4693 LBasicBlock lengthUpdateCase = m_out.newBlock();
4694 LBasicBlock continuation = m_out.newBlock();
4695
4696 m_out.branch(isOutOfBounds, rarely(slowCase), usually(inBoundCase));
4697
4698 LBasicBlock lastNext = m_out.appendTo(slowCase, inBoundCase);
4699 vmCall(
4700 Void, m_out.operation(slowPathFunction),
4701 m_callFrame, base, index, value);
4702 m_out.jump(continuation);
4703
4704
4705 if (arrayMode.isSlowPut()) {
4706 m_out.appendTo(inBoundCase, doStoreCase);
4707 m_out.branch(m_out.isZero64(m_out.load64(elementPointer)), rarely(slowCase), usually(doStoreCase));
4708 } else {
4709 m_out.appendTo(inBoundCase, holeCase);
4710 m_out.branch(m_out.isZero64(m_out.load64(elementPointer)), rarely(holeCase), usually(doStoreCase));
4711
4712 m_out.appendTo(holeCase, lengthUpdateCase);
4713 m_out.store32(
4714 m_out.add(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.int32One),
4715 storage, m_heaps.ArrayStorage_numValuesInVector);
4716 m_out.branch(
4717 m_out.below(
4718 index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_publicLength)),
4719 unsure(doStoreCase), unsure(lengthUpdateCase));
4720
4721 m_out.appendTo(lengthUpdateCase, doStoreCase);
4722 m_out.store32(
4723 m_out.add(index, m_out.int32One),
4724 storage, m_heaps.ArrayStorage_publicLength);
4725 m_out.jump(doStoreCase);
4726 }
4727
4728 m_out.appendTo(doStoreCase, continuation);
4729 m_out.store64(value, elementPointer);
4730 m_out.jump(continuation);
4731
4732 m_out.appendTo(continuation, lastNext);
4733 return;
4734 }
4735
4736 case Array::Int8Array:
4737 case Array::Int16Array:
4738 case Array::Int32Array:
4739 case Array::Uint8Array:
4740 case Array::Uint8ClampedArray:
4741 case Array::Uint16Array:
4742 case Array::Uint32Array:
4743 case Array::Float32Array:
4744 case Array::Float64Array: {
4745 TypedArrayType type = arrayMode.typedArrayType();
4746
4747 ASSERT(isTypedView(type));
4748 {
4749 TypedPointer pointer = TypedPointer(
4750 m_heaps.typedArrayProperties,
4751 m_out.add(
4752 storage,
4753 m_out.shl(
4754 m_out.zeroExt(index, pointerType()),
4755 m_out.constIntPtr(logElementSize(type)))));
4756
4757 LValue valueToStore;
4758
4759 if (isInt(type)) {
4760 LValue intValue = getIntTypedArrayStoreOperand(child3, isClamped(type));
4761
4762 valueToStore = intValue;
4763 } else /* !isInt(type) */ {
4764 LValue value = lowDouble(child3);
4765 switch (type) {
4766 case TypeFloat32:
4767 valueToStore = m_out.doubleToFloat(value);
4768 break;
4769 case TypeFloat64:
4770 valueToStore = value;
4771 break;
4772 default:
4773 DFG_CRASH(m_graph, m_node, "Bad typed array type");
4774 }
4775 }
4776
4777 if (arrayMode.isInBounds() || m_node->op() == PutByValAlias)
4778 m_out.store(valueToStore, pointer, storeType(type));
4779 else {
4780 LBasicBlock isInBounds = m_out.newBlock();
4781 LBasicBlock isOutOfBounds = m_out.newBlock();
4782 LBasicBlock continuation = m_out.newBlock();
4783
4784 m_out.branch(
4785 m_out.aboveOrEqual(index, lowInt32(child5)),
4786 unsure(isOutOfBounds), unsure(isInBounds));
4787
4788 LBasicBlock lastNext = m_out.appendTo(isInBounds, isOutOfBounds);
4789 m_out.store(valueToStore, pointer, storeType(type));
4790 m_out.jump(continuation);
4791
4792 m_out.appendTo(isOutOfBounds, continuation);
4793 speculateTypedArrayIsNotNeutered(base);
4794 m_out.jump(continuation);
4795
4796 m_out.appendTo(continuation, lastNext);
4797 }
4798
4799 return;
4800 }
4801 }
4802
4803 case Array::AnyTypedArray:
4804 case Array::String:
4805 case Array::DirectArguments:
4806 case Array::ForceExit:
4807 case Array::Generic:
4808 case Array::ScopedArguments:
4809 case Array::SelectUsingArguments:
4810 case Array::SelectUsingPredictions:
4811 case Array::Undecided:
4812 case Array::Unprofiled:
4813 DFG_CRASH(m_graph, m_node, "Bad array type");
4814 break;
4815 }
4816 }
4817
4818 void compilePutAccessorById()
4819 {
4820 LValue base = lowCell(m_node->child1());
4821 LValue accessor = lowCell(m_node->child2());
4822 auto uid = m_graph.identifiers()[m_node->identifierNumber()];
4823 vmCall(
4824 Void,
4825 m_out.operation(m_node->op() == PutGetterById ? operationPutGetterById : operationPutSetterById),
4826 m_callFrame, base, m_out.constIntPtr(uid), m_out.constInt32(m_node->accessorAttributes()), accessor);
4827 }
4828
4829 void compilePutGetterSetterById()
4830 {
4831 LValue base = lowCell(m_node->child1());
4832 LValue getter = lowJSValue(m_node->child2());
4833 LValue setter = lowJSValue(m_node->child3());
4834 auto uid = m_graph.identifiers()[m_node->identifierNumber()];
4835 vmCall(
4836 Void, m_out.operation(operationPutGetterSetter),
4837 m_callFrame, base, m_out.constIntPtr(uid), m_out.constInt32(m_node->accessorAttributes()), getter, setter);
4838
4839 }
4840
4841 void compilePutAccessorByVal()
4842 {
4843 LValue base = lowCell(m_node->child1());
4844 LValue subscript = lowJSValue(m_node->child2());
4845 LValue accessor = lowCell(m_node->child3());
4846 vmCall(
4847 Void,
4848 m_out.operation(m_node->op() == PutGetterByVal ? operationPutGetterByVal : operationPutSetterByVal),
4849 m_callFrame, base, subscript, m_out.constInt32(m_node->accessorAttributes()), accessor);
4850 }
4851
4852 void compileDeleteById()
4853 {
4854 LValue base = lowJSValue(m_node->child1());
4855 auto uid = m_graph.identifiers()[m_node->identifierNumber()];
4856 setBoolean(m_out.notZero64(vmCall(Int64, m_out.operation(operationDeleteById), m_callFrame, base, m_out.constIntPtr(uid))));
4857 }
4858
4859 void compileDeleteByVal()
4860 {
4861 LValue base = lowJSValue(m_node->child1());
4862 LValue subscript = lowJSValue(m_node->child2());
4863 setBoolean(m_out.notZero64(vmCall(Int64, m_out.operation(operationDeleteByVal), m_callFrame, base, subscript)));
4864 }
4865
    void compileArrayPush()
    {
        // Lowers ArrayPush. Children: varArgChild(0) = storage, varArgChild(1)
        // = the base array, varArgChild(2..) = the element(s) being pushed.
        // The result is the new length, boxed as an Int32 JSValue.
        LValue base = lowCell(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 0));
        unsigned elementOffset = 2;
        unsigned elementCount = m_node->numChildren() - elementOffset;

        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Contiguous:
        case Array::Double: {
            IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());

            if (elementCount == 1) {
                // Single-element push: store into the butterfly and bump
                // publicLength if there is vector capacity; otherwise call out.
                LValue value;
                Output::StoreType storeType;

                Edge& element = m_graph.varArgChild(m_node, elementOffset);
                speculate(element);
                // Double arrays store unboxed doubles; Int32/Contiguous store
                // the full 64-bit JSValue.
                if (m_node->arrayMode().type() != Array::Double) {
                    value = lowJSValue(element, ManualOperandSpeculation);
                    storeType = Output::Store64;
                } else {
                    value = lowDouble(element);
                    storeType = Output::StoreDouble;
                }

                LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);

                LBasicBlock fastPath = m_out.newBlock();
                LBasicBlock slowPath = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                // Slow path when the array is already at vector capacity.
                m_out.branch(
                    m_out.aboveOrEqual(
                        prevLength, m_out.load32(storage, m_heaps.Butterfly_vectorLength)),
                    unsure(slowPath), unsure(fastPath));

                LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
                m_out.store(
                    value, m_out.baseIndex(heap, storage, m_out.zeroExtPtr(prevLength)), storeType);
                LValue newLength = m_out.add(prevLength, m_out.int32One);
                m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);

                ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));
                m_out.jump(continuation);

                m_out.appendTo(slowPath, continuation);
                LValue operation;
                if (m_node->arrayMode().type() != Array::Double)
                    operation = m_out.operation(operationArrayPush);
                else
                    operation = m_out.operation(operationArrayPushDouble);
                ValueFromBlock slowResult = m_out.anchor(
                    vmCall(Int64, operation, m_callFrame, value, base));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setJSValue(m_out.phi(Int64, fastResult, slowResult));
                return;
            }

            // Multi-element push. Speculate on all elements up front so any
            // OSR exit happens before we mutate the array.
            for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
                Edge element = m_graph.varArgChild(m_node, elementIndex + elementOffset);
                speculate(element);
            }

            LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);
            LValue newLength = m_out.add(prevLength, m_out.constInt32(elementCount));

            LBasicBlock fastPath = m_out.newBlock();
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock setup = m_out.newBlock();
            LBasicBlock slowCallPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue beyondVectorLength = m_out.above(newLength, m_out.load32(storage, m_heaps.Butterfly_vectorLength));

            m_out.branch(beyondVectorLength, unsure(slowPath), unsure(fastPath));

            // Fast path: the elements fit, so the destination buffer is the
            // butterfly itself, starting at the old length.
            LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
            m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
            ValueFromBlock fastBufferResult = m_out.anchor(m_out.baseIndex(storage, m_out.zeroExtPtr(prevLength), ScaleEight));
            m_out.jump(setup);

            // Slow path: stage the elements in a VM scratch buffer instead,
            // and mark it active so GC scans it while the call is pending.
            m_out.appendTo(slowPath, setup);
            size_t scratchSize = sizeof(EncodedJSValue) * elementCount;
            static_assert(sizeof(EncodedJSValue) == sizeof(double), "");
            ASSERT(scratchSize);
            ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
            m_out.storePtr(m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            ValueFromBlock slowBufferResult = m_out.anchor(m_out.constIntPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())));
            m_out.jump(setup);

            // Shared setup: write every element into whichever buffer was
            // chosen above.
            m_out.appendTo(setup, slowCallPath);
            LValue buffer = m_out.phi(pointerType(), fastBufferResult, slowBufferResult);
            for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
                Edge& element = m_graph.varArgChild(m_node, elementIndex + elementOffset);

                LValue value;
                Output::StoreType storeType;
                if (m_node->arrayMode().type() != Array::Double) {
                    value = lowJSValue(element, ManualOperandSpeculation);
                    storeType = Output::Store64;
                } else {
                    value = lowDouble(element);
                    storeType = Output::StoreDouble;
                }

                m_out.store(value, m_out.baseIndex(heap, buffer, m_out.constInt32(elementIndex), jsNumber(elementIndex)), storeType);
            }
            ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));

            m_out.branch(beyondVectorLength, unsure(slowCallPath), unsure(continuation));

            // Slow call: hand the staged buffer to the runtime, then retire
            // the scratch buffer by zeroing its active length.
            m_out.appendTo(slowCallPath, continuation);
            LValue operation;
            if (m_node->arrayMode().type() != Array::Double)
                operation = m_out.operation(operationArrayPushMultiple);
            else
                operation = m_out.operation(operationArrayPushDoubleMultiple);
            ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, operation, m_callFrame, base, buffer, m_out.constInt32(elementCount)));
            m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        case Array::ArrayStorage: {
            // This ensures that the result of ArrayPush is Int32 in AI.
            int32_t largestPositiveInt32Length = 0x7fffffff - elementCount;

            LValue prevLength = m_out.load32(storage, m_heaps.ArrayStorage_publicLength);
            // Refuse to handle bizarre lengths.
            speculate(Uncountable, noValue(), nullptr, m_out.above(prevLength, m_out.constInt32(largestPositiveInt32Length)));

            if (elementCount == 1) {
                // Single-element push into ArrayStorage: like the contiguous
                // case, but numValuesInVector must also be maintained.
                Edge& element = m_graph.varArgChild(m_node, elementOffset);

                LValue value = lowJSValue(element);

                LBasicBlock fastPath = m_out.newBlock();
                LBasicBlock slowPath = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.aboveOrEqual(
                        prevLength, m_out.load32(storage, m_heaps.ArrayStorage_vectorLength)),
                    rarely(slowPath), usually(fastPath));

                LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
                m_out.store64(
                    value, m_out.baseIndex(m_heaps.ArrayStorage_vector, storage, m_out.zeroExtPtr(prevLength)));
                LValue newLength = m_out.add(prevLength, m_out.int32One);
                m_out.store32(newLength, storage, m_heaps.ArrayStorage_publicLength);
                m_out.store32(
                    m_out.add(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.int32One),
                    storage, m_heaps.ArrayStorage_numValuesInVector);

                ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));
                m_out.jump(continuation);

                m_out.appendTo(slowPath, continuation);
                ValueFromBlock slowResult = m_out.anchor(
                    vmCall(Int64, m_out.operation(operationArrayPush), m_callFrame, value, base));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setJSValue(m_out.phi(Int64, fastResult, slowResult));
                return;
            }

            // Multi-element ArrayStorage push: same staged-buffer scheme as
            // the contiguous multi-element case above.
            LValue newLength = m_out.add(prevLength, m_out.constInt32(elementCount));

            LBasicBlock fastPath = m_out.newBlock();
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock setup = m_out.newBlock();
            LBasicBlock slowCallPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue beyondVectorLength = m_out.above(newLength, m_out.load32(storage, m_heaps.ArrayStorage_vectorLength));

            m_out.branch(beyondVectorLength, rarely(slowPath), usually(fastPath));

            LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
            m_out.store32(newLength, storage, m_heaps.ArrayStorage_publicLength);
            m_out.store32(
                m_out.add(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.constInt32(elementCount)),
                storage, m_heaps.ArrayStorage_numValuesInVector);
            ValueFromBlock fastBufferResult = m_out.anchor(m_out.baseIndex(storage, m_out.zeroExtPtr(prevLength), ScaleEight, ArrayStorage::vectorOffset()));
            m_out.jump(setup);

            m_out.appendTo(slowPath, setup);
            size_t scratchSize = sizeof(EncodedJSValue) * elementCount;
            ASSERT(scratchSize);
            ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
            m_out.storePtr(m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            ValueFromBlock slowBufferResult = m_out.anchor(m_out.constIntPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())));
            m_out.jump(setup);

            m_out.appendTo(setup, slowCallPath);
            LValue buffer = m_out.phi(pointerType(), fastBufferResult, slowBufferResult);
            for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
                Edge& element = m_graph.varArgChild(m_node, elementIndex + elementOffset);

                LValue value = lowJSValue(element);
                m_out.store64(value, m_out.baseIndex(m_heaps.ArrayStorage_vector.atAnyIndex(), buffer, m_out.constIntPtr(elementIndex), ScaleEight));
            }
            ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));

            m_out.branch(beyondVectorLength, rarely(slowCallPath), usually(continuation));

            m_out.appendTo(slowCallPath, continuation);
            ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationArrayPushMultiple), m_callFrame, base, buffer, m_out.constInt32(elementCount)));
            m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        }
    }
5095
    // Clamps slice-style (start, end) arguments against the array length:
    // a negative index counts back from the end, and both results end up in
    // [0, length]. Returns (startIndex, endBoundary); when end is nullptr,
    // the end boundary is the full length.
    std::pair<LValue, LValue> populateSliceRange(LValue start, LValue end, LValue length)
    {
        // end can be nullptr.
        ASSERT(start);
        ASSERT(length);

        auto pickIndex = [&] (LValue index) {
            // index >= 0 ? min(index, length) : max(length + index, 0)
            return m_out.select(m_out.greaterThanOrEqual(index, m_out.int32Zero),
                m_out.select(m_out.above(index, length), length, index),
                m_out.select(m_out.lessThan(m_out.add(length, index), m_out.int32Zero), m_out.int32Zero, m_out.add(length, index)));
        };

        LValue endBoundary = length;
        if (end)
            endBoundary = pickIndex(end);
        LValue startIndex = pickIndex(start);
        return std::make_pair(startIndex, endBoundary);
    }
5114
    void compileArraySlice()
    {
        // Lowers ArraySlice: allocates a new array with a structure matching
        // the source's indexing shape, then copies the selected range of
        // elements with an inline word-copy loop.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        LValue sourceArray = lowCell(m_graph.varArgChild(m_node, 0));
        LValue sourceStorage = lowStorage(m_graph.varArgChild(m_node, m_node->numChildren() - 1));
        LValue inputLength = m_out.load32(sourceStorage, m_heaps.Butterfly_publicLength);

        // With only 2 children (array + storage) there are no start/end
        // arguments, so the slice covers the whole array.
        LValue startIndex = nullptr;
        LValue resultLength = nullptr;
        if (m_node->numChildren() == 2) {
            startIndex = m_out.constInt32(0);
            resultLength = inputLength;
        } else {
            LValue start = lowInt32(m_graph.varArgChild(m_node, 1));
            LValue end = nullptr;
            if (m_node->numChildren() != 3)
                end = lowInt32(m_graph.varArgChild(m_node, 2));

            auto range = populateSliceRange(start, end, inputLength);
            startIndex = range.first;
            LValue endBoundary = range.second;

            // Length is end - start, clamped to zero when start > end.
            resultLength = m_out.select(m_out.belowOrEqual(startIndex, endBoundary),
                m_out.sub(endBoundary, startIndex),
                m_out.constInt32(0));
        }

        ArrayValues arrayResult;
        {
            LValue indexingType = m_out.load8ZeroExt32(sourceArray, m_heaps.JSCell_indexingTypeAndMisc);
            // We can ignore the writability of the cell since we won't write to the source.
            indexingType = m_out.bitAnd(indexingType, m_out.constInt32(AllWritableArrayTypesAndHistory));
            // When we emit an ArraySlice, we dominate the use of the array by a CheckStructure
            // to ensure the incoming array is one of the original array structures
            // with one of the following indexing shapes: Int32, Contiguous, Double.
            LValue structure = m_out.select(
                m_out.equal(indexingType, m_out.constInt32(ArrayWithInt32)),
                weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithInt32))),
                m_out.select(m_out.equal(indexingType, m_out.constInt32(ArrayWithContiguous)),
                    weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithContiguous))),
                    weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithDouble)))));
            arrayResult = allocateJSArray(resultLength, resultLength, structure, indexingType, false, false);
        }

        // Keep the sourceArray alive at least until after anything that can GC.
        keepAlive(sourceArray);

        // Copy loop: raw 64-bit copies from source storage to the new
        // butterfly, walking loadIndex/storeIndex in lockstep.
        LBasicBlock loop = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        resultLength = m_out.zeroExtPtr(resultLength);
        ValueFromBlock startLoadIndex = m_out.anchor(m_out.zeroExtPtr(startIndex));
        ValueFromBlock startStoreIndex = m_out.anchor(m_out.constIntPtr(0));

        // Skip the loop entirely for a zero-length result.
        m_out.branch(
            m_out.below(m_out.constIntPtr(0), resultLength), unsure(loop), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(loop, continuation);
        LValue storeIndex = m_out.phi(pointerType(), startStoreIndex);
        LValue loadIndex = m_out.phi(pointerType(), startLoadIndex);
        LValue value = m_out.load64(m_out.baseIndex(m_heaps.root, sourceStorage, loadIndex, ScaleEight));
        m_out.store64(value, m_out.baseIndex(m_heaps.root, arrayResult.butterfly, storeIndex, ScaleEight));
        LValue nextStoreIndex = m_out.add(storeIndex, m_out.constIntPtr(1));
        m_out.addIncomingToPhi(storeIndex, m_out.anchor(nextStoreIndex));
        m_out.addIncomingToPhi(loadIndex, m_out.anchor(m_out.add(loadIndex, m_out.constIntPtr(1))));
        m_out.branch(
            m_out.below(nextStoreIndex, resultLength), unsure(loop), unsure(continuation));

        m_out.appendTo(continuation, lastNext);

        mutatorFence();
        setJSValue(arrayResult.array);
    }
5189
    void compileArrayIndexOf()
    {
        // Lowers ArrayIndexOf. Children: varArgChild(1) = search element,
        // optional varArgChild(2) = start index (when numChildren == 4), and
        // the storage edge last. Result is the found index or -1, as Int32.
        LValue storage = lowStorage(m_node->numChildren() == 3 ? m_graph.varArgChild(m_node, 2) : m_graph.varArgChild(m_node, 3));
        LValue length = m_out.load32(storage, m_heaps.Butterfly_publicLength);

        // Clamp the start index the same way as slice: negative counts back
        // from the end; the result lands in [0, length].
        LValue startIndex;
        if (m_node->numChildren() == 4) {
            startIndex = lowInt32(m_graph.varArgChild(m_node, 2));
            startIndex = m_out.select(m_out.greaterThanOrEqual(startIndex, m_out.int32Zero),
                m_out.select(m_out.above(startIndex, length), length, startIndex),
                m_out.select(m_out.lessThan(m_out.add(length, startIndex), m_out.int32Zero), m_out.int32Zero, m_out.add(length, startIndex)));
        } else
            startIndex = m_out.int32Zero;

        Edge& searchElementEdge = m_graph.varArgChild(m_node, 1);
        switch (searchElementEdge.useKind()) {
        case Int32Use:
        case ObjectUse:
        case SymbolUse:
        case OtherUse:
        case DoubleRepUse: {
            // These use kinds can be compared inline with a bitwise (or
            // double) equality, so emit a search loop instead of a VM call.
            LBasicBlock loopHeader = m_out.newBlock();
            LBasicBlock loopBody = m_out.newBlock();
            LBasicBlock loopNext = m_out.newBlock();
            LBasicBlock notFound = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Lower the search element according to its use kind.
            LValue searchElement;
            switch (searchElementEdge.useKind()) {
            case Int32Use:
                ASSERT(m_node->arrayMode().type() == Array::Int32);
                speculate(searchElementEdge);
                searchElement = lowJSValue(searchElementEdge, ManualOperandSpeculation);
                break;
            case ObjectUse:
                ASSERT(m_node->arrayMode().type() == Array::Contiguous);
                searchElement = lowObject(searchElementEdge);
                break;
            case SymbolUse:
                ASSERT(m_node->arrayMode().type() == Array::Contiguous);
                searchElement = lowSymbol(searchElementEdge);
                break;
            case OtherUse:
                ASSERT(m_node->arrayMode().type() == Array::Contiguous);
                speculate(searchElementEdge);
                searchElement = lowJSValue(searchElementEdge, ManualOperandSpeculation);
                break;
            case DoubleRepUse:
                ASSERT(m_node->arrayMode().type() == Array::Double);
                searchElement = lowDouble(searchElementEdge);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            // The loop walks pointer-width indices from startIndex to length.
            startIndex = m_out.zeroExtPtr(startIndex);
            length = m_out.zeroExtPtr(length);

            ValueFromBlock initialStartIndex = m_out.anchor(startIndex);
            m_out.jump(loopHeader);

            LBasicBlock lastNext = m_out.appendTo(loopHeader, loopBody);
            LValue index = m_out.phi(pointerType(), initialStartIndex);
            m_out.branch(m_out.notEqual(index, length), unsure(loopBody), unsure(notFound));

            m_out.appendTo(loopBody, loopNext);
            ValueFromBlock foundResult = m_out.anchor(index);
            switch (searchElementEdge.useKind()) {
            case Int32Use: {
                // Empty value is ignored because of TagTypeNumber.
                LValue value = m_out.load64(m_out.baseIndex(m_heaps.indexedInt32Properties, storage, index));
                m_out.branch(m_out.equal(value, searchElement), unsure(continuation), unsure(loopNext));
                break;
            }
            case ObjectUse:
            case SymbolUse:
            case OtherUse: {
                // Empty value never matches against non-empty JS values.
                LValue value = m_out.load64(m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, index));
                m_out.branch(m_out.equal(value, searchElement), unsure(continuation), unsure(loopNext));
                break;
            }
            case DoubleRepUse: {
                // Empty value is ignored because of NaN.
                LValue value = m_out.loadDouble(m_out.baseIndex(m_heaps.indexedDoubleProperties, storage, index));
                m_out.branch(m_out.doubleEqual(value, searchElement), unsure(continuation), unsure(loopNext));
                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            m_out.appendTo(loopNext, notFound);
            LValue nextIndex = m_out.add(index, m_out.intPtrOne);
            m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
            m_out.jump(loopHeader);

            m_out.appendTo(notFound, continuation);
            ValueFromBlock notFoundResult = m_out.anchor(m_out.constIntPtr(-1));
            m_out.jump(continuation);

            // Result is the found index or -1, truncated back to Int32.
            m_out.appendTo(continuation, lastNext);
            setInt32(m_out.castToInt32(m_out.phi(pointerType(), notFoundResult, foundResult)));
            break;
        }

        case StringUse:
            // String comparison needs a VM call.
            ASSERT(m_node->arrayMode().type() == Array::Contiguous);
            setInt32(vmCall(Int32, m_out.operation(operationArrayIndexOfString), m_callFrame, storage, lowString(searchElementEdge), startIndex));
            break;

        case UntypedUse:
            // Untyped search element: defer entirely to the runtime, picking
            // the helper that matches the array's storage shape.
            switch (m_node->arrayMode().type()) {
            case Array::Double:
                setInt32(vmCall(Int32, m_out.operation(operationArrayIndexOfValueDouble), m_callFrame, storage, lowJSValue(searchElementEdge), startIndex));
                break;
            case Array::Int32:
            case Array::Contiguous:
                setInt32(vmCall(Int32, m_out.operation(operationArrayIndexOfValueInt32OrContiguous), m_callFrame, storage, lowJSValue(searchElementEdge), startIndex));
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            break;

        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
5323
5324
    void compileArrayPop()
    {
        // Lowers ArrayPop. Fast path pops the last element in place; popping
        // an empty array yields undefined; a hole (or NaN for Double arrays)
        // falls back to the runtime.
        LValue base = lowCell(m_node->child1());
        LValue storage = lowStorage(m_node->child2());

        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous: {
            IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());

            LBasicBlock fastCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);

            // Three possible results: undefined (empty array), the popped
            // element (fast case), or the slow-path call's result.
            Vector<ValueFromBlock, 3> results;
            results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
            m_out.branch(
                m_out.isZero32(prevLength), rarely(continuation), usually(fastCase));

            LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
            LValue newLength = m_out.sub(prevLength, m_out.int32One);
            m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
            TypedPointer pointer = m_out.baseIndex(heap, storage, m_out.zeroExtPtr(newLength));
            if (m_node->arrayMode().type() != Array::Double) {
                // Clear the slot; a zero (empty) value means we popped a hole,
                // so take the slow path to recover the length.
                LValue result = m_out.load64(pointer);
                m_out.store64(m_out.int64Zero, pointer);
                results.append(m_out.anchor(result));
                m_out.branch(
                    m_out.notZero64(result), usually(continuation), rarely(slowCase));
            } else {
                // Double arrays use PNaN as the hole; result == result is the
                // not-NaN check.
                LValue result = m_out.loadDouble(pointer);
                m_out.store64(m_out.constInt64(bitwise_cast<int64_t>(PNaN)), pointer);
                results.append(m_out.anchor(boxDouble(result)));
                m_out.branch(
                    m_out.doubleEqual(result, result),
                    usually(continuation), rarely(slowCase));
            }

            m_out.appendTo(slowCase, continuation);
            results.append(m_out.anchor(vmCall(
                Int64, m_out.operation(operationArrayPopAndRecoverLength), m_callFrame, base)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, results));
            return;
        }

        case Array::ArrayStorage: {
            // ArrayStorage pop: additionally checks the vector length and
            // maintains numValuesInVector.
            LBasicBlock vectorLengthCheckCase = m_out.newBlock();
            LBasicBlock popCheckCase = m_out.newBlock();
            LBasicBlock fastCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue prevLength = m_out.load32(storage, m_heaps.ArrayStorage_publicLength);

            Vector<ValueFromBlock, 3> results;
            results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
            m_out.branch(
                m_out.isZero32(prevLength), rarely(continuation), usually(vectorLengthCheckCase));

            // The last index must be within the vector; otherwise the element
            // lives in the sparse map and needs the runtime.
            LBasicBlock lastNext = m_out.appendTo(vectorLengthCheckCase, popCheckCase);
            LValue newLength = m_out.sub(prevLength, m_out.int32One);
            m_out.branch(
                m_out.aboveOrEqual(newLength, m_out.load32(storage, m_heaps.ArrayStorage_vectorLength)), rarely(slowCase), usually(popCheckCase));

            // A zero (empty) slot is a hole; take the slow path.
            m_out.appendTo(popCheckCase, fastCase);
            TypedPointer pointer = m_out.baseIndex(m_heaps.ArrayStorage_vector, storage, m_out.zeroExtPtr(newLength));
            LValue result = m_out.load64(pointer);
            m_out.branch(m_out.notZero64(result), usually(fastCase), rarely(slowCase));

            m_out.appendTo(fastCase, slowCase);
            m_out.store32(newLength, storage, m_heaps.ArrayStorage_publicLength);
            m_out.store64(m_out.int64Zero, pointer);
            m_out.store32(
                m_out.sub(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.int32One),
                storage, m_heaps.ArrayStorage_numValuesInVector);
            results.append(m_out.anchor(result));
            m_out.jump(continuation);

            m_out.appendTo(slowCase, continuation);
            results.append(m_out.anchor(vmCall(
                Int64, m_out.operation(operationArrayPop), m_callFrame, base)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, results));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        }
    }
5424
5425 void compilePushWithScope()
5426 {
5427 LValue parentScope = lowCell(m_node->child1());
5428 auto objectEdge = m_node->child2();
5429 if (objectEdge.useKind() == ObjectUse) {
5430 LValue object = lowNonNullObject(objectEdge);
5431 LValue result = vmCall(Int64, m_out.operation(operationPushWithScopeObject), m_callFrame, parentScope, object);
5432 setJSValue(result);
5433 } else {
5434 ASSERT(objectEdge.useKind() == UntypedUse);
5435 LValue object = lowJSValue(m_node->child2());
5436 LValue result = vmCall(Int64, m_out.operation(operationPushWithScope), m_callFrame, parentScope, object);
5437 setJSValue(result);
5438 }
5439 }
5440
    void compileCreateActivation()
    {
        // Lowers CreateActivation: allocate a JSLexicalEnvironment for the
        // given scope and symbol table, initializing every variable slot to
        // the node's initialization value (undefined or the TDZ value).
        LValue scope = lowCell(m_node->child1());
        SymbolTable* table = m_node->castOperand<SymbolTable*>();
        RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure());
        JSValue initializationValue = m_node->initializationValueForActivation();
        ASSERT(initializationValue.isUndefined() || initializationValue == jsTDZValue());
        // If the symbol table's singleton watchpoint is still valid, this
        // allocation would fire it; just call the runtime instead of
        // emitting the inline fast path.
        if (table->singleton().isStillValid()) {
            LValue callResult = vmCall(
                Int64,
                m_out.operation(operationCreateActivationDirect), m_callFrame, weakStructure(structure),
                scope, weakPointer(table), m_out.constInt64(JSValue::encode(initializationValue)));
            setJSValue(callResult);
            return;
        }

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        // Inline allocation; jumps to slowPath if the allocator fails.
        LValue fastObject = allocateObject<JSLexicalEnvironment>(
            JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);

        // We don't need memory barriers since we just fast-created the activation, so the
        // activation must be young.
        m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
        m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);

        for (unsigned i = 0; i < table->scopeSize(); ++i) {
            m_out.store64(
                m_out.constInt64(JSValue::encode(initializationValue)),
                fastObject, m_heaps.JSLexicalEnvironment_variables[i]);
        }

        mutatorFence();

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        // Slow path: lazily-generated out-of-line call to the same runtime
        // helper used above.
        m_out.appendTo(slowPath, continuation);
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationCreateActivationDirect, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(table),
                    CCallHelpers::TrustedImm64(JSValue::encode(initializationValue)));
            },
            scope);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
5498
    void compileNewFunction()
    {
        // Lowers NewFunction and its generator/async/async-generator variants. Fast
        // path: inline-allocate the function object and initialize its fields; slow
        // path: lazy call to the matching runtime operation.
        ASSERT(m_node->op() == NewFunction || m_node->op() == NewGeneratorFunction || m_node->op() == NewAsyncGeneratorFunction || m_node->op() == NewAsyncFunction);
        bool isGeneratorFunction = m_node->op() == NewGeneratorFunction;
        bool isAsyncFunction = m_node->op() == NewAsyncFunction;
        bool isAsyncGeneratorFunction = m_node->op() == NewAsyncGeneratorFunction;

        LValue scope = lowCell(m_node->child1());

        FunctionExecutable* executable = m_node->castOperand<FunctionExecutable*>();
        // While the executable's singleton watchpoint is still valid, go straight to
        // the runtime operation instead of allocating inline.
        if (executable->singleton().isStillValid()) {
            LValue callResult =
                isGeneratorFunction ? vmCall(Int64, m_out.operation(operationNewGeneratorFunction), m_callFrame, scope, weakPointer(executable)) :
                isAsyncFunction ? vmCall(Int64, m_out.operation(operationNewAsyncFunction), m_callFrame, scope, weakPointer(executable)) :
                isAsyncGeneratorFunction ? vmCall(Int64, m_out.operation(operationNewAsyncGeneratorFunction), m_callFrame, scope, weakPointer(executable)) :
                vmCall(Int64, m_out.operation(operationNewFunction), m_callFrame, scope, weakPointer(executable));
            setJSValue(callResult);
            return;
        }

        // Pick the structure matching the node's function flavor.
        RegisteredStructure structure = m_graph.registerStructure(
            [&] () {
                JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
                switch (m_node->op()) {
                case NewGeneratorFunction:
                    return globalObject->generatorFunctionStructure();
                case NewAsyncFunction:
                    return globalObject->asyncFunctionStructure();
                case NewAsyncGeneratorFunction:
                    return globalObject->asyncGeneratorFunctionStructure();
                case NewFunction:
                    return JSFunction::selectStructureForNewFuncExp(globalObject, m_node->castOperand<FunctionExecutable*>());
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                }
            }());

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        LValue fastObject =
            isGeneratorFunction ? allocateObject<JSGeneratorFunction>(structure, m_out.intPtrZero, slowPath) :
            isAsyncFunction ? allocateObject<JSAsyncFunction>(structure, m_out.intPtrZero, slowPath) :
            isAsyncGeneratorFunction ? allocateObject<JSAsyncGeneratorFunction>(structure, m_out.intPtrZero, slowPath) :
            allocateObject<JSFunction>(structure, m_out.intPtrZero, slowPath);


        // We don't need memory barriers since we just fast-created the function, so it
        // must be young.
        m_out.storePtr(scope, fastObject, m_heaps.JSFunction_scope);
        m_out.storePtr(weakPointer(executable), fastObject, m_heaps.JSFunction_executable);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.JSFunction_rareData);

        VM& vm = this->vm();
        if (executable->isAnonymousBuiltinFunction()) {
            // Anonymous builtin functions also get a pre-initialized FunctionRareData
            // cell with the hasReifiedName flag already set.
            mutatorFence();
            Allocator allocator = allocatorForNonVirtualConcurrently<FunctionRareData>(vm, sizeof(FunctionRareData), AllocatorForMode::AllocatorIfExists);
            LValue rareData = allocateCell(m_out.constIntPtr(allocator.localAllocator()), vm.functionRareDataStructure.get(), slowPath);
            m_out.storePtr(m_out.intPtrZero, rareData, m_heaps.FunctionRareData_allocator);
            m_out.storePtr(m_out.intPtrZero, rareData, m_heaps.FunctionRareData_structure);
            m_out.storePtr(m_out.intPtrZero, rareData, m_heaps.FunctionRareData_prototype);
            m_out.storePtr(m_out.intPtrOne, rareData, m_heaps.FunctionRareData_objectAllocationProfileWatchpoint);
            m_out.storePtr(m_out.intPtrZero, rareData, m_heaps.FunctionRareData_internalFunctionAllocationProfile_structure);
            m_out.storePtr(m_out.intPtrZero, rareData, m_heaps.FunctionRareData_boundFunctionStructure);
            m_out.storePtr(m_out.intPtrZero, rareData, m_heaps.FunctionRareData_allocationProfileClearingWatchpoint);
            m_out.store32As8(m_out.int32One, rareData, m_heaps.FunctionRareData_hasReifiedName);
            m_out.store32As8(m_out.int32Zero, rareData, m_heaps.FunctionRareData_hasReifiedLength);
            // Fence before publishing the rareData pointer on the function.
            mutatorFence();
            m_out.storePtr(rareData, fastObject, m_heaps.JSFunction_rareData);
        } else
            mutatorFence();

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        // Slow path: call the ...WithInvalidatedReallocationWatchpoint flavor of the
        // operation that matches this node's op.
        Vector<LValue> slowPathArguments;
        slowPathArguments.append(scope);
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                auto* operation = operationNewFunctionWithInvalidatedReallocationWatchpoint;
                if (isGeneratorFunction)
                    operation = operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint;
                else if (isAsyncFunction)
                    operation = operationNewAsyncFunctionWithInvalidatedReallocationWatchpoint;
                else if (isAsyncGeneratorFunction)
                    operation = operationNewAsyncGeneratorFunctionWithInvalidatedReallocationWatchpoint;

                return createLazyCallGenerator(vm, operation,
                    locations[0].directGPR(), locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(executable));
            },
            slowPathArguments);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
5601
    void compileCreateDirectArguments()
    {
        // FIXME: A more effective way of dealing with the argument count and callee is to have
        // them be explicit arguments to this node.
        // https://bugs.webkit.org/show_bug.cgi?id=142207

        RegisteredStructure structure =
            m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->directArgumentsStructure());

        // The storage must hold at least numParameters - 1 slots even if fewer
        // arguments were actually passed.
        unsigned minCapacity = m_graph.baselineCodeBlockFor(m_node->origin.semantic)->numParameters() - 1;

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        ArgumentsLength length = getArgumentsLength();

        // Fast path: inline-allocate the DirectArguments object. With a known length
        // the allocation size is a compile-time constant; otherwise compute it.
        LValue fastObject;
        if (length.isKnown) {
            fastObject = allocateObject<DirectArguments>(
                DirectArguments::allocationSize(std::max(length.known, minCapacity)), structure,
                m_out.intPtrZero, slowPath);
        } else {
            // size = storageOffset + 8 * length (shift by 3 = sizeof(JSValue)),
            // clamped below by allocationSize(minCapacity).
            LValue size = m_out.add(
                m_out.shl(length.value, m_out.constInt32(3)),
                m_out.constInt32(DirectArguments::storageOffset()));

            size = m_out.select(
                m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
                size, m_out.constInt32(DirectArguments::allocationSize(minCapacity)));

            fastObject = allocateVariableSizedObject<DirectArguments>(
                m_out.zeroExtPtr(size), structure, m_out.intPtrZero, slowPath);
        }

        m_out.store32(length.value, fastObject, m_heaps.DirectArguments_length);
        m_out.store32(m_out.constInt32(minCapacity), fastObject, m_heaps.DirectArguments_minCapacity);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_mappedArguments);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_modifiedArgumentsDescriptor);

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        // Slow path: create the object in the runtime.
        m_out.appendTo(slowPath, continuation);
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationCreateDirectArguments, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                    CCallHelpers::TrustedImm32(minCapacity));
            }, length.value);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(pointerType(), fastResult, slowResult);

        // Both paths leave the callee and the argument storage to be filled in here.
        m_out.storePtr(getCurrentCallee(), result, m_heaps.DirectArguments_callee);

        if (length.isKnown) {
            // Known length: emit straight-line stores, one per slot.
            VirtualRegister start = AssemblyHelpers::argumentsStart(m_node->origin.semantic);
            for (unsigned i = 0; i < std::max(length.known, minCapacity); ++i) {
                m_out.store64(
                    m_out.load64(addressFor(start + i)),
                    result, m_heaps.DirectArguments_storage[i]);
            }
        } else {
            // Unknown length: emit a loop copying max(length, minCapacity) slots,
            // counting the index down to zero.
            LValue stackBase = getArgumentsStart();

            LBasicBlock loop = m_out.newBlock();
            LBasicBlock end = m_out.newBlock();

            ValueFromBlock originalLength;
            if (minCapacity) {
                // Non-zero minCapacity means the loop runs at least once, so no
                // empty-copy check is needed.
                LValue capacity = m_out.select(
                    m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
                    length.value,
                    m_out.constInt32(minCapacity));
                LValue originalLengthValue = m_out.zeroExtPtr(capacity);
                originalLength = m_out.anchor(originalLengthValue);
                m_out.jump(loop);
            } else {
                LValue originalLengthValue = m_out.zeroExtPtr(length.value);
                originalLength = m_out.anchor(originalLengthValue);
                m_out.branch(m_out.isNull(originalLengthValue), unsure(end), unsure(loop));
            }

            lastNext = m_out.appendTo(loop, end);
            LValue previousIndex = m_out.phi(pointerType(), originalLength);
            LValue index = m_out.sub(previousIndex, m_out.intPtrOne);
            m_out.store64(
                m_out.load64(m_out.baseIndex(m_heaps.variables, stackBase, index)),
                m_out.baseIndex(m_heaps.DirectArguments_storage, result, index));
            ValueFromBlock nextIndex = m_out.anchor(index);
            m_out.addIncomingToPhi(previousIndex, nextIndex);
            m_out.branch(m_out.isNull(index), unsure(end), unsure(loop));

            m_out.appendTo(end, lastNext);
        }

        mutatorFence();

        setJSValue(result);
    }
5708
5709 void compileCreateScopedArguments()
5710 {
5711 LValue scope = lowCell(m_node->child1());
5712
5713 LValue result = vmCall(
5714 Int64, m_out.operation(operationCreateScopedArguments), m_callFrame,
5715 weakPointer(
5716 m_graph.globalObjectFor(m_node->origin.semantic)->scopedArgumentsStructure()),
5717 getArgumentsStart(), getArgumentsLength().value, getCurrentCallee(), scope);
5718
5719 setJSValue(result);
5720 }
5721
5722 void compileCreateClonedArguments()
5723 {
5724 LValue result = vmCall(
5725 Int64, m_out.operation(operationCreateClonedArguments), m_callFrame,
5726 weakPointer(
5727 m_graph.globalObjectFor(m_node->origin.semantic)->clonedArgumentsStructure()),
5728 getArgumentsStart(), getArgumentsLength().value, getCurrentCallee());
5729
5730 setJSValue(result);
5731 }
5732
    void compileCreateRest()
    {
        if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
            // Fast path (valid while the global object is not "having a bad time"):
            // allocate an uninitialized contiguous array and copy the rest arguments
            // into its butterfly with an inline loop counting down from the length.
            LBasicBlock continuation = m_out.newBlock();
            LValue arrayLength = lowInt32(m_node->child1());
            LBasicBlock loopStart = m_out.newBlock();
            JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
            RegisteredStructure structure = m_graph.registerStructure(globalObject->originalRestParameterStructure());
            ArrayValues arrayValues = allocateUninitializedContiguousJSArray(arrayLength, structure);
            LValue array = arrayValues.array;
            LValue butterfly = arrayValues.butterfly;
            ValueFromBlock startLength = m_out.anchor(arrayLength);
            // The first rest argument lives numberOfArgumentsToSkip registers past
            // the start of the arguments area.
            LValue argumentRegion = m_out.add(getArgumentsStart(), m_out.constInt64(sizeof(Register) * m_node->numberOfArgumentsToSkip()));
            m_out.branch(m_out.equal(arrayLength, m_out.constInt32(0)),
                unsure(continuation), unsure(loopStart));

            LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);
            LValue phiOffset = m_out.phi(Int32, startLength);
            LValue currentOffset = m_out.sub(phiOffset, m_out.int32One);
            m_out.addIncomingToPhi(phiOffset, m_out.anchor(currentOffset));
            LValue loadedValue = m_out.load64(m_out.baseIndex(m_heaps.variables, argumentRegion, m_out.zeroExtPtr(currentOffset)));
            IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
            m_out.store64(loadedValue, m_out.baseIndex(heap, butterfly, m_out.zeroExtPtr(currentOffset)));
            m_out.branch(m_out.equal(currentOffset, m_out.constInt32(0)), unsure(continuation), unsure(loopStart));

            m_out.appendTo(continuation, lastNext);
            mutatorFence();
            setJSValue(array);
            return;
        }

        // Slow path: let the runtime build the rest array.
        LValue arrayLength = lowInt32(m_node->child1());
        LValue argumentStart = getArgumentsStart();
        LValue numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
        setJSValue(vmCall(
            Int64, m_out.operation(operationCreateRest), m_callFrame, argumentStart, numberOfArgumentsToSkip, arrayLength));
    }
5770
5771 void compileGetRestLength()
5772 {
5773 LBasicBlock nonZeroLength = m_out.newBlock();
5774 LBasicBlock continuation = m_out.newBlock();
5775
5776 ValueFromBlock zeroLengthResult = m_out.anchor(m_out.constInt32(0));
5777
5778 LValue numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
5779 LValue argumentsLength = getArgumentsLength().value;
5780 m_out.branch(m_out.above(argumentsLength, numberOfArgumentsToSkip),
5781 unsure(nonZeroLength), unsure(continuation));
5782
5783 LBasicBlock lastNext = m_out.appendTo(nonZeroLength, continuation);
5784 ValueFromBlock nonZeroLengthResult = m_out.anchor(m_out.sub(argumentsLength, numberOfArgumentsToSkip));
5785 m_out.jump(continuation);
5786
5787 m_out.appendTo(continuation, lastNext);
5788 setInt32(m_out.phi(Int32, zeroLengthResult, nonZeroLengthResult));
5789 }
5790
    void compileObjectKeys()
    {
        switch (m_node->child1().useKind()) {
        case ObjectUse: {
            if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
                // Fast path: if the object's structure has rare data holding valid
                // cachedOwnKeys, build the result array straight from that cached
                // immutable butterfly instead of computing the keys.
                LBasicBlock notNullCase = m_out.newBlock();
                LBasicBlock rareDataCase = m_out.newBlock();
                LBasicBlock useCacheCase = m_out.newBlock();
                LBasicBlock slowButArrayBufferCase = m_out.newBlock();
                LBasicBlock slowCase = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                LValue object = lowObject(m_node->child1());
                LValue structure = loadStructure(object);
                LValue previousOrRareData = m_out.loadPtr(structure, m_heaps.Structure_previousOrRareData);
                m_out.branch(m_out.notNull(previousOrRareData), unsure(notNullCase), unsure(slowCase));

                // previousOrRareData is rare data only if it is not itself a
                // Structure; check its structureID against the structure-structure.
                LBasicBlock lastNext = m_out.appendTo(notNullCase, rareDataCase);
                m_out.branch(
                    m_out.notEqual(m_out.load32(previousOrRareData, m_heaps.JSCell_structureID), m_out.constInt32(m_graph.m_vm.structureStructure->structureID())),
                    unsure(rareDataCase), unsure(slowCase));

                // The cache slot holds null, the sentinel (1), or a real butterfly;
                // only values above the sentinel are usable.
                m_out.appendTo(rareDataCase, useCacheCase);
                ASSERT(bitwise_cast<uintptr_t>(StructureRareData::cachedOwnKeysSentinel()) == 1);
                LValue cachedOwnKeys = m_out.loadPtr(previousOrRareData, m_heaps.StructureRareData_cachedOwnKeys);
                m_out.branch(m_out.belowOrEqual(cachedOwnKeys, m_out.constIntPtr(bitwise_cast<void*>(StructureRareData::cachedOwnKeysSentinel()))), unsure(slowCase), unsure(useCacheCase));

                // Inline-allocate a copy-on-write array whose butterfly points at the
                // cached keys' data.
                m_out.appendTo(useCacheCase, slowButArrayBufferCase);
                JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
                RegisteredStructure arrayStructure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(CopyOnWriteArrayWithContiguous));
                LValue fastArray = allocateObject<JSArray>(arrayStructure, m_out.addPtr(cachedOwnKeys, JSImmutableButterfly::offsetOfData()), slowButArrayBufferCase);
                ValueFromBlock fastResult = m_out.anchor(fastArray);
                m_out.jump(continuation);

                // Inline allocation failed but the cached keys are still usable:
                // create the array from the buffer in the runtime.
                m_out.appendTo(slowButArrayBufferCase, slowCase);
                LValue slowArray = vmCall(Int64, m_out.operation(operationNewArrayBuffer), m_callFrame, weakStructure(arrayStructure), cachedOwnKeys);
                ValueFromBlock slowButArrayBufferResult = m_out.anchor(slowArray);
                m_out.jump(continuation);

                // Full slow path: compute the keys in the runtime.
                m_out.appendTo(slowCase, continuation);
                VM& vm = this->vm();
                LValue slowResultValue = lazySlowPath(
                    [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                        return createLazyCallGenerator(vm,
                            operationObjectKeysObject, locations[0].directGPR(), locations[1].directGPR());
                    },
                    object);
                ValueFromBlock slowResult = m_out.anchor(slowResultValue);
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setJSValue(m_out.phi(pointerType(), fastResult, slowButArrayBufferResult, slowResult));
                break;
            }
            setJSValue(vmCall(Int64, m_out.operation(operationObjectKeysObject), m_callFrame, lowObject(m_node->child1())));
            break;
        }
        case UntypedUse:
            setJSValue(vmCall(Int64, m_out.operation(operationObjectKeys), m_callFrame, lowJSValue(m_node->child1())));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
5856
5857 void compileObjectCreate()
5858 {
5859 switch (m_node->child1().useKind()) {
5860 case ObjectUse:
5861 setJSValue(vmCall(Int64, m_out.operation(operationObjectCreateObject), m_callFrame, lowObject(m_node->child1())));
5862 break;
5863 case UntypedUse:
5864 setJSValue(vmCall(Int64, m_out.operation(operationObjectCreate), m_callFrame, lowJSValue(m_node->child1())));
5865 break;
5866 default:
5867 RELEASE_ASSERT_NOT_REACHED();
5868 break;
5869 }
5870 }
5871
5872 void compileNewObject()
5873 {
5874 setJSValue(allocateObject(m_node->structure()));
5875 mutatorFence();
5876 }
5877
    void compileNewStringObject()
    {
        // Inline-allocate a StringObject wrapping the given string, falling back to
        // operationNewStringObject if the inline allocation fails.
        RegisteredStructure structure = m_node->structure();
        LValue string = lowString(m_node->child1());

        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowCase);

        // Fast path: allocate and fill in the class info and wrapped string.
        LValue fastResultValue = allocateObject<StringObject>(structure, m_out.intPtrZero, slowCase);
        m_out.storePtr(m_out.constIntPtr(StringObject::info()), fastResultValue, m_heaps.JSDestructibleObject_classInfo);
        m_out.store64(string, fastResultValue, m_heaps.JSWrapperObject_internalValue);
        mutatorFence();
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        m_out.jump(continuation);

        // Slow path: lazy call into the runtime with the string as argument.
        m_out.appendTo(slowCase, continuation);
        VM& vm = this->vm();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNewStringObject, locations[0].directGPR(), locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()));
            },
            string);
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
5910
5911 void compileNewSymbol()
5912 {
5913 if (!m_node->child1()) {
5914 setJSValue(vmCall(pointerType(), m_out.operation(operationNewSymbol), m_callFrame));
5915 return;
5916 }
5917 ASSERT(m_node->child1().useKind() == KnownStringUse);
5918 setJSValue(vmCall(pointerType(), m_out.operation(operationNewSymbolWithDescription), m_callFrame, lowString(m_node->child1())));
5919 }
5920
    void compileNewArray()
    {
        // First speculate appropriately on all of the children. Do this unconditionally up here
        // because some of the slow paths may otherwise forget to do it. It's sort of arguable
        // that doing the speculations up here might be unprofitable for RA - so we can consider
        // sinking this to below the allocation fast path if we find that this has a lot of
        // register pressure.
        for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex)
            speculate(m_graph.varArgChild(m_node, operandIndex));

        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
            m_node->indexingType()));

        if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
            // Fast path: inline-allocate the array and store each child directly
            // into the butterfly in the representation the indexing type requires.
            unsigned numElements = m_node->numChildren();
            unsigned vectorLengthHint = m_node->vectorLengthHint();
            ASSERT(vectorLengthHint >= numElements);

            ArrayValues arrayValues =
                allocateUninitializedContiguousJSArray(numElements, vectorLengthHint, structure);

            for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
                Edge edge = m_graph.varArgChild(m_node, operandIndex);

                switch (m_node->indexingType()) {
                case ALL_BLANK_INDEXING_TYPES:
                case ALL_UNDECIDED_INDEXING_TYPES:
                    DFG_CRASH(m_graph, m_node, "Bad indexing type");
                    break;

                case ALL_DOUBLE_INDEXING_TYPES:
                    m_out.storeDouble(
                        lowDouble(edge),
                        arrayValues.butterfly, m_heaps.indexedDoubleProperties[operandIndex]);
                    break;

                case ALL_INT32_INDEXING_TYPES:
                case ALL_CONTIGUOUS_INDEXING_TYPES:
                    m_out.store64(
                        lowJSValue(edge, ManualOperandSpeculation),
                        arrayValues.butterfly,
                        m_heaps.forIndexingType(m_node->indexingType())->at(operandIndex));
                    break;

                default:
                    DFG_CRASH(m_graph, m_node, "Corrupt indexing type");
                    break;
                }
            }

            setJSValue(arrayValues.array);
            mutatorFence();
            return;
        }

        // Zero children: no buffer is needed, just make an empty array.
        if (!m_node->numChildren()) {
            setJSValue(vmCall(
                Int64, m_out.operation(operationNewEmptyArray), m_callFrame,
                weakStructure(structure)));
            return;
        }

        // General case: spill the children into a scratch buffer and hand it to the
        // runtime, setting the buffer's active length around the call.
        size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren();
        ASSERT(scratchSize);
        ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());

        for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
            Edge edge = m_graph.varArgChild(m_node, operandIndex);
            LValue valueToStore;
            switch (m_node->indexingType()) {
            case ALL_DOUBLE_INDEXING_TYPES:
                // Doubles are boxed here because the buffer holds EncodedJSValues.
                valueToStore = boxDouble(lowDouble(edge));
                break;
            default:
                valueToStore = lowJSValue(edge, ManualOperandSpeculation);
                break;
            }
            m_out.store64(valueToStore, m_out.absolute(buffer + operandIndex));
        }

        m_out.storePtr(
            m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));

        LValue result = vmCall(
            Int64, m_out.operation(operationNewArray), m_callFrame,
            weakStructure(structure), m_out.constIntPtr(buffer),
            m_out.constIntPtr(m_node->numChildren()));

        m_out.storePtr(m_out.intPtrZero, m_out.absolute(scratchBuffer->addressOfActiveLength()));

        setJSValue(result);
    }
6015
    void compileNewArrayWithSpread()
    {
        if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
            // Fast path: compute the total length (constant contributions at compile
            // time, spread contributions at run time with overflow checks), allocate
            // one contiguous array, then copy every element into it in order.
            CheckedInt32 startLength = 0;
            BitVector* bitVector = m_node->bitVector();
            HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;

            // Children whose bitVector bit is clear contribute exactly one element;
            // spreads of PhantomNewArrayBuffer contribute a compile-time-known count.
            for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                if (!bitVector->get(i))
                    ++startLength;
                else {
                    Edge& child = m_graph.varArgChild(m_node, i);
                    if (child->op() == PhantomSpread && child->child1()->op() == PhantomNewArrayBuffer)
                        startLength += child->child1()->castOperand<JSImmutableButterfly*>()->length();
                }
            }

            if (startLength.hasOverflowed()) {
                terminate(Overflow);
                return;
            }

            LValue length = m_out.constInt32(startLength.unsafeGet());

            // Add the runtime-known spread lengths, with a speculative overflow
            // check on each addition. Lengths per inline call frame are cached so
            // multiple spreads of the same rest array share one computation.
            for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                if (bitVector->get(i)) {
                    Edge use = m_graph.varArgChild(m_node, i);
                    CheckValue* lengthCheck = nullptr;
                    if (use->op() == PhantomSpread) {
                        if (use->child1()->op() == PhantomCreateRest) {
                            InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame();
                            unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();
                            LValue spreadLength = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                                return getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
                            }).iterator->value;
                            lengthCheck = m_out.speculateAdd(length, spreadLength);
                        }
                    } else {
                        LValue fixedArray = lowCell(use);
                        lengthCheck = m_out.speculateAdd(length, m_out.load32(fixedArray, m_heaps.JSFixedArray_size));
                    }

                    if (lengthCheck) {
                        blessSpeculation(lengthCheck, Overflow, noValue(), nullptr, m_origin);
                        length = lengthCheck;
                    }
                }
            }

            // Contiguous allocation caps out below MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH.
            LValue exceedsMaxAllowedLength = m_out.aboveOrEqual(length, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
            blessSpeculation(m_out.speculate(exceedsMaxAllowedLength), Overflow, noValue(), nullptr, m_origin);

            RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->originalArrayStructureForIndexingType(ArrayWithContiguous));
            ArrayValues arrayValues = allocateUninitializedContiguousJSArray(length, structure);
            LValue result = arrayValues.array;
            LValue storage = arrayValues.butterfly;
            // Running write cursor into the result's storage; each case below
            // advances it past what it wrote.
            LValue index = m_out.constIntPtr(0);

            for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                Edge use = m_graph.varArgChild(m_node, i);
                if (bitVector->get(i)) {
                    if (use->op() == PhantomSpread) {
                        if (use->child1()->op() == PhantomNewArrayBuffer) {
                            // Constant buffer spread: unroll the copy with constant
                            // stores.
                            IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
                            auto* array = use->child1()->castOperand<JSImmutableButterfly*>();
                            for (unsigned i = 0; i < array->length(); ++i) {
                                // Because resulted array from NewArrayWithSpread is always contiguous, we should not generate value
                                // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                                int64_t value = JSValue::encode(array->get(i));
                                m_out.store64(m_out.constInt64(value), m_out.baseIndex(heap, storage, index, JSValue(), (Checked<int32_t>(sizeof(JSValue)) * i).unsafeGet()));
                            }
                            index = m_out.add(index, m_out.constIntPtr(array->length()));
                        } else {
                            // Rest-argument spread: emit a loop copying from the
                            // arguments area on the stack.
                            RELEASE_ASSERT(use->child1()->op() == PhantomCreateRest);
                            InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame();
                            unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();

                            LValue length = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));
                            LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);

                            LBasicBlock loopStart = m_out.newBlock();
                            LBasicBlock continuation = m_out.newBlock();

                            ValueFromBlock loadIndexStart = m_out.anchor(m_out.constIntPtr(0));
                            ValueFromBlock arrayIndexStart = m_out.anchor(index);
                            ValueFromBlock arrayIndexStartForFinish = m_out.anchor(index);

                            m_out.branch(
                                m_out.isZero64(length),
                                unsure(continuation), unsure(loopStart));

                            LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);

                            LValue arrayIndex = m_out.phi(pointerType(), arrayIndexStart);
                            LValue loadIndex = m_out.phi(pointerType(), loadIndexStart);

                            LValue item = m_out.load64(m_out.baseIndex(m_heaps.variables, sourceStart, loadIndex));
                            m_out.store64(item, m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, arrayIndex));

                            LValue nextArrayIndex = m_out.add(arrayIndex, m_out.constIntPtr(1));
                            LValue nextLoadIndex = m_out.add(loadIndex, m_out.constIntPtr(1));
                            ValueFromBlock arrayIndexLoopForFinish = m_out.anchor(nextArrayIndex);

                            m_out.addIncomingToPhi(loadIndex, m_out.anchor(nextLoadIndex));
                            m_out.addIncomingToPhi(arrayIndex, m_out.anchor(nextArrayIndex));

                            m_out.branch(
                                m_out.below(nextLoadIndex, length),
                                unsure(loopStart), unsure(continuation));

                            m_out.appendTo(continuation, lastNext);
                            // The cursor after the loop is either the pre-loop index
                            // (empty spread) or the final loop index.
                            index = m_out.phi(pointerType(), arrayIndexStartForFinish, arrayIndexLoopForFinish);
                        }
                    } else {
                        // Materialized Spread result (a JSFixedArray): emit a loop
                        // copying from its buffer.
                        LBasicBlock loopStart = m_out.newBlock();
                        LBasicBlock continuation = m_out.newBlock();

                        LValue fixedArray = lowCell(use);

                        ValueFromBlock fixedIndexStart = m_out.anchor(m_out.constIntPtr(0));
                        ValueFromBlock arrayIndexStart = m_out.anchor(index);
                        ValueFromBlock arrayIndexStartForFinish = m_out.anchor(index);

                        LValue fixedArraySize = m_out.zeroExtPtr(m_out.load32(fixedArray, m_heaps.JSFixedArray_size));

                        m_out.branch(
                            m_out.isZero64(fixedArraySize),
                            unsure(continuation), unsure(loopStart));

                        LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);

                        LValue arrayIndex = m_out.phi(pointerType(), arrayIndexStart);
                        LValue fixedArrayIndex = m_out.phi(pointerType(), fixedIndexStart);

                        LValue item = m_out.load64(m_out.baseIndex(m_heaps.JSFixedArray_buffer, fixedArray, fixedArrayIndex));
                        m_out.store64(item, m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, arrayIndex));

                        LValue nextArrayIndex = m_out.add(arrayIndex, m_out.constIntPtr(1));
                        LValue nextFixedArrayIndex = m_out.add(fixedArrayIndex, m_out.constIntPtr(1));
                        ValueFromBlock arrayIndexLoopForFinish = m_out.anchor(nextArrayIndex);

                        m_out.addIncomingToPhi(fixedArrayIndex, m_out.anchor(nextFixedArrayIndex));
                        m_out.addIncomingToPhi(arrayIndex, m_out.anchor(nextArrayIndex));

                        m_out.branch(
                            m_out.below(nextFixedArrayIndex, fixedArraySize),
                            unsure(loopStart), unsure(continuation));

                        m_out.appendTo(continuation, lastNext);
                        index = m_out.phi(pointerType(), arrayIndexStartForFinish, arrayIndexLoopForFinish);
                    }
                } else {
                    // Plain (non-spread) child: store one element and advance.
                    IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
                    LValue item = lowJSValue(use);
                    m_out.store64(item, m_out.baseIndex(heap, storage, index));
                    index = m_out.add(index, m_out.constIntPtr(1));
                }
            }

            mutatorFence();
            setJSValue(result);
            return;
        }

        // Slow path: spill all children into a scratch buffer and let the runtime
        // assemble the array, setting the buffer's active length around the call.
        ASSERT(m_node->numChildren());
        size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren();
        ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
        BitVector* bitVector = m_node->bitVector();
        for (unsigned i = 0; i < m_node->numChildren(); ++i) {
            Edge use = m_graph.m_varArgChildren[m_node->firstChild() + i];
            LValue value;
            if (bitVector->get(i))
                value = lowCell(use);
            else
                value = lowJSValue(use);
            m_out.store64(value, m_out.absolute(&buffer[i]));
        }

        m_out.storePtr(m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));
        LValue result = vmCall(Int64, m_out.operation(operationNewArrayWithSpreadSlow), m_callFrame, m_out.constIntPtr(buffer), m_out.constInt32(m_node->numChildren()));
        m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));

        setJSValue(result);
    }
6201
    void compileCreateThis()
    {
        // Lowers CreateThis. Fast path: the callee is a JSFunction whose rare data
        // holds an allocation profile (allocator + structure); inline-allocate the
        // `this` object from that profile. Otherwise call operationCreateThis.
        LValue callee = lowCell(m_node->child1());

        LBasicBlock isFunctionBlock = m_out.newBlock();
        LBasicBlock hasRareData = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isFunction(callee, provenType(m_node->child1())), usually(isFunctionBlock), rarely(slowPath));

        // A null rareData means no allocation profile exists yet; go slow.
        LBasicBlock lastNext = m_out.appendTo(isFunctionBlock, hasRareData);
        LValue rareData = m_out.loadPtr(callee, m_heaps.JSFunction_rareData);
        m_out.branch(m_out.isZero64(rareData), rarely(slowPath), usually(hasRareData));

        m_out.appendTo(hasRareData, slowPath);
        LValue allocator = m_out.loadPtr(rareData, m_heaps.FunctionRareData_allocator);
        LValue structure = m_out.loadPtr(rareData, m_heaps.FunctionRareData_structure);
        LValue butterfly = m_out.constIntPtr(0);
        ValueFromBlock fastResult = m_out.anchor(allocateObject(allocator, structure, butterfly, slowPath));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        ValueFromBlock slowResult = m_out.anchor(vmCall(
            Int64, m_out.operation(operationCreateThis), m_callFrame, callee, m_out.constInt32(m_node->inlineCapacity())));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(Int64, fastResult, slowResult);

        mutatorFence();
        setJSValue(result);
    }
6235
    // Lowers the Spread node: materializes the values being spread as a
    // JSFixedArray. Three strategies, chosen by the input node:
    //   1. PhantomNewArrayBuffer — the contents are compile-time constants, so
    //      copy them directly out of the immutable butterfly.
    //   2. PhantomCreateRest — copy the arguments straight out of the stack
    //      frame of the (possibly inlined) caller.
    //   3. Otherwise — fast inline copy when canDoFastSpread() allows it, else
    //      a call to operationSpreadGeneric.
    void compileSpread()
    {
        if (m_node->child1()->op() == PhantomNewArrayBuffer) {
            LBasicBlock slowAllocation = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            auto* immutableButterfly = m_node->child1()->castOperand<JSImmutableButterfly*>();

            // Try an inline allocation of the fixed array; on failure, branch
            // to slowAllocation and call into the VM instead.
            LValue fastFixedArrayValue = allocateVariableSizedCell<JSFixedArray>(
                m_out.constIntPtr(JSFixedArray::allocationSize(immutableButterfly->length()).unsafeGet()),
                m_graph.m_vm.fixedArrayStructure.get(), slowAllocation);
            m_out.store32(m_out.constInt32(immutableButterfly->length()), fastFixedArrayValue, m_heaps.JSFixedArray_size);
            ValueFromBlock fastFixedArray = m_out.anchor(fastFixedArrayValue);
            m_out.jump(continuation);

            LBasicBlock lastNext = m_out.appendTo(slowAllocation, continuation);
            ValueFromBlock slowFixedArray = m_out.anchor(vmCall(pointerType(), m_out.operation(operationCreateFixedArray), m_callFrame, m_out.constInt32(immutableButterfly->length())));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            LValue fixedArray = m_out.phi(pointerType(), fastFixedArray, slowFixedArray);
            // The element values are known constants, so emit one store per
            // element rather than a copy loop.
            for (unsigned i = 0; i < immutableButterfly->length(); i++) {
                // Because forwarded values are drained as JSValue, we should not generate value
                // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                int64_t value = JSValue::encode(immutableButterfly->get(i));
                m_out.store64(
                    m_out.constInt64(value),
                    m_out.baseIndex(m_heaps.JSFixedArray_buffer, fixedArray, m_out.constIntPtr(i), jsNumber(i)));
            }
            mutatorFence();
            setJSValue(fixedArray);
            return;
        }

        if (m_node->child1()->op() == PhantomCreateRest) {
            // This IR is rare to generate since it requires escaping the Spread
            // but not the CreateRest. In bytecode, we have only few operations that
            // accept Spread's result as input. This usually leads to the Spread node not
            // escaping. However, this can happen if for example we generate a PutStack on
            // the Spread but nothing escapes the CreateRest.
            LBasicBlock loopHeader = m_out.newBlock();
            LBasicBlock loopBody = m_out.newBlock();
            LBasicBlock slowAllocation = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopHeader);

            InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();
            unsigned numberOfArgumentsToSkip = m_node->child1()->numberOfArgumentsToSkip();
            LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
            LValue length = getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
            static_assert(sizeof(JSValue) == 8 && 1 << 3 == 8, "Assumed in the code below.");
            // Allocation size in bytes: 8 bytes per element plus the header.
            LValue size = m_out.add(
                m_out.shl(m_out.zeroExtPtr(length), m_out.constInt32(3)),
                m_out.constIntPtr(JSFixedArray::offsetOfData()));

            LValue fastArrayValue = allocateVariableSizedCell<JSFixedArray>(size, m_graph.m_vm.fixedArrayStructure.get(), slowAllocation);
            m_out.store32(length, fastArrayValue, m_heaps.JSFixedArray_size);
            ValueFromBlock fastArray = m_out.anchor(fastArrayValue);
            m_out.jump(loopHeader);

            m_out.appendTo(slowAllocation, loopHeader);
            ValueFromBlock slowArray = m_out.anchor(vmCall(pointerType(), m_out.operation(operationCreateFixedArray), m_callFrame, length));
            m_out.jump(loopHeader);

            m_out.appendTo(loopHeader, loopBody);
            LValue fixedArray = m_out.phi(pointerType(), fastArray, slowArray);
            ValueFromBlock startIndex = m_out.anchor(m_out.constIntPtr(0));
            // Skip the copy loop entirely when there are no arguments to copy.
            m_out.branch(m_out.isZero32(length), unsure(continuation), unsure(loopBody));

            // Copy loop: move each argument slot into the fixed array buffer.
            m_out.appendTo(loopBody, continuation);
            LValue index = m_out.phi(pointerType(), startIndex);
            LValue value = m_out.load64(
                m_out.baseIndex(m_heaps.variables, sourceStart, index));
            m_out.store64(value, m_out.baseIndex(m_heaps.JSFixedArray_buffer, fixedArray, index));
            LValue nextIndex = m_out.add(m_out.constIntPtr(1), index);
            m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
            m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)), unsure(loopBody), unsure(continuation));

            m_out.appendTo(continuation, lastNext);
            mutatorFence();
            setJSValue(fixedArray);
            return;
        }

        LValue argument = lowCell(m_node->child1());

        LValue result;

        if (m_node->child1().useKind() == ArrayUse)
            speculateArray(m_node->child1());

        if (m_graph.canDoFastSpread(m_node, m_state.forNode(m_node->child1()))) {
            LBasicBlock preLoop = m_out.newBlock();
            LBasicBlock loopSelection = m_out.newBlock();
            LBasicBlock contiguousLoopStart = m_out.newBlock();
            LBasicBlock doubleLoopStart = m_out.newBlock();
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Only Int32Shape..ContiguousShape (a contiguous range of shape
            // values) take the fast path; everything else calls the operation.
            LValue indexingShape = m_out.load8ZeroExt32(argument, m_heaps.JSCell_indexingTypeAndMisc);
            indexingShape = m_out.bitAnd(indexingShape, m_out.constInt32(IndexingShapeMask));
            LValue isOKIndexingType = m_out.belowOrEqual(
                m_out.sub(indexingShape, m_out.constInt32(Int32Shape)),
                m_out.constInt32(ContiguousShape - Int32Shape));

            m_out.branch(isOKIndexingType, unsure(preLoop), unsure(slowPath));
            LBasicBlock lastNext = m_out.appendTo(preLoop, loopSelection);

            LValue butterfly = m_out.loadPtr(argument, m_heaps.JSObject_butterfly);
            LValue length = m_out.load32NonNegative(butterfly, m_heaps.Butterfly_publicLength);
            static_assert(sizeof(JSValue) == 8 && 1 << 3 == 8, "Assumed in the code below.");
            LValue size = m_out.add(
                m_out.shl(m_out.zeroExtPtr(length), m_out.constInt32(3)),
                m_out.constIntPtr(JSFixedArray::offsetOfData()));

            LValue fastAllocation = allocateVariableSizedCell<JSFixedArray>(size, m_graph.m_vm.fixedArrayStructure.get(), slowPath);
            ValueFromBlock fastResult = m_out.anchor(fastAllocation);
            m_out.store32(length, fastAllocation, m_heaps.JSFixedArray_size);

            ValueFromBlock startIndexForContiguous = m_out.anchor(m_out.constIntPtr(0));
            ValueFromBlock startIndexForDouble = m_out.anchor(m_out.constIntPtr(0));

            m_out.branch(m_out.isZero32(length), unsure(continuation), unsure(loopSelection));

            // Pick the copy loop matching the butterfly's storage format.
            m_out.appendTo(loopSelection, contiguousLoopStart);
            m_out.branch(m_out.equal(indexingShape, m_out.constInt32(DoubleShape)),
                unsure(doubleLoopStart), unsure(contiguousLoopStart));

            {
                // Int32/Undecided/Contiguous copy loop: holes (zero words) are
                // replaced with jsUndefined().
                m_out.appendTo(contiguousLoopStart, doubleLoopStart);
                LValue index = m_out.phi(pointerType(), startIndexForContiguous);

                TypedPointer loadSite = m_out.baseIndex(m_heaps.root, butterfly, index, ScaleEight); // We read TOP here since we can be reading either int32 or contiguous properties.
                LValue value = m_out.load64(loadSite);
                value = m_out.select(m_out.isZero64(value), m_out.constInt64(JSValue::encode(jsUndefined())), value);
                m_out.store64(value, m_out.baseIndex(m_heaps.JSFixedArray_buffer, fastAllocation, index));

                LValue nextIndex = m_out.add(index, m_out.constIntPtr(1));
                m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));

                m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)),
                    unsure(contiguousLoopStart), unsure(continuation));
            }

            {
                // Double copy loop: NaN marks a hole, which becomes
                // jsUndefined(); real doubles get boxed.
                m_out.appendTo(doubleLoopStart, slowPath);
                LValue index = m_out.phi(pointerType(), startIndexForDouble);

                LValue value = m_out.loadDouble(m_out.baseIndex(m_heaps.indexedDoubleProperties, butterfly, index));
                LValue isNaN = m_out.doubleNotEqualOrUnordered(value, value);
                LValue holeResult = m_out.constInt64(JSValue::encode(jsUndefined()));
                LValue normalResult = boxDouble(value);
                value = m_out.select(isNaN, holeResult, normalResult);
                m_out.store64(value, m_out.baseIndex(m_heaps.JSFixedArray_buffer, fastAllocation, index));

                LValue nextIndex = m_out.add(index, m_out.constIntPtr(1));
                m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));

                m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)),
                    unsure(doubleLoopStart), unsure(continuation));
            }

            m_out.appendTo(slowPath, continuation);
            ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), m_out.operation(operationSpreadFastArray), m_callFrame, argument));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            result = m_out.phi(pointerType(), fastResult, slowResult);
            mutatorFence();
        } else
            result = vmCall(pointerType(), m_out.operation(operationSpreadGeneric), m_callFrame, argument);

        setJSValue(result);
    }
6410
    // Lowers NewArrayBuffer: creates a JSArray whose butterfly points at the
    // JSImmutableButterfly baked into the node's cell operand.
    void compileNewArrayBuffer()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
            m_node->indexingMode()));
        auto* immutableButterfly = m_node->castOperand<JSImmutableButterfly*>();

        if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingMode())) {
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Fast path: inline-allocate the array object, pointing it at the
            // shared immutable butterfly; allocation failure jumps to slowPath.
            LValue fastArray = allocateObject<JSArray>(structure, m_out.constIntPtr(immutableButterfly->toButterfly()), slowPath);
            ValueFromBlock fastResult = m_out.anchor(fastArray);
            m_out.jump(continuation);

            m_out.appendTo(slowPath, continuation);
            LValue slowArray = vmCall(Int64, m_out.operation(operationNewArrayBuffer), m_callFrame, weakStructure(structure), m_out.weakPointer(m_node->cellOperand()));
            ValueFromBlock slowResult = m_out.anchor(slowArray);
            m_out.jump(continuation);

            m_out.appendTo(continuation);

            mutatorFence();
            setJSValue(m_out.phi(pointerType(), slowResult, fastResult));
            return;
        }

        // Having-a-bad-time global object or ArrayStorage indexing mode:
        // always go through the operation.
        setJSValue(vmCall(
            Int64, m_out.operation(operationNewArrayBuffer), m_callFrame,
            weakStructure(structure), m_out.weakPointer(m_node->cellOperand())));
    }
6442
6443 void compileNewArrayWithSize()
6444 {
6445 LValue publicLength = lowInt32(m_node->child1());
6446
6447 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
6448 RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
6449 m_node->indexingType()));
6450
6451 if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
6452 IndexingType indexingType = m_node->indexingType();
6453 setJSValue(
6454 allocateJSArray(
6455 publicLength, publicLength, weakPointer(globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType)), m_out.constInt32(indexingType)).array);
6456 mutatorFence();
6457 return;
6458 }
6459
6460 LValue structureValue = m_out.select(
6461 m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)),
6462 weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))),
6463 weakStructure(structure));
6464 setJSValue(vmCall(Int64, m_out.operation(operationNewArrayWithSize), m_callFrame, structureValue, publicLength, m_out.intPtrZero));
6465 }
6466
    // Lowers NewTypedArray. For an Int32 size we inline-allocate both the
    // backing storage and the JSArrayBufferView; for an untyped argument we
    // defer entirely to the runtime operation.
    void compileNewTypedArray()
    {
        TypedArrayType typedArrayType = m_node->typedArrayType();
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        switch (m_node->child1().useKind()) {
        case Int32Use: {
            RegisteredStructure structure = m_graph.registerStructure(globalObject->typedArrayStructureConcurrently(typedArrayType));

            LValue size = lowInt32(m_node->child1());

            LBasicBlock smallEnoughCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // If we reach the slow case before allocating storage, the phi
            // below sees null storage.
            ValueFromBlock noStorage = m_out.anchor(m_out.intPtrZero);

            // Sizes above fastSizeLimit always take the slow case.
            m_out.branch(
                m_out.above(size, m_out.constInt32(JSArrayBufferView::fastSizeLimit)),
                rarely(slowCase), usually(smallEnoughCase));

            LBasicBlock lastNext = m_out.appendTo(smallEnoughCase, slowCase);

            // byteSize = size << logElementSize, rounded up to a multiple of 8
            // for element sizes smaller than 8 bytes.
            LValue byteSize =
                m_out.shl(m_out.zeroExtPtr(size), m_out.constInt32(logElementSize(typedArrayType)));
            if (elementSize(typedArrayType) < 8) {
                byteSize = m_out.bitAnd(
                    m_out.add(byteSize, m_out.constIntPtr(7)),
                    m_out.constIntPtr(~static_cast<intptr_t>(7)));
            }

            LValue allocator = allocatorForSize(vm().primitiveGigacageAuxiliarySpace, byteSize, slowCase);
            LValue storage = allocateHeapCell(allocator, slowCase);

            // Zero-fill the storage, one 64-bit word at a time (byteSize is
            // 8-byte aligned by construction above).
            splatWords(
                storage,
                m_out.int32Zero,
                m_out.castToInt32(m_out.lShr(byteSize, m_out.constIntPtr(3))),
                m_out.int64Zero,
                m_heaps.typedArrayProperties);

#if CPU(ARM64E)
            {
                // Sign the storage pointer with the array length via
                // tagArrayPtr (pointer authentication).
                LValue sizePtr = m_out.zeroExtPtr(size);
                PatchpointValue* authenticate = m_out.patchpoint(pointerType());
                authenticate->appendSomeRegister(storage);
                authenticate->append(sizePtr, B3::ValueRep(B3::ValueRep::SomeLateRegister));
                authenticate->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                    jit.move(params[1].gpr(), params[0].gpr());
                    jit.tagArrayPtr(params[2].gpr(), params[0].gpr());
                });
                storage = authenticate;
            }
#endif

            ValueFromBlock haveStorage = m_out.anchor(storage);

            LValue fastResultValue =
                allocateObject<JSArrayBufferView>(structure, m_out.intPtrZero, slowCase);

            m_out.storePtr(storage, fastResultValue, m_heaps.JSArrayBufferView_vector);
            m_out.store32(size, fastResultValue, m_heaps.JSArrayBufferView_length);
            m_out.store32(m_out.constInt32(FastTypedArray), fastResultValue, m_heaps.JSArrayBufferView_mode);

            mutatorFence();
            ValueFromBlock fastResult = m_out.anchor(fastResultValue);
            m_out.jump(continuation);

            m_out.appendTo(slowCase, continuation);
            // Either null (no storage was allocated yet) or the storage we
            // allocated before failing to allocate the view.
            LValue storageValue = m_out.phi(pointerType(), noStorage, haveStorage);

            VM& vm = this->vm();
            LValue slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationNewTypedArrayWithSizeForType(typedArrayType), locations[0].directGPR(),
                        CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                        locations[2].directGPR());
                },
                size, storageValue);
            ValueFromBlock slowResult = m_out.anchor(slowResultValue);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
            return;
        }

        case UntypedUse: {
            // Arbitrary argument (length, array-like, buffer, ...): let the
            // runtime sort it out.
            LValue argument = lowJSValue(m_node->child1());

            LValue result = vmCall(
                pointerType(), m_out.operation(operationNewTypedArrayWithOneArgumentForType(typedArrayType)),
                m_callFrame, weakPointer(globalObject->typedArrayStructureConcurrently(typedArrayType)), argument);

            setJSValue(result);
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return;
        }
    }
6571
6572 void compileAllocatePropertyStorage()
6573 {
6574 LValue object = lowCell(m_node->child1());
6575 setStorage(allocatePropertyStorage(object, m_node->transition()->previous.get()));
6576 }
6577
6578 void compileReallocatePropertyStorage()
6579 {
6580 Transition* transition = m_node->transition();
6581 LValue object = lowCell(m_node->child1());
6582 LValue oldStorage = lowStorage(m_node->child2());
6583
6584 setStorage(
6585 reallocatePropertyStorage(
6586 object, oldStorage, transition->previous.get(), transition->next.get()));
6587 }
6588
6589 void compileNukeStructureAndSetButterfly()
6590 {
6591 nukeStructureAndSetButterfly(lowStorage(m_node->child2()), lowCell(m_node->child1()));
6592 }
6593
6594 void compileToNumber()
6595 {
6596 LValue value = lowJSValue(m_node->child1());
6597
6598 if (!(abstractValue(m_node->child1()).m_type & SpecBytecodeNumber))
6599 setJSValue(vmCall(Int64, m_out.operation(operationToNumber), m_callFrame, value));
6600 else {
6601 LBasicBlock notNumber = m_out.newBlock();
6602 LBasicBlock continuation = m_out.newBlock();
6603
6604 ValueFromBlock fastResult = m_out.anchor(value);
6605 m_out.branch(isNumber(value, provenType(m_node->child1())), unsure(continuation), unsure(notNumber));
6606
6607 // notNumber case.
6608 LBasicBlock lastNext = m_out.appendTo(notNumber, continuation);
6609 // We have several attempts to remove ToNumber. But ToNumber still exists.
6610 // It means that converting non-numbers to numbers by this ToNumber is not rare.
6611 // Instead of the lazy slow path generator, we call the operation here.
6612 ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationToNumber), m_callFrame, value));
6613 m_out.jump(continuation);
6614
6615 // continuation case.
6616 m_out.appendTo(continuation, lastNext);
6617 setJSValue(m_out.phi(Int64, fastResult, slowResult));
6618 }
6619 }
6620
    // Lowers ToString, CallStringConstructor, and StringValueOf, dispatching
    // on the child's use kind. StringValueOf only reaches here with
    // UntypedUse (asserted below).
    void compileToStringOrCallStringConstructorOrStringValueOf()
    {
        ASSERT(m_node->op() != StringValueOf || m_node->child1().useKind() == UntypedUse);
        switch (m_node->child1().useKind()) {
        case StringObjectUse: {
            // Proven StringObject: unwrap its internal string value directly.
            LValue cell = lowCell(m_node->child1());
            speculateStringObjectForCell(m_node->child1(), cell);
            setJSValue(m_out.loadPtr(cell, m_heaps.JSWrapperObject_internalValue));
            return;
        }

        case StringOrStringObjectUse: {
            // Either a JSString (returned as-is) or a StringObject (unwrap);
            // anything else fails the BadType speculation.
            LValue cell = lowCell(m_node->child1());
            LValue type = m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType);

            LBasicBlock notString = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            ValueFromBlock simpleResult = m_out.anchor(cell);
            m_out.branch(
                m_out.equal(type, m_out.constInt32(StringType)),
                unsure(continuation), unsure(notString));

            LBasicBlock lastNext = m_out.appendTo(notString, continuation);
            speculate(
                BadType, jsValueValue(cell), m_node->child1().node(),
                m_out.notEqual(type, m_out.constInt32(StringObjectType)));
            ValueFromBlock unboxedResult = m_out.anchor(
                m_out.loadPtr(cell, m_heaps.JSWrapperObject_internalValue));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, simpleResult, unboxedResult));

            // Tell the abstract interpreter what the speculation established.
            m_interpreter.filter(m_node->child1(), SpecString | SpecStringObject);
            return;
        }

        case CellUse:
        case NotCellUse:
        case UntypedUse: {
            // General case: strings pass through; everything else calls the
            // matching conversion operation.
            LValue value;
            if (m_node->child1().useKind() == CellUse)
                value = lowCell(m_node->child1());
            else if (m_node->child1().useKind() == NotCellUse)
                value = lowNotCell(m_node->child1());
            else
                value = lowJSValue(m_node->child1());

            LBasicBlock isCell = m_out.newBlock();
            LBasicBlock notString = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Fold the is-cell test to a constant when the use kind already
            // decides it.
            LValue isCellPredicate;
            if (m_node->child1().useKind() == CellUse)
                isCellPredicate = m_out.booleanTrue;
            else if (m_node->child1().useKind() == NotCellUse)
                isCellPredicate = m_out.booleanFalse;
            else
                isCellPredicate = this->isCell(value, provenType(m_node->child1()));
            m_out.branch(isCellPredicate, unsure(isCell), unsure(notString));

            LBasicBlock lastNext = m_out.appendTo(isCell, notString);
            ValueFromBlock simpleResult = m_out.anchor(value);
            // Only bother emitting the is-string test if profiling predicts
            // strings can show up at all.
            LValue isStringPredicate;
            if (m_node->child1()->prediction() & SpecString) {
                isStringPredicate = isString(value, provenType(m_node->child1()));
            } else
                isStringPredicate = m_out.booleanFalse;
            m_out.branch(isStringPredicate, unsure(continuation), unsure(notString));

            m_out.appendTo(notString, continuation);
            // Pick the operation matching the node op; CellUse gets the
            // cell-specialized variants.
            LValue operation;
            if (m_node->child1().useKind() == CellUse) {
                ASSERT(m_node->op() != StringValueOf);
                operation = m_out.operation(m_node->op() == ToString ? operationToStringOnCell : operationCallStringConstructorOnCell);
            } else {
                operation = m_out.operation(m_node->op() == ToString
                    ? operationToString : m_node->op() == StringValueOf
                    ? operationStringValueOf : operationCallStringConstructor);
            }
            ValueFromBlock convertedResult = m_out.anchor(vmCall(Int64, operation, m_callFrame, value));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, simpleResult, convertedResult));
            return;
        }

        // Numeric use kinds: convert via the radix-10 string operations.
        case Int32Use:
            setJSValue(vmCall(Int64, m_out.operation(operationInt32ToStringWithValidRadix), m_callFrame, lowInt32(m_node->child1()), m_out.constInt32(10)));
            return;

        case Int52RepUse:
            setJSValue(vmCall(Int64, m_out.operation(operationInt52ToStringWithValidRadix), m_callFrame, lowStrictInt52(m_node->child1()), m_out.constInt32(10)));
            return;

        case DoubleRepUse:
            setJSValue(vmCall(Int64, m_out.operation(operationDoubleToStringWithValidRadix), m_callFrame, lowDouble(m_node->child1()), m_out.constInt32(10)));
            return;

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
6727
6728 void compileToPrimitive()
6729 {
6730 LValue value = lowJSValue(m_node->child1());
6731
6732 LBasicBlock isCellCase = m_out.newBlock();
6733 LBasicBlock isObjectCase = m_out.newBlock();
6734 LBasicBlock continuation = m_out.newBlock();
6735
6736 Vector<ValueFromBlock, 3> results;
6737
6738 results.append(m_out.anchor(value));
6739 m_out.branch(
6740 isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
6741
6742 LBasicBlock lastNext = m_out.appendTo(isCellCase, isObjectCase);
6743 results.append(m_out.anchor(value));
6744 m_out.branch(
6745 isObject(value, provenType(m_node->child1())),
6746 unsure(isObjectCase), unsure(continuation));
6747
6748 m_out.appendTo(isObjectCase, continuation);
6749 results.append(m_out.anchor(vmCall(
6750 Int64, m_out.operation(operationToPrimitive), m_callFrame, value)));
6751 m_out.jump(continuation);
6752
6753 m_out.appendTo(continuation, lastNext);
6754 setJSValue(m_out.phi(Int64, results));
6755 }
6756
    // Lowers MakeRope: concatenates two or three strings by allocating a
    // JSRopeString whose fiber fields pack the child pointers together with
    // the is-8-bit flag and the combined length.
    void compileMakeRope()
    {
        struct FlagsAndLength {
            LValue flags;
            LValue length;
        };

        Edge edges[3] = {
            m_node->child1(),
            m_node->child2(),
            m_node->child3(),
        };
        LValue kids[3];
        unsigned numKids;
        kids[0] = lowCell(edges[0]);
        kids[1] = lowCell(edges[1]);
        if (edges[2]) {
            kids[2] = lowCell(edges[2]);
            numKids = 3;
        } else {
            kids[2] = 0;
            numKids = 2;
        }

        LBasicBlock emptyCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Allocator allocator = allocatorForNonVirtualConcurrently<JSRopeString>(vm(), sizeof(JSRopeString), AllocatorForMode::AllocatorIfExists);

        LValue result = allocateCell(
            m_out.constIntPtr(allocator.localAllocator()), vm().stringStructure.get(), slowPath);

        // This puts nullptr for the first fiber. It makes visitChildren safe even if this JSRopeString is discarded due to the speculation failure in the following path.
        m_out.storePtr(m_out.constIntPtr(JSString::isRopeInPointer), result, m_heaps.JSRopeString_fiber0);

        // Produces the child's is-8-bit flag and length: constant-folded for a
        // known string constant, otherwise loaded from the rope header or,
        // for a resolved string, from its StringImpl.
        auto getFlagsAndLength = [&] (Edge& edge, LValue child) {
            if (JSString* string = edge->dynamicCastConstant<JSString*>(vm())) {
                return FlagsAndLength {
                    m_out.constInt32(string->is8Bit() ? StringImpl::flagIs8Bit() : 0),
                    m_out.constInt32(string->length())
                };
            }

            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock ropeCase = m_out.newBlock();
            LBasicBlock notRopeCase = m_out.newBlock();

            m_out.branch(isRopeString(child, edge), unsure(ropeCase), unsure(notRopeCase));

            LBasicBlock lastNext = m_out.appendTo(ropeCase, notRopeCase);
            ValueFromBlock flagsForRope = m_out.anchor(m_out.load32NonNegative(child, m_heaps.JSRopeString_flags));
            ValueFromBlock lengthForRope = m_out.anchor(m_out.load32NonNegative(child, m_heaps.JSRopeString_length));
            m_out.jump(continuation);

            m_out.appendTo(notRopeCase, continuation);
            LValue stringImpl = m_out.loadPtr(child, m_heaps.JSString_value);
            ValueFromBlock flagsForNonRope = m_out.anchor(m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_hashAndFlags));
            ValueFromBlock lengthForNonRope = m_out.anchor(m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return FlagsAndLength {
                m_out.phi(Int32, flagsForRope, flagsForNonRope),
                m_out.phi(Int32, lengthForRope, lengthForNonRope)
            };
        };

        // Fold the children: flags are AND-ed (8-bit only if all are 8-bit),
        // lengths are added with an overflow speculation check.
        FlagsAndLength flagsAndLength = getFlagsAndLength(edges[0], kids[0]);
        for (unsigned i = 1; i < numKids; ++i) {
            auto mergeFlagsAndLength = [&] (Edge& edge, LValue child, FlagsAndLength previousFlagsAndLength) {
                FlagsAndLength flagsAndLength = getFlagsAndLength(edge, child);
                LValue flags = m_out.bitAnd(previousFlagsAndLength.flags, flagsAndLength.flags);
                CheckValue* lengthCheck = m_out.speculateAdd(previousFlagsAndLength.length, flagsAndLength.length);
                blessSpeculation(lengthCheck, Uncountable, noValue(), nullptr, m_origin);
                return FlagsAndLength {
                    flags,
                    lengthCheck
                };
            };
            flagsAndLength = mergeFlagsAndLength(edges[i], kids[i], flagsAndLength);
        }

        // Pack the fibers: fiber0 holds kid0 plus the rope and 8-bit flag
        // bits; fiber1 holds the length plus the low half of kid1; fiber2
        // holds the high half of kid1 plus (if present) kid2.
        m_out.storePtr(
            m_out.bitOr(
                m_out.bitOr(kids[0], m_out.constIntPtr(JSString::isRopeInPointer)),
                m_out.bitAnd(m_out.constIntPtr(JSRopeString::is8BitInPointer), m_out.zeroExtPtr(flagsAndLength.flags))),
            result, m_heaps.JSRopeString_fiber0);
        m_out.storePtr(
            m_out.bitOr(m_out.zeroExtPtr(flagsAndLength.length), m_out.shl(kids[1], m_out.constInt32(32))),
            result, m_heaps.JSRopeString_fiber1);
        if (numKids == 2)
            m_out.storePtr(m_out.lShr(kids[1], m_out.constInt32(32)), result, m_heaps.JSRopeString_fiber2);
        else
            m_out.storePtr(m_out.bitOr(m_out.lShr(kids[1], m_out.constInt32(32)), m_out.shl(kids[2], m_out.constInt32(16))), result, m_heaps.JSRopeString_fiber2);

        mutatorFence();
        ValueFromBlock fastResult = m_out.anchor(result);
        // A zero combined length means the result is the empty string.
        m_out.branch(m_out.isZero32(flagsAndLength.length), rarely(emptyCase), usually(continuation));

        LBasicBlock lastNext = m_out.appendTo(emptyCase, slowPath);
        ValueFromBlock emptyResult = m_out.anchor(weakPointer(jsEmptyString(&m_graph.m_vm)));
        m_out.jump(continuation);

        // Slow path (allocation failure): call the operation matching the
        // number of children.
        m_out.appendTo(slowPath, continuation);
        LValue slowResultValue;
        VM& vm = this->vm();
        switch (numKids) {
        case 2:
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationMakeRope2, locations[0].directGPR(), locations[1].directGPR(),
                        locations[2].directGPR());
                }, kids[0], kids[1]);
            break;
        case 3:
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationMakeRope3, locations[0].directGPR(), locations[1].directGPR(),
                        locations[2].directGPR(), locations[3].directGPR());
                }, kids[0], kids[1], kids[2]);
            break;
        default:
            DFG_CRASH(m_graph, m_node, "Bad number of children");
            break;
        }
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, emptyResult, slowResult));
    }
6891
    // Lowers StringCharAt: in-bounds accesses load the character (8- or
    // 16-bit) and fetch the shared single-character string; out-of-bounds
    // behavior depends on the array mode and the string prototype chain.
    void compileStringCharAt()
    {
        LValue base = lowString(m_graph.child(m_node, 0));
        LValue index = lowInt32(m_graph.child(m_node, 1));
        LValue storage = lowStorage(m_graph.child(m_node, 2));

        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Unsigned >= also catches negative indices (they wrap to large
        // unsigned values).
        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);
        m_out.branch(
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length)),
            rarely(slowPath), usually(fastPath));

        LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);

        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock bitsContinuation = m_out.newBlock();
        LBasicBlock bigCharacter = m_out.newBlock();

        // Dispatch on the StringImpl's 8-bit flag.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        m_out.appendTo(is8Bit, is16Bit);

        // FIXME: Need to cage strings!
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        ValueFromBlock char8Bit = m_out.anchor(
            m_out.load8ZeroExt32(m_out.baseIndex(
                m_heaps.characters8, storage, m_out.zeroExtPtr(index),
                provenValue(m_graph.child(m_node, 1)))));
        m_out.jump(bitsContinuation);

        m_out.appendTo(is16Bit, bigCharacter);

        // 16-bit characters above maxSingleCharacterString need a runtime
        // call; the rest use the small-strings table below.
        LValue char16BitValue = m_out.load16ZeroExt32(
            m_out.baseIndex(
                m_heaps.characters16, storage, m_out.zeroExtPtr(index),
                provenValue(m_graph.child(m_node, 1))));
        ValueFromBlock char16Bit = m_out.anchor(char16BitValue);
        m_out.branch(
            m_out.above(char16BitValue, m_out.constInt32(maxSingleCharacterString)),
            rarely(bigCharacter), usually(bitsContinuation));

        m_out.appendTo(bigCharacter, bitsContinuation);

        Vector<ValueFromBlock, 4> results;
        results.append(m_out.anchor(vmCall(
            Int64, m_out.operation(operationSingleCharacterString),
            m_callFrame, char16BitValue)));
        m_out.jump(continuation);

        m_out.appendTo(bitsContinuation, slowPath);

        LValue character = m_out.phi(Int32, char8Bit, char16Bit);

        // Look the character up in the VM's preallocated single-character
        // string table.
        LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());

        results.append(m_out.anchor(m_out.loadPtr(m_out.baseIndex(
            m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(character)))));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        if (m_node->arrayMode().isInBounds()) {
            // In-bounds mode: out-of-bounds access is a speculation failure.
            speculate(OutOfBounds, noValue(), 0, m_out.booleanTrue);
            results.append(m_out.anchor(m_out.intPtrZero));
        } else {
            JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

            bool prototypeChainIsSane = false;
            if (globalObject->stringPrototypeChainIsSane()) {
                // FIXME: This could be captured using a Speculation mode that means
                // "out-of-bounds loads return a trivial value", something like
                // SaneChainOutOfBounds.
                // https://bugs.webkit.org/show_bug.cgi?id=144668

                m_graph.registerAndWatchStructureTransition(globalObject->stringPrototype()->structure(vm()));
                m_graph.registerAndWatchStructureTransition(globalObject->objectPrototype()->structure(vm()));

                prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
            }
            if (prototypeChainIsSane) {
                // Sane chain: non-negative out-of-bounds reads yield
                // undefined; only negative indices need the runtime call.
                LBasicBlock negativeIndex = m_out.newBlock();

                results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
                m_out.branch(
                    m_out.lessThan(index, m_out.int32Zero),
                    rarely(negativeIndex), usually(continuation));

                m_out.appendTo(negativeIndex, continuation);
            }

            results.append(m_out.anchor(vmCall(
                Int64, m_out.operation(operationGetByValStringInt), m_callFrame, base, index)));
        }

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, results));
    }
7000
    // Lowers StringCharCodeAt: speculates the index is in bounds, then loads
    // the character code from 8-bit or 16-bit storage as an Int32.
    void compileStringCharCodeAt()
    {
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue base = lowString(m_node->child1());
        LValue index = lowInt32(m_node->child2());
        LValue storage = lowStorage(m_node->child3());

        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);

        // Out-of-bounds (including negative, via unsigned compare) is a
        // speculation failure.
        speculate(
            Uncountable, noValue(), 0,
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length)));

        // Dispatch on the StringImpl's 8-bit flag.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        LBasicBlock lastNext = m_out.appendTo(is8Bit, is16Bit);

        // FIXME: need to cage strings!
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        ValueFromBlock char8Bit = m_out.anchor(
            m_out.load8ZeroExt32(m_out.baseIndex(
                m_heaps.characters8, storage, m_out.zeroExtPtr(index),
                provenValue(m_node->child2()))));
        m_out.jump(continuation);

        m_out.appendTo(is16Bit, continuation);

        ValueFromBlock char16Bit = m_out.anchor(
            m_out.load16ZeroExt32(m_out.baseIndex(
                m_heaps.characters16, storage, m_out.zeroExtPtr(index),
                provenValue(m_node->child2()))));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        setInt32(m_out.phi(Int32, char8Bit, char16Bit));
    }
7046
    void compileStringFromCharCode()
    {
        // Lowers StringFromCharCode. UntypedUse calls straight into the VM;
        // Int32Use looks codes that fit the single-character string cache up
        // directly, falling back to operationStringFromCharCode otherwise.
        Edge childEdge = m_node->child1();

        if (childEdge.useKind() == UntypedUse) {
            LValue result = vmCall(
                Int64, m_out.operation(operationStringFromCharCodeUntyped), m_callFrame,
                lowJSValue(childEdge));
            setJSValue(result);
            return;
        }

        DFG_ASSERT(m_graph, m_node, childEdge.useKind() == Int32Use, childEdge.useKind());

        LValue value = lowInt32(childEdge);

        LBasicBlock smallIntCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Unsigned compare also routes negative char codes to the slow path.
        m_out.branch(
            m_out.above(value, m_out.constInt32(maxSingleCharacterString)),
            rarely(slowCase), usually(smallIntCase));

        LBasicBlock lastNext = m_out.appendTo(smallIntCase, slowCase);

        // Fast path: index into the VM's cache of single-character strings.
        LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());
        LValue fastResultValue = m_out.loadPtr(
            m_out.baseIndex(m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(value)));
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);

        LValue slowResultValue = vmCall(
            pointerType(), m_out.operation(operationStringFromCharCode), m_callFrame, value);
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        setJSValue(m_out.phi(Int64, fastResult, slowResult));
    }
7090
7091 void compileGetByOffset()
7092 {
7093 StorageAccessData& data = m_node->storageAccessData();
7094
7095 setJSValue(loadProperty(
7096 lowStorage(m_node->child1()), data.identifierNumber, data.offset));
7097 }
7098
7099 void compileGetGetter()
7100 {
7101 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_getter));
7102 }
7103
7104 void compileGetSetter()
7105 {
7106 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_setter));
7107 }
7108
    void compileMultiGetByOffset()
    {
        // Lowers MultiGetByOffset: switches on the base cell's structure and,
        // per case, materializes the property either as a proven constant, a
        // direct load from the base, or a load from a prototype. If abstract
        // interpretation already proved the structure set, the switch default
        // is unreachable; otherwise it deopts with BadCache.
        LValue base = lowCell(m_node->child1());

        MultiGetByOffsetData& data = m_node->multiGetByOffsetData();

        Vector<LBasicBlock, 2> blocks(data.cases.size());
        for (unsigned i = data.cases.size(); i--;)
            blocks[i] = m_out.newBlock();
        LBasicBlock exit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Several structures may map to the same case's block.
        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (unsigned i = data.cases.size(); i--;) {
            MultiGetByOffsetCase getCase = data.cases[i];
            for (unsigned j = getCase.set().size(); j--;) {
                RegisteredStructure structure = getCase.set()[j];
                baseSet.add(structure);
                cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
            }
        }
        // If the proven structure set is covered, we can skip the default
        // deopt check inside the emitted switch.
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exit);

        LBasicBlock lastNext = m_out.m_nextBlock;

        Vector<ValueFromBlock, 2> results;
        for (unsigned i = data.cases.size(); i--;) {
            MultiGetByOffsetCase getCase = data.cases[i];
            GetByOffsetMethod method = getCase.method();

            m_out.appendTo(blocks[i], i + 1 < data.cases.size() ? blocks[i + 1] : exit);

            LValue result;

            switch (method.kind()) {
            case GetByOffsetMethod::Invalid:
                RELEASE_ASSERT_NOT_REACHED();
                break;

            case GetByOffsetMethod::Constant:
                // The property value was proven constant upstream.
                result = m_out.constInt64(JSValue::encode(method.constant()->value()));
                break;

            case GetByOffsetMethod::Load:
            case GetByOffsetMethod::LoadFromPrototype: {
                LValue propertyBase;
                if (method.kind() == GetByOffsetMethod::Load)
                    propertyBase = base;
                else
                    propertyBase = weakPointer(method.prototype()->value().asCell());
                // Out-of-line properties live in the butterfly.
                if (!isInlineOffset(method.offset()))
                    propertyBase = m_out.loadPtr(propertyBase, m_heaps.JSObject_butterfly);
                result = loadProperty(
                    propertyBase, data.identifierNumber, method.offset());
                break;
            } }

            results.append(m_out.anchor(result));
            m_out.jump(continuation);
        }

        m_out.appendTo(exit, continuation);
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, results));
    }
7180
7181 void compilePutByOffset()
7182 {
7183 StorageAccessData& data = m_node->storageAccessData();
7184
7185 storeProperty(
7186 lowJSValue(m_node->child3()),
7187 lowStorage(m_node->child1()), data.identifierNumber, data.offset);
7188 }
7189
    void compileMultiPutByOffset()
    {
        // Lowers MultiPutByOffset: switches on the base cell's structure and,
        // per variant, either replaces an existing property or performs a
        // structure transition (possibly growing out-of-line storage) before
        // storing child2's value. If the abstract interpreter already proved
        // the structure set, the switch default is unreachable; otherwise it
        // deopts with BadCache.
        LValue base = lowCell(m_node->child1());
        LValue value = lowJSValue(m_node->child2());

        MultiPutByOffsetData& data = m_node->multiPutByOffsetData();

        Vector<LBasicBlock, 2> blocks(data.variants.size());
        for (unsigned i = data.variants.size(); i--;)
            blocks[i] = m_out.newBlock();
        LBasicBlock exit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Several old structures may funnel into the same variant's block.
        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (unsigned i = data.variants.size(); i--;) {
            PutByIdVariant variant = data.variants[i];
            for (unsigned j = variant.oldStructure().size(); j--;) {
                RegisteredStructure structure = m_graph.registerStructure(variant.oldStructure()[j]);
                baseSet.add(structure);
                cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
            }
        }
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exit);

        LBasicBlock lastNext = m_out.m_nextBlock;

        for (unsigned i = data.variants.size(); i--;) {
            m_out.appendTo(blocks[i], i + 1 < data.variants.size() ? blocks[i + 1] : exit);

            PutByIdVariant variant = data.variants[i];

            LValue storage;
            if (variant.kind() == PutByIdVariant::Replace) {
                // Replace: inline slots are on the cell itself; out-of-line
                // slots live in the butterfly.
                if (isInlineOffset(variant.offset()))
                    storage = base;
                else
                    storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
            } else {
                DFG_ASSERT(m_graph, m_node, variant.kind() == PutByIdVariant::Transition, variant.kind());
                // Register the transition with the compilation plan so it
                // can be accounted for when this code is installed.
                m_graph.m_plan.transitions().addLazily(
                    codeBlock(), m_node->origin.semantic.codeOriginOwner(),
                    variant.oldStructureForTransition(), variant.newStructure());

                storage = storageForTransition(
                    base, variant.offset(),
                    variant.oldStructureForTransition(), variant.newStructure());
            }

            storeProperty(value, storage, data.identifierNumber, variant.offset());

            if (variant.kind() == PutByIdVariant::Transition) {
                // The transition may only change property layout — indexing
                // type and type info must be identical. The structure ID is
                // stored after the property value, so the object is never
                // observed with the new structure but an unwritten slot.
                ASSERT(variant.oldStructureForTransition()->indexingType() == variant.newStructure()->indexingType());
                ASSERT(variant.oldStructureForTransition()->typeInfo().inlineTypeFlags() == variant.newStructure()->typeInfo().inlineTypeFlags());
                ASSERT(variant.oldStructureForTransition()->typeInfo().type() == variant.newStructure()->typeInfo().type());
                m_out.store32(
                    weakStructureID(m_graph.registerStructure(variant.newStructure())), base, m_heaps.JSCell_structureID);
            }

            m_out.jump(continuation);
        }

        m_out.appendTo(exit, continuation);
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();

        m_out.appendTo(continuation, lastNext);
    }
7260
    void compileMatchStructure()
    {
        // Lowers MatchStructure: produces a boolean saying whether the base
        // cell's structure is one of the variants whose `result` is true.
        // Structures outside the variant set either deopt with BadCache or
        // are statically impossible when the set was already proven.
        LValue base = lowCell(m_node->child1());

        MatchStructureData& data = m_node->matchStructureData();

        LBasicBlock trueBlock = m_out.newBlock();
        LBasicBlock falseBlock = m_out.newBlock();
        LBasicBlock exitBlock = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(trueBlock);

        // Each structure jumps straight to the shared true or false block.
        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (MatchStructureVariant& variant : data.variants) {
            baseSet.add(variant.structure);
            cases.append(SwitchCase(
                weakStructureID(variant.structure),
                variant.result ? trueBlock : falseBlock, Weight(1)));
        }
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exitBlock);

        m_out.appendTo(trueBlock, falseBlock);
        ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);

        m_out.appendTo(falseBlock, exitBlock);
        ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);

        m_out.appendTo(exitBlock, continuation);
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, trueResult, falseResult));
    }
7301
7302 void compileGetGlobalVariable()
7303 {
7304 setJSValue(m_out.load64(m_out.absolute(m_node->variablePointer())));
7305 }
7306
7307 void compilePutGlobalVariable()
7308 {
7309 m_out.store64(
7310 lowJSValue(m_node->child2()), m_out.absolute(m_node->variablePointer()));
7311 }
7312
    void compileNotifyWrite()
    {
        // Lowers NotifyWrite: fires the node's WatchpointSet unless it is
        // already invalidated. The common path just inspects the state byte;
        // the rare not-yet-invalidated path calls operationNotifyWrite
        // through a lazily generated slow path.
        WatchpointSet* set = m_node->watchpointSet();

        LBasicBlock isNotInvalidated = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue state = m_out.load8ZeroExt32(m_out.absolute(set->addressOfState()));
        m_out.branch(
            m_out.equal(state, m_out.constInt32(IsInvalidated)),
            usually(continuation), rarely(isNotInvalidated));

        LBasicBlock lastNext = m_out.appendTo(isNotInvalidated, continuation);

        // vm is captured by reference into the lambda but used synchronously
        // during code generation, so the capture is safe.
        VM& vm = this->vm();
        lazySlowPath(
            [=, &vm] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNotifyWrite, InvalidGPRReg, CCallHelpers::TrustedImmPtr(set));
            });
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
7337
7338 void compileGetCallee()
7339 {
7340 setJSValue(m_out.loadPtr(addressFor(CallFrameSlot::callee)));
7341 }
7342
7343 void compileSetCallee()
7344 {
7345 auto callee = lowCell(m_node->child1());
7346 m_out.storePtr(callee, payloadFor(CallFrameSlot::callee));
7347 }
7348
7349 void compileGetArgumentCountIncludingThis()
7350 {
7351 VirtualRegister argumentCountRegister;
7352 if (InlineCallFrame* inlineCallFrame = m_node->argumentsInlineCallFrame())
7353 argumentCountRegister = inlineCallFrame->argumentCountRegister;
7354 else
7355 argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCount);
7356 setInt32(m_out.load32(payloadFor(argumentCountRegister)));
7357 }
7358
7359 void compileSetArgumentCountIncludingThis()
7360 {
7361 m_out.store32(m_out.constInt32(m_node->argumentCountIncludingThis()), payloadFor(CallFrameSlot::argumentCount));
7362 }
7363
7364 void compileGetScope()
7365 {
7366 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSFunction_scope));
7367 }
7368
7369 void compileSkipScope()
7370 {
7371 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSScope_next));
7372 }
7373
7374 void compileGetGlobalObject()
7375 {
7376 LValue structure = loadStructure(lowCell(m_node->child1()));
7377 setJSValue(m_out.loadPtr(structure, m_heaps.Structure_globalObject));
7378 }
7379
7380 void compileGetGlobalThis()
7381 {
7382 auto* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
7383 setJSValue(m_out.loadPtr(m_out.absolute(globalObject->addressOfGlobalThis())));
7384 }
7385
7386 void compileGetClosureVar()
7387 {
7388 setJSValue(
7389 m_out.load64(
7390 lowCell(m_node->child1()),
7391 m_heaps.JSLexicalEnvironment_variables[m_node->scopeOffset().offset()]));
7392 }
7393
7394 void compilePutClosureVar()
7395 {
7396 m_out.store64(
7397 lowJSValue(m_node->child2()),
7398 lowCell(m_node->child1()),
7399 m_heaps.JSLexicalEnvironment_variables[m_node->scopeOffset().offset()]);
7400 }
7401
7402 void compileGetFromArguments()
7403 {
7404 setJSValue(
7405 m_out.load64(
7406 lowCell(m_node->child1()),
7407 m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()]));
7408 }
7409
7410 void compilePutToArguments()
7411 {
7412 m_out.store64(
7413 lowJSValue(m_node->child2()),
7414 lowCell(m_node->child1()),
7415 m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()]);
7416 }
7417
    void compileGetArgument()
    {
        // Lowers GetArgument: loads the frame slot for the node's argument
        // index (index 1 maps to argumentsStart + 0 — see `arg` below),
        // or yields undefined if the caller passed too few arguments.
        LValue argumentCount = m_out.load32(payloadFor(AssemblyHelpers::argumentCount(m_node->origin.semantic)));

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock outOfBounds = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // argumentCount includes |this|, so count <= index means absent.
        m_out.branch(m_out.lessThanOrEqual(argumentCount, m_out.constInt32(m_node->argumentIndex())), unsure(outOfBounds), unsure(inBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
        VirtualRegister arg = AssemblyHelpers::argumentsStart(m_node->origin.semantic) + m_node->argumentIndex() - 1;
        ValueFromBlock inBoundsResult = m_out.anchor(m_out.load64(addressFor(arg)));
        m_out.jump(continuation);

        m_out.appendTo(outOfBounds, continuation);
        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueUndefined));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
    }
7440
    void compileCompareEq()
    {
        // Lowers CompareEq (loose ==). For use-kind pairs where loose and
        // strict equality coincide we defer to compileCompareStrictEq; the
        // object-vs-object-or-other and known-other cases handle the
        // null/undefined rules; everything else takes the generic path.
        if (m_node->isBinaryUseKind(Int32Use)
            || m_node->isBinaryUseKind(Int52RepUse)
            || m_node->isBinaryUseKind(DoubleRepUse)
            || m_node->isBinaryUseKind(ObjectUse)
            || m_node->isBinaryUseKind(BooleanUse)
            || m_node->isBinaryUseKind(SymbolUse)
            || m_node->isBinaryUseKind(StringIdentUse)
            || m_node->isBinaryUseKind(StringUse)) {
            compileCompareStrictEq();
            return;
        }

        // Note the argument order: the object-or-other edge goes first.
        if (m_node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
            compareEqObjectOrOtherToObject(m_node->child2(), m_node->child1());
            return;
        }

        if (m_node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
            compareEqObjectOrOtherToObject(m_node->child1(), m_node->child2());
            return;
        }

        // Comparing against a value known to be null/undefined reduces to an
        // "is null or undefined" test on the other operand.
        if (m_node->child1().useKind() == KnownOtherUse) {
            ASSERT(!m_interpreter.needsTypeCheck(m_node->child1(), SpecOther));
            setBoolean(equalNullOrUndefined(m_node->child2(), AllCellsAreFalse, EqualNullOrUndefined, ManualOperandSpeculation));
            return;
        }

        if (m_node->child2().useKind() == KnownOtherUse) {
            ASSERT(!m_interpreter.needsTypeCheck(m_node->child2(), SpecOther));
            setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualNullOrUndefined, ManualOperandSpeculation));
            return;
        }

        DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse), m_node->child1().useKind(), m_node->child2().useKind());
        // Fully generic comparison via the nonSpeculativeCompare helper.
        nonSpeculativeCompare(
            [&] (LValue left, LValue right) {
                return m_out.equal(left, right);
            },
            operationCompareEq);
    }
7484
    void compileCompareStrictEq()
    {
        // Lowers CompareStrictEq (===). Each supported use-kind pair gets a
        // specialized lowering; the final fallthrough is the fully generic
        // untyped comparison. Also reached from compileCompareEq for the
        // use kinds where == and === agree.
        if (m_node->isBinaryUseKind(Int32Use)) {
            setBoolean(
                m_out.equal(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(Int52RepUse)) {
            // Compare in whichever Int52 representation child1 already has;
            // child2 is coerced to the same kind.
            Int52Kind kind;
            LValue left = lowWhicheverInt52(m_node->child1(), kind);
            LValue right = lowInt52(m_node->child2(), kind);
            setBoolean(m_out.equal(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(DoubleRepUse)) {
            setBoolean(
                m_out.doubleEqual(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(StringIdentUse)) {
            // Atomized string impls compare by pointer identity.
            setBoolean(
                m_out.equal(lowStringIdent(m_node->child1()), lowStringIdent(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(StringUse)) {
            // Pointer-equal strings are trivially equal; otherwise fall back
            // to a full content comparison.
            LValue left = lowCell(m_node->child1());
            LValue right = lowCell(m_node->child2());

            LBasicBlock notTriviallyEqualCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            speculateString(m_node->child1(), left);

            ValueFromBlock fastResult = m_out.anchor(m_out.booleanTrue);
            m_out.branch(
                m_out.equal(left, right), unsure(continuation), unsure(notTriviallyEqualCase));

            LBasicBlock lastNext = m_out.appendTo(notTriviallyEqualCase, continuation);

            // child2's string check is only needed on the non-trivial path:
            // if right was pointer-equal to left it was already a string.
            speculateString(m_node->child2(), right);

            ValueFromBlock slowResult = m_out.anchor(stringsEqual(left, right, m_node->child1(), m_node->child2()));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, fastResult, slowResult));
            return;
        }

        // Object-vs-untyped: object identity comparison suffices because an
        // object is strictly equal only to itself.
        if (m_node->isBinaryUseKind(ObjectUse, UntypedUse)) {
            setBoolean(
                m_out.equal(
                    lowNonNullObject(m_node->child1()),
                    lowJSValue(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(UntypedUse, ObjectUse)) {
            setBoolean(
                m_out.equal(
                    lowNonNullObject(m_node->child2()),
                    lowJSValue(m_node->child1())));
            return;
        }

        if (m_node->isBinaryUseKind(ObjectUse)) {
            setBoolean(
                m_out.equal(
                    lowNonNullObject(m_node->child1()),
                    lowNonNullObject(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(BooleanUse)) {
            setBoolean(
                m_out.equal(lowBoolean(m_node->child1()), lowBoolean(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(SymbolUse)) {
            // Symbols compare by cell identity.
            LValue leftSymbol = lowSymbol(m_node->child1());
            LValue rightSymbol = lowSymbol(m_node->child2());
            setBoolean(m_out.equal(leftSymbol, rightSymbol));
            return;
        }

        if (m_node->isBinaryUseKind(BigIntUse)) {
            // FIXME: [ESNext][BigInt] Create specialized version of strict equals for BigIntUse
            // https://bugs.webkit.org/show_bug.cgi?id=182895
            LValue left = lowBigInt(m_node->child1());
            LValue right = lowBigInt(m_node->child2());

            LBasicBlock notTriviallyEqualCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Pointer-equal BigInt cells are equal; otherwise call the
            // generic strict-equals operation.
            ValueFromBlock fastResult = m_out.anchor(m_out.booleanTrue);
            m_out.branch(m_out.equal(left, right), rarely(continuation), usually(notTriviallyEqualCase));

            LBasicBlock lastNext = m_out.appendTo(notTriviallyEqualCase, continuation);

            ValueFromBlock slowResult = m_out.anchor(m_out.notNull(vmCall(
                pointerType(), m_out.operation(operationCompareStrictEq), m_callFrame, left, right)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, fastResult, slowResult));
            return;
        }

        if (m_node->isBinaryUseKind(SymbolUse, UntypedUse)
            || m_node->isBinaryUseKind(UntypedUse, SymbolUse)) {
            // Normalize so symbolEdge really is the symbol side.
            Edge symbolEdge = m_node->child1();
            Edge untypedEdge = m_node->child2();
            if (symbolEdge.useKind() != SymbolUse)
                std::swap(symbolEdge, untypedEdge);

            LValue leftSymbol = lowSymbol(symbolEdge);
            LValue untypedValue = lowJSValue(untypedEdge);

            // A symbol is strictly equal only to the same symbol cell.
            setBoolean(m_out.equal(leftSymbol, untypedValue));
            return;
        }

        if (m_node->isBinaryUseKind(MiscUse, UntypedUse)
            || m_node->isBinaryUseKind(UntypedUse, MiscUse)) {
            // Misc values (null/undefined/bool/etc.) have unique encodings,
            // so a raw 64-bit comparison is exact.
            speculate(m_node->child1());
            speculate(m_node->child2());
            LValue left = lowJSValue(m_node->child1(), ManualOperandSpeculation);
            LValue right = lowJSValue(m_node->child2(), ManualOperandSpeculation);
            setBoolean(m_out.equal(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(StringIdentUse, NotStringVarUse)
            || m_node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
            Edge leftEdge = m_node->childFor(StringIdentUse);
            Edge rightEdge = m_node->childFor(NotStringVarUse);

            LValue left = lowStringIdent(leftEdge);
            LValue rightValue = lowJSValue(rightEdge, ManualOperandSpeculation);

            LBasicBlock isCellCase = m_out.newBlock();
            LBasicBlock isStringCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Non-cell right operand can never equal a string.
            ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
            m_out.branch(
                isCell(rightValue, provenType(rightEdge)),
                unsure(isCellCase), unsure(continuation));

            // Cell but not a string: also unequal.
            LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
            ValueFromBlock notStringResult = m_out.anchor(m_out.booleanFalse);
            m_out.branch(
                isString(rightValue, provenType(rightEdge)),
                unsure(isStringCase), unsure(continuation));

            // Both atomized: compare StringImpl pointers.
            m_out.appendTo(isStringCase, continuation);
            LValue right = m_out.loadPtr(rightValue, m_heaps.JSString_value);
            speculateStringIdent(rightEdge, rightValue, right);
            ValueFromBlock isStringResult = m_out.anchor(m_out.equal(left, right));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, notCellResult, notStringResult, isStringResult));
            return;
        }

        if (m_node->isBinaryUseKind(StringUse, UntypedUse)) {
            compileStringToUntypedStrictEquality(m_node->child1(), m_node->child2());
            return;
        }
        if (m_node->isBinaryUseKind(UntypedUse, StringUse)) {
            compileStringToUntypedStrictEquality(m_node->child2(), m_node->child1());
            return;
        }

        DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse), m_node->child1().useKind(), m_node->child2().useKind());
        // Fully generic comparison via the nonSpeculativeCompare helper.
        nonSpeculativeCompare(
            [&] (LValue left, LValue right) {
                return m_out.equal(left, right);
            },
            operationCompareStrictEq);
    }
7672
    // Strict equality between a proven-string edge and an untyped edge:
    // pointer-equal cells are trivially equal; a non-cell or non-string
    // right side is trivially unequal; only string-vs-string falls through
    // to a full content comparison.
    void compileStringToUntypedStrictEquality(Edge stringEdge, Edge untypedEdge)
    {
        ASSERT(stringEdge.useKind() == StringUse);
        ASSERT(untypedEdge.useKind() == UntypedUse);

        LValue leftString = lowCell(stringEdge);
        LValue rightValue = lowJSValue(untypedEdge);
        SpeculatedType rightValueType = provenType(untypedEdge);

        // Verify left is string.
        speculateString(stringEdge, leftString);

        LBasicBlock testUntypedEdgeIsCell = m_out.newBlock();
        LBasicBlock testUntypedEdgeIsString = m_out.newBlock();
        LBasicBlock testStringEquality = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Given left is string. If the values are strictly equal, rightValue has to be the same string.
        ValueFromBlock fastTrue = m_out.anchor(m_out.booleanTrue);
        m_out.branch(m_out.equal(leftString, rightValue), unsure(continuation), unsure(testUntypedEdgeIsCell));

        // A non-cell can never be strictly equal to a string.
        LBasicBlock lastNext = m_out.appendTo(testUntypedEdgeIsCell, testUntypedEdgeIsString);
        ValueFromBlock fastFalse = m_out.anchor(m_out.booleanFalse);
        m_out.branch(isNotCell(rightValue, rightValueType), unsure(continuation), unsure(testUntypedEdgeIsString));

        // Check if the untyped edge is a string.
        m_out.appendTo(testUntypedEdgeIsString, testStringEquality);
        m_out.branch(isNotString(rightValue, rightValueType), unsure(continuation), unsure(testStringEquality));

        // Full String compare.
        m_out.appendTo(testStringEquality, continuation);
        ValueFromBlock slowResult = m_out.anchor(stringsEqual(leftString, rightValue, stringEdge, untypedEdge));
        m_out.jump(continuation);

        // Continuation.
        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, fastTrue, fastFalse, slowResult));
    }
7711
7712 void compileCompareEqPtr()
7713 {
7714 setBoolean(
7715 m_out.equal(
7716 lowJSValue(m_node->child1()),
7717 weakPointer(m_node->cellOperand()->cell())));
7718 }
7719
7720 void compileCompareLess()
7721 {
7722 compare(
7723 [&] (LValue left, LValue right) {
7724 return m_out.lessThan(left, right);
7725 },
7726 [&] (LValue left, LValue right) {
7727 return m_out.doubleLessThan(left, right);
7728 },
7729 operationCompareStringImplLess,
7730 operationCompareStringLess,
7731 operationCompareLess);
7732 }
7733
7734 void compileCompareLessEq()
7735 {
7736 compare(
7737 [&] (LValue left, LValue right) {
7738 return m_out.lessThanOrEqual(left, right);
7739 },
7740 [&] (LValue left, LValue right) {
7741 return m_out.doubleLessThanOrEqual(left, right);
7742 },
7743 operationCompareStringImplLessEq,
7744 operationCompareStringLessEq,
7745 operationCompareLessEq);
7746 }
7747
7748 void compileCompareGreater()
7749 {
7750 compare(
7751 [&] (LValue left, LValue right) {
7752 return m_out.greaterThan(left, right);
7753 },
7754 [&] (LValue left, LValue right) {
7755 return m_out.doubleGreaterThan(left, right);
7756 },
7757 operationCompareStringImplGreater,
7758 operationCompareStringGreater,
7759 operationCompareGreater);
7760 }
7761
7762 void compileCompareGreaterEq()
7763 {
7764 compare(
7765 [&] (LValue left, LValue right) {
7766 return m_out.greaterThanOrEqual(left, right);
7767 },
7768 [&] (LValue left, LValue right) {
7769 return m_out.doubleGreaterThanOrEqual(left, right);
7770 },
7771 operationCompareStringImplGreaterEq,
7772 operationCompareStringGreaterEq,
7773 operationCompareGreaterEq);
7774 }
7775
7776 void compileCompareBelow()
7777 {
7778 setBoolean(m_out.below(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
7779 }
7780
7781 void compileCompareBelowEq()
7782 {
7783 setBoolean(m_out.belowOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
7784 }
7785
    void compileSameValue()
    {
        // Lowers SameValue (Object.is semantics). For DoubleRepUse the
        // patchpoint compares raw 64-bit patterns, which distinguishes +0
        // from -0; bit-unequal operands then go through numberCase, which
        // reports equal iff both are NaN. UntypedUse calls the VM.
        if (m_node->isBinaryUseKind(DoubleRepUse)) {
            LValue arg1 = lowDouble(m_node->child1());
            LValue arg2 = lowDouble(m_node->child2());

            LBasicBlock numberCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // result = (bits(arg1) == bits(arg2)), computed in a scratch GPR.
            PatchpointValue* patchpoint = m_out.patchpoint(Int32);
            patchpoint->append(arg1, ValueRep::SomeRegister);
            patchpoint->append(arg2, ValueRep::SomeRegister);
            patchpoint->numGPScratchRegisters = 1;
            patchpoint->setGenerator(
                [] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                    GPRReg scratchGPR = params.gpScratch(0);
                    jit.moveDoubleTo64(params[1].fpr(), scratchGPR);
                    jit.moveDoubleTo64(params[2].fpr(), params[0].gpr());
                    jit.compare64(CCallHelpers::Equal, scratchGPR, params[0].gpr(), params[0].gpr());
                });
            patchpoint->effects = Effects::none();
            ValueFromBlock compareResult = m_out.anchor(patchpoint);
            m_out.branch(patchpoint, unsure(continuation), unsure(numberCase));

            // Bit patterns differ: equal only if both operands are NaN.
            LBasicBlock lastNext = m_out.appendTo(numberCase, continuation);
            LValue isArg1NaN = m_out.doubleNotEqualOrUnordered(arg1, arg1);
            LValue isArg2NaN = m_out.doubleNotEqualOrUnordered(arg2, arg2);
            ValueFromBlock nanResult = m_out.anchor(m_out.bitAnd(isArg1NaN, isArg2NaN));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, compareResult, nanResult));
            return;
        }

        ASSERT(m_node->isBinaryUseKind(UntypedUse));
        setBoolean(vmCall(Int32, m_out.operation(operationSameValue), m_callFrame, lowJSValue(m_node->child1()), lowJSValue(m_node->child2())));
    }
7824
7825 void compileLogicalNot()
7826 {
7827 setBoolean(m_out.logicalNot(boolify(m_node->child1())));
7828 }
7829
    void compileCallOrConstruct()
    {
        // Lowers Call/Construct as a patchpoint that emits the standard JS
        // calling-convention inline cache: a patchable branch on the callee,
        // a fast near call, and a slow path through the link-call thunk.
        // Arguments are laid out on the stack in JS frame order.
        Node* node = m_node;
        unsigned numArgs = node->numChildren() - 1;

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        unsigned frameSize = (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue);
        unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);

        // JS->JS calling convention requires that the caller allows this much space on top of stack to
        // get trashed by the callee, even if not all of that space is used to pass arguments. We tell
        // B3 this explicitly for two reasons:
        //
        // - We will only pass frameSize worth of stuff.
        // - The trashed stack guarantee is logically separate from the act of passing arguments, so we
        //   shouldn't rely on Air to infer the trashed stack property based on the arguments it ends
        //   up seeing.
        m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);

        // Collect the arguments, since this can generate code and we want to generate it before we emit
        // the call.
        Vector<ConstrainedValue> arguments;

        // Make sure that the callee goes into GPR0 because that's where the slow path thunks expect the
        // callee to be.
        arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));

        // Pins a value to the stack slot for `reg` in the callee frame being
        // built (offsets are relative to SP, past the caller frame/PC pair).
        auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
            intptr_t offsetFromSP =
                (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
            arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
        };

        addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
        addArgument(m_out.constInt32(numArgs), VirtualRegister(CallFrameSlot::argumentCount), PayloadOffset);
        for (unsigned i = 0; i < numArgs; ++i)
            addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendVector(arguments);

        // Calls can throw; arrange for an OSR exit handler for unwinding.
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);

        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // Record the call site in the frame header so unwinding can
                // identify this call.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();

                // Inline cache: patchable compare of the callee against the
                // expected value, fast near call on match, slow path through
                // the link thunk otherwise.
                CCallHelpers::DataLabelPtr targetToCheck;
                CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
                    CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
                    CCallHelpers::TrustedImmPtr(nullptr));

                CCallHelpers::Call fastCall = jit.nearCall();
                CCallHelpers::Jump done = jit.jump();

                slowPath.link(&jit);

                jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
                CCallHelpers::Call slowCall = jit.nearCall();
                done.link(&jit);

                callLinkInfo->setUpCall(
                    node->op() == Construct ? CallLinkInfo::Construct : CallLinkInfo::Call,
                    node->origin.semantic, GPRInfo::regT0);

                // Restore our stack pointer after the call.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-params.proc().frameSize()),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
                        linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));

                        callLinkInfo->setCallLocations(
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
                            linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
                    });
            });

        setJSValue(patchpoint);
    }
7933
    // Compiles DirectCall, DirectTailCall, and DirectConstruct nodes: calls where the callee's
    // ExecutableBase is known at compile time (it is the node's operand), so we can link the call
    // directly instead of going through the generic callee check. The fast path is a patchable
    // near call; the slow path calls operationLinkDirectCall to do the linking, then retries.
    void compileDirectCallOrConstruct()
    {
        Node* node = m_node;
        bool isTail = node->op() == DirectTailCall;
        bool isConstruct = node->op() == DirectConstruct;

        ExecutableBase* executable = node->castOperand<ExecutableBase*>();
        FunctionExecutable* functionExecutable = jsDynamicCast<FunctionExecutable*>(vm(), executable);

        // Child 0 is the callee; the remaining children are the arguments (including |this|).
        unsigned numPassedArgs = node->numChildren() - 1;
        unsigned numAllocatedArgs = numPassedArgs;

        if (functionExecutable) {
            // Allocate enough argument slots to cover the callee's declared parameters plus
            // |this|, capped by maximumDirectCallStackSize so a huge parameter list can't blow
            // up our frame. Extra slots beyond the passed arguments are filled with undefined
            // below.
            numAllocatedArgs = std::max(
                numAllocatedArgs,
                std::min(
                    static_cast<unsigned>(functionExecutable->parameterCount()) + 1,
                    Options::maximumDirectCallStackSize()));
        }

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        if (!isTail) {
            // Reserve call-arg area for the callee frame we will build below SP. Tail calls
            // don't build a new frame here (the shuffler reuses ours), so they skip this.
            unsigned frameSize = (CallFrame::headerSizeInRegisters + numAllocatedArgs) * sizeof(EncodedJSValue);
            unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);

            m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);
        }

        Vector<ConstrainedValue> arguments;

        arguments.append(ConstrainedValue(jsCallee, ValueRep::SomeRegister));
        if (!isTail) {
            // Non-tail: have B3 store each value directly into the callee frame slots, addressed
            // relative to SP (reg.offset() is frame-relative; subtract CallerFrameAndPC because
            // the callee frame starts that far below our SP-based call arg area).
            auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
                intptr_t offsetFromSP =
                    (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
                arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
            };

            addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
            addArgument(m_out.constInt32(numPassedArgs), VirtualRegister(CallFrameSlot::argumentCount), PayloadOffset);
            for (unsigned i = 0; i < numPassedArgs; ++i)
                addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0);
            // Pad the allocated-but-not-passed slots with undefined.
            for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
                addArgument(m_out.constInt64(JSValue::encode(jsUndefined())), virtualRegisterForArgument(i), 0);
        } else {
            // Tail: let B3 put the arguments wherever it likes; the CallFrameShuffler in the
            // generator will move them into place from their recovered locations.
            for (unsigned i = 0; i < numPassedArgs; ++i)
                arguments.append(ConstrainedValue(lowJSValue(m_graph.varArgChild(node, 1 + i)), ValueRep::WarmAny));
        }

        // A tail call never returns to us, so the patchpoint has no result.
        PatchpointValue* patchpoint = m_out.patchpoint(isTail ? Void : Int64);
        patchpoint->appendVector(arguments);

        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);

        if (isTail) {
            // The shuffler needs tags.
            patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
            patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        }

        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        if (!isTail) {
            patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
            patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
        }

        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);

                // For a non-Void patchpoint, params[0] is the result rep, so the callee (our
                // first appended value) is at index 1; for the tail (Void) case it is at index 0.
                GPRReg calleeGPR = params[!isTail].gpr();

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                if (isTail) {
                    CallFrameShuffleData shuffleData;
                    shuffleData.numLocals = state->jitCode->common.frameRegisterCount;

                    // Record everything the slow path must preserve: the callee plus every
                    // register that holds an argument recovery.
                    RegisterSet toSave = params.unavailableRegisters();
                    shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatCell);
                    toSave.set(calleeGPR);
                    for (unsigned i = 0; i < numPassedArgs; ++i) {
                        ValueRecovery recovery = params[1 + i].recoveryForJSValue();
                        shuffleData.args.append(recovery);
                        recovery.forEachReg(
                            [&] (Reg reg) {
                                toSave.set(reg);
                            });
                    }
                    // Padding slots materialize as the undefined constant.
                    for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
                        shuffleData.args.append(ValueRecovery::constant(jsUndefined()));
                    shuffleData.numPassedArgs = numPassedArgs;
                    shuffleData.setupCalleeSaveRegisters(jit.codeBlock());

                    CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();

                    // Unlinked state: the patchable jump routes to the slow path. Once linked,
                    // it is repatched to fall through to mainPath.
                    CCallHelpers::PatchableJump patchableJump = jit.patchableJump();
                    CCallHelpers::Label mainPath = jit.label();

                    jit.store32(
                        CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                        CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                    callLinkInfo->setFrameShuffleData(shuffleData);
                    CallFrameShuffler(jit, shuffleData).prepareForTailCall();

                    CCallHelpers::Call call = jit.nearTailCall();

                    jit.abortWithReason(JITDidReturnFromTailCall);

                    // Slow path: call operationLinkDirectCall to link the callee, then loop
                    // back and take the (now linked) fast path.
                    CCallHelpers::Label slowPath = jit.label();
                    patchableJump.m_jump.linkTo(slowPath, &jit);
                    callOperation(
                        *state, toSave, jit,
                        node->origin.semantic, exceptions.get(), operationLinkDirectCall,
                        InvalidGPRReg, CCallHelpers::TrustedImmPtr(callLinkInfo), calleeGPR).call();
                    jit.jump().linkTo(mainPath, &jit);

                    callLinkInfo->setUpCall(
                        CallLinkInfo::DirectTailCall, node->origin.semantic, InvalidGPRReg);
                    callLinkInfo->setExecutableDuringCompilation(executable);
                    if (numAllocatedArgs > numPassedArgs)
                        callLinkInfo->setMaxNumArguments(numAllocatedArgs);

                    jit.addLinkTask(
                        [=] (LinkBuffer& linkBuffer) {
                            CodeLocationLabel<JSInternalPtrTag> patchableJumpLocation = linkBuffer.locationOf<JSInternalPtrTag>(patchableJump);
                            CodeLocationNearCall<JSInternalPtrTag> callLocation = linkBuffer.locationOfNearCall<JSInternalPtrTag>(call);
                            CodeLocationLabel<JSInternalPtrTag> slowPathLocation = linkBuffer.locationOf<JSInternalPtrTag>(slowPath);

                            callLinkInfo->setCallLocations(
                                patchableJumpLocation,
                                slowPathLocation,
                                callLocation);
                        });
                    return;
                }

                // Non-tail direct call/construct. B3 has already stored the frame contents via
                // the stackArgument constraints above; we just emit the patchable call.
                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();

                CCallHelpers::Label mainPath = jit.label();

                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                CCallHelpers::Call call = jit.nearCall();
                // Restore SP after the call returns.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-params.proc().frameSize()),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

                callLinkInfo->setUpCall(
                    isConstruct ? CallLinkInfo::DirectConstruct : CallLinkInfo::DirectCall,
                    node->origin.semantic, InvalidGPRReg);
                callLinkInfo->setExecutableDuringCompilation(executable);
                if (numAllocatedArgs > numPassedArgs)
                    callLinkInfo->setMaxNumArguments(numAllocatedArgs);

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        CCallHelpers::Label slowPath = jit.label();
                        // On x86 the unlinked near call pushed a return address; pop it before
                        // calling the link operation.
                        if (isX86())
                            jit.pop(CCallHelpers::selectScratchGPR(calleeGPR));

                        callOperation(
                            *state, params.unavailableRegisters(), jit,
                            node->origin.semantic, exceptions.get(), operationLinkDirectCall,
                            InvalidGPRReg, CCallHelpers::TrustedImmPtr(callLinkInfo),
                            calleeGPR).call();
                        jit.jump().linkTo(mainPath, &jit);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                CodeLocationNearCall<JSInternalPtrTag> callLocation = linkBuffer.locationOfNearCall<JSInternalPtrTag>(call);
                                CodeLocationLabel<JSInternalPtrTag> slowPathLocation = linkBuffer.locationOf<JSInternalPtrTag>(slowPath);

                                // Unlinked state: the near call initially targets the slow path.
                                linkBuffer.link(call, slowPathLocation);

                                callLinkInfo->setCallLocations(
                                    CodeLocationLabel<JSInternalPtrTag>(),
                                    slowPathLocation,
                                    callLocation);
                            });
                    });
            });

        if (isTail)
            patchpoint->effects.terminal = true;
        else
            setJSValue(patchpoint);
    }
8134
    // Compiles a (non-direct) TailCall node. The callee is pinned to regT0 so it can feed both
    // the inline-cache check and the link-call thunk; arguments are taken wherever B3 puts them
    // and shuffled into our own frame by the CallFrameShuffler, destroying our frame in the
    // process. The patchpoint is terminal: control never returns here.
    void compileTailCall()
    {
        Node* node = m_node;
        unsigned numArgs = node->numChildren() - 1;

        // It seems counterintuitive that this is needed given that tail calls don't create a new frame
        // on the stack. However, the tail call slow path builds the frame at SP instead of FP before
        // calling into the slow path C code. This slow path may decide to throw an exception because
        // the callee we're trying to call is not callable. Throwing an exception will cause us to walk
        // the stack, which may read, for the sake of the correctness of this code, arbitrary slots on the
        // stack to recover state. This call arg area ensures the call frame shuffler does not overwrite
        // any of the slots the stack walking code requires when on the slow path.
        m_proc.requestCallArgAreaSizeInBytes(
            WTF::roundUpToMultipleOf(stackAlignmentBytes(), (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue)));

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        // We want B3 to give us all of the arguments using whatever mechanism it thinks is
        // convenient. The generator then shuffles those arguments into our own call frame,
        // destroying our frame in the process.

        // Note that we don't have to do anything special for exceptions. A tail call is only a
        // tail call if it is not inside a try block.

        Vector<ConstrainedValue> arguments;

        arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));

        for (unsigned i = 0; i < numArgs; ++i) {
            // Note: we could let the shuffler do boxing for us, but it's not super clear that this
            // would be better. Also, if we wanted to do that, then we'd have to teach the shuffler
            // that 32-bit values could land at 4-byte alignment but not 8-byte alignment.

            ConstrainedValue constrainedValue(
                lowJSValue(m_graph.varArgChild(node, 1 + i)),
                ValueRep::WarmAny);
            arguments.append(constrainedValue);
        }

        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->appendVector(arguments);

        // The shuffler and the IC check both need the JSValue tag constants.
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));

        // Prevent any of the arguments from using the scratch register.
        patchpoint->clobberEarly(RegisterSet::macroScratchRegisters());

        patchpoint->effects.terminal = true;

        // We don't have to tell the patchpoint that we will clobber registers, since we won't return
        // anyway.

        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);

                // Yes, this is really necessary. You could throw an exception in a host call on the
                // slow path. That'll route us to lookupExceptionHandler(), which unwinds starting
                // with the call site index of our frame. Bad things happen if it's not set.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                // Describe where the callee and each argument currently live so the shuffler can
                // move them into the outgoing frame.
                CallFrameShuffleData shuffleData;
                shuffleData.numLocals = state->jitCode->common.frameRegisterCount;
                shuffleData.callee = ValueRecovery::inGPR(GPRInfo::regT0, DataFormatJS);

                for (unsigned i = 0; i < numArgs; ++i)
                    shuffleData.args.append(params[1 + i].recoveryForJSValue());

                shuffleData.numPassedArgs = numArgs;

                shuffleData.setupCalleeSaveRegisters(jit.codeBlock());

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();

                // Inline-cache check: compare the callee in regT0 against the patched expected
                // value; mismatch goes to the slow path.
                CCallHelpers::DataLabelPtr targetToCheck;
                CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
                    CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
                    CCallHelpers::TrustedImmPtr(nullptr));

                // Fast path: shuffle arguments into our frame and tail-jump to the callee.
                callLinkInfo->setFrameShuffleData(shuffleData);
                CallFrameShuffler(jit, shuffleData).prepareForTailCall();

                CCallHelpers::Call fastCall = jit.nearTailCall();

                slowPath.link(&jit);

                // Slow path: shuffle for the slow-path calling convention (frame built at SP)
                // and call the link-call thunk, which never returns here.
                CallFrameShuffler slowPathShuffler(jit, shuffleData);
                slowPathShuffler.setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
                slowPathShuffler.prepareForSlowPath();

                jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
                CCallHelpers::Call slowCall = jit.nearCall();

                jit.abortWithReason(JITDidReturnFromTailCall);

                callLinkInfo->setUpCall(CallLinkInfo::TailCall, codeOrigin, GPRInfo::regT0);

                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        // Bind the slow call to the shared link-call thunk and record the
                        // patchable locations for later (re)linking.
                        MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
                        linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));

                        callLinkInfo->setCallLocations(
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
                            linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
                    });
            });
    }
8251
    // Compiles the varargs call/construct forms whose arguments node is a phantom spread shape
    // (PhantomNewArrayWithSpread / PhantomSpread / PhantomNewArrayBuffer). Instead of
    // materializing the spread array, the generator builds the outgoing call frame in place:
    // static arguments are passed as patchpoint operands, and spread-of-rest arguments are
    // copied directly out of the inline call frame's argument area with an inline loop.
    void compileCallOrConstructVarargsSpread()
    {
        Node* node = m_node;
        Node* arguments = node->child3().node();

        LValue jsCallee = lowJSValue(m_node->child1());
        LValue thisArg = lowJSValue(m_node->child2());

        RELEASE_ASSERT(arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomSpread || arguments->op() == PhantomNewArrayBuffer);

        // Walk the phantom argument tree once to (a) count arguments whose number is known at
        // compile time, (b) collect the LValues to pass through the patchpoint, and (c) compute
        // a per-inline-call-frame dynamic length for each PhantomCreateRest (cached so each
        // frame's length is only loaded once).
        unsigned staticArgumentCount = 0;
        Vector<LValue, 2> spreadLengths;
        Vector<LValue, 8> patchpointArguments;
        HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;
        auto pushAndCountArgumentsFromRightToLeft = recursableLambda([&](auto self, Node* target) -> void {
            if (target->op() == PhantomSpread) {
                self(target->child1().node());
                return;
            }

            if (target->op() == PhantomNewArrayWithSpread) {
                BitVector* bitVector = target->bitVector();
                // The bit vector marks which children are themselves spreads; plain children
                // are lowered now and passed as patchpoint operands.
                for (unsigned i = target->numChildren(); i--; ) {
                    if (bitVector->get(i))
                        self(m_graph.varArgChild(target, i).node());
                    else {
                        ++staticArgumentCount;
                        LValue argument = this->lowJSValue(m_graph.varArgChild(target, i));
                        patchpointArguments.append(argument);
                    }
                }
                return;
            }

            if (target->op() == PhantomNewArrayBuffer) {
                // Constant array literal: its contents are emitted as immediates in the
                // generator, so only the count matters here.
                staticArgumentCount += target->castOperand<JSImmutableButterfly*>()->length();
                return;
            }

            RELEASE_ASSERT(target->op() == PhantomCreateRest);
            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();
            unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
            LValue length = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                return m_out.zeroExtPtr(this->getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip));
            }).iterator->value;
            patchpointArguments.append(length);
            spreadLengths.append(length);
        });

        pushAndCountArgumentsFromRightToLeft(arguments);
        // Total argument count = static count + |this| + the sum of the dynamic spread lengths.
        LValue argumentCountIncludingThis = m_out.constIntPtr(staticArgumentCount + 1);
        for (LValue length : spreadLengths)
            argumentCountIncludingThis = m_out.add(length, argumentCountIncludingThis);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);

        // Operand layout (the generator relies on these indices; params[0] is the Int64
        // result): params[1] = callee, params[2] = this, params[3] = argument count,
        // params[4..] = patchpointArguments — hence paramsOffset = 4 below.
        patchpoint->append(jsCallee, ValueRep::reg(GPRInfo::regT0));
        patchpoint->append(thisArg, ValueRep::WarmAny);
        patchpoint->append(argumentCountIncludingThis, ValueRep::WarmAny);
        patchpoint->appendVectorWithRep(patchpointArguments, ValueRep::WarmAny);
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));

        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);

        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobber(RegisterSet::volatileRegistersForJSCall()); // No inputs will be in a volatile register.
        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);

        patchpoint->numGPScratchRegisters = 0;

        // This is the minimum amount of call arg area stack space that all JS->JS calls always have.
        unsigned minimumJSCallAreaSize =
            sizeof(CallerFrameAndPC) +
            WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(EncodedJSValue));

        m_proc.requestCallArgAreaSizeInBytes(minimumJSCallAreaSize);

        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();

                // Treat everything except the JS-call-volatile registers (plus the callee) as
                // in use, then carve out four scratch registers for frame construction.
                RegisterSet usedRegisters = RegisterSet::allRegisters();
                usedRegisters.exclude(RegisterSet::volatileRegistersForJSCall());
                GPRReg calleeGPR = params[1].gpr();
                usedRegisters.set(calleeGPR);

                ScratchRegisterAllocator allocator(usedRegisters);
                GPRReg scratchGPR1 = allocator.allocateScratchGPR();
                GPRReg scratchGPR2 = allocator.allocateScratchGPR();
                GPRReg scratchGPR3 = allocator.allocateScratchGPR();
                GPRReg scratchGPR4 = allocator.allocateScratchGPR();
                RELEASE_ASSERT(!allocator.numberOfReusedRegisters());

                // Materialize a patchpoint operand (constant, stack slot, or register) into
                // the given scratch register. Only 64-bit values are requested here.
                auto getValueFromRep = [&] (B3::ValueRep rep, GPRReg result) {
                    ASSERT(!usedRegisters.get(result));

                    if (rep.isConstant()) {
                        jit.move(CCallHelpers::Imm64(rep.value()), result);
                        return;
                    }

                    // Note: in this function, we only request 64 bit values.
                    if (rep.isStack()) {
                        jit.load64(
                            CCallHelpers::Address(GPRInfo::callFrameRegister, rep.offsetFromFP()),
                            result);
                        return;
                    }

                    RELEASE_ASSERT(rep.isGPR());
                    ASSERT(usedRegisters.get(rep.gpr()));
                    jit.move(rep.gpr(), result);
                };

                // Call a C operation and branch to the exception-exit list if it threw.
                auto callWithExceptionCheck = [&] (void* callee) {
                    jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(callee)), GPRInfo::nonPreservedNonArgumentGPR0);
                    jit.call(GPRInfo::nonPreservedNonArgumentGPR0, OperationPtrTag);
                    exceptions->append(jit.emitExceptionCheck(*vm, AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
                };

                CCallHelpers::JumpList slowCase;
                unsigned originalStackHeight = params.proc().frameSize();

                {
                    // Build the outgoing frame below our own. scratchGPR1 = base of the new
                    // frame; scratchGPR2 = remaining argument count, decremented as arguments
                    // are stored right-to-left.
                    unsigned numUsedSlots = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), originalStackHeight / sizeof(EncodedJSValue));
                    B3::ValueRep argumentCountIncludingThisRep = params[3];
                    getValueFromRep(argumentCountIncludingThisRep, scratchGPR2);
                    // Too many arguments: go to the stack-overflow slow case below.
                    slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR2, CCallHelpers::TrustedImm32(JSC::maxArguments + 1)));

                    jit.move(scratchGPR2, scratchGPR1);
                    jit.addPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(numUsedSlots + CallFrame::headerSizeInRegisters)), scratchGPR1);
                    // scratchGPR1 now has the required frame size in Register units
                    // Round scratchGPR1 to next multiple of stackAlignmentRegisters()
                    jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), scratchGPR1);
                    jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), scratchGPR1);
                    jit.negPtr(scratchGPR1);
                    jit.getEffectiveAddress(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight), scratchGPR1);

                    // Before touching stack values, we should update the stack pointer to protect them from signal stack.
                    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), scratchGPR1, CCallHelpers::stackPointerRegister);

                    jit.store32(scratchGPR2, CCallHelpers::Address(scratchGPR1, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));

                    int storeOffset = CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register));

                    // params[paramsOffset + index] walks patchpointArguments in append order;
                    // see the operand-layout comment at the patchpoint construction above.
                    unsigned paramsOffset = 4;
                    unsigned index = 0;
                    auto emitArgumentsFromRightToLeft = recursableLambda([&](auto self, Node* target) -> void {
                        if (target->op() == PhantomSpread) {
                            self(target->child1().node());
                            return;
                        }

                        if (target->op() == PhantomNewArrayWithSpread) {
                            BitVector* bitVector = target->bitVector();
                            for (unsigned i = target->numChildren(); i--; ) {
                                if (bitVector->get(i))
                                    self(state->graph.varArgChild(target, i).node());
                                else {
                                    // Store one static argument at the next (right-to-left)
                                    // slot, tracked by decrementing scratchGPR2.
                                    jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR2);
                                    getValueFromRep(params[paramsOffset + (index++)], scratchGPR3);
                                    jit.store64(scratchGPR3,
                                        CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, storeOffset));
                                }
                            }
                            return;
                        }

                        if (target->op() == PhantomNewArrayBuffer) {
                            auto* array = target->castOperand<JSImmutableButterfly*>();
                            Checked<int32_t> offsetCount { 1 };
                            for (unsigned i = array->length(); i--; ++offsetCount) {
                                // Because varargs values are drained as JSValue, we should not generate value
                                // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                                int64_t value = JSValue::encode(array->get(i));
                                jit.move(CCallHelpers::TrustedImm64(value), scratchGPR3);
                                Checked<int32_t> currentStoreOffset { storeOffset };
                                currentStoreOffset -= (offsetCount * static_cast<int32_t>(sizeof(Register)));
                                jit.store64(scratchGPR3,
                                    CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, currentStoreOffset.unsafeGet()));
                            }
                            jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(array->length())), scratchGPR2);
                            return;
                        }

                        RELEASE_ASSERT(target->op() == PhantomCreateRest);
                        InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();

                        unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();

                        // Dynamic spread: copy numArgumentsToCopy values out of the inline call
                        // frame's argument area into the new frame with an inline loop.
                        // scratchGPR3 = source index, scratchGPR4 = value being copied.
                        B3::ValueRep numArgumentsToCopy = params[paramsOffset + (index++)];
                        getValueFromRep(numArgumentsToCopy, scratchGPR3);
                        int loadOffset = (AssemblyHelpers::argumentsStart(inlineCallFrame).offset() + numberOfArgumentsToSkip) * static_cast<int>(sizeof(Register));

                        auto done = jit.branchTestPtr(MacroAssembler::Zero, scratchGPR3);
                        auto loopStart = jit.label();
                        jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR3);
                        jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR2);
                        jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR3, CCallHelpers::TimesEight, loadOffset), scratchGPR4);
                        jit.store64(scratchGPR4,
                            CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, storeOffset));
                        jit.branchTestPtr(CCallHelpers::NonZero, scratchGPR3).linkTo(loopStart, &jit);
                        done.link(&jit);
                    });
                    emitArgumentsFromRightToLeft(arguments);
                }

                {
                    // Slow case: the argument count exceeded maxArguments — throw stack overflow.
                    CCallHelpers::Jump dontThrow = jit.jump();
                    slowCase.link(&jit);
                    jit.setupArguments<decltype(operationThrowStackOverflowForVarargs)>();
                    callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
                    jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);

                    dontThrow.link(&jit);
                }

                // Store the callee and |this| (params[2]) into the completed frame.
                ASSERT(calleeGPR == GPRInfo::regT0);
                jit.store64(calleeGPR, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
                getValueFromRep(params[2], scratchGPR3);
                jit.store64(scratchGPR3, CCallHelpers::calleeArgumentSlot(0));

                CallLinkInfo::CallType callType;
                if (node->op() == ConstructVarargs || node->op() == ConstructForwardVarargs)
                    callType = CallLinkInfo::ConstructVarargs;
                else if (node->op() == TailCallVarargs || node->op() == TailCallForwardVarargs)
                    callType = CallLinkInfo::TailCallVarargs;
                else
                    callType = CallLinkInfo::CallVarargs;

                bool isTailCall = CallLinkInfo::callModeFor(callType) == CallMode::Tail;

                // Inline-cache check on the callee in regT0; mismatch takes the slow path.
                CCallHelpers::DataLabelPtr targetToCheck;
                CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
                    CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
                    CCallHelpers::TrustedImmPtr(nullptr));

                CCallHelpers::Call fastCall;
                CCallHelpers::Jump done;

                if (isTailCall) {
                    jit.emitRestoreCalleeSaves();
                    jit.prepareForTailCallSlow();
                    fastCall = jit.nearTailCall();
                } else {
                    fastCall = jit.nearCall();
                    done = jit.jump();
                }

                slowPath.link(&jit);

                // Slow path: regT2 carries the CallLinkInfo into the link-call thunk.
                if (isTailCall)
                    jit.emitRestoreCalleeSaves();
                ASSERT(!usedRegisters.get(GPRInfo::regT2));
                jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
                CCallHelpers::Call slowCall = jit.nearCall();

                if (isTailCall)
                    jit.abortWithReason(JITDidReturnFromTailCall);
                else
                    done.link(&jit);

                callLinkInfo->setUpCall(callType, node->origin.semantic, GPRInfo::regT0);

                // Restore SP to our own frame after the call.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-originalStackHeight),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
                        linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));

                        callLinkInfo->setCallLocations(
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
                            linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
                    });
            });

        switch (node->op()) {
        case TailCallForwardVarargs:
            // Tail calls never return; the block ends here.
            m_out.unreachable();
            break;

        default:
            setJSValue(patchpoint);
            break;
        }
    }
8559
8560 void compileCallOrConstructVarargs()
8561 {
8562 Node* node = m_node;
8563 LValue jsCallee = lowJSValue(m_node->child1());
8564 LValue thisArg = lowJSValue(m_node->child2());
8565
8566 LValue jsArguments = nullptr;
8567 bool forwarding = false;
8568
8569 switch (node->op()) {
8570 case CallVarargs:
8571 case TailCallVarargs:
8572 case TailCallVarargsInlinedCaller:
8573 case ConstructVarargs:
8574 jsArguments = lowJSValue(node->child3());
8575 break;
8576 case CallForwardVarargs:
8577 case TailCallForwardVarargs:
8578 case TailCallForwardVarargsInlinedCaller:
8579 case ConstructForwardVarargs:
8580 forwarding = true;
8581 break;
8582 default:
8583 DFG_CRASH(m_graph, node, "bad node type");
8584 break;
8585 }
8586
8587 if (forwarding && m_node->child3()) {
8588 Node* arguments = m_node->child3().node();
8589 if (arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread) {
8590 compileCallOrConstructVarargsSpread();
8591 return;
8592 }
8593 }
8594
8595
8596 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
8597
8598 // Append the forms of the arguments that we will use before any clobbering happens.
8599 patchpoint->append(jsCallee, ValueRep::reg(GPRInfo::regT0));
8600 if (jsArguments)
8601 patchpoint->appendSomeRegister(jsArguments);
8602 patchpoint->appendSomeRegister(thisArg);
8603
8604 if (!forwarding) {
8605 // Now append them again for after clobbering. Note that the compiler may ask us to use a
8606 // different register for the late for the post-clobbering version of the value. This gives
8607 // the compiler a chance to spill these values without having to burn any callee-saves.
8608 patchpoint->append(jsCallee, ValueRep::LateColdAny);
8609 patchpoint->append(jsArguments, ValueRep::LateColdAny);
8610 patchpoint->append(thisArg, ValueRep::LateColdAny);
8611 }
8612
8613 RefPtr<PatchpointExceptionHandle> exceptionHandle =
8614 preparePatchpointForExceptions(patchpoint);
8615
8616 patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
8617 patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
8618
8619 patchpoint->clobber(RegisterSet::macroScratchRegisters());
8620 patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
8621 patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
8622
8623 // This is the minimum amount of call arg area stack space that all JS->JS calls always have.
8624 unsigned minimumJSCallAreaSize =
8625 sizeof(CallerFrameAndPC) +
8626 WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(EncodedJSValue));
8627
8628 m_proc.requestCallArgAreaSizeInBytes(minimumJSCallAreaSize);
8629
8630 CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
8631 State* state = &m_ftlState;
8632 VM* vm = &this->vm();
8633 patchpoint->setGenerator(
8634 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
8635 AllowMacroScratchRegisterUsage allowScratch(jit);
8636 CallSiteIndex callSiteIndex =
8637 state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
8638
8639 Box<CCallHelpers::JumpList> exceptions =
8640 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
8641
8642 exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
8643
8644 jit.store32(
8645 CCallHelpers::TrustedImm32(callSiteIndex.bits()),
8646 CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
8647
8648 CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
8649 CallVarargsData* data = node->callVarargsData();
8650
8651 unsigned argIndex = 1;
8652 GPRReg calleeGPR = params[argIndex++].gpr();
8653 ASSERT(calleeGPR == GPRInfo::regT0);
8654 GPRReg argumentsGPR = jsArguments ? params[argIndex++].gpr() : InvalidGPRReg;
8655 GPRReg thisGPR = params[argIndex++].gpr();
8656
8657 B3::ValueRep calleeLateRep;
8658 B3::ValueRep argumentsLateRep;
8659 B3::ValueRep thisLateRep;
8660 if (!forwarding) {
8661 // If we're not forwarding then we'll need callee, arguments, and this after we
8662 // have potentially clobbered calleeGPR, argumentsGPR, and thisGPR. Our technique
8663 // for this is to supply all of those operands as late uses in addition to
8664 // specifying them as early uses. It's possible that the late use uses a spill
8665 // while the early use uses a register, and it's possible for the late and early
8666 // uses to use different registers. We do know that the late uses interfere with
8667 // all volatile registers and so won't use those, but the early uses may use
8668 // volatile registers and in the case of calleeGPR, it's pinned to regT0 so it
8669 // definitely will.
8670 //
8671 // Note that we have to be super careful with these. It's possible that these
8672 // use a shuffling of the registers used for calleeGPR, argumentsGPR, and
8673 // thisGPR. If that happens and we do for example:
8674 //
8675 // calleeLateRep.emitRestore(jit, calleeGPR);
8676 // argumentsLateRep.emitRestore(jit, calleeGPR);
8677 //
8678 // Then we might end up with garbage if calleeLateRep.gpr() == argumentsGPR and
8679 // argumentsLateRep.gpr() == calleeGPR.
8680 //
8681 // We do a variety of things to prevent this from happening. For example, we use
8682 // argumentsLateRep before needing the other two and after we've already stopped
8683 // using the *GPRs. Also, we pin calleeGPR to regT0, and rely on the fact that
8684 // the *LateReps cannot use volatile registers (so they cannot be regT0, so
8685 // calleeGPR != argumentsLateRep.gpr() and calleeGPR != thisLateRep.gpr()).
8686 //
8687 // An alternative would have been to just use early uses and early-clobber all
8688 // volatile registers. But that would force callee, arguments, and this into
8689 // callee-save registers even if we have to spill them. We don't want spilling to
8690 // use up three callee-saves.
8691 //
8692 // TL;DR: The way we use LateReps here is dangerous and barely works but achieves
8693 // some desirable performance properties, so don't mistake the cleverness for
8694 // elegance.
8695 calleeLateRep = params[argIndex++];
8696 argumentsLateRep = params[argIndex++];
8697 thisLateRep = params[argIndex++];
8698 }
8699
8700 // Get some scratch registers.
8701 RegisterSet usedRegisters;
8702 usedRegisters.merge(RegisterSet::stackRegisters());
8703 usedRegisters.merge(RegisterSet::reservedHardwareRegisters());
8704 usedRegisters.merge(RegisterSet::calleeSaveRegisters());
8705 usedRegisters.set(calleeGPR);
8706 if (argumentsGPR != InvalidGPRReg)
8707 usedRegisters.set(argumentsGPR);
8708 usedRegisters.set(thisGPR);
8709 if (calleeLateRep.isReg())
8710 usedRegisters.set(calleeLateRep.reg());
8711 if (argumentsLateRep.isReg())
8712 usedRegisters.set(argumentsLateRep.reg());
8713 if (thisLateRep.isReg())
8714 usedRegisters.set(thisLateRep.reg());
8715 ScratchRegisterAllocator allocator(usedRegisters);
8716 GPRReg scratchGPR1 = allocator.allocateScratchGPR();
8717 GPRReg scratchGPR2 = allocator.allocateScratchGPR();
8718 GPRReg scratchGPR3 = forwarding ? allocator.allocateScratchGPR() : InvalidGPRReg;
8719 RELEASE_ASSERT(!allocator.numberOfReusedRegisters());
8720
8721 auto callWithExceptionCheck = [&] (void* callee) {
8722 jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(callee)), GPRInfo::nonPreservedNonArgumentGPR0);
8723 jit.call(GPRInfo::nonPreservedNonArgumentGPR0, OperationPtrTag);
8724 exceptions->append(jit.emitExceptionCheck(*vm, AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
8725 };
8726
8727 unsigned originalStackHeight = params.proc().frameSize();
8728
8729 if (forwarding) {
8730 jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2);
8731
8732 CCallHelpers::JumpList slowCase;
8733 InlineCallFrame* inlineCallFrame;
8734 if (node->child3())
8735 inlineCallFrame = node->child3()->origin.semantic.inlineCallFrame();
8736 else
8737 inlineCallFrame = node->origin.semantic.inlineCallFrame();
8738
8739 // emitSetupVarargsFrameFastCase modifies the stack pointer if it succeeds.
8740 emitSetupVarargsFrameFastCase(*vm, jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
8741
8742 CCallHelpers::Jump done = jit.jump();
8743 slowCase.link(&jit);
8744 jit.setupArguments<decltype(operationThrowStackOverflowForVarargs)>();
8745 callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
8746 jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
8747
8748 done.link(&jit);
8749 } else {
8750 jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR1);
8751 jit.setupArguments<decltype(operationSizeFrameForVarargs)>(argumentsGPR, scratchGPR1, CCallHelpers::TrustedImm32(data->firstVarArgOffset));
8752 callWithExceptionCheck(bitwise_cast<void*>(operationSizeFrameForVarargs));
8753
8754 jit.move(GPRInfo::returnValueGPR, scratchGPR1);
8755 jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2);
8756 argumentsLateRep.emitRestore(jit, argumentsGPR);
8757 emitSetVarargsFrame(jit, scratchGPR1, false, scratchGPR2, scratchGPR2);
8758 jit.addPtr(CCallHelpers::TrustedImm32(-minimumJSCallAreaSize), scratchGPR2, CCallHelpers::stackPointerRegister);
8759 jit.setupArguments<decltype(operationSetupVarargsFrame)>(scratchGPR2, argumentsGPR, CCallHelpers::TrustedImm32(data->firstVarArgOffset), scratchGPR1);
8760 callWithExceptionCheck(bitwise_cast<void*>(operationSetupVarargsFrame));
8761
8762 jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::returnValueGPR, CCallHelpers::stackPointerRegister);
8763
8764 calleeLateRep.emitRestore(jit, GPRInfo::regT0);
8765
8766 // This may not emit code if thisGPR got a callee-save. Also, we're guaranteed
8767 // that thisGPR != GPRInfo::regT0 because regT0 interferes with it.
8768 thisLateRep.emitRestore(jit, thisGPR);
8769 }
8770
8771 jit.store64(GPRInfo::regT0, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
8772 jit.store64(thisGPR, CCallHelpers::calleeArgumentSlot(0));
8773
8774 CallLinkInfo::CallType callType;
8775 if (node->op() == ConstructVarargs || node->op() == ConstructForwardVarargs)
8776 callType = CallLinkInfo::ConstructVarargs;
8777 else if (node->op() == TailCallVarargs || node->op() == TailCallForwardVarargs)
8778 callType = CallLinkInfo::TailCallVarargs;
8779 else
8780 callType = CallLinkInfo::CallVarargs;
8781
8782 bool isTailCall = CallLinkInfo::callModeFor(callType) == CallMode::Tail;
8783
8784 CCallHelpers::DataLabelPtr targetToCheck;
8785 CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
8786 CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
8787 CCallHelpers::TrustedImmPtr(nullptr));
8788
8789 CCallHelpers::Call fastCall;
8790 CCallHelpers::Jump done;
8791
8792 if (isTailCall) {
8793 jit.emitRestoreCalleeSaves();
8794 jit.prepareForTailCallSlow();
8795 fastCall = jit.nearTailCall();
8796 } else {
8797 fastCall = jit.nearCall();
8798 done = jit.jump();
8799 }
8800
8801 slowPath.link(&jit);
8802
8803 if (isTailCall)
8804 jit.emitRestoreCalleeSaves();
8805 jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
8806 CCallHelpers::Call slowCall = jit.nearCall();
8807
8808 if (isTailCall)
8809 jit.abortWithReason(JITDidReturnFromTailCall);
8810 else
8811 done.link(&jit);
8812
8813 callLinkInfo->setUpCall(callType, node->origin.semantic, GPRInfo::regT0);
8814
8815 jit.addPtr(
8816 CCallHelpers::TrustedImm32(-originalStackHeight),
8817 GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
8818
8819 jit.addLinkTask(
8820 [=] (LinkBuffer& linkBuffer) {
8821 MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
8822 linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));
8823
8824 callLinkInfo->setCallLocations(
8825 CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
8826 CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
8827 linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
8828 });
8829 });
8830
8831 switch (node->op()) {
8832 case TailCallVarargs:
8833 case TailCallForwardVarargs:
8834 m_out.unreachable();
8835 break;
8836
8837 default:
8838 setJSValue(patchpoint);
8839 break;
8840 }
8841 }
8842
    void compileCallEval()
    {
        Node* node = m_node;
        // varArgChild(0) is the callee; the remaining children are the arguments.
        unsigned numArgs = node->numChildren() - 1;

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        // Reserve stack for the callee frame we build in place below.
        unsigned frameSize = (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue);
        unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);

        m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);

        Vector<ConstrainedValue> arguments;
        // Pin the callee to regT0; the generator lambda below relies on this.
        arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));

        // Constrain a value to live at its callee-frame slot, expressed as an offset
        // from the stack pointer (which sits CallerFrameAndPC below the new frame).
        auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
            intptr_t offsetFromSP =
                (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
            arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
        };

        addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
        addArgument(m_out.constInt32(numArgs), VirtualRegister(CallFrameSlot::argumentCount), PayloadOffset);
        for (unsigned i = 0; i < numArgs; ++i)
            addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendVector(arguments);

        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);

        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);

        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM& vm = this->vm();
        patchpoint->setGenerator(
            [=, &vm] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);

                // Set up the OSR-exit path for exceptions before emitting anything that can throw.
                Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // Record our call site index so the unwinder can find the right handler.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
                callLinkInfo->setUpCall(CallLinkInfo::Call, node->origin.semantic, GPRInfo::regT0);

                // regT1 = the callee frame we built above SP; link its caller-frame pointer.
                jit.addPtr(CCallHelpers::TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), CCallHelpers::stackPointerRegister, GPRInfo::regT1);
                jit.storePtr(GPRInfo::callFrameRegister, CCallHelpers::Address(GPRInfo::regT1, CallFrame::callerFrameOffset()));

                // Now we need to make room for:
                // - The caller frame and PC for a call to operationCallEval.
                // - Potentially two arguments on the stack.
                unsigned requiredBytes = sizeof(CallerFrameAndPC) + sizeof(ExecState*) * 2;
                requiredBytes = WTF::roundUpToMultipleOf(stackAlignmentBytes(), requiredBytes);
                jit.subPtr(CCallHelpers::TrustedImm32(requiredBytes), CCallHelpers::stackPointerRegister);
                jit.setupArguments<decltype(operationCallEval)>(GPRInfo::regT1);
                jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationCallEval)), GPRInfo::nonPreservedNonArgumentGPR0);
                jit.call(GPRInfo::nonPreservedNonArgumentGPR0, OperationPtrTag);
                exceptions->append(jit.emitExceptionCheck(state->vm(), AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));

                // A non-zero result means operationCallEval performed the eval; a zero
                // result means the callee wasn't actually eval, so fall back to a
                // regular virtual call of whatever callee is in the frame.
                CCallHelpers::Jump done = jit.branchTest64(CCallHelpers::NonZero, GPRInfo::returnValueGPR);

                jit.addPtr(CCallHelpers::TrustedImm32(requiredBytes), CCallHelpers::stackPointerRegister);
                jit.load64(CCallHelpers::calleeFrameSlot(CallFrameSlot::callee), GPRInfo::regT0);
                jit.emitDumbVirtualCall(vm, callLinkInfo);

                done.link(&jit);
                // Restore SP to its canonical position relative to the frame pointer.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-params.proc().frameSize()),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
            });

        setJSValue(patchpoint);
    }
8927
    void compileLoadVarargs()
    {
        LoadVarargsData* data = m_node->loadVarargsData();
        LValue jsArguments = lowJSValue(m_node->child1());

        // Ask the runtime how many arguments the arguments-like object provides,
        // after skipping data->offset of them.
        LValue length = vmCall(
            Int32, m_out.operation(operationSizeOfVarargs), m_callFrame, jsArguments,
            m_out.constInt32(data->offset));

        // FIXME: There is a chance that we will call an effectful length property twice. This is safe
        // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance
        // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right
        // past the sizing.
        // https://bugs.webkit.org/show_bug.cgi?id=141448

        LValue lengthIncludingThis = m_out.add(length, m_out.int32One);

        // Unsigned "length > length + 1" detects wrap-around of the increment.
        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(length, lengthIncludingThis));

        // Bail out if the arguments won't fit in the space we allotted.
        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));

        m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));

        // FIXME: This computation is rather silly. If operationLoadVarargs just took a pointer instead
        // of a VirtualRegister, we wouldn't have to do this.
        // https://bugs.webkit.org/show_bug.cgi?id=141660
        // Turn the frame-relative byte address of machineStart back into a register
        // index (shift by 3 because each slot is sizeof(EncodedJSValue) == 8 bytes).
        LValue machineStart = m_out.lShr(
            m_out.sub(addressFor(data->machineStart.offset()).value(), m_callFrame),
            m_out.constIntPtr(3));

        vmCall(
            Void, m_out.operation(operationLoadVarargs), m_callFrame,
            m_out.castToInt32(machineStart), jsArguments, m_out.constInt32(data->offset),
            length, m_out.constInt32(data->mandatoryMinimum));
    }
8967
    void compileForwardVarargs()
    {
        // Phantom spread-like sources take a completely different lowering path.
        if (m_node->child1()) {
            Node* arguments = m_node->child1().node();
            if (arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread) {
                compileForwardVarargsWithSpread();
                return;
            }
        }

        LoadVarargsData* data = m_node->loadVarargsData();
        InlineCallFrame* inlineCallFrame;
        if (m_node->child1())
            inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();
        else
            inlineCallFrame = m_node->origin.semantic.inlineCallFrame();

        LValue length = nullptr;
        LValue lengthIncludingThis = nullptr;
        ArgumentsLength argumentsLength = getArgumentsLength(inlineCallFrame);
        if (argumentsLength.isKnown) {
            // Static case: length = max(knownLength - offset, 0), folded to constants.
            unsigned knownLength = argumentsLength.known;
            if (knownLength >= data->offset)
                knownLength = knownLength - data->offset;
            else
                knownLength = 0;
            length = m_out.constInt32(knownLength);
            lengthIncludingThis = m_out.constInt32(knownLength + 1);
        } else {
            // We need to perform the same logical operation as the code above, but through dynamic operations.
            if (!data->offset)
                length = argumentsLength.value;
            else {
                LBasicBlock isLarger = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                // If the dynamic length is <= offset, the result is 0; otherwise subtract.
                ValueFromBlock smallerOrEqualLengthResult = m_out.anchor(m_out.constInt32(0));
                m_out.branch(
                    m_out.above(argumentsLength.value, m_out.constInt32(data->offset)), unsure(isLarger), unsure(continuation));
                LBasicBlock lastNext = m_out.appendTo(isLarger, continuation);
                ValueFromBlock largerLengthResult = m_out.anchor(m_out.sub(argumentsLength.value, m_out.constInt32(data->offset)));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                length = m_out.phi(Int32, smallerOrEqualLengthResult, largerLengthResult);
            }
            lengthIncludingThis = m_out.add(length, m_out.constInt32(1));
        }

        // Bail out if the forwarded arguments won't fit in the space we allotted.
        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));

        m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));

        unsigned numberOfArgumentsToSkip = data->offset;
        LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
        LValue targetStart = addressFor(data->machineStart).value();

        LBasicBlock undefinedLoop = m_out.newBlock();
        LBasicBlock mainLoopEntry = m_out.newBlock();
        LBasicBlock mainLoop = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Both loops below count downwards. First, if mandatoryMinimum exceeds the
        // actual length, fill slots [length, mandatoryMinimum) with undefined.
        LValue lengthAsPtr = m_out.zeroExtPtr(length);
        LValue loopBoundValue = m_out.constIntPtr(data->mandatoryMinimum);
        ValueFromBlock loopBound = m_out.anchor(loopBoundValue);
        m_out.branch(
            m_out.above(loopBoundValue, lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry));

        LBasicBlock lastNext = m_out.appendTo(undefinedLoop, mainLoopEntry);
        LValue previousIndex = m_out.phi(pointerType(), loopBound);
        LValue currentIndex = m_out.sub(previousIndex, m_out.intPtrOne);
        m_out.store64(
            m_out.constInt64(JSValue::encode(jsUndefined())),
            m_out.baseIndex(m_heaps.variables, targetStart, currentIndex));
        ValueFromBlock nextIndex = m_out.anchor(currentIndex);
        m_out.addIncomingToPhi(previousIndex, nextIndex);
        m_out.branch(
            m_out.above(currentIndex, lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry));

        // Then copy slots [0, length) from the source frame to the target frame,
        // also counting downwards.
        m_out.appendTo(mainLoopEntry, mainLoop);
        loopBound = m_out.anchor(lengthAsPtr);
        m_out.branch(m_out.notNull(lengthAsPtr), unsure(mainLoop), unsure(continuation));

        m_out.appendTo(mainLoop, continuation);
        previousIndex = m_out.phi(pointerType(), loopBound);
        currentIndex = m_out.sub(previousIndex, m_out.intPtrOne);
        LValue value = m_out.load64(
            m_out.baseIndex(m_heaps.variables, sourceStart, currentIndex));
        m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, currentIndex));
        nextIndex = m_out.anchor(currentIndex);
        m_out.addIncomingToPhi(previousIndex, nextIndex);
        m_out.branch(m_out.isNull(currentIndex), unsure(continuation), unsure(mainLoop));

        m_out.appendTo(continuation, lastNext);
    }
9065
    // Returns an Int32 LValue holding max(argumentsLength - numberOfArgumentsToSkip, 0)
    // for the given inline call frame. Folds to a constant when the length is known
    // statically; otherwise emits the clamped subtraction as B3 control flow.
    LValue getSpreadLengthFromInlineCallFrame(InlineCallFrame* inlineCallFrame, unsigned numberOfArgumentsToSkip)
    {
        ArgumentsLength argumentsLength = getArgumentsLength(inlineCallFrame);
        if (argumentsLength.isKnown) {
            unsigned knownLength = argumentsLength.known;
            if (knownLength >= numberOfArgumentsToSkip)
                knownLength = knownLength - numberOfArgumentsToSkip;
            else
                knownLength = 0;
            return m_out.constInt32(knownLength);
        }


        // We need to perform the same logical operation as the code above, but through dynamic operations.
        if (!numberOfArgumentsToSkip)
            return argumentsLength.value;

        LBasicBlock isLarger = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // length <= skip count produces 0; otherwise subtract the skip count.
        ValueFromBlock smallerOrEqualLengthResult = m_out.anchor(m_out.constInt32(0));
        m_out.branch(
            m_out.above(argumentsLength.value, m_out.constInt32(numberOfArgumentsToSkip)), unsure(isLarger), unsure(continuation));
        LBasicBlock lastNext = m_out.appendTo(isLarger, continuation);
        ValueFromBlock largerLengthResult = m_out.anchor(m_out.sub(argumentsLength.value, m_out.constInt32(numberOfArgumentsToSkip)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, smallerOrEqualLengthResult, largerLengthResult);
    }
9096
    void compileForwardVarargsWithSpread()
    {
        // Cache one spread-length computation per inline call frame so multiple
        // PhantomCreateRest nodes over the same frame don't recompute it.
        HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;

        Node* arguments = m_node->child1().node();
        RELEASE_ASSERT(arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread);

        unsigned numberOfStaticArguments = 0;
        Vector<LValue, 2> spreadLengths;

        // First pass: walk the phantom tree counting statically-known arguments and
        // collecting the dynamic lengths contributed by rest-argument spreads.
        auto collectArgumentCount = recursableLambda([&](auto self, Node* target) -> void {
            if (target->op() == PhantomSpread) {
                self(target->child1().node());
                return;
            }

            if (target->op() == PhantomNewArrayWithSpread) {
                // The bit vector marks which children are themselves spreads.
                BitVector* bitVector = target->bitVector();
                for (unsigned i = 0; i < target->numChildren(); i++) {
                    if (bitVector->get(i))
                        self(m_graph.varArgChild(target, i).node());
                    else
                        ++numberOfStaticArguments;
                }
                return;
            }

            if (target->op() == PhantomNewArrayBuffer) {
                numberOfStaticArguments += target->castOperand<JSImmutableButterfly*>()->length();
                return;
            }

            ASSERT(target->op() == PhantomCreateRest);
            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();
            unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
            spreadLengths.append(cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                return this->getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
            }).iterator->value);
        });

        collectArgumentCount(arguments);
        // Total = 1 (for |this|) + static arguments + all dynamic spread lengths.
        LValue lengthIncludingThis = m_out.constInt32(1 + numberOfStaticArguments);
        for (LValue length : spreadLengths)
            lengthIncludingThis = m_out.add(lengthIncludingThis, length);

        LoadVarargsData* data = m_node->loadVarargsData();
        // Bail out if the flattened arguments won't fit in the space we allotted.
        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));

        m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));

        LValue targetStart = addressFor(data->machineStart).value();

        // Second pass: walk the same tree again, emitting the stores. storeIndex is
        // the running slot index in the target frame; each case returns the updated
        // index so siblings are laid out contiguously.
        auto forwardSpread = recursableLambda([this, &cachedSpreadLengths, &targetStart](auto self, Node* target, LValue storeIndex) -> LValue {
            if (target->op() == PhantomSpread)
                return self(target->child1().node(), storeIndex);

            if (target->op() == PhantomNewArrayWithSpread) {
                BitVector* bitVector = target->bitVector();
                for (unsigned i = 0; i < target->numChildren(); i++) {
                    if (bitVector->get(i))
                        storeIndex = self(m_graph.varArgChild(target, i).node(), storeIndex);
                    else {
                        LValue value = this->lowJSValue(m_graph.varArgChild(target, i));
                        m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, storeIndex));
                        storeIndex = m_out.add(m_out.constIntPtr(1), storeIndex);
                    }
                }
                return storeIndex;
            }

            if (target->op() == PhantomNewArrayBuffer) {
                auto* array = target->castOperand<JSImmutableButterfly*>();
                for (unsigned i = 0; i < array->length(); i++) {
                    // Because forwarded values are drained as JSValue, we should not generate value
                    // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                    int64_t value = JSValue::encode(array->get(i));
                    m_out.store64(m_out.constInt64(value), m_out.baseIndex(m_heaps.variables, targetStart, storeIndex, JSValue(), (Checked<int32_t>(sizeof(Register)) * i).unsafeGet()));
                }
                return m_out.add(m_out.constIntPtr(array->length()), storeIndex);
            }

            RELEASE_ASSERT(target->op() == PhantomCreateRest);
            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();

            // Copy [0, spreadLength) from the source frame's arguments into the
            // target frame, advancing storeIndex as we go.
            LValue sourceStart = this->getArgumentsStart(inlineCallFrame, target->numberOfArgumentsToSkip());
            LValue spreadLength = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));

            LBasicBlock loop = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            ValueFromBlock startLoadIndex = m_out.anchor(m_out.constIntPtr(0));
            ValueFromBlock startStoreIndex = m_out.anchor(storeIndex);
            ValueFromBlock startStoreIndexForEnd = m_out.anchor(storeIndex);

            m_out.branch(m_out.isZero64(spreadLength), unsure(continuation), unsure(loop));

            LBasicBlock lastNext = m_out.appendTo(loop, continuation);
            LValue loopStoreIndex = m_out.phi(Int64, startStoreIndex);
            LValue loadIndex = m_out.phi(Int64, startLoadIndex);
            LValue value = m_out.load64(
                m_out.baseIndex(m_heaps.variables, sourceStart, loadIndex));
            m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, loopStoreIndex));
            LValue nextLoadIndex = m_out.add(m_out.constIntPtr(1), loadIndex);
            m_out.addIncomingToPhi(loadIndex, m_out.anchor(nextLoadIndex));
            LValue nextStoreIndex = m_out.add(m_out.constIntPtr(1), loopStoreIndex);
            m_out.addIncomingToPhi(loopStoreIndex, m_out.anchor(nextStoreIndex));
            ValueFromBlock loopStoreIndexForEnd = m_out.anchor(nextStoreIndex);
            m_out.branch(m_out.below(nextLoadIndex, spreadLength), unsure(loop), unsure(continuation));

            m_out.appendTo(continuation, lastNext);
            // The resulting store index is either the untouched input (empty spread)
            // or the final index produced by the copy loop.
            return m_out.phi(Int64, startStoreIndexForEnd, loopStoreIndexForEnd);
        });

        LValue storeIndex = forwardSpread(arguments, m_out.constIntPtr(0));

        // Finally, pad slots [storeIndex, mandatoryMinimum) with undefined, if any.
        LBasicBlock undefinedLoop = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        ValueFromBlock startStoreIndex = m_out.anchor(storeIndex);
        LValue loopBoundValue = m_out.constIntPtr(data->mandatoryMinimum);
        m_out.branch(m_out.below(storeIndex, loopBoundValue),
            unsure(undefinedLoop), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(undefinedLoop, continuation);
        LValue loopStoreIndex = m_out.phi(Int64, startStoreIndex);
        m_out.store64(
            m_out.constInt64(JSValue::encode(jsUndefined())),
            m_out.baseIndex(m_heaps.variables, targetStart, loopStoreIndex));
        LValue nextIndex = m_out.add(loopStoreIndex, m_out.constIntPtr(1));
        m_out.addIncomingToPhi(loopStoreIndex, m_out.anchor(nextIndex));
        m_out.branch(
            m_out.below(nextIndex, loopBoundValue), unsure(undefinedLoop), unsure(continuation));

        m_out.appendTo(continuation, lastNext);
    }
9233
9234 void compileJump()
9235 {
9236 m_out.jump(lowBlock(m_node->targetBlock()));
9237 }
9238
9239 void compileBranch()
9240 {
9241 m_out.branch(
9242 boolify(m_node->child1()),
9243 WeightedTarget(
9244 lowBlock(m_node->branchData()->taken.block),
9245 m_node->branchData()->taken.count),
9246 WeightedTarget(
9247 lowBlock(m_node->branchData()->notTaken.block),
9248 m_node->branchData()->notTaken.count));
9249 }
9250
    void compileSwitch()
    {
        SwitchData* data = m_node->switchData();
        switch (data->kind) {
        case SwitchImm: {
            // Funnel every way of producing an int32 discriminant into a single
            // block that performs the actual switch.
            Vector<ValueFromBlock, 2> intValues;
            LBasicBlock switchOnInts = m_out.newBlock();

            LBasicBlock lastNext = m_out.appendTo(m_out.m_block, switchOnInts);

            switch (m_node->child1().useKind()) {
            case Int32Use: {
                intValues.append(m_out.anchor(lowInt32(m_node->child1())));
                m_out.jump(switchOnInts);
                break;
            }

            case UntypedUse: {
                LBasicBlock isInt = m_out.newBlock();
                LBasicBlock isNotInt = m_out.newBlock();
                LBasicBlock isDouble = m_out.newBlock();

                LValue boxedValue = lowJSValue(m_node->child1());
                m_out.branch(isNotInt32(boxedValue), unsure(isNotInt), unsure(isInt));

                LBasicBlock innerLastNext = m_out.appendTo(isInt, isNotInt);

                intValues.append(m_out.anchor(unboxInt32(boxedValue)));
                m_out.jump(switchOnInts);

                // Non-int32, non-double values (cells and misc) take the fall-through.
                m_out.appendTo(isNotInt, isDouble);
                m_out.branch(
                    isCellOrMisc(boxedValue, provenType(m_node->child1())),
                    usually(lowBlock(data->fallThrough.block)), rarely(isDouble));

                // A double only participates in the switch if it round-trips through
                // int32 exactly; otherwise it falls through.
                m_out.appendTo(isDouble, innerLastNext);
                LValue doubleValue = unboxDouble(boxedValue);
                LValue intInDouble = m_out.doubleToInt(doubleValue);
                intValues.append(m_out.anchor(intInDouble));
                m_out.branch(
                    m_out.doubleEqual(m_out.intToDouble(intInDouble), doubleValue),
                    unsure(switchOnInts), unsure(lowBlock(data->fallThrough.block)));
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                break;
            }

            m_out.appendTo(switchOnInts, lastNext);
            buildSwitch(data, Int32, m_out.phi(Int32, intValues));
            return;
        }

        case SwitchChar: {
            LValue stringValue;

            // FIXME: We should use something other than unsure() for the branch weight
            // of the fallThrough block. The main challenge is just that we have multiple
            // branches to fallThrough but a single count, so we would need to divvy it up
            // among the different lowered branches.
            // https://bugs.webkit.org/show_bug.cgi?id=129082

            switch (m_node->child1().useKind()) {
            case StringUse: {
                stringValue = lowString(m_node->child1());
                break;
            }

            case UntypedUse: {
                // Anything that isn't a string cell falls through.
                LValue unboxedValue = lowJSValue(m_node->child1());

                LBasicBlock isCellCase = m_out.newBlock();
                LBasicBlock isStringCase = m_out.newBlock();

                m_out.branch(
                    isNotCell(unboxedValue, provenType(m_node->child1())),
                    unsure(lowBlock(data->fallThrough.block)), unsure(isCellCase));

                LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
                LValue cellValue = unboxedValue;
                m_out.branch(
                    isNotString(cellValue, provenType(m_node->child1())),
                    unsure(lowBlock(data->fallThrough.block)), unsure(isStringCase));

                m_out.appendTo(isStringCase, lastNext);
                stringValue = cellValue;
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                break;
            }

            LBasicBlock lengthIs1 = m_out.newBlock();
            LBasicBlock needResolution = m_out.newBlock();
            LBasicBlock resolved = m_out.newBlock();
            LBasicBlock is8Bit = m_out.newBlock();
            LBasicBlock is16Bit = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Rope strings have no flat impl; resolve them via a slow call first.
            ValueFromBlock fastValue = m_out.anchor(m_out.loadPtr(stringValue, m_heaps.JSString_value));
            m_out.branch(
                isRopeString(stringValue, m_node->child1()),
                rarely(needResolution), usually(resolved));

            LBasicBlock lastNext = m_out.appendTo(needResolution, resolved);
            ValueFromBlock slowValue = m_out.anchor(
                vmCall(pointerType(), m_out.operation(operationResolveRope), m_callFrame, stringValue));
            m_out.jump(resolved);

            // Only single-character strings participate in a char switch.
            m_out.appendTo(resolved, lengthIs1);
            LValue value = m_out.phi(pointerType(), fastValue, slowValue);
            m_out.branch(
                m_out.notEqual(
                    m_out.load32NonNegative(value, m_heaps.StringImpl_length),
                    m_out.int32One),
                unsure(lowBlock(data->fallThrough.block)), unsure(lengthIs1));

            // Load the single character with the width matching the impl's encoding.
            m_out.appendTo(lengthIs1, is8Bit);
            LValue characterData = m_out.loadPtr(value, m_heaps.StringImpl_data);
            m_out.branch(
                m_out.testNonZero32(
                    m_out.load32(value, m_heaps.StringImpl_hashAndFlags),
                    m_out.constInt32(StringImpl::flagIs8Bit())),
                unsure(is8Bit), unsure(is16Bit));

            Vector<ValueFromBlock, 2> characters;
            m_out.appendTo(is8Bit, is16Bit);
            characters.append(m_out.anchor(m_out.load8ZeroExt32(characterData, m_heaps.characters8[0])));
            m_out.jump(continuation);

            m_out.appendTo(is16Bit, continuation);
            characters.append(m_out.anchor(m_out.load16ZeroExt32(characterData, m_heaps.characters16[0])));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            buildSwitch(data, Int32, m_out.phi(Int32, characters));
            return;
        }

        case SwitchString: {
            switch (m_node->child1().useKind()) {
            case StringIdentUse: {
                // Atomized identifiers can be switched on by StringImpl pointer.
                LValue stringImpl = lowStringIdent(m_node->child1());

                Vector<SwitchCase> cases;
                for (unsigned i = 0; i < data->cases.size(); ++i) {
                    LValue value = m_out.constIntPtr(data->cases[i].value.stringImpl());
                    LBasicBlock block = lowBlock(data->cases[i].target.block);
                    Weight weight = Weight(data->cases[i].target.count);
                    cases.append(SwitchCase(value, block, weight));
                }

                m_out.switchInstruction(
                    stringImpl, cases, lowBlock(data->fallThrough.block),
                    Weight(data->fallThrough.count));
                return;
            }

            case StringUse: {
                switchString(data, lowString(m_node->child1()), m_node->child1());
                return;
            }

            case UntypedUse: {
                // Non-cells and non-strings fall through; strings take the full
                // content-based switch.
                LValue value = lowJSValue(m_node->child1());

                LBasicBlock isCellBlock = m_out.newBlock();
                LBasicBlock isStringBlock = m_out.newBlock();

                m_out.branch(
                    isCell(value, provenType(m_node->child1())),
                    unsure(isCellBlock), unsure(lowBlock(data->fallThrough.block)));

                LBasicBlock lastNext = m_out.appendTo(isCellBlock, isStringBlock);

                m_out.branch(
                    isString(value, provenType(m_node->child1())),
                    unsure(isStringBlock), unsure(lowBlock(data->fallThrough.block)));

                m_out.appendTo(isStringBlock, lastNext);

                switchString(data, value, m_node->child1());
                return;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                return;
            }
            return;
        }

        case SwitchCell: {
            // Switch on the cell pointer itself; non-cells fall through.
            LValue cell;
            switch (m_node->child1().useKind()) {
            case CellUse: {
                cell = lowCell(m_node->child1());
                break;
            }

            case UntypedUse: {
                LValue value = lowJSValue(m_node->child1());
                LBasicBlock cellCase = m_out.newBlock();
                m_out.branch(
                    isCell(value, provenType(m_node->child1())),
                    unsure(cellCase), unsure(lowBlock(data->fallThrough.block)));
                m_out.appendTo(cellCase);
                cell = value;
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                return;
            }

            buildSwitch(m_node->switchData(), pointerType(), cell);
            return;
        } }

        DFG_CRASH(m_graph, m_node, "Bad switch kind");
    }
9477
9478 void compileEntrySwitch()
9479 {
9480 Vector<LBasicBlock> successors;
9481 for (DFG::BasicBlock* successor : m_node->entrySwitchData()->cases)
9482 successors.append(lowBlock(successor));
9483 m_out.entrySwitch(successors);
9484 }
9485
9486 void compileReturn()
9487 {
9488 m_out.ret(lowJSValue(m_node->child1()));
9489 }
9490
    void compileForceOSRExit()
    {
        // Unconditionally terminate compilation of this path with an OSR exit,
        // citing inadequate profiling coverage as the exit reason.
        terminate(InadequateCoverage);
    }
9495
    void compileCPUIntrinsic()
    {
        // Lowers the CPU introspection intrinsics (mfence/cpuid/pause/rdtsc) to
        // patchpoints that emit the raw instruction.
        // NOTE(review): outside CPU(X86_64) this emits nothing and sets no result;
        // presumably these intrinsic nodes are only generated on x86_64 — confirm.
#if CPU(X86_64)
        Intrinsic intrinsic = m_node->intrinsic();
        switch (intrinsic) {
        case CPUMfenceIntrinsic:
        case CPUCpuidIntrinsic:
        case CPUPauseIntrinsic: {
            // These three produce no meaningful value; model them as an opaque
            // call so B3 will not reorder or eliminate them.
            PatchpointValue* patchpoint = m_out.patchpoint(Void);
            patchpoint->effects = Effects::forCall();
            // cpuid writes eax/ebx/ecx/edx, so tell B3 those registers die here.
            if (intrinsic == CPUCpuidIntrinsic)
                patchpoint->clobber(RegisterSet { X86Registers::eax, X86Registers::ebx, X86Registers::ecx, X86Registers::edx });

            patchpoint->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) {
                switch (intrinsic) {
                case CPUMfenceIntrinsic:
                    jit.mfence();
                    break;
                case CPUCpuidIntrinsic:
                    jit.cpuid();
                    break;
                case CPUPauseIntrinsic:
                    jit.pause();
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                }
            });
            // The JS-visible result of these intrinsics is undefined.
            setJSValue(m_out.constInt64(JSValue::encode(jsUndefined())));
            break;
        }
        case CPURdtscIntrinsic: {
            PatchpointValue* patchpoint = m_out.patchpoint(Int32);
            patchpoint->effects = Effects::forCall();
            patchpoint->clobber(RegisterSet { X86Registers::eax, X86Registers::edx });
            // The low 32-bits of rdtsc go into rax.
            patchpoint->resultConstraint = ValueRep::reg(X86Registers::eax);
            patchpoint->setGenerator( [=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) {
                jit.rdtsc();
            });
            // Only the low 32 bits are surfaced to JS, boxed as an int32.
            setJSValue(boxInt32(patchpoint));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();

        }
#endif
    }
9545
9546 void compileThrow()
9547 {
9548 LValue error = lowJSValue(m_node->child1());
9549 vmCall(Void, m_out.operation(operationThrowDFG), m_callFrame, error);
9550 // vmCall() does an exception check so we should never reach this.
9551 m_out.unreachable();
9552 }
9553
9554 void compileThrowStaticError()
9555 {
9556 LValue errorMessage = lowString(m_node->child1());
9557 LValue errorType = m_out.constInt32(m_node->errorType());
9558 vmCall(Void, m_out.operation(operationThrowStaticError), m_callFrame, errorMessage, errorType);
9559 // vmCall() does an exception check so we should never reach this.
9560 m_out.unreachable();
9561 }
9562
    void compileInvalidationPoint()
    {
        // Emits a patchable no-op (a watchpoint label with sufficient nop shadow)
        // that can later be overwritten with a jump to an OSR exit when this code
        // block is invalidated. The exit itself is emitted lazily.
        if (verboseCompilationEnabled())
            dataLog("    Invalidation point with availability: ", availabilityMap(), "\n");

        DFG_ASSERT(m_graph, m_node, m_origin.exitOK);

        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        OSRExitDescriptor* descriptor = appendOSRExitDescriptor(noValue(), nullptr);
        NodeOrigin origin = m_origin;
        patchpoint->appendColdAnys(buildExitArguments(descriptor, origin.forExit, noValue()));

        // Captured by copy below: the generator and link task run after this
        // stack frame is gone.
        State* state = &m_ftlState;

        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
                // The MacroAssembler knows more about this than B3 does. The watchpointLabel() method
                // will ensure that this is followed by a nop shadow but only when this is actually
                // necessary.
                CCallHelpers::Label label = jit.watchpointLabel();

                RefPtr<OSRExitHandle> handle = descriptor->emitOSRExitLater(
                    *state, UncountableInvalidation, origin, params);

                RefPtr<JITCode> jitCode = state->jitCode.get();

                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        // Record the (label -> exit) pair so invalidation can patch
                        // the nop into a jump to the OSR exit.
                        JumpReplacement jumpReplacement(
                            linkBuffer.locationOf<JSInternalPtrTag>(label),
                            linkBuffer.locationOf<OSRExitPtrTag>(handle->label));
                        jitCode->common.jumpReplacements.append(jumpReplacement);
                    });
            });

        // Set some obvious things.
        patchpoint->effects.terminal = false;
        patchpoint->effects.writesLocalState = false;
        patchpoint->effects.readsLocalState = false;

        // This is how we tell B3 about the possibility of jump replacement.
        patchpoint->effects.exitsSideways = true;

        // It's not possible for some prior branch to determine the safety of this operation. It's always
        // fine to execute this on some path that wouldn't have originally executed it before
        // optimization.
        patchpoint->effects.controlDependent = false;

        // If this falls through then it won't write anything.
        patchpoint->effects.writes = HeapRange();

        // When this abruptly terminates, it could read any heap location.
        patchpoint->effects.reads = HeapRange::top();
    }
9617
9618 void compileIsEmpty()
9619 {
9620 setBoolean(m_out.isZero64(lowJSValue(m_node->child1())));
9621 }
9622
    void compileIsUndefined()
    {
        // Delegates to the shared null/undefined comparison helper, configured to
        // match undefined (with masquerades-as-undefined cells treated as false).
        setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualUndefined));
    }
9627
9628 void compileIsUndefinedOrNull()
9629 {
9630 setBoolean(isOther(lowJSValue(m_node->child1()), provenType(m_node->child1())));
9631 }
9632
9633 void compileIsBoolean()
9634 {
9635 setBoolean(isBoolean(lowJSValue(m_node->child1()), provenType(m_node->child1())));
9636 }
9637
9638 void compileIsNumber()
9639 {
9640 setBoolean(isNumber(lowJSValue(m_node->child1()), provenType(m_node->child1())));
9641 }
9642
    void compileNumberIsInteger()
    {
        // Number.isInteger(x): true for boxed int32s; false for non-numbers and
        // for NaN/Infinity; for finite doubles, true iff truncation toward zero
        // leaves the value unchanged.
        LBasicBlock notInt32 = m_out.newBlock();
        LBasicBlock doubleCase = m_out.newBlock();
        LBasicBlock doubleNotNanOrInf = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue input = lowJSValue(m_node->child1());

        // Boxed int32 is trivially an integer.
        ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(
            isInt32(input, provenType(m_node->child1())), unsure(continuation), unsure(notInt32));

        // Not an int32: non-numbers are false. This anchor also supplies the
        // false result for the NaN/Infinity edge taken from doubleCase below.
        LBasicBlock lastNext = m_out.appendTo(notInt32, doubleCase);
        ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isNotNumber(input, provenType(m_node->child1())), unsure(continuation), unsure(doubleCase));

        // Double: extract the IEEE-754 exponent (bits 52..62). An all-ones
        // exponent (0x7ff) means NaN or +/-Infinity -> false.
        m_out.appendTo(doubleCase, doubleNotNanOrInf);
        LValue doubleAsInt;
        LValue asDouble = unboxDouble(input, &doubleAsInt);
        LValue expBits = m_out.bitAnd(m_out.lShr(doubleAsInt, m_out.constInt32(52)), m_out.constInt64(0x7ff));
        m_out.branch(
            m_out.equal(expBits, m_out.constInt64(0x7ff)),
            unsure(continuation), unsure(doubleNotNanOrInf));

        // Finite double: compare the value with its round-toward-zero form;
        // equality means the double is an integer.
        m_out.appendTo(doubleNotNanOrInf, continuation);
        PatchpointValue* patchpoint = m_out.patchpoint(Int32);
        patchpoint->appendSomeRegister(asDouble);
        patchpoint->numFPScratchRegisters = 1;
        patchpoint->effects = Effects::none();
        patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            GPRReg result = params[0].gpr();
            FPRReg input = params[1].fpr();
            FPRReg temp = params.fpScratch(0);
            jit.roundTowardZeroDouble(input, temp);
            jit.compareDouble(MacroAssembler::DoubleEqual, input, temp, result);
        });
        ValueFromBlock patchpointResult = m_out.anchor(patchpoint);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, trueResult, falseResult, patchpointResult));
    }
9687
    void compileIsCellWithType()
    {
        // Tests whether the operand is a cell of the queried JSType. For
        // UntypedUse we branch on cell-ness first (non-cells answer false); for
        // CellUse the speculation already guarantees a cell, so the type check
        // alone suffices.
        if (m_node->child1().useKind() == UntypedUse) {
            LValue value = lowJSValue(m_node->child1());

            LBasicBlock isCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
            m_out.branch(
                isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));

            LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
            ValueFromBlock cellResult = m_out.anchor(isCellWithType(value, m_node->queriedType(), m_node->speculatedTypeForQuery(), provenType(m_node->child1())));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, notCellResult, cellResult));
        } else {
            ASSERT(m_node->child1().useKind() == CellUse);
            setBoolean(isCellWithType(lowCell(m_node->child1()), m_node->queriedType(), m_node->speculatedTypeForQuery(), provenType(m_node->child1())));
        }
    }
9711
    void compileIsObject()
    {
        // IsObject on an untyped value: non-cells are false; cells are answered
        // by the isObject() type check. Results merge in a phi.
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
        ValueFromBlock cellResult = m_out.anchor(isObject(value, provenType(m_node->child1())));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, notCellResult, cellResult));
    }
9730
9731 LValue wangsInt64Hash(LValue input)
9732 {
9733 // key += ~(key << 32);
9734 LValue key = input;
9735 LValue temp = key;
9736 temp = m_out.shl(temp, m_out.constInt32(32));
9737 temp = m_out.bitNot(temp);
9738 key = m_out.add(key, temp);
9739 // key ^= (key >> 22);
9740 temp = key;
9741 temp = m_out.lShr(temp, m_out.constInt32(22));
9742 key = m_out.bitXor(key, temp);
9743 // key += ~(key << 13);
9744 temp = key;
9745 temp = m_out.shl(temp, m_out.constInt32(13));
9746 temp = m_out.bitNot(temp);
9747 key = m_out.add(key, temp);
9748 // key ^= (key >> 8);
9749 temp = key;
9750 temp = m_out.lShr(temp, m_out.constInt32(8));
9751 key = m_out.bitXor(key, temp);
9752 // key += (key << 3);
9753 temp = key;
9754 temp = m_out.shl(temp, m_out.constInt32(3));
9755 key = m_out.add(key, temp);
9756 // key ^= (key >> 15);
9757 temp = key;
9758 temp = m_out.lShr(temp, m_out.constInt32(15));
9759 key = m_out.bitXor(key, temp);
9760 // key += ~(key << 27);
9761 temp = key;
9762 temp = m_out.shl(temp, m_out.constInt32(27));
9763 temp = m_out.bitNot(temp);
9764 key = m_out.add(key, temp);
9765 // key ^= (key >> 31);
9766 temp = key;
9767 temp = m_out.lShr(temp, m_out.constInt32(31));
9768 key = m_out.bitXor(key, temp);
9769 key = m_out.castToInt32(key);
9770
9771 return key;
9772 }
9773
    // Computes the Map/Set hash of a JSString. Fast path reads the precomputed
    // StringImpl hash; rope strings and strings whose hash is not yet computed
    // (hash == 0) fall back to operationMapHash.
    LValue mapHashString(LValue string, Edge& edge)
    {
        LBasicBlock nonEmptyStringCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isRopeString(string, edge), rarely(slowCase), usually(nonEmptyStringCase));

        LBasicBlock lastNext = m_out.appendTo(nonEmptyStringCase, slowCase);
        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
        // The hash lives above the flag bits of hashAndFlags; shift them off.
        LValue hash = m_out.lShr(m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
        ValueFromBlock nonEmptyStringHashResult = m_out.anchor(hash);
        // A zero hash means it has not been computed yet; compute it slowly.
        m_out.branch(m_out.equal(hash, m_out.constInt32(0)),
            unsure(slowCase), unsure(continuation));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int32, m_out.operation(operationMapHash), m_callFrame, string));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, slowResult, nonEmptyStringHashResult);
    }
9797
    void compileMapHash()
    {
        // Computes the hash used for Map/Set lookups. Typed uses that box to a
        // plain 64-bit pattern hash the bits directly; strings use the cached
        // StringImpl hash (or the runtime for ropes / uncomputed hashes).
        switch (m_node->child1().useKind()) {
        case BooleanUse:
        case Int32Use:
        case SymbolUse:
        case ObjectUse: {
            // These uses are keyed by their bit pattern, so hash the raw bits.
            LValue key = lowJSValue(m_node->child1(), ManualOperandSpeculation);
            speculate(m_node->child1());
            setInt32(wangsInt64Hash(key));
            return;
        }

        case CellUse: {
            // A cell may or may not be a string; dispatch on the type byte.
            LBasicBlock isString = m_out.newBlock();
            LBasicBlock notString = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue value = lowCell(m_node->child1());
            LValue isStringValue = m_out.equal(m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoType), m_out.constInt32(StringType));
            m_out.branch(
                isStringValue, unsure(isString), unsure(notString));

            LBasicBlock lastNext = m_out.appendTo(isString, notString);
            ValueFromBlock stringResult = m_out.anchor(mapHashString(value, m_node->child1()));
            m_out.jump(continuation);

            m_out.appendTo(notString, continuation);
            ValueFromBlock notStringResult = m_out.anchor(wangsInt64Hash(value));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setInt32(m_out.phi(Int32, stringResult, notStringResult));
            return;
        }

        case StringUse: {
            LValue string = lowString(m_node->child1());
            setInt32(mapHashString(string, m_node->child1()));
            return;
        }

        default:
            RELEASE_ASSERT(m_node->child1().useKind() == UntypedUse);
            break;
        }

        // UntypedUse: fully dynamic dispatch — cell? string? rope? — with a
        // single shared slow path (operationMapHash) and a three-way phi.
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock straightHash = m_out.newBlock();
        LBasicBlock isStringCase = m_out.newBlock();
        LBasicBlock nonEmptyStringCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(straightHash));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
        LValue isString = m_out.equal(m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoType), m_out.constInt32(StringType));
        m_out.branch(
            isString, unsure(isStringCase), unsure(straightHash));

        m_out.appendTo(isStringCase, nonEmptyStringCase);
        m_out.branch(isRopeString(value, m_node->child1()), rarely(slowCase), usually(nonEmptyStringCase));

        m_out.appendTo(nonEmptyStringCase, straightHash);
        LValue stringImpl = m_out.loadPtr(value, m_heaps.JSString_value);
        // Cached hash sits above the flag bits; zero means "not computed yet".
        LValue hash = m_out.lShr(m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
        ValueFromBlock nonEmptyStringHashResult = m_out.anchor(hash);
        m_out.branch(m_out.equal(hash, m_out.constInt32(0)),
            unsure(slowCase), unsure(continuation));

        // Non-cells and non-string cells hash their raw 64-bit pattern.
        m_out.appendTo(straightHash, slowCase);
        ValueFromBlock fastResult = m_out.anchor(wangsInt64Hash(value));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int32, m_out.operation(operationMapHash), m_callFrame, value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setInt32(m_out.phi(Int32, fastResult, slowResult, nonEmptyStringHashResult));
    }
9884
    void compileNormalizeMapKey()
    {
        // Canonicalizes a JSValue used as a Map/Set key so equal numbers hash
        // identically: NaN collapses to the canonical NaN, and doubles with an
        // exact int32 value are re-boxed as int32. Everything else passes through.
        ASSERT(m_node->child1().useKind() == UntypedUse);

        LBasicBlock isNumberCase = m_out.newBlock();
        LBasicBlock notInt32NumberCase = m_out.newBlock();
        LBasicBlock notNaNCase = m_out.newBlock();
        LBasicBlock convertibleCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(isNumberCase);

        // Non-numbers and boxed int32s are already in canonical form.
        LValue key = lowJSValue(m_node->child1());
        ValueFromBlock fastResult = m_out.anchor(key);
        m_out.branch(isNotNumber(key), unsure(continuation), unsure(isNumberCase));

        m_out.appendTo(isNumberCase, notInt32NumberCase);
        m_out.branch(isInt32(key), unsure(continuation), unsure(notInt32NumberCase));

        // NaN compares unequal to itself; replace any NaN with the canonical one.
        m_out.appendTo(notInt32NumberCase, notNaNCase);
        LValue doubleValue = unboxDouble(key);
        ValueFromBlock normalizedNaNResult = m_out.anchor(m_out.constInt64(JSValue::encode(jsNaN())));
        m_out.branch(m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue), unsure(continuation), unsure(notNaNCase));

        // If truncating to int and converting back changes the value, the double
        // is not int32-representable; keep the double boxing.
        m_out.appendTo(notNaNCase, convertibleCase);
        LValue integerValue = m_out.doubleToInt(doubleValue);
        LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue);
        ValueFromBlock doubleResult = m_out.anchor(key);
        m_out.branch(m_out.doubleNotEqualOrUnordered(doubleValue, integerValueConvertedToDouble), unsure(continuation), unsure(convertibleCase));

        // Exact int32: re-box as an int32 JSValue.
        m_out.appendTo(convertibleCase, continuation);
        ValueFromBlock boxedIntResult = m_out.anchor(boxInt32(integerValue));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, normalizedNaNResult, doubleResult, boxedIntResult));
    }
9922
    void compileGetMapBucket()
    {
        // Inline open-addressing probe of a JSMap/JSSet hash table: starting at
        // the key's hash, linearly probe (index + 1, masked by capacity - 1)
        // until we find the key, an empty slot (not present), or a case we must
        // defer to the runtime (string comparisons). Returns the bucket, or the
        // VM's sentinel bucket when the key is absent.
        LBasicBlock loopStart = m_out.newBlock();
        LBasicBlock loopAround = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock notPresentInTable = m_out.newBlock();
        LBasicBlock notEmptyValue = m_out.newBlock();
        LBasicBlock notDeletedValue = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);

        LValue map;
        if (m_node->child1().useKind() == MapObjectUse)
            map = lowMapObject(m_node->child1());
        else if (m_node->child1().useKind() == SetObjectUse)
            map = lowSetObject(m_node->child1());
        else
            RELEASE_ASSERT_NOT_REACHED();

        LValue key = lowJSValue(m_node->child2(), ManualOperandSpeculation);
        if (m_node->child2().useKind() != UntypedUse)
            speculate(m_node->child2());

        LValue hash = lowInt32(m_node->child3());

        LValue buffer = m_out.loadPtr(map, m_heaps.HashMapImpl_buffer);
        // Capacity is a power of two, so capacity - 1 works as the index mask.
        LValue mask = m_out.sub(m_out.load32(map, m_heaps.HashMapImpl_capacity), m_out.int32One);

        ValueFromBlock indexStart = m_out.anchor(hash);
        m_out.jump(loopStart);

        m_out.appendTo(loopStart, notEmptyValue);
        LValue unmaskedIndex = m_out.phi(Int32, indexStart);
        LValue index = m_out.bitAnd(mask, unmaskedIndex);
        // FIXME: I think these buffers are caged?
        // https://bugs.webkit.org/show_bug.cgi?id=174925
        LValue hashMapBucket = m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), buffer, m_out.zeroExt(index, Int64), ScaleEight));
        ValueFromBlock bucketResult = m_out.anchor(hashMapBucket);
        // Empty slot terminates the probe: the key is not in the table.
        m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(bitwise_cast<intptr_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::emptyValue()))),
            unsure(notPresentInTable), unsure(notEmptyValue));

        // Deleted (tombstone) slot: keep probing.
        m_out.appendTo(notEmptyValue, notDeletedValue);
        m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(bitwise_cast<intptr_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::deletedValue()))),
            unsure(loopAround), unsure(notDeletedValue));

        m_out.appendTo(notDeletedValue, loopAround);
        LValue bucketKey = m_out.load64(hashMapBucket, m_heaps.HashMapBucket_key);

        // Perform Object.is()
        switch (m_node->child2().useKind()) {
        case BooleanUse:
        case Int32Use:
        case SymbolUse:
        case ObjectUse: {
            // These compare by bit pattern alone.
            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(loopAround));
            break;
        }
        case StringUse: {
            // Bit-equal strings match immediately; otherwise a string-vs-string
            // comparison needs the runtime (slowPath).
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, loopAround);
            m_out.branch(isString(bucketKey),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        case CellUse: {
            // Only a string key vs a string bucket key needs the runtime.
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();
            LBasicBlock bucketKeyIsString = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, bucketKeyIsString);
            m_out.branch(isString(bucketKey),
                unsure(bucketKeyIsString), unsure(loopAround));

            m_out.appendTo(bucketKeyIsString, loopAround);
            m_out.branch(isString(key),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        case UntypedUse: {
            // Fully dynamic: both sides must be string cells before deferring
            // to the runtime comparison.
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();
            LBasicBlock bothAreCells = m_out.newBlock();
            LBasicBlock bucketKeyIsString = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, bothAreCells);
            m_out.branch(isCell(key),
                unsure(bothAreCells), unsure(loopAround));

            m_out.appendTo(bothAreCells, bucketKeyIsString);
            m_out.branch(isString(bucketKey),
                unsure(bucketKeyIsString), unsure(loopAround));

            m_out.appendTo(bucketKeyIsString, loopAround);
            m_out.branch(isString(key),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        // Advance the probe: feed index + 1 back into the loop phi.
        m_out.appendTo(loopAround, slowPath);
        m_out.addIncomingToPhi(unmaskedIndex, m_out.anchor(m_out.add(index, m_out.int32One)));
        m_out.jump(loopStart);

        m_out.appendTo(slowPath, notPresentInTable);
        ValueFromBlock slowPathResult = m_out.anchor(vmCall(pointerType(),
            m_out.operation(m_node->child1().useKind() == MapObjectUse ? operationJSMapFindBucket : operationJSSetFindBucket), m_callFrame, map, key, hash));
        m_out.jump(continuation);

        // Key absent: answer the VM's shared sentinel bucket.
        m_out.appendTo(notPresentInTable, continuation);
        ValueFromBlock notPresentResult;
        if (m_node->child1().useKind() == MapObjectUse)
            notPresentResult = m_out.anchor(weakPointer(vm().sentinelMapBucket()));
        else if (m_node->child1().useKind() == SetObjectUse)
            notPresentResult = m_out.anchor(weakPointer(vm().sentinelSetBucket()));
        else
            RELEASE_ASSERT_NOT_REACHED();
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), bucketResult, slowPathResult, notPresentResult));
    }
10071
10072 void compileGetMapBucketHead()
10073 {
10074 LValue map;
10075 if (m_node->child1().useKind() == MapObjectUse)
10076 map = lowMapObject(m_node->child1());
10077 else if (m_node->child1().useKind() == SetObjectUse)
10078 map = lowSetObject(m_node->child1());
10079 else
10080 RELEASE_ASSERT_NOT_REACHED();
10081
10082 ASSERT(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::offsetOfHead() == HashMapImpl<HashMapBucket<HashMapBucketDataKeyValue>>::offsetOfHead());
10083 setJSValue(m_out.loadPtr(map, m_heaps.HashMapImpl_head));
10084 }
10085
    void compileGetMapBucketNext()
    {
        // Walks the bucket chain from the given bucket's successor, skipping
        // buckets whose key slot is zero (cleared entries), and returns the next
        // live bucket — or the VM's sentinel bucket at the end of the chain.
        LBasicBlock loopStart = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock noBucket = m_out.newBlock();
        LBasicBlock hasBucket = m_out.newBlock();
        LBasicBlock nextBucket = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);

        // next/key offsets agree between key-only and key-value bucket layouts,
        // so the same abstract heap fields serve Map and Set.
        ASSERT(HashMapBucket<HashMapBucketDataKey>::offsetOfNext() == HashMapBucket<HashMapBucketDataKeyValue>::offsetOfNext());
        ASSERT(HashMapBucket<HashMapBucketDataKey>::offsetOfKey() == HashMapBucket<HashMapBucketDataKeyValue>::offsetOfKey());
        LValue mapBucketPrev = lowCell(m_node->child1());
        ValueFromBlock mapBucketStart = m_out.anchor(m_out.loadPtr(mapBucketPrev, m_heaps.HashMapBucket_next));
        m_out.jump(loopStart);

        m_out.appendTo(loopStart, noBucket);
        LValue mapBucket = m_out.phi(pointerType(), mapBucketStart);
        m_out.branch(m_out.isNull(mapBucket), unsure(noBucket), unsure(hasBucket));

        // End of chain: answer the appropriate sentinel bucket.
        m_out.appendTo(noBucket, hasBucket);
        ValueFromBlock noBucketResult;
        if (m_node->bucketOwnerType() == BucketOwnerType::Map)
            noBucketResult = m_out.anchor(weakPointer(vm().sentinelMapBucket()));
        else {
            ASSERT(m_node->bucketOwnerType() == BucketOwnerType::Set);
            noBucketResult = m_out.anchor(weakPointer(vm().sentinelSetBucket()));
        }
        m_out.jump(continuation);

        // A zero key means this bucket is dead; keep walking.
        m_out.appendTo(hasBucket, nextBucket);
        ValueFromBlock bucketResult = m_out.anchor(mapBucket);
        m_out.branch(m_out.isZero64(m_out.load64(mapBucket, m_heaps.HashMapBucket_key)), unsure(nextBucket), unsure(continuation));

        m_out.appendTo(nextBucket, continuation);
        m_out.addIncomingToPhi(mapBucket, m_out.anchor(m_out.loadPtr(mapBucket, m_heaps.HashMapBucket_next)));
        m_out.jump(loopStart);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), noBucketResult, bucketResult));
    }
10127
10128 void compileLoadValueFromMapBucket()
10129 {
10130 LValue mapBucket = lowCell(m_node->child1());
10131 setJSValue(m_out.load64(mapBucket, m_heaps.HashMapBucket_value));
10132 }
10133
10134 void compileExtractValueFromWeakMapGet()
10135 {
10136 LValue value = lowJSValue(m_node->child1());
10137 setJSValue(m_out.select(m_out.isZero64(value),
10138 m_out.constInt64(JSValue::encode(jsUndefined())),
10139 value));
10140 }
10141
10142 void compileLoadKeyFromMapBucket()
10143 {
10144 LValue mapBucket = lowCell(m_node->child1());
10145 setJSValue(m_out.load64(mapBucket, m_heaps.HashMapBucket_key));
10146 }
10147
10148 void compileSetAdd()
10149 {
10150 LValue set = lowSetObject(m_node->child1());
10151 LValue key = lowJSValue(m_node->child2());
10152 LValue hash = lowInt32(m_node->child3());
10153
10154 setJSValue(vmCall(pointerType(), m_out.operation(operationSetAdd), m_callFrame, set, key, hash));
10155 }
10156
10157 void compileMapSet()
10158 {
10159 LValue map = lowMapObject(m_graph.varArgChild(m_node, 0));
10160 LValue key = lowJSValue(m_graph.varArgChild(m_node, 1));
10161 LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
10162 LValue hash = lowInt32(m_graph.varArgChild(m_node, 3));
10163
10164 setJSValue(vmCall(pointerType(), m_out.operation(operationMapSet), m_callFrame, map, key, value, hash));
10165 }
10166
    void compileWeakMapGet()
    {
        // Inline open-addressing probe of a WeakMap/WeakSet table. Keys are
        // objects compared by pointer, so the probe loop is simpler than the
        // JSMap one: match -> done; null key (empty slot) -> done (miss);
        // otherwise advance linearly (index + 1, masked).
        LBasicBlock loopStart = m_out.newBlock();
        LBasicBlock loopAround = m_out.newBlock();
        LBasicBlock notEqualValue = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);

        LValue weakMap;
        if (m_node->child1().useKind() == WeakMapObjectUse)
            weakMap = lowWeakMapObject(m_node->child1());
        else if (m_node->child1().useKind() == WeakSetObjectUse)
            weakMap = lowWeakSetObject(m_node->child1());
        else
            RELEASE_ASSERT_NOT_REACHED();
        LValue key = lowObject(m_node->child2());
        LValue hash = lowInt32(m_node->child3());

        LValue buffer = m_out.loadPtr(weakMap, m_heaps.WeakMapImpl_buffer);
        // Capacity is a power of two, so capacity - 1 works as the index mask.
        LValue mask = m_out.sub(m_out.load32(weakMap, m_heaps.WeakMapImpl_capacity), m_out.int32One);

        ValueFromBlock indexStart = m_out.anchor(hash);
        m_out.jump(loopStart);

        m_out.appendTo(loopStart, notEqualValue);
        LValue unmaskedIndex = m_out.phi(Int32, indexStart);
        LValue index = m_out.bitAnd(mask, unmaskedIndex);

        LValue bucket;

        // Buckets are stored inline in the buffer; the bucket address is
        // buffer + index * sizeof(bucket), done as a shift since the size is a
        // power of two (key-value buckets for WeakMap, key-only for WeakSet).
        if (m_node->child1().useKind() == WeakMapObjectUse) {
            static_assert(hasOneBitSet(sizeof(WeakMapBucket<WeakMapBucketDataKeyValue>)), "Should be a power of 2");
            bucket = m_out.add(buffer, m_out.shl(m_out.zeroExt(index, Int64), m_out.constInt32(getLSBSet(sizeof(WeakMapBucket<WeakMapBucketDataKeyValue>)))));
        } else {
            static_assert(hasOneBitSet(sizeof(WeakMapBucket<WeakMapBucketDataKey>)), "Should be a power of 2");
            bucket = m_out.add(buffer, m_out.shl(m_out.zeroExt(index, Int64), m_out.constInt32(getLSBSet(sizeof(WeakMapBucket<WeakMapBucketDataKey>)))));
        }

        LValue bucketKey = m_out.load64(bucket, m_heaps.WeakMapBucket_key);
        m_out.branch(m_out.equal(key, bucketKey), unsure(continuation), unsure(notEqualValue));

        // A null bucket key means an empty slot: probe ends in a miss.
        m_out.appendTo(notEqualValue, loopAround);
        m_out.branch(m_out.isNull(bucketKey), unsure(continuation), unsure(loopAround));

        m_out.appendTo(loopAround, continuation);
        m_out.addIncomingToPhi(unmaskedIndex, m_out.anchor(m_out.add(index, m_out.int32One)));
        m_out.jump(loopStart);

        // On a miss, bucketKey is null here, which doubles as the empty result.
        m_out.appendTo(continuation, lastNext);
        LValue result;
        if (m_node->child1().useKind() == WeakMapObjectUse)
            result = m_out.load64(bucket, m_heaps.WeakMapBucket_value);
        else
            result = bucketKey;
        setJSValue(result);
    }
10224
10225 void compileWeakSetAdd()
10226 {
10227 LValue set = lowWeakSetObject(m_node->child1());
10228 LValue key = lowObject(m_node->child2());
10229 LValue hash = lowInt32(m_node->child3());
10230
10231 vmCall(Void, m_out.operation(operationWeakSetAdd), m_callFrame, set, key, hash);
10232 }
10233
10234 void compileWeakMapSet()
10235 {
10236 LValue map = lowWeakMapObject(m_graph.varArgChild(m_node, 0));
10237 LValue key = lowObject(m_graph.varArgChild(m_node, 1));
10238 LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
10239 LValue hash = lowInt32(m_graph.varArgChild(m_node, 3));
10240
10241 vmCall(Void, m_out.operation(operationWeakMapSet), m_callFrame, map, key, value, hash);
10242 }
10243
    void compileIsObjectOrNull()
    {
        // Implements the "object or null" predicate used by typeof-style logic:
        // false for functions and non-object cells, true for ordinary objects
        // and for null, and a runtime call for exotic objects (which may
        // masquerade as undefined). Five results merge in one phi.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        Edge child = m_node->child1();
        LValue value = lowJSValue(child);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock notFunctionCase = m_out.newBlock();
        LBasicBlock objectCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock notCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase));

        // Functions answer false.
        LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase);
        ValueFromBlock isFunctionResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isFunction(value, provenType(child)),
            unsure(continuation), unsure(notFunctionCase));

        // Non-object cells (e.g. strings, symbols) answer false.
        m_out.appendTo(notFunctionCase, objectCase);
        ValueFromBlock notObjectResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isObject(value, provenType(child)),
            unsure(objectCase), unsure(continuation));

        // Ordinary objects answer true; exotic-for-typeof objects need the
        // runtime to decide.
        m_out.appendTo(objectCase, slowPath);
        ValueFromBlock objectResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(
            isExoticForTypeof(value, provenType(child)),
            rarely(slowPath), usually(continuation));

        m_out.appendTo(slowPath, notCellCase);
        VM& vm = this->vm();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationObjectIsObject, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
            }, value);
        ValueFromBlock slowResult = m_out.anchor(m_out.notZero64(slowResultValue));
        m_out.jump(continuation);

        // Of the non-cells, only null answers true.
        m_out.appendTo(notCellCase, continuation);
        LValue notCellResultValue = m_out.equal(value, m_out.constInt64(JSValue::encode(jsNull())));
        ValueFromBlock notCellResult = m_out.anchor(notCellResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(
            Int32,
            isFunctionResult, notObjectResult, objectResult, slowResult, notCellResult);
        setBoolean(result);
    }
10300
    // Lowers IsFunction. Non-cells answer false; function cells answer true;
    // other cells answer false unless they are "exotic for typeof", in which
    // case a lazy slow-path call to operationObjectIsFunction decides.
    void compileIsFunction()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        Edge child = m_node->child1();
        LValue value = lowJSValue(child);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock notFunctionCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Non-cells answer false.
        ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isCell(value, provenType(child)), unsure(cellCase), unsure(continuation));

        // Function cells answer true.
        LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase);
        ValueFromBlock functionResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(
            isFunction(value, provenType(child)),
            unsure(continuation), unsure(notFunctionCase));

        // Non-function cells answer false, unless exotic-for-typeof.
        m_out.appendTo(notFunctionCase, slowPath);
        ValueFromBlock objectResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isExoticForTypeof(value, provenType(child)),
            rarely(slowPath), usually(continuation));

        // Slow path: ask the runtime, then booleanize its non-null result.
        m_out.appendTo(slowPath, continuation);
        VM& vm = this->vm();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationObjectIsFunction, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
            }, value);
        ValueFromBlock slowResult = m_out.anchor(m_out.notNull(slowResultValue));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(
            Int32, notCellResult, functionResult, objectResult, slowResult);
        setBoolean(result);
    }
10345
10346 void compileIsTypedArrayView()
10347 {
10348 LValue value = lowJSValue(m_node->child1());
10349
10350 LBasicBlock isCellCase = m_out.newBlock();
10351 LBasicBlock continuation = m_out.newBlock();
10352
10353 ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
10354 m_out.branch(isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
10355
10356 LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
10357 ValueFromBlock cellResult = m_out.anchor(isTypedArrayView(value, provenType(m_node->child1())));
10358 m_out.jump(continuation);
10359
10360 m_out.appendTo(continuation, lastNext);
10361 setBoolean(m_out.phi(Int32, notCellResult, cellResult));
10362 }
10363
10364 void compileTypeOf()
10365 {
10366 Edge child = m_node->child1();
10367 LValue value = lowJSValue(child);
10368
10369 LBasicBlock continuation = m_out.newBlock();
10370 LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
10371
10372 Vector<ValueFromBlock> results;
10373
10374 buildTypeOf(
10375 child, value,
10376 [&] (TypeofType type) {
10377 results.append(m_out.anchor(weakPointer(vm().smallStrings.typeString(type))));
10378 m_out.jump(continuation);
10379 });
10380
10381 m_out.appendTo(continuation, lastNext);
10382 setJSValue(m_out.phi(Int64, results));
10383 }
10384
10385 void compileInByVal()
10386 {
10387 setJSValue(vmCall(Int64, m_out.operation(operationInByVal), m_callFrame, lowCell(m_node->child1()), lowJSValue(m_node->child2())));
10388 }
10389
    // Lowers InById via a patchpoint embedding a JITInByIdGenerator inline
    // cache: the fast path is emitted inline, and a late path links the IC's
    // slow-path jump to a call to operationInByIdOptimize, which can repatch
    // the stub.
    void compileInById()
    {
        Node* node = m_node;
        UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()];
        LValue base = lowCell(m_node->child1());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        // Keep the tag registers pinned and live across the patchpoint.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));

        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // params[0] holds the result; params[1] holds the base cell.
                auto generator = Box<JITInByIdGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), uid, JSValueRegs(params[1].gpr()),
                    JSValueRegs(params[0].gpr()));

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                // The slow path is emitted out-of-line; on completion it jumps
                // back to |done| just after the fast path.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), operationInByIdOptimize, params[0].gpr(),
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                            CCallHelpers::TrustedImmPtr(uid)).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });

        setJSValue(patchpoint);
    }
10450
    // Lowers HasOwnProperty. Extracts a UniquedStringImpl from the key (string,
    // symbol, or untyped value), then probes the VM's HasOwnPropertyCache keyed
    // on (structureID, impl). On a cache hit the cached boolean is used; any
    // miss — including rope strings, non-atomic strings, and non-string/
    // non-symbol untyped keys — falls back to operationHasOwnProperty.
    void compileHasOwnProperty()
    {
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock lastNext = nullptr;

        LValue object = lowObject(m_node->child1());
        LValue uniquedStringImpl;
        LValue keyAsValue = nullptr;
        switch (m_node->child2().useKind()) {
        case StringUse: {
            LBasicBlock isNonEmptyString = m_out.newBlock();
            LBasicBlock isAtomString = m_out.newBlock();

            // Rope strings have no impl pointer yet; send them to the slow case.
            keyAsValue = lowString(m_node->child2());
            m_out.branch(isNotRopeString(keyAsValue, m_node->child2()), usually(isNonEmptyString), rarely(slowCase));

            // Only atomic impls are valid cache keys.
            lastNext = m_out.appendTo(isNonEmptyString, isAtomString);
            uniquedStringImpl = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
            LValue isNotAtomic = m_out.testIsZero32(m_out.load32(uniquedStringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtom()));
            m_out.branch(isNotAtomic, rarely(slowCase), usually(isAtomString));

            m_out.appendTo(isAtomString, slowCase);
            break;
        }
        case SymbolUse: {
            // Symbols always carry a uniqued impl; no branching needed.
            keyAsValue = lowSymbol(m_node->child2());
            uniquedStringImpl = m_out.loadPtr(keyAsValue, m_heaps.Symbol_symbolImpl);
            lastNext = m_out.insertNewBlocksBefore(slowCase);
            break;
        }
        case UntypedUse: {
            LBasicBlock isCellCase = m_out.newBlock();
            LBasicBlock isStringCase = m_out.newBlock();
            LBasicBlock notStringCase = m_out.newBlock();
            LBasicBlock isNonEmptyString = m_out.newBlock();
            LBasicBlock isSymbolCase = m_out.newBlock();
            LBasicBlock hasUniquedStringImpl = m_out.newBlock();

            // Dynamically dispatch on the key: atomic string or symbol can use
            // the cache; everything else goes slow.
            keyAsValue = lowJSValue(m_node->child2());
            m_out.branch(isCell(keyAsValue), usually(isCellCase), rarely(slowCase));

            lastNext = m_out.appendTo(isCellCase, isStringCase);
            m_out.branch(isString(keyAsValue), unsure(isStringCase), unsure(notStringCase));

            m_out.appendTo(isStringCase, isNonEmptyString);
            m_out.branch(isNotRopeString(keyAsValue, m_node->child2()), usually(isNonEmptyString), rarely(slowCase));

            m_out.appendTo(isNonEmptyString, notStringCase);
            LValue implFromString = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
            ValueFromBlock stringResult = m_out.anchor(implFromString);
            LValue isNotAtomic = m_out.testIsZero32(m_out.load32(implFromString, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtom()));
            m_out.branch(isNotAtomic, rarely(slowCase), usually(hasUniquedStringImpl));

            m_out.appendTo(notStringCase, isSymbolCase);
            m_out.branch(isSymbol(keyAsValue), unsure(isSymbolCase), unsure(slowCase));

            m_out.appendTo(isSymbolCase, hasUniquedStringImpl);
            ValueFromBlock symbolResult = m_out.anchor(m_out.loadPtr(keyAsValue, m_heaps.Symbol_symbolImpl));
            m_out.jump(hasUniquedStringImpl);

            m_out.appendTo(hasUniquedStringImpl, slowCase);
            uniquedStringImpl = m_out.phi(pointerType(), stringResult, symbolResult);
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        ASSERT(keyAsValue);

        // Note that we don't test if the hash is zero here. AtomStringImpl's can't have a zero
        // hash, however, a SymbolImpl may. But, because this is a cache, we don't care. We only
        // ever load the result from the cache if the cache entry matches what we are querying for.
        // So we either get super lucky and use zero for the hash and somehow collide with the entity
        // we're looking for, or we realize we're comparing against another entity, and go to the
        // slow path anyways.
        LValue hash = m_out.lShr(m_out.load32(uniquedStringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));

        // Cache index = (hash + structureID) & mask; an entry hits only if both
        // its structureID and impl match exactly.
        LValue structureID = m_out.load32(object, m_heaps.JSCell_structureID);
        LValue index = m_out.add(hash, structureID);
        index = m_out.zeroExtPtr(m_out.bitAnd(index, m_out.constInt32(HasOwnPropertyCache::mask)));
        ASSERT(vm().hasOwnPropertyCache());
        LValue cache = m_out.constIntPtr(vm().hasOwnPropertyCache());

        IndexedAbstractHeap& heap = m_heaps.HasOwnPropertyCache;
        LValue sameStructureID = m_out.equal(structureID, m_out.load32(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfStructureID())));
        LValue sameImpl = m_out.equal(uniquedStringImpl, m_out.loadPtr(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfImpl())));
        ValueFromBlock fastResult = m_out.anchor(m_out.load8ZeroExt32(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfResult())));
        LValue cacheHit = m_out.bitAnd(sameStructureID, sameImpl);

        m_out.branch(m_out.notZero32(cacheHit), usually(continuation), rarely(slowCase));

        // Slow case: full runtime lookup.
        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult;
        slowResult = m_out.anchor(vmCall(Int32, m_out.operation(operationHasOwnProperty), m_callFrame, object, keyAsValue));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, fastResult, slowResult));
    }
10552
10553 void compileParseInt()
10554 {
10555 RELEASE_ASSERT(m_node->child1().useKind() == UntypedUse || m_node->child1().useKind() == StringUse);
10556 LValue result;
10557 if (m_node->child2()) {
10558 LValue radix = lowInt32(m_node->child2());
10559 if (m_node->child1().useKind() == UntypedUse)
10560 result = vmCall(Int64, m_out.operation(operationParseIntGeneric), m_callFrame, lowJSValue(m_node->child1()), radix);
10561 else
10562 result = vmCall(Int64, m_out.operation(operationParseIntString), m_callFrame, lowString(m_node->child1()), radix);
10563 } else {
10564 if (m_node->child1().useKind() == UntypedUse)
10565 result = vmCall(Int64, m_out.operation(operationParseIntNoRadixGeneric), m_callFrame, lowJSValue(m_node->child1()));
10566 else
10567 result = vmCall(Int64, m_out.operation(operationParseIntStringNoRadix), m_callFrame, lowString(m_node->child1()));
10568 }
10569 setJSValue(result);
10570 }
10571
    // Lowers OverridesHasInstance. Answers true when |hasInstance| is not the
    // frozen default hasInstance function, or when the constructor's type-info
    // flags lack the ImplementsDefaultHasInstance bit.
    void compileOverridesHasInstance()
    {
        FrozenValue* defaultHasInstanceFunction = m_node->cellOperand();
        ASSERT(defaultHasInstanceFunction->cell()->inherits<JSFunction>(vm()));

        LValue constructor = lowCell(m_node->child1());
        LValue hasInstance = lowJSValue(m_node->child2());

        LBasicBlock defaultHasInstance = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Unlike in the DFG, we don't worry about cleaning this code up for the case where we have proven the hasInstanceValue is a constant as B3 should fix it for us.

        ValueFromBlock notDefaultHasInstanceResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(m_out.notEqual(hasInstance, frozenPointer(defaultHasInstanceFunction)), unsure(continuation), unsure(defaultHasInstance));

        // hasInstance is the default: answer true only if the constructor does
        // NOT implement the default hasInstance behavior (flag bit clear).
        LBasicBlock lastNext = m_out.appendTo(defaultHasInstance, continuation);
        ValueFromBlock implementsDefaultHasInstanceResult = m_out.anchor(m_out.testIsZero32(
            m_out.load8ZeroExt32(constructor, m_heaps.JSCell_typeInfoFlags),
            m_out.constInt32(ImplementsDefaultHasInstance)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, implementsDefaultHasInstanceResult, notDefaultHasInstanceResult));
    }
10597
10598 void compileCheckTypeInfoFlags()
10599 {
10600 speculate(
10601 BadTypeInfoFlags, noValue(), 0,
10602 m_out.testIsZero32(
10603 m_out.load8ZeroExt32(lowCell(m_node->child1()), m_heaps.JSCell_typeInfoFlags),
10604 m_out.constInt32(m_node->typeInfoOperand())));
10605 }
10606
    // Lowers InstanceOf via a patchpoint embedding a JITInstanceOfGenerator
    // inline cache. Handles both the CellUse/CellUse and UntypedUse/UntypedUse
    // edge configurations; non-cell values short-circuit to a boxed false, and
    // non-cell prototypes are routed to the slow path. The patchpoint's result
    // is a boxed boolean.
    void compileInstanceOf()
    {
        Node* node = m_node;
        State* state = &m_ftlState;

        LValue value;
        LValue prototype;
        bool valueIsCell;
        bool prototypeIsCell;
        if (m_node->child1().useKind() == CellUse
            && m_node->child2().useKind() == CellUse) {
            value = lowCell(m_node->child1());
            prototype = lowCell(m_node->child2());

            valueIsCell = true;
            prototypeIsCell = true;
        } else {
            DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
            DFG_ASSERT(m_graph, m_node, m_node->child2().useKind() == UntypedUse);

            value = lowJSValue(m_node->child1());
            prototype = lowJSValue(m_node->child2());

            // Abstract interpretation may still prove cell-ness even for
            // untyped edges; use that to skip the dynamic checks below.
            valueIsCell = abstractValue(m_node->child1()).isType(SpecCell);
            prototypeIsCell = abstractValue(m_node->child2()).isType(SpecCell);
        }

        bool prototypeIsObject = abstractValue(m_node->child2()).isType(SpecObject | ~SpecCell);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(value);
        patchpoint->appendSomeRegister(prototype);
        // Keep the tag registers pinned and live across the patchpoint.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        patchpoint->numGPScratchRegisters = 2;
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                GPRReg resultGPR = params[0].gpr();
                GPRReg valueGPR = params[1].gpr();
                GPRReg prototypeGPR = params[2].gpr();
                GPRReg scratchGPR = params.gpScratch(0);
                GPRReg scratch2GPR = params.gpScratch(1);

                // A non-cell value is never an instance of anything: box false
                // and jump over the IC.
                CCallHelpers::Jump doneJump;
                if (!valueIsCell) {
                    CCallHelpers::Jump isCell = jit.branchIfCell(valueGPR);
                    jit.boxBooleanPayload(false, resultGPR);
                    doneJump = jit.jump();
                    isCell.link(&jit);
                }

                CCallHelpers::JumpList slowCases;
                if (!prototypeIsCell)
                    slowCases.append(jit.branchIfNotCell(prototypeGPR));

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                auto generator = Box<JITInstanceOfGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), resultGPR, valueGPR, prototypeGPR, scratchGPR,
                    scratch2GPR, prototypeIsObject);
                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                // Out-of-line slow path: call operationInstanceOfOptimize so the
                // IC can be repatched, then rejoin after the fast path.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        J_JITOperation_ESsiJJ optimizationFunction = operationInstanceOfOptimize;

                        slowCases.link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), optimizationFunction, resultGPR,
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), valueGPR,
                            prototypeGPR).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });

                if (doneJump.isSet())
                    doneJump.link(&jit);
            });

        // This returns a boxed boolean.
        setJSValue(patchpoint);
    }
10714
10715 void compileInstanceOfCustom()
10716 {
10717 LValue value = lowJSValue(m_node->child1());
10718 LValue constructor = lowCell(m_node->child2());
10719 LValue hasInstance = lowJSValue(m_node->child3());
10720
10721 setBoolean(m_out.logicalNot(m_out.equal(m_out.constInt32(0), vmCall(Int32, m_out.operation(operationInstanceOfCustom), m_callFrame, value, constructor, hasInstance))));
10722 }
10723
10724 void compileCountExecution()
10725 {
10726 TypedPointer counter = m_out.absolute(m_node->executionCounter()->address());
10727 m_out.store64(m_out.add(m_out.load64(counter), m_out.constInt64(1)), counter);
10728 }
10729
10730 void compileSuperSamplerBegin()
10731 {
10732 TypedPointer counter = m_out.absolute(bitwise_cast<void*>(&g_superSamplerCount));
10733 m_out.store32(m_out.add(m_out.load32(counter), m_out.constInt32(1)), counter);
10734 }
10735
10736 void compileSuperSamplerEnd()
10737 {
10738 TypedPointer counter = m_out.absolute(bitwise_cast<void*>(&g_superSamplerCount));
10739 m_out.store32(m_out.sub(m_out.load32(counter), m_out.constInt32(1)), counter);
10740 }
10741
10742 void compileStoreBarrier()
10743 {
10744 emitStoreBarrier(lowCell(m_node->child1()), m_node->op() == FencedStoreBarrier);
10745 }
10746
    // Lowers HasIndexedProperty. For Int32/Contiguous, Double, and ArrayStorage
    // array modes, an inline fast path loads the butterfly slot and treats a
    // present value (non-zero bits, or a non-NaN double) as "has"; holes and
    // out-of-bounds indices fall back to operationHasIndexedPropertyByInt. All
    // other array modes go straight to the runtime call.
    void compileHasIndexedProperty()
    {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Contiguous: {
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
            LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));

            IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
                m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;

            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock lastNext = nullptr;

            // Out-of-bounds access (index >= publicLength) must use the slow path.
            if (!m_node->arrayMode().isInBounds()) {
                LBasicBlock checkHole = m_out.newBlock();
                m_out.branch(
                    m_out.aboveOrEqual(
                        index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
                    rarely(slowCase), usually(checkHole));
                lastNext = m_out.appendTo(checkHole, slowCase);
            } else
                lastNext = m_out.insertNewBlocksBefore(slowCase);

            // A hole is encoded as all-zero bits; non-zero means present.
            LValue checkHoleResultValue =
                m_out.notZero64(m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1))));
            ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
            m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
            return;
        }
        case Array::Double: {
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
            LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));

            IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;

            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock lastNext = nullptr;

            if (!m_node->arrayMode().isInBounds()) {
                LBasicBlock checkHole = m_out.newBlock();
                m_out.branch(
                    m_out.aboveOrEqual(
                        index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
                    rarely(slowCase), usually(checkHole));
                lastNext = m_out.appendTo(checkHole, slowCase);
            } else
                lastNext = m_out.insertNewBlocksBefore(slowCase);

            // Double arrays use NaN as the hole; value == value filters it out.
            LValue doubleValue = m_out.loadDouble(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
            LValue checkHoleResultValue = m_out.doubleEqual(doubleValue, doubleValue);
            ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
            m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
            return;
        }

        case Array::ArrayStorage: {
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
            LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));

            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock lastNext = nullptr;

            // ArrayStorage bounds-checks against vectorLength, not publicLength.
            if (!m_node->arrayMode().isInBounds()) {
                LBasicBlock checkHole = m_out.newBlock();
                m_out.branch(
                    m_out.aboveOrEqual(
                        index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength)),
                    rarely(slowCase), usually(checkHole));
                lastNext = m_out.appendTo(checkHole, slowCase);
            } else
                lastNext = m_out.insertNewBlocksBefore(slowCase);

            LValue checkHoleResultValue =
                m_out.notZero64(m_out.load64(baseIndex(m_heaps.ArrayStorage_vector, storage, index, m_graph.varArgChild(m_node, 1))));
            ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
            m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
            break;
        }

        default: {
            // No fast path for this array mode: always call the runtime.
            LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));
            setBoolean(m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
            break;
        }
        }
    }
10864
10865 void compileHasGenericProperty()
10866 {
10867 LValue base = lowJSValue(m_node->child1());
10868 LValue property = lowCell(m_node->child2());
10869 setJSValue(vmCall(Int64, m_out.operation(operationHasGenericProperty), m_callFrame, base, property));
10870 }
10871
    // Lowers HasStructureProperty. If the base's structure matches the
    // enumerator's cached structure, the property is known to be present
    // (answer true); otherwise fall back to operationHasGenericProperty and
    // compare its result against encoded true.
    void compileHasStructureProperty()
    {
        LValue base = lowJSValue(m_node->child1());
        LValue property = lowString(m_node->child2());
        LValue enumerator = lowCell(m_node->child3());

        LBasicBlock correctStructure = m_out.newBlock();
        LBasicBlock wrongStructure = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(m_out.notEqual(
            m_out.load32(base, m_heaps.JSCell_structureID),
            m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)),
            rarely(wrongStructure), usually(correctStructure));

        // Matching structure: the enumerator guarantees the property exists.
        LBasicBlock lastNext = m_out.appendTo(correctStructure, wrongStructure);
        ValueFromBlock correctStructureResult = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);

        // Structure changed since the enumerator was cached: ask the runtime.
        m_out.appendTo(wrongStructure, continuation);
        ValueFromBlock wrongStructureResult = m_out.anchor(
            m_out.equal(
                m_out.constInt64(JSValue::encode(jsBoolean(true))),
                vmCall(Int64, m_out.operation(operationHasGenericProperty), m_callFrame, base, property)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, correctStructureResult, wrongStructureResult));
    }
10901
    // Lowers GetDirectPname. If the base's structure still matches the
    // enumerator's cached structure, the property at |index| is loaded directly:
    // from inline storage when index < cachedInlineCapacity, otherwise from the
    // butterfly's out-of-line slots. On a structure mismatch we fall back to
    // operationGetByVal.
    void compileGetDirectPname()
    {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue property = lowCell(m_graph.varArgChild(m_node, 1));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 2));
        LValue enumerator = lowCell(m_graph.varArgChild(m_node, 3));

        LBasicBlock checkOffset = m_out.newBlock();
        LBasicBlock inlineLoad = m_out.newBlock();
        LBasicBlock outOfLineLoad = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(m_out.notEqual(
            m_out.load32(base, m_heaps.JSCell_structureID),
            m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)),
            rarely(slowCase), usually(checkOffset));

        LBasicBlock lastNext = m_out.appendTo(checkOffset, inlineLoad);
        m_out.branch(m_out.aboveOrEqual(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity)),
            unsure(outOfLineLoad), unsure(inlineLoad));

        // Inline slot: base + offsetOfInlineStorage + index * 8.
        m_out.appendTo(inlineLoad, outOfLineLoad);
        ValueFromBlock inlineResult = m_out.anchor(
            m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(),
                base, m_out.zeroExt(index, Int64), ScaleEight, JSObject::offsetOfInlineStorage())));
        m_out.jump(continuation);

        // Out-of-line slot: butterfly properties grow downward, so the index
        // past the inline capacity is negated before scaling.
        m_out.appendTo(outOfLineLoad, slowCase);
        LValue storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
        LValue realIndex = m_out.signExt32To64(
            m_out.neg(m_out.sub(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity))));
        int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
        ValueFromBlock outOfLineResult = m_out.anchor(
            m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), storage, realIndex, ScaleEight, offsetOfFirstProperty)));
        m_out.jump(continuation);

        // Structure mismatch: do a generic get-by-val through the runtime.
        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowCaseResult = m_out.anchor(
            vmCall(Int64, m_out.operation(operationGetByVal), m_callFrame, base, property));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, inlineResult, outOfLineResult, slowCaseResult));
    }
10947
10948 void compileGetEnumerableLength()
10949 {
10950 LValue enumerator = lowCell(m_node->child1());
10951 setInt32(m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_indexLength));
10952 }
10953
10954 void compileGetPropertyEnumerator()
10955 {
10956 if (m_node->child1().useKind() == CellUse)
10957 setJSValue(vmCall(Int64, m_out.operation(operationGetPropertyEnumeratorCell), m_callFrame, lowCell(m_node->child1())));
10958 else
10959 setJSValue(vmCall(Int64, m_out.operation(operationGetPropertyEnumerator), m_callFrame, lowJSValue(m_node->child1())));
10960 }
10961
    // Lowers GetEnumeratorStructurePname. If |index| is below the enumerator's
    // endStructurePropertyIndex, loads the cached property name from the
    // enumerator's names vector; otherwise produces the encoded null value.
    void compileGetEnumeratorStructurePname()
    {
        LValue enumerator = lowCell(m_node->child1());
        LValue index = lowInt32(m_node->child2());

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock outOfBounds = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endStructurePropertyIndex)),
            usually(inBounds), rarely(outOfBounds));

        // In bounds: index into the cached property-names vector.
        LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
        LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector);
        ValueFromBlock inBoundsResult = m_out.anchor(
            m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index))));
        m_out.jump(continuation);

        // Past the structure-property range: signal exhaustion with null.
        m_out.appendTo(outOfBounds, continuation);
        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
    }
10987
    // Lowers GetEnumeratorGenericPname. Identical in shape to
    // compileGetEnumeratorStructurePname, but bounds-checks against the
    // enumerator's endGenericPropertyIndex instead.
    void compileGetEnumeratorGenericPname()
    {
        LValue enumerator = lowCell(m_node->child1());
        LValue index = lowInt32(m_node->child2());

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock outOfBounds = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endGenericPropertyIndex)),
            usually(inBounds), rarely(outOfBounds));

        // In bounds: index into the cached property-names vector.
        LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
        LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector);
        ValueFromBlock inBoundsResult = m_out.anchor(
            m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index))));
        m_out.jump(continuation);

        // Past the generic-property range: signal exhaustion with null.
        m_out.appendTo(outOfBounds, continuation);
        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
    }
11013
11014 void compileToIndexString()
11015 {
11016 LValue index = lowInt32(m_node->child1());
11017 setJSValue(vmCall(Int64, m_out.operation(operationToIndexString), m_callFrame, index));
11018 }
11019
11020 void compileCheckStructureImmediate()
11021 {
11022 LValue structure = lowCell(m_node->child1());
11023 checkStructure(
11024 structure, noValue(), BadCache, m_node->structureSet(),
11025 [this] (RegisteredStructure structure) {
11026 return weakStructure(structure);
11027 });
11028 }
11029
11030 void compileMaterializeNewObject()
11031 {
11032 ObjectMaterializationData& data = m_node->objectMaterializationData();
11033
11034 // Lower the values first, to avoid creating values inside a control flow diamond.
11035
11036 Vector<LValue, 8> values;
11037 for (unsigned i = 0; i < data.m_properties.size(); ++i) {
11038 Edge edge = m_graph.varArgChild(m_node, 1 + i);
11039 switch (data.m_properties[i].kind()) {
11040 case PublicLengthPLoc:
11041 case VectorLengthPLoc:
11042 values.append(lowInt32(edge));
11043 break;
11044 default:
11045 values.append(lowJSValue(edge));
11046 break;
11047 }
11048 }
11049
11050 RegisteredStructureSet set = m_node->structureSet();
11051
11052 Vector<LBasicBlock, 1> blocks(set.size());
11053 for (unsigned i = set.size(); i--;)
11054 blocks[i] = m_out.newBlock();
11055 LBasicBlock dummyDefault = m_out.newBlock();
11056 LBasicBlock outerContinuation = m_out.newBlock();
11057
11058 Vector<SwitchCase, 1> cases(set.size());
11059 for (unsigned i = set.size(); i--;)
11060 cases[i] = SwitchCase(weakStructure(set.at(i)), blocks[i], Weight(1));
11061 m_out.switchInstruction(
11062 lowCell(m_graph.varArgChild(m_node, 0)), cases, dummyDefault, Weight(0));
11063
11064 LBasicBlock outerLastNext = m_out.m_nextBlock;
11065
11066 Vector<ValueFromBlock, 1> results;
11067
11068 for (unsigned i = set.size(); i--;) {
11069 m_out.appendTo(blocks[i], i + 1 < set.size() ? blocks[i + 1] : dummyDefault);
11070
11071 RegisteredStructure structure = set.at(i);
11072
11073 LValue object;
11074 LValue butterfly;
11075
11076 if (structure->outOfLineCapacity() || hasIndexedProperties(structure->indexingType())) {
11077 size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
11078 Allocator cellAllocator = allocatorForNonVirtualConcurrently<JSFinalObject>(vm(), allocationSize, AllocatorForMode::AllocatorIfExists);
11079
11080 bool hasIndexingHeader = hasIndexedProperties(structure->indexingType());
11081 unsigned indexingHeaderSize = 0;
11082 LValue indexingPayloadSizeInBytes = m_out.intPtrZero;
11083 LValue vectorLength = m_out.int32Zero;
11084 LValue publicLength = m_out.int32Zero;
11085 if (hasIndexingHeader) {
11086 indexingHeaderSize = sizeof(IndexingHeader);
11087 for (unsigned i = data.m_properties.size(); i--;) {
11088 PromotedLocationDescriptor descriptor = data.m_properties[i];
11089 switch (descriptor.kind()) {
11090 case PublicLengthPLoc:
11091 publicLength = values[i];
11092 break;
11093 case VectorLengthPLoc:
11094 vectorLength = values[i];
11095 break;
11096 default:
11097 break;
11098 }
11099 }
11100 indexingPayloadSizeInBytes =
11101 m_out.mul(m_out.zeroExtPtr(vectorLength), m_out.intPtrEight);
11102 }
11103
11104 LValue butterflySize = m_out.add(
11105 m_out.constIntPtr(
11106 structure->outOfLineCapacity() * sizeof(JSValue) + indexingHeaderSize),
11107 indexingPayloadSizeInBytes);
11108
11109 LBasicBlock slowPath = m_out.newBlock();
11110 LBasicBlock continuation = m_out.newBlock();
11111
11112 LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
11113
11114 ValueFromBlock noButterfly = m_out.anchor(m_out.intPtrZero);
11115
11116 LValue startOfStorage = allocateHeapCell(
11117 allocatorForSize(vm().jsValueGigacageAuxiliarySpace, butterflySize, slowPath),
11118 slowPath);
11119
11120 LValue fastButterflyValue = m_out.add(
11121 startOfStorage,
11122 m_out.constIntPtr(
11123 structure->outOfLineCapacity() * sizeof(JSValue) + sizeof(IndexingHeader)));
11124
11125 ValueFromBlock haveButterfly = m_out.anchor(fastButterflyValue);
11126
11127 splatWords(
11128 fastButterflyValue,
11129 m_out.constInt32(-structure->outOfLineCapacity() - 1),
11130 m_out.constInt32(-1),
11131 m_out.int64Zero, m_heaps.properties.atAnyNumber());
11132
11133 m_out.store32(vectorLength, fastButterflyValue, m_heaps.Butterfly_vectorLength);
11134
11135 LValue fastObjectValue = allocateObject(
11136 m_out.constIntPtr(cellAllocator.localAllocator()), structure, fastButterflyValue,
11137 slowPath);
11138
11139 ValueFromBlock fastObject = m_out.anchor(fastObjectValue);
11140 ValueFromBlock fastButterfly = m_out.anchor(fastButterflyValue);
11141 m_out.jump(continuation);
11142
11143 m_out.appendTo(slowPath, continuation);
11144
11145 LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);
11146
11147 VM& vm = this->vm();
11148 LValue slowObjectValue;
11149 if (hasIndexingHeader) {
11150 slowObjectValue = lazySlowPath(
11151 [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
11152 return createLazyCallGenerator(vm,
11153 operationNewObjectWithButterflyWithIndexingHeaderAndVectorLength,
11154 locations[0].directGPR(), CCallHelpers::TrustedImmPtr(structure.get()),
11155 locations[1].directGPR(), locations[2].directGPR());
11156 },
11157 vectorLength, butterflyValue);
11158 } else {
11159 slowObjectValue = lazySlowPath(
11160 [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
11161 return createLazyCallGenerator(vm,
11162 operationNewObjectWithButterfly, locations[0].directGPR(),
11163 CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR());
11164 },
11165 butterflyValue);
11166 }
11167 ValueFromBlock slowObject = m_out.anchor(slowObjectValue);
11168 ValueFromBlock slowButterfly = m_out.anchor(
11169 m_out.loadPtr(slowObjectValue, m_heaps.JSObject_butterfly));
11170
11171 m_out.jump(continuation);
11172
11173 m_out.appendTo(continuation, lastNext);
11174
11175 object = m_out.phi(pointerType(), fastObject, slowObject);
11176 butterfly = m_out.phi(pointerType(), fastButterfly, slowButterfly);
11177
11178 m_out.store32(publicLength, butterfly, m_heaps.Butterfly_publicLength);
11179
11180 initializeArrayElements(m_out.constInt32(structure->indexingType()), m_out.int32Zero, vectorLength, butterfly);
11181
11182 HashMap<int32_t, LValue, DefaultHash<int32_t>::Hash, WTF::UnsignedWithZeroKeyHashTraits<int32_t>> indexMap;
11183 Vector<int32_t> indices;
11184 for (unsigned i = data.m_properties.size(); i--;) {
11185 PromotedLocationDescriptor descriptor = data.m_properties[i];
11186 if (descriptor.kind() != IndexedPropertyPLoc)
11187 continue;
11188 int32_t index = static_cast<int32_t>(descriptor.info());
11189
11190 auto result = indexMap.add(index, values[i]);
11191 DFG_ASSERT(m_graph, m_node, result); // Duplicates are illegal.
11192
11193 indices.append(index);
11194 }
11195
11196 if (!indices.isEmpty()) {
11197 std::sort(indices.begin(), indices.end());
11198
11199 Vector<LBasicBlock> blocksWithStores(indices.size());
11200 Vector<LBasicBlock> blocksWithChecks(indices.size());
11201
11202 for (unsigned i = indices.size(); i--;) {
11203 blocksWithStores[i] = m_out.newBlock();
11204 blocksWithChecks[i] = m_out.newBlock(); // blocksWithChecks[0] is the continuation.
11205 }
11206
11207 LBasicBlock indexLastNext = m_out.m_nextBlock;
11208
11209 for (unsigned i = indices.size(); i--;) {
11210 int32_t index = indices[i];
11211 LValue value = indexMap.get(index);
11212
11213 m_out.branch(
11214 m_out.below(m_out.constInt32(index), publicLength),
11215 unsure(blocksWithStores[i]), unsure(blocksWithChecks[i]));
11216
11217 m_out.appendTo(blocksWithStores[i], blocksWithChecks[i]);
11218
11219 // This has to type-check and convert its inputs, but it cannot do so in a
11220 // way that updates AI. That's a bit annoying, but if you think about how
11221 // sinking works, it's actually not a bad thing. We are virtually guaranteed
11222 // that these type checks will not fail, since the type checks that guarded
11223 // the original stores to the array are still somewhere above this point.
11224 Output::StoreType storeType;
11225 IndexedAbstractHeap* heap;
11226 switch (structure->indexingType()) {
11227 case ALL_INT32_INDEXING_TYPES:
11228 // FIXME: This could use the proven type if we had the Edge for the
11229 // value. https://bugs.webkit.org/show_bug.cgi?id=155311
11230 speculate(BadType, noValue(), nullptr, isNotInt32(value));
11231 storeType = Output::Store64;
11232 heap = &m_heaps.indexedInt32Properties;
11233 break;
11234
11235 case ALL_DOUBLE_INDEXING_TYPES: {
11236 // FIXME: If the source is ValueRep, we should avoid emitting any
11237 // checks. We could also avoid emitting checks if we had the Edge of
11238 // this value. https://bugs.webkit.org/show_bug.cgi?id=155311
11239
11240 LBasicBlock intCase = m_out.newBlock();
11241 LBasicBlock doubleCase = m_out.newBlock();
11242 LBasicBlock continuation = m_out.newBlock();
11243
11244 m_out.branch(isInt32(value), unsure(intCase), unsure(doubleCase));
11245
11246 LBasicBlock lastNext = m_out.appendTo(intCase, doubleCase);
11247
11248 ValueFromBlock intResult =
11249 m_out.anchor(m_out.intToDouble(unboxInt32(value)));
11250 m_out.jump(continuation);
11251
11252 m_out.appendTo(doubleCase, continuation);
11253
11254 speculate(BadType, noValue(), nullptr, isNumber(value));
11255 ValueFromBlock doubleResult = m_out.anchor(unboxDouble(value));
11256 m_out.jump(continuation);
11257
11258 m_out.appendTo(continuation, lastNext);
11259 value = m_out.phi(Double, intResult, doubleResult);
11260 storeType = Output::StoreDouble;
11261 heap = &m_heaps.indexedDoubleProperties;
11262 break;
11263 }
11264
11265 case ALL_CONTIGUOUS_INDEXING_TYPES:
11266 storeType = Output::Store64;
11267 heap = &m_heaps.indexedContiguousProperties;
11268 break;
11269
11270 default:
11271 DFG_CRASH(m_graph, m_node, "Invalid indexing type");
11272 break;
11273 }
11274
11275 m_out.store(value, m_out.address(butterfly, heap->at(index)), storeType);
11276
11277 m_out.jump(blocksWithChecks[i]);
11278 m_out.appendTo(
11279 blocksWithChecks[i], i ? blocksWithStores[i - 1] : indexLastNext);
11280 }
11281 }
11282 } else {
11283 // In the easy case where we can do a one-shot allocation, we simply allocate the
11284 // object to directly have the desired structure.
11285 object = allocateObject(structure);
11286 butterfly = nullptr; // Don't have one, don't need one.
11287 }
11288
11289 BitVector setInlineOffsets;
11290 for (PropertyMapEntry entry : structure->getPropertiesConcurrently()) {
11291 for (unsigned i = data.m_properties.size(); i--;) {
11292 PromotedLocationDescriptor descriptor = data.m_properties[i];
11293 if (descriptor.kind() != NamedPropertyPLoc)
11294 continue;
11295 if (m_graph.identifiers()[descriptor.info()] != entry.key)
11296 continue;
11297
11298 LValue base;
11299 if (isInlineOffset(entry.offset)) {
11300 setInlineOffsets.set(entry.offset);
11301 base = object;
11302 } else
11303 base = butterfly;
11304 storeProperty(values[i], base, descriptor.info(), entry.offset);
11305 break;
11306 }
11307 }
11308 for (unsigned i = structure->inlineCapacity(); i--;) {
11309 if (!setInlineOffsets.get(i))
11310 m_out.store64(m_out.int64Zero, m_out.address(m_heaps.properties.atAnyNumber(), object, offsetRelativeToBase(i)));
11311 }
11312
11313 results.append(m_out.anchor(object));
11314 m_out.jump(outerContinuation);
11315 }
11316
11317 m_out.appendTo(dummyDefault, outerContinuation);
11318 m_out.unreachable();
11319
11320 m_out.appendTo(outerContinuation, outerLastNext);
11321 setJSValue(m_out.phi(pointerType(), results));
11322 mutatorFence();
11323 }
11324
void compileMaterializeCreateActivation()
{
    // Rematerializes a sunk CreateActivation: allocate a JSLexicalEnvironment
    // and populate its closure variables from the lowered values.
    ObjectMaterializationData& data = m_node->objectMaterializationData();

    // Lower all variable values up front, outside the fast/slow diamond below.
    Vector<LValue, 8> values;
    for (unsigned i = 0; i < data.m_properties.size(); ++i)
        values.append(lowJSValue(m_graph.varArgChild(m_node, 2 + i)));

    LValue scope = lowCell(m_graph.varArgChild(m_node, 1));
    SymbolTable* table = m_node->castOperand<SymbolTable*>();
    ASSERT(table == m_graph.varArgChild(m_node, 0)->castConstant<SymbolTable*>(vm()));
    RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure());

    LBasicBlock slowPath = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

    // Fast path: inline allocation; branches to slowPath on failure.
    LValue fastObject = allocateObject<JSLexicalEnvironment>(
        JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);

    m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
    m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);

    ValueFromBlock fastResult = m_out.anchor(fastObject);
    m_out.jump(continuation);

    m_out.appendTo(slowPath, continuation);
    // We ensure allocation sinking explicitly sets bottom values for all field members.
    // Therefore, it doesn't matter what JSValue we pass in as the initialization value
    // because all fields will be overwritten.
    // FIXME: It may be worth creating an operation that calls a constructor on JSLexicalEnvironment that
    // doesn't initialize every slot because we are guaranteed to do that here.
    VM& vm = this->vm();
    LValue callResult = lazySlowPath(
        [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
            return createLazyCallGenerator(vm,
                operationCreateActivationDirect, locations[0].directGPR(),
                CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                CCallHelpers::TrustedImmPtr(table),
                CCallHelpers::TrustedImm64(JSValue::encode(jsUndefined())));
        }, scope);
    ValueFromBlock slowResult = m_out.anchor(callResult);
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    LValue activation = m_out.phi(pointerType(), fastResult, slowResult);
    // Replay the sunk closure-variable stores.
    RELEASE_ASSERT(data.m_properties.size() == table->scopeSize());
    for (unsigned i = 0; i < data.m_properties.size(); ++i) {
        PromotedLocationDescriptor descriptor = data.m_properties[i];
        ASSERT(descriptor.kind() == ClosureVarPLoc);
        m_out.store64(
            values[i], activation,
            m_heaps.JSLexicalEnvironment_variables[descriptor.info()]);
    }

    if (validationEnabled()) {
        // Validate to make sure every slot in the scope has one value.
        ConcurrentJSLocker locker(table->m_lock);
        for (auto iter = table->begin(locker), end = table->end(locker); iter != end; ++iter) {
            bool found = false;
            for (unsigned i = 0; i < data.m_properties.size(); ++i) {
                PromotedLocationDescriptor descriptor = data.m_properties[i];
                ASSERT(descriptor.kind() == ClosureVarPLoc);
                if (iter->value.scopeOffset().offset() == descriptor.info()) {
                    found = true;
                    break;
                }
            }
            ASSERT_UNUSED(found, found);
        }
    }

    mutatorFence();
    setJSValue(activation);
}
11402
void compileCheckTraps()
{
    // Polling-based VM trap check: load the one-byte "need trap handling"
    // flag and, in the rare case it is set, call the trap handler.
    ASSERT(Options::usePollingTraps());
    LBasicBlock needTrapHandling = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    LValue state = m_out.load8ZeroExt32(m_out.absolute(vm().needTrapHandlingAddress()));
    m_out.branch(m_out.isZero32(state),
        usually(continuation), rarely(needTrapHandling));

    LBasicBlock lastNext = m_out.appendTo(needTrapHandling, continuation);

    VM& vm = this->vm();
    // InvalidGPRReg: the slow-path call produces no result value.
    lazySlowPath(
        [=, &vm] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
            return createLazyCallGenerator(vm, operationHandleTraps, InvalidGPRReg);
        });
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
}
11424
11425 void compileRegExpExec()
11426 {
11427 LValue globalObject = lowCell(m_node->child1());
11428
11429 if (m_node->child2().useKind() == RegExpObjectUse) {
11430 LValue base = lowRegExpObject(m_node->child2());
11431
11432 if (m_node->child3().useKind() == StringUse) {
11433 LValue argument = lowString(m_node->child3());
11434 LValue result = vmCall(
11435 Int64, m_out.operation(operationRegExpExecString), m_callFrame, globalObject,
11436 base, argument);
11437 setJSValue(result);
11438 return;
11439 }
11440
11441 LValue argument = lowJSValue(m_node->child3());
11442 LValue result = vmCall(
11443 Int64, m_out.operation(operationRegExpExec), m_callFrame, globalObject, base,
11444 argument);
11445 setJSValue(result);
11446 return;
11447 }
11448
11449 LValue base = lowJSValue(m_node->child2());
11450 LValue argument = lowJSValue(m_node->child3());
11451 LValue result = vmCall(
11452 Int64, m_out.operation(operationRegExpExecGeneric), m_callFrame, globalObject, base,
11453 argument);
11454 setJSValue(result);
11455 }
11456
11457 void compileRegExpExecNonGlobalOrSticky()
11458 {
11459 LValue globalObject = lowCell(m_node->child1());
11460 LValue argument = lowString(m_node->child2());
11461 LValue result = vmCall(
11462 Int64, m_out.operation(operationRegExpExecNonGlobalOrSticky), m_callFrame, globalObject, frozenPointer(m_node->cellOperand()), argument);
11463 setJSValue(result);
11464 }
11465
11466 void compileRegExpMatchFastGlobal()
11467 {
11468 LValue globalObject = lowCell(m_node->child1());
11469 LValue argument = lowString(m_node->child2());
11470 LValue result = vmCall(
11471 Int64, m_out.operation(operationRegExpMatchFastGlobalString), m_callFrame, globalObject, frozenPointer(m_node->cellOperand()), argument);
11472 setJSValue(result);
11473 }
11474
11475 void compileRegExpTest()
11476 {
11477 LValue globalObject = lowCell(m_node->child1());
11478
11479 if (m_node->child2().useKind() == RegExpObjectUse) {
11480 LValue base = lowRegExpObject(m_node->child2());
11481
11482 if (m_node->child3().useKind() == StringUse) {
11483 LValue argument = lowString(m_node->child3());
11484 LValue result = vmCall(
11485 Int32, m_out.operation(operationRegExpTestString), m_callFrame, globalObject,
11486 base, argument);
11487 setBoolean(result);
11488 return;
11489 }
11490
11491 LValue argument = lowJSValue(m_node->child3());
11492 LValue result = vmCall(
11493 Int32, m_out.operation(operationRegExpTest), m_callFrame, globalObject, base,
11494 argument);
11495 setBoolean(result);
11496 return;
11497 }
11498
11499 LValue base = lowJSValue(m_node->child2());
11500 LValue argument = lowJSValue(m_node->child3());
11501 LValue result = vmCall(
11502 Int32, m_out.operation(operationRegExpTestGeneric), m_callFrame, globalObject, base,
11503 argument);
11504 setBoolean(result);
11505 }
11506
11507 void compileRegExpMatchFast()
11508 {
11509 LValue globalObject = lowCell(m_node->child1());
11510 LValue base = lowRegExpObject(m_node->child2());
11511 LValue argument = lowString(m_node->child3());
11512 LValue result = vmCall(
11513 Int64, m_out.operation(operationRegExpMatchFastString), m_callFrame, globalObject,
11514 base, argument);
11515 setJSValue(result);
11516 }
11517
void compileNewRegexp()
{
    // Allocates a RegExpObject for a compile-time-constant RegExp, with an
    // inline fast-path allocation and a lazy slow path for when the local
    // allocator is exhausted.
    FrozenValue* regexp = m_node->cellOperand();
    LValue lastIndex = lowJSValue(m_node->child1());
    ASSERT(regexp->cell()->inherits<RegExp>(vm()));

    LBasicBlock slowCase = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowCase);

    auto structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->regExpStructure());
    LValue fastResultValue = allocateObject<RegExpObject>(structure, m_out.intPtrZero, slowCase);
    // The regExp field also carries the lastIndex-is-not-writable flag bit;
    // storing the plain pointer leaves the flag clear on a fresh object.
    m_out.storePtr(frozenPointer(regexp), fastResultValue, m_heaps.RegExpObject_regExpAndLastIndexIsNotWritableFlag);
    m_out.store64(lastIndex, fastResultValue, m_heaps.RegExpObject_lastIndex);
    mutatorFence();
    ValueFromBlock fastResult = m_out.anchor(fastResultValue);
    m_out.jump(continuation);

    m_out.appendTo(slowCase, continuation);
    VM& vm = this->vm();
    RegExp* regexpCell = regexp->cast<RegExp*>();
    LValue slowResultValue = lazySlowPath(
        [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
            return createLazyCallGenerator(vm,
                operationNewRegexpWithLastIndex, locations[0].directGPR(),
                CCallHelpers::TrustedImmPtr(regexpCell), locations[1].directGPR());
        }, lastIndex);
    ValueFromBlock slowResult = m_out.anchor(slowResultValue);
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
}
11552
11553 void compileSetFunctionName()
11554 {
11555 vmCall(Void, m_out.operation(operationSetFunctionName), m_callFrame,
11556 lowCell(m_node->child1()), lowJSValue(m_node->child2()));
11557 }
11558
11559 void compileStringReplace()
11560 {
11561 if (m_node->child1().useKind() == StringUse
11562 && m_node->child2().useKind() == RegExpObjectUse
11563 && m_node->child3().useKind() == StringUse) {
11564
11565 if (JSString* replace = m_node->child3()->dynamicCastConstant<JSString*>(vm())) {
11566 if (!replace->length()) {
11567 LValue string = lowString(m_node->child1());
11568 LValue regExp = lowRegExpObject(m_node->child2());
11569
11570 LValue result = vmCall(
11571 pointerType(), m_out.operation(operationStringProtoFuncReplaceRegExpEmptyStr),
11572 m_callFrame, string, regExp);
11573
11574 setJSValue(result);
11575 return;
11576 }
11577 }
11578
11579 LValue string = lowString(m_node->child1());
11580 LValue regExp = lowRegExpObject(m_node->child2());
11581 LValue replace = lowString(m_node->child3());
11582
11583 LValue result = vmCall(
11584 pointerType(), m_out.operation(operationStringProtoFuncReplaceRegExpString),
11585 m_callFrame, string, regExp, replace);
11586
11587 setJSValue(result);
11588 return;
11589 }
11590
11591 LValue search;
11592 if (m_node->child2().useKind() == StringUse)
11593 search = lowString(m_node->child2());
11594 else
11595 search = lowJSValue(m_node->child2());
11596
11597 LValue result = vmCall(
11598 pointerType(), m_out.operation(operationStringProtoFuncReplaceGeneric), m_callFrame,
11599 lowJSValue(m_node->child1()), search,
11600 lowJSValue(m_node->child3()));
11601
11602 setJSValue(result);
11603 }
11604
11605 void compileGetRegExpObjectLastIndex()
11606 {
11607 setJSValue(m_out.load64(lowRegExpObject(m_node->child1()), m_heaps.RegExpObject_lastIndex));
11608 }
11609
void compileSetRegExpObjectLastIndex()
{
    if (!m_node->ignoreLastIndexIsWritable()) {
        LValue regExp = lowRegExpObject(m_node->child1());
        LValue value = lowJSValue(m_node->child2());

        // The regExp pointer field doubles as a flag word: the
        // lastIndexIsNotWritableFlag bit records that lastIndex was made
        // non-writable. OSR-exit if that bit is set.
        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.testNonZeroPtr(
                m_out.loadPtr(regExp, m_heaps.RegExpObject_regExpAndLastIndexIsNotWritableFlag),
                m_out.constIntPtr(RegExpObject::lastIndexIsNotWritableFlag)));

        m_out.store64(value, regExp, m_heaps.RegExpObject_lastIndex);
        return;
    }

    // Writability is already proven; store unconditionally.
    m_out.store64(lowJSValue(m_node->child2()), lowCell(m_node->child1()), m_heaps.RegExpObject_lastIndex);
}
11628
11629 void compileLogShadowChickenPrologue()
11630 {
11631 LValue packet = ensureShadowChickenPacket();
11632 LValue scope = lowCell(m_node->child1());
11633
11634 m_out.storePtr(m_callFrame, packet, m_heaps.ShadowChicken_Packet_frame);
11635 m_out.storePtr(m_out.loadPtr(addressFor(0)), packet, m_heaps.ShadowChicken_Packet_callerFrame);
11636 m_out.storePtr(m_out.loadPtr(payloadFor(CallFrameSlot::callee)), packet, m_heaps.ShadowChicken_Packet_callee);
11637 m_out.storePtr(scope, packet, m_heaps.ShadowChicken_Packet_scope);
11638 }
11639
void compileLogShadowChickenTail()
{
    // Record a tail-call packet in the ShadowChicken log. The callee slot is
    // filled with the tail marker so the log replayer knows this frame is
    // about to be reused by a tail call.
    LValue packet = ensureShadowChickenPacket();
    LValue thisValue = lowJSValue(m_node->child1());
    LValue scope = lowCell(m_node->child2());
    CallSiteIndex callSiteIndex = m_ftlState.jitCode->common.addCodeOrigin(m_node->origin.semantic);

    m_out.storePtr(m_callFrame, packet, m_heaps.ShadowChicken_Packet_frame);
    m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(ShadowChicken::Packet::tailMarker())), packet, m_heaps.ShadowChicken_Packet_callee);
    m_out.store64(thisValue, packet, m_heaps.ShadowChicken_Packet_thisValue);
    m_out.storePtr(scope, packet, m_heaps.ShadowChicken_Packet_scope);
    // We don't want the CodeBlock to have a weak pointer to itself because
    // that would cause it to always get collected.
    m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), packet, m_heaps.ShadowChicken_Packet_codeBlock);
    m_out.store32(m_out.constInt32(callSiteIndex.bits()), packet, m_heaps.ShadowChicken_Packet_callSiteIndex);
}
11656
11657 void compileRecordRegExpCachedResult()
11658 {
11659 Edge globalObjectEdge = m_graph.varArgChild(m_node, 0);
11660 Edge regExpEdge = m_graph.varArgChild(m_node, 1);
11661 Edge stringEdge = m_graph.varArgChild(m_node, 2);
11662 Edge startEdge = m_graph.varArgChild(m_node, 3);
11663 Edge endEdge = m_graph.varArgChild(m_node, 4);
11664
11665 LValue globalObject = lowCell(globalObjectEdge);
11666 LValue regExp = lowCell(regExpEdge);
11667 LValue string = lowCell(stringEdge);
11668 LValue start = lowInt32(startEdge);
11669 LValue end = lowInt32(endEdge);
11670
11671 m_out.storePtr(regExp, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_lastRegExp);
11672 m_out.storePtr(string, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_lastInput);
11673 m_out.store32(start, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_result_start);
11674 m_out.store32(end, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_result_end);
11675 m_out.store32As8(
11676 m_out.constInt32(0),
11677 m_out.address(globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_reified));
11678 }
11679
// Result of getArgumentsLength(): the argument count excluding |this|,
// available either as a compile-time constant or only as a runtime value.
struct ArgumentsLength {
    ArgumentsLength()
        : isKnown(false)
        , known(UINT_MAX)
        , value(nullptr)
    {
    }

    bool isKnown; // True if the count is a compile-time constant.
    unsigned known; // The constant count; UINT_MAX when not known.
    LValue value; // Int32 LValue holding the count (null until populated).
};
11692 ArgumentsLength getArgumentsLength(InlineCallFrame* inlineCallFrame)
11693 {
11694 ArgumentsLength length;
11695
11696 if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
11697 length.known = inlineCallFrame->argumentCountIncludingThis - 1;
11698 length.isKnown = true;
11699 length.value = m_out.constInt32(length.known);
11700 } else {
11701 length.known = UINT_MAX;
11702 length.isKnown = false;
11703
11704 VirtualRegister argumentCountRegister;
11705 if (!inlineCallFrame)
11706 argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCount);
11707 else
11708 argumentCountRegister = inlineCallFrame->argumentCountRegister;
11709 length.value = m_out.sub(m_out.load32(payloadFor(argumentCountRegister)), m_out.int32One);
11710 }
11711
11712 return length;
11713 }
11714
// Convenience overload: use the inline call frame of the current node's
// semantic origin.
ArgumentsLength getArgumentsLength()
{
    return getArgumentsLength(m_node->origin.semantic.inlineCallFrame());
}
11719
11720 LValue getCurrentCallee()
11721 {
11722 if (InlineCallFrame* frame = m_node->origin.semantic.inlineCallFrame()) {
11723 if (frame->isClosureCall)
11724 return m_out.loadPtr(addressFor(frame->calleeRecovery.virtualRegister()));
11725 return weakPointer(frame->calleeRecovery.constant().asCell());
11726 }
11727 return m_out.loadPtr(addressFor(CallFrameSlot::callee));
11728 }
11729
11730 LValue getArgumentsStart(InlineCallFrame* inlineCallFrame, unsigned offset = 0)
11731 {
11732 VirtualRegister start = AssemblyHelpers::argumentsStart(inlineCallFrame) + offset;
11733 return addressFor(start).value();
11734 }
11735
// Convenience overload: use the inline call frame of the current node's
// semantic origin.
LValue getArgumentsStart()
{
    return getArgumentsStart(m_node->origin.semantic.inlineCallFrame());
}
11740
// Speculation-checks that |structureDiscriminant| matches a member of |set|,
// OSR-exiting with |exitKind| otherwise. |weakStructureDiscriminant| maps a
// RegisteredStructure to the LValue to compare against, letting callers
// choose the representation (e.g. weak structure pointer vs. structure ID).
template<typename Functor>
void checkStructure(
    LValue structureDiscriminant, const FormattedValue& formattedValue, ExitKind exitKind,
    const RegisteredStructureSet& set, const Functor& weakStructureDiscriminant)
{
    // Empty set: the speculation can never hold, so this code is unreachable.
    if (set.isEmpty()) {
        terminate(exitKind);
        return;
    }

    // Single candidate: one equality check suffices.
    if (set.size() == 1) {
        speculate(
            exitKind, formattedValue, 0,
            m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set[0])));
        return;
    }

    // Multiple candidates: chain compare-and-branch blocks; a match jumps
    // straight to the continuation, and the final candidate is checked with a
    // speculation instead of a branch.
    LBasicBlock continuation = m_out.newBlock();

    LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
    for (unsigned i = 0; i < set.size() - 1; ++i) {
        LBasicBlock nextStructure = m_out.newBlock();
        m_out.branch(
            m_out.equal(structureDiscriminant, weakStructureDiscriminant(set[i])),
            unsure(continuation), unsure(nextStructure));
        m_out.appendTo(nextStructure);
    }

    speculate(
        exitKind, formattedValue, 0,
        m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set.last())));

    m_out.jump(continuation);
    m_out.appendTo(continuation, lastNext);
}
11776
// Converts a boxed value to int32. For NumberUse the value must be a number:
// int32s are unboxed directly and doubles go through doubleToInt32. For
// NotCellUse, any non-cell is accepted: numbers convert as above, |true|
// becomes 1, and every other non-cell (false, null, undefined) becomes 0.
LValue numberOrNotCellToInt32(Edge edge, LValue value)
{
    LBasicBlock intCase = m_out.newBlock();
    LBasicBlock notIntCase = m_out.newBlock();
    // These extra blocks are only needed for the NotCellUse variant.
    LBasicBlock doubleCase = 0;
    LBasicBlock notNumberCase = 0;
    if (edge.useKind() == NotCellUse) {
        doubleCase = m_out.newBlock();
        notNumberCase = m_out.newBlock();
    }
    LBasicBlock continuation = m_out.newBlock();

    Vector<ValueFromBlock> results;

    m_out.branch(isNotInt32(value), unsure(notIntCase), unsure(intCase));

    LBasicBlock lastNext = m_out.appendTo(intCase, notIntCase);
    results.append(m_out.anchor(unboxInt32(value)));
    m_out.jump(continuation);

    if (edge.useKind() == NumberUse) {
        // Anything that is not an int32 must be a double; exit otherwise.
        m_out.appendTo(notIntCase, continuation);
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBytecodeNumber, isCellOrMisc(value));
        results.append(m_out.anchor(doubleToInt32(unboxDouble(value))));
        m_out.jump(continuation);
    } else {
        m_out.appendTo(notIntCase, doubleCase);
        m_out.branch(
            isCellOrMisc(value, provenType(edge)), unsure(notNumberCase), unsure(doubleCase));

        m_out.appendTo(doubleCase, notNumberCase);
        results.append(m_out.anchor(doubleToInt32(unboxDouble(value))));
        m_out.jump(continuation);

        m_out.appendTo(notNumberCase, continuation);

        // Cells are not allowed under NotCellUse; exit if we see one.
        FTL_TYPE_CHECK(jsValueValue(value), edge, ~SpecCellCheck, isCell(value));

        // Remaining values are misc (booleans, null, undefined): only |true|
        // converts to 1; everything else converts to 0.
        LValue specialResult = m_out.select(
            m_out.equal(value, m_out.constInt64(JSValue::encode(jsBoolean(true)))),
            m_out.int32One, m_out.int32Zero);
        results.append(m_out.anchor(specialResult));
        m_out.jump(continuation);
    }

    m_out.appendTo(continuation, lastNext);
    return m_out.phi(Int32, results);
}
11825
// Loads the boxed value of the named property at |offset|. |storage| must be
// the appropriate base for the offset (the object itself for inline offsets,
// the butterfly otherwise — see storageForTransition).
LValue loadProperty(LValue storage, unsigned identifierNumber, PropertyOffset offset)
{
    return m_out.load64(addressOfProperty(storage, identifierNumber, offset));
}
11830
// Stores a boxed value into the named property at |offset|. |storage| must be
// the appropriate base for the offset, as with loadProperty.
void storeProperty(
    LValue value, LValue storage, unsigned identifierNumber, PropertyOffset offset)
{
    m_out.store64(value, addressOfProperty(storage, identifierNumber, offset));
}
11836
// Computes the address of the named property slot, tagged with the
// identifier's abstract heap so alias analysis can distinguish properties.
TypedPointer addressOfProperty(
    LValue storage, unsigned identifierNumber, PropertyOffset offset)
{
    return m_out.address(
        m_heaps.properties[identifierNumber], storage, offsetRelativeToBase(offset));
}
11843
    // Returns the property storage to write into after transitioning from
    // previousStructure to nextStructure: the object itself for inline offsets,
    // the existing butterfly when out-of-line capacity is unchanged, or a freshly
    // (re)allocated butterfly otherwise. In the (re)allocation case the structure
    // is nuked before the new butterfly is installed (see
    // nukeStructureAndSetButterfly) -- presumably so that a concurrent observer
    // never sees a structure/butterfly mismatch; confirm against that helper.
    LValue storageForTransition(
        LValue object, PropertyOffset offset,
        Structure* previousStructure, Structure* nextStructure)
    {
        // Inline properties live directly in the object; no butterfly needed.
        if (isInlineOffset(offset))
            return object;

        // Same out-of-line capacity: reuse the existing butterfly.
        if (previousStructure->outOfLineCapacity() == nextStructure->outOfLineCapacity())
            return m_out.loadPtr(object, m_heaps.JSObject_butterfly);

        LValue result;
        if (!previousStructure->outOfLineCapacity())
            result = allocatePropertyStorage(object, previousStructure);
        else {
            result = reallocatePropertyStorage(
                object, m_out.loadPtr(object, m_heaps.JSObject_butterfly),
                previousStructure, nextStructure);
        }

        nukeStructureAndSetButterfly(result, object);
        return result;
    }
11866
    // Fills the butterfly's element words in [begin, end) with the hole value
    // appropriate for the indexing type: PNaN for double arrays, the empty
    // JSValue otherwise. When the indexing type is a compile-time constant the
    // hole and abstract heap are chosen statically; otherwise the hole is
    // selected at runtime and the root heap is used.
    void initializeArrayElements(LValue indexingType, LValue begin, LValue end, LValue butterfly)
    {

        // Nothing to initialize when the range is trivially empty.
        if (begin == end)
            return;

        if (indexingType->hasInt32()) {
            // Indexing type known at compile time.
            IndexingType rawIndexingType = static_cast<IndexingType>(indexingType->asInt32());
            if (hasUndecided(rawIndexingType))
                return;
            IndexedAbstractHeap* heap = m_heaps.forIndexingType(rawIndexingType);
            DFG_ASSERT(m_graph, m_node, heap);

            LValue hole;
            if (hasDouble(rawIndexingType))
                hole = m_out.constInt64(bitwise_cast<int64_t>(PNaN));
            else
                hole = m_out.constInt64(JSValue::encode(JSValue()));

            splatWords(butterfly, begin, end, hole, heap->atAnyIndex());
        } else {
            // Indexing type only known at runtime: pick the hole by testing the
            // shape bits, and fall back to the root heap since we cannot name a
            // more precise one.
            LValue hole = m_out.select(
                m_out.equal(m_out.bitAnd(indexingType, m_out.constInt32(IndexingShapeMask)), m_out.constInt32(DoubleShape)),
                m_out.constInt64(bitwise_cast<int64_t>(PNaN)),
                m_out.constInt64(JSValue::encode(JSValue())));
            splatWords(butterfly, begin, end, hole, m_heaps.root);
        }
    }
11895
    // Stores 'value' into the 64-bit words base[begin .. end). If both bounds are
    // compile-time integer constants and the span is small, emits a fully
    // unrolled run of stores; otherwise emits a loop.
    void splatWords(LValue base, LValue begin, LValue end, LValue value, const AbstractHeap& heap)
    {
        const uint64_t unrollingLimit = 10;
        if (begin->hasInt() && end->hasInt()) {
            uint64_t beginConst = static_cast<uint64_t>(begin->asInt());
            uint64_t endConst = static_cast<uint64_t>(end->asInt());

            // Small constant span: unroll into straight-line stores.
            if (endConst - beginConst <= unrollingLimit) {
                for (uint64_t i = beginConst; i < endConst; ++i) {
                    LValue pointer = m_out.add(base, m_out.constIntPtr(i * sizeof(uint64_t)));
                    m_out.store64(value, TypedPointer(heap, pointer));
                }
                return;
            }
        }

        LBasicBlock initLoop = m_out.newBlock();
        LBasicBlock initDone = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(initLoop);

        // The index phi counts DOWN from 'end' to 'begin' purely to control the
        // trip count, while the pointer phi independently walks UP from
        // base + begin * 8, advancing 8 bytes per iteration.
        ValueFromBlock originalIndex = m_out.anchor(end);
        ValueFromBlock originalPointer = m_out.anchor(
            m_out.add(base, m_out.shl(m_out.signExt32ToPtr(begin), m_out.constInt32(3))));
        m_out.branch(m_out.notEqual(end, begin), unsure(initLoop), unsure(initDone));

        m_out.appendTo(initLoop, initDone);
        LValue index = m_out.phi(Int32, originalIndex);
        LValue pointer = m_out.phi(pointerType(), originalPointer);

        m_out.store64(value, TypedPointer(heap, pointer));

        LValue nextIndex = m_out.sub(index, m_out.int32One);
        m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
        m_out.addIncomingToPhi(pointer, m_out.anchor(m_out.add(pointer, m_out.intPtrEight)));
        m_out.branch(
            m_out.notEqual(nextIndex, begin), unsure(initLoop), unsure(initDone));

        m_out.appendTo(initDone, lastNext);
    }
11936
    // Allocates a fresh out-of-line property butterfly with the initial capacity.
    // Structures that could have an indexing header must take the VM-call
    // ("complex") path; otherwise we allocate inline and zero the new out-of-line
    // slots, which live at negative word indices relative to the butterfly.
    LValue allocatePropertyStorage(LValue object, Structure* previousStructure)
    {
        if (previousStructure->couldHaveIndexingHeader()) {
            return vmCall(
                pointerType(),
                m_out.operation(operationAllocateComplexPropertyStorageWithInitialCapacity),
                m_callFrame, object);
        }

        LValue result = allocatePropertyStorageWithSizeImpl(initialOutOfLineCapacity);

        // Zero butterfly-relative word indices [-initialOutOfLineCapacity - 1, -1),
        // i.e. all of the newly allocated out-of-line property slots.
        splatWords(
            result,
            m_out.constInt32(-initialOutOfLineCapacity - 1), m_out.constInt32(-1),
            m_out.int64Zero, m_heaps.properties.atAnyNumber());

        return result;
    }
11955
11956 LValue reallocatePropertyStorage(
11957 LValue object, LValue oldStorage, Structure* previous, Structure* next)
11958 {
11959 size_t oldSize = previous->outOfLineCapacity();
11960 size_t newSize = oldSize * outOfLineGrowthFactor;
11961
11962 ASSERT_UNUSED(next, newSize == next->outOfLineCapacity());
11963
11964 if (previous->couldHaveIndexingHeader()) {
11965 LValue newAllocSize = m_out.constIntPtr(newSize);
11966 return vmCall(pointerType(), m_out.operation(operationAllocateComplexPropertyStorage), m_callFrame, object, newAllocSize);
11967 }
11968
11969 LValue result = allocatePropertyStorageWithSizeImpl(newSize);
11970
11971 ptrdiff_t headerSize = -sizeof(IndexingHeader) - sizeof(void*);
11972 ptrdiff_t endStorage = headerSize - static_cast<ptrdiff_t>(oldSize * sizeof(JSValue));
11973
11974 for (ptrdiff_t offset = headerSize; offset > endStorage; offset -= sizeof(void*)) {
11975 LValue loaded =
11976 m_out.loadPtr(m_out.address(m_heaps.properties.atAnyNumber(), oldStorage, offset));
11977 m_out.storePtr(loaded, m_out.address(m_heaps.properties.atAnyNumber(), result, offset));
11978 }
11979
11980 splatWords(
11981 result,
11982 m_out.constInt32(-newSize - 1), m_out.constInt32(-oldSize - 1),
11983 m_out.int64Zero, m_heaps.properties.atAnyNumber());
11984
11985 return result;
11986 }
11987
    // Allocates sizeInValues out-of-line property slots. The fast path bump-
    // allocates from the JSValue gigacage auxiliary space and returns a butterfly
    // pointer positioned past the payload plus indexing header; the slow path
    // lazily emits a call to the matching simple-allocation operation.
    LValue allocatePropertyStorageWithSizeImpl(size_t sizeInValues)
    {
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        size_t sizeInBytes = sizeInValues * sizeof(JSValue);
        // May be null if no allocator exists for this size; allocateHeapCell
        // handles that by taking the slow path.
        Allocator allocator = vm().jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(sizeInBytes, AllocatorForMode::AllocatorIfExists);
        LValue startOfStorage = allocateHeapCell(
            m_out.constIntPtr(allocator.localAllocator()), slowPath);
        // The butterfly pointer points just past the allocation's payload plus
        // indexing header; property slots sit at negative offsets from it.
        ValueFromBlock fastButterfly = m_out.anchor(
            m_out.add(m_out.constIntPtr(sizeInBytes + sizeof(IndexingHeader)), startOfStorage));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        LValue slowButterflyValue;
        VM& vm = this->vm();
        if (sizeInValues == initialOutOfLineCapacity) {
            // Initial capacity has a dedicated operation that needs no size argument.
            slowButterflyValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationAllocateSimplePropertyStorageWithInitialCapacity,
                        locations[0].directGPR());
                });
        } else {
            slowButterflyValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationAllocateSimplePropertyStorage, locations[0].directGPR(),
                        CCallHelpers::TrustedImmPtr(sizeInValues));
                });
        }
        ValueFromBlock slowButterfly = m_out.anchor(slowButterflyValue);

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        return m_out.phi(pointerType(), fastButterfly, slowButterfly);
    }
12030
    // Emits a patchpoint-based inline cache for a GetById-family access and
    // returns the loaded JSValue (as Int64). The IC fast path is generated
    // inline; the late path links the IC's slow-path jump to a call of the
    // appropriate optimizing operation, which can repatch the IC.
    LValue getById(LValue base, AccessType type)
    {
        Node* node = m_node;
        UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()];

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        // The IC machinery relies on the tag registers holding their canonical values.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));

        // FIXME: If this is a GetByIdFlush/GetByIdDirectFlush, we might get some performance boost if we claim that it
        // clobbers volatile registers late. It's not necessary for correctness, though, since the
        // IC code is super smart about saving registers.
        // https://bugs.webkit.org/show_bug.cgi?id=152848

        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // This is the exit for call IC's created by the getById for getters. We don't have
                // to do anything weird other than call this, since it will associate the exit with
                // the callsite index.
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // params[0] is the patchpoint result (destination), params[1] the base.
                auto generator = Box<JITGetByIdGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), uid, JSValueRegs(params[1].gpr()),
                    JSValueRegs(params[0].gpr()), type);

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        J_JITOperation_ESsiJI optimizationFunction = appropriateOptimizingGetByIdFunction(type);

                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), optimizationFunction, params[0].gpr(),
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                            CCallHelpers::TrustedImmPtr(uid)).call();
                        // Rejoin the fast path after the slow call.
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });

        return patchpoint;
    }
12102
    // Like getById, but for GetByIdWithThis: the IC takes both the base and an
    // explicit 'this' value. params[0] is the result, params[1] the base,
    // params[2] the 'this' value.
    LValue getByIdWithThis(LValue base, LValue thisValue)
    {
        Node* node = m_node;
        UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()];

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        patchpoint->appendSomeRegister(thisValue);
        // The IC machinery relies on the tag registers holding their canonical values.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));

        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // This is the exit for call IC's created by the getById for getters. We don't have
                // to do anything weird other than call this, since it will associate the exit with
                // the callsite index.
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                auto generator = Box<JITGetByIdWithThisGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), uid, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), AccessType::GetWithThis);

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        J_JITOperation_ESsiJJI optimizationFunction = operationGetByIdWithThisOptimize;

                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), optimizationFunction, params[0].gpr(),
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                            params[2].gpr(), CCallHelpers::TrustedImmPtr(uid)).call();
                        // Rejoin the fast path after the slow call.
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });

        return patchpoint;
    }
12170
12171 LValue isFastTypedArray(LValue object)
12172 {
12173 return m_out.equal(
12174 m_out.load32(object, m_heaps.JSArrayBufferView_mode),
12175 m_out.constInt32(FastTypedArray));
12176 }
12177
12178 TypedPointer baseIndex(IndexedAbstractHeap& heap, LValue storage, LValue index, Edge edge, ptrdiff_t offset = 0)
12179 {
12180 return m_out.baseIndex(
12181 heap, storage, m_out.zeroExtPtr(index), provenValue(edge), offset);
12182 }
12183
    // Lowers a binary comparison node by dispatching on the node's use kinds:
    // integer and double cases inline via the supplied functors; StringIdent
    // compares atomized string pointers through a side-effect-free call; full
    // String comparison and the Untyped fallback go through VM calls.
    template<typename IntFunctor, typename DoubleFunctor>
    void compare(
        const IntFunctor& intFunctor, const DoubleFunctor& doubleFunctor,
        C_JITOperation_TT stringIdentFunction,
        C_JITOperation_B_EJssJss stringFunction,
        S_JITOperation_EJJ fallbackFunction)
    {
        if (m_node->isBinaryUseKind(Int32Use)) {
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());
            setBoolean(intFunctor(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(Int52RepUse)) {
            // Let child1 pick whichever Int52 representation it already has,
            // then force child2 into the same kind so the compare is well-typed.
            Int52Kind kind;
            LValue left = lowWhicheverInt52(m_node->child1(), kind);
            LValue right = lowInt52(m_node->child2(), kind);
            setBoolean(intFunctor(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(DoubleRepUse)) {
            LValue left = lowDouble(m_node->child1());
            LValue right = lowDouble(m_node->child2());
            setBoolean(doubleFunctor(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(StringIdentUse)) {
            LValue left = lowStringIdent(m_node->child1());
            LValue right = lowStringIdent(m_node->child2());
            setBoolean(m_out.callWithoutSideEffects(Int32, stringIdentFunction, left, right));
            return;
        }

        if (m_node->isBinaryUseKind(StringUse)) {
            LValue left = lowCell(m_node->child1());
            LValue right = lowCell(m_node->child2());
            speculateString(m_node->child1(), left);
            speculateString(m_node->child2(), right);

            // General string comparison may resolve ropes etc., so it must be a
            // full VM call.
            LValue result = vmCall(
                Int32, m_out.operation(stringFunction),
                m_callFrame, left, right);
            setBoolean(result);
            return;
        }

        DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse), m_node->child1().useKind(), m_node->child2().useKind());
        nonSpeculativeCompare(intFunctor, fallbackFunction);
    }
12236
    // Lowers StringSlice(string, start[, end]). Ropes go straight to the
    // operationStringSlice VM call. Otherwise the [start, end] range is clamped
    // to the string length; an empty span yields the empty string, a span of one
    // character is served from the VM's single-character string table (or a VM
    // call for characters above maxSingleCharacterString), and anything longer
    // calls operationStringSubstr.
    void compileStringSlice()
    {
        LBasicBlock lengthCheckCase = m_out.newBlock();
        LBasicBlock emptyCase = m_out.newBlock();
        LBasicBlock notEmptyCase = m_out.newBlock();
        LBasicBlock oneCharCase = m_out.newBlock();
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock bitsContinuation = m_out.newBlock();
        LBasicBlock bigCharacter = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock ropeSlowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue string = lowString(m_node->child1());
        LValue start = lowInt32(m_node->child2());
        LValue end = nullptr;
        if (m_node->child3())
            end = lowInt32(m_node->child3());
        else
            end = m_out.constInt32(std::numeric_limits<int32_t>::max()); // Missing end means "to the end of the string".
        m_out.branch(isRopeString(string, m_node->child1()), rarely(ropeSlowCase), usually(lengthCheckCase));

        LBasicBlock lastNext = m_out.appendTo(lengthCheckCase, emptyCase);
        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
        LValue length = m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length);
        // Clamp start/end to [0, length], resolving negative indices.
        auto range = populateSliceRange(start, end, length);
        LValue from = range.first;
        LValue to = range.second;
        LValue span = m_out.sub(to, from);
        m_out.branch(m_out.lessThanOrEqual(span, m_out.int32Zero), unsure(emptyCase), unsure(notEmptyCase));

        Vector<ValueFromBlock, 5> results;

        m_out.appendTo(emptyCase, notEmptyCase);
        results.append(m_out.anchor(weakPointer(jsEmptyString(&vm()))));
        m_out.jump(continuation);

        m_out.appendTo(notEmptyCase, oneCharCase);
        m_out.branch(m_out.equal(span, m_out.int32One), unsure(oneCharCase), unsure(slowCase));

        // Single-character result: load the character, dispatching on 8- vs
        // 16-bit backing storage.
        m_out.appendTo(oneCharCase, is8Bit);
        LValue storage = m_out.loadPtr(stringImpl, m_heaps.StringImpl_data);
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        m_out.appendTo(is8Bit, is16Bit);
        ValueFromBlock char8Bit = m_out.anchor(m_out.load8ZeroExt32(m_out.baseIndex(m_heaps.characters8, storage, m_out.zeroExtPtr(from))));
        m_out.jump(bitsContinuation);

        m_out.appendTo(is16Bit, bigCharacter);
        LValue char16BitValue = m_out.load16ZeroExt32(m_out.baseIndex(m_heaps.characters16, storage, m_out.zeroExtPtr(from)));
        ValueFromBlock char16Bit = m_out.anchor(char16BitValue);
        // Characters beyond the small-strings table need a VM call.
        m_out.branch(
            m_out.above(char16BitValue, m_out.constInt32(maxSingleCharacterString)),
            rarely(bigCharacter), usually(bitsContinuation));

        m_out.appendTo(bigCharacter, bitsContinuation);
        results.append(m_out.anchor(vmCall(
            Int64, m_out.operation(operationSingleCharacterString),
            m_callFrame, char16BitValue)));
        m_out.jump(continuation);

        // Common single-character path: index into the small-strings table.
        m_out.appendTo(bitsContinuation, slowCase);
        LValue character = m_out.phi(Int32, char8Bit, char16Bit);
        LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());
        results.append(m_out.anchor(m_out.loadPtr(m_out.baseIndex(
            m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(character)))));
        m_out.jump(continuation);

        // Multi-character slice with an already-clamped range.
        m_out.appendTo(slowCase, ropeSlowCase);
        results.append(m_out.anchor(vmCall(pointerType(), m_out.operation(operationStringSubstr), m_callFrame, string, from, span)));
        m_out.jump(continuation);

        // Rope: the VM call gets the raw start/end and does its own clamping.
        m_out.appendTo(ropeSlowCase, continuation);
        results.append(m_out.anchor(vmCall(pointerType(), m_out.operation(operationStringSlice), m_callFrame, string, start, end)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), results));
    }
12321
    // Lowers ToLowerCase. Fast path: a non-rope 8-bit string whose characters
    // are all ASCII and already lower-case can be returned unchanged. The loop
    // scans for the first character that breaks that assumption; on finding one
    // (or for ropes/16-bit strings) we call operationToLowerCase, passing the
    // index at which scanning stopped so the VM can skip the verified prefix.
    void compileToLowerCase()
    {
        LBasicBlock notRope = m_out.newBlock();
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock loopTop = m_out.newBlock();
        LBasicBlock loopBody = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue string = lowString(m_node->child1());
        ValueFromBlock startIndex = m_out.anchor(m_out.constInt32(0));
        ValueFromBlock startIndexForCall = m_out.anchor(m_out.constInt32(0));
        m_out.branch(isRopeString(string, m_node->child1()),
            unsure(slowPath), unsure(notRope));

        LBasicBlock lastNext = m_out.appendTo(notRope, is8Bit);
        LValue impl = m_out.loadPtr(string, m_heaps.JSString_value);
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(impl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(slowPath), unsure(is8Bit));

        m_out.appendTo(is8Bit, loopTop);
        LValue length = m_out.load32(impl, m_heaps.StringImpl_length);
        LValue buffer = m_out.loadPtr(impl, m_heaps.StringImpl_data);
        // If the scan completes, the original string is already lower-case.
        ValueFromBlock fastResult = m_out.anchor(string);
        m_out.jump(loopTop);

        m_out.appendTo(loopTop, loopBody);
        LValue index = m_out.phi(Int32, startIndex);
        ValueFromBlock indexFromBlock = m_out.anchor(index);
        m_out.branch(m_out.below(index, length),
            unsure(loopBody), unsure(continuation));

        m_out.appendTo(loopBody, slowPath);

        // FIXME: Strings needs to be caged.
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        LValue byte = m_out.load8ZeroExt32(m_out.baseIndex(m_heaps.characters8, buffer, m_out.zeroExtPtr(index)));
        // "Bad" characters are anything non-ASCII or in 'A'..'Z'.
        LValue isInvalidAsciiRange = m_out.bitAnd(byte, m_out.constInt32(~0x7F));
        LValue isUpperCase = m_out.belowOrEqual(m_out.sub(byte, m_out.constInt32('A')), m_out.constInt32('Z' - 'A'));
        LValue isBadCharacter = m_out.bitOr(isInvalidAsciiRange, isUpperCase);
        m_out.addIncomingToPhi(index, m_out.anchor(m_out.add(index, m_out.int32One)));
        m_out.branch(isBadCharacter, unsure(slowPath), unsure(loopTop));

        m_out.appendTo(slowPath, continuation);
        // Index 0 when arriving from the rope/16-bit checks; the scan position
        // when arriving from the loop.
        LValue slowPathIndex = m_out.phi(Int32, startIndexForCall, indexFromBlock);
        ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), m_out.operation(operationToLowerCase), m_callFrame, string, slowPathIndex));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
12376
    // Lowers NumberToStringWithRadix as a VM call, dispatching on the numeric
    // representation of child1. When the radix is a constant already known to be
    // in [2, 36], the *WithValidRadix operation is used so the callee can skip
    // its range check (and the associated throw path).
    void compileNumberToStringWithRadix()
    {
        bool validRadixIsGuaranteed = false;
        if (m_node->child2()->isInt32Constant()) {
            int32_t radix = m_node->child2()->asInt32();
            if (radix >= 2 && radix <= 36)
                validRadixIsGuaranteed = true;
        }

        switch (m_node->child1().useKind()) {
        case Int32Use:
            setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationInt32ToStringWithValidRadix : operationInt32ToString), m_callFrame, lowInt32(m_node->child1()), lowInt32(m_node->child2())));
            break;
        case Int52RepUse:
            setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationInt52ToStringWithValidRadix : operationInt52ToString), m_callFrame, lowStrictInt52(m_node->child1()), lowInt32(m_node->child2())));
            break;
        case DoubleRepUse:
            setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationDoubleToStringWithValidRadix : operationDoubleToString), m_callFrame, lowDouble(m_node->child1()), lowInt32(m_node->child2())));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
12400
12401 void compileNumberToStringWithValidRadixConstant()
12402 {
12403 switch (m_node->child1().useKind()) {
12404 case Int32Use:
12405 setJSValue(vmCall(pointerType(), m_out.operation(operationInt32ToStringWithValidRadix), m_callFrame, lowInt32(m_node->child1()), m_out.constInt32(m_node->validRadixConstant())));
12406 break;
12407 case Int52RepUse:
12408 setJSValue(vmCall(pointerType(), m_out.operation(operationInt52ToStringWithValidRadix), m_callFrame, lowStrictInt52(m_node->child1()), m_out.constInt32(m_node->validRadixConstant())));
12409 break;
12410 case DoubleRepUse:
12411 setJSValue(vmCall(pointerType(), m_out.operation(operationDoubleToStringWithValidRadix), m_callFrame, lowDouble(m_node->child1()), m_out.constInt32(m_node->validRadixConstant())));
12412 break;
12413 default:
12414 RELEASE_ASSERT_NOT_REACHED();
12415 }
12416 }
12417
12418 void compileResolveScopeForHoistingFuncDeclInEval()
12419 {
12420 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
12421 setJSValue(vmCall(pointerType(), m_out.operation(operationResolveScopeForHoistingFuncDeclInEval), m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(uid)));
12422 }
12423
12424 void compileResolveScope()
12425 {
12426 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
12427 setJSValue(vmCall(pointerType(), m_out.operation(operationResolveScope),
12428 m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(uid)));
12429 }
12430
12431 void compileGetDynamicVar()
12432 {
12433 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
12434 setJSValue(vmCall(Int64, m_out.operation(operationGetDynamicVar),
12435 m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(uid), m_out.constInt32(m_node->getPutInfo())));
12436 }
12437
    // Lowers PutDynamicVar as a VM call; strict vs. sloppy mode selects the
    // operation. NOTE(review): the call is typed Void yet its value is passed to
    // setJSValue() -- PutDynamicVar produces no meaningful result, so presumably
    // the node's result is never consumed; confirm this is intentional.
    void compilePutDynamicVar()
    {
        UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
        setJSValue(vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutDynamicVarStrict : operationPutDynamicVarNonStrict),
            m_callFrame, lowCell(m_node->child1()), lowJSValue(m_node->child2()), m_out.constIntPtr(uid), m_out.constInt32(m_node->getPutInfo())));
    }
12444
    // Lowers the Unreachable opcode by emitting a deterministic crash.
    void compileUnreachable()
    {
        // It's so tempting to assert that AI has proved that this is unreachable. But that's
        // simply not a requirement of the Unreachable opcode at all. If you emit an opcode that
        // *you* know will not return, then it's fine to end the basic block with Unreachable
        // after that opcode. You don't have to also prove to AI that your opcode does not return.
        // Hence, there is nothing to do here but emit code that will crash, so that we catch
        // cases where you said Unreachable but you lied.
        //
        // It's also also worth noting that some clients emit this opcode because they're not 100% sure
        // if the code is unreachable, but they would really prefer if we crashed rather than kept going
        // if it did turn out to be reachable. Hence, this needs to deterministically crash.

        crash();
    }
12460
12461 void compileCheckSubClass()
12462 {
12463 LValue cell = lowCell(m_node->child1());
12464
12465 const ClassInfo* classInfo = m_node->classInfo();
12466 if (!classInfo->checkSubClassSnippet) {
12467 LBasicBlock loop = m_out.newBlock();
12468 LBasicBlock parentClass = m_out.newBlock();
12469 LBasicBlock continuation = m_out.newBlock();
12470
12471 LValue structure = loadStructure(cell);
12472 LValue classInfo = m_out.loadPtr(structure, m_heaps.Structure_classInfo);
12473 ValueFromBlock otherAtStart = m_out.anchor(classInfo);
12474 m_out.jump(loop);
12475
12476 LBasicBlock lastNext = m_out.appendTo(loop, parentClass);
12477 LValue other = m_out.phi(pointerType(), otherAtStart);
12478 m_out.branch(m_out.equal(other, m_out.constIntPtr(classInfo)), unsure(continuation), unsure(parentClass));
12479
12480 m_out.appendTo(parentClass, continuation);
12481 LValue parent = m_out.loadPtr(other, m_heaps.ClassInfo_parentClass);
12482 speculate(BadType, jsValueValue(cell), m_node->child1().node(), m_out.isNull(parent));
12483 m_out.addIncomingToPhi(other, m_out.anchor(parent));
12484 m_out.jump(loop);
12485
12486 m_out.appendTo(continuation, lastNext);
12487 return;
12488 }
12489
12490 RefPtr<Snippet> domJIT = classInfo->checkSubClassSnippet();
12491 PatchpointValue* patchpoint = m_out.patchpoint(Void);
12492 patchpoint->appendSomeRegister(cell);
12493 patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
12494 patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
12495
12496 NodeOrigin origin = m_origin;
12497 unsigned osrExitArgumentOffset = patchpoint->numChildren();
12498 OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(jsValueValue(cell), m_node->child1().node());
12499 patchpoint->appendColdAnys(buildExitArguments(exitDescriptor, origin.forExit, jsValueValue(cell)));
12500
12501 patchpoint->numGPScratchRegisters = domJIT->numGPScratchRegisters;
12502 patchpoint->numFPScratchRegisters = domJIT->numFPScratchRegisters;
12503 patchpoint->clobber(RegisterSet::macroScratchRegisters());
12504
12505 State* state = &m_ftlState;
12506 Node* node = m_node;
12507 JSValue child1Constant = m_state.forNode(m_node->child1()).value();
12508
12509 patchpoint->setGenerator(
12510 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
12511 AllowMacroScratchRegisterUsage allowScratch(jit);
12512
12513 Vector<GPRReg> gpScratch;
12514 Vector<FPRReg> fpScratch;
12515 Vector<SnippetParams::Value> regs;
12516
12517 regs.append(SnippetParams::Value(params[0].gpr(), child1Constant));
12518
12519 for (unsigned i = 0; i < domJIT->numGPScratchRegisters; ++i)
12520 gpScratch.append(params.gpScratch(i));
12521
12522 for (unsigned i = 0; i < domJIT->numFPScratchRegisters; ++i)
12523 fpScratch.append(params.fpScratch(i));
12524
12525 RefPtr<OSRExitHandle> handle = exitDescriptor->emitOSRExitLater(*state, BadType, origin, params, osrExitArgumentOffset);
12526
12527 SnippetParams domJITParams(*state, params, node, nullptr, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
12528 CCallHelpers::JumpList failureCases = domJIT->generator()->run(jit, domJITParams);
12529
12530 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
12531 linkBuffer.link(failureCases, linkBuffer.locationOf<NoPtrTag>(handle->label));
12532 });
12533 });
12534 patchpoint->effects = Effects::forCheck();
12535 }
12536
    // Lowers CallDOM: lowers each child according to the DOMJIT signature (the
    // first child is the 'this' cell; subsequent children follow
    // signature->arguments), then calls the signature's unsafe function through
    // a cast matching the argument count (this + up to two arguments).
    void compileCallDOM()
    {
        const DOMJIT::Signature* signature = m_node->signature();

        // FIXME: We should have a way to call functions with the vector of registers.
        // https://bugs.webkit.org/show_bug.cgi?id=163099
        Vector<LValue, JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS> operands;

        unsigned index = 0;
        DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, [&](Node*, Edge edge) {
            if (!index)
                operands.append(lowCell(edge)); // Child 0 is the 'this' object.
            else {
                // Remaining children are typed per the signature's argument list.
                switch (signature->arguments[index - 1]) {
                case SpecString:
                    operands.append(lowString(edge));
                    break;
                case SpecInt32Only:
                    operands.append(lowInt32(edge));
                    break;
                case SpecBoolean:
                    operands.append(lowBoolean(edge));
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                    break;
                }
            }
            ++index;
        });

        unsigned argumentCountIncludingThis = signature->argumentCount + 1;
        LValue result;
        assertIsTaggedWith(reinterpret_cast<void*>(signature->unsafeFunction), CFunctionPtrTag);
        // Pick the operation cast matching the arity; the callee's real
        // signature is erased behind unsafeFunction.
        switch (argumentCountIncludingThis) {
        case 1:
            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EP>(signature->unsafeFunction)), m_callFrame, operands[0]);
            break;
        case 2:
            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EPP>(signature->unsafeFunction)), m_callFrame, operands[0], operands[1]);
            break;
        case 3:
            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EPPP>(signature->unsafeFunction)), m_callFrame, operands[0], operands[1], operands[2]);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }

        setJSValue(result);
    }
12588
    // Lowers CallDOMGetter. When the node carries a DOMJIT snippet we inline it through
    // a B3 patchpoint so the snippet's fast path runs inside FTL code; otherwise we fall
    // back to calling the custom accessor getter directly.
    void compileCallDOMGetter()
    {
        DOMJIT::CallDOMGetterSnippet* domJIT = m_node->callDOMGetterData()->snippet;
        if (!domJIT) {
            // The following function is not an operation: we directly call a custom accessor getter.
            // Since the getter does not have code setting topCallFrame, as with ICs, we should set
            // topCallFrame on the caller side.
            m_out.storePtr(m_callFrame, m_out.absolute(&vm().topCallFrame));
            setJSValue(
                vmCall(Int64, m_out.operation(m_node->callDOMGetterData()->customAccessorGetter.retaggedExecutableAddress<CFunctionPtrTag>()),
                    m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(m_graph.identifiers()[m_node->callDOMGetterData()->identifierNumber])));
            return;
        }

        Edge& baseEdge = m_node->child1();
        LValue base = lowCell(baseEdge);
        // If abstract interpretation proved the base constant, hand that to the snippet.
        JSValue baseConstant = m_state.forNode(baseEdge).value();

        LValue globalObject;
        JSValue globalObjectConstant;
        if (domJIT->requireGlobalObject) {
            Edge& globalObjectEdge = m_node->child2();
            globalObject = lowCell(globalObjectEdge);
            globalObjectConstant = m_state.forNode(globalObjectEdge).value();
        }

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        if (domJIT->requireGlobalObject)
            patchpoint->appendSomeRegister(globalObject);
        // Keep the JSValue tag registers live in their canonical registers for the snippet.
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        // Reserve exactly the scratch registers the snippet declared it needs.
        patchpoint->numGPScratchRegisters = domJIT->numGPScratchRegisters;
        patchpoint->numFPScratchRegisters = domJIT->numFPScratchRegisters;
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;

        // Captured by value below: the generator lambda runs after this function returns.
        State* state = &m_ftlState;
        Node* node = m_node;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Vector<GPRReg> gpScratch;
                Vector<FPRReg> fpScratch;
                Vector<SnippetParams::Value> regs;

                // params[0] is the result register; params[1] is the base; params[2]
                // (when present) is the global object.
                regs.append(JSValueRegs(params[0].gpr()));
                regs.append(SnippetParams::Value(params[1].gpr(), baseConstant));
                if (domJIT->requireGlobalObject)
                    regs.append(SnippetParams::Value(params[2].gpr(), globalObjectConstant));

                for (unsigned i = 0; i < domJIT->numGPScratchRegisters; ++i)
                    gpScratch.append(params.gpScratch(i));

                for (unsigned i = 0; i < domJIT->numFPScratchRegisters; ++i)
                    fpScratch.append(params.fpScratch(i));

                Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                SnippetParams domJITParams(*state, params, node, exceptions, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
                domJIT->generator()->run(jit, domJITParams);
            });
        // The snippet may do anything a call can do, so use call effects.
        patchpoint->effects = Effects::forCall();
        setJSValue(patchpoint);
    }
12655
    // Lowers FilterICStatus by forwarding the node to the abstract interpreter's
    // filterICStatus; no code is emitted for this node.
    void compileFilterICStatus()
    {
        m_interpreter.filterICStatus(m_node);
    }
12660
12661 LValue byteSwap32(LValue value)
12662 {
12663 // FIXME: teach B3 byteswap
12664 // https://bugs.webkit.org/show_bug.cgi?id=188759
12665
12666 RELEASE_ASSERT(value->type() == Int32);
12667 PatchpointValue* patchpoint = m_out.patchpoint(Int32);
12668 patchpoint->appendSomeRegister(value);
12669 patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
12670 jit.move(params[1].gpr(), params[0].gpr());
12671 jit.byteSwap32(params[0].gpr());
12672 });
12673 patchpoint->effects = Effects::none();
12674 return patchpoint;
12675 }
12676
12677 LValue byteSwap64(LValue value)
12678 {
12679 // FIXME: teach B3 byteswap
12680 // https://bugs.webkit.org/show_bug.cgi?id=188759
12681
12682 RELEASE_ASSERT(value->type() == Int64);
12683 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
12684 patchpoint->appendSomeRegister(value);
12685 patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
12686 jit.move(params[1].gpr(), params[0].gpr());
12687 jit.byteSwap64(params[0].gpr());
12688 });
12689 patchpoint->effects = Effects::none();
12690 return patchpoint;
12691 }
12692
    // Emits a diamond that branches on a runtime little-endian flag and runs
    // emitLittleEndianCode / emitBigEndianCode on the respective side. Each functor
    // either returns an LValue — both sides must then produce the same B3 type, and the
    // results are joined with a phi — or returns nullptr (pure side effects such as
    // stores), in which case this function also returns nullptr.
    template <typename F1, typename F2>
    LValue emitCodeBasedOnEndiannessBranch(LValue isLittleEndian, const F1& emitLittleEndianCode, const F2& emitBigEndianCode)
    {
        LType type;

        LBasicBlock bigEndianCase = m_out.newBlock();
        LBasicBlock littleEndianCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Only bit 0 of the flag is tested: zero means big-endian.
        m_out.branch(m_out.testIsZero32(isLittleEndian, m_out.constInt32(1)),
            unsure(bigEndianCase), unsure(littleEndianCase));

        LBasicBlock lastNext = m_out.appendTo(bigEndianCase, littleEndianCase);
        LValue bigEndianValue = emitBigEndianCode();
        type = bigEndianValue ? bigEndianValue->type() : Void;
        ValueFromBlock bigEndianResult = bigEndianValue ? m_out.anchor(bigEndianValue) : ValueFromBlock();
        m_out.jump(continuation);

        m_out.appendTo(littleEndianCase, continuation);
        LValue littleEndianValue = emitLittleEndianCode();
        ValueFromBlock littleEndianResult = littleEndianValue ? m_out.anchor(littleEndianValue) : ValueFromBlock();
        // The two sides must agree on whether they produce a value, and on its type.
        RELEASE_ASSERT((!littleEndianValue && !bigEndianValue) || type == littleEndianValue->type());
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        RELEASE_ASSERT(!!bigEndianResult == !!littleEndianResult);
        if (bigEndianResult)
            return m_out.phi(type, bigEndianResult, littleEndianResult);
        return nullptr;
    }
12723
    // Lowers DataViewGetInt / DataViewGetFloat: bounds-checks the access against the
    // view's length, loads from the (caged) backing store, and byte-swaps when the
    // requested endianness differs from the native (little-endian) layout.
    // child1 = DataView, child2 = index, child3 (optional) = runtime little-endian flag.
    void compileDataViewGet()
    {
        LValue dataView = lowDataViewObject(m_node->child1());
        LValue index = lowInt32(m_node->child2());
        LValue isLittleEndian = nullptr;
        if (m_node->child3())
            isLittleEndian = lowBoolean(m_node->child3());

        DataViewData data = m_node->dataViewData();

        // Bounds check. The arithmetic is done on zero-extended 64-bit values, so adding
        // (byteSize - 1) cannot wrap; an access whose last byte is out of bounds fails.
        LValue length = m_out.zeroExtPtr(m_out.load32NonNegative(dataView, m_heaps.JSArrayBufferView_length));
        LValue indexToCheck = m_out.zeroExtPtr(index);
        if (data.byteSize > 1)
            indexToCheck = m_out.add(indexToCheck, m_out.constInt64(data.byteSize - 1));
        speculate(OutOfBounds, noValue(), nullptr, m_out.aboveOrEqual(indexToCheck, length));

        LValue vector = caged(Gigacage::Primitive, m_out.loadPtr(dataView, m_heaps.JSArrayBufferView_vector), dataView);

        TypedPointer pointer(m_heaps.typedArrayProperties, m_out.add(vector, m_out.zeroExtPtr(index)));

        if (m_node->op() == DataViewGetInt) {
            switch (data.byteSize) {
            case 1:
                // A single byte has no endianness; only signedness matters.
                if (data.isSigned)
                    setInt32(m_out.load8SignExt32(pointer));
                else
                    setInt32(m_out.load8ZeroExt32(pointer));
                break;
            case 2: {
                auto emitLittleEndianLoad = [&] {
                    if (data.isSigned)
                        return m_out.load16SignExt32(pointer);
                    return m_out.load16ZeroExt32(pointer);
                };

                auto emitBigEndianLoad = [&] {
                    // Load unsigned, swap the two bytes in a patchpoint, then
                    // sign-extend afterwards if the element type is signed.
                    LValue val = m_out.load16ZeroExt32(pointer);

                    PatchpointValue* patchpoint = m_out.patchpoint(Int32);
                    patchpoint->appendSomeRegister(val);
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.move(params[1].gpr(), params[0].gpr());
                        jit.byteSwap16(params[0].gpr());
                        if (data.isSigned)
                            jit.signExtend16To32(params[0].gpr(), params[0].gpr());
                    });
                    patchpoint->effects = Effects::none();

                    return patchpoint;
                };

                // Statically-known endianness picks one path; otherwise branch at runtime.
                if (data.isLittleEndian == FalseTriState)
                    setInt32(emitBigEndianLoad());
                else if (data.isLittleEndian == TrueTriState)
                    setInt32(emitLittleEndianLoad());
                else
                    setInt32(emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianLoad, emitBigEndianLoad));

                break;
            }
            case 4: {
                LValue loadedValue = m_out.load32(pointer);

                if (data.isLittleEndian == FalseTriState)
                    loadedValue = byteSwap32(loadedValue);
                else if (data.isLittleEndian == MixedTriState) {
                    auto emitLittleEndianCode = [&] {
                        return loadedValue;
                    };
                    auto emitBigEndianCode = [&] {
                        return byteSwap32(loadedValue);
                    };

                    loadedValue = emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);
                }

                if (data.isSigned)
                    setInt32(loadedValue);
                else {
                    // An unsigned 32-bit value may not fit in int32, so produce a
                    // strict Int52 via zero extension.
                    setStrictInt52(m_out.zeroExt(loadedValue, Int64));
                }

                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        } else {
            // DataViewGetFloat: result is always a double.
            switch (data.byteSize) {
            case 4: {
                auto emitLittleEndianCode = [&] {
                    return m_out.floatToDouble(m_out.loadFloat(pointer));
                };

                auto emitBigEndianCode = [&] {
                    // Load the raw 32 bits, swap them in a GP scratch, move into an
                    // FP register, then widen float -> double.
                    LValue loadedValue = m_out.load32(pointer);
                    PatchpointValue* patchpoint = m_out.patchpoint(Double);
                    patchpoint->appendSomeRegister(loadedValue);
                    patchpoint->numGPScratchRegisters = 1;
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.move(params[1].gpr(), params.gpScratch(0));
                        jit.byteSwap32(params.gpScratch(0));
                        jit.move32ToFloat(params.gpScratch(0), params[0].fpr());
                        jit.convertFloatToDouble(params[0].fpr(), params[0].fpr());
                    });
                    patchpoint->effects = Effects::none();
                    return patchpoint;
                };

                if (data.isLittleEndian == TrueTriState)
                    setDouble(emitLittleEndianCode());
                else if (data.isLittleEndian == FalseTriState)
                    setDouble(emitBigEndianCode());
                else
                    setDouble(emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode));

                break;
            }
            case 8: {
                auto emitLittleEndianCode = [&] {
                    return m_out.loadDouble(pointer);
                };

                auto emitBigEndianCode = [&] {
                    // Swap the raw bits as an Int64, then reinterpret as double.
                    LValue loadedValue = m_out.load64(pointer);
                    loadedValue = byteSwap64(loadedValue);
                    return m_out.bitCast(loadedValue, Double);
                };

                if (data.isLittleEndian == TrueTriState)
                    setDouble(emitLittleEndianCode());
                else if (data.isLittleEndian == FalseTriState)
                    setDouble(emitBigEndianCode());
                else
                    setDouble(emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode));

                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }
    }
12866
    // Lowers DataViewSet: bounds-checks the access, lowers the value according to its
    // use kind, and stores it into the (caged) backing store, byte-swapping when the
    // requested endianness differs from native. Var-arg children:
    // 0 = DataView, 1 = index, 2 = value, 3 (optional) = runtime little-endian flag.
    void compileDataViewSet()
    {
        LValue dataView = lowDataViewObject(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue isLittleEndian = nullptr;
        if (m_graph.varArgChild(m_node, 3))
            isLittleEndian = lowBoolean(m_graph.varArgChild(m_node, 3));

        DataViewData data = m_node->dataViewData();

        // Bounds check, mirroring compileDataViewGet: 64-bit arithmetic means adding
        // (byteSize - 1) cannot wrap.
        LValue length = m_out.zeroExtPtr(m_out.load32NonNegative(dataView, m_heaps.JSArrayBufferView_length));
        LValue indexToCheck = m_out.zeroExtPtr(index);
        if (data.byteSize > 1)
            indexToCheck = m_out.add(indexToCheck, m_out.constInt64(data.byteSize - 1));
        speculate(OutOfBounds, noValue(), nullptr, m_out.aboveOrEqual(indexToCheck, length));

        Edge& valueEdge = m_graph.varArgChild(m_node, 2);
        LValue valueToStore;
        switch (valueEdge.useKind()) {
        case Int32Use:
            valueToStore = lowInt32(valueEdge);
            break;
        case DoubleRepUse:
            valueToStore = lowDouble(valueEdge);
            break;
        case Int52RepUse:
            valueToStore = lowStrictInt52(valueEdge);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        LValue vector = caged(Gigacage::Primitive, m_out.loadPtr(dataView, m_heaps.JSArrayBufferView_vector), dataView);
        TypedPointer pointer(m_heaps.typedArrayProperties, m_out.add(vector, m_out.zeroExtPtr(index)));

        if (data.isFloatingPoint) {
            if (data.byteSize == 4) {
                // Float32 store: narrow double -> float first.
                valueToStore = m_out.doubleToFloat(valueToStore);

                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.storeFloat(valueToStore, pointer);
                    return nullptr;
                };

                auto emitBigEndianCode = [&] () -> LValue {
                    // Move the float's raw bits into a GPR, swap them, store as int32.
                    PatchpointValue* patchpoint = m_out.patchpoint(Int32);
                    patchpoint->appendSomeRegister(valueToStore);
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.moveFloatTo32(params[1].fpr(), params[0].gpr());
                        jit.byteSwap32(params[0].gpr());
                    });
                    patchpoint->effects = Effects::none();
                    m_out.store32(patchpoint, pointer);
                    return nullptr;
                };

                // Statically-known endianness picks one path; otherwise branch at runtime.
                if (data.isLittleEndian == FalseTriState)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TrueTriState)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);

            } else {
                RELEASE_ASSERT(data.byteSize == 8);
                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.storeDouble(valueToStore, pointer);
                    return nullptr;
                };
                auto emitBigEndianCode = [&] () -> LValue {
                    // Reinterpret the double as Int64, swap, store.
                    m_out.store64(byteSwap64(m_out.bitCast(valueToStore, Int64)), pointer);
                    return nullptr;
                };

                if (data.isLittleEndian == FalseTriState)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TrueTriState)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);
            }
        } else {
            switch (data.byteSize) {
            case 1:
                // A single byte has no endianness.
                RELEASE_ASSERT(valueEdge.useKind() == Int32Use);
                m_out.store32As8(valueToStore, pointer);
                break;
            case 2: {
                RELEASE_ASSERT(valueEdge.useKind() == Int32Use);

                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.store32As16(valueToStore, pointer);
                    return nullptr;
                };
                auto emitBigEndianCode = [&] () -> LValue {
                    PatchpointValue* patchpoint = m_out.patchpoint(Int32);
                    patchpoint->appendSomeRegister(valueToStore);
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.move(params[1].gpr(), params[0].gpr());
                        jit.byteSwap16(params[0].gpr());
                    });
                    patchpoint->effects = Effects::none();

                    m_out.store32As16(patchpoint, pointer);
                    return nullptr;
                };

                if (data.isLittleEndian == FalseTriState)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TrueTriState)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);
                break;
            }
            case 4: {
                RELEASE_ASSERT(valueEdge.useKind() == Int32Use || valueEdge.useKind() == Int52RepUse);

                // An Int52 value was lowered as Int64; truncate to the low 32 bits.
                if (valueEdge.useKind() == Int52RepUse)
                    valueToStore = m_out.castToInt32(valueToStore);

                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.store32(valueToStore, pointer);
                    return nullptr;
                };
                auto emitBigEndianCode = [&] () -> LValue {
                    m_out.store32(byteSwap32(valueToStore), pointer);
                    return nullptr;
                };

                if (data.isLittleEndian == FalseTriState)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TrueTriState)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);

                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }
    }
13011
13012 void emitSwitchForMultiByOffset(LValue base, bool structuresChecked, Vector<SwitchCase, 2>& cases, LBasicBlock exit)
13013 {
13014 if (cases.isEmpty()) {
13015 m_out.jump(exit);
13016 return;
13017 }
13018
13019 if (structuresChecked) {
13020 std::sort(
13021 cases.begin(), cases.end(),
13022 [&] (const SwitchCase& a, const SwitchCase& b) -> bool {
13023 return a.value()->asInt() < b.value()->asInt();
13024 });
13025 SwitchCase last = cases.takeLast();
13026 m_out.switchInstruction(
13027 m_out.load32(base, m_heaps.JSCell_structureID), cases, last.target(), Weight(0));
13028 return;
13029 }
13030
13031 m_out.switchInstruction(
13032 m_out.load32(base, m_heaps.JSCell_structureID), cases, exit, Weight(0));
13033 }
13034
    // Lowers CompareEq where the right child is known to be an object and the left is
    // object-or-other (i.e. possibly null/undefined). The right side is speculated to
    // be a truthy object. If the left value is a cell it must also be a truthy object
    // and we compare pointers; otherwise the left must be "other" and the result is false.
    void compareEqObjectOrOtherToObject(Edge leftChild, Edge rightChild)
    {
        LValue rightCell = lowCell(rightChild);
        LValue leftValue = lowJSValue(leftChild, ManualOperandSpeculation);

        speculateTruthyObject(rightChild, rightCell, SpecObject);

        LBasicBlock leftCellCase = m_out.newBlock();
        LBasicBlock leftNotCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            isCell(leftValue, provenType(leftChild)),
            unsure(leftCellCase), unsure(leftNotCellCase));

        LBasicBlock lastNext = m_out.appendTo(leftCellCase, leftNotCellCase);
        // The filter includes ~SpecCellCheck because the non-cell possibilities are
        // handled by the other branch of this diamond.
        speculateTruthyObject(leftChild, leftValue, SpecObject | (~SpecCellCheck));
        ValueFromBlock cellResult = m_out.anchor(m_out.equal(rightCell, leftValue));
        m_out.jump(continuation);

        m_out.appendTo(leftNotCellCase, continuation);
        // A non-cell left operand must be "other" (null/undefined); it can never equal
        // an object, so this side contributes false.
        FTL_TYPE_CHECK(
            jsValueValue(leftValue), leftChild, SpecOther | SpecCellCheck, isNotOther(leftValue));
        ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, cellResult, notCellResult));
    }
13064
13065 void speculateTruthyObject(Edge edge, LValue cell, SpeculatedType filter)
13066 {
13067 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
13068 FTL_TYPE_CHECK(jsValueValue(cell), edge, filter, isNotObject(cell));
13069 return;
13070 }
13071
13072 FTL_TYPE_CHECK(jsValueValue(cell), edge, filter, isNotObject(cell));
13073 speculate(
13074 BadType, jsValueValue(cell), edge.node(),
13075 m_out.testNonZero32(
13076 m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
13077 m_out.constInt32(MasqueradesAsUndefined)));
13078 }
13079
    // Emits a comparison with an inline fast path for (int32, int32) operands and a
    // call to |helperFunction| for all other operand combinations. |intFunctor|
    // receives the two unboxed int32 values and produces the fast-path boolean result.
    template<typename IntFunctor>
    void nonSpeculativeCompare(const IntFunctor& intFunctor, S_JITOperation_EJJ helperFunction)
    {
        LValue left = lowJSValue(m_node->child1());
        LValue right = lowJSValue(m_node->child2());

        LBasicBlock leftIsInt = m_out.newBlock();
        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Fast path only when both operands are int32.
        m_out.branch(isNotInt32(left, provenType(m_node->child1())), rarely(slowPath), usually(leftIsInt));

        LBasicBlock lastNext = m_out.appendTo(leftIsInt, fastPath);
        m_out.branch(isNotInt32(right, provenType(m_node->child2())), rarely(slowPath), usually(fastPath));

        m_out.appendTo(fastPath, slowPath);
        ValueFromBlock fastResult = m_out.anchor(intFunctor(unboxInt32(left), unboxInt32(right)));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        // The helper returns a pointer-sized value; notNull converts it to a boolean.
        ValueFromBlock slowResult = m_out.anchor(m_out.notNull(vmCall(
            pointerType(), m_out.operation(helperFunction), m_callFrame, left, right)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, fastResult, slowResult));
    }
13108
    // Emits an inline equality check for two JSStrings, returning a boolean LValue.
    // Fast path: both strings must be non-rope and 8-bit; unequal lengths are trivially
    // false, zero length trivially true, otherwise the bytes are compared in a loop
    // from the last character down to the first. Ropes, 16-bit strings, and anything
    // else fall back to operationCompareStringEq.
    LValue stringsEqual(LValue leftJSString, LValue rightJSString, Edge leftJSStringEdge = Edge(), Edge rightJSStringEdge = Edge())
    {
        LBasicBlock notTriviallyUnequalCase = m_out.newBlock();
        LBasicBlock notEmptyCase = m_out.newBlock();
        LBasicBlock leftReadyCase = m_out.newBlock();
        LBasicBlock rightReadyCase = m_out.newBlock();
        LBasicBlock left8BitCase = m_out.newBlock();
        LBasicBlock right8BitCase = m_out.newBlock();
        LBasicBlock loop = m_out.newBlock();
        LBasicBlock bytesEqual = m_out.newBlock();
        LBasicBlock trueCase = m_out.newBlock();
        LBasicBlock falseCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Ropes have no flat character buffer, so they must take the slow path.
        m_out.branch(isRopeString(leftJSString, leftJSStringEdge), rarely(slowCase), usually(leftReadyCase));

        LBasicBlock lastNext = m_out.appendTo(leftReadyCase, rightReadyCase);
        m_out.branch(isRopeString(rightJSString, rightJSStringEdge), rarely(slowCase), usually(rightReadyCase));

        m_out.appendTo(rightReadyCase, notTriviallyUnequalCase);
        LValue left = m_out.loadPtr(leftJSString, m_heaps.JSString_value);
        LValue right = m_out.loadPtr(rightJSString, m_heaps.JSString_value);
        LValue length = m_out.load32(left, m_heaps.StringImpl_length);
        // Different lengths can never be equal.
        m_out.branch(
            m_out.notEqual(length, m_out.load32(right, m_heaps.StringImpl_length)),
            unsure(falseCase), unsure(notTriviallyUnequalCase));

        m_out.appendTo(notTriviallyUnequalCase, notEmptyCase);
        // Two empty strings are equal without comparing any characters.
        m_out.branch(m_out.isZero32(length), unsure(trueCase), unsure(notEmptyCase));

        m_out.appendTo(notEmptyCase, left8BitCase);
        // The byte-compare loop below only handles 8-bit (Latin-1) characters.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(left, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(slowCase), unsure(left8BitCase));

        m_out.appendTo(left8BitCase, right8BitCase);
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(right, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(slowCase), unsure(right8BitCase));

        m_out.appendTo(right8BitCase, loop);

        LValue leftData = m_out.loadPtr(left, m_heaps.StringImpl_data);
        LValue rightData = m_out.loadPtr(right, m_heaps.StringImpl_data);

        ValueFromBlock indexAtStart = m_out.anchor(length);

        m_out.jump(loop);

        m_out.appendTo(loop, bytesEqual);

        // The loop counter starts at |length| and each iteration compares the byte at
        // (counter - 1), so we walk from the last character to the first.
        LValue indexAtLoopTop = m_out.phi(Int32, indexAtStart);
        LValue indexInLoop = m_out.sub(indexAtLoopTop, m_out.int32One);

        LValue leftByte = m_out.load8ZeroExt32(
            m_out.baseIndex(m_heaps.characters8, leftData, m_out.zeroExtPtr(indexInLoop)));
        LValue rightByte = m_out.load8ZeroExt32(
            m_out.baseIndex(m_heaps.characters8, rightData, m_out.zeroExtPtr(indexInLoop)));

        m_out.branch(m_out.notEqual(leftByte, rightByte), unsure(falseCase), unsure(bytesEqual));

        m_out.appendTo(bytesEqual, trueCase);

        // Feed the decremented index back into the loop header's phi.
        ValueFromBlock indexForNextIteration = m_out.anchor(indexInLoop);
        m_out.addIncomingToPhi(indexAtLoopTop, indexForNextIteration);
        m_out.branch(m_out.notZero32(indexInLoop), unsure(loop), unsure(trueCase));

        m_out.appendTo(trueCase, falseCase);

        ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);

        m_out.appendTo(falseCase, slowCase);

        ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);

        LValue slowResultValue = vmCall(
            Int64, m_out.operation(operationCompareStringEq), m_callFrame,
            leftJSString, rightJSString);
        ValueFromBlock slowResult = m_out.anchor(unboxBoolean(slowResultValue));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, trueResult, falseResult, slowResult);
    }
13202
    // Tells emitBinarySnippet whether the snippet generator needs an extra FP scratch
    // register beyond the two it always receives.
    enum ScratchFPRUsage {
        DontNeedScratchFPR,
        NeedScratchFPR
    };
    // Emits a binary arithmetic op through a JIT snippet generator wrapped in a B3
    // patchpoint. The snippet's fast path runs inline; its slow-path jumps are linked
    // to a late path that calls |slowPathFunction|. If the generator emits no fast
    // path at all, we call the slow path directly.
    template<typename BinaryArithOpGenerator, ScratchFPRUsage scratchFPRUsage = DontNeedScratchFPR>
    void emitBinarySnippet(J_JITOperation_EJJ slowPathFunction)
    {
        Node* node = m_node;

        LValue left = lowJSValue(node->child1());
        LValue right = lowJSValue(node->child2());

        // Result-type info from abstract interpretation lets the snippet specialize.
        SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
        SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(left);
        patchpoint->appendSomeRegister(right);
        // lateReg: the tag registers must stay intact through the snippet's late paths.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->numFPScratchRegisters = 2;
        if (scratchFPRUsage == NeedScratchFPR)
            patchpoint->numFPScratchRegisters++;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
        // Captured by value: the generator runs after this function returns.
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // params[0] = result, params[1] = left, params[2] = right.
                auto generator = Box<BinaryArithOpGenerator>::create(
                    leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()),
                    params.fpScratch(0), params.fpScratch(1), params.gpScratch(0),
                    scratchFPRUsage == NeedScratchFPR ? params.fpScratch(2) : InvalidFPRReg);

                generator->generateFastPath(jit);

                if (generator->didEmitFastPath()) {
                    generator->endJumpList().link(&jit);
                    CCallHelpers::Label done = jit.label();

                    // Slow path is emitted out-of-line and jumps back to |done|.
                    params.addLatePath(
                        [=] (CCallHelpers& jit) {
                            AllowMacroScratchRegisterUsage allowScratch(jit);

                            generator->slowPathJumpList().link(&jit);
                            callOperation(
                                *state, params.unavailableRegisters(), jit, node->origin.semantic,
                                exceptions.get(), slowPathFunction, params[0].gpr(),
                                params[1].gpr(), params[2].gpr());
                            jit.jump().linkTo(done, &jit);
                        });
                } else {
                    // No fast path was emitted: the operation call is the whole thing.
                    callOperation(
                        *state, params.unavailableRegisters(), jit, node->origin.semantic,
                        exceptions.get(), slowPathFunction, params[0].gpr(), params[1].gpr(),
                        params[2].gpr());
                }
            });

        setJSValue(patchpoint);
    }
13272
    // Emits a binary bitwise op through a JIT snippet generator wrapped in a B3
    // patchpoint. Unlike emitBinarySnippet, the generator is assumed to always emit a
    // fast path; its slow-path jumps are linked to a late path calling |slowPathFunction|.
    template<typename BinaryBitOpGenerator>
    void emitBinaryBitOpSnippet(J_JITOperation_EJJ slowPathFunction)
    {
        Node* node = m_node;

        LValue left = lowJSValue(node->child1());
        LValue right = lowJSValue(node->child2());

        // Result-type info from abstract interpretation lets the snippet specialize.
        SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
        SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(left);
        patchpoint->appendSomeRegister(right);
        // lateReg: the tag registers must stay intact through the snippet's late paths.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
        // Captured by value: the generator runs after this function returns.
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // params[0] = result, params[1] = left, params[2] = right.
                auto generator = Box<BinaryBitOpGenerator>::create(
                    leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.gpScratch(0));

                generator->generateFastPath(jit);
                generator->endJumpList().link(&jit);
                CCallHelpers::Label done = jit.label();

                // Slow path is emitted out-of-line and jumps back to |done|.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJumpList().link(&jit);
                        callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), slowPathFunction, params[0].gpr(),
                            params[1].gpr(), params[2].gpr());
                        jit.jump().linkTo(done, &jit);
                    });
            });

        setJSValue(patchpoint);
    }
13325
    // Emits a right shift (signed or unsigned per |shiftType|) through the
    // JITRightShiftGenerator snippet wrapped in a B3 patchpoint. Slow-path jumps are
    // linked to a late path that calls the matching operation.
    void emitRightShiftSnippet(JITRightShiftGenerator::ShiftType shiftType)
    {
        Node* node = m_node;

        // FIXME: Make this do exceptions.
        // https://bugs.webkit.org/show_bug.cgi?id=151686

        LValue left = lowJSValue(node->child1());
        LValue right = lowJSValue(node->child2());

        // Result-type info from abstract interpretation lets the snippet specialize.
        SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
        SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(left);
        patchpoint->appendSomeRegister(right);
        // lateReg: the tag registers must stay intact through the snippet's late paths.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->numFPScratchRegisters = 1;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
        // Captured by value: the generator runs after this function returns.
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // params[0] = result, params[1] = left, params[2] = right.
                auto generator = Box<JITRightShiftGenerator>::create(
                    leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()),
                    params.fpScratch(0), params.gpScratch(0), InvalidFPRReg, shiftType);

                generator->generateFastPath(jit);
                generator->endJumpList().link(&jit);
                CCallHelpers::Label done = jit.label();

                // Slow path is emitted out-of-line and jumps back to |done|.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJumpList().link(&jit);

                        // Pick the helper matching the shift's signedness.
                        J_JITOperation_EJJ slowPathFunction =
                            shiftType == JITRightShiftGenerator::SignedShift
                            ? operationValueBitRShift : operationValueBitURShift;

                        callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), slowPathFunction, params[0].gpr(),
                            params[1].gpr(), params[2].gpr());
                        jit.jump().linkTo(done, &jit);
                    });
            });

        setJSValue(patchpoint);
    }
13387
    // Emits the inline allocation fast path for a heap cell, returning the newly allocated
    // (not yet structure-stamped) cell. |allocator| is either a compile-time-constant
    // LocalAllocator* or a runtime value; a null allocator always branches to |slowPath|.
    LValue allocateHeapCell(LValue allocator, LBasicBlock slowPath)
    {
        JITAllocator actualAllocator;
        if (allocator->hasIntPtr())
            actualAllocator = JITAllocator::constant(Allocator(bitwise_cast<LocalAllocator*>(allocator->asIntPtr())));
        else
            actualAllocator = JITAllocator::variable();

        if (actualAllocator.isConstant()) {
            if (!actualAllocator.allocator()) {
                // Known-null allocator: unconditionally take the slow path. The empty
                // continuation block only keeps the IR well-formed; the returned zero is
                // never used as a real cell.
                LBasicBlock haveAllocator = m_out.newBlock();
                LBasicBlock lastNext = m_out.insertNewBlocksBefore(haveAllocator);
                m_out.jump(slowPath);
                m_out.appendTo(haveAllocator, lastNext);
                return m_out.intPtrZero;
            }
        } else {
            // This means that either we know that the allocator is null or we don't know what the
            // allocator is. In either case, we need the null check.
            LBasicBlock haveAllocator = m_out.newBlock();
            LBasicBlock lastNext = m_out.insertNewBlocksBefore(haveAllocator);
            m_out.branch(
                m_out.notEqual(allocator, m_out.intPtrZero),
                usually(haveAllocator), rarely(slowPath));
            m_out.appendTo(haveAllocator, lastNext);
        }

        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);

        PatchpointValue* patchpoint = m_out.patchpoint(pointerType());
        if (isARM64()) {
            // emitAllocateWithNonNullAllocator uses the scratch registers on ARM.
            patchpoint->clobber(RegisterSet::macroScratchRegisters());
        }
        // The patchpoint terminates the block: continuation on success, slowPath on failure.
        patchpoint->effects.terminal = true;
        // Constant allocator: reserve a second scratch GPR to hold the allocator.
        // Variable allocator: pass it in as a clobberable register argument instead.
        if (actualAllocator.isConstant())
            patchpoint->numGPScratchRegisters++;
        else
            patchpoint->appendSomeRegisterWithClobber(allocator);
        patchpoint->numGPScratchRegisters++;
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;

        m_out.appendSuccessor(usually(continuation));
        m_out.appendSuccessor(rarely(slowPath));

        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsageIf allowScratchIf(jit, isARM64());
                CCallHelpers::JumpList jumpToSlowPath;

                // In the constant case the allocator lives in the extra scratch register;
                // otherwise it arrived as the patchpoint's register argument.
                GPRReg allocatorGPR;
                if (actualAllocator.isConstant())
                    allocatorGPR = params.gpScratch(1);
                else
                    allocatorGPR = params[1].gpr();

                // We use a patchpoint to emit the allocation path because whenever we mess with
                // allocation paths, we already reason about them at the machine code level. We know
                // exactly what instruction sequence we want. We're confident that no compiler
                // optimization could make this code better. So, it's best to have the code in
                // AssemblyHelpers::emitAllocate(). That way, the same optimized path is shared by
                // all of the compiler tiers.
                jit.emitAllocateWithNonNullAllocator(
                    params[0].gpr(), actualAllocator, allocatorGPR, params.gpScratch(0),
                    jumpToSlowPath);

                CCallHelpers::Jump jumpToSuccess;
                if (!params.fallsThroughToSuccessor(0))
                    jumpToSuccess = jit.jump();

                // Link success/failure jumps to the B3 successors once their labels are known.
                Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        jumpToSlowPath.linkTo(*labels[1], &jit);
                        if (jumpToSuccess.isSet())
                            jumpToSuccess.linkTo(*labels[0], &jit);
                    });
            });

        m_out.appendTo(continuation, lastNext);
        return patchpoint;
    }
13473
13474 void storeStructure(LValue object, Structure* structure)
13475 {
13476 m_out.store32(m_out.constInt32(structure->id()), object, m_heaps.JSCell_structureID);
13477 m_out.store32(
13478 m_out.constInt32(structure->objectInitializationBlob()),
13479 object, m_heaps.JSCell_usefulBytes);
13480 }
13481
13482 void storeStructure(LValue object, LValue structure)
13483 {
13484 if (structure->hasIntPtr()) {
13485 storeStructure(object, bitwise_cast<Structure*>(structure->asIntPtr()));
13486 return;
13487 }
13488
13489 LValue id = m_out.load32(structure, m_heaps.Structure_structureID);
13490 m_out.store32(id, object, m_heaps.JSCell_structureID);
13491
13492 LValue blob = m_out.load32(structure, m_heaps.Structure_indexingModeIncludingHistory);
13493 m_out.store32(blob, object, m_heaps.JSCell_usefulBytes);
13494 }
13495
13496 template <typename StructureType>
13497 LValue allocateCell(LValue allocator, StructureType structure, LBasicBlock slowPath)
13498 {
13499 LValue result = allocateHeapCell(allocator, slowPath);
13500 storeStructure(result, structure);
13501 return result;
13502 }
13503
13504 LValue allocateObject(LValue allocator, RegisteredStructure structure, LValue butterfly, LBasicBlock slowPath)
13505 {
13506 return allocateObject(allocator, weakStructure(structure), butterfly, slowPath);
13507 }
13508
13509 LValue allocateObject(LValue allocator, LValue structure, LValue butterfly, LBasicBlock slowPath)
13510 {
13511 LValue result = allocateCell(allocator, structure, slowPath);
13512 if (structure->hasIntPtr()) {
13513 splatWords(
13514 result,
13515 m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
13516 m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8 + bitwise_cast<Structure*>(structure->asIntPtr())->inlineCapacity()),
13517 m_out.int64Zero,
13518 m_heaps.properties.atAnyNumber());
13519 } else {
13520 LValue end = m_out.add(
13521 m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
13522 m_out.load8ZeroExt32(structure, m_heaps.Structure_inlineCapacity));
13523 splatWords(
13524 result,
13525 m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
13526 end,
13527 m_out.int64Zero,
13528 m_heaps.properties.atAnyNumber());
13529 }
13530
13531 m_out.storePtr(butterfly, result, m_heaps.JSObject_butterfly);
13532 return result;
13533 }
13534
13535 template<typename ClassType, typename StructureType>
13536 LValue allocateObject(
13537 size_t size, StructureType structure, LValue butterfly, LBasicBlock slowPath)
13538 {
13539 Allocator allocator = allocatorForNonVirtualConcurrently<ClassType>(vm(), size, AllocatorForMode::AllocatorIfExists);
13540 return allocateObject(
13541 m_out.constIntPtr(allocator.localAllocator()), structure, butterfly, slowPath);
13542 }
13543
13544 template<typename ClassType, typename StructureType>
13545 LValue allocateObject(StructureType structure, LValue butterfly, LBasicBlock slowPath)
13546 {
13547 return allocateObject<ClassType>(
13548 ClassType::allocationSize(0), structure, butterfly, slowPath);
13549 }
13550
    // Returns the LocalAllocator* to use for an allocation of |size| bytes out of |subspace|,
    // branching to |slowPath| when no inline allocator applies (size above largeCutoff, or no
    // allocator installed for that size class).
    LValue allocatorForSize(LValue subspace, LValue size, LBasicBlock slowPath)
    {
        static_assert(!(MarkedSpace::sizeStep & (MarkedSpace::sizeStep - 1)), "MarkedSpace::sizeStep must be a power of two.");

        // Try to do some constant-folding here.
        if (subspace->hasIntPtr() && size->hasIntPtr()) {
            CompleteSubspace* actualSubspace = bitwise_cast<CompleteSubspace*>(subspace->asIntPtr());
            size_t actualSize = size->asIntPtr();

            Allocator actualAllocator = actualSubspace->allocatorForNonVirtual(actualSize, AllocatorForMode::AllocatorIfExists);
            if (!actualAllocator) {
                // No allocator for this size: always take the slow path. The continuation
                // block only keeps the IR well-formed; the returned zero is never a real
                // allocator.
                LBasicBlock continuation = m_out.newBlock();
                LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
                m_out.jump(slowPath);
                m_out.appendTo(continuation, lastNext);
                return m_out.intPtrZero;
            }

            return m_out.constIntPtr(actualAllocator.localAllocator());
        }

        unsigned stepShift = getLSBSet(MarkedSpace::sizeStep);

        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);

        // Round the size up to the next sizeStep multiple and convert to a size-class index.
        LValue sizeClassIndex = m_out.lShr(
            m_out.add(size, m_out.constIntPtr(MarkedSpace::sizeStep - 1)),
            m_out.constInt32(stepShift));

        // Sizes above largeCutoff have no per-size-class allocator; go to the slow path.
        m_out.branch(
            m_out.above(sizeClassIndex, m_out.constIntPtr(MarkedSpace::largeCutoff >> stepShift)),
            rarely(slowPath), usually(continuation));

        m_out.appendTo(continuation, lastNext);

        // Index into the subspace's allocatorForSizeStep table.
        return m_out.loadPtr(
            m_out.baseIndex(
                m_heaps.CompleteSubspace_allocatorForSizeStep,
                subspace, sizeClassIndex));
    }
13593
13594 LValue allocatorForSize(CompleteSubspace& subspace, LValue size, LBasicBlock slowPath)
13595 {
13596 return allocatorForSize(m_out.constIntPtr(&subspace), size, slowPath);
13597 }
13598
13599 template<typename ClassType>
13600 LValue allocateVariableSizedObject(
13601 LValue size, RegisteredStructure structure, LValue butterfly, LBasicBlock slowPath)
13602 {
13603 CompleteSubspace* subspace = subspaceForConcurrently<ClassType>(vm());
13604 RELEASE_ASSERT_WITH_MESSAGE(subspace, "CompleteSubspace is always allocated");
13605 LValue allocator = allocatorForSize(*subspace, size, slowPath);
13606 return allocateObject(allocator, structure, butterfly, slowPath);
13607 }
13608
13609 template<typename ClassType>
13610 LValue allocateVariableSizedCell(
13611 LValue size, Structure* structure, LBasicBlock slowPath)
13612 {
13613 CompleteSubspace* subspace = subspaceForConcurrently<ClassType>(vm());
13614 RELEASE_ASSERT_WITH_MESSAGE(subspace, "CompleteSubspace is always allocated");
13615 LValue allocator = allocatorForSize(*subspace, size, slowPath);
13616 return allocateCell(allocator, structure, slowPath);
13617 }
13618
    // Allocates a JSFinalObject with the given structure: inline fast path, with a lazily
    // generated call to operationNewObject as the slow path.
    LValue allocateObject(RegisteredStructure structure)
    {
        size_t allocationSize = JSFinalObject::allocationSize(structure.get()->inlineCapacity());
        Allocator allocator = allocatorForNonVirtualConcurrently<JSFinalObject>(vm(), allocationSize, AllocatorForMode::AllocatorIfExists);

        // FIXME: If the allocator is null, we could simply emit a normal C call to the allocator
        // instead of putting it on the slow path.
        // https://bugs.webkit.org/show_bug.cgi?id=161062

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        // Fast path: inline allocation with a null butterfly.
        ValueFromBlock fastResult = m_out.anchor(allocateObject(
            m_out.constIntPtr(allocator.localAllocator()), structure, m_out.intPtrZero, slowPath));

        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        VM& vm = this->vm();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNewObject, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()));
            });
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(pointerType(), fastResult, slowResult);
    }
13653
13654 struct ArrayValues {
13655 ArrayValues()
13656 : array(0)
13657 , butterfly(0)
13658 {
13659 }
13660
13661 ArrayValues(LValue array, LValue butterfly)
13662 : array(array)
13663 , butterfly(butterfly)
13664 {
13665 }
13666
13667 LValue array;
13668 LValue butterfly;
13669 };
13670
    // Allocates a JSArray together with its butterfly. The fast path allocates the butterfly
    // from the JSValue gigacage auxiliary space and then the array cell; publicLength at or
    // above MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH (when allowed) and any allocation failure
    // fall back to a runtime call.
    ArrayValues allocateJSArray(LValue publicLength, LValue vectorLength, LValue structure, LValue indexingType, bool shouldInitializeElements = true, bool shouldLargeArraySizeCreateArrayStorage = true)
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        if (indexingType->hasInt32()) {
            IndexingType type = static_cast<IndexingType>(indexingType->asInt32());
            ASSERT_UNUSED(type,
                hasUndecided(type)
                || hasInt32(type)
                || hasDouble(type)
                || hasContiguous(type));
        }

        LBasicBlock fastCase = m_out.newBlock();
        LBasicBlock largeCase = m_out.newBlock();
        LBasicBlock failCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(fastCase);

        // When the structure and lengths are compile-time constants, round the vector length
        // up to the optimal contiguous vector length so the butterfly fills its size class.
        Optional<unsigned> staticVectorLength;
        Optional<unsigned> staticVectorLengthFromPublicLength;
        if (structure->hasIntPtr()) {
            if (publicLength->hasInt32()) {
                unsigned publicLengthConst = static_cast<unsigned>(publicLength->asInt32());
                if (publicLengthConst <= MAX_STORAGE_VECTOR_LENGTH) {
                    publicLengthConst = Butterfly::optimalContiguousVectorLength(
                        bitwise_cast<Structure*>(structure->asIntPtr())->outOfLineCapacity(), publicLengthConst);
                    staticVectorLengthFromPublicLength = publicLengthConst;
                }

            }
            if (vectorLength->hasInt32()) {
                unsigned vectorLengthConst = static_cast<unsigned>(vectorLength->asInt32());
                if (vectorLengthConst <= MAX_STORAGE_VECTOR_LENGTH) {
                    vectorLengthConst = Butterfly::optimalContiguousVectorLength(
                        bitwise_cast<Structure*>(structure->asIntPtr())->outOfLineCapacity(), vectorLengthConst);
                    vectorLength = m_out.constInt32(vectorLengthConst);
                    staticVectorLength = vectorLengthConst;
                }
            }
        } else {
            // We don't compute the optimal vector length for new Array(blah) where blah is not
            // statically known, since the compute effort of doing it here is probably not worth it.
        }

        ValueFromBlock noButterfly = m_out.anchor(m_out.intPtrZero);

        LValue predicate;
        if (shouldLargeArraySizeCreateArrayStorage)
            predicate = m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
        else
            predicate = m_out.booleanFalse;

        m_out.branch(predicate, rarely(largeCase), usually(fastCase));

        m_out.appendTo(fastCase, largeCase);

        // Butterfly payload: vectorLength 8-byte slots, plus the indexing header in front.
        LValue payloadSize =
            m_out.shl(m_out.zeroExt(vectorLength, pointerType()), m_out.constIntPtr(3));

        LValue butterflySize = m_out.add(
            payloadSize, m_out.constIntPtr(sizeof(IndexingHeader)));

        LValue allocator = allocatorForSize(vm().jsValueGigacageAuxiliarySpace, butterflySize, failCase);
        LValue startOfStorage = allocateHeapCell(allocator, failCase);

        // The butterfly pointer points just past the indexing header.
        LValue butterfly = m_out.add(startOfStorage, m_out.constIntPtr(sizeof(IndexingHeader)));

        m_out.store32(publicLength, butterfly, m_heaps.Butterfly_publicLength);
        m_out.store32(vectorLength, butterfly, m_heaps.Butterfly_vectorLength);

        // Initialize the whole vector, or only the slots past publicLength when the caller
        // opted out of element initialization.
        initializeArrayElements(
            indexingType,
            shouldInitializeElements ? m_out.int32Zero : publicLength, vectorLength,
            butterfly);

        ValueFromBlock haveButterfly = m_out.anchor(butterfly);

        LValue object = allocateObject<JSArray>(structure, butterfly, failCase);

        ValueFromBlock fastResult = m_out.anchor(object);
        ValueFromBlock fastButterfly = m_out.anchor(butterfly);
        m_out.jump(continuation);

        // Large arrays: switch to the ArrayWithArrayStorage structure and let the runtime
        // perform the allocation.
        m_out.appendTo(largeCase, failCase);
        ValueFromBlock largeStructure = m_out.anchor(
            weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))));
        m_out.jump(slowCase);

        m_out.appendTo(failCase, slowCase);
        ValueFromBlock failStructure = m_out.anchor(structure);
        m_out.jump(slowCase);

        m_out.appendTo(slowCase, continuation);
        LValue structureValue = m_out.phi(pointerType(), largeStructure, failStructure);
        LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);

        VM& vm = this->vm();
        LValue slowResultValue = nullptr;
        // When the vector length carries no information beyond the public length, call the
        // plain operationNewArrayWithSize; otherwise pass the vector length as a hint.
        if (vectorLength == publicLength
            || (staticVectorLengthFromPublicLength && staticVectorLength && staticVectorLength.value() == staticVectorLengthFromPublicLength.value())) {
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationNewArrayWithSize, locations[0].directGPR(),
                        locations[1].directGPR(), locations[2].directGPR(), locations[3].directGPR());
                },
                structureValue, publicLength, butterflyValue);
        } else {
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationNewArrayWithSizeAndHint, locations[0].directGPR(),
                        locations[1].directGPR(), locations[2].directGPR(), locations[3].directGPR(), locations[4].directGPR());
                },
                structureValue, publicLength, vectorLength, butterflyValue);
        }

        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        ValueFromBlock slowButterfly = m_out.anchor(
            m_out.loadPtr(slowResultValue, m_heaps.JSObject_butterfly));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return ArrayValues(
            m_out.phi(pointerType(), fastResult, slowResult),
            m_out.phi(pointerType(), fastButterfly, slowButterfly));
    }
13800
13801 ArrayValues allocateUninitializedContiguousJSArrayInternal(LValue publicLength, LValue vectorLength, RegisteredStructure structure)
13802 {
13803 bool shouldInitializeElements = false;
13804 bool shouldLargeArraySizeCreateArrayStorage = false;
13805 return allocateJSArray(
13806 publicLength, vectorLength, weakStructure(structure), m_out.constInt32(structure->indexingType()), shouldInitializeElements,
13807 shouldLargeArraySizeCreateArrayStorage);
13808 }
13809
13810 ArrayValues allocateUninitializedContiguousJSArray(LValue publicLength, RegisteredStructure structure)
13811 {
13812 return allocateUninitializedContiguousJSArrayInternal(publicLength, publicLength, structure);
13813 }
13814
13815 ArrayValues allocateUninitializedContiguousJSArray(unsigned publicLength, unsigned vectorLength, RegisteredStructure structure)
13816 {
13817 ASSERT(vectorLength >= publicLength);
13818 return allocateUninitializedContiguousJSArrayInternal(m_out.constInt32(publicLength), m_out.constInt32(vectorLength), structure);
13819 }
13820
    // Returns a pointer to the next free ShadowChicken log packet, asking the runtime to
    // process the log when the cursor has reached the end. Advances the cursor past the
    // returned packet.
    LValue ensureShadowChickenPacket()
    {
        ShadowChicken* shadowChicken = vm().shadowChicken();
        RELEASE_ASSERT(shadowChicken);
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        TypedPointer addressOfLogCursor = m_out.absolute(shadowChicken->addressOfLogCursor());
        LValue logCursor = m_out.loadPtr(addressOfLogCursor);

        ValueFromBlock fastResult = m_out.anchor(logCursor);

        m_out.branch(
            m_out.below(logCursor, m_out.constIntPtr(shadowChicken->logEnd())),
            usually(continuation), rarely(slowCase));

        LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);

        // Log is full: let the runtime drain it, then reload the (reset) cursor.
        vmCall(Void, m_out.operation(operationProcessShadowChickenLog), m_callFrame);

        ValueFromBlock slowResult = m_out.anchor(m_out.loadPtr(addressOfLogCursor));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(pointerType(), fastResult, slowResult);

        // Bump the cursor past the packet we are handing out.
        m_out.storePtr(
            m_out.add(result, m_out.constIntPtr(sizeof(ShadowChicken::Packet))),
            addressOfLogCursor);

        return result;
    }
13853
    // Lowers the JS ToBoolean value of |edge| to an Int32 boolean, specializing on the
    // edge's use kind and speculating where the use kind permits.
    LValue boolify(Edge edge)
    {
        switch (edge.useKind()) {
        case BooleanUse:
        case KnownBooleanUse:
            return lowBoolean(edge);
        case Int32Use:
            return m_out.notZero32(lowInt32(edge));
        case DoubleRepUse:
            // NaN is falsy, hence the "AndOrdered" comparison against zero.
            return m_out.doubleNotEqualAndOrdered(lowDouble(edge), m_out.doubleZero);
        case ObjectOrOtherUse:
            return m_out.logicalNot(
                equalNullOrUndefined(
                    edge, CellCaseSpeculatesObject, SpeculateNullOrUndefined,
                    ManualOperandSpeculation));
        case StringUse:
            // A string is truthy iff it is not the VM's shared empty-string cell.
            return m_out.notEqual(lowString(edge), weakPointer(jsEmptyString(&m_graph.m_vm)));
        case StringOrOtherUse: {
            LValue value = lowJSValue(edge, ManualOperandSpeculation);

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));

            // Cell case: speculate string, then compare against the empty string.
            LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
            FTL_TYPE_CHECK(jsValueValue(value), edge, (~SpecCellCheck) | SpecString, isNotString(value));
            ValueFromBlock stringResult = m_out.anchor(m_out.notEqual(value, weakPointer(jsEmptyString(&m_graph.m_vm))));
            m_out.jump(continuation);

            // Non-cell case: speculate "other" (null/undefined), which is always falsy.
            m_out.appendTo(notCellCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
            ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int32, stringResult, notCellResult);
        }
        case UntypedUse: {
            LValue value = lowJSValue(edge);

            // Implements the following control flow structure:
            // if (value is cell) {
            //     if (value is string or value is BigInt)
            //         result = !!value->length
            //     else {
            //         do evil things for masquerades-as-undefined
            //         result = true
            //     }
            // } else if (value is int32) {
            //     result = !!unboxInt32(value)
            // } else if (value is number) {
            //     result = !!unboxDouble(value)
            // } else {
            //     result = value == jsTrue
            // }

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notStringCase = m_out.newBlock();
            LBasicBlock stringCase = m_out.newBlock();
            LBasicBlock bigIntCase = m_out.newBlock();
            LBasicBlock notStringOrBigIntCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock int32Case = m_out.newBlock();
            LBasicBlock notInt32Case = m_out.newBlock();
            LBasicBlock doubleCase = m_out.newBlock();
            LBasicBlock notDoubleCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            Vector<ValueFromBlock> results;

            m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(cellCase, notStringCase);
            m_out.branch(
                isString(value, provenType(edge) & SpecCell),
                unsure(stringCase), unsure(notStringCase));

            m_out.appendTo(notStringCase, stringCase);
            m_out.branch(
                isBigInt(value, provenType(edge) & (SpecCell - SpecString)),
                unsure(bigIntCase), unsure(notStringOrBigIntCase));

            m_out.appendTo(stringCase, bigIntCase);
            results.append(m_out.anchor(m_out.notEqual(value, weakPointer(jsEmptyString(&m_graph.m_vm)))));
            m_out.jump(continuation);

            // A BigInt is truthy iff it has a non-zero digit count.
            m_out.appendTo(bigIntCase, notStringOrBigIntCase);
            LValue nonZeroBigInt = m_out.notZero32(
                m_out.load32NonNegative(value, m_heaps.JSBigInt_length));
            results.append(m_out.anchor(nonZeroBigInt));
            m_out.jump(continuation);

            // Other cells are truthy, except a MasqueradesAsUndefined object observed from
            // its own global object.
            m_out.appendTo(notStringOrBigIntCase, notCellCase);
            LValue isTruthyObject;
            if (masqueradesAsUndefinedWatchpointIsStillValid())
                isTruthyObject = m_out.booleanTrue;
            else {
                LBasicBlock masqueradesCase = m_out.newBlock();

                results.append(m_out.anchor(m_out.booleanTrue));

                m_out.branch(
                    m_out.testIsZero32(
                        m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
                        m_out.constInt32(MasqueradesAsUndefined)),
                    usually(continuation), rarely(masqueradesCase));

                m_out.appendTo(masqueradesCase);

                isTruthyObject = m_out.notEqual(
                    weakPointer(m_graph.globalObjectFor(m_node->origin.semantic)),
                    m_out.loadPtr(loadStructure(value), m_heaps.Structure_globalObject));
            }
            results.append(m_out.anchor(isTruthyObject));
            m_out.jump(continuation);

            m_out.appendTo(notCellCase, int32Case);
            m_out.branch(
                isInt32(value, provenType(edge) & ~SpecCell),
                unsure(int32Case), unsure(notInt32Case));

            m_out.appendTo(int32Case, notInt32Case);
            results.append(m_out.anchor(m_out.notZero32(unboxInt32(value))));
            m_out.jump(continuation);

            m_out.appendTo(notInt32Case, doubleCase);
            m_out.branch(
                isNumber(value, provenType(edge) & ~SpecCell),
                unsure(doubleCase), unsure(notDoubleCase));

            m_out.appendTo(doubleCase, notDoubleCase);
            LValue doubleIsTruthy = m_out.doubleNotEqualAndOrdered(
                unboxDouble(value), m_out.constDouble(0));
            results.append(m_out.anchor(doubleIsTruthy));
            m_out.jump(continuation);

            // Remaining values (true/false/null/undefined): only jsTrue is truthy.
            m_out.appendTo(notDoubleCase, continuation);
            LValue miscIsTruthy = m_out.equal(
                value, m_out.constInt64(JSValue::encode(jsBoolean(true))));
            results.append(m_out.anchor(miscIsTruthy));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int32, results);
        }
        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return 0;
        }
    }
14006
    // Controls how equalNullOrUndefined() treats the cell case: either every cell is simply
    // not null-ish, or the cell is additionally speculated to be an object.
    enum StringOrObjectMode {
        AllCellsAreFalse,
        CellCaseSpeculatesObject
    };
    // Which comparison equalNullOrUndefined() performs for non-cell values; the Speculate
    // variant type-checks instead of comparing.
    enum EqualNullOrUndefinedMode {
        EqualNull,
        EqualUndefined,
        EqualNullOrUndefined,
        SpeculateNullOrUndefined
    };
    // Compares |edge| against null/undefined per |primitiveMode|, returning an Int32 boolean.
    // |cellMode| controls the cell case (see StringOrObjectMode). Cells compare false except
    // for MasqueradesAsUndefined objects observed from their own global object.
    LValue equalNullOrUndefined(
        Edge edge, StringOrObjectMode cellMode, EqualNullOrUndefinedMode primitiveMode,
        OperandSpeculationMode operandMode = AutomaticOperandSpeculation)
    {
        bool validWatchpoint = masqueradesAsUndefinedWatchpointIsStillValid();

        LValue value = lowJSValue(edge, operandMode);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock primitiveCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);

        Vector<ValueFromBlock, 3> results;

        switch (cellMode) {
        case AllCellsAreFalse:
            break;
        case CellCaseSpeculatesObject:
            // Speculate that any cell we see here is an object.
            FTL_TYPE_CHECK(
                jsValueValue(value), edge, (~SpecCellCheck) | SpecObject, isNotObject(value));
            break;
        }

        if (validWatchpoint) {
            // The watchpoint guarantees no object masquerades as undefined: cells are false.
            results.append(m_out.anchor(m_out.booleanFalse));
            m_out.jump(continuation);
        } else {
            // A cell compares false unless it has the MasqueradesAsUndefined flag; a
            // masquerading cell compares true only when its structure's global object is the
            // global object of the current node.
            LBasicBlock masqueradesCase = m_out.newBlock();

            results.append(m_out.anchor(m_out.booleanFalse));

            m_out.branch(
                m_out.testNonZero32(
                    m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
                    m_out.constInt32(MasqueradesAsUndefined)),
                rarely(masqueradesCase), usually(continuation));

            m_out.appendTo(masqueradesCase, primitiveCase);

            LValue structure = loadStructure(value);

            results.append(m_out.anchor(
                m_out.equal(
                    weakPointer(m_graph.globalObjectFor(m_node->origin.semantic)),
                    m_out.loadPtr(structure, m_heaps.Structure_globalObject))));
            m_out.jump(continuation);
        }

        m_out.appendTo(primitiveCase, continuation);

        LValue primitiveResult;
        switch (primitiveMode) {
        case EqualNull:
            primitiveResult = m_out.equal(value, m_out.constInt64(ValueNull));
            break;
        case EqualUndefined:
            primitiveResult = m_out.equal(value, m_out.constInt64(ValueUndefined));
            break;
        case EqualNullOrUndefined:
            primitiveResult = isOther(value, provenType(edge));
            break;
        case SpeculateNullOrUndefined:
            // Speculate that the non-cell value is null/undefined; then the answer is true.
            FTL_TYPE_CHECK(
                jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
            primitiveResult = m_out.booleanTrue;
            break;
        }
        results.append(m_out.anchor(primitiveResult));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        return m_out.phi(Int32, results);
    }
14096
    // For a contiguous put-by-val that may store past the public length: grows the public
    // length for in-vector holes, and routes truly out-of-bounds stores to
    // |slowPathFunction|. Emits nothing when the array mode is known in-bounds. Callers
    // perform the actual store in the block this falls through to.
    template<typename FunctionType>
    void contiguousPutByValOutOfBounds(
        FunctionType slowPathFunction, LValue base, LValue storage, LValue index, LValue value,
        LBasicBlock continuation)
    {
        if (!m_node->arrayMode().isInBounds()) {
            LBasicBlock notInBoundsCase = m_out.newBlock();
            LBasicBlock performStore = m_out.newBlock();

            LValue isNotInBounds = m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength));
            m_out.branch(isNotInBounds, unsure(notInBoundsCase), unsure(performStore));

            LBasicBlock lastNext = m_out.appendTo(notInBoundsCase, performStore);

            // Past the public length; check whether we are also past the vector length.
            LValue isOutOfBounds = m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_vectorLength));

            if (!m_node->arrayMode().isOutOfBounds())
                speculate(OutOfBounds, noValue(), 0, isOutOfBounds);
            else {
                LBasicBlock outOfBoundsCase = m_out.newBlock();
                LBasicBlock holeCase = m_out.newBlock();

                m_out.branch(isOutOfBounds, rarely(outOfBoundsCase), usually(holeCase));

                LBasicBlock innerLastNext = m_out.appendTo(outOfBoundsCase, holeCase);

                // Truly out of bounds: defer the whole store to the runtime.
                vmCall(
                    Void, m_out.operation(slowPathFunction),
                    m_callFrame, base, index, value);

                m_out.jump(continuation);

                m_out.appendTo(holeCase, innerLastNext);
            }

            // Storing into a hole within the vector: bump the public length to index + 1.
            m_out.store32(
                m_out.add(index, m_out.int32One),
                storage, m_heaps.Butterfly_publicLength);

            m_out.jump(performStore);
            m_out.appendTo(performStore, lastNext);
        }
    }
14146
    // On ARM64E, untags (authenticates) a signed array pointer using its length; on other
    // targets the pointer is returned unchanged.
    LValue untagArrayPtr(LValue ptr, LValue size)
    {
#if CPU(ARM64E)
        PatchpointValue* authenticate = m_out.patchpoint(pointerType());
        authenticate->appendSomeRegister(ptr);
        // |size| is read after the result register has been written, hence SomeLateRegister.
        authenticate->append(size, B3::ValueRep(B3::ValueRep::SomeLateRegister));
        authenticate->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            jit.move(params[1].gpr(), params[0].gpr());
            jit.untagArrayPtr(params[2].gpr(), params[0].gpr());
        });
        return authenticate;
#else
        UNUSED_PARAM(size);
        return ptr;
#endif
    }
14163
14164 LValue removeArrayPtrTag(LValue ptr)
14165 {
14166#if CPU(ARM64E)
14167 PatchpointValue* authenticate = m_out.patchpoint(pointerType());
14168 authenticate->appendSomeRegister(ptr);
14169 authenticate->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
14170 jit.move(params[1].gpr(), params[0].gpr());
14171 jit.removeArrayPtrTag(params[0].gpr());
14172 });
14173 return authenticate;
14174#endif
14175 return ptr;
14176 }
14177
    // Rebases |ptr| into the gigacage for |kind| when caging is enabled; on ARM64E the caged
    // primitive pointer is additionally re-signed using the length loaded from |base| (the
    // owning JSArrayBufferView). Returns |ptr| unchanged when caging does not apply.
    LValue caged(Gigacage::Kind kind, LValue ptr, LValue base)
    {
#if GIGACAGE_ENABLED
        if (!Gigacage::isEnabled(kind))
            return ptr;

        // The primitive gigacage can be disabled at runtime; only rely on it while the
        // watchpoint tracking that state is still valid, and register a lazy watchpoint so
        // this code is jettisoned if it fires.
        if (kind == Gigacage::Primitive && Gigacage::canPrimitiveGigacageBeDisabled()) {
            if (vm().primitiveGigacageEnabled().isStillValid())
                m_graph.watchpoints().addLazily(vm().primitiveGigacageEnabled());
            else
                return ptr;
        }

        LValue basePtr = m_out.constIntPtr(Gigacage::basePtr(kind));
        LValue mask = m_out.constIntPtr(Gigacage::mask(kind));

        // Cage: mask the pointer down to an offset and rebase it onto the cage.
        LValue masked = m_out.bitAnd(ptr, mask);
        LValue result = m_out.add(masked, basePtr);

#if CPU(ARM64E)
        if (kind == Gigacage::Primitive) {
            // Reinsert the original pointer's PAC bits above the caged bits, then
            // re-authenticate via untagArrayPtr using the view's length.
            PatchpointValue* merge = m_out.patchpoint(pointerType());
            merge->append(result, B3::ValueRep(B3::ValueRep::SomeLateRegister));
            merge->appendSomeRegister(ptr);
            merge->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                jit.move(params[2].gpr(), params[0].gpr());
                jit.bitFieldInsert64(params[1].gpr(), 0, 64 - MacroAssembler::numberOfPACBits, params[0].gpr());
            });

            LValue size = m_out.load32(base, m_heaps.JSArrayBufferView_length);
            result = untagArrayPtr(merge, size);
        }
#endif // CPU(ARM64E)

        // Make sure that B3 doesn't try to do smart reassociation of these pointer bits.
        // FIXME: In an ideal world, B3 would not do harmful reassociations, and if it did, it would be able
        // to undo them during constant hoisting and regalloc. As it stands, if you remove this then Octane
        // gets 1.6% slower and Kraken gets 5% slower. It's all because the basePtr, which is a constant,
        // gets reassociated out of the add above and into the address arithmetic. This disables hoisting of
        // the basePtr constant. Hoisting that constant is worth a lot more perf than the reassociation. One
        // way to make this all work happily is to combine offset legalization with constant hoisting, and
        // then teach it reassociation. So, Add(Add(a, b), const) where a is loop-invariant while b isn't
        // will turn into Add(Add(a, const), b) by the constant hoister. We would have to teach B3 to do this
        // and possibly other smart things if we want to be able to remove this opaque.
        // https://bugs.webkit.org/show_bug.cgi?id=175493
        return m_out.opaque(result);
#endif

        UNUSED_PARAM(kind);
        UNUSED_PARAM(base);
        return ptr;
    }
14230
14231 void buildSwitch(SwitchData* data, LType type, LValue switchValue)
14232 {
14233 ASSERT(type == pointerType() || type == Int32);
14234
14235 Vector<SwitchCase> cases;
14236 for (unsigned i = 0; i < data->cases.size(); ++i) {
14237 SwitchCase newCase;
14238
14239 if (type == pointerType()) {
14240 newCase = SwitchCase(m_out.constIntPtr(data->cases[i].value.switchLookupValue(data->kind)),
14241 lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count));
14242 } else if (type == Int32) {
14243 newCase = SwitchCase(m_out.constInt32(data->cases[i].value.switchLookupValue(data->kind)),
14244 lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count));
14245 } else
14246 CRASH();
14247
14248 cases.append(newCase);
14249 }
14250
14251 m_out.switchInstruction(
14252 switchValue, cases,
14253 lowBlock(data->fallThrough.block), Weight(data->fallThrough.count));
14254 }
14255
    void switchString(SwitchData* data, LValue string, Edge& edge)
    {
        // Lowers a switch over string values. If every case string is 8-bit and
        // short enough (per Options limits), emit an inline binary character
        // trie via switchStringRecurse; otherwise fall back to a runtime call.
        bool canDoBinarySwitch = true;
        unsigned totalLength = 0;

        for (DFG::SwitchCase myCase : data->cases) {
            StringImpl* string = myCase.value.stringImpl();
            if (!string->is8Bit()) {
                // The inline comparison below loads 8-bit characters only.
                canDoBinarySwitch = false;
                break;
            }
            if (string->length() > Options::maximumBinaryStringSwitchCaseLength()) {
                canDoBinarySwitch = false;
                break;
            }
            totalLength += string->length();
        }

        if (!canDoBinarySwitch || totalLength > Options::maximumBinaryStringSwitchTotalLength()) {
            switchStringSlow(data, string);
            return;
        }

        LBasicBlock hasImplBlock = m_out.newBlock();
        LBasicBlock is8BitBlock = m_out.newBlock();
        LBasicBlock slowBlock = m_out.newBlock();

        // Rope strings have no contiguous character buffer; take the slow path.
        m_out.branch(isRopeString(string, edge), unsure(slowBlock), unsure(hasImplBlock));

        LBasicBlock lastNext = m_out.appendTo(hasImplBlock, is8BitBlock);

        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
        LValue length = m_out.load32(stringImpl, m_heaps.StringImpl_length);

        // The input string itself must also be 8-bit for byte-wise comparison.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(slowBlock), unsure(is8BitBlock));

        m_out.appendTo(is8BitBlock, slowBlock);

        LValue buffer = m_out.loadPtr(stringImpl, m_heaps.StringImpl_data);

        // FIXME: We should propagate branch weight data to the cases of this switch.
        // https://bugs.webkit.org/show_bug.cgi?id=144368

        // Sort the cases lexicographically so switchStringRecurse can binary
        // search on the next unchecked character.
        Vector<StringSwitchCase> cases;
        for (DFG::SwitchCase myCase : data->cases)
            cases.append(StringSwitchCase(myCase.value.stringImpl(), lowBlock(myCase.target.block)));
        std::sort(cases.begin(), cases.end());
        switchStringRecurse(data, buffer, length, cases, 0, 0, cases.size(), 0, false);

        m_out.appendTo(slowBlock, lastNext);
        switchStringSlow(data, string);
    }
14312
14313 // The code for string switching is based closely on the same code in the DFG backend. While it
14314 // would be nice to reduce the amount of similar-looking code, it seems like this is one of
14315 // those algorithms where factoring out the common bits would result in more code than just
14316 // duplicating.
14317
14318 struct StringSwitchCase {
14319 StringSwitchCase() { }
14320
14321 StringSwitchCase(StringImpl* string, LBasicBlock target)
14322 : string(string)
14323 , target(target)
14324 {
14325 }
14326
14327 bool operator<(const StringSwitchCase& other) const
14328 {
14329 return stringLessThan(*string, *other.string);
14330 }
14331
14332 StringImpl* string;
14333 LBasicBlock target;
14334 };
14335
14336 struct CharacterCase {
14337 CharacterCase()
14338 : character(0)
14339 , begin(0)
14340 , end(0)
14341 {
14342 }
14343
14344 CharacterCase(LChar character, unsigned begin, unsigned end)
14345 : character(character)
14346 , begin(begin)
14347 , end(end)
14348 {
14349 }
14350
14351 bool operator<(const CharacterCase& other) const
14352 {
14353 return character < other.character;
14354 }
14355
14356 LChar character;
14357 unsigned begin;
14358 unsigned end;
14359 };
14360
    void switchStringRecurse(
        SwitchData* data, LValue buffer, LValue length, const Vector<StringSwitchCase>& cases,
        unsigned numChecked, unsigned begin, unsigned end, unsigned alreadyCheckedLength,
        unsigned checkedExactLength)
    {
        // Emits a binary-search trie over cases[begin, end), which are sorted
        // and whose first numChecked characters have already been verified to
        // match the input. alreadyCheckedLength is the length lower bound
        // already established; checkedExactLength means the exact length has
        // been checked. Any mismatch jumps to the switch's fall-through block.
        LBasicBlock fallThrough = lowBlock(data->fallThrough.block);

        if (begin == end) {
            m_out.jump(fallThrough);
            return;
        }

        // Compute the longest common prefix (commonChars) shared by the
        // remaining cases beyond numChecked, the minimum case length, and
        // whether all case lengths are equal.
        unsigned minLength = cases[begin].string->length();
        unsigned commonChars = minLength;
        bool allLengthsEqual = true;
        for (unsigned i = begin + 1; i < end; ++i) {
            unsigned myCommonChars = numChecked;
            unsigned limit = std::min(cases[begin].string->length(), cases[i].string->length());
            for (unsigned j = numChecked; j < limit; ++j) {
                if (cases[begin].string->at(j) != cases[i].string->at(j))
                    break;
                myCommonChars++;
            }
            commonChars = std::min(commonChars, myCommonChars);
            if (minLength != cases[i].string->length())
                allLengthsEqual = false;
            minLength = std::min(minLength, cases[i].string->length());
        }

        if (checkedExactLength) {
            DFG_ASSERT(m_graph, m_node, alreadyCheckedLength == minLength, alreadyCheckedLength, minLength);
            DFG_ASSERT(m_graph, m_node, allLengthsEqual);
        }

        DFG_ASSERT(m_graph, m_node, minLength >= commonChars, minLength, commonChars);

        // Guard the length before loading characters: either ensure the input
        // is at least minLength long, or (if all case lengths are equal)
        // exactly minLength.
        if (!allLengthsEqual && alreadyCheckedLength < minLength)
            m_out.check(m_out.below(length, m_out.constInt32(minLength)), unsure(fallThrough));
        if (allLengthsEqual && (alreadyCheckedLength < minLength || !checkedExactLength))
            m_out.check(m_out.notEqual(length, m_out.constInt32(minLength)), unsure(fallThrough));

        // Verify the common prefix one 8-bit character at a time.
        for (unsigned i = numChecked; i < commonChars; ++i) {
            m_out.check(
                m_out.notEqual(
                    m_out.load8ZeroExt32(buffer, m_heaps.characters8[i]),
                    m_out.constInt32(static_cast<uint16_t>(cases[begin].string->at(i)))),
                unsure(fallThrough));
        }

        if (minLength == commonChars) {
            // This is the case where one of the cases is a prefix of all of the other cases.
            // We've already checked that the input string is a prefix of all of the cases,
            // so we just check length to jump to that case.

            DFG_ASSERT(m_graph, m_node, cases[begin].string->length() == commonChars, cases[begin].string->length(), commonChars);
            for (unsigned i = begin + 1; i < end; ++i)
                DFG_ASSERT(m_graph, m_node, cases[i].string->length() > commonChars, cases[i].string->length(), commonChars);

            if (allLengthsEqual) {
                DFG_ASSERT(m_graph, m_node, end == begin + 1, end, begin);
                m_out.jump(cases[begin].target);
                return;
            }

            m_out.check(
                m_out.equal(length, m_out.constInt32(commonChars)),
                unsure(cases[begin].target));

            // We've checked if the length is >= minLength, and then we checked if the length is
            // == commonChars. We get to this point if it is >= minLength but not == commonChars.
            // Hence we know that it now must be > minLength, i.e. that it's >= minLength + 1.
            switchStringRecurse(
                data, buffer, length, cases, commonChars, begin + 1, end, minLength + 1, false);
            return;
        }

        // At this point we know that the string is longer than commonChars, and we've only verified
        // commonChars. Use a binary switch on the next unchecked character, i.e.
        // string[commonChars].

        DFG_ASSERT(m_graph, m_node, end >= begin + 2, end, begin);

        LValue uncheckedChar = m_out.load8ZeroExt32(buffer, m_heaps.characters8[commonChars]);

        // Group the (sorted) cases into runs that share the same character at
        // position commonChars; each run becomes one branch of the switch.
        Vector<CharacterCase> characterCases;
        CharacterCase currentCase(cases[begin].string->at(commonChars), begin, begin + 1);
        for (unsigned i = begin + 1; i < end; ++i) {
            LChar currentChar = cases[i].string->at(commonChars);
            if (currentChar != currentCase.character) {
                currentCase.end = i;
                characterCases.append(currentCase);
                currentCase = CharacterCase(currentChar, i, i + 1);
            } else
                currentCase.end = i + 1;
        }
        characterCases.append(currentCase);

        Vector<LBasicBlock> characterBlocks;
        for (unsigned i = characterCases.size(); i--;)
            characterBlocks.append(m_out.newBlock());

        Vector<SwitchCase> switchCases;
        for (unsigned i = 0; i < characterCases.size(); ++i) {
            if (i)
                DFG_ASSERT(m_graph, m_node, characterCases[i - 1].character < characterCases[i].character);
            switchCases.append(SwitchCase(
                m_out.constInt32(characterCases[i].character), characterBlocks[i], Weight()));
        }
        m_out.switchInstruction(uncheckedChar, switchCases, fallThrough, Weight());

        LBasicBlock lastNext = m_out.m_nextBlock;
        characterBlocks.append(lastNext); // Makes it convenient to set nextBlock.
        // Recurse into each character group, with one more verified character.
        for (unsigned i = 0; i < characterCases.size(); ++i) {
            m_out.appendTo(characterBlocks[i], characterBlocks[i + 1]);
            switchStringRecurse(
                data, buffer, length, cases, commonChars + 1,
                characterCases[i].begin, characterCases[i].end, minLength, allLengthsEqual);
        }

        DFG_ASSERT(m_graph, m_node, m_out.m_nextBlock == lastNext);
    }
14482
14483 void switchStringSlow(SwitchData* data, LValue string)
14484 {
14485 // FIXME: We ought to be able to use computed gotos here. We would save the labels of the
14486 // blocks we want to jump to, and then request their addresses after compilation completes.
14487 // https://bugs.webkit.org/show_bug.cgi?id=144369
14488
14489 LValue branchOffset = vmCall(
14490 Int32, m_out.operation(operationSwitchStringAndGetBranchOffset),
14491 m_callFrame, m_out.constIntPtr(data->switchTableIndex), string);
14492
14493 StringJumpTable& table = codeBlock()->stringSwitchJumpTable(data->switchTableIndex);
14494
14495 Vector<SwitchCase> cases;
14496 // These may be negative, or zero, or probably other stuff, too. We don't want to mess with HashSet's corner cases and we don't really care about throughput here.
14497 StdUnorderedSet<int32_t> alreadyHandled;
14498 for (unsigned i = 0; i < data->cases.size(); ++i) {
14499 // FIXME: The fact that we're using the bytecode's switch table means that the
14500 // following DFG IR transformation would be invalid.
14501 //
14502 // Original code:
14503 // switch (v) {
14504 // case "foo":
14505 // case "bar":
14506 // things();
14507 // break;
14508 // default:
14509 // break;
14510 // }
14511 //
14512 // New code:
14513 // switch (v) {
14514 // case "foo":
14515 // instrumentFoo();
14516 // goto _things;
14517 // case "bar":
14518 // instrumentBar();
14519 // _things:
14520 // things();
14521 // break;
14522 // default:
14523 // break;
14524 // }
14525 //
14526 // Luckily, we don't currently do any such transformation. But it's kind of silly that
14527 // this is an issue.
14528 // https://bugs.webkit.org/show_bug.cgi?id=144635
14529
14530 DFG::SwitchCase myCase = data->cases[i];
14531 StringJumpTable::StringOffsetTable::iterator iter =
14532 table.offsetTable.find(myCase.value.stringImpl());
14533 DFG_ASSERT(m_graph, m_node, iter != table.offsetTable.end());
14534
14535 if (!alreadyHandled.insert(iter->value.branchOffset).second)
14536 continue;
14537
14538 cases.append(SwitchCase(
14539 m_out.constInt32(iter->value.branchOffset),
14540 lowBlock(myCase.target.block), Weight(myCase.target.count)));
14541 }
14542
14543 m_out.switchInstruction(
14544 branchOffset, cases, lowBlock(data->fallThrough.block),
14545 Weight(data->fallThrough.count));
14546 }
14547
14548 // Calls the functor at the point of code generation where we know what the result type is.
14549 // You can emit whatever code you like at that point. Expects you to terminate the basic block.
14550 // When buildTypeOf() returns, it will have terminated all basic blocks that it created. So, if
14551 // you aren't using this as the terminator of a high-level block, you should create your own
    // continuation and set it as the nextBlock (m_out.insertNewBlocksBefore(continuation)) before
14553 // calling this. For example:
14554 //
14555 // LBasicBlock continuation = m_out.newBlock();
14556 // LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
14557 // buildTypeOf(
14558 // child, value,
14559 // [&] (TypeofType type) {
14560 // do things;
14561 // m_out.jump(continuation);
14562 // });
14563 // m_out.appendTo(continuation, lastNext);
    template<typename Functor>
    void buildTypeOf(Edge child, LValue value, const Functor& functor)
    {
        // Classifies `value` into a TypeofType and invokes functor(type) in the
        // basic block where that classification is known. The functor must
        // terminate each block it is called in (see the usage comment above).
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        // Implements the following branching structure:
        //
        // if (is cell) {
        //     if (is object) {
        //         if (is function) {
        //             return function;
        //         } else if (doesn't have call trap and doesn't masquerade as undefined) {
        //             return object
        //         } else {
        //             return slowPath();
        //         }
        //     } else if (is string) {
        //         return string
        //     } else if (is bigint) {
        //         return bigint
        //     } else {
        //         return symbol
        //     }
        // } else if (is number) {
        //     return number
        // } else if (is null) {
        //     return object
        // } else if (is boolean) {
        //     return boolean
        // } else {
        //     return undefined
        // }
        //
        // FIXME: typeof Symbol should be more frequently seen than BigInt.
        // We should change the order of type detection based on this frequency.
        // https://bugs.webkit.org/show_bug.cgi?id=192650

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock objectCase = m_out.newBlock();
        LBasicBlock functionCase = m_out.newBlock();
        LBasicBlock notFunctionCase = m_out.newBlock();
        LBasicBlock reallyObjectCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock unreachable = m_out.newBlock();
        LBasicBlock notObjectCase = m_out.newBlock();
        LBasicBlock stringCase = m_out.newBlock();
        LBasicBlock notStringCase = m_out.newBlock();
        LBasicBlock bigIntCase = m_out.newBlock();
        LBasicBlock symbolCase = m_out.newBlock();
        LBasicBlock notCellCase = m_out.newBlock();
        LBasicBlock numberCase = m_out.newBlock();
        LBasicBlock notNumberCase = m_out.newBlock();
        LBasicBlock notNullCase = m_out.newBlock();
        LBasicBlock booleanCase = m_out.newBlock();
        LBasicBlock undefinedCase = m_out.newBlock();

        // provenType(child) lets each predicate fold away checks that abstract
        // interpretation has already ruled out.
        m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, objectCase);
        m_out.branch(isObject(value, provenType(child)), unsure(objectCase), unsure(notObjectCase));

        m_out.appendTo(objectCase, functionCase);
        m_out.branch(
            isFunction(value, provenType(child) & SpecObject),
            unsure(functionCase), unsure(notFunctionCase));

        m_out.appendTo(functionCase, notFunctionCase);
        functor(TypeofType::Function);

        m_out.appendTo(notFunctionCase, reallyObjectCase);
        m_out.branch(
            isExoticForTypeof(value, provenType(child) & (SpecObject - SpecFunction)),
            rarely(slowPath), usually(reallyObjectCase));

        m_out.appendTo(reallyObjectCase, slowPath);
        functor(TypeofType::Object);

        // Exotic objects (call traps, masquerades-as-undefined) are classified
        // by a lazily-generated runtime call, and the result is dispatched back
        // into the shared undefined/object/function blocks.
        m_out.appendTo(slowPath, unreachable);
        VM& vm = this->vm();
        LValue result = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationTypeOfObjectAsTypeofType, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
            }, value);
        Vector<SwitchCase, 3> cases;
        cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Undefined)), undefinedCase));
        cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Object)), reallyObjectCase));
        cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Function)), functionCase));
        m_out.switchInstruction(m_out.castToInt32(result), cases, unreachable, Weight());

        // The runtime call only returns the three values switched on above.
        m_out.appendTo(unreachable, notObjectCase);
        m_out.unreachable();

        m_out.appendTo(notObjectCase, stringCase);
        m_out.branch(
            isString(value, provenType(child) & (SpecCell - SpecObject)),
            unsure(stringCase), unsure(notStringCase));

        m_out.appendTo(stringCase, notStringCase);
        functor(TypeofType::String);

        m_out.appendTo(notStringCase, bigIntCase);
        m_out.branch(
            isBigInt(value, provenType(child) & (SpecCell - SpecObject - SpecString)),
            unsure(bigIntCase), unsure(symbolCase));

        m_out.appendTo(bigIntCase, symbolCase);
        functor(TypeofType::BigInt);

        m_out.appendTo(symbolCase, notCellCase);
        functor(TypeofType::Symbol);

        m_out.appendTo(notCellCase, numberCase);
        m_out.branch(
            isNumber(value, provenType(child) & ~SpecCell),
            unsure(numberCase), unsure(notNumberCase));

        m_out.appendTo(numberCase, notNumberCase);
        functor(TypeofType::Number);

        // typeof null is "object", so null routes to reallyObjectCase. The
        // check is elided entirely when SpecOther is disproven.
        m_out.appendTo(notNumberCase, notNullCase);
        LValue isNull;
        if (provenType(child) & SpecOther)
            isNull = m_out.equal(value, m_out.constInt64(ValueNull));
        else
            isNull = m_out.booleanFalse;
        m_out.branch(isNull, unsure(reallyObjectCase), unsure(notNullCase));

        m_out.appendTo(notNullCase, booleanCase);
        m_out.branch(
            isBoolean(value, provenType(child) & ~(SpecCell | SpecFullNumber)),
            unsure(booleanCase), unsure(undefinedCase));

        m_out.appendTo(booleanCase, undefinedCase);
        functor(TypeofType::Boolean);

        m_out.appendTo(undefinedCase, lastNext);
        functor(TypeofType::Undefined);
    }
14704
14705 TypedPointer pointerIntoTypedArray(LValue storage, LValue index, TypedArrayType type)
14706 {
14707 LValue offset = m_out.shl(m_out.zeroExtPtr(index), m_out.constIntPtr(logElementSize(type)));
14708
14709 return TypedPointer(
14710 m_heaps.typedArrayProperties,
14711 m_out.add(
14712 storage,
14713 offset
14714 ));
14715 }
14716
14717 LValue loadFromIntTypedArray(TypedPointer pointer, TypedArrayType type)
14718 {
14719 switch (elementSize(type)) {
14720 case 1:
14721 return isSigned(type) ? m_out.load8SignExt32(pointer) : m_out.load8ZeroExt32(pointer);
14722 case 2:
14723 return isSigned(type) ? m_out.load16SignExt32(pointer) : m_out.load16ZeroExt32(pointer);
14724 case 4:
14725 return m_out.load32(pointer);
14726 default:
14727 DFG_CRASH(m_graph, m_node, "Bad element size");
14728 }
14729 }
14730
14731 Output::StoreType storeType(TypedArrayType type)
14732 {
14733 if (isInt(type)) {
14734 switch (elementSize(type)) {
14735 case 1:
14736 return Output::Store32As8;
14737 case 2:
14738 return Output::Store32As16;
14739 case 4:
14740 return Output::Store32;
14741 default:
14742 DFG_CRASH(m_graph, m_node, "Bad element size");
14743 return Output::Store32;
14744 }
14745 }
14746 switch (type) {
14747 case TypeFloat32:
14748 return Output::StoreFloat;
14749 case TypeFloat64:
14750 return Output::StoreDouble;
14751 default:
14752 DFG_CRASH(m_graph, m_node, "Bad typed array type");
14753 }
14754 }
14755
14756 void setIntTypedArrayLoadResult(LValue result, TypedArrayType type, bool canSpeculate = false)
14757 {
14758 if (elementSize(type) < 4 || isSigned(type)) {
14759 setInt32(result);
14760 return;
14761 }
14762
14763 if (m_node->shouldSpeculateInt32() && canSpeculate) {
14764 speculate(
14765 Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
14766 setInt32(result);
14767 return;
14768 }
14769
14770 if (m_node->shouldSpeculateInt52()) {
14771 setStrictInt52(m_out.zeroExt(result, Int64));
14772 return;
14773 }
14774
14775 setDouble(m_out.unsignedToDouble(result));
14776 }
14777
    LValue getIntTypedArrayStoreOperand(Edge edge, bool isClamped = false)
    {
        // Lowers the value operand of an integer typed-array store to an Int32.
        // When isClamped is set (Uint8ClampedArray), the value is clamped to
        // [0, 255] instead of being truncated.
        LValue intValue;
        switch (edge.useKind()) {
        case Int52RepUse:
        case Int32Use: {
            if (edge.useKind() == Int32Use)
                intValue = lowInt32(edge);
            else
                intValue = m_out.castToInt32(lowStrictInt52(edge));

            if (isClamped) {
                // Clamp: negative -> 0, > 255 -> 255, else the value itself.
                LBasicBlock atLeastZero = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                Vector<ValueFromBlock, 2> intValues;
                intValues.append(m_out.anchor(m_out.int32Zero));
                m_out.branch(
                    m_out.lessThan(intValue, m_out.int32Zero),
                    unsure(continuation), unsure(atLeastZero));

                LBasicBlock lastNext = m_out.appendTo(atLeastZero, continuation);

                intValues.append(m_out.anchor(m_out.select(
                    m_out.greaterThan(intValue, m_out.constInt32(255)),
                    m_out.constInt32(255),
                    intValue)));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                intValue = m_out.phi(Int32, intValues);
            }
            break;
        }

        case DoubleRepUse: {
            LValue doubleValue = lowDouble(edge);

            if (isClamped) {
                // Clamp: < 0 or NaN (unordered) -> 0, > 255 -> 255, else
                // convert to int.
                LBasicBlock atLeastZero = m_out.newBlock();
                LBasicBlock withinRange = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                Vector<ValueFromBlock, 3> intValues;
                intValues.append(m_out.anchor(m_out.int32Zero));
                m_out.branch(
                    m_out.doubleLessThanOrUnordered(doubleValue, m_out.doubleZero),
                    unsure(continuation), unsure(atLeastZero));

                LBasicBlock lastNext = m_out.appendTo(atLeastZero, withinRange);
                intValues.append(m_out.anchor(m_out.constInt32(255)));
                m_out.branch(
                    m_out.doubleGreaterThan(doubleValue, m_out.constDouble(255)),
                    unsure(continuation), unsure(withinRange));

                m_out.appendTo(withinRange, continuation);
                intValues.append(m_out.anchor(m_out.doubleToInt(doubleValue)));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                intValue = m_out.phi(Int32, intValues);
            } else
                intValue = doubleToInt32(doubleValue);
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
        }

        return intValue;
    }
14850
    LValue doubleToInt32(LValue doubleValue, double low, double high, bool isSigned = true)
    {
        // Converts a double to Int32: values within [low, high] use the direct
        // hardware conversion; anything else (including NaN, which fails both
        // ordered comparisons) goes to a C call implementing JS toInt32.
        LBasicBlock greatEnough = m_out.newBlock();
        LBasicBlock withinRange = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Vector<ValueFromBlock, 2> results;

        m_out.branch(
            m_out.doubleGreaterThanOrEqual(doubleValue, m_out.constDouble(low)),
            unsure(greatEnough), unsure(slowPath));

        LBasicBlock lastNext = m_out.appendTo(greatEnough, withinRange);
        m_out.branch(
            m_out.doubleLessThanOrEqual(doubleValue, m_out.constDouble(high)),
            unsure(withinRange), unsure(slowPath));

        // In-range fast path: a single conversion instruction.
        m_out.appendTo(withinRange, slowPath);
        LValue fastResult;
        if (isSigned)
            fastResult = m_out.doubleToInt(doubleValue);
        else
            fastResult = m_out.doubleToUInt(doubleValue);
        results.append(m_out.anchor(fastResult));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        results.append(m_out.anchor(m_out.call(Int32, m_out.operation(operationToInt32), doubleValue)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, results);
    }
14885
14886 LValue doubleToInt32(LValue doubleValue)
14887 {
14888#if CPU(ARM64)
14889 if (MacroAssemblerARM64::supportsDoubleToInt32ConversionUsingJavaScriptSemantics()) {
14890 PatchpointValue* patchpoint = m_out.patchpoint(Int32);
14891 patchpoint->append(ConstrainedValue(doubleValue, B3::ValueRep::SomeRegister));
14892 patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
14893 jit.convertDoubleToInt32UsingJavaScriptSemantics(params[1].fpr(), params[0].gpr());
14894 });
14895 patchpoint->effects = Effects::none();
14896 return patchpoint;
14897 }
14898#endif
14899
14900 if (hasSensibleDoubleToInt())
14901 return sensibleDoubleToInt32(doubleValue);
14902
14903 double limit = pow(2, 31) - 1;
14904 return doubleToInt32(doubleValue, -limit, limit);
14905 }
14906
    LValue sensibleDoubleToInt32(LValue doubleValue)
    {
        // Fast double->Int32 for targets whose conversion instruction yields
        // 0x80000000 for out-of-range inputs: try the direct conversion, and
        // only on that sentinel value call the slow C implementation.
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue fastResultValue = m_out.doubleToInt(doubleValue);
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        m_out.branch(
            m_out.equal(fastResultValue, m_out.constInt32(0x80000000)),
            rarely(slowPath), usually(continuation));

        LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            m_out.call(Int32, m_out.operation(operationToInt32SensibleSlow), doubleValue));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, fastResult, slowResult);
    }
14926
14927 // This is a mechanism for creating a code generator that fills in a gap in the code using our
14928 // own MacroAssembler. This is useful for slow paths that involve a lot of code and we don't want
14929 // to pay the price of B3 optimizing it. A lazy slow path will only be generated if it actually
14930 // executes. On the other hand, a lazy slow path always incurs the cost of two additional jumps.
14931 // Also, the lazy slow path's register allocation state is slaved to whatever B3 did, so you
14932 // have to use a ScratchRegisterAllocator to try to use some unused registers and you may have
14933 // to spill to top of stack if there aren't enough registers available.
14934 //
14935 // Lazy slow paths involve three different stages of execution. Each stage has unique
14936 // capabilities and knowledge. The stages are:
14937 //
14938 // 1) DFG->B3 lowering, i.e. code that runs in this phase. Lowering is the last time you will
14939 // have access to LValues. If there is an LValue that needs to be fed as input to a lazy slow
14940 // path, then you must pass it as an argument here (as one of the varargs arguments after the
14941 // functor). But, lowering doesn't know which registers will be used for those LValues. Hence
14942 // you pass a lambda to lazySlowPath() and that lambda will run during stage (2):
14943 //
14944 // 2) FTLCompile.cpp's fixFunctionBasedOnStackMaps. This code is the only stage at which we know
14945 // the mapping from arguments passed to this method in (1) and the registers that B3
14946 // selected for those arguments. You don't actually want to generate any code here, since then
14947 // the slow path wouldn't actually be lazily generated. Instead, you want to save the
14948 // registers being used for the arguments and defer code generation to stage (3) by creating
14949 // and returning a LazySlowPath::Generator:
14950 //
14951 // 3) LazySlowPath's generate() method. This code runs in response to the lazy slow path
14952 // executing for the first time. It will call the generator you created in stage (2).
14953 //
14954 // Note that each time you invoke stage (1), stage (2) may be invoked zero, one, or many times.
14955 // Stage (2) will usually be invoked once for stage (1). But, B3 may kill the code, in which
14956 // case stage (2) won't run. B3 may duplicate the code (for example via tail duplication),
14957 // leading to many calls to your stage (2) lambda. Stage (3) may be called zero or once for each
14958 // stage (2). It will be called zero times if the slow path never runs. This is what you hope for
14959 // whenever you use the lazySlowPath() mechanism.
14960 //
14961 // A typical use of lazySlowPath() will look like the example below, which just creates a slow
14962 // path that adds some value to the input and returns it.
14963 //
14964 // // Stage (1) is here. This is your last chance to figure out which LValues to use as inputs.
14965 // // Notice how we pass "input" as an argument to lazySlowPath().
14966 // LValue input = ...;
14967 // int addend = ...;
14968 // LValue output = lazySlowPath(
14969 // [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
14970 // // Stage (2) is here. This is your last chance to figure out which registers are used
14971 // // for which values. Location zero is always the return value. You can ignore it if
14972 // // you don't want to return anything. Location 1 is the register for the first
14973 // // argument to the lazySlowPath(), i.e. "input". Note that the Location object could
14974 // // also hold an FPR, if you are passing a double.
14975 // GPRReg outputGPR = locations[0].directGPR();
14976 // GPRReg inputGPR = locations[1].directGPR();
14977 // return LazySlowPath::createGenerator(
14978 // [=] (CCallHelpers& jit, LazySlowPath::GenerationParams& params) {
14979 // // Stage (3) is here. This is when you generate code. You have access to the
14980 // // registers you collected in stage (2) because this lambda closes over those
14981 // // variables (outputGPR and inputGPR). You also have access to whatever extra
14982 // // data you collected in stage (1), such as the addend in this case.
14983 // jit.add32(TrustedImm32(addend), inputGPR, outputGPR);
14984 // // You have to end by jumping to done. There is nothing to fall through to.
14985 // // You can also jump to the exception handler (see LazySlowPath.h for more
14986 // // info). Note that currently you cannot OSR exit.
14987 // params.doneJumps.append(jit.jump());
14988 // });
14989 // },
14990 // input);
14991 //
14992 // You can basically pass as many inputs as you like, either using this varargs form, or by
14993 // passing a Vector of LValues.
14994 //
14995 // Note that if your slow path is only doing a call, you can use the createLazyCallGenerator()
14996 // helper. For example:
14997 //
14998 // LValue input = ...;
14999 // LValue output = lazySlowPath(
15000 // [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
15001 // return createLazyCallGenerator(
15002 // operationDoThings, locations[0].directGPR(), locations[1].directGPR());
15003 // }, input);
15004 //
15005 // Finally, note that all of the lambdas - both the stage (2) lambda and the stage (3) lambda -
15006 // run after the function that created them returns. Hence, you should not use by-reference
15007 // capture (i.e. [&]) in any of these lambdas.
15008 template<typename Functor, typename... ArgumentTypes>
15009 PatchpointValue* lazySlowPath(const Functor& functor, ArgumentTypes... arguments)
15010 {
15011 return lazySlowPath(functor, Vector<LValue>{ arguments... });
15012 }
15013
    template<typename Functor>
    PatchpointValue* lazySlowPath(const Functor& functor, const Vector<LValue>& userArguments)
    {
        // Creates a patchpoint whose slow path is generated lazily, on first
        // execution. See the long comment above for the three-stage protocol;
        // `functor` is the stage-(2) lambda that receives the argument
        // locations and returns the stage-(3) generator.
        CodeOrigin origin = m_node->origin.semantic;

        // Location 0 is the Int64 result; each user argument gets some register.
        PatchpointValue* result = m_out.patchpoint(B3::Int64);
        for (LValue arg : userArguments)
            result->append(ConstrainedValue(arg, B3::ValueRep::SomeRegister));

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(result);

        result->clobber(RegisterSet::macroScratchRegisters());
        State* state = &m_ftlState;

        result->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                // Stage (2): we now know the register for each value rep.
                Vector<Location> locations;
                for (const B3::ValueRep& rep : params)
                    locations.append(Location::forValueRep(rep));

                RefPtr<LazySlowPath::Generator> generator = functor(locations);

                // The patchable jump is initially a jump to the late path below;
                // once the slow path is generated it gets repatched. `done` is
                // where the slow path returns to.
                CCallHelpers::PatchableJump patchableJump = jit.patchableJump();
                CCallHelpers::Label done = jit.label();

                RegisterSet usedRegisters = params.unavailableRegisters();

                RefPtr<ExceptionTarget> exceptionTarget =
                    exceptionHandle->scheduleExitCreation(params);

                // FIXME: As part of handling exceptions, we need to create a concrete OSRExit here.
                // Doing so should automagically register late paths that emit exit thunks.

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);
                        patchableJump.m_jump.link(&jit);
                        // Reserve a slot for this lazy slow path and push its
                        // index so the generation thunk knows which one to build.
                        unsigned index = state->jitCode->lazySlowPaths.size();
                        state->jitCode->lazySlowPaths.append(nullptr);
                        jit.pushToSaveImmediateWithoutTouchingRegisters(
                            CCallHelpers::TrustedImm32(index));
                        CCallHelpers::Jump generatorJump = jit.jump();

                        // Note that so long as we're here, we don't really know if our late path
                        // runs before or after any other late paths that we might depend on, like
                        // the exception thunk.

                        RefPtr<JITCode> jitCode = state->jitCode;
                        VM* vm = &state->graph.m_vm;

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                // Final link: resolve labels and register the
                                // LazySlowPath object in the slot reserved above.
                                linkBuffer.link(generatorJump,
                                    CodeLocationLabel<JITThunkPtrTag>(vm->getCTIStub(lazySlowPathGenerationThunkGenerator).code()));

                                std::unique_ptr<LazySlowPath> lazySlowPath = std::make_unique<LazySlowPath>();

                                auto linkedPatchableJump = CodeLocationJump<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(patchableJump));

                                CodeLocationLabel<JSInternalPtrTag> linkedDone = linkBuffer.locationOf<JSInternalPtrTag>(done);

                                CallSiteIndex callSiteIndex =
                                    jitCode->common.addUniqueCallSiteIndex(origin);

                                lazySlowPath->initialize(
                                    linkedPatchableJump, linkedDone,
                                    exceptionTarget->label(linkBuffer), usedRegisters,
                                    callSiteIndex, generator);

                                jitCode->lazySlowPaths[index] = WTFMove(lazySlowPath);
                            });
                    });
            });
        return result;
    }
15090
    // Emits an OSR exit of the given kind, taken when failCondition is true,
    // attributing the exiting value to `highValue` for profiling.
    void speculate(
        ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition)
    {
        appendOSRExit(kind, lowValue, highValue, failCondition, m_origin);
    }
15096
    // Same as above, but the exiting value's profile is described explicitly
    // rather than derived from a node.
    void speculate(
        ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, LValue failCondition)
    {
        appendOSRExit(kind, lowValue, profile, failCondition, m_origin);
    }
15102
    // Emits an unconditional OSR exit and marks the abstract state invalid, so
    // the remainder of this node's lowering is treated as unreachable.
    void terminate(ExitKind kind)
    {
        speculate(kind, noValue(), nullptr, m_out.booleanTrue);
        didAlreadyTerminate();
    }
15108
    // Invalidates the abstract interpreter state; used after an unconditional exit.
    void didAlreadyTerminate()
    {
        m_state.setIsValid(false);
    }
15113
    // Narrows the abstract interpreter's view of the edge's type without emitting
    // any runtime check — used when the check is already implied (e.g. constants).
    void simulatedTypeCheck(Edge highValue, SpeculatedType typesPassedThrough)
    {
        m_interpreter.filter(highValue, typesPassedThrough);
    }
15118
    // Emits a type check that OSR exits (with the given kind) when failCondition
    // is true, then filters the edge's proven type. No-op if the check is proven
    // unnecessary; see appendTypeCheck().
    void typeCheck(
        FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough,
        LValue failCondition, ExitKind exitKind = BadType)
    {
        appendTypeCheck(lowValue, highValue, typesPassedThrough, failCondition, exitKind);
    }
15125
    // Worker for typeCheck(): skips the check entirely when the abstract
    // interpreter has already proven it redundant, otherwise emits the OSR exit
    // and records that only typesPassedThrough can flow past this point.
    void appendTypeCheck(
        FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough,
        LValue failCondition, ExitKind exitKind)
    {
        if (!m_interpreter.needsTypeCheck(highValue, typesPassedThrough))
            return;
        ASSERT(mayHaveTypeCheck(highValue.useKind()));
        appendOSRExit(exitKind, lowValue, highValue.node(), failCondition, m_origin);
        m_interpreter.filter(highValue, typesPassedThrough);
    }
15136
    // Produces the edge's value as an unboxed Int32, trying each available
    // lowered representation in turn (constant, int32, strict int52, shifted
    // int52, boxed JSValue) and emitting type/range checks where needed. If no
    // representation exists or a constant contradicts the speculation, terminates
    // (unconditional OSR exit) and returns a dummy zero.
    LValue lowInt32(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));

        if (edge->hasConstant()) {
            JSValue value = edge->asJSValue();
            simulatedTypeCheck(edge, SpecInt32Only);
            if (!value.isInt32()) {
                // Non-int32 constant: this path can never pass the speculation.
                if (mayHaveTypeCheck(edge.useKind()))
                    terminate(Uncountable);
                return m_out.int32Zero;
            }
            LValue result = m_out.constInt32(value.asInt32());
            result->setOrigin(B3::Origin(edge.node()));
            return result;
        }

        LoweredNodeValue value = m_int32Values.get(edge.node());
        if (isValid(value)) {
            simulatedTypeCheck(edge, SpecInt32Only);
            return value.value();
        }

        // Int52 representations: narrow with a range check (see strictInt52ToInt32).
        value = m_strictInt52Values.get(edge.node());
        if (isValid(value))
            return strictInt52ToInt32(edge, value.value());

        value = m_int52Values.get(edge.node());
        if (isValid(value))
            return strictInt52ToInt32(edge, int52ToStrictInt52(value.value()));

        // Boxed JSValue: check the int32 tag, then unbox and cache the result.
        value = m_jsValueValues.get(edge.node());
        if (isValid(value)) {
            LValue boxedResult = value.value();
            FTL_TYPE_CHECK(
                jsValueValue(boxedResult), edge, SpecInt32Only, isNotInt32(boxedResult));
            LValue result = unboxInt32(boxedResult);
            setInt32(edge.node(), result);
            return result;
        }

        DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecInt32Only), provenType(edge));
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.int32Zero;
    }
15183
    // Int52 values come in two flavors: StrictInt52 holds the integer in the low
    // bits; Int52 holds it shifted left by JSValue::int52ShiftAmount (see
    // strictInt52ToInt52()/int52ToStrictInt52() below).
    enum Int52Kind { StrictInt52, Int52 };
    // Produces the edge's value in the requested Int52 flavor, converting from
    // the other flavor if that is what's available. Terminates and returns a
    // dummy zero if no Int52 representation exists.
    LValue lowInt52(Edge edge, Int52Kind kind)
    {
        DFG_ASSERT(m_graph, m_node, edge.useKind() == Int52RepUse, edge.useKind());

        LoweredNodeValue value;

        switch (kind) {
        case Int52:
            value = m_int52Values.get(edge.node());
            if (isValid(value))
                return value.value();

            value = m_strictInt52Values.get(edge.node());
            if (isValid(value))
                return strictInt52ToInt52(value.value());
            break;

        case StrictInt52:
            value = m_strictInt52Values.get(edge.node());
            if (isValid(value))
                return value.value();

            value = m_int52Values.get(edge.node());
            if (isValid(value))
                return int52ToStrictInt52(value.value());
            break;
        }

        DFG_ASSERT(m_graph, m_node, !provenType(edge), provenType(edge));
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.int64Zero;
    }
15218
    // Shorthand for the shifted (Int52) flavor.
    LValue lowInt52(Edge edge)
    {
        return lowInt52(edge, Int52);
    }
15223
    // Shorthand for the unshifted (StrictInt52) flavor.
    LValue lowStrictInt52(Edge edge)
    {
        return lowInt52(edge, StrictInt52);
    }
15228
15229 bool betterUseStrictInt52(Node* node)
15230 {
15231 return !isValid(m_int52Values.get(node));
15232 }
    // Edge overload; defers to the Node* version.
    bool betterUseStrictInt52(Edge edge)
    {
        return betterUseStrictInt52(edge.node());
    }
15237 template<typename T>
15238 Int52Kind bestInt52Kind(T node)
15239 {
15240 return betterUseStrictInt52(node) ? StrictInt52 : Int52;
15241 }
15242 Int52Kind opposite(Int52Kind kind)
15243 {
15244 switch (kind) {
15245 case Int52:
15246 return StrictInt52;
15247 case StrictInt52:
15248 return Int52;
15249 }
15250 DFG_CRASH(m_graph, m_node, "Bad use kind");
15251 return Int52;
15252 }
15253
    // Lowers the edge in whichever Int52 flavor is cheapest, reporting the chosen
    // flavor through `kind` so the caller can interpret the result.
    LValue lowWhicheverInt52(Edge edge, Int52Kind& kind)
    {
        kind = bestInt52Kind(edge);
        return lowInt52(edge, kind);
    }
15259
    // Produces the edge's value as a cell pointer: constants are materialized as
    // frozen pointers, boxed JSValues get a cell type check (a cell's box is the
    // raw pointer, so no unboxing is needed). Terminates and returns a dummy
    // pointer if the value cannot be a cell.
    LValue lowCell(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || DFG::isCell(edge.useKind()), edge.useKind());

        if (edge->op() == JSConstant) {
            FrozenValue* value = edge->constant();
            simulatedTypeCheck(edge, SpecCellCheck);
            if (!value->value().isCell()) {
                // Non-cell constant: the speculation can never pass here.
                if (mayHaveTypeCheck(edge.useKind()))
                    terminate(Uncountable);
                return m_out.intPtrZero;
            }
            LValue result = frozenPointer(value);
            result->setOrigin(B3::Origin(edge.node()));
            return result;
        }

        LoweredNodeValue value = m_jsValueValues.get(edge.node());
        if (isValid(value)) {
            LValue uncheckedValue = value.value();
            FTL_TYPE_CHECK(
                jsValueValue(uncheckedValue), edge, SpecCellCheck, isNotCell(uncheckedValue));
            return uncheckedValue;
        }

        DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecCellCheck), provenType(edge));
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.intPtrZero;
    }
15290
15291 LValue lowObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15292 {
15293 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse);
15294
15295 LValue result = lowCell(edge, mode);
15296 speculateObject(edge, result);
15297 return result;
15298 }
15299
15300 LValue lowRegExpObject(Edge edge)
15301 {
15302 LValue result = lowCell(edge);
15303 speculateRegExpObject(edge, result);
15304 return result;
15305 }
15306
15307 LValue lowMapObject(Edge edge)
15308 {
15309 LValue result = lowCell(edge);
15310 speculateMapObject(edge, result);
15311 return result;
15312 }
15313
15314 LValue lowSetObject(Edge edge)
15315 {
15316 LValue result = lowCell(edge);
15317 speculateSetObject(edge, result);
15318 return result;
15319 }
15320
15321 LValue lowWeakMapObject(Edge edge)
15322 {
15323 LValue result = lowCell(edge);
15324 speculateWeakMapObject(edge, result);
15325 return result;
15326 }
15327
15328 LValue lowWeakSetObject(Edge edge)
15329 {
15330 LValue result = lowCell(edge);
15331 speculateWeakSetObject(edge, result);
15332 return result;
15333 }
15334
15335 LValue lowDataViewObject(Edge edge)
15336 {
15337 LValue result = lowCell(edge);
15338 speculateDataViewObject(edge, result);
15339 return result;
15340 }
15341
15342 LValue lowString(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15343 {
15344 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == StringUse || edge.useKind() == KnownStringUse || edge.useKind() == StringIdentUse);
15345
15346 LValue result = lowCell(edge, mode);
15347 speculateString(edge, result);
15348 return result;
15349 }
15350
15351 LValue lowStringIdent(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15352 {
15353 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == StringIdentUse);
15354
15355 LValue string = lowString(edge, mode);
15356 LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
15357 speculateStringIdent(edge, string, stringImpl);
15358 return stringImpl;
15359 }
15360
15361 LValue lowSymbol(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15362 {
15363 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == SymbolUse);
15364
15365 LValue result = lowCell(edge, mode);
15366 speculateSymbol(edge, result);
15367 return result;
15368 }
15369
15370 LValue lowBigInt(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15371 {
15372 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BigIntUse);
15373
15374 LValue result = lowCell(edge, mode);
15375 speculateBigInt(edge, result);
15376 return result;
15377 }
15378
15379 LValue lowNonNullObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15380 {
15381 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse);
15382
15383 LValue result = lowCell(edge, mode);
15384 speculateNonNullObject(edge, result);
15385 return result;
15386 }
15387
    // Produces the edge's value as an unboxed boolean, trying constants, the
    // boolean representation, and finally a boxed JSValue (with a type check and
    // unboxing). Terminates and returns false if the value cannot be a boolean.
    LValue lowBoolean(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse || edge.useKind() == KnownBooleanUse);

        if (edge->hasConstant()) {
            JSValue value = edge->asJSValue();
            simulatedTypeCheck(edge, SpecBoolean);
            if (!value.isBoolean()) {
                // Non-boolean constant: the speculation can never pass here.
                if (mayHaveTypeCheck(edge.useKind()))
                    terminate(Uncountable);
                return m_out.booleanFalse;
            }
            LValue result = m_out.constBool(value.asBoolean());
            result->setOrigin(B3::Origin(edge.node()));
            return result;
        }

        LoweredNodeValue value = m_booleanValues.get(edge.node());
        if (isValid(value)) {
            simulatedTypeCheck(edge, SpecBoolean);
            return value.value();
        }

        // Boxed JSValue: check the boolean tag, then unbox and cache the result.
        value = m_jsValueValues.get(edge.node());
        if (isValid(value)) {
            LValue unboxedResult = value.value();
            FTL_TYPE_CHECK(
                jsValueValue(unboxedResult), edge, SpecBoolean, isNotBoolean(unboxedResult));
            LValue result = unboxBoolean(unboxedResult);
            setBoolean(edge.node(), result);
            return result;
        }

        DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecBoolean), provenType(edge));
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.booleanFalse;
    }
15426
    // Produces the edge's value as an unboxed double. Only the double
    // representation is consulted; terminates and returns 0.0 if it is absent.
    LValue lowDouble(Edge edge)
    {
        DFG_ASSERT(m_graph, m_node, isDouble(edge.useKind()), edge.useKind());

        LoweredNodeValue value = m_doubleValues.get(edge.node());
        if (isValid(value))
            return value.value();
        DFG_ASSERT(m_graph, m_node, !provenType(edge), provenType(edge));
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.doubleZero;
    }
15439
    // Produces the edge's value as a boxed (encoded) JSValue, reboxing from the
    // int32 or boolean representations if necessary and caching the boxed form.
    // Double and Int52 representations are intentionally not handled here (see
    // the asserts); a node with no lowering at all is a compiler bug and crashes.
    LValue lowJSValue(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse, m_node->op(), edge.useKind());
        DFG_ASSERT(m_graph, m_node, !isDouble(edge.useKind()), m_node->op(), edge.useKind());
        DFG_ASSERT(m_graph, m_node, edge.useKind() != Int52RepUse, m_node->op(), edge.useKind());

        if (edge->hasConstant()) {
            LValue result = m_out.constInt64(JSValue::encode(edge->asJSValue()));
            result->setOrigin(B3::Origin(edge.node()));
            return result;
        }

        LoweredNodeValue value = m_jsValueValues.get(edge.node());
        if (isValid(value))
            return value.value();

        value = m_int32Values.get(edge.node());
        if (isValid(value)) {
            LValue result = boxInt32(value.value());
            setJSValue(edge.node(), result);
            return result;
        }

        value = m_booleanValues.get(edge.node());
        if (isValid(value)) {
            LValue result = boxBoolean(value.value());
            setJSValue(edge.node(), result);
            return result;
        }

        DFG_CRASH(m_graph, m_node, makeString("Value not defined: ", String::number(edge.node()->index())).ascii().data());
        return 0;
    }
15473
    // Produces the edge as a boxed JSValue while speculating that it is NOT a cell.
    LValue lowNotCell(Edge edge)
    {
        LValue result = lowJSValue(edge, ManualOperandSpeculation);
        FTL_TYPE_CHECK(jsValueValue(result), edge, ~SpecCellCheck, isCell(result));
        return result;
    }
15480
15481 LValue lowStorage(Edge edge)
15482 {
15483 LoweredNodeValue value = m_storageValues.get(edge.node());
15484 if (isValid(value))
15485 return value.value();
15486
15487 LValue result = lowCell(edge);
15488 setStorage(edge.node(), result);
15489 return result;
15490 }
15491
    // Truncates a strict Int52 to Int32, OSR exiting if the value does not
    // round-trip (i.e. it was outside the int32 range); caches the narrowed value.
    LValue strictInt52ToInt32(Edge edge, LValue value)
    {
        LValue result = m_out.castToInt32(value);
        FTL_TYPE_CHECK(
            noValue(), edge, SpecInt32Only,
            m_out.notEqual(m_out.signExt32To64(result), value));
        setInt32(edge.node(), result);
        return result;
    }
15501
    // Converts a strict Int52 to a double (always exact: 52 bits fit a double).
    LValue strictInt52ToDouble(LValue value)
    {
        return m_out.intToDouble(value);
    }
15506
    // Boxes a strict Int52 as a JSValue: values that fit in int32 get the int32
    // box; everything else is boxed as a double.
    LValue strictInt52ToJSValue(LValue value)
    {
        LBasicBlock isInt32 = m_out.newBlock();
        LBasicBlock isDouble = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Vector<ValueFromBlock, 2> results;

        // In range iff truncating to 32 bits and sign-extending back is lossless.
        LValue int32Value = m_out.castToInt32(value);
        m_out.branch(
            m_out.equal(m_out.signExt32To64(int32Value), value),
            unsure(isInt32), unsure(isDouble));

        LBasicBlock lastNext = m_out.appendTo(isInt32, isDouble);

        results.append(m_out.anchor(boxInt32(int32Value)));
        m_out.jump(continuation);

        m_out.appendTo(isDouble, continuation);

        results.append(m_out.anchor(boxDouble(m_out.intToDouble(value))));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int64, results);
    }
15533
    // Converts strict (low-bits) Int52 to the shifted representation.
    LValue strictInt52ToInt52(LValue value)
    {
        return m_out.shl(value, m_out.constInt64(JSValue::int52ShiftAmount));
    }
15538
    // Converts shifted Int52 back to the strict representation (arithmetic shift
    // preserves the sign).
    LValue int52ToStrictInt52(LValue value)
    {
        return m_out.aShr(value, m_out.constInt64(JSValue::int52ShiftAmount));
    }
15543
    // True if the boxed value is an int32. boxInt32() adds m_tagTypeNumber to a
    // zero-extended 32-bit value, so boxed int32s are exactly the encoded values
    // unsigned-greater-or-equal to m_tagTypeNumber.
    LValue isInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecInt32Only))
            return proven;
        return m_out.aboveOrEqual(jsValue, m_tagTypeNumber);
    }
    // Negation of isInt32(); see the encoding note there.
    LValue isNotInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecInt32Only))
            return proven;
        return m_out.below(jsValue, m_tagTypeNumber);
    }
    // Extracts the int32 payload from a boxed int32 (the tag lives in the high bits).
    LValue unboxInt32(LValue jsValue)
    {
        return m_out.castToInt32(jsValue);
    }
    // Boxes an int32 by placing m_tagTypeNumber above the zero-extended payload.
    LValue boxInt32(LValue value)
    {
        return m_out.add(m_out.zeroExt(value, Int64), m_tagTypeNumber);
    }
15564
    // True if the boxed value is a cell or a "misc" value (i.e. not a number):
    // numbers are the only encodings with m_tagTypeNumber bits set.
    LValue isCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecCellCheck | SpecMisc))
            return proven;
        return m_out.testIsZero64(jsValue, m_tagTypeNumber);
    }
    // Negation of isCellOrMisc(): true iff the value is a number.
    LValue isNotCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~(SpecCellCheck | SpecMisc)))
            return proven;
        return m_out.testNonZero64(jsValue, m_tagTypeNumber);
    }
15577
    // Decodes a boxed double (reverses boxDouble() below by adding back
    // m_tagTypeNumber, then bit-casting). Optionally also returns the raw
    // integer bit pattern via unboxedAsInt.
    LValue unboxDouble(LValue jsValue, LValue* unboxedAsInt = nullptr)
    {
        LValue asInt = m_out.add(jsValue, m_tagTypeNumber);
        if (unboxedAsInt)
            *unboxedAsInt = asInt;
        return m_out.bitCast(asInt, Double);
    }
    // Encodes a double as a JSValue by subtracting m_tagTypeNumber from its bits.
    LValue boxDouble(LValue doubleValue)
    {
        return m_out.sub(m_out.bitCast(doubleValue, Int64), m_tagTypeNumber);
    }
15589
    // Converts a boxed JSValue to a strict Int52: int32s are sign-extended
    // inline; anything else is handed to operationConvertBoxedDoubleToInt52,
    // whose notInt52 sentinel triggers an OSR exit (the value wasn't an
    // integer-valued double in range).
    LValue jsValueToStrictInt52(Edge edge, LValue boxedValue)
    {
        LBasicBlock intCase = m_out.newBlock();
        LBasicBlock doubleCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Fold the int32 test when the abstract interpreter already knows the answer.
        LValue isNotInt32;
        if (!m_interpreter.needsTypeCheck(edge, SpecInt32Only))
            isNotInt32 = m_out.booleanFalse;
        else if (!m_interpreter.needsTypeCheck(edge, ~SpecInt32Only))
            isNotInt32 = m_out.booleanTrue;
        else
            isNotInt32 = this->isNotInt32(boxedValue);
        m_out.branch(isNotInt32, unsure(doubleCase), unsure(intCase));

        LBasicBlock lastNext = m_out.appendTo(intCase, doubleCase);

        ValueFromBlock intToInt52 = m_out.anchor(
            m_out.signExt32To64(unboxInt32(boxedValue)));
        m_out.jump(continuation);

        m_out.appendTo(doubleCase, continuation);

        LValue possibleResult = m_out.call(
            Int64, m_out.operation(operationConvertBoxedDoubleToInt52), boxedValue);
        FTL_TYPE_CHECK(
            jsValueValue(boxedValue), edge, SpecInt32Only | SpecAnyIntAsDouble,
            m_out.equal(possibleResult, m_out.constInt64(JSValue::notInt52)));

        ValueFromBlock doubleToInt52 = m_out.anchor(possibleResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        return m_out.phi(Int64, intToInt52, doubleToInt52);
    }
15626
    // Converts an unboxed double to a strict Int52 via a runtime call, OSR
    // exiting with Int52Overflow when the helper returns the notInt52 sentinel.
    LValue doubleToStrictInt52(Edge edge, LValue value)
    {
        LValue possibleResult = m_out.call(
            Int64, m_out.operation(operationConvertDoubleToInt52), value);
        FTL_TYPE_CHECK_WITH_EXIT_KIND(Int52Overflow,
            doubleValue(value), edge, SpecAnyIntAsDouble,
            m_out.equal(possibleResult, m_out.constInt64(JSValue::notInt52)));

        return possibleResult;
    }
15637
    // Converts a double to int32, OSR exiting with Overflow if the value does
    // not convert exactly (checked by round-tripping through a double compare,
    // which also rejects NaN via the unordered test). When requested, also exits
    // with NegativeZero if the result is zero but the input's sign bit was set.
    LValue convertDoubleToInt32(LValue value, bool shouldCheckNegativeZero)
    {
        LValue integerValue = m_out.doubleToInt(value);
        LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue);
        LValue valueNotConvertibleToInteger = m_out.doubleNotEqualOrUnordered(value, integerValueConvertedToDouble);
        speculate(Overflow, FormattedValue(DataFormatDouble, value), m_node, valueNotConvertibleToInteger);

        if (shouldCheckNegativeZero) {
            LBasicBlock valueIsZero = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            m_out.branch(m_out.isZero32(integerValue), unsure(valueIsZero), unsure(continuation));

            LBasicBlock lastNext = m_out.appendTo(valueIsZero, continuation);

            // -0.0 and +0.0 both truncate to 0; distinguish them by the sign bit
            // of the raw double bits.
            LValue doubleBitcastToInt64 = m_out.bitCast(value, Int64);
            LValue signBitSet = m_out.lessThan(doubleBitcastToInt64, m_out.constInt64(0));

            speculate(NegativeZero, FormattedValue(DataFormatDouble, value), m_node, signBitSet);
            m_out.jump(continuation);
            m_out.appendTo(continuation, lastNext);
        }
        return integerValue;
    }
15661
    // True if the boxed value is a number (int32 or double).
    LValue isNumber(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecFullNumber))
            return proven;
        return isNotCellOrMisc(jsValue);
    }
    // Negation of isNumber().
    LValue isNotNumber(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecFullNumber))
            return proven;
        return isCellOrMisc(jsValue);
    }
15674
    // True if the boxed value is not a cell: cells are the encodings with none of
    // the m_tagMask bits set.
    LValue isNotCell(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecCellCheck))
            return proven;
        return m_out.testNonZero64(jsValue, m_tagMask);
    }
15681
    // True if the boxed value is a cell (no m_tagMask bits set).
    LValue isCell(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecCellCheck))
            return proven;
        return m_out.testIsZero64(jsValue, m_tagMask);
    }
15688
    // True if the value is not one of the "misc" encodings (undefined/null/bools
    // and friends), which all sit at or below this tag-bit constant.
    LValue isNotMisc(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecMisc))
            return proven;
        return m_out.above(value, m_out.constInt64(TagBitTypeOther | TagBitBool | TagBitUndefined));
    }
15695
    // Negation of isNotMisc().
    LValue isMisc(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecMisc))
            return proven;
        return m_out.logicalNot(isNotMisc(value));
    }
15702
    // True if the value is not a boolean: booleans differ from ValueFalse only in
    // bit 0, so xor with ValueFalse must leave nothing outside that bit.
    LValue isNotBoolean(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecBoolean))
            return proven;
        return m_out.testNonZero64(
            m_out.bitXor(jsValue, m_out.constInt64(ValueFalse)),
            m_out.constInt64(~1));
    }
    // Negation of isNotBoolean().
    LValue isBoolean(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecBoolean))
            return proven;
        return m_out.logicalNot(isNotBoolean(jsValue));
    }
    // Extracts the boolean payload (bit 0 distinguishes ValueTrue/ValueFalse).
    LValue unboxBoolean(LValue jsValue)
    {
        // We want to use a cast that guarantees that B3 knows that even the integer
        // value is just 0 or 1. But for now we do it the dumb way.
        return m_out.notZero64(m_out.bitAnd(jsValue, m_out.constInt64(1)));
    }
    // Boxes a boolean as ValueTrue/ValueFalse.
    LValue boxBoolean(LValue value)
    {
        return m_out.select(
            value, m_out.constInt64(ValueTrue), m_out.constInt64(ValueFalse));
    }
15728
    // True if the value is neither null nor undefined: masking off the
    // TagBitUndefined bit maps both of them to ValueNull.
    LValue isNotOther(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecOther))
            return proven;
        return m_out.notEqual(
            m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
            m_out.constInt64(ValueNull));
    }
    // True if the value is null or undefined; see isNotOther() for the trick.
    LValue isOther(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecOther))
            return proven;
        return m_out.equal(
            m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
            m_out.constInt64(ValueNull));
    }
15745
15746 LValue isProvenValue(SpeculatedType provenType, SpeculatedType wantedType)
15747 {
15748 if (!(provenType & ~wantedType))
15749 return m_out.booleanTrue;
15750 if (!(provenType & wantedType))
15751 return m_out.booleanFalse;
15752 return nullptr;
15753 }
15754
    // Emits the type speculation required by the edge's use kind: Known* and
    // representation-changing kinds assert that no check is needed; every other
    // kind dispatches to its dedicated speculateXxx() helper. An unhandled kind
    // is a compiler bug.
    void speculate(Edge edge)
    {
        switch (edge.useKind()) {
        case UntypedUse:
            break;
        case KnownInt32Use:
        case KnownStringUse:
        case KnownPrimitiveUse:
        case KnownOtherUse:
        case DoubleRepUse:
        case Int52RepUse:
        case KnownCellUse:
        case KnownBooleanUse:
            ASSERT(!m_interpreter.needsTypeCheck(edge));
            break;
        case Int32Use:
            speculateInt32(edge);
            break;
        case CellUse:
            speculateCell(edge);
            break;
        case CellOrOtherUse:
            speculateCellOrOther(edge);
            break;
        case AnyIntUse:
            speculateAnyInt(edge);
            break;
        case ObjectUse:
            speculateObject(edge);
            break;
        case ArrayUse:
            speculateArray(edge);
            break;
        case FunctionUse:
            speculateFunction(edge);
            break;
        case ObjectOrOtherUse:
            speculateObjectOrOther(edge);
            break;
        case FinalObjectUse:
            speculateFinalObject(edge);
            break;
        case RegExpObjectUse:
            speculateRegExpObject(edge);
            break;
        case ProxyObjectUse:
            speculateProxyObject(edge);
            break;
        case DerivedArrayUse:
            speculateDerivedArray(edge);
            break;
        case MapObjectUse:
            speculateMapObject(edge);
            break;
        case SetObjectUse:
            speculateSetObject(edge);
            break;
        case WeakMapObjectUse:
            speculateWeakMapObject(edge);
            break;
        case WeakSetObjectUse:
            speculateWeakSetObject(edge);
            break;
        case DataViewObjectUse:
            speculateDataViewObject(edge);
            break;
        case StringUse:
            speculateString(edge);
            break;
        case StringOrOtherUse:
            speculateStringOrOther(edge);
            break;
        case StringIdentUse:
            speculateStringIdent(edge);
            break;
        case SymbolUse:
            speculateSymbol(edge);
            break;
        case StringObjectUse:
            speculateStringObject(edge);
            break;
        case StringOrStringObjectUse:
            speculateStringOrStringObject(edge);
            break;
        case NumberUse:
            speculateNumber(edge);
            break;
        case RealNumberUse:
            speculateRealNumber(edge);
            break;
        case DoubleRepRealUse:
            speculateDoubleRepReal(edge);
            break;
        case DoubleRepAnyIntUse:
            speculateDoubleRepAnyInt(edge);
            break;
        case BooleanUse:
            speculateBoolean(edge);
            break;
        case BigIntUse:
            speculateBigInt(edge);
            break;
        case NotStringVarUse:
            speculateNotStringVar(edge);
            break;
        case NotSymbolUse:
            speculateNotSymbol(edge);
            break;
        case NotCellUse:
            speculateNotCell(edge);
            break;
        case OtherUse:
            speculateOther(edge);
            break;
        case MiscUse:
            speculateMisc(edge);
            break;
        default:
            DFG_CRASH(m_graph, m_node, "Unsupported speculation use kind");
        }
    }
15876
    // Node-qualified overload; the node is unused, only the edge matters.
    void speculate(Node*, Edge edge)
    {
        speculate(edge);
    }
15881
    // Speculates by lowering to int32 (lowInt32 emits the required checks).
    void speculateInt32(Edge edge)
    {
        lowInt32(edge);
    }
15886
    // Speculates by lowering to a cell (lowCell emits the required checks).
    void speculateCell(Edge edge)
    {
        lowCell(edge);
    }
15891
    // Speculates that the edge is not a cell; skipped when already proven.
    void speculateNotCell(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;
        lowNotCell(edge);
    }
15898
    // Speculates that the edge is a cell, null, or undefined: cells pass
    // directly; non-cells must pass the "other" check or OSR exit.
    void speculateCellOrOther(Edge edge)
    {
        if (shouldNotHaveTypeCheck(edge.useKind()))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock isNotCell = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(edge)), unsure(continuation), unsure(isNotCell));

        LBasicBlock lastNext = m_out.appendTo(isNotCell, continuation);
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
15917
    // Speculates that the edge is an integer in Int52 range by performing the
    // conversion (which emits the checks); the result is discarded.
    void speculateAnyInt(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        jsValueToStrictInt52(edge, lowJSValue(edge, ManualOperandSpeculation));
    }
15925
    // True if the cell's JSType equals queriedType; folds to a constant when the
    // proven cell type already decides speculatedTypeForQuery.
    LValue isCellWithType(LValue cell, JSType queriedType, SpeculatedType speculatedTypeForQuery, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, speculatedTypeForQuery))
            return proven;
        return m_out.equal(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(queriedType));
    }
15934
    // True if the cell is a typed-array view (excluding DataView): typed array
    // JSTypes are contiguous starting at FirstTypedArrayType, so a single
    // unsigned range check suffices.
    LValue isTypedArrayView(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecTypedArrayView))
            return proven;
        LValue jsType = m_out.sub(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(FirstTypedArrayType));
        return m_out.below(
            jsType,
            m_out.constInt32(NumberOfTypedArrayTypesExcludingDataView));
    }
15946
    // True if the cell is an object (object JSTypes are >= ObjectType).
    LValue isObject(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecObject))
            return proven;
        return m_out.aboveOrEqual(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(ObjectType));
    }
15955
    // Negation of isObject().
    LValue isNotObject(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecObject))
            return proven;
        return m_out.below(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(ObjectType));
    }
15964
    // True if the cell is not a JSString, checked by comparing its structure ID
    // against the VM's shared string structure.
    LValue isNotString(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecString))
            return proven;
        return m_out.notEqual(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().stringStructure->id()));
    }
15973
    // True if the cell is a JSString; see isNotString().
    LValue isString(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecString))
            return proven;
        return m_out.equal(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().stringStructure->id()));
    }
15982
    // True if the JSString is a rope (its value pointer carries the
    // isRopeInPointer bit). When an edge is supplied, folds to false whenever
    // the edge is proven to be a non-rope string (identifier-only type, a known
    // resolved constant, or a string the graph can already produce).
    LValue isRopeString(LValue string, Edge edge = Edge())
    {
        if (edge) {
            if (!((provenType(edge) & SpecString) & ~SpecStringIdent))
                return m_out.booleanFalse;
            if (JSValue value = provenValue(edge)) {
                if (value.isCell() && value.asCell()->type() == StringType && !asString(value)->isRope())
                    return m_out.booleanFalse;
            }
            String value = edge->tryGetString(m_graph);
            if (!value.isNull()) {
                // If this value is LazyValue, it will be converted to JSString, and the result must be non-rope string.
                return m_out.booleanFalse;
            }
        }

        return m_out.testNonZeroPtr(m_out.loadPtr(string, m_heaps.JSString_value), m_out.constIntPtr(JSString::isRopeInPointer));
    }
16001
    // Negation of isRopeString(), with the same edge-based constant folding
    // (each fast path returns true instead of false).
    LValue isNotRopeString(LValue string, Edge edge = Edge())
    {
        if (edge) {
            if (!((provenType(edge) & SpecString) & ~SpecStringIdent))
                return m_out.booleanTrue;
            if (JSValue value = provenValue(edge)) {
                if (value.isCell() && value.asCell()->type() == StringType && !asString(value)->isRope())
                    return m_out.booleanTrue;
            }
            String value = edge->tryGetString(m_graph);
            if (!value.isNull()) {
                // If this value is LazyValue, it will be converted to JSString, and the result must be non-rope string.
                return m_out.booleanTrue;
            }
        }

        return m_out.testIsZeroPtr(m_out.loadPtr(string, m_heaps.JSString_value), m_out.constIntPtr(JSString::isRopeInPointer));
    }
16020
    // True if the cell is not a Symbol (structure ID compare against the VM's
    // shared symbol structure).
    LValue isNotSymbol(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecSymbol))
            return proven;
        return m_out.notEqual(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().symbolStructure->id()));
    }
16029
16030 LValue isSymbol(LValue cell, SpeculatedType type = SpecFullTop)
16031 {
16032 if (LValue proven = isProvenValue(type & SpecCell, SpecSymbol))
16033 return proven;
16034 return m_out.equal(
16035 m_out.load32(cell, m_heaps.JSCell_structureID),
16036 m_out.constInt32(vm().symbolStructure->id()));
16037 }
16038
16039 LValue isNotBigInt(LValue cell, SpeculatedType type = SpecFullTop)
16040 {
16041 if (LValue proven = isProvenValue(type & SpecCell, ~SpecBigInt))
16042 return proven;
16043 return m_out.notEqual(
16044 m_out.load32(cell, m_heaps.JSCell_structureID),
16045 m_out.constInt32(vm().bigIntStructure->id()));
16046 }
16047
16048 LValue isBigInt(LValue cell, SpeculatedType type = SpecFullTop)
16049 {
16050 if (LValue proven = isProvenValue(type & SpecCell, SpecBigInt))
16051 return proven;
16052 return m_out.equal(
16053 m_out.load32(cell, m_heaps.JSCell_structureID),
16054 m_out.constInt32(vm().bigIntStructure->id()));
16055 }
16056
    // Emits the indexing-type test used by Arrayify: does |cell| already have
    // the indexing shape (and array-ness) that |arrayMode| requires? Returns a
    // boolean LValue. Only the array modes that Arrayify can target are handled;
    // anything else falls through to DFG_CRASH.
    LValue isArrayTypeForArrayify(LValue cell, ArrayMode arrayMode)
    {
        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::Undecided:
        case Array::ArrayStorage: {
            // For writes, copy-on-write arrays must not match, so the
            // CopyOnWrite bit participates in the mask.
            IndexingType indexingModeMask = IsArray | IndexingShapeMask;
            if (arrayMode.action() == Array::Write)
                indexingModeMask |= CopyOnWrite;

            IndexingType shape = arrayMode.shapeMask();
            LValue indexingType = m_out.load8ZeroExt32(cell, m_heaps.JSCell_indexingTypeAndMisc);

            switch (arrayMode.arrayClass()) {
            case Array::OriginalArray:
            case Array::OriginalCopyOnWriteArray:
                // Arrayify never targets "original structure" array classes.
                DFG_CRASH(m_graph, m_node, "Unexpected original array");
                return nullptr;

            case Array::Array:
                // Must be a JSArray with exactly the requested shape.
                return m_out.equal(
                    m_out.bitAnd(indexingType, m_out.constInt32(indexingModeMask)),
                    m_out.constInt32(IsArray | shape));

            case Array::NonArray:
            case Array::OriginalNonArray:
                // Must have the requested shape and must not be a JSArray.
                return m_out.equal(
                    m_out.bitAnd(indexingType, m_out.constInt32(indexingModeMask)),
                    m_out.constInt32(shape));

            case Array::PossiblyArray:
                // Shape must match; the IsArray bit is masked out and ignored.
                return m_out.equal(
                    m_out.bitAnd(indexingType, m_out.constInt32(indexingModeMask & ~IsArray)),
                    m_out.constInt32(shape));
            }
            break;
        }

        case Array::SlowPutArrayStorage: {
            ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
            LValue indexingType = m_out.load8ZeroExt32(cell, m_heaps.JSCell_indexingTypeAndMisc);

            LBasicBlock trueCase = m_out.newBlock();
            LBasicBlock checkCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Accept either ArrayStorageShape or SlowPutArrayStorageShape. The
            // subtract-then-unsigned-compare folds the two-value range test
            // into a single branch.
            ValueFromBlock falseValue = m_out.anchor(m_out.booleanFalse);
            LValue isAnArrayStorageShape = m_out.belowOrEqual(
                m_out.sub(
                    m_out.bitAnd(indexingType, m_out.constInt32(IndexingShapeMask)),
                    m_out.constInt32(ArrayStorageShape)),
                m_out.constInt32(SlowPutArrayStorageShape - ArrayStorageShape));
            m_out.branch(isAnArrayStorageShape, unsure(checkCase), unsure(continuation));

            // Shape matched; now test array-ness according to the array class.
            LBasicBlock lastNext = m_out.appendTo(checkCase, trueCase);
            switch (arrayMode.arrayClass()) {
            case Array::OriginalArray:
            case Array::OriginalCopyOnWriteArray:
                DFG_CRASH(m_graph, m_node, "Unexpected original array");
                return nullptr;

            case Array::Array:
                m_out.branch(
                    m_out.testNonZero32(indexingType, m_out.constInt32(IsArray)),
                    unsure(trueCase), unsure(continuation));
                break;

            case Array::NonArray:
            case Array::OriginalNonArray:
                m_out.branch(
                    m_out.testIsZero32(indexingType, m_out.constInt32(IsArray)),
                    unsure(trueCase), unsure(continuation));
                break;

            case Array::PossiblyArray:
                // Array-ness is irrelevant; shape match alone is sufficient.
                m_out.jump(trueCase);
                break;
            }

            m_out.appendTo(trueCase, continuation);
            ValueFromBlock trueValue = m_out.anchor(m_out.booleanTrue);
            m_out.jump(continuation);

            // Merge the false (shape mismatch) and true paths into one boolean.
            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int32, falseValue, trueValue);
        }

        default:
            break;
        }
        DFG_CRASH(m_graph, m_node, "Corrupt array class");
    }
16151
    // Emits the test used by CheckArray: does |cell| satisfy |arrayMode|?
    // Indexed-storage modes delegate to isArrayTypeForArrayify; the remaining
    // modes are decided purely by the cell's JSType byte.
    LValue isArrayTypeForCheckArray(LValue cell, ArrayMode arrayMode)
    {
        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::Undecided:
        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage:
            return isArrayTypeForArrayify(cell, arrayMode);

        case Array::DirectArguments:
            return m_out.equal(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
                m_out.constInt32(DirectArgumentsType));

        case Array::ScopedArguments:
            return m_out.equal(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
                m_out.constInt32(ScopedArgumentsType));

        default:
            // Remaining modes are typed arrays: compare against the JSType
            // corresponding to the mode's typed-array type.
            return m_out.equal(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
                m_out.constInt32(typeForTypedArrayType(arrayMode.typedArrayType())));
        }
    }
16179
    // Function predicates: fold to a constant when the type is proven,
    // otherwise compare the JSType byte against JSFunctionType.
    LValue isFunction(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecFunction))
            return proven;
        return isType(cell, JSFunctionType);
    }
    LValue isNotFunction(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecFunction))
            return proven;
        return isNotType(cell, JSFunctionType);
    }

    // True when |cell| is an object that typeof must treat exotically:
    // it masquerades as undefined or overrides getCallData.
    LValue isExoticForTypeof(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (!(type & SpecObjectOther))
            return m_out.booleanFalse;
        return m_out.testNonZero32(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
            m_out.constInt32(MasqueradesAsUndefined | OverridesGetCallData));
    }

    // Compares the cell's JSType byte against |type|.
    LValue isType(LValue cell, JSType type)
    {
        return m_out.equal(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(type));
    }

    LValue isNotType(LValue cell, JSType type)
    {
        return m_out.logicalNot(isType(cell, type));
    }
16213
    // Speculation helpers: each emits an OSR-exit type check (FTL_TYPE_CHECK)
    // that bails out unless the cell is of the speculated type, then filters
    // the abstract interpreter's knowledge accordingly. The single-Edge
    // overloads first lower the edge to a cell via lowCell.

    void speculateObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell));
    }

    void speculateObject(Edge edge)
    {
        speculateObject(edge, lowCell(edge));
    }

    void speculateArray(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecArray, isNotType(cell, ArrayType));
    }

    void speculateArray(Edge edge)
    {
        speculateArray(edge, lowCell(edge));
    }

    void speculateFunction(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecFunction, isNotFunction(cell));
    }

    void speculateFunction(Edge edge)
    {
        speculateFunction(edge, lowCell(edge));
    }
16244
    // Speculates that |edge| is either an object or "other" (null/undefined).
    // Splits on cell-ness: cells must be objects; non-cells must be other.
    // Each arm emits its own OSR-exit check.
    void speculateObjectOrOther(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock primitiveCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);

        // On the cell path, anything non-object fails the speculation.
        FTL_TYPE_CHECK(
            jsValueValue(value), edge, (~SpecCellCheck) | SpecObject, isNotObject(value));

        m_out.jump(continuation);

        m_out.appendTo(primitiveCase, continuation);

        // On the non-cell path, anything that is not "other" fails.
        FTL_TYPE_CHECK(
            jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16274
    // Per-JSType speculation helpers. Each (Edge, LValue) overload emits one
    // FTL_TYPE_CHECK that OSR-exits unless the cell's JSType matches; each
    // (Edge) overload lowers the edge to a cell first via lowCell.

    void speculateFinalObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecFinalObject, isNotType(cell, FinalObjectType));
    }

    void speculateFinalObject(Edge edge)
    {
        speculateFinalObject(edge, lowCell(edge));
    }

    void speculateRegExpObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecRegExpObject, isNotType(cell, RegExpObjectType));
    }

    void speculateRegExpObject(Edge edge)
    {
        speculateRegExpObject(edge, lowCell(edge));
    }

    void speculateProxyObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecProxyObject, isNotType(cell, ProxyObjectType));
    }

    void speculateProxyObject(Edge edge)
    {
        speculateProxyObject(edge, lowCell(edge));
    }

    void speculateDerivedArray(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecDerivedArray, isNotType(cell, DerivedArrayType));
    }

    void speculateDerivedArray(Edge edge)
    {
        speculateDerivedArray(edge, lowCell(edge));
    }

    void speculateMapObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecMapObject, isNotType(cell, JSMapType));
    }

    void speculateMapObject(Edge edge)
    {
        speculateMapObject(edge, lowCell(edge));
    }

    void speculateSetObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecSetObject, isNotType(cell, JSSetType));
    }

    void speculateSetObject(Edge edge)
    {
        speculateSetObject(edge, lowCell(edge));
    }

    void speculateWeakMapObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecWeakMapObject, isNotType(cell, JSWeakMapType));
    }

    void speculateWeakMapObject(Edge edge)
    {
        speculateWeakMapObject(edge, lowCell(edge));
    }

    void speculateWeakSetObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecWeakSetObject, isNotType(cell, JSWeakSetType));
    }

    void speculateWeakSetObject(Edge edge)
    {
        speculateWeakSetObject(edge, lowCell(edge));
    }

    void speculateDataViewObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecDataViewObject, isNotType(cell, DataViewType));
    }

    void speculateDataViewObject(Edge edge)
    {
        speculateDataViewObject(edge, lowCell(edge));
    }
16373
    // Speculates that the cell is a JSString; OSR-exits otherwise.
    void speculateString(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecString, isNotString(cell));
    }

    void speculateString(Edge edge)
    {
        speculateString(edge, lowCell(edge));
    }
16383
    // Speculates that |value| is either a JSString or "other" (null/undefined).
    // Splits on cell-ness: cells must be strings; non-cells must be other.
    void speculateStringOrOther(Edge edge, LValue value)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock notCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);

        // Cell path: must be a string.
        FTL_TYPE_CHECK(jsValueValue(value), edge, (~SpecCellCheck) | SpecString, isNotString(value));

        m_out.jump(continuation);
        m_out.appendTo(notCellCase, continuation);

        // Non-cell path: must be null/undefined.
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));

        m_out.jump(continuation);
        m_out.appendTo(continuation, lastNext);
    }

    void speculateStringOrOther(Edge edge)
    {
        speculateStringOrOther(edge, lowJSValue(edge, ManualOperandSpeculation));
    }
16412
    // Speculates that |string| is an identifier string: not a rope, and its
    // StringImpl is atomized (flagIsAtom set). |stringImpl| is the already-
    // loaded StringImpl pointer for the string.
    void speculateStringIdent(Edge edge, LValue string, LValue stringImpl)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecStringIdent | ~SpecString))
            return;

        // Ropes have no StringImpl yet and can never be identifiers.
        speculate(BadType, jsValueValue(string), edge.node(), isRopeString(string));
        // The impl must be atomized.
        speculate(
            BadType, jsValueValue(string), edge.node(),
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIsAtom())));
        m_interpreter.filter(edge, SpecStringIdent | ~SpecString);
    }

    void speculateStringIdent(Edge edge)
    {
        // lowStringIdent performs the full lowering plus the checks above.
        lowStringIdent(edge);
    }
16431
    // Speculates that the edge is a StringObject wrapper; delegates the
    // JSType check to speculateStringObjectForCell after lowering to a cell.
    void speculateStringObject(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecStringObject))
            return;

        speculateStringObjectForCell(edge, lowCell(edge));
    }
16439
    // Speculates that the edge is either a JSString or a StringObject.
    // The fast path accepts StringType outright; otherwise the cell must be
    // a StringObjectType or we OSR-exit.
    void speculateStringOrStringObject(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecString | SpecStringObject))
            return;

        LValue cellBase = lowCell(edge);
        // lowCell may have refined the proven type; re-check before emitting IR.
        if (!m_interpreter.needsTypeCheck(edge, SpecString | SpecStringObject))
            return;

        LBasicBlock notString = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue type = m_out.load8ZeroExt32(cellBase, m_heaps.JSCell_typeInfoType);
        m_out.branch(
            m_out.equal(type, m_out.constInt32(StringType)),
            unsure(continuation), unsure(notString));

        LBasicBlock lastNext = m_out.appendTo(notString, continuation);
        speculate(
            BadType, jsValueValue(cellBase), edge.node(),
            m_out.notEqual(type, m_out.constInt32(StringObjectType)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        m_interpreter.filter(edge, SpecString | SpecStringObject);
    }
16466
    // Emits the OSR-exit check that |cell| has JSType StringObjectType.
    void speculateStringObjectForCell(Edge edge, LValue cell)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecStringObject))
            return;

        LValue type = m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType);
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecStringObject, m_out.notEqual(type, m_out.constInt32(StringObjectType)));
    }
16475
    // Speculates the cell is a Symbol / BigInt (structure-based checks);
    // OSR-exits otherwise.

    void speculateSymbol(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecSymbol, isNotSymbol(cell));
    }

    void speculateSymbol(Edge edge)
    {
        speculateSymbol(edge, lowCell(edge));
    }

    void speculateBigInt(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecBigInt, isNotBigInt(cell));
    }

    void speculateBigInt(Edge edge)
    {
        speculateBigInt(edge, lowCell(edge));
    }
16495
    // Speculates the cell is an object, and — unless the masquerades-as-
    // undefined watchpoint still holds — additionally that it does not have
    // the MasqueradesAsUndefined type-info flag set.
    void speculateNonNullObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell));
        if (masqueradesAsUndefinedWatchpointIsStillValid())
            return;

        speculate(
            BadType, jsValueValue(cell), edge.node(),
            m_out.testNonZero32(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
                m_out.constInt32(MasqueradesAsUndefined)));
    }
16508
    // Speculates the edge holds a bytecode number (int32 or double); OSR-exits otherwise.
    void speculateNumber(Edge edge)
    {
        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBytecodeNumber, isNotNumber(value));
    }
16514
    // Speculates the edge holds a real (non-NaN) number. The value is unboxed
    // as a double; if the double compares equal to itself it is a non-NaN
    // number and the fast path is taken. Otherwise (NaN outcome, which is also
    // what a non-double boxed value produces here) the value must be an int32
    // or we OSR-exit.
    void speculateRealNumber(Edge edge)
    {
        // Do an early return here because lowDouble() can create a lot of control flow.
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        LValue doubleValue = unboxDouble(value);

        LBasicBlock intCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // doubleEqual(x, x) is false only for NaN.
        m_out.branch(
            m_out.doubleEqual(doubleValue, doubleValue),
            usually(continuation), rarely(intCase));

        LBasicBlock lastNext = m_out.appendTo(intCase, continuation);

        typeCheck(
            jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
            isNotInt32(value, provenType(m_node->child1()) & ~SpecFullDouble));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16540
    // Speculates a DoubleRep edge holds a real (non-NaN) double; the check
    // uses the NaN-detecting self-inequality comparison.
    void speculateDoubleRepReal(Edge edge)
    {
        // Do an early return here because lowDouble() can create a lot of control flow.
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowDouble(edge);
        FTL_TYPE_CHECK(
            doubleValue(value), edge, SpecDoubleReal,
            m_out.doubleNotEqualOrUnordered(value, value));
    }
16552
    // Speculates a DoubleRep edge holds a value representable as Int52; the
    // conversion helper emits the required checks.
    void speculateDoubleRepAnyInt(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        doubleToStrictInt52(edge, lowDouble(edge));
    }

    // Speculates the edge is a boolean; lowBoolean emits the check as a
    // side effect of lowering.
    void speculateBoolean(Edge edge)
    {
        lowBoolean(edge);
    }
16565
    // Speculates the edge is not a "string variable" (a non-identifier
    // string). Non-cells and non-string cells pass immediately; strings must
    // prove to be identifiers via speculateStringIdent.
    void speculateNotStringVar(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, ~SpecStringVar))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock isStringCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(edge)), unsure(isCellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
        m_out.branch(isString(value, provenType(edge)), unsure(isStringCase), unsure(continuation));

        m_out.appendTo(isStringCase, continuation);
        // The string path must be an atomized identifier string or we exit.
        speculateStringIdent(edge, value, m_out.loadPtr(value, m_heaps.JSString_value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16588
    // Speculates the edge is not a Symbol. Non-cells pass trivially; cells
    // are checked against the symbol structure and OSR-exit on match.
    void speculateNotSymbol(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, ~SpecSymbol))
            return;

        ASSERT(mayHaveTypeCheck(edge.useKind()));
        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(edge)), unsure(isCellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
        speculate(BadType, jsValueValue(value), edge.node(), isSymbol(value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        // Record the refined type for subsequent nodes.
        m_interpreter.filter(edge, ~SpecSymbol);
    }
16610
    // Speculates the edge is "other" (null/undefined); OSR-exits otherwise.
    void speculateOther(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        typeCheck(jsValueValue(value), edge, SpecOther, isNotOther(value));
    }

    // Speculates the edge is "misc" (bool/null/undefined/int32); OSR-exits otherwise.
    void speculateMisc(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        typeCheck(jsValueValue(value), edge, SpecMisc, isNotMisc(value));
    }
16628
    // Emits a check that a typed-array view has not been neutered (detached).
    // Only wasteful-mode views can be neutered; for those, a null vector
    // pointer (after stripping its pointer tag) means detached, and we exit.
    void speculateTypedArrayIsNotNeutered(LValue base)
    {
        LBasicBlock isWasteful = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue mode = m_out.load32(base, m_heaps.JSArrayBufferView_mode);
        m_out.branch(m_out.equal(mode, m_out.constInt32(WastefulTypedArray)),
            unsure(isWasteful), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isWasteful, continuation);
        LValue vector = m_out.loadPtr(base, m_heaps.JSArrayBufferView_vector);
        // FIXME: We could probably make this a mask.
        // https://bugs.webkit.org/show_bug.cgi?id=197701
        vector = removeArrayPtrTag(vector);
        speculate(Uncountable, jsValueValue(vector), m_node, m_out.isZero64(vector));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16648
    // True when the graph's masquerades-as-undefined watchpoint is still valid
    // at the current node's semantic origin, letting us skip the flag check.
    bool masqueradesAsUndefinedWatchpointIsStillValid()
    {
        return m_graph.masqueradesAsUndefinedWatchpointIsStillValid(m_node->origin.semantic);
    }

    // Loads the GC cell-state byte of |base|, zero-extended to 32 bits.
    LValue loadCellState(LValue base)
    {
        return m_out.load8ZeroExt32(base, m_heaps.JSCell_cellState);
    }
16658
    // Emits a GC store barrier for |base|. Fast path: if the cell state is
    // above the barrier threshold, no barrier is needed. When |isFenced|, the
    // threshold is loaded from the heap (it may change concurrently) and a
    // fence + recheck against blackThreshold guards the slow path. The slow
    // path calls operationWriteBarrierSlowPath.
    void emitStoreBarrier(LValue base, bool isFenced)
    {
        LBasicBlock recheckPath = nullptr;
        if (isFenced)
            recheckPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(isFenced ? recheckPath : slowPath);

        LValue threshold;
        if (isFenced)
            threshold = m_out.load32(m_out.absolute(vm().heap.addressOfBarrierThreshold()));
        else
            threshold = m_out.constInt32(blackThreshold);

        m_out.branch(
            m_out.above(loadCellState(base), threshold),
            usually(continuation), rarely(isFenced ? recheckPath : slowPath));

        if (isFenced) {
            m_out.appendTo(recheckPath, slowPath);

            // Order the cell-state reload after prior accesses before deciding
            // the barrier is really needed.
            m_out.fence(&m_heaps.root, &m_heaps.JSCell_cellState);

            m_out.branch(
                m_out.above(loadCellState(base), m_out.constInt32(blackThreshold)),
                usually(continuation), rarely(slowPath));
        }

        m_out.appendTo(slowPath, continuation);

        LValue call = vmCall(Void, m_out.operation(operationWriteBarrierSlowPath), m_callFrame, base);
        // Tell the alias analysis what the slow call reads and writes.
        m_heaps.decorateCCallRead(&m_heaps.root, call);
        m_heaps.decorateCCallWrite(&m_heaps.JSCell_cellState, call);

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16699
    // Emits a mutator fence. On x86 the fence is emitted unconditionally; on
    // other targets it is emitted only on the slow path, guarded by the
    // heap's mutatorShouldBeFenced flag.
    void mutatorFence()
    {
        if (isX86()) {
            m_out.fence(&m_heaps.root, nullptr);
            return;
        }

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        m_out.branch(
            m_out.load8ZeroExt32(m_out.absolute(vm().heap.addressOfMutatorShouldBeFenced())),
            rarely(slowPath), usually(continuation));

        m_out.appendTo(slowPath, continuation);

        m_out.fence(&m_heaps.root, nullptr);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16723
    // Installs a new butterfly on |object|. When fencing is required (x86
    // always; otherwise only if mutatorShouldBeFenced), the structure ID is
    // first "nuked" (nukedStructureIDBit set) and fences bracket the butterfly
    // store so a concurrent GC never observes a fresh butterfly paired with
    // the old, un-nuked structure. The unfenced fast path just stores the
    // butterfly.
    void nukeStructureAndSetButterfly(LValue butterfly, LValue object)
    {
        if (isX86()) {
            m_out.store32(
                m_out.bitOr(
                    m_out.load32(object, m_heaps.JSCell_structureID),
                    m_out.constInt32(nukedStructureIDBit())),
                object, m_heaps.JSCell_structureID);
            m_out.fence(&m_heaps.root, nullptr);
            m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
            m_out.fence(&m_heaps.root, nullptr);
            return;
        }

        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(fastPath);

        m_out.branch(
            m_out.load8ZeroExt32(m_out.absolute(vm().heap.addressOfMutatorShouldBeFenced())),
            rarely(slowPath), usually(fastPath));

        m_out.appendTo(fastPath, slowPath);

        // No fencing needed: plain butterfly store.
        m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        // Fenced variant: nuke structure, fence, store butterfly, fence.
        m_out.store32(
            m_out.bitOr(
                m_out.load32(object, m_heaps.JSCell_structureID),
                m_out.constInt32(nukedStructureIDBit())),
            object, m_heaps.JSCell_structureID);
        m_out.fence(&m_heaps.root, nullptr);
        m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
        m_out.fence(&m_heaps.root, nullptr);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16767
    // Spectre-style index masking: when index < limit, (index - limit) is
    // negative, so arithmetic-shifting right by 63 yields all-ones and the
    // mask preserves |value|; otherwise the mask is zero. The limit is wrapped
    // in m_out.opaque() to keep the optimizer from folding the subtraction.
    LValue preciseIndexMask64(LValue value, LValue index, LValue limit)
    {
        return m_out.bitAnd(
            value,
            m_out.aShr(
                m_out.sub(
                    index,
                    m_out.opaque(limit)),
                m_out.constInt32(63)));
    }

    // 32-bit variant: zero-extends index and limit to 64 bits, then masks.
    LValue preciseIndexMask32(LValue value, LValue index, LValue limit)
    {
        return preciseIndexMask64(value, m_out.zeroExt(index, Int64), m_out.zeroExt(limit, Int64));
    }
16783
    // Calls a VM operation from JIT code. Records the call-site index first
    // (callPreflight), makes the call, then checks for a pending exception
    // (callCheck) if the current node may exit. When the node cannot exit, a
    // debug-only check verifies that no exception was actually thrown.
    template<typename... Args>
    LValue vmCall(LType type, LValue function, Args&&... args)
    {
        callPreflight();
        LValue result = m_out.call(type, function, std::forward<Args>(args)...);
        if (mayExit(m_graph, m_node))
            callCheck();
        else {
            // We can't exit due to an exception, so we also can't throw an exception.
#ifndef NDEBUG
            LBasicBlock crash = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue exception = m_out.load64(m_out.absolute(vm().addressOfException()));
            LValue hadException = m_out.notZero64(exception);

            m_out.branch(
                hadException, rarely(crash), usually(continuation));

            LBasicBlock lastNext = m_out.appendTo(crash, continuation);
            // An exception here would violate the mayExit analysis; trap.
            m_out.unreachable();

            m_out.appendTo(continuation, lastNext);
#endif
        }
        return result;
    }
16811
    // Registers |codeOrigin| as a call site and stores its CallSiteIndex into
    // the argument-count tag slot of the call frame, so the runtime can
    // attribute the upcoming call to the right bytecode location.
    void callPreflight(CodeOrigin codeOrigin)
    {
        CallSiteIndex callSiteIndex = m_ftlState.jitCode->common.addCodeOrigin(codeOrigin);
        m_out.store32(
            m_out.constInt32(callSiteIndex.bits()),
            tagFor(CallFrameSlot::argumentCount));
    }

    void callPreflight()
    {
        callPreflight(codeOriginDescriptionOfCallSite());
    }

    // Computes the code origin to attribute to the current call site. For
    // inlined tail calls, the origin is hoisted to the caller that survives
    // the tail call.
    CodeOrigin codeOriginDescriptionOfCallSite() const
    {
        CodeOrigin codeOrigin = m_node->origin.semantic;
        if (m_node->op() == TailCallInlinedCaller
            || m_node->op() == TailCallVarargsInlinedCaller
            || m_node->op() == TailCallForwardVarargsInlinedCaller
            || m_node->op() == DirectTailCallInlinedCaller) {
            // This case arises when you have a situation like this:
            // foo makes a call to bar, bar is inlined in foo. bar makes a call
            // to baz and baz is inlined in bar. And then baz makes a tail-call to jaz,
            // and jaz is inlined in baz. We want the callframe for jaz to appear to
            // have caller be bar.
            codeOrigin = *codeOrigin.inlineCallFrame()->getCallerSkippingTailCalls();
        }

        return codeOrigin;
    }
16842
    // Emits the post-call exception check. If the surrounding machine frame
    // will catch the exception, this becomes an ExceptionCheck OSR exit to
    // the catch handler's origin; otherwise it branches to the shared
    // m_handleExceptions block.
    void callCheck()
    {
        // Exception fuzzing deliberately injects exceptions to test handling paths.
        if (Options::useExceptionFuzz())
            m_out.call(Void, m_out.operation(operationExceptionFuzz), m_callFrame);

        LValue exception = m_out.load64(m_out.absolute(vm().addressOfException()));
        LValue hadException = m_out.notZero64(exception);

        CodeOrigin opCatchOrigin;
        HandlerInfo* exceptionHandler;
        if (m_graph.willCatchExceptionInMachineFrame(m_origin.forExit, opCatchOrigin, exceptionHandler)) {
            bool exitOK = true;
            bool isExceptionHandler = true;
            appendOSRExit(
                ExceptionCheck, noValue(), nullptr, hadException,
                m_origin.withForExitAndExitOK(opCatchOrigin, exitOK), isExceptionHandler);
            return;
        }

        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            hadException, rarely(m_handleExceptions), usually(continuation));

        m_out.appendTo(continuation);
    }
16869
    // Prepares a patchpoint to handle exceptions. If no handler in this
    // machine frame will catch, the default handle is returned. Otherwise an
    // OSR exit descriptor is created and its exit arguments are appended to
    // the patchpoint's stackmap, so an exception inside the patchpoint can
    // OSR-exit to the catch handler.
    RefPtr<PatchpointExceptionHandle> preparePatchpointForExceptions(PatchpointValue* value)
    {
        CodeOrigin opCatchOrigin;
        HandlerInfo* exceptionHandler;
        bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_origin.forExit, opCatchOrigin, exceptionHandler);
        if (!willCatchException)
            return PatchpointExceptionHandle::defaultHandle(m_ftlState);

        dataLogLnIf(verboseCompilationEnabled(), "    Patchpoint exception OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap());

        bool exitOK = true;
        NodeOrigin origin = m_origin.withForExitAndExitOK(opCatchOrigin, exitOK);

        OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(noValue(), nullptr);

        // Compute the offset into the StackmapGenerationParams where we will find the exit arguments
        // we are about to append. We need to account for both the children we've already added, and
        // for the possibility of a result value if the patchpoint is not void.
        unsigned offset = value->numChildren();
        if (value->type() != Void)
            offset++;

        // Use LateColdAny to ensure that the stackmap arguments interfere with the patchpoint's
        // result and with any late-clobbered registers.
        value->appendVectorWithRep(
            buildExitArguments(exitDescriptor, opCatchOrigin, noValue()),
            ValueRep::LateColdAny);

        return PatchpointExceptionHandle::create(
            m_ftlState, exitDescriptor, origin, offset, *exceptionHandler);
    }
16901
    // Maps a DFG basic block to its lowered B3 basic block.
    LBasicBlock lowBlock(DFG::BasicBlock* block)
    {
        return m_blocks.get(block);
    }

    // Convenience overload: derives the value profile from |highValue|.
    OSRExitDescriptor* appendOSRExitDescriptor(FormattedValue lowValue, Node* highValue)
    {
        return appendOSRExitDescriptor(lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue));
    }

    // Allocates a new OSR exit descriptor sized for the current operand layout.
    OSRExitDescriptor* appendOSRExitDescriptor(FormattedValue lowValue, const MethodOfGettingAValueProfile& profile)
    {
        return &m_ftlState.jitCode->osrExitDescriptors.alloc(
            lowValue.format(), profile,
            availabilityMap().m_locals.numberOfArguments(),
            availabilityMap().m_locals.numberOfLocals());
    }
16919
    // Convenience overload: derives the value profile from |highValue|.
    void appendOSRExit(
        ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition,
        NodeOrigin origin, bool isExceptionHandler = false)
    {
        return appendOSRExit(kind, lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue),
            failCondition, origin, isExceptionHandler);
    }

    // Appends an OSR exit that fires when |failCondition| is true. With OSR
    // exit fuzzing enabled, the condition is widened so that the exit also
    // fires at configured fuzz check counts. A statically-false condition
    // emits nothing.
    void appendOSRExit(
        ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, LValue failCondition,
        NodeOrigin origin, bool isExceptionHandler = false)
    {
        dataLogLnIf(verboseCompilationEnabled(), "    OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap());

        DFG_ASSERT(m_graph, m_node, origin.exitOK);

        if (!isExceptionHandler
            && Options::useOSRExitFuzz()
            && canUseOSRExitFuzzing(m_graph.baselineCodeBlockFor(m_node->origin.semantic))
            && doOSRExitFuzzing()) {
            // Count this check globally, then OR extra exit triggers into the
            // fail condition based on the configured fuzz thresholds.
            LValue numberOfFuzzChecks = m_out.add(
                m_out.load32(m_out.absolute(&g_numberOfOSRExitFuzzChecks)),
                m_out.int32One);

            m_out.store32(numberOfFuzzChecks, m_out.absolute(&g_numberOfOSRExitFuzzChecks));

            if (unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter()) {
                failCondition = m_out.bitOr(
                    failCondition,
                    m_out.aboveOrEqual(numberOfFuzzChecks, m_out.constInt32(atOrAfter)));
            }
            if (unsigned at = Options::fireOSRExitFuzzAt()) {
                failCondition = m_out.bitOr(
                    failCondition,
                    m_out.equal(numberOfFuzzChecks, m_out.constInt32(at)));
            }
        }

        // Statically impossible exit: emit nothing.
        if (failCondition == m_out.booleanFalse)
            return;

        blessSpeculation(
            m_out.speculate(failCondition), kind, lowValue, profile, origin);
    }
16964
    // Convenience overload: derives the value profile from |highValue|.
    void blessSpeculation(CheckValue* value, ExitKind kind, FormattedValue lowValue, Node* highValue, NodeOrigin origin)
    {
        blessSpeculation(value, kind, lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue), origin);
    }

    // Attaches OSR-exit metadata to a B3 Check: allocates an exit descriptor,
    // appends the exit arguments to the check's stackmap, and installs a
    // generator that emits the actual exit thunk at code-generation time.
    void blessSpeculation(CheckValue* value, ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, NodeOrigin origin)
    {
        OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(lowValue, profile);

        value->appendColdAnys(buildExitArguments(exitDescriptor, origin.forExit, lowValue));

        // Capture the state pointer by value; the lambda runs later, during
        // B3 code generation.
        State* state = &m_ftlState;
        value->setGenerator(
            [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
                exitDescriptor->emitOSRExit(
                    *state, kind, origin, jit, params, 0);
            });
    }
16983
    // List-returning convenience wrapper around the out-parameter overload.
    StackmapArgumentList buildExitArguments(
        OSRExitDescriptor* exitDescriptor, CodeOrigin exitOrigin, FormattedValue lowValue,
        unsigned offsetOfExitArgumentsInStackmapLocations = 0)
    {
        StackmapArgumentList result;
        buildExitArguments(
            exitDescriptor, exitOrigin, result, lowValue, offsetOfExitArgumentsInStackmapLocations);
        return result;
    }
16993
    // Fills in exitDescriptor->m_values (and its object materializations) with ExitValues that
    // describe how to recover every bytecode-live value at exitOrigin, appending whatever B3
    // values are needed to `arguments` so the stackmap records their locations.
    void buildExitArguments(
        OSRExitDescriptor* exitDescriptor, CodeOrigin exitOrigin, StackmapArgumentList& arguments, FormattedValue lowValue,
        unsigned offsetOfExitArgumentsInStackmapLocations = 0)
    {
        if (!!lowValue)
            arguments.append(lowValue.value());

        // Restrict availability to what is actually live in bytecode at the exit origin.
        AvailabilityMap availabilityMap = this->availabilityMap();
        availabilityMap.pruneByLiveness(m_graph, exitOrigin);

        // Create one ExitTimeObjectMaterialization per live phantom allocation.
        HashMap<Node*, ExitTimeObjectMaterialization*> map;
        availabilityMap.forEachAvailability(
            [&] (Availability availability) {
                if (!availability.shouldUseNode())
                    return;

                Node* node = availability.node();
                if (!node->isPhantomAllocation())
                    return;

                auto result = map.add(node, nullptr);
                if (result.isNewEntry) {
                    result.iterator->value =
                        exitDescriptor->m_materializations.add(node->op(), node->origin.semantic);
                }
            });

        // Compute an ExitValue for each operand tracked by the descriptor.
        for (unsigned i = 0; i < exitDescriptor->m_values.size(); ++i) {
            int operand = exitDescriptor->m_values.operandForIndex(i);

            Availability availability = availabilityMap.m_locals[i];

            if (Options::validateFTLOSRExitLiveness()
                && m_graph.m_plan.mode() != FTLForOSREntryMode) {

                // A bytecode-live local with no availability means the lowering lost track of it.
                if (availability.isDead() && m_graph.isLiveInBytecode(VirtualRegister(operand), exitOrigin))
                    DFG_CRASH(m_graph, m_node, toCString("Live bytecode local not available: operand = ", VirtualRegister(operand), ", availability = ", availability, ", origin = ", exitOrigin).data());
            }
            ExitValue exitValue = exitValueForAvailability(arguments, map, availability);
            if (exitValue.hasIndexInStackmapLocations())
                exitValue.adjustStackmapLocationsIndexByOffset(offsetOfExitArgumentsInStackmapLocations);
            exitDescriptor->m_values[i] = exitValue;
        }

        // Record how to rebuild each field of each materialized object.
        for (auto heapPair : availabilityMap.m_heap) {
            Node* node = heapPair.key.base();
            ExitTimeObjectMaterialization* materialization = map.get(node);
            if (!materialization)
                DFG_CRASH(m_graph, m_node, toCString("Could not find materialization for ", node, " in ", availabilityMap).data());
            ExitValue exitValue = exitValueForAvailability(arguments, map, heapPair.value);
            if (exitValue.hasIndexInStackmapLocations())
                exitValue.adjustStackmapLocationsIndexByOffset(offsetOfExitArgumentsInStackmapLocations);
            materialization->add(
                heapPair.key.descriptor(),
                exitValue);
        }

        if (verboseCompilationEnabled()) {
            dataLog("  Exit values: ", exitDescriptor->m_values, "\n");
            if (!exitDescriptor->m_materializations.isEmpty()) {
                dataLog("  Materializations: \n");
                for (ExitTimeObjectMaterialization* materialization : exitDescriptor->m_materializations)
                    dataLog("    ", pointerDump(materialization), "\n");
            }
        }
    }
17060
    // Turns an Availability into an ExitValue: prefer the flushed stack slot when one exists;
    // otherwise fall back to the node that computes the value; otherwise the value is dead.
    ExitValue exitValueForAvailability(
        StackmapArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map,
        Availability availability)
    {
        FlushedAt flush = availability.flushedAt();
        switch (flush.format()) {
        case DeadFlush:
        case ConflictingFlush:
            if (availability.hasNode())
                return exitValueForNode(arguments, map, availability.node());

            // This means that the value is dead. It could be dead in bytecode or it could have
            // been killed by our DCE, which can sometimes kill things even if they were live in
            // bytecode.
            return ExitValue::dead();

        case FlushedJSValue:
        case FlushedCell:
        case FlushedBoolean:
            // Already a full JSValue (or a subset that can be read as one) on the stack.
            return ExitValue::inJSStack(flush.virtualRegister());

        case FlushedInt32:
            return ExitValue::inJSStackAsInt32(flush.virtualRegister());

        case FlushedInt52:
            return ExitValue::inJSStackAsInt52(flush.virtualRegister());

        case FlushedDouble:
            return ExitValue::inJSStackAsDouble(flush.virtualRegister());
        }

        DFG_CRASH(m_graph, m_node, "Invalid flush format");
        return ExitValue::dead();
    }
17095
    // Turns a node into an ExitValue: constants and phantom allocations are rematerialized
    // directly; everything else hands its lowered B3 value to the stackmap via exitArgument().
    ExitValue exitValueForNode(
        StackmapArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map,
        Node* node)
    {
        // NOTE: In FTL->B3, we cannot generate code here, because m_output is positioned after the
        // stackmap value. Like all values, the stackmap value cannot use a child that is defined after
        // it.

        // NOTE(review): these asserts dereference node before the `if (node)` check below — if
        // node can legitimately be null, debug builds would crash here first. Confirm intent.
        ASSERT(node->shouldGenerate());
        ASSERT(node->hasResult());

        if (node) {
            switch (node->op()) {
            case BottomValue:
                // This might arise in object materializations. I actually doubt that it would,
                // but it seems worthwhile to be conservative.
                return ExitValue::dead();

            case JSConstant:
            case Int52Constant:
            case DoubleConstant:
                // Constants can be rebuilt at exit time without a stackmap slot.
                return ExitValue::constant(node->asJSValue());

            default:
                if (node->isPhantomAllocation())
                    return ExitValue::materializeNewObject(map.get(node));
                break;
            }
        }

        // Otherwise report whichever lowered representation we have for the node, checking the
        // tables from cheapest to most general format.
        LoweredNodeValue value = m_int32Values.get(node);
        if (isValid(value))
            return exitArgument(arguments, DataFormatInt32, value.value());

        value = m_int52Values.get(node);
        if (isValid(value))
            return exitArgument(arguments, DataFormatInt52, value.value());

        value = m_strictInt52Values.get(node);
        if (isValid(value))
            return exitArgument(arguments, DataFormatStrictInt52, value.value());

        value = m_booleanValues.get(node);
        if (isValid(value))
            return exitArgument(arguments, DataFormatBoolean, value.value());

        value = m_jsValueValues.get(node);
        if (isValid(value))
            return exitArgument(arguments, DataFormatJS, value.value());

        value = m_doubleValues.get(node);
        if (isValid(value))
            return exitArgument(arguments, DataFormatDouble, value.value());

        DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
        return ExitValue::dead();
    }
17153
17154 ExitValue exitArgument(StackmapArgumentList& arguments, DataFormat format, LValue value)
17155 {
17156 ExitValue result = ExitValue::exitArgument(ExitArgument(format, arguments.size()));
17157 arguments.append(value);
17158 return result;
17159 }
17160
17161 ExitValue exitValueForTailCall(StackmapArgumentList& arguments, Node* node)
17162 {
17163 ASSERT(node->shouldGenerate());
17164 ASSERT(node->hasResult());
17165
17166 switch (node->op()) {
17167 case JSConstant:
17168 case Int52Constant:
17169 case DoubleConstant:
17170 return ExitValue::constant(node->asJSValue());
17171
17172 default:
17173 break;
17174 }
17175
17176 LoweredNodeValue value = m_jsValueValues.get(node);
17177 if (isValid(value))
17178 return exitArgument(arguments, DataFormatJS, value.value());
17179
17180 value = m_int32Values.get(node);
17181 if (isValid(value))
17182 return exitArgument(arguments, DataFormatJS, boxInt32(value.value()));
17183
17184 value = m_booleanValues.get(node);
17185 if (isValid(value))
17186 return exitArgument(arguments, DataFormatJS, boxBoolean(value.value()));
17187
17188 // Doubles and Int52 have been converted by ValueRep()
17189 DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
17190 }
17191
    // The set*() helpers record the lowered B3 value for a node, keyed by the representation it
    // was produced in. The current block (m_highBlock) is remembered so isValid() can later check
    // dominance before reusing the value.
    void setInt32(Node* node, LValue value)
    {
        m_int32Values.set(node, LoweredNodeValue(value, m_highBlock));
    }
    void setInt52(Node* node, LValue value)
    {
        m_int52Values.set(node, LoweredNodeValue(value, m_highBlock));
    }
    void setStrictInt52(Node* node, LValue value)
    {
        m_strictInt52Values.set(node, LoweredNodeValue(value, m_highBlock));
    }
    // Routes to setInt52() or setStrictInt52() based on the kind of int52 produced.
    void setInt52(Node* node, LValue value, Int52Kind kind)
    {
        switch (kind) {
        case Int52:
            setInt52(node, value);
            return;

        case StrictInt52:
            setStrictInt52(node, value);
            return;
        }

        DFG_CRASH(m_graph, m_node, "Corrupt int52 kind");
    }
    void setJSValue(Node* node, LValue value)
    {
        m_jsValueValues.set(node, LoweredNodeValue(value, m_highBlock));
    }
    void setBoolean(Node* node, LValue value)
    {
        m_booleanValues.set(node, LoweredNodeValue(value, m_highBlock));
    }
    void setStorage(Node* node, LValue value)
    {
        m_storageValues.set(node, LoweredNodeValue(value, m_highBlock));
    }
    void setDouble(Node* node, LValue value)
    {
        m_doubleValues.set(node, LoweredNodeValue(value, m_highBlock));
    }
17234
    // Convenience overloads that record the result for the node currently being lowered (m_node).
    void setInt32(LValue value)
    {
        setInt32(m_node, value);
    }
    void setInt52(LValue value)
    {
        setInt52(m_node, value);
    }
    void setStrictInt52(LValue value)
    {
        setStrictInt52(m_node, value);
    }
    void setInt52(LValue value, Int52Kind kind)
    {
        setInt52(m_node, value, kind);
    }
    void setJSValue(LValue value)
    {
        setJSValue(m_node, value);
    }
    void setBoolean(LValue value)
    {
        setBoolean(m_node, value);
    }
    void setStorage(LValue value)
    {
        setStorage(m_node, value);
    }
    void setDouble(LValue value)
    {
        setDouble(m_node, value);
    }
17267
17268 bool isValid(const LoweredNodeValue& value)
17269 {
17270 if (!value)
17271 return false;
17272 if (!m_graph.m_ssaDominators->dominates(value.block(), m_highBlock))
17273 return false;
17274 return true;
17275 }
17276
    // Emits a no-op patchpoint that carries `value` as a ColdAny argument. The declared effects
    // (writes local state, reads the whole heap) keep B3 from deleting or hoisting it, which in
    // turn keeps `value` live up to this program point.
    void keepAlive(LValue value)
    {
        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->effects = Effects::none();
        patchpoint->effects.writesLocalState = true;
        patchpoint->effects.reads = HeapRange::top();
        patchpoint->append(value, ValueRep::ColdAny);
        patchpoint->setGenerator([=] (CCallHelpers&, const StackmapGenerationParams&) { });
    }
17286
    // Lazily adds `target` to the compilation plan's weak-reference set.
    void addWeakReference(JSCell* target)
    {
        m_graph.m_plan.weakReferences().addLazily(target);
    }
17291
    // Decodes a cell's 32-bit StructureID into a Structure*. The high bits of the ID (above the
    // entropy bits) index the structure table; the ID's entropy bits are shifted into pointer
    // position and XORed against the encoded table entry to recover the real pointer.
    LValue loadStructure(LValue value)
    {
        LValue structureID = m_out.load32(value, m_heaps.JSCell_structureID);
        LValue tableBase = m_out.loadPtr(m_out.absolute(vm().heap.structureIDTable().base()));
        LValue tableIndex = m_out.aShr(structureID, m_out.constInt32(StructureIDTable::s_numberOfEntropyBits));
        LValue entropyBits = m_out.shl(m_out.zeroExtPtr(structureID), m_out.constInt32(StructureIDTable::s_entropyBitsShiftForStructurePointer));
        TypedPointer address = m_out.baseIndex(m_heaps.structureTable, tableBase, m_out.zeroExtPtr(tableIndex));
        LValue encodedStructureBits = m_out.loadPtr(address);
        return m_out.bitXor(encodedStructureBits, entropyBits);
    }
17302
    // Emits a constant pointer to a GC cell, registering it as a weak reference first.
    LValue weakPointer(JSCell* pointer)
    {
        addWeakReference(pointer);
        return m_out.weakPointer(m_graph, pointer);
    }

    // Emits a constant pointer to an already-frozen value; no extra registration needed here.
    LValue frozenPointer(FrozenValue* value)
    {
        return m_out.weakPointer(value);
    }

    // Emits a structure's id as a plain int32 constant.
    LValue weakStructureID(RegisteredStructure structure)
    {
        return m_out.constInt32(structure->id());
    }

    // Emits a constant pointer to a registered structure.
    LValue weakStructure(RegisteredStructure structure)
    {
        ASSERT(!!structure.get());
        return m_out.weakPointer(m_graph, structure.get());
    }
17324
    // Helpers that compute TypedPointers into stack slots (the `variables` abstract heap),
    // with payload/tag variants for accessing the halves of a stored JSValue.
    TypedPointer addressFor(LValue base, int operand, ptrdiff_t offset = 0)
    {
        return m_out.address(base, m_heaps.variables[operand], offset);
    }
    TypedPointer payloadFor(LValue base, int operand)
    {
        return addressFor(base, operand, PayloadOffset);
    }
    TypedPointer tagFor(LValue base, int operand)
    {
        return addressFor(base, operand, TagOffset);
    }
    TypedPointer addressFor(int operand, ptrdiff_t offset = 0)
    {
        return addressFor(VirtualRegister(operand), offset);
    }
    TypedPointer addressFor(VirtualRegister operand, ptrdiff_t offset = 0)
    {
        // NOTE(review): locals are addressed off m_captured while everything else uses
        // m_callFrame — confirm m_captured is initialized accordingly (its setup is not visible
        // in this part of the file).
        if (operand.isLocal())
            return addressFor(m_captured, operand.offset(), offset);
        return addressFor(m_callFrame, operand.offset(), offset);
    }
    TypedPointer payloadFor(int operand)
    {
        return payloadFor(VirtualRegister(operand));
    }
    TypedPointer payloadFor(VirtualRegister operand)
    {
        return addressFor(operand, PayloadOffset);
    }
    TypedPointer tagFor(int operand)
    {
        return tagFor(VirtualRegister(operand));
    }
    TypedPointer tagFor(VirtualRegister operand)
    {
        return addressFor(operand, TagOffset);
    }
17363
    // Accessors for the abstract interpreter's current knowledge about a node: its full abstract
    // value, its proven type, its proven constant value (if any), and its proven structure set.
    AbstractValue abstractValue(Node* node)
    {
        return m_state.forNode(node);
    }
    AbstractValue abstractValue(Edge edge)
    {
        return abstractValue(edge.node());
    }

    SpeculatedType provenType(Node* node)
    {
        return abstractValue(node).m_type;
    }
    SpeculatedType provenType(Edge edge)
    {
        return provenType(edge.node());
    }

    JSValue provenValue(Node* node)
    {
        return abstractValue(node).m_value;
    }
    JSValue provenValue(Edge edge)
    {
        return provenValue(edge.node());
    }

    StructureAbstractValue abstractStructure(Node* node)
    {
        return abstractValue(node).m_structure;
    }
    StructureAbstractValue abstractStructure(Edge edge)
    {
        return abstractStructure(edge.node());
    }
17399
    // Emits crashing code attributed to the block/node currently being lowered.
    void crash()
    {
        crash(m_highBlock, m_node);
    }
17404 void crash(DFG::BasicBlock* block, Node* node)
17405 {
17406 BlockIndex blockIndex = block->index;
17407 unsigned nodeIndex = node ? node->index() : UINT_MAX;
17408#if ASSERT_DISABLED
17409 m_out.patchpoint(Void)->setGenerator(
17410 [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
17411 AllowMacroScratchRegisterUsage allowScratch(jit);
17412
17413 jit.move(CCallHelpers::TrustedImm32(blockIndex), GPRInfo::regT0);
17414 jit.move(CCallHelpers::TrustedImm32(nodeIndex), GPRInfo::regT1);
17415 if (node)
17416 jit.move(CCallHelpers::TrustedImm32(node->op()), GPRInfo::regT2);
17417 jit.abortWithReason(FTLCrash);
17418 });
17419#else
17420 m_out.call(
17421 Void,
17422 m_out.constIntPtr(ftlUnreachable),
17423 // We don't want the CodeBlock to have a weak pointer to itself because
17424 // that would cause it to always get collected.
17425 m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), m_out.constInt32(blockIndex),
17426 m_out.constInt32(nodeIndex));
17427#endif
17428 m_out.unreachable();
17429 }
17430
    // The availability map maintained incrementally by the OSR availability calculator.
    AvailabilityMap& availabilityMap() { return m_availabilityCalculator.m_availability; }

    VM& vm() { return m_graph.m_vm; }
    CodeBlock* codeBlock() { return m_graph.m_codeBlock; }
17435
    Graph& m_graph;
    State& m_ftlState;
    AbstractHeapRepository m_heaps;
    Output m_out;
    Procedure& m_proc;

    LBasicBlock m_handleExceptions;
    // DFG block -> lowered B3 block.
    HashMap<DFG::BasicBlock*, LBasicBlock> m_blocks;

    LValue m_callFrame;
    LValue m_captured;
    LValue m_tagTypeNumber;
    LValue m_tagMask;

    // Per-representation tables mapping each DFG node to its lowered B3 value; see the set*()
    // helpers and isValid().
    HashMap<Node*, LoweredNodeValue> m_int32Values;
    HashMap<Node*, LoweredNodeValue> m_strictInt52Values;
    HashMap<Node*, LoweredNodeValue> m_int52Values;
    HashMap<Node*, LoweredNodeValue> m_jsValueValues;
    HashMap<Node*, LoweredNodeValue> m_booleanValues;
    HashMap<Node*, LoweredNodeValue> m_storageValues;
    HashMap<Node*, LoweredNodeValue> m_doubleValues;

    HashMap<Node*, LValue> m_phis;

    LocalOSRAvailabilityCalculator m_availabilityCalculator;

    InPlaceAbstractState m_state;
    AbstractInterpreter<InPlaceAbstractState> m_interpreter;
    // The DFG block currently being lowered, the next one, and its lowered successor.
    DFG::BasicBlock* m_highBlock;
    DFG::BasicBlock* m_nextHighBlock;
    LBasicBlock m_nextLowBlock;

    enum IndexMaskingMode { IndexMaskingDisabled, IndexMaskingEnabled };

    IndexMaskingMode m_indexMaskingMode;

    // The origin and node currently being lowered.
    NodeOrigin m_origin;
    unsigned m_nodeIndex;
    Node* m_node;

    // These are used for validating AI state.
    HashMap<Node*, NodeSet> m_liveInToNode;
    HashMap<Node*, AbstractValue> m_aiCheckedNodes;
    String m_graphDump;
17480};
17481
17482} // anonymous namespace
17483
17484void lowerDFGToB3(State& state)
17485{
17486 LowerDFGToB3 lowering(state);
17487 lowering.lower();
17488}
17489
17490} } // namespace JSC::FTL
17491
17492#endif // ENABLE(FTL_JIT)
17493
17494