1/*
2 * Copyright (C) 2013-2019 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "FTLLowerDFGToB3.h"
28
29#if ENABLE(FTL_JIT)
30
31#include "AirCode.h"
32#include "AirGenerationContext.h"
33#include "AllowMacroScratchRegisterUsage.h"
34#include "AllowMacroScratchRegisterUsageIf.h"
35#include "AtomicsObject.h"
36#include "B3CheckValue.h"
37#include "B3FenceValue.h"
38#include "B3PatchpointValue.h"
39#include "B3SlotBaseValue.h"
40#include "B3StackmapGenerationParams.h"
41#include "B3ValueInlines.h"
42#include "CallFrameShuffler.h"
43#include "CodeBlockWithJITType.h"
44#include "DFGAbstractInterpreterInlines.h"
45#include "DFGCapabilities.h"
46#include "DFGDoesGC.h"
47#include "DFGDominators.h"
48#include "DFGInPlaceAbstractState.h"
49#include "DFGLivenessAnalysisPhase.h"
50#include "DFGMayExit.h"
51#include "DFGOSRAvailabilityAnalysisPhase.h"
52#include "DFGOSRExitFuzz.h"
53#include "DirectArguments.h"
54#include "FTLAbstractHeapRepository.h"
55#include "FTLAvailableRecovery.h"
56#include "FTLExceptionTarget.h"
57#include "FTLForOSREntryJITCode.h"
58#include "FTLFormattedValue.h"
59#include "FTLLazySlowPathCall.h"
60#include "FTLLoweredNodeValue.h"
61#include "FTLOperations.h"
62#include "FTLOutput.h"
63#include "FTLPatchpointExceptionHandle.h"
64#include "FTLSnippetParams.h"
65#include "FTLThunks.h"
66#include "FTLWeightedTarget.h"
67#include "JITAddGenerator.h"
68#include "JITBitAndGenerator.h"
69#include "JITBitOrGenerator.h"
70#include "JITBitXorGenerator.h"
71#include "JITDivGenerator.h"
72#include "JITInlineCacheGenerator.h"
73#include "JITLeftShiftGenerator.h"
74#include "JITMathIC.h"
75#include "JITMulGenerator.h"
76#include "JITRightShiftGenerator.h"
77#include "JITSubGenerator.h"
78#include "JSAsyncFunction.h"
79#include "JSAsyncGeneratorFunction.h"
80#include "JSCInlines.h"
81#include "JSGeneratorFunction.h"
82#include "JSImmutableButterfly.h"
83#include "JSLexicalEnvironment.h"
84#include "JSMap.h"
85#include "OperandsInlines.h"
86#include "ProbeContext.h"
87#include "RegExpObject.h"
88#include "ScopedArguments.h"
89#include "ScopedArgumentsTable.h"
90#include "ScratchRegisterAllocator.h"
91#include "SetupVarargsFrame.h"
92#include "ShadowChicken.h"
93#include "StructureStubInfo.h"
94#include "SuperSampler.h"
95#include "ThunkGenerators.h"
96#include "VirtualRegister.h"
97#include "Watchdog.h"
98#include <atomic>
99#include <wtf/Box.h>
100#include <wtf/Gigacage.h>
101#include <wtf/RecursableLambda.h>
102#include <wtf/StdUnorderedSet.h>
103
// Locally redefine RELEASE_ASSERT so that a failure in this file always
// reports the exact file/line/function and the stringified condition before
// crashing. NOTE(review): presumably this guarantees a diagnostic even in
// build configurations where the stock RELEASE_ASSERT is terser — confirm
// against WTF/Assertions.h.
#undef RELEASE_ASSERT
#define RELEASE_ASSERT(assertion) do { \
    if (!(assertion)) { \
        WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
        CRASH(); \
    } \
} while (0)
111
112namespace JSC { namespace FTL {
113
114using namespace B3;
115using namespace DFG;
116
117namespace {
118
119std::atomic<int> compileCounter;
120
121#if !ASSERT_DISABLED
122NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
123 CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
124{
125 dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
126 if (nodeIndex != UINT_MAX)
127 dataLog(", node @", nodeIndex);
128 dataLog(".\n");
129 CRASH();
130}
131#endif
132
133// Using this instead of typeCheck() helps to reduce the load on B3, by creating
134// significantly less dead code.
// Emits an OSR-exit-on-failCondition type check, but only when the abstract
// interpreter says the edge could actually carry a value outside
// typesPassedThrough — otherwise the whole check is skipped (that is the
// "less dead code" claim above). Each macro argument is copied into a local
// _ftc_* once, so arguments are evaluated exactly once.
#define FTL_TYPE_CHECK_WITH_EXIT_KIND(exitKind, lowValue, highValue, typesPassedThrough, failCondition) do { \
        FormattedValue _ftc_lowValue = (lowValue); \
        Edge _ftc_highValue = (highValue); \
        SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
        if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
            break; \
        typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition), exitKind); \
    } while (false)

// Convenience wrapper for the common case: the OSR exit reason is BadType.
#define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) \
    FTL_TYPE_CHECK_WITH_EXIT_KIND(BadType, lowValue, highValue, typesPassedThrough, failCondition)
146
147class LowerDFGToB3 {
148 WTF_MAKE_NONCOPYABLE(LowerDFGToB3);
149public:
    // Wires the lowering context to the compilation State: the DFG graph, the
    // FTL output (m_out), the B3 procedure, and the in-place abstract
    // interpreter that is re-run during lowering. Index masking (Spectre
    // mitigation) is decided once, up front, from Options.
    LowerDFGToB3(State& state)
        : m_graph(state.graph)
        , m_ftlState(state)
        , m_out(state)
        , m_proc(*state.proc)
        , m_availabilityCalculator(m_graph)
        , m_state(state.graph)
        , m_interpreter(state.graph, m_state)
        , m_indexMaskingMode(Options::enableSpectreMitigations() ? IndexMaskingEnabled : IndexMaskingDisabled)
    {
        // When AI-state validation is enabled, precompute, for every node, the
        // set of nodes live immediately before it ("live-in"). validateAIState()
        // consumes m_liveInToNode to know which values to spot-check at runtime.
        if (Options::validateAbstractInterpreterState()) {
            performLivenessAnalysis(m_graph);

            // We only use node liveness here, not combined liveness, as we only track
            // AI state for live nodes.
            for (DFG::BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                NodeSet live;

                // Seed with the block's live-at-tail set (primary projections only).
                for (NodeFlowProjection node : block->ssa->liveAtTail) {
                    if (node.kind() == NodeFlowProjection::Primary)
                        live.addVoid(node.node());
                }

                // Walk the block backwards: before a node executes, the node's own
                // result is not yet live, but its operands are.
                for (unsigned i = block->size(); i--; ) {
                    Node* node = block->at(i);
                    live.remove(node);
                    m_graph.doToChildren(node, [&] (Edge child) {
                        live.addVoid(child.node());
                    });
                    // Note: this snapshots (copies) the set per node.
                    m_liveInToNode.add(node, live);
                }
            }
        }
    }
184
    // Top-level driver: builds the B3 procedure skeleton (prologue, entry
    // switch, argument speculations, exception-handler block), then lowers
    // every reachable DFG block in pre-order, and finally finishes decoration
    // and block ordering. Order of the steps below is load-bearing.
    void lower()
    {
        State* state = &m_ftlState;

        // NOTE(review): `name` is only computed here for verbose-compilation
        // naming; it is not otherwise consumed within this function.
        CString name;
        if (verboseCompilationEnabled()) {
            name = toCString(
                "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
                "_", codeBlock()->hash());
        } else
            name = "jsBody";

        {
            m_proc.setNumEntrypoints(m_graph.m_numberOfEntrypoints);
            CodeBlock* codeBlock = m_graph.m_codeBlock;

            // Shared prologue for every catch (OSR-entry) entrypoint: set up the
            // stack pointer, optionally zero the frame, save callee-saves, and
            // store the CodeBlock into the call frame header.
            Ref<B3::Air::PrologueGenerator> catchPrologueGenerator = createSharedTask<B3::Air::PrologueGeneratorFunction>(
                [codeBlock] (CCallHelpers& jit, B3::Air::Code& code) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);
                    jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
                    if (Options::zeroStackFrame())
                        jit.clearStackFrame(GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister, GPRInfo::regT0, code.frameSize());

                    jit.emitSave(code.calleeSaveRegisterAtOffsetList());
                    jit.emitPutToCallFrameHeader(codeBlock, CallFrameSlot::codeBlock);
                });

            // Entrypoint 0 is the normal entry; only catch entrypoints get the
            // custom prologue above.
            for (unsigned catchEntrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeOffset.keys()) {
                RELEASE_ASSERT(catchEntrypointIndex != 0);
                m_proc.code().setPrologueForEntrypoint(catchEntrypointIndex, catchPrologueGenerator.copyRef());
            }

            if (m_graph.m_maxLocalsForCatchOSREntry) {
                uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
                m_ftlState.jitCode->common.catchOSREntryBuffer = m_graph.m_vm.scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
            }
        }

        // Needed below by safelyInvalidateAfterTermination()'s dominance queries.
        m_graph.ensureSSADominators();

        if (verboseCompilationEnabled())
            dataLog("Function ready, beginning lowering.\n");

        m_out.initialize(m_heaps);

        // We use prologue frequency for all of the initialization code.
        m_out.setFrequency(1);

        bool hasMultipleEntrypoints = m_graph.m_numberOfEntrypoints > 1;

        LBasicBlock prologue = m_out.newBlock();
        LBasicBlock callEntrypointArgumentSpeculations = hasMultipleEntrypoints ? m_out.newBlock() : nullptr;
        m_handleExceptions = m_out.newBlock();

        // Pre-create one low-level block per reachable DFG block, each tagged
        // with its profiled execution frequency.
        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            m_highBlock = m_graph.block(blockIndex);
            if (!m_highBlock)
                continue;
            m_out.setFrequency(m_highBlock->executionCount);
            m_blocks.add(m_highBlock, m_out.newBlock());
        }

        // Back to prologue frequency for any blocks that get sneakily created in the initialization code.
        m_out.setFrequency(1);

        m_out.appendTo(prologue, hasMultipleEntrypoints ? callEntrypointArgumentSpeculations : m_handleExceptions);
        m_out.initializeConstants(m_proc, prologue);
        createPhiVariables();

        // Reserve the stack area used for OSR-exit captured values; m_captured
        // points one-past-the-end of that slot.
        size_t sizeOfCaptured = sizeof(JSValue) * m_graph.m_nextMachineLocal;
        B3::SlotBaseValue* capturedBase = m_out.lockedStackSlot(sizeOfCaptured);
        m_captured = m_out.add(capturedBase, m_out.constIntPtr(sizeOfCaptured));
        state->capturedValue = capturedBase->slot();

        auto preOrder = m_graph.blocksInPreOrder();

        m_callFrame = m_out.framePointer();
        m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
        m_tagMask = m_out.constInt64(TagMask);

        // Make sure that B3 knows that we really care about the mask registers. This forces the
        // constants to be materialized in registers.
        m_proc.addFastConstant(m_tagTypeNumber->key());
        m_proc.addFastConstant(m_tagMask->key());

        // We don't want the CodeBlock to have a weak pointer to itself because
        // that would cause it to always get collected.
        m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), addressFor(CallFrameSlot::codeBlock));

        VM* vm = &this->vm();

        // Stack Overflow Check.
        unsigned exitFrameSize = m_graph.requiredRegisterCountForExit() * sizeof(Register);
        MacroAssembler::AbsoluteAddress addressOfStackLimit(vm->addressOfSoftStackLimit());
        PatchpointValue* stackOverflowHandler = m_out.patchpoint(Void);
        CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(0));
        stackOverflowHandler->appendSomeRegister(m_callFrame);
        stackOverflowHandler->clobber(RegisterSet::macroScratchRegisters());
        stackOverflowHandler->numGPScratchRegisters = 1;
        stackOverflowHandler->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                GPRReg fp = params[0].gpr();
                GPRReg scratch = params.gpScratch(0);

                // The check must cover both the FTL frame and the worst-case
                // OSR-exit frame, since exits reuse this frame.
                unsigned ftlFrameSize = params.proc().frameSize();
                unsigned maxFrameSize = std::max(exitFrameSize, ftlFrameSize);

                jit.addPtr(MacroAssembler::TrustedImm32(-maxFrameSize), fp, scratch);
                MacroAssembler::JumpList stackOverflow;
                // Guard against fp - maxFrameSize wrapping past fp itself when the
                // frame is larger than the reserved zone.
                if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
                    stackOverflow.append(jit.branchPtr(MacroAssembler::Above, scratch, fp));
                stackOverflow.append(jit.branchPtr(MacroAssembler::Above, addressOfStackLimit, scratch));

                // Slow path, emitted out-of-line: restore callee-saves, record the
                // call site, then throw StackOverflowError via the caller frame.
                params.addLatePath([=] (CCallHelpers& jit) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);

                    stackOverflow.link(&jit);

                    // FIXME: We would not have to do this if the stack check was part of the Air
                    // prologue. Then, we would know that there is no way for the callee-saves to
                    // get clobbered.
                    // https://bugs.webkit.org/show_bug.cgi?id=172456
                    jit.emitRestore(params.proc().calleeSaveRegisterAtOffsetList());

                    jit.store32(
                        MacroAssembler::TrustedImm32(callSiteIndex.bits()),
                        CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
                    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);

                    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
                    jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::argumentGPR1);
                    CCallHelpers::Call throwCall = jit.call(OperationPtrTag);

                    jit.move(CCallHelpers::TrustedImmPtr(vm), GPRInfo::argumentGPR0);
                    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
                    CCallHelpers::Call lookupExceptionHandlerCall = jit.call(OperationPtrTag);
                    jit.jumpToExceptionHandler(*vm);

                    // Call targets are resolved at link time.
                    jit.addLinkTask(
                        [=] (LinkBuffer& linkBuffer) {
                            linkBuffer.link(throwCall, FunctionPtr<OperationPtrTag>(operationThrowStackOverflowError));
                            linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr<OperationPtrTag>(lookupExceptionHandlerFromCallerFrame));
                        });
                });
            });

        LBasicBlock firstDFGBasicBlock = lowBlock(m_graph.block(0));

        {
            // With multiple entrypoints, dispatch via an entry switch: index 0 is
            // the normal call entry (which still needs argument speculations),
            // everything else jumps straight into the first DFG block.
            if (hasMultipleEntrypoints) {
                Vector<LBasicBlock> successors(m_graph.m_numberOfEntrypoints);
                successors[0] = callEntrypointArgumentSpeculations;
                for (unsigned i = 1; i < m_graph.m_numberOfEntrypoints; ++i) {
                    // Currently, the only other entrypoint is an op_catch entrypoint.
                    // We do OSR entry at op_catch, and we prove argument formats before
                    // jumping to FTL code, so we don't need to check argument types here
                    // for these entrypoints.
                    successors[i] = firstDFGBasicBlock;
                }

                m_out.entrySwitch(successors);
                m_out.appendTo(callEntrypointArgumentSpeculations, m_handleExceptions);
            }

            m_node = nullptr;
            m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);

            // Check Arguments.
            availabilityMap().clear();
            availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
            for (unsigned i = codeBlock()->numParameters(); i--;) {
                availabilityMap().m_locals.argument(i) =
                    Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
            }

            // Speculate on each argument according to the entrypoint-0 argument
            // formats; a failure OSR-exits with BadType and feeds the value profile.
            for (unsigned i = codeBlock()->numParameters(); i--;) {
                MethodOfGettingAValueProfile profile(&m_graph.m_profiledBlock->valueProfileForArgument(i));
                VirtualRegister operand = virtualRegisterForArgument(i);
                LValue jsValue = m_out.load64(addressFor(operand));

                switch (m_graph.m_argumentFormats[0][i]) {
                case FlushedInt32:
                    speculate(BadType, jsValueValue(jsValue), profile, isNotInt32(jsValue));
                    break;
                case FlushedBoolean:
                    speculate(BadType, jsValueValue(jsValue), profile, isNotBoolean(jsValue));
                    break;
                case FlushedCell:
                    speculate(BadType, jsValueValue(jsValue), profile, isNotCell(jsValue));
                    break;
                case FlushedJSValue:
                    break;
                default:
                    DFG_CRASH(m_graph, nullptr, "Bad flush format for argument");
                    break;
                }
            }
            m_out.jump(firstDFGBasicBlock);
        }


        // The shared exception-handling block: a patchpoint that jumps to the
        // out-of-line exception handler label, linked at link time.
        m_out.appendTo(m_handleExceptions, firstDFGBasicBlock);
        Box<CCallHelpers::Label> exceptionHandler = state->exceptionHandler;
        m_out.patchpoint(Void)->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
                CCallHelpers::Jump jump = jit.jump();
                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        linkBuffer.link(jump, linkBuffer.locationOf<ExceptionHandlerPtrTag>(*exceptionHandler));
                    });
            });
        m_out.unreachable();

        // Lower every reachable DFG block, in pre-order.
        for (DFG::BasicBlock* block : preOrder)
            compileBlock(block);

        // Make sure everything is decorated. This does a bunch of deferred decorating. This has
        // to happen last because our abstract heaps are generated lazily. They have to be
        // generated lazily because we have an infinite number of numbered, indexed, and
        // absolute heaps. We only become aware of the ones we actually mention while lowering.
        m_heaps.computeRangesAndDecorateInstructions();

        // We create all Phi's up front, but we may then decide not to compile the basic block
        // that would have contained one of them. So this creates orphans, which triggers B3
        // validation failures. Calling this fixes the issue.
        //
        // Note that you should avoid the temptation to make this call conditional upon
        // validation being enabled. B3 makes no guarantees of any kind of correctness when
        // dealing with IR that would have failed validation. For example, it would be valid to
        // write a B3 phase that so aggressively assumes the lack of orphans that it would crash
        // if any orphans were around. We might even have such phases already.
        m_proc.deleteOrphans();

        // We put the blocks into the B3 procedure in a super weird order. Now we reorder them.
        m_out.applyBlockOrder();
    }
422
423private:
424
425 void createPhiVariables()
426 {
427 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
428 DFG::BasicBlock* block = m_graph.block(blockIndex);
429 if (!block)
430 continue;
431 for (unsigned nodeIndex = block->size(); nodeIndex--;) {
432 Node* node = block->at(nodeIndex);
433 if (node->op() != DFG::Phi)
434 continue;
435 LType type;
436 switch (node->flags() & NodeResultMask) {
437 case NodeResultDouble:
438 type = Double;
439 break;
440 case NodeResultInt32:
441 type = Int32;
442 break;
443 case NodeResultInt52:
444 type = Int64;
445 break;
446 case NodeResultBoolean:
447 type = Int32;
448 break;
449 case NodeResultJS:
450 type = Int64;
451 break;
452 default:
453 DFG_CRASH(m_graph, node, "Bad Phi node result type");
454 break;
455 }
456 m_phis.add(node, m_proc.add<Value>(B3::Phi, type, Origin(node)));
457 }
458 }
459 }
460
    // Lowers one DFG basic block into its pre-created low-level block.
    // Sets up per-block state (frequency, availability, abstract interpreter),
    // bails with a crash patch if CFA never reached the block, then lowers each
    // node in order until compileNode() asks to stop.
    void compileBlock(DFG::BasicBlock* block)
    {
        if (!block)
            return;

        if (verboseCompilationEnabled())
            dataLog("Compiling block ", *block, "\n");

        m_highBlock = block;

        // Make sure that any blocks created while lowering code in the high block have the frequency of
        // the high block. This is appropriate because B3 doesn't need precise frequencies. It just needs
        // something roughly approximate for things like register allocation.
        m_out.setFrequency(m_highBlock->executionCount);

        LBasicBlock lowBlock = m_blocks.get(m_highBlock);

        // Find the next non-null high block (block indices can have gaps).
        m_nextHighBlock = 0;
        for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
            m_nextHighBlock = m_graph.block(nextBlockIndex);
            if (m_nextHighBlock)
                break;
        }
        m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0;

        // All of this effort to find the next block gives us the ability to keep the
        // generated IR in roughly program order. This ought not affect the performance
        // of the generated code (since we expect B3 to reorder things) but it will
        // make IR dumps easier to read.
        m_out.appendTo(lowBlock, m_nextLowBlock);

        if (Options::ftlCrashes())
            m_out.trap();

        // If CFA proved this block unreachable, emit a crash instead of lowering
        // nodes whose AI state was never computed.
        if (!m_highBlock->cfaHasVisited) {
            if (verboseCompilationEnabled())
                dataLog("Bailing because CFA didn't reach.\n");
            crash(m_highBlock, nullptr);
            return;
        }

        // Per-block reset of the AI-validation cache (see validateAIState()).
        m_aiCheckedNodes.clear();

        m_availabilityCalculator.beginBlock(m_highBlock);

        // Re-run the abstract interpreter from this block's head state.
        m_state.reset();
        m_state.beginBasicBlock(m_highBlock);

        for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) {
            if (!compileNode(m_nodeIndex))
                break;
        }
    }
514
515 void safelyInvalidateAfterTermination()
516 {
517 if (verboseCompilationEnabled())
518 dataLog("Bailing.\n");
519 crash();
520
521 // Invalidate dominated blocks. Under normal circumstances we would expect
522 // them to be invalidated already. But you can have the CFA become more
523 // precise over time because the structures of objects change on the main
524 // thread. Failing to do this would result in weird crashes due to a value
525 // being used but not defined. Race conditions FTW!
526 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
527 DFG::BasicBlock* target = m_graph.block(blockIndex);
528 if (!target)
529 continue;
530 if (m_graph.m_ssaDominators->dominates(m_highBlock, target)) {
531 if (verboseCompilationEnabled())
532 dataLog("Block ", *target, " will bail also.\n");
533 target->cfaHasVisited = false;
534 }
535 }
536 }
537
    // Debug aid (enabled by Options::validateAbstractInterpreterState()):
    // before lowering `node`, emit runtime probes that check that the values
    // the abstract interpreter predicted for the nodes live at this point
    // actually match what is in the registers at runtime. On mismatch the
    // probe dumps both values plus a full graph dump, then crashes.
    void validateAIState(Node* node)
    {
        // Lazily cache a textual dump of the whole graph for crash reports.
        if (!m_graphDump) {
            StringPrintStream out;
            m_graph.dump(out);
            m_graphDump = out.toString();
        }

        // Skip ops that don't produce/affect values worth validating here.
        switch (node->op()) {
        case MovHint:
        case ZombieHint:
        case JSConstant:
        case LazyJSConstant:
        case DoubleConstant:
        case Int52Constant:
        case GetStack:
        case PutStack:
        case KillStack:
        case ExitOK:
            return;
        default:
            break;
        }

        // Before we execute node.
        NodeSet& live = m_liveInToNode.find(node)->value;
        unsigned highParentIndex = node->index();
        {
            // Deterministic sampling keyed on the node index: only validate at
            // a fraction of sites given by validateAbstractInterpreterStateProbability.
            uint64_t hash = WTF::intHash(highParentIndex);
            if (hash >= static_cast<uint64_t>((static_cast<double>(std::numeric_limits<unsigned>::max()) + 1) * Options::validateAbstractInterpreterStateProbability()))
                return;
        }

        // NOTE: this loop variable shadows the `node` parameter; from here on,
        // `node` is the live node being validated, not the node being lowered.
        for (Node* node : live) {
            if (node->isPhantomAllocation())
                continue;

            if (node->op() == CheckInBounds)
                continue;

            AbstractValue value = m_interpreter.forNode(node);
            {
                // Skip nodes whose AI value is unchanged since the last check —
                // except possible cells, which are re-validated every time.
                auto iter = m_aiCheckedNodes.find(node);
                if (iter != m_aiCheckedNodes.end()) {
                    AbstractValue checkedValue = iter->value;
                    if (checkedValue == value) {
                        if (!(value.m_type & SpecCell))
                            continue;
                    }
                }
                m_aiCheckedNodes.set(node, value);
            }

            // Materialize the node's value in a uniform representation; nodes
            // with no JS/double/Int52 result are not validated.
            FlushFormat flushFormat;
            LValue input;
            if (node->hasJSResult()) {
                input = lowJSValue(Edge(node, UntypedUse));
                flushFormat = FlushedJSValue;
            } else if (node->hasDoubleResult()) {
                input = lowDouble(Edge(node, DoubleRepUse));
                flushFormat = FlushedDouble;
            } else if (node->hasInt52Result()) {
                input = strictInt52ToJSValue(lowStrictInt52(Edge(node, Int52RepUse)));
                flushFormat = FlushedInt52;
            } else
                continue;

            unsigned highChildIndex = node->index();

            // Copy the dump so the probe lambda captures a String by value.
            String graphDump = m_graphDump;

            // The patchpoint pins `input` in a register; writesLocalState keeps
            // B3 from treating the probe as removable dead code.
            PatchpointValue* patchpoint = m_out.patchpoint(Void);
            patchpoint->effects = Effects::none();
            patchpoint->effects.writesLocalState = true;
            patchpoint->appendSomeRegister(input);
            patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                GPRReg reg = InvalidGPRReg;
                FPRReg fpReg = InvalidFPRReg;
                if (flushFormat == FlushedDouble)
                    fpReg = params[0].fpr();
                else
                    reg = params[0].gpr();
                jit.probe([=] (Probe::Context& context) {
                    // This `input` shadows the LValue above; here it is the
                    // actual runtime JSValue read out of the probed register.
                    JSValue input;
                    double doubleInput;

                    auto dumpAndCrash = [&] {
                        dataLogLn("Validation failed at node: @", highParentIndex);
                        dataLogLn("Failed validating live value: @", highChildIndex);
                        dataLogLn();
                        dataLogLn("Expected AI value = ", value);
                        if (flushFormat != FlushedDouble)
                            dataLogLn("Unexpected value = ", input);
                        else
                            dataLogLn("Unexpected double value = ", doubleInput);
                        dataLogLn();
                        dataLogLn(graphDump);
                        CRASH();
                    };

                    if (flushFormat == FlushedDouble) {
                        // Classify the runtime double (number vs pure/impure NaN)
                        // and check the AI value admits that type.
                        doubleInput = context.fpr(fpReg);
                        SpeculatedType type;
                        if (!std::isnan(doubleInput))
                            type = speculationFromValue(jsDoubleNumber(doubleInput));
                        else if (isImpureNaN(doubleInput))
                            type = SpecDoubleImpureNaN;
                        else
                            type = SpecDoublePureNaN;

                        if (!value.couldBeType(type))
                            dumpAndCrash();
                    } else {
                        input = JSValue::decode(context.gpr(reg));
                        if (flushFormat == FlushedInt52) {
                            // Int52 was boxed via strictInt52ToJSValue above, so it
                            // must decode to an AnyInt; normalize for validation.
                            RELEASE_ASSERT(input.isAnyInt());
                            input = jsDoubleNumber(input.asAnyInt());
                        }
                        if (!value.validateOSREntryValue(input, flushFormat))
                            dumpAndCrash();
                    }

                });
            });
        }
    }
664
665 bool compileNode(unsigned nodeIndex)
666 {
667 if (!m_state.isValid()) {
668 safelyInvalidateAfterTermination();
669 return false;
670 }
671
672 m_node = m_highBlock->at(nodeIndex);
673 m_origin = m_node->origin;
674 m_out.setOrigin(m_node);
675
676 if (verboseCompilationEnabled())
677 dataLog("Lowering ", m_node, "\n");
678
679 m_interpreter.startExecuting();
680 m_interpreter.executeKnownEdgeTypes(m_node);
681
682 if (Options::validateAbstractInterpreterState())
683 validateAIState(m_node);
684
685 if (validateDFGDoesGC) {
686 bool expectDoesGC = doesGC(m_graph, m_node);
687 m_out.store(m_out.constBool(expectDoesGC), m_out.absolute(vm().heap.addressOfExpectDoesGC()));
688 }
689
690 switch (m_node->op()) {
691 case DFG::Upsilon:
692 compileUpsilon();
693 break;
694 case DFG::Phi:
695 compilePhi();
696 break;
697 case JSConstant:
698 break;
699 case DoubleConstant:
700 compileDoubleConstant();
701 break;
702 case Int52Constant:
703 compileInt52Constant();
704 break;
705 case LazyJSConstant:
706 compileLazyJSConstant();
707 break;
708 case DoubleRep:
709 compileDoubleRep();
710 break;
711 case DoubleAsInt32:
712 compileDoubleAsInt32();
713 break;
714 case DFG::ValueRep:
715 compileValueRep();
716 break;
717 case Int52Rep:
718 compileInt52Rep();
719 break;
720 case ValueToInt32:
721 compileValueToInt32();
722 break;
723 case BooleanToNumber:
724 compileBooleanToNumber();
725 break;
726 case ExtractOSREntryLocal:
727 compileExtractOSREntryLocal();
728 break;
729 case ExtractCatchLocal:
730 compileExtractCatchLocal();
731 break;
732 case ClearCatchLocals:
733 compileClearCatchLocals();
734 break;
735 case GetStack:
736 compileGetStack();
737 break;
738 case PutStack:
739 compilePutStack();
740 break;
741 case DFG::Check:
742 case CheckVarargs:
743 compileNoOp();
744 break;
745 case ToObject:
746 case CallObjectConstructor:
747 compileToObjectOrCallObjectConstructor();
748 break;
749 case ToThis:
750 compileToThis();
751 break;
752 case ValueNegate:
753 compileValueNegate();
754 break;
755 case ValueAdd:
756 compileValueAdd();
757 break;
758 case ValueSub:
759 compileValueSub();
760 break;
761 case ValueMul:
762 compileValueMul();
763 break;
764 case StrCat:
765 compileStrCat();
766 break;
767 case ArithAdd:
768 case ArithSub:
769 compileArithAddOrSub();
770 break;
771 case ArithClz32:
772 compileArithClz32();
773 break;
774 case ArithMul:
775 compileArithMul();
776 break;
777 case ValueDiv:
778 compileValueDiv();
779 break;
780 case ArithDiv:
781 compileArithDiv();
782 break;
783 case ValueMod:
784 compileValueMod();
785 break;
786 case ArithMod:
787 compileArithMod();
788 break;
789 case ArithMin:
790 case ArithMax:
791 compileArithMinOrMax();
792 break;
793 case ArithAbs:
794 compileArithAbs();
795 break;
796 case ValuePow:
797 compileValuePow();
798 break;
799 case ArithPow:
800 compileArithPow();
801 break;
802 case ArithRandom:
803 compileArithRandom();
804 break;
805 case ArithRound:
806 compileArithRound();
807 break;
808 case ArithFloor:
809 compileArithFloor();
810 break;
811 case ArithCeil:
812 compileArithCeil();
813 break;
814 case ArithTrunc:
815 compileArithTrunc();
816 break;
817 case ArithSqrt:
818 compileArithSqrt();
819 break;
820 case ArithFRound:
821 compileArithFRound();
822 break;
823 case ArithNegate:
824 compileArithNegate();
825 break;
826 case ArithUnary:
827 compileArithUnary();
828 break;
829 case ValueBitNot:
830 compileValueBitNot();
831 break;
832 case ArithBitNot:
833 compileArithBitNot();
834 break;
835 case ValueBitAnd:
836 compileValueBitAnd();
837 break;
838 case ArithBitAnd:
839 compileArithBitAnd();
840 break;
841 case ValueBitOr:
842 compileValueBitOr();
843 break;
844 case ArithBitOr:
845 compileArithBitOr();
846 break;
847 case ArithBitXor:
848 compileArithBitXor();
849 break;
850 case ValueBitXor:
851 compileValueBitXor();
852 break;
853 case BitRShift:
854 compileBitRShift();
855 break;
856 case BitLShift:
857 compileBitLShift();
858 break;
859 case BitURShift:
860 compileBitURShift();
861 break;
862 case UInt32ToNumber:
863 compileUInt32ToNumber();
864 break;
865 case CheckStructure:
866 compileCheckStructure();
867 break;
868 case CheckStructureOrEmpty:
869 compileCheckStructureOrEmpty();
870 break;
871 case CheckCell:
872 compileCheckCell();
873 break;
874 case CheckNotEmpty:
875 compileCheckNotEmpty();
876 break;
877 case AssertNotEmpty:
878 compileAssertNotEmpty();
879 break;
880 case CheckBadCell:
881 compileCheckBadCell();
882 break;
883 case CheckStringIdent:
884 compileCheckStringIdent();
885 break;
886 case GetExecutable:
887 compileGetExecutable();
888 break;
889 case Arrayify:
890 case ArrayifyToStructure:
891 compileArrayify();
892 break;
893 case PutStructure:
894 compilePutStructure();
895 break;
896 case TryGetById:
897 compileGetById(AccessType::TryGet);
898 break;
899 case GetById:
900 case GetByIdFlush:
901 compileGetById(AccessType::Get);
902 break;
903 case GetByIdWithThis:
904 compileGetByIdWithThis();
905 break;
906 case GetByIdDirect:
907 case GetByIdDirectFlush:
908 compileGetById(AccessType::GetDirect);
909 break;
910 case InById:
911 compileInById();
912 break;
913 case InByVal:
914 compileInByVal();
915 break;
916 case HasOwnProperty:
917 compileHasOwnProperty();
918 break;
919 case PutById:
920 case PutByIdDirect:
921 case PutByIdFlush:
922 compilePutById();
923 break;
924 case PutByIdWithThis:
925 compilePutByIdWithThis();
926 break;
927 case PutGetterById:
928 case PutSetterById:
929 compilePutAccessorById();
930 break;
931 case PutGetterSetterById:
932 compilePutGetterSetterById();
933 break;
934 case PutGetterByVal:
935 case PutSetterByVal:
936 compilePutAccessorByVal();
937 break;
938 case DeleteById:
939 compileDeleteById();
940 break;
941 case DeleteByVal:
942 compileDeleteByVal();
943 break;
944 case GetButterfly:
945 compileGetButterfly();
946 break;
947 case ConstantStoragePointer:
948 compileConstantStoragePointer();
949 break;
950 case GetIndexedPropertyStorage:
951 compileGetIndexedPropertyStorage();
952 break;
953 case CheckArray:
954 compileCheckArray();
955 break;
956 case GetArrayLength:
957 compileGetArrayLength();
958 break;
959 case GetVectorLength:
960 compileGetVectorLength();
961 break;
962 case CheckInBounds:
963 compileCheckInBounds();
964 break;
965 case GetByVal:
966 compileGetByVal();
967 break;
968 case GetMyArgumentByVal:
969 case GetMyArgumentByValOutOfBounds:
970 compileGetMyArgumentByVal();
971 break;
972 case GetByValWithThis:
973 compileGetByValWithThis();
974 break;
975 case PutByVal:
976 case PutByValAlias:
977 case PutByValDirect:
978 compilePutByVal();
979 break;
980 case PutByValWithThis:
981 compilePutByValWithThis();
982 break;
983 case AtomicsAdd:
984 case AtomicsAnd:
985 case AtomicsCompareExchange:
986 case AtomicsExchange:
987 case AtomicsLoad:
988 case AtomicsOr:
989 case AtomicsStore:
990 case AtomicsSub:
991 case AtomicsXor:
992 compileAtomicsReadModifyWrite();
993 break;
994 case AtomicsIsLockFree:
995 compileAtomicsIsLockFree();
996 break;
997 case DefineDataProperty:
998 compileDefineDataProperty();
999 break;
1000 case DefineAccessorProperty:
1001 compileDefineAccessorProperty();
1002 break;
1003 case ArrayPush:
1004 compileArrayPush();
1005 break;
1006 case ArrayPop:
1007 compileArrayPop();
1008 break;
1009 case ArraySlice:
1010 compileArraySlice();
1011 break;
1012 case ArrayIndexOf:
1013 compileArrayIndexOf();
1014 break;
1015 case CreateActivation:
1016 compileCreateActivation();
1017 break;
1018 case PushWithScope:
1019 compilePushWithScope();
1020 break;
1021 case NewFunction:
1022 case NewGeneratorFunction:
1023 case NewAsyncGeneratorFunction:
1024 case NewAsyncFunction:
1025 compileNewFunction();
1026 break;
1027 case CreateDirectArguments:
1028 compileCreateDirectArguments();
1029 break;
1030 case CreateScopedArguments:
1031 compileCreateScopedArguments();
1032 break;
1033 case CreateClonedArguments:
1034 compileCreateClonedArguments();
1035 break;
1036 case ObjectCreate:
1037 compileObjectCreate();
1038 break;
1039 case ObjectKeys:
1040 compileObjectKeys();
1041 break;
1042 case NewObject:
1043 compileNewObject();
1044 break;
1045 case NewStringObject:
1046 compileNewStringObject();
1047 break;
1048 case NewSymbol:
1049 compileNewSymbol();
1050 break;
1051 case NewArray:
1052 compileNewArray();
1053 break;
1054 case NewArrayWithSpread:
1055 compileNewArrayWithSpread();
1056 break;
1057 case CreateThis:
1058 compileCreateThis();
1059 break;
1060 case Spread:
1061 compileSpread();
1062 break;
1063 case NewArrayBuffer:
1064 compileNewArrayBuffer();
1065 break;
1066 case NewArrayWithSize:
1067 compileNewArrayWithSize();
1068 break;
1069 case NewTypedArray:
1070 compileNewTypedArray();
1071 break;
1072 case GetTypedArrayByteOffset:
1073 compileGetTypedArrayByteOffset();
1074 break;
1075 case GetPrototypeOf:
1076 compileGetPrototypeOf();
1077 break;
1078 case AllocatePropertyStorage:
1079 compileAllocatePropertyStorage();
1080 break;
1081 case ReallocatePropertyStorage:
1082 compileReallocatePropertyStorage();
1083 break;
1084 case NukeStructureAndSetButterfly:
1085 compileNukeStructureAndSetButterfly();
1086 break;
1087 case ToNumber:
1088 compileToNumber();
1089 break;
1090 case ToString:
1091 case CallStringConstructor:
1092 case StringValueOf:
1093 compileToStringOrCallStringConstructorOrStringValueOf();
1094 break;
1095 case ToPrimitive:
1096 compileToPrimitive();
1097 break;
1098 case MakeRope:
1099 compileMakeRope();
1100 break;
1101 case StringCharAt:
1102 compileStringCharAt();
1103 break;
1104 case StringCharCodeAt:
1105 compileStringCharCodeAt();
1106 break;
1107 case StringFromCharCode:
1108 compileStringFromCharCode();
1109 break;
1110 case GetByOffset:
1111 case GetGetterSetterByOffset:
1112 compileGetByOffset();
1113 break;
1114 case GetGetter:
1115 compileGetGetter();
1116 break;
1117 case GetSetter:
1118 compileGetSetter();
1119 break;
1120 case MultiGetByOffset:
1121 compileMultiGetByOffset();
1122 break;
1123 case PutByOffset:
1124 compilePutByOffset();
1125 break;
1126 case MultiPutByOffset:
1127 compileMultiPutByOffset();
1128 break;
1129 case MatchStructure:
1130 compileMatchStructure();
1131 break;
1132 case GetGlobalVar:
1133 case GetGlobalLexicalVariable:
1134 compileGetGlobalVariable();
1135 break;
1136 case PutGlobalVariable:
1137 compilePutGlobalVariable();
1138 break;
1139 case NotifyWrite:
1140 compileNotifyWrite();
1141 break;
1142 case GetCallee:
1143 compileGetCallee();
1144 break;
1145 case SetCallee:
1146 compileSetCallee();
1147 break;
1148 case GetArgumentCountIncludingThis:
1149 compileGetArgumentCountIncludingThis();
1150 break;
1151 case SetArgumentCountIncludingThis:
1152 compileSetArgumentCountIncludingThis();
1153 break;
1154 case GetScope:
1155 compileGetScope();
1156 break;
1157 case SkipScope:
1158 compileSkipScope();
1159 break;
1160 case GetGlobalObject:
1161 compileGetGlobalObject();
1162 break;
1163 case GetGlobalThis:
1164 compileGetGlobalThis();
1165 break;
1166 case GetClosureVar:
1167 compileGetClosureVar();
1168 break;
1169 case PutClosureVar:
1170 compilePutClosureVar();
1171 break;
1172 case GetFromArguments:
1173 compileGetFromArguments();
1174 break;
1175 case PutToArguments:
1176 compilePutToArguments();
1177 break;
1178 case GetArgument:
1179 compileGetArgument();
1180 break;
1181 case CompareEq:
1182 compileCompareEq();
1183 break;
1184 case CompareStrictEq:
1185 compileCompareStrictEq();
1186 break;
1187 case CompareLess:
1188 compileCompareLess();
1189 break;
1190 case CompareLessEq:
1191 compileCompareLessEq();
1192 break;
1193 case CompareGreater:
1194 compileCompareGreater();
1195 break;
1196 case CompareGreaterEq:
1197 compileCompareGreaterEq();
1198 break;
1199 case CompareBelow:
1200 compileCompareBelow();
1201 break;
1202 case CompareBelowEq:
1203 compileCompareBelowEq();
1204 break;
1205 case CompareEqPtr:
1206 compileCompareEqPtr();
1207 break;
1208 case SameValue:
1209 compileSameValue();
1210 break;
1211 case LogicalNot:
1212 compileLogicalNot();
1213 break;
1214 case Call:
1215 case TailCallInlinedCaller:
1216 case Construct:
1217 compileCallOrConstruct();
1218 break;
1219 case DirectCall:
1220 case DirectTailCallInlinedCaller:
1221 case DirectConstruct:
1222 case DirectTailCall:
1223 compileDirectCallOrConstruct();
1224 break;
1225 case TailCall:
1226 compileTailCall();
1227 break;
1228 case CallVarargs:
1229 case CallForwardVarargs:
1230 case TailCallVarargs:
1231 case TailCallVarargsInlinedCaller:
1232 case TailCallForwardVarargs:
1233 case TailCallForwardVarargsInlinedCaller:
1234 case ConstructVarargs:
1235 case ConstructForwardVarargs:
1236 compileCallOrConstructVarargs();
1237 break;
1238 case CallEval:
1239 compileCallEval();
1240 break;
1241 case LoadVarargs:
1242 compileLoadVarargs();
1243 break;
1244 case ForwardVarargs:
1245 compileForwardVarargs();
1246 break;
1247 case DFG::Jump:
1248 compileJump();
1249 break;
1250 case DFG::Branch:
1251 compileBranch();
1252 break;
1253 case DFG::Switch:
1254 compileSwitch();
1255 break;
1256 case DFG::EntrySwitch:
1257 compileEntrySwitch();
1258 break;
1259 case DFG::Return:
1260 compileReturn();
1261 break;
1262 case ForceOSRExit:
1263 compileForceOSRExit();
1264 break;
1265 case CPUIntrinsic:
1266#if CPU(X86_64)
1267 compileCPUIntrinsic();
1268#else
1269 RELEASE_ASSERT_NOT_REACHED();
1270#endif
1271 break;
1272 case Throw:
1273 compileThrow();
1274 break;
1275 case ThrowStaticError:
1276 compileThrowStaticError();
1277 break;
1278 case InvalidationPoint:
1279 compileInvalidationPoint();
1280 break;
1281 case IsEmpty:
1282 compileIsEmpty();
1283 break;
1284 case IsUndefined:
1285 compileIsUndefined();
1286 break;
1287 case IsUndefinedOrNull:
1288 compileIsUndefinedOrNull();
1289 break;
1290 case IsBoolean:
1291 compileIsBoolean();
1292 break;
1293 case IsNumber:
1294 compileIsNumber();
1295 break;
1296 case NumberIsInteger:
1297 compileNumberIsInteger();
1298 break;
1299 case IsCellWithType:
1300 compileIsCellWithType();
1301 break;
1302 case MapHash:
1303 compileMapHash();
1304 break;
1305 case NormalizeMapKey:
1306 compileNormalizeMapKey();
1307 break;
1308 case GetMapBucket:
1309 compileGetMapBucket();
1310 break;
1311 case GetMapBucketHead:
1312 compileGetMapBucketHead();
1313 break;
1314 case GetMapBucketNext:
1315 compileGetMapBucketNext();
1316 break;
1317 case LoadKeyFromMapBucket:
1318 compileLoadKeyFromMapBucket();
1319 break;
1320 case LoadValueFromMapBucket:
1321 compileLoadValueFromMapBucket();
1322 break;
1323 case ExtractValueFromWeakMapGet:
1324 compileExtractValueFromWeakMapGet();
1325 break;
1326 case SetAdd:
1327 compileSetAdd();
1328 break;
1329 case MapSet:
1330 compileMapSet();
1331 break;
1332 case WeakMapGet:
1333 compileWeakMapGet();
1334 break;
1335 case WeakSetAdd:
1336 compileWeakSetAdd();
1337 break;
1338 case WeakMapSet:
1339 compileWeakMapSet();
1340 break;
1341 case IsObject:
1342 compileIsObject();
1343 break;
1344 case IsObjectOrNull:
1345 compileIsObjectOrNull();
1346 break;
1347 case IsFunction:
1348 compileIsFunction();
1349 break;
1350 case IsTypedArrayView:
1351 compileIsTypedArrayView();
1352 break;
1353 case ParseInt:
1354 compileParseInt();
1355 break;
1356 case TypeOf:
1357 compileTypeOf();
1358 break;
1359 case CheckTypeInfoFlags:
1360 compileCheckTypeInfoFlags();
1361 break;
1362 case OverridesHasInstance:
1363 compileOverridesHasInstance();
1364 break;
1365 case InstanceOf:
1366 compileInstanceOf();
1367 break;
1368 case InstanceOfCustom:
1369 compileInstanceOfCustom();
1370 break;
1371 case CountExecution:
1372 compileCountExecution();
1373 break;
1374 case SuperSamplerBegin:
1375 compileSuperSamplerBegin();
1376 break;
1377 case SuperSamplerEnd:
1378 compileSuperSamplerEnd();
1379 break;
1380 case StoreBarrier:
1381 case FencedStoreBarrier:
1382 compileStoreBarrier();
1383 break;
1384 case HasIndexedProperty:
1385 compileHasIndexedProperty();
1386 break;
1387 case HasGenericProperty:
1388 compileHasGenericProperty();
1389 break;
1390 case HasStructureProperty:
1391 compileHasStructureProperty();
1392 break;
1393 case GetDirectPname:
1394 compileGetDirectPname();
1395 break;
1396 case GetEnumerableLength:
1397 compileGetEnumerableLength();
1398 break;
1399 case GetPropertyEnumerator:
1400 compileGetPropertyEnumerator();
1401 break;
1402 case GetEnumeratorStructurePname:
1403 compileGetEnumeratorStructurePname();
1404 break;
1405 case GetEnumeratorGenericPname:
1406 compileGetEnumeratorGenericPname();
1407 break;
1408 case ToIndexString:
1409 compileToIndexString();
1410 break;
1411 case CheckStructureImmediate:
1412 compileCheckStructureImmediate();
1413 break;
1414 case MaterializeNewObject:
1415 compileMaterializeNewObject();
1416 break;
1417 case MaterializeCreateActivation:
1418 compileMaterializeCreateActivation();
1419 break;
1420 case CheckTraps:
1421 compileCheckTraps();
1422 break;
1423 case CreateRest:
1424 compileCreateRest();
1425 break;
1426 case GetRestLength:
1427 compileGetRestLength();
1428 break;
1429 case RegExpExec:
1430 compileRegExpExec();
1431 break;
1432 case RegExpExecNonGlobalOrSticky:
1433 compileRegExpExecNonGlobalOrSticky();
1434 break;
1435 case RegExpTest:
1436 compileRegExpTest();
1437 break;
1438 case RegExpMatchFast:
1439 compileRegExpMatchFast();
1440 break;
1441 case RegExpMatchFastGlobal:
1442 compileRegExpMatchFastGlobal();
1443 break;
1444 case NewRegexp:
1445 compileNewRegexp();
1446 break;
1447 case SetFunctionName:
1448 compileSetFunctionName();
1449 break;
1450 case StringReplace:
1451 case StringReplaceRegExp:
1452 compileStringReplace();
1453 break;
1454 case GetRegExpObjectLastIndex:
1455 compileGetRegExpObjectLastIndex();
1456 break;
1457 case SetRegExpObjectLastIndex:
1458 compileSetRegExpObjectLastIndex();
1459 break;
1460 case LogShadowChickenPrologue:
1461 compileLogShadowChickenPrologue();
1462 break;
1463 case LogShadowChickenTail:
1464 compileLogShadowChickenTail();
1465 break;
1466 case RecordRegExpCachedResult:
1467 compileRecordRegExpCachedResult();
1468 break;
1469 case ResolveScopeForHoistingFuncDeclInEval:
1470 compileResolveScopeForHoistingFuncDeclInEval();
1471 break;
1472 case ResolveScope:
1473 compileResolveScope();
1474 break;
1475 case GetDynamicVar:
1476 compileGetDynamicVar();
1477 break;
1478 case PutDynamicVar:
1479 compilePutDynamicVar();
1480 break;
1481 case Unreachable:
1482 compileUnreachable();
1483 break;
1484 case StringSlice:
1485 compileStringSlice();
1486 break;
1487 case ToLowerCase:
1488 compileToLowerCase();
1489 break;
1490 case NumberToStringWithRadix:
1491 compileNumberToStringWithRadix();
1492 break;
1493 case NumberToStringWithValidRadixConstant:
1494 compileNumberToStringWithValidRadixConstant();
1495 break;
1496 case CheckSubClass:
1497 compileCheckSubClass();
1498 break;
1499 case CallDOM:
1500 compileCallDOM();
1501 break;
1502 case CallDOMGetter:
1503 compileCallDOMGetter();
1504 break;
1505 case FilterCallLinkStatus:
1506 case FilterGetByIdStatus:
1507 case FilterPutByIdStatus:
1508 case FilterInByIdStatus:
1509 compileFilterICStatus();
1510 break;
1511 case DataViewGetInt:
1512 case DataViewGetFloat:
1513 compileDataViewGet();
1514 break;
1515 case DataViewSet:
1516 compileDataViewSet();
1517 break;
1518
1519 case PhantomLocal:
1520 case LoopHint:
1521 case MovHint:
1522 case ZombieHint:
1523 case ExitOK:
1524 case PhantomNewObject:
1525 case PhantomNewFunction:
1526 case PhantomNewGeneratorFunction:
1527 case PhantomNewAsyncGeneratorFunction:
1528 case PhantomNewAsyncFunction:
1529 case PhantomCreateActivation:
1530 case PhantomDirectArguments:
1531 case PhantomCreateRest:
1532 case PhantomSpread:
1533 case PhantomNewArrayWithSpread:
1534 case PhantomNewArrayBuffer:
1535 case PhantomClonedArguments:
1536 case PhantomNewRegexp:
1537 case PutHint:
1538 case BottomValue:
1539 case KillStack:
1540 case InitializeEntrypointArguments:
1541 break;
1542 default:
1543 DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
1544 break;
1545 }
1546
1547 if (m_node->isTerminal())
1548 return false;
1549
1550 if (!m_state.isValid()) {
1551 safelyInvalidateAfterTermination();
1552 return false;
1553 }
1554
1555 m_availabilityCalculator.executeNode(m_node);
1556 m_interpreter.executeEffects(nodeIndex);
1557
1558 return true;
1559 }
1560
1561 void compileUpsilon()
1562 {
1563 LValue upsilonValue = nullptr;
1564 switch (m_node->child1().useKind()) {
1565 case DoubleRepUse:
1566 upsilonValue = lowDouble(m_node->child1());
1567 break;
1568 case Int32Use:
1569 case KnownInt32Use:
1570 upsilonValue = lowInt32(m_node->child1());
1571 break;
1572 case Int52RepUse:
1573 upsilonValue = lowInt52(m_node->child1());
1574 break;
1575 case BooleanUse:
1576 case KnownBooleanUse:
1577 upsilonValue = lowBoolean(m_node->child1());
1578 break;
1579 case CellUse:
1580 case KnownCellUse:
1581 upsilonValue = lowCell(m_node->child1());
1582 break;
1583 case UntypedUse:
1584 upsilonValue = lowJSValue(m_node->child1());
1585 break;
1586 default:
1587 DFG_CRASH(m_graph, m_node, "Bad use kind");
1588 break;
1589 }
1590 ValueFromBlock upsilon = m_out.anchor(upsilonValue);
1591 LValue phiNode = m_phis.get(m_node->phi());
1592 m_out.addIncomingToPhi(phiNode, upsilon);
1593 }
1594
1595 void compilePhi()
1596 {
1597 LValue phi = m_phis.get(m_node);
1598 m_out.m_block->append(phi);
1599
1600 switch (m_node->flags() & NodeResultMask) {
1601 case NodeResultDouble:
1602 setDouble(phi);
1603 break;
1604 case NodeResultInt32:
1605 setInt32(phi);
1606 break;
1607 case NodeResultInt52:
1608 setInt52(phi);
1609 break;
1610 case NodeResultBoolean:
1611 setBoolean(phi);
1612 break;
1613 case NodeResultJS:
1614 setJSValue(phi);
1615 break;
1616 default:
1617 DFG_CRASH(m_graph, m_node, "Bad result type");
1618 break;
1619 }
1620 }
1621
1622 void compileDoubleConstant()
1623 {
1624 setDouble(m_out.constDouble(m_node->asNumber()));
1625 }
1626
1627 void compileInt52Constant()
1628 {
1629 int64_t value = m_node->asAnyInt();
1630
1631 setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
1632 setStrictInt52(m_out.constInt64(value));
1633 }
1634
1635 void compileLazyJSConstant()
1636 {
1637 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1638 LazyJSValue value = m_node->lazyJSValue();
1639 patchpoint->setGenerator(
1640 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1641 value.emit(jit, JSValueRegs(params[0].gpr()));
1642 });
1643 patchpoint->effects = Effects::none();
1644 setJSValue(patchpoint);
1645 }
1646
    // Converts child1 into the double representation. RealNumberUse
    // speculates int32-or-real-double; NumberUse speculates any number;
    // NotCellUse additionally converts undefined, null and booleans;
    // Int52RepUse is a pure representation change.
    void compileDoubleRep()
    {
        switch (m_node->child1().useKind()) {
        case RealNumberUse: {
            LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);

            LValue doubleValue = unboxDouble(value);

            LBasicBlock intCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // If the unboxed bits compare equal to themselves, they are a
            // non-NaN double and we can use them as-is; otherwise fall
            // through to the int32 path.
            ValueFromBlock fastResult = m_out.anchor(doubleValue);
            m_out.branch(
                m_out.doubleEqual(doubleValue, doubleValue),
                usually(continuation), rarely(intCase));

            LBasicBlock lastNext = m_out.appendTo(intCase, continuation);

            // Speculate that the remaining value is an int32 (anything else
            // fails the SpecBytecodeRealNumber check), then convert it.
            FTL_TYPE_CHECK(
                jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
                isNotInt32(value, provenType(m_node->child1()) & ~SpecDoubleReal));
            ValueFromBlock slowResult = m_out.anchor(m_out.intToDouble(unboxInt32(value)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);

            setDouble(m_out.phi(Double, fastResult, slowResult));
            return;
        }

        case NotCellUse:
        case NumberUse: {
            // NotCellUse shares the number paths with NumberUse but also
            // converts the remaining non-cell values below.
            bool shouldConvertNonNumber = m_node->child1().useKind() == NotCellUse;

            LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);

            LBasicBlock intCase = m_out.newBlock();
            LBasicBlock doubleTesting = m_out.newBlock();
            LBasicBlock doubleCase = m_out.newBlock();
            LBasicBlock nonDoubleCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isNotInt32(value, provenType(m_node->child1())),
                unsure(doubleTesting), unsure(intCase));

            LBasicBlock lastNext = m_out.appendTo(intCase, doubleTesting);

            // Boxed int32: convert to double.
            ValueFromBlock intToDouble = m_out.anchor(
                m_out.intToDouble(unboxInt32(value)));
            m_out.jump(continuation);

            m_out.appendTo(doubleTesting, doubleCase);
            LValue valueIsNumber = isNumber(value, provenType(m_node->child1()));
            m_out.branch(valueIsNumber, usually(doubleCase), rarely(nonDoubleCase));

            m_out.appendTo(doubleCase, nonDoubleCase);
            ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(value));
            m_out.jump(continuation);

            if (shouldConvertNonNumber) {
                // Emulate ToNumber for the remaining non-cell values:
                // undefined -> NaN, null -> 0, true -> 1, false -> 0. A cell
                // reaching here fails the ~SpecCellCheck type check below.
                LBasicBlock undefinedCase = m_out.newBlock();
                LBasicBlock testNullCase = m_out.newBlock();
                LBasicBlock nullCase = m_out.newBlock();
                LBasicBlock testBooleanTrueCase = m_out.newBlock();
                LBasicBlock convertBooleanTrueCase = m_out.newBlock();
                LBasicBlock convertBooleanFalseCase = m_out.newBlock();

                m_out.appendTo(nonDoubleCase, undefinedCase);
                LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(ValueUndefined));
                m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase));

                m_out.appendTo(undefinedCase, testNullCase);
                ValueFromBlock convertedUndefined = m_out.anchor(m_out.constDouble(PNaN));
                m_out.jump(continuation);

                m_out.appendTo(testNullCase, nullCase);
                LValue valueIsNull = m_out.equal(value, m_out.constInt64(ValueNull));
                m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase));

                m_out.appendTo(nullCase, testBooleanTrueCase);
                ValueFromBlock convertedNull = m_out.anchor(m_out.constDouble(0));
                m_out.jump(continuation);

                m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase);
                LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(ValueTrue));
                m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase));

                m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase);
                ValueFromBlock convertedTrue = m_out.anchor(m_out.constDouble(1));
                m_out.jump(continuation);

                m_out.appendTo(convertBooleanFalseCase, continuation);

                // Not false either => must have been a cell: speculate.
                LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(ValueFalse));
                FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCellCheck, valueIsNotBooleanFalse);
                ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setDouble(m_out.phi(Double, intToDouble, unboxedDouble, convertedUndefined, convertedNull, convertedTrue, convertedFalse));
                return;
            }
            // NumberUse: a non-number reaching here always fails the type
            // check, so this block never falls through.
            m_out.appendTo(nonDoubleCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecBytecodeNumber, m_out.booleanTrue);
            m_out.unreachable();

            m_out.appendTo(continuation, lastNext);

            setDouble(m_out.phi(Double, intToDouble, unboxedDouble));
            return;
        }

        case Int52RepUse: {
            setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1())));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
        }
    }
1769
1770 void compileDoubleAsInt32()
1771 {
1772 LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode()));
1773 setInt32(integerValue);
1774 }
1775
1776 void compileValueRep()
1777 {
1778 switch (m_node->child1().useKind()) {
1779 case DoubleRepUse: {
1780 LValue value = lowDouble(m_node->child1());
1781
1782 if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) {
1783 value = m_out.select(
1784 m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN));
1785 }
1786
1787 setJSValue(boxDouble(value));
1788 return;
1789 }
1790
1791 case Int52RepUse: {
1792 setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1())));
1793 return;
1794 }
1795
1796 default:
1797 DFG_CRASH(m_graph, m_node, "Bad use kind");
1798 }
1799 }
1800
1801 void compileInt52Rep()
1802 {
1803 switch (m_node->child1().useKind()) {
1804 case Int32Use:
1805 setStrictInt52(m_out.signExt32To64(lowInt32(m_node->child1())));
1806 return;
1807
1808 case AnyIntUse:
1809 setStrictInt52(
1810 jsValueToStrictInt52(
1811 m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1812 return;
1813
1814 case DoubleRepAnyIntUse:
1815 setStrictInt52(
1816 doubleToStrictInt52(
1817 m_node->child1(), lowDouble(m_node->child1())));
1818 return;
1819
1820 default:
1821 RELEASE_ASSERT_NOT_REACHED();
1822 }
1823 }
1824
    // Converts child1 to int32 according to its use kind. For Number/NotCell
    // uses, prefer a representation we already lowered (int32 first, then
    // JSValue) before re-lowering from scratch.
    void compileValueToInt32()
    {
        switch (m_node->child1().useKind()) {
        case Int52RepUse:
            setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
            break;

        case DoubleRepUse:
            setInt32(doubleToInt32(lowDouble(m_node->child1())));
            break;

        case NumberUse:
        case NotCellUse: {
            // Reuse an existing int32 lowering if one is cached.
            LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
            if (isValid(value)) {
                setInt32(value.value());
                break;
            }

            // Otherwise reuse a cached JSValue lowering and convert it.
            value = m_jsValueValues.get(m_node->child1().node());
            if (isValid(value)) {
                setInt32(numberOrNotCellToInt32(m_node->child1(), value.value()));
                break;
            }

            // We'll basically just get here for constants. But it's good to have this
            // catch-all since we often add new representations into the mix.
            setInt32(
                numberOrNotCellToInt32(
                    m_node->child1(),
                    lowJSValue(m_node->child1(), ManualOperandSpeculation)));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
1864
    // Lowers BooleanToNumber. For BooleanUse the result is an int32 0/1.
    // For UntypedUse the result is a JSValue: a boolean becomes the boxed
    // number 0 or 1, anything else passes through unchanged.
    void compileBooleanToNumber()
    {
        switch (m_node->child1().useKind()) {
        case BooleanUse: {
            setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), Int32));
            return;
        }

        case UntypedUse: {
            LValue value = lowJSValue(m_node->child1());

            if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) {
                // Proven boolean or boolean-like int32: the low bit is the
                // numeric value, so no branching is needed.
                setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One));
                return;
            }

            LBasicBlock booleanCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Non-booleans flow through unchanged.
            ValueFromBlock notBooleanResult = m_out.anchor(value);
            m_out.branch(
                isBoolean(value, provenType(m_node->child1())),
                unsure(booleanCase), unsure(continuation));

            // Booleans: unbox to 0/1, then re-box as an int32 JSValue.
            LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
            ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
                m_out.zeroExt(unboxBoolean(value), Int64), m_tagTypeNumber));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, booleanResult, notBooleanResult));
            return;
        }

        default:
            RELEASE_ASSERT_NOT_REACHED();
            return;
        }
    }
1904
1905 void compileExtractOSREntryLocal()
1906 {
1907 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
1908 m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
1909 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
1910 }
1911
1912 void compileExtractCatchLocal()
1913 {
1914 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_ftlState.jitCode->common.catchOSREntryBuffer->dataBuffer());
1915 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->catchOSREntryIndex())));
1916 }
1917
1918 void compileClearCatchLocals()
1919 {
1920 ScratchBuffer* scratchBuffer = m_ftlState.jitCode->common.catchOSREntryBuffer;
1921 ASSERT(scratchBuffer);
1922 m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));
1923 }
1924
    // Loads a flushed local from the stack, using the flush format to pick
    // the load width and result representation.
    void compileGetStack()
    {
        StackAccessData* data = m_node->stackAccessData();
        AbstractValue& value = m_state.operand(data->local);

        DFG_ASSERT(m_graph, m_node, isConcrete(data->format), data->format);

        switch (data->format) {
        case FlushedDouble:
            setDouble(m_out.loadDouble(addressFor(data->machineLocal)));
            break;
        case FlushedInt52:
            setInt52(m_out.load64(addressFor(data->machineLocal)));
            break;
        default:
            // If the abstract interpreter proved the slot holds an int32,
            // load just the payload; otherwise load the whole JSValue.
            if (isInt32Speculation(value.m_type))
                setInt32(m_out.load32(payloadFor(data->machineLocal)));
            else
                setJSValue(m_out.load64(addressFor(data->machineLocal)));
            break;
        }
    }
1947
1948 void compilePutStack()
1949 {
1950 StackAccessData* data = m_node->stackAccessData();
1951 switch (data->format) {
1952 case FlushedJSValue: {
1953 LValue value = lowJSValue(m_node->child1());
1954 m_out.store64(value, addressFor(data->machineLocal));
1955 break;
1956 }
1957
1958 case FlushedDouble: {
1959 LValue value = lowDouble(m_node->child1());
1960 m_out.storeDouble(value, addressFor(data->machineLocal));
1961 break;
1962 }
1963
1964 case FlushedInt32: {
1965 LValue value = lowInt32(m_node->child1());
1966 m_out.store32(value, payloadFor(data->machineLocal));
1967 break;
1968 }
1969
1970 case FlushedInt52: {
1971 LValue value = lowInt52(m_node->child1());
1972 m_out.store64(value, addressFor(data->machineLocal));
1973 break;
1974 }
1975
1976 case FlushedCell: {
1977 LValue value = lowCell(m_node->child1());
1978 m_out.store64(value, addressFor(data->machineLocal));
1979 break;
1980 }
1981
1982 case FlushedBoolean: {
1983 speculateBoolean(m_node->child1());
1984 m_out.store64(
1985 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1986 addressFor(data->machineLocal));
1987 break;
1988 }
1989
1990 default:
1991 DFG_CRASH(m_graph, m_node, "Bad flush format");
1992 break;
1993 }
1994 }
1995
    // Produces no value, but still performs any speculation checks that the
    // node's child edges require.
    void compileNoOp()
    {
        DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
    }
2000
    // Lowers ToObject / CallObjectConstructor. Objects pass through on the
    // fast path; everything else goes to a runtime call. The two ops differ
    // only in the slow-path operation and its extra arguments (the
    // identifier passed to operationToObject is presumably used for error
    // reporting on null/undefined — confirm in the operation's definition).
    void compileToObjectOrCallObjectConstructor()
    {
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
        ValueFromBlock fastResult = m_out.anchor(value);
        m_out.branch(isObject(value), usually(continuation), rarely(slowCase));

        m_out.appendTo(slowCase, continuation);

        ValueFromBlock slowResult;
        if (m_node->op() == ToObject) {
            auto* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
            slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationToObject), m_callFrame, weakPointer(globalObject), value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
        } else
            slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationCallObjectConstructor), m_callFrame, frozenPointer(m_node->cellOperand()), value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
    }
2028
    // Lowers ToThis. Fast path: a cell whose type-info flags do not have
    // OverridesToThis set is returned unchanged. Otherwise call into the
    // runtime, choosing the strict or sloppy variant based on the code's
    // strictness.
    void compileToThis()
    {
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
        ValueFromBlock fastResult = m_out.anchor(value);
        // Zero means the OverridesToThis bit is clear: stay on the fast path.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
                m_out.constInt32(OverridesToThis)),
            usually(continuation), rarely(slowCase));

        m_out.appendTo(slowCase, continuation);
        J_JITOperation_EJ function;
        if (m_graph.isStrictModeFor(m_node->origin.semantic))
            function = operationToThisStrict;
        else
            function = operationToThis;
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int64, m_out.operation(function), m_callFrame, value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
    }
2061
2062 void compileValueAdd()
2063 {
2064 if (m_node->isBinaryUseKind(BigIntUse)) {
2065 LValue left = lowBigInt(m_node->child1());
2066 LValue right = lowBigInt(m_node->child2());
2067
2068 LValue result = vmCall(pointerType(), m_out.operation(operationAddBigInt), m_callFrame, left, right);
2069 setJSValue(result);
2070 return;
2071 }
2072
2073 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2074 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2075 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2076 auto repatchingFunction = operationValueAddOptimize;
2077 auto nonRepatchingFunction = operationValueAdd;
2078 compileBinaryMathIC<JITAddGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2079 }
2080
2081 void compileValueSub()
2082 {
2083 if (m_node->isBinaryUseKind(BigIntUse)) {
2084 LValue left = lowBigInt(m_node->child1());
2085 LValue right = lowBigInt(m_node->child2());
2086
2087 LValue result = vmCall(pointerType(), m_out.operation(operationSubBigInt), m_callFrame, left, right);
2088 setJSValue(result);
2089 return;
2090 }
2091
2092 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2093 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2094 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2095 auto repatchingFunction = operationValueSubOptimize;
2096 auto nonRepatchingFunction = operationValueSub;
2097 compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2098 }
2099
2100 void compileValueMul()
2101 {
2102 if (m_node->isBinaryUseKind(BigIntUse)) {
2103 LValue left = lowBigInt(m_node->child1());
2104 LValue right = lowBigInt(m_node->child2());
2105
2106 LValue result = vmCall(Int64, m_out.operation(operationMulBigInt), m_callFrame, left, right);
2107 setJSValue(result);
2108 return;
2109 }
2110
2111 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2112 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2113 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2114 auto repatchingFunction = operationValueMulOptimize;
2115 auto nonRepatchingFunction = operationValueMul;
2116 compileBinaryMathIC<JITMulGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2117 }
2118
    // Lowers a unary arithmetic node through a MathIC, mirroring the baseline
    // JIT's inline-cache scheme: try to emit a fast inline sequence; on
    // failure (or on the slow path) call the runtime, optionally via a
    // repatching function that can upgrade the IC in place.
    template <typename Generator, typename Func1, typename Func2,
        typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
    void compileUnaryMathIC(ArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
    {
        Node* node = m_node;

        LValue operand = lowJSValue(node->child1());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(operand);
        // The IC generator relies on the tag registers holding their canonical
        // values, so pin them.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        // Captured by value below; the generator lambda outlives this frame.
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

#if ENABLE(MATH_IC_STATS)
                auto inlineStart = jit.label();
#endif

                Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
                JITUnaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile);
                // params[0] is the patchpoint result; params[1] is the appended
                // operand; gpScratch(0) is the scratch register requested above.
                mathIC->m_generator = Generator(JSValueRegs(params[0].gpr()), JSValueRegs(params[1].gpr()), params.gpScratch(0));

                // The FTL does not emit arithmetic profiling in this inline path.
                bool shouldEmitProfiling = false;
                bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);

                if (generatedInline) {
                    ASSERT(!mathICGenerationState->slowPathJumps.empty());
                    auto done = jit.label();
                    // Emit the slow path out of line, after the main body.
                    params.addLatePath([=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);
                        mathICGenerationState->slowPathJumps.link(&jit);
                        mathICGenerationState->slowPathStart = jit.label();
#if ENABLE(MATH_IC_STATS)
                        auto slowPathStart = jit.label();
#endif

                        if (mathICGenerationState->shouldSlowPathRepatch) {
                            // Slow-path call that may repatch the IC with a
                            // better inline sequence for the observed types.
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                                repatchingFunction, params[0].gpr(), params[1].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
                            mathICGenerationState->slowPathCall = call.call();
                        } else {
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
                                exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr());
                            mathICGenerationState->slowPathCall = call.call();
                        }
                        jit.jump().linkTo(done, &jit);

                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
                        });

#if ENABLE(MATH_IC_STATS)
                        auto slowPathEnd = jit.label();
                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
                            mathIC->m_generatedCodeSize += size;
                        });
#endif
                    });
                } else {
                    // No inline path was possible; always call the runtime.
                    callOperation(
                        *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                        nonRepatchingFunction, params[0].gpr(), params[1].gpr());
                }

#if ENABLE(MATH_IC_STATS)
                auto inlineEnd = jit.label();
                jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                    size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
                    mathIC->m_generatedCodeSize += size;
                });
#endif
            });

        setJSValue(patchpoint);
    }
2204
    // Lowers a binary arithmetic node on untyped operands through a MathIC,
    // mirroring the baseline JIT's inline-cache scheme: try to emit a fast
    // inline sequence; on failure (or on the slow path) call the runtime,
    // optionally via a repatching function that can upgrade the IC in place.
    template <typename Generator, typename Func1, typename Func2,
        typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
    void compileBinaryMathIC(ArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
    {
        Node* node = m_node;

        LValue left = lowJSValue(node->child1());
        LValue right = lowJSValue(node->child2());

        // Static type knowledge about each operand, used by the generator to
        // specialize the inline code it emits.
        SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
        SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(left);
        patchpoint->appendSomeRegister(right);
        // The IC generator relies on the tag registers holding their canonical
        // values, so pin them.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->numFPScratchRegisters = 2;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        // Captured by value below; the generator lambda outlives this frame.
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);


                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

#if ENABLE(MATH_IC_STATS)
                auto inlineStart = jit.label();
#endif

                Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
                JITBinaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile);
                // params[0] is the patchpoint result; params[1]/params[2] are
                // the appended left/right operands.
                mathIC->m_generator = Generator(leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.fpScratch(0),
                    params.fpScratch(1), params.gpScratch(0), InvalidFPRReg);

                // The FTL does not emit arithmetic profiling in this inline path.
                bool shouldEmitProfiling = false;
                bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);

                if (generatedInline) {
                    ASSERT(!mathICGenerationState->slowPathJumps.empty());
                    auto done = jit.label();
                    // Emit the slow path out of line, after the main body.
                    params.addLatePath([=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);
                        mathICGenerationState->slowPathJumps.link(&jit);
                        mathICGenerationState->slowPathStart = jit.label();
#if ENABLE(MATH_IC_STATS)
                        auto slowPathStart = jit.label();
#endif

                        if (mathICGenerationState->shouldSlowPathRepatch) {
                            // Slow-path call that may repatch the IC with a
                            // better inline sequence for the observed types.
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                                repatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
                            mathICGenerationState->slowPathCall = call.call();
                        } else {
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
                                exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
                            mathICGenerationState->slowPathCall = call.call();
                        }
                        jit.jump().linkTo(done, &jit);

                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
                        });

#if ENABLE(MATH_IC_STATS)
                        auto slowPathEnd = jit.label();
                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
                            mathIC->m_generatedCodeSize += size;
                        });
#endif
                    });
                } else {
                    // No inline path was possible; always call the runtime.
                    callOperation(
                        *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                        nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
                }

#if ENABLE(MATH_IC_STATS)
                auto inlineEnd = jit.label();
                jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                    size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
                    mathIC->m_generatedCodeSize += size;
                });
#endif
            });

        setJSValue(patchpoint);
    }
2300
2301 void compileStrCat()
2302 {
2303 LValue result;
2304 if (m_node->child3()) {
2305 result = vmCall(
2306 Int64, m_out.operation(operationStrCat3), m_callFrame,
2307 lowJSValue(m_node->child1(), ManualOperandSpeculation),
2308 lowJSValue(m_node->child2(), ManualOperandSpeculation),
2309 lowJSValue(m_node->child3(), ManualOperandSpeculation));
2310 } else {
2311 result = vmCall(
2312 Int64, m_out.operation(operationStrCat2), m_callFrame,
2313 lowJSValue(m_node->child1(), ManualOperandSpeculation),
2314 lowJSValue(m_node->child2(), ManualOperandSpeculation));
2315 }
2316 setJSValue(result);
2317 }
2318
    void compileArithAddOrSub()
    {
        // Lowers ArithAdd/ArithSub. Typed cases emit (possibly speculative)
        // machine arithmetic; the untyped case (sub only) goes through a
        // binary math IC.
        bool isSub = m_node->op() == ArithSub;
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());

            if (!shouldCheckOverflow(m_node->arithMode())) {
                // Unchecked mode: plain wrapping int32 arithmetic.
                setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right));
                break;
            }

            // Checked mode: OSR exit on int32 overflow.
            CheckValue* result =
                isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
            blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
            setInt32(result);
            break;
        }

        case Int52RepUse: {
            if (!abstractValue(m_node->child1()).couldBeType(SpecNonInt32AsInt52)
                && !abstractValue(m_node->child2()).couldBeType(SpecNonInt32AsInt52)) {
                // Both operands are provably int32-sized, so their sum or
                // difference fits in 52 bits without an overflow check. Use
                // whichever Int52 representation child1 already has and lower
                // child2 to match.
                Int52Kind kind;
                LValue left = lowWhicheverInt52(m_node->child1(), kind);
                LValue right = lowInt52(m_node->child2(), kind);
                setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
                break;
            }

            LValue left = lowInt52(m_node->child1());
            LValue right = lowInt52(m_node->child2());
            CheckValue* result =
                isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
            blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
            setInt52(result);
            break;
        }

        case DoubleRepUse: {
            LValue C1 = lowDouble(m_node->child1());
            LValue C2 = lowDouble(m_node->child2());

            setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
            break;
        }

        case UntypedUse: {
            // Only ArithSub supports UntypedUse here.
            if (!isSub) {
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                break;
            }

            // Use the baseline code block's arith profile so the IC can
            // specialize on previously observed operand types.
            CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
            unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
            ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
            auto repatchingFunction = operationValueSubOptimize;
            auto nonRepatchingFunction = operationValueSub;
            compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2386
2387 void compileArithClz32()
2388 {
2389 if (m_node->child1().useKind() == Int32Use || m_node->child1().useKind() == KnownInt32Use) {
2390 LValue operand = lowInt32(m_node->child1());
2391 setInt32(m_out.ctlz32(operand));
2392 return;
2393 }
2394 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2395 LValue argument = lowJSValue(m_node->child1());
2396 LValue result = vmCall(Int32, m_out.operation(operationArithClz32), m_callFrame, argument);
2397 setInt32(result);
2398 }
2399
    void compileArithMul()
    {
        // Lowers ArithMul for int32, Int52 and double operands, with optional
        // overflow and negative-zero speculation.
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());

            LValue result;

            if (!shouldCheckOverflow(m_node->arithMode()))
                result = m_out.mul(left, right);
            else {
                // Checked multiply: OSR exit on int32 overflow.
                CheckValue* speculation = m_out.speculateMul(left, right);
                blessSpeculation(speculation, Overflow, noValue(), nullptr, m_origin);
                result = speculation;
            }

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // A zero product stands for -0 when either operand was
                // negative (e.g. -1 * 0); int32 cannot represent -0, so exit.
                LBasicBlock slowCase = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.notZero32(result), usually(continuation), rarely(slowCase));

                LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int32Zero));
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int32Zero));
                m_out.jump(continuation);
                m_out.appendTo(continuation, lastNext);
            }

            setInt32(result);
            break;
        }

        case Int52RepUse: {
            // The operands are lowered in opposite Int52 representations (one
            // shifted, one strict), which keeps the product correctly scaled —
            // see Int52Kind.
            Int52Kind kind;
            LValue left = lowWhicheverInt52(m_node->child1(), kind);
            LValue right = lowInt52(m_node->child2(), opposite(kind));

            CheckValue* result = m_out.speculateMul(left, right);
            blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // Same -0 check as the int32 case, on 64-bit values.
                LBasicBlock slowCase = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.notZero64(result), usually(continuation), rarely(slowCase));

                LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int64Zero));
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int64Zero));
                m_out.jump(continuation);
                m_out.appendTo(continuation, lastNext);
            }

            setInt52(result);
            break;
        }

        case DoubleRepUse: {
            setDouble(
                m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2472
2473 void compileValueDiv()
2474 {
2475 if (m_node->isBinaryUseKind(BigIntUse)) {
2476 LValue left = lowBigInt(m_node->child1());
2477 LValue right = lowBigInt(m_node->child2());
2478
2479 LValue result = vmCall(pointerType(), m_out.operation(operationDivBigInt), m_callFrame, left, right);
2480 setJSValue(result);
2481 return;
2482 }
2483
2484 emitBinarySnippet<JITDivGenerator, NeedScratchFPR>(operationValueDiv);
2485 }
2486
2487 void compileArithDiv()
2488 {
2489 switch (m_node->binaryUseKind()) {
2490 case Int32Use: {
2491 LValue numerator = lowInt32(m_node->child1());
2492 LValue denominator = lowInt32(m_node->child2());
2493
2494 if (shouldCheckNegativeZero(m_node->arithMode())) {
2495 LBasicBlock zeroNumerator = m_out.newBlock();
2496 LBasicBlock numeratorContinuation = m_out.newBlock();
2497
2498 m_out.branch(
2499 m_out.isZero32(numerator),
2500 rarely(zeroNumerator), usually(numeratorContinuation));
2501
2502 LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);
2503
2504 speculate(
2505 NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero));
2506
2507 m_out.jump(numeratorContinuation);
2508
2509 m_out.appendTo(numeratorContinuation, innerLastNext);
2510 }
2511
2512 if (shouldCheckOverflow(m_node->arithMode())) {
2513 LBasicBlock unsafeDenominator = m_out.newBlock();
2514 LBasicBlock continuation = m_out.newBlock();
2515
2516 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
2517 m_out.branch(
2518 m_out.above(adjustedDenominator, m_out.int32One),
2519 usually(continuation), rarely(unsafeDenominator));
2520
2521 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
2522 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
2523 speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
2524 speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
2525 m_out.jump(continuation);
2526
2527 m_out.appendTo(continuation, lastNext);
2528 LValue result = m_out.div(numerator, denominator);
2529 speculate(
2530 Overflow, noValue(), 0,
2531 m_out.notEqual(m_out.mul(result, denominator), numerator));
2532 setInt32(result);
2533 } else
2534 setInt32(m_out.chillDiv(numerator, denominator));
2535
2536 break;
2537 }
2538
2539 case DoubleRepUse: {
2540 setDouble(m_out.doubleDiv(
2541 lowDouble(m_node->child1()), lowDouble(m_node->child2())));
2542 break;
2543 }
2544
2545 default:
2546 DFG_CRASH(m_graph, m_node, "Bad use kind");
2547 break;
2548 }
2549 }
2550
2551 void compileValueMod()
2552 {
2553 if (m_node->binaryUseKind() == BigIntUse) {
2554 LValue left = lowBigInt(m_node->child1());
2555 LValue right = lowBigInt(m_node->child2());
2556
2557 LValue result = vmCall(pointerType(), m_out.operation(operationModBigInt), m_callFrame, left, right);
2558 setJSValue(result);
2559 return;
2560 }
2561
2562 DFG_ASSERT(m_graph, m_node, m_node->binaryUseKind() == UntypedUse, m_node->binaryUseKind());
2563 LValue left = lowJSValue(m_node->child1());
2564 LValue right = lowJSValue(m_node->child2());
2565 LValue result = vmCall(Int64, m_out.operation(operationValueMod), m_callFrame, left, right);
2566 setJSValue(result);
2567 }
2568
2569 void compileArithMod()
2570 {
2571 switch (m_node->binaryUseKind()) {
2572 case Int32Use: {
2573 LValue numerator = lowInt32(m_node->child1());
2574 LValue denominator = lowInt32(m_node->child2());
2575
2576 LValue remainder;
2577 if (shouldCheckOverflow(m_node->arithMode())) {
2578 LBasicBlock unsafeDenominator = m_out.newBlock();
2579 LBasicBlock continuation = m_out.newBlock();
2580
2581 LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
2582 m_out.branch(
2583 m_out.above(adjustedDenominator, m_out.int32One),
2584 usually(continuation), rarely(unsafeDenominator));
2585
2586 LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
2587 LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
2588 speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
2589 speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
2590 m_out.jump(continuation);
2591
2592 m_out.appendTo(continuation, lastNext);
2593 LValue result = m_out.mod(numerator, denominator);
2594 remainder = result;
2595 } else
2596 remainder = m_out.chillMod(numerator, denominator);
2597
2598 if (shouldCheckNegativeZero(m_node->arithMode())) {
2599 LBasicBlock negativeNumerator = m_out.newBlock();
2600 LBasicBlock numeratorContinuation = m_out.newBlock();
2601
2602 m_out.branch(
2603 m_out.lessThan(numerator, m_out.int32Zero),
2604 unsure(negativeNumerator), unsure(numeratorContinuation));
2605
2606 LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation);
2607
2608 speculate(NegativeZero, noValue(), 0, m_out.isZero32(remainder));
2609
2610 m_out.jump(numeratorContinuation);
2611
2612 m_out.appendTo(numeratorContinuation, innerLastNext);
2613 }
2614
2615 setInt32(remainder);
2616 break;
2617 }
2618
2619 case DoubleRepUse: {
2620 setDouble(
2621 m_out.doubleMod(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
2622 break;
2623 }
2624
2625 default:
2626 DFG_CRASH(m_graph, m_node, "Bad use kind");
2627 break;
2628 }
2629 }
2630
    void compileArithMinOrMax()
    {
        // Lowers ArithMin/ArithMax for int32 and double operands.
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());

            // Branchless select: min picks left when left < right; max picks
            // left when right < left; ties keep right.
            setInt32(
                m_out.select(
                    m_node->op() == ArithMin
                        ? m_out.lessThan(left, right)
                        : m_out.lessThan(right, left),
                    left, right));
            break;
        }

        case DoubleRepUse: {
            LValue left = lowDouble(m_node->child1());
            LValue right = lowDouble(m_node->child2());

            LBasicBlock notLessThan = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            Vector<ValueFromBlock, 2> results;

            // Fast path: left strictly wins the ordered comparison.
            results.append(m_out.anchor(left));
            m_out.branch(
                m_node->op() == ArithMin
                    ? m_out.doubleLessThan(left, right)
                    : m_out.doubleGreaterThan(left, right),
                unsure(continuation), unsure(notLessThan));

            LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation);
            // Otherwise pick right, unless the comparison is unordered (a NaN
            // operand), in which case the result is NaN.
            // NOTE(review): ties (including -0 vs +0) resolve to right here —
            // verify this matches the required Math.min(-0, +0) semantics for
            // the inputs this node can see.
            results.append(m_out.anchor(m_out.select(
                m_node->op() == ArithMin
                    ? m_out.doubleGreaterThanOrEqual(left, right)
                    : m_out.doubleLessThanOrEqual(left, right),
                right, m_out.constDouble(PNaN))));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setDouble(m_out.phi(Double, results));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2681
2682 void compileArithAbs()
2683 {
2684 switch (m_node->child1().useKind()) {
2685 case Int32Use: {
2686 LValue value = lowInt32(m_node->child1());
2687
2688 LValue mask = m_out.aShr(value, m_out.constInt32(31));
2689 LValue result = m_out.bitXor(mask, m_out.add(mask, value));
2690
2691 if (shouldCheckOverflow(m_node->arithMode()))
2692 speculate(Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
2693
2694 setInt32(result);
2695 break;
2696 }
2697
2698 case DoubleRepUse: {
2699 setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
2700 break;
2701 }
2702
2703 default: {
2704 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2705 LValue argument = lowJSValue(m_node->child1());
2706 LValue result = vmCall(Double, m_out.operation(operationArithAbs), m_callFrame, argument);
2707 setDouble(result);
2708 break;
2709 }
2710 }
2711 }
2712
2713 void compileArithUnary()
2714 {
2715 if (m_node->child1().useKind() == DoubleRepUse) {
2716 setDouble(m_out.doubleUnary(m_node->arithUnaryType(), lowDouble(m_node->child1())));
2717 return;
2718 }
2719 LValue argument = lowJSValue(m_node->child1());
2720 LValue result = vmCall(Double, m_out.operation(DFG::arithUnaryOperation(m_node->arithUnaryType())), m_callFrame, argument);
2721 setDouble(result);
2722 }
2723
2724 void compileValuePow()
2725 {
2726 if (m_node->isBinaryUseKind(BigIntUse)) {
2727 LValue base = lowBigInt(m_node->child1());
2728 LValue exponent = lowBigInt(m_node->child2());
2729
2730 LValue result = vmCall(pointerType(), m_out.operation(operationPowBigInt), m_callFrame, base, exponent);
2731 setJSValue(result);
2732 return;
2733 }
2734
2735 LValue base = lowJSValue(m_node->child1());
2736 LValue exponent = lowJSValue(m_node->child2());
2737 LValue result = vmCall(Int64, m_out.operation(operationValuePow), m_callFrame, base, exponent);
2738 setJSValue(result);
2739 }
2740
    void compileArithPow()
    {
        // Lowers ArithPow with a double base. A proven-int32 exponent uses the
        // fast powi loop; otherwise we open-code the JS special cases (NaN,
        // |base| == 1 with infinite exponent, and +/-0.5 exponents) around a
        // call to pow.
        if (m_node->child2().useKind() == Int32Use)
            setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
        else {
            LValue base = lowDouble(m_node->child1());
            LValue exponent = lowDouble(m_node->child2());

            LBasicBlock integerExponentIsSmallBlock = m_out.newBlock();
            LBasicBlock integerExponentPowBlock = m_out.newBlock();
            LBasicBlock doubleExponentPowBlockEntry = m_out.newBlock();
            LBasicBlock nanExceptionBaseIsOne = m_out.newBlock();
            LBasicBlock nanExceptionExponentIsInfinity = m_out.newBlock();
            LBasicBlock testExponentIsOneHalf = m_out.newBlock();
            LBasicBlock handleBaseZeroExponentIsOneHalf = m_out.newBlock();
            LBasicBlock handleInfinityForExponentIsOneHalf = m_out.newBlock();
            LBasicBlock exponentIsOneHalfNormal = m_out.newBlock();
            LBasicBlock exponentIsOneHalfInfinity = m_out.newBlock();
            LBasicBlock testExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock testBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock handleBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock handleInfinityForExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock exponentIsNegativeOneHalfNormal = m_out.newBlock();
            LBasicBlock exponentIsNegativeOneHalfInfinity = m_out.newBlock();
            LBasicBlock powBlock = m_out.newBlock();
            LBasicBlock nanExceptionResultIsNaN = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // If the exponent is an integral double, try the fast powi path.
            LValue integerExponent = m_out.doubleToInt(exponent);
            LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
            LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
            m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));

            LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
            LValue integerExponentBelowMax = m_out.belowOrEqual(integerExponent, m_out.constInt32(maxExponentForIntegerMathPow));
            m_out.branch(integerExponentBelowMax, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));

            m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
            ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
            m_out.jump(continuation);

            // If y is NaN, the result is NaN.
            m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionBaseIsOne);
            LValue exponentIsNaN;
            if (provenType(m_node->child2()) & SpecDoubleNaN)
                exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
            else
                exponentIsNaN = m_out.booleanFalse;
            m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionBaseIsOne));

            // If abs(x) is 1 and y is +infinity, the result is NaN.
            // If abs(x) is 1 and y is -infinity, the result is NaN.

            // Test if base == 1.
            m_out.appendTo(nanExceptionBaseIsOne, nanExceptionExponentIsInfinity);
            LValue absoluteBase = m_out.doubleAbs(base);
            LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
            m_out.branch(absoluteBaseIsOne, rarely(nanExceptionExponentIsInfinity), usually(testExponentIsOneHalf));

            // Test if abs(y) == Infinity.
            m_out.appendTo(nanExceptionExponentIsInfinity, testExponentIsOneHalf);
            LValue absoluteExponent = m_out.doubleAbs(exponent);
            LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionResultIsNaN), usually(testExponentIsOneHalf));

            // If y == 0.5 or y == -0.5, handle it through SQRT.
            // We have to be careful with -0 and -Infinity.

            // Test if y == 0.5
            m_out.appendTo(testExponentIsOneHalf, handleBaseZeroExponentIsOneHalf);
            LValue exponentIsOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(0.5));
            m_out.branch(exponentIsOneHalf, rarely(handleBaseZeroExponentIsOneHalf), usually(testExponentIsNegativeOneHalf));

            // Handle x == -0.
            m_out.appendTo(handleBaseZeroExponentIsOneHalf, handleInfinityForExponentIsOneHalf);
            LValue baseIsZeroExponentIsOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
            ValueFromBlock zeroResultExponentIsOneHalf = m_out.anchor(m_out.doubleZero);
            m_out.branch(baseIsZeroExponentIsOneHalf, rarely(continuation), usually(handleInfinityForExponentIsOneHalf));

            // Test if abs(x) == Infinity.
            m_out.appendTo(handleInfinityForExponentIsOneHalf, exponentIsOneHalfNormal);
            LValue absoluteBaseIsInfinityOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.branch(absoluteBaseIsInfinityOneHalf, rarely(exponentIsOneHalfInfinity), usually(exponentIsOneHalfNormal));

            // The exponent is 0.5, the base is finite or NaN, we can use SQRT.
            m_out.appendTo(exponentIsOneHalfNormal, exponentIsOneHalfInfinity);
            ValueFromBlock sqrtResult = m_out.anchor(m_out.doubleSqrt(base));
            m_out.jump(continuation);

            // The exponent is 0.5, the base is infinite, the result is always infinite.
            m_out.appendTo(exponentIsOneHalfInfinity, testExponentIsNegativeOneHalf);
            ValueFromBlock sqrtInfinityResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.jump(continuation);

            // Test if y == -0.5
            m_out.appendTo(testExponentIsNegativeOneHalf, testBaseZeroExponentIsNegativeOneHalf);
            LValue exponentIsNegativeOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(-0.5));
            m_out.branch(exponentIsNegativeOneHalf, rarely(testBaseZeroExponentIsNegativeOneHalf), usually(powBlock));

            // Handle x == -0.
            m_out.appendTo(testBaseZeroExponentIsNegativeOneHalf, handleBaseZeroExponentIsNegativeOneHalf);
            LValue baseIsZeroExponentIsNegativeOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
            m_out.branch(baseIsZeroExponentIsNegativeOneHalf, rarely(handleBaseZeroExponentIsNegativeOneHalf), usually(handleInfinityForExponentIsNegativeOneHalf));

            m_out.appendTo(handleBaseZeroExponentIsNegativeOneHalf, handleInfinityForExponentIsNegativeOneHalf);
            ValueFromBlock oneOverSqrtZeroResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.jump(continuation);

            // Test if abs(x) == Infinity.
            m_out.appendTo(handleInfinityForExponentIsNegativeOneHalf, exponentIsNegativeOneHalfNormal);
            LValue absoluteBaseIsInfinityNegativeOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.branch(absoluteBaseIsInfinityNegativeOneHalf, rarely(exponentIsNegativeOneHalfInfinity), usually(exponentIsNegativeOneHalfNormal));

            // The exponent is -0.5, the base is finite or NaN, we can use 1/SQRT.
            m_out.appendTo(exponentIsNegativeOneHalfNormal, exponentIsNegativeOneHalfInfinity);
            LValue sqrtBase = m_out.doubleSqrt(base);
            ValueFromBlock oneOverSqrtResult = m_out.anchor(m_out.div(m_out.constDouble(1.), sqrtBase));
            m_out.jump(continuation);

            // The exponent is -0.5, the base is infinite, the result is always zero.
            m_out.appendTo(exponentIsNegativeOneHalfInfinity, powBlock);
            ValueFromBlock oneOverSqrtInfinityResult = m_out.anchor(m_out.doubleZero);
            m_out.jump(continuation);

            // General case: no special case matched, call pow.
            m_out.appendTo(powBlock, nanExceptionResultIsNaN);
            ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent));
            m_out.jump(continuation);

            m_out.appendTo(nanExceptionResultIsNaN, continuation);
            ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setDouble(m_out.phi(Double, powDoubleIntResult, zeroResultExponentIsOneHalf, sqrtResult, sqrtInfinityResult, oneOverSqrtZeroResult, oneOverSqrtResult, oneOverSqrtInfinityResult, powResult, pureNan));
        }
    }
2877
2878 void compileArithRandom()
2879 {
2880 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
2881
2882 // Inlined WeakRandom::advance().
2883 // uint64_t x = m_low;
2884 void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
2885 LValue low = m_out.load64(m_out.absolute(lowAddress));
2886 // uint64_t y = m_high;
2887 void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
2888 LValue high = m_out.load64(m_out.absolute(highAddress));
2889 // m_low = y;
2890 m_out.store64(high, m_out.absolute(lowAddress));
2891
2892 // x ^= x << 23;
2893 LValue phase1 = m_out.bitXor(m_out.shl(low, m_out.constInt64(23)), low);
2894
2895 // x ^= x >> 17;
2896 LValue phase2 = m_out.bitXor(m_out.lShr(phase1, m_out.constInt64(17)), phase1);
2897
2898 // x ^= y ^ (y >> 26);
2899 LValue phase3 = m_out.bitXor(m_out.bitXor(high, m_out.lShr(high, m_out.constInt64(26))), phase2);
2900
2901 // m_high = x;
2902 m_out.store64(phase3, m_out.absolute(highAddress));
2903
2904 // return x + y;
2905 LValue random64 = m_out.add(phase3, high);
2906
2907 // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation.
2908 LValue random53 = m_out.bitAnd(random64, m_out.constInt64((1ULL << 53) - 1));
2909
2910 LValue double53Integer = m_out.intToDouble(random53);
2911
2912 // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
2913 // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)).
2914 static const double scale = 1.0 / (1ULL << 53);
2915
2916 // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer.
2917 // It just reduces the exp part of the given 53bit double integer.
2918 // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.)
2919 // Now we get 53bit precision random double value in [0, 1).
2920 LValue result = m_out.doubleMul(double53Integer, m_out.constDouble(scale));
2921
2922 setDouble(result);
2923 }
2924
    void compileArithRound()
    {
        // Lowers ArithRound (Math.round semantics: round half toward +Inf).
        if (m_node->child1().useKind() == DoubleRepUse) {
            LValue result = nullptr;
            if (producesInteger(m_node->arithRoundingMode()) && !shouldCheckNegativeZero(m_node->arithRoundingMode())) {
                // Fast path: floor(x + 0.5) rounds half up when we don't need
                // to preserve the sign of -0.
                LValue value = lowDouble(m_node->child1());
                result = m_out.doubleFloor(m_out.doubleAdd(value, m_out.constDouble(0.5)));
            } else {
                // General path: take ceil(x), then subtract 1 when we rounded
                // up by more than 0.5 (doubleGreaterThanOrUnordered also
                // covers NaN inputs). Unlike floor(x + 0.5), this keeps the
                // sign of -0.
                LBasicBlock realPartIsMoreThanHalf = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                LValue value = lowDouble(m_node->child1());
                LValue integerValue = m_out.doubleCeil(value);
                ValueFromBlock integerValueResult = m_out.anchor(integerValue);

                // How much the ceil rounded up by.
                LValue realPart = m_out.doubleSub(integerValue, value);

                m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation));

                LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation);
                LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
                ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
                m_out.jump(continuation);
                m_out.appendTo(continuation, lastNext);

                result = m_out.phi(Double, integerValueResult, integerValueRoundedDownResult);
            }

            if (producesInteger(m_node->arithRoundingMode())) {
                // Convert to int32, exiting on out-of-range values (and on -0
                // when the rounding mode requires distinguishing it).
                LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode()));
                setInt32(integerValue);
            } else
                setDouble(result);
            return;
        }

        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
        LValue argument = lowJSValue(m_node->child1());
        setJSValue(vmCall(Int64, m_out.operation(operationArithRound), m_callFrame, argument));
    }
2965
2966 void compileArithFloor()
2967 {
2968 if (m_node->child1().useKind() == DoubleRepUse) {
2969 LValue value = lowDouble(m_node->child1());
2970 LValue integerValue = m_out.doubleFloor(value);
2971 if (producesInteger(m_node->arithRoundingMode()))
2972 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2973 else
2974 setDouble(integerValue);
2975 return;
2976 }
2977 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2978 LValue argument = lowJSValue(m_node->child1());
2979 setJSValue(vmCall(Int64, m_out.operation(operationArithFloor), m_callFrame, argument));
2980 }
2981
2982 void compileArithCeil()
2983 {
2984 if (m_node->child1().useKind() == DoubleRepUse) {
2985 LValue value = lowDouble(m_node->child1());
2986 LValue integerValue = m_out.doubleCeil(value);
2987 if (producesInteger(m_node->arithRoundingMode()))
2988 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2989 else
2990 setDouble(integerValue);
2991 return;
2992 }
2993 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2994 LValue argument = lowJSValue(m_node->child1());
2995 setJSValue(vmCall(Int64, m_out.operation(operationArithCeil), m_callFrame, argument));
2996 }
2997
2998 void compileArithTrunc()
2999 {
3000 if (m_node->child1().useKind() == DoubleRepUse) {
3001 LValue value = lowDouble(m_node->child1());
3002 LValue result = m_out.doubleTrunc(value);
3003 if (producesInteger(m_node->arithRoundingMode()))
3004 setInt32(convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode())));
3005 else
3006 setDouble(result);
3007 return;
3008 }
3009 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
3010 LValue argument = lowJSValue(m_node->child1());
3011 setJSValue(vmCall(Int64, m_out.operation(operationArithTrunc), m_callFrame, argument));
3012 }
3013
3014 void compileArithSqrt()
3015 {
3016 if (m_node->child1().useKind() == DoubleRepUse) {
3017 setDouble(m_out.doubleSqrt(lowDouble(m_node->child1())));
3018 return;
3019 }
3020 LValue argument = lowJSValue(m_node->child1());
3021 LValue result = vmCall(Double, m_out.operation(operationArithSqrt), m_callFrame, argument);
3022 setDouble(result);
3023 }
3024
3025 void compileArithFRound()
3026 {
3027 if (m_node->child1().useKind() == DoubleRepUse) {
3028 setDouble(m_out.fround(lowDouble(m_node->child1())));
3029 return;
3030 }
3031 LValue argument = lowJSValue(m_node->child1());
3032 LValue result = vmCall(Double, m_out.operation(operationArithFRound), m_callFrame, argument);
3033 setDouble(result);
3034 }
3035
    void compileValueNegate()
    {
        // Negation of an untyped operand. Lowered through a unary math IC so the
        // common numeric cases can be handled by patched inline code, with
        // operationArithNegate as the fully generic fallback. The baseline
        // ArithProfile for this bytecode seeds the IC's type information.
        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
        CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
        unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
        ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
        auto repatchingFunction = operationArithNegateOptimize;
        auto nonRepatchingFunction = operationArithNegate;
        compileUnaryMathIC<JITNegGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
    }
3046
    void compileArithNegate()
    {
        // Lowers ArithNegate for int32, int52 and double representations, emitting
        // overflow / negative-zero speculation checks as the arith mode requires.
        switch (m_node->child1().useKind()) {
        case Int32Use: {
            LValue value = lowInt32(m_node->child1());

            LValue result;
            if (!shouldCheckOverflow(m_node->arithMode()))
                result = m_out.neg(value);
            else if (!shouldCheckNegativeZero(m_node->arithMode())) {
                // Overflow-checked subtraction 0 - value; exits if value == INT32_MIN.
                CheckValue* check = m_out.speculateSub(m_out.int32Zero, value);
                blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
                result = check;
            } else {
                // (value & 0x7fffffff) == 0 holds for exactly 0 and INT32_MIN —
                // the two inputs where negation is wrong (-0 result, or overflow).
                // One mask test covers both speculation conditions.
                speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
                result = m_out.neg(value);
            }

            setInt32(result);
            break;
        }

        case Int52RepUse: {
            if (!abstractValue(m_node->child1()).couldBeType(SpecNonInt32AsInt52)) {
                // The value is known to fit in int32 range, so negation cannot
                // overflow int52; only the -0 case needs a check.
                Int52Kind kind;
                LValue value = lowWhicheverInt52(m_node->child1(), kind);
                LValue result = m_out.neg(value);
                if (shouldCheckNegativeZero(m_node->arithMode()))
                    speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
                setInt52(result, kind);
                break;
            }

            // General int52: overflow-checked 0 - value, plus an optional -0 check.
            LValue value = lowInt52(m_node->child1());
            CheckValue* result = m_out.speculateSub(m_out.int64Zero, value);
            blessSpeculation(result, Int52Overflow, noValue(), nullptr, m_origin);
            if (shouldCheckNegativeZero(m_node->arithMode()))
                speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
            setInt52(result);
            break;
        }

        case DoubleRepUse: {
            // Double negation is exact; no checks needed.
            setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
3099
3100 void compileValueBitNot()
3101 {
3102 if (m_node->child1().useKind() == BigIntUse) {
3103 LValue operand = lowBigInt(m_node->child1());
3104 LValue result = vmCall(pointerType(), m_out.operation(operationBitNotBigInt), m_callFrame, operand);
3105 setJSValue(result);
3106 return;
3107 }
3108
3109 LValue operand = lowJSValue(m_node->child1());
3110 LValue result = vmCall(Int64, m_out.operation(operationValueBitNot), m_callFrame, operand);
3111 setJSValue(result);
3112 }
3113
3114 void compileArithBitNot()
3115 {
3116 setInt32(m_out.bitNot(lowInt32(m_node->child1())));
3117 }
3118
3119 void compileValueBitAnd()
3120 {
3121 if (m_node->isBinaryUseKind(BigIntUse)) {
3122 LValue left = lowBigInt(m_node->child1());
3123 LValue right = lowBigInt(m_node->child2());
3124
3125 LValue result = vmCall(pointerType(), m_out.operation(operationBitAndBigInt), m_callFrame, left, right);
3126 setJSValue(result);
3127 return;
3128 }
3129
3130 emitBinaryBitOpSnippet<JITBitAndGenerator>(operationValueBitAnd);
3131 }
3132
3133 void compileArithBitAnd()
3134 {
3135 setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3136 }
3137
3138 void compileValueBitOr()
3139 {
3140 if (m_node->isBinaryUseKind(BigIntUse)) {
3141 LValue left = lowBigInt(m_node->child1());
3142 LValue right = lowBigInt(m_node->child2());
3143
3144 LValue result = vmCall(pointerType(), m_out.operation(operationBitOrBigInt), m_callFrame, left, right);
3145 setJSValue(result);
3146 return;
3147 }
3148
3149 emitBinaryBitOpSnippet<JITBitOrGenerator>(operationValueBitOr);
3150 }
3151
3152 void compileArithBitOr()
3153 {
3154 setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3155 }
3156
3157 void compileValueBitXor()
3158 {
3159 if (m_node->isBinaryUseKind(BigIntUse)) {
3160 LValue left = lowBigInt(m_node->child1());
3161 LValue right = lowBigInt(m_node->child2());
3162
3163 LValue result = vmCall(pointerType(), m_out.operation(operationBitXorBigInt), m_callFrame, left, right);
3164 setJSValue(result);
3165 return;
3166 }
3167
3168 emitBinaryBitOpSnippet<JITBitXorGenerator>(operationValueBitXor);
3169 }
3170
3171 void compileArithBitXor()
3172 {
3173 setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3174 }
3175
3176 void compileBitRShift()
3177 {
3178 if (m_node->isBinaryUseKind(UntypedUse)) {
3179 emitRightShiftSnippet(JITRightShiftGenerator::SignedShift);
3180 return;
3181 }
3182 setInt32(m_out.aShr(
3183 lowInt32(m_node->child1()),
3184 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
3185 }
3186
3187 void compileBitLShift()
3188 {
3189 if (m_node->isBinaryUseKind(UntypedUse)) {
3190 emitBinaryBitOpSnippet<JITLeftShiftGenerator>(operationValueBitLShift);
3191 return;
3192 }
3193 setInt32(m_out.shl(
3194 lowInt32(m_node->child1()),
3195 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
3196 }
3197
3198 void compileBitURShift()
3199 {
3200 if (m_node->isBinaryUseKind(UntypedUse)) {
3201 emitRightShiftSnippet(JITRightShiftGenerator::UnsignedShift);
3202 return;
3203 }
3204 setInt32(m_out.lShr(
3205 lowInt32(m_node->child1()),
3206 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
3207 }
3208
3209 void compileUInt32ToNumber()
3210 {
3211 LValue value = lowInt32(m_node->child1());
3212
3213 if (doesOverflow(m_node->arithMode())) {
3214 setStrictInt52(m_out.zeroExtPtr(value));
3215 return;
3216 }
3217
3218 speculate(Overflow, noValue(), 0, m_out.lessThan(value, m_out.int32Zero));
3219 setInt32(value);
3220 }
3221
    void compileCheckStructure()
    {
        // Speculate that child1's structure ID is one of those in the node's
        // StructureSet, OSR-exiting otherwise. A constant base means a failed
        // check indicts the constant cache rather than the generic cache.
        ExitKind exitKind;
        if (m_node->child1()->hasConstant())
            exitKind = BadConstantCache;
        else
            exitKind = BadCache;

        switch (m_node->child1().useKind()) {
        case CellUse:
        case KnownCellUse: {
            LValue cell = lowCell(m_node->child1());

            checkStructure(
                m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
                exitKind, m_node->structureSet(),
                [&] (RegisteredStructure structure) {
                    return weakStructureID(structure);
                });
            return;
        }

        case CellOrOtherUse: {
            // The value may legitimately be "other" (null/undefined): only cells
            // get the structure check; non-cells just get an other-type check.
            LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
            checkStructure(
                m_out.load32(value, m_heaps.JSCell_structureID), jsValueValue(value),
                exitKind, m_node->structureSet(),
                [&] (RegisteredStructure structure) {
                    return weakStructureID(structure);
                });
            m_out.jump(continuation);

            m_out.appendTo(notCellCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecCell | SpecOther, isNotOther(value));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return;
        }
    }
3276
    void compileCheckStructureOrEmpty()
    {
        // Like compileCheckStructure, but the value is additionally allowed to be
        // the empty value (all-zero bits), which skips the structure check.
        ExitKind exitKind;
        if (m_node->child1()->hasConstant())
            exitKind = BadConstantCache;
        else
            exitKind = BadCache;

        LValue cell = lowCell(m_node->child1());
        // Only emit the empty-value branch if abstract interpretation says the
        // value can actually be empty; otherwise the check is unconditional.
        bool maySeeEmptyValue = m_interpreter.forNode(m_node->child1()).m_type & SpecEmpty;
        LBasicBlock notEmpty;
        LBasicBlock continuation;
        LBasicBlock lastNext;
        if (maySeeEmptyValue) {
            notEmpty = m_out.newBlock();
            continuation = m_out.newBlock();
            m_out.branch(m_out.isZero64(cell), unsure(continuation), unsure(notEmpty));
            lastNext = m_out.appendTo(notEmpty, continuation);
        }

        checkStructure(
            m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
            exitKind, m_node->structureSet(),
            [&] (RegisteredStructure structure) {
                return weakStructureID(structure);
            });

        if (maySeeEmptyValue) {
            m_out.jump(continuation);
            m_out.appendTo(continuation, lastNext);
        }
    }
3309
3310 void compileCheckCell()
3311 {
3312 LValue cell = lowCell(m_node->child1());
3313
3314 speculate(
3315 BadCell, jsValueValue(cell), m_node->child1().node(),
3316 m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell())));
3317 }
3318
    void compileCheckBadCell()
    {
        // Unconditionally exits with a BadCell exit kind — presumably emitted when
        // the front end has already proven the cell check must fail (confirm
        // against the DFG's CheckBadCell node semantics).
        terminate(BadCell);
    }
3323
3324 void compileCheckNotEmpty()
3325 {
3326 speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1())));
3327 }
3328
    void compileAssertNotEmpty()
    {
        // Debug-only validation: when enabled, emit a patchpoint that traps (via
        // breakpoint) if the value is the empty value. Compiles to nothing in
        // non-validation builds.
        if (!validationEnabled())
            return;

        LValue val = lowJSValue(m_node->child1());
        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->appendSomeRegister(val);
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                GPRReg input = params[0].gpr();
                // Fall through to the breakpoint only when the value is empty.
                CCallHelpers::Jump done = jit.branchIfNotEmpty(input);
                jit.breakpoint();
                done.link(&jit);
            });
    }
3346
3347 void compileCheckStringIdent()
3348 {
3349 UniquedStringImpl* uid = m_node->uidOperand();
3350 LValue stringImpl = lowStringIdent(m_node->child1());
3351 speculate(BadIdent, noValue(), nullptr, m_out.notEqual(stringImpl, m_out.constIntPtr(uid)));
3352 }
3353
3354 void compileGetExecutable()
3355 {
3356 LValue cell = lowCell(m_node->child1());
3357 speculateFunction(m_node->child1(), cell);
3358 setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
3359 }
3360
    void compileArrayify()
    {
        // Converts child1's indexing shape to the one the ArrayMode requires (or,
        // for ArrayifyToStructure, to one exact structure) by calling the matching
        // operationEnsure*, then verifies the conversion actually took effect.
        LValue cell = lowCell(m_node->child1());
        LValue property = !!m_node->child2() ? lowInt32(m_node->child2()) : 0;

        LBasicBlock unexpectedStructure = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // True when the cell does not already have the desired shape/structure.
        auto isUnexpectedArray = [&] (LValue cell) {
            if (m_node->op() == Arrayify)
                return m_out.logicalNot(isArrayTypeForArrayify(cell, m_node->arrayMode()));

            ASSERT(m_node->op() == ArrayifyToStructure);
            return m_out.notEqual(m_out.load32(cell, m_heaps.JSCell_structureID), weakStructureID(m_node->structure()));
        };

        m_out.branch(isUnexpectedArray(cell), rarely(unexpectedStructure), usually(continuation));

        LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);

        // For the contiguous-ish shapes, a huge index would need a sparse array,
        // which arrayification to these modes cannot provide — exit instead.
        if (property) {
            switch (m_node->arrayMode().type()) {
            case Array::Int32:
            case Array::Double:
            case Array::Contiguous:
                speculate(
                    Uncountable, noValue(), 0,
                    m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)));
                break;
            default:
                break;
            }
        }

        switch (m_node->arrayMode().type()) {
        case Array::Int32:
            vmCall(Void, m_out.operation(operationEnsureInt32), m_callFrame, cell);
            break;
        case Array::Double:
            vmCall(Void, m_out.operation(operationEnsureDouble), m_callFrame, cell);
            break;
        case Array::Contiguous:
            vmCall(Void, m_out.operation(operationEnsureContiguous), m_callFrame, cell);
            break;
        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage:
            vmCall(Void, m_out.operation(operationEnsureArrayStorage), m_callFrame, cell);
            break;
        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            break;
        }

        // The runtime conversion may not have produced the expected shape
        // (e.g. the object resisted conversion); re-check and exit if so.
        speculate(BadIndexingType, jsValueValue(cell), 0, isUnexpectedArray(cell));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
3419
    void compilePutStructure()
    {
        // Performs a structure transition by storing the new structure ID into the
        // cell's header. Records the transition so the GC/compilation machinery
        // knows about it.
        m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);

        RegisteredStructure oldStructure = m_node->transition()->previous;
        RegisteredStructure newStructure = m_node->transition()->next;
        // The transition must not change indexing mode or inline type info: this
        // lowering only rewrites the structure ID, nothing else about the object.
        ASSERT_UNUSED(oldStructure, oldStructure->indexingMode() == newStructure->indexingMode());
        ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
        ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());

        LValue cell = lowCell(m_node->child1());
        m_out.store32(
            weakStructureID(newStructure),
            cell, m_heaps.JSCell_structureID);
    }
3435
    void compileGetById(AccessType type)
    {
        // Lowers GetById-style nodes (get, try-get, direct-get). Cells go through
        // the inline-cache helper; untyped values branch on cell-ness first.
        ASSERT(type == AccessType::Get || type == AccessType::TryGet || type == AccessType::GetDirect);
        switch (m_node->child1().useKind()) {
        case CellUse: {
            setJSValue(getById(lowCell(m_node->child1()), type));
            return;
        }

        case UntypedUse: {
            // This is pretty weird, since we duplicate the slow path both here and in the
            // code generated by the IC. We should investigate making this less bad.
            // https://bugs.webkit.org/show_bug.cgi?id=127830
            LValue value = lowJSValue(m_node->child1());

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));

            // Cells still get the IC-backed fast path.
            LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
            ValueFromBlock cellResult = m_out.anchor(getById(value, type));
            m_out.jump(continuation);

            J_JITOperation_EJI getByIdFunction = appropriateGenericGetByIdFunction(type);

            // Non-cells call the generic runtime function for this access type.
            m_out.appendTo(notCellCase, continuation);
            ValueFromBlock notCellResult = m_out.anchor(vmCall(
                Int64, m_out.operation(getByIdFunction),
                m_callFrame, value,
                m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, cellResult, notCellResult));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return;
        }
    }
3481
    void compileGetByIdWithThis()
    {
        // GetByIdWithThis: when both base and this are known cells, use the
        // IC-backed helper directly; otherwise branch so that only the
        // both-cells path uses the IC and everything else calls the generic
        // runtime function.
        if (m_node->child1().useKind() == CellUse && m_node->child2().useKind() == CellUse)
            setJSValue(getByIdWithThis(lowCell(m_node->child1()), lowCell(m_node->child2())));
        else {
            LValue base = lowJSValue(m_node->child1());
            LValue thisValue = lowJSValue(m_node->child2());

            LBasicBlock baseCellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock thisValueCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(base, provenType(m_node->child1())), unsure(baseCellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(baseCellCase, thisValueCellCase);

            // Both operands must be cells for the IC path.
            m_out.branch(
                isCell(thisValue, provenType(m_node->child2())), unsure(thisValueCellCase), unsure(notCellCase));

            m_out.appendTo(thisValueCellCase, notCellCase);
            ValueFromBlock cellResult = m_out.anchor(getByIdWithThis(base, thisValue));
            m_out.jump(continuation);

            m_out.appendTo(notCellCase, continuation);
            ValueFromBlock notCellResult = m_out.anchor(vmCall(
                Int64, m_out.operation(operationGetByIdWithThisGeneric),
                m_callFrame, base, thisValue,
                m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, cellResult, notCellResult));
        }

    }
3519
3520 void compileGetByValWithThis()
3521 {
3522 LValue base = lowJSValue(m_node->child1());
3523 LValue thisValue = lowJSValue(m_node->child2());
3524 LValue subscript = lowJSValue(m_node->child3());
3525
3526 LValue result = vmCall(Int64, m_out.operation(operationGetByValWithThis), m_callFrame, base, thisValue, subscript);
3527 setJSValue(result);
3528 }
3529
3530 void compilePutByIdWithThis()
3531 {
3532 LValue base = lowJSValue(m_node->child1());
3533 LValue thisValue = lowJSValue(m_node->child2());
3534 LValue value = lowJSValue(m_node->child3());
3535
3536 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByIdWithThisStrict : operationPutByIdWithThis),
3537 m_callFrame, base, thisValue, value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()]));
3538 }
3539
3540 void compilePutByValWithThis()
3541 {
3542 LValue base = lowJSValue(m_graph.varArgChild(m_node, 0));
3543 LValue thisValue = lowJSValue(m_graph.varArgChild(m_node, 1));
3544 LValue property = lowJSValue(m_graph.varArgChild(m_node, 2));
3545 LValue value = lowJSValue(m_graph.varArgChild(m_node, 3));
3546
3547 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis),
3548 m_callFrame, base, thisValue, property, value);
3549 }
3550
    void compileAtomicsReadModifyWrite()
    {
        // Lowers the Atomics.* read-modify-write family (add/and/or/xor/sub,
        // exchange, compareExchange, load, store). Children are:
        //   0: base, 1: index, 2 .. 2+numExtraArgs-1: operand(s), last: storage.
        TypedArrayType type = m_node->arrayMode().typedArrayType();
        unsigned numExtraArgs = numExtraAtomicsArgs(m_node->op());
        Edge baseEdge = m_graph.child(m_node, 0);
        Edge indexEdge = m_graph.child(m_node, 1);
        Edge argEdges[maxNumExtraAtomicsArgs];
        for (unsigned i = numExtraArgs; i--;)
            argEdges[i] = m_graph.child(m_node, 2 + i);
        Edge storageEdge = m_graph.child(m_node, 2 + numExtraArgs);

        // Maps this node's opcode to the generic runtime entry point. Only used
        // by the slow path below.
        auto operation = [&] () -> LValue {
            switch (m_node->op()) {
            case AtomicsAdd:
                return m_out.operation(operationAtomicsAdd);
            case AtomicsAnd:
                return m_out.operation(operationAtomicsAnd);
            case AtomicsCompareExchange:
                return m_out.operation(operationAtomicsCompareExchange);
            case AtomicsExchange:
                return m_out.operation(operationAtomicsExchange);
            case AtomicsLoad:
                return m_out.operation(operationAtomicsLoad);
            case AtomicsOr:
                return m_out.operation(operationAtomicsOr);
            case AtomicsStore:
                return m_out.operation(operationAtomicsStore);
            case AtomicsSub:
                return m_out.operation(operationAtomicsSub);
            case AtomicsXor:
                return m_out.operation(operationAtomicsXor);
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
        };

        // No storage edge: the typed-array shape wasn't proven, so the whole
        // operation (including all checks) is done by the runtime.
        if (!storageEdge) {
            Vector<LValue> args;
            args.append(m_callFrame);
            args.append(lowJSValue(baseEdge));
            args.append(lowJSValue(indexEdge));
            for (unsigned i = 0; i < numExtraArgs; ++i)
                args.append(lowJSValue(argEdges[i]));
            LValue result = vmCall(Int64, operation(), args);
            setJSValue(result);
            return;
        }

        // Fast path: emit the hardware atomic directly against the array storage.
        LValue index = lowInt32(indexEdge);
        LValue args[2];
        for (unsigned i = numExtraArgs; i--;)
            args[i] = getIntTypedArrayStoreOperand(argEdges[i]);
        LValue storage = lowStorage(storageEdge);

        TypedPointer pointer = pointerIntoTypedArray(storage, index, type);
        Width width = widthForBytes(elementSize(type));

        LValue atomicValue;
        LValue result;

        // For signed sub-word element types, mask the raw atomic result down to
        // the element width — presumably so the later result handling sees a
        // cleanly zero-extended value (confirm against setIntTypedArrayLoadResult).
        auto sanitizeResult = [&] (LValue value) -> LValue {
            if (isSigned(type)) {
                switch (elementSize(type)) {
                case 1:
                    value = m_out.bitAnd(value, m_out.constInt32(0xff));
                    break;
                case 2:
                    value = m_out.bitAnd(value, m_out.constInt32(0xffff));
                    break;
                case 4:
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                    break;
                }
            }
            return value;
        };

        switch (m_node->op()) {
        case AtomicsAdd:
            atomicValue = m_out.atomicXchgAdd(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsAnd:
            atomicValue = m_out.atomicXchgAnd(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsCompareExchange:
            atomicValue = m_out.atomicStrongCAS(args[0], args[1], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsExchange:
            atomicValue = m_out.atomicXchg(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsLoad:
            // An atomic load is expressed as an atomic add of zero.
            atomicValue = m_out.atomicXchgAdd(m_out.int32Zero, pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsOr:
            atomicValue = m_out.atomicXchgOr(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsStore:
            // The node's result is the stored value, not the old contents.
            atomicValue = m_out.atomicXchg(args[0], pointer, width);
            result = args[0];
            break;
        case AtomicsSub:
            atomicValue = m_out.atomicXchgSub(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsXor:
            atomicValue = m_out.atomicXchgXor(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        // Signify that the state against which the atomic operations are serialized is confined to just
        // the typed array storage, since that's as precise of an abstraction as we can have of shared
        // array buffer storage.
        m_heaps.decorateFencedAccess(&m_heaps.typedArrayProperties, atomicValue);

        setIntTypedArrayLoadResult(result, type);
    }
3679
    void compileAtomicsIsLockFree()
    {
        // Atomics.isLockFree(bytes): int32 operands are answered inline — sizes
        // 1, 2 and 4 are lock-free, everything else is not. Untyped operands go
        // through the runtime.
        if (m_node->child1().useKind() != Int32Use) {
            setJSValue(vmCall(Int64, m_out.operation(operationAtomicsIsLockFree), m_callFrame, lowJSValue(m_node->child1())));
            return;
        }

        LValue bytes = lowInt32(m_node->child1());

        LBasicBlock trueCase = m_out.newBlock();
        LBasicBlock falseCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(trueCase);

        // Switch on the byte count: 1/2/4 -> true, default -> false.
        Vector<SwitchCase> cases;
        cases.append(SwitchCase(m_out.constInt32(1), trueCase, Weight()));
        cases.append(SwitchCase(m_out.constInt32(2), trueCase, Weight()));
        cases.append(SwitchCase(m_out.constInt32(4), trueCase, Weight()));
        m_out.switchInstruction(bytes, cases, falseCase, Weight());

        m_out.appendTo(trueCase, falseCase);
        ValueFromBlock trueValue = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);
        m_out.appendTo(falseCase, continuation);
        ValueFromBlock falseValue = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, trueValue, falseValue));
    }
3711
3712 void compileDefineDataProperty()
3713 {
3714 LValue base = lowCell(m_graph.varArgChild(m_node, 0));
3715 LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
3716 LValue attributes = lowInt32(m_graph.varArgChild(m_node, 3));
3717 Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
3718 switch (propertyEdge.useKind()) {
3719 case StringUse: {
3720 LValue property = lowString(propertyEdge);
3721 vmCall(Void, m_out.operation(operationDefineDataPropertyString), m_callFrame, base, property, value, attributes);
3722 break;
3723 }
3724 case StringIdentUse: {
3725 LValue property = lowStringIdent(propertyEdge);
3726 vmCall(Void, m_out.operation(operationDefineDataPropertyStringIdent), m_callFrame, base, property, value, attributes);
3727 break;
3728 }
3729 case SymbolUse: {
3730 LValue property = lowSymbol(propertyEdge);
3731 vmCall(Void, m_out.operation(operationDefineDataPropertySymbol), m_callFrame, base, property, value, attributes);
3732 break;
3733 }
3734 case UntypedUse: {
3735 LValue property = lowJSValue(propertyEdge);
3736 vmCall(Void, m_out.operation(operationDefineDataProperty), m_callFrame, base, property, value, attributes);
3737 break;
3738 }
3739 default:
3740 RELEASE_ASSERT_NOT_REACHED();
3741 }
3742 }
3743
3744 void compileDefineAccessorProperty()
3745 {
3746 LValue base = lowCell(m_graph.varArgChild(m_node, 0));
3747 LValue getter = lowCell(m_graph.varArgChild(m_node, 2));
3748 LValue setter = lowCell(m_graph.varArgChild(m_node, 3));
3749 LValue attributes = lowInt32(m_graph.varArgChild(m_node, 4));
3750 Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
3751 switch (propertyEdge.useKind()) {
3752 case StringUse: {
3753 LValue property = lowString(propertyEdge);
3754 vmCall(Void, m_out.operation(operationDefineAccessorPropertyString), m_callFrame, base, property, getter, setter, attributes);
3755 break;
3756 }
3757 case StringIdentUse: {
3758 LValue property = lowStringIdent(propertyEdge);
3759 vmCall(Void, m_out.operation(operationDefineAccessorPropertyStringIdent), m_callFrame, base, property, getter, setter, attributes);
3760 break;
3761 }
3762 case SymbolUse: {
3763 LValue property = lowSymbol(propertyEdge);
3764 vmCall(Void, m_out.operation(operationDefineAccessorPropertySymbol), m_callFrame, base, property, getter, setter, attributes);
3765 break;
3766 }
3767 case UntypedUse: {
3768 LValue property = lowJSValue(propertyEdge);
3769 vmCall(Void, m_out.operation(operationDefineAccessorProperty), m_callFrame, base, property, getter, setter, attributes);
3770 break;
3771 }
3772 default:
3773 RELEASE_ASSERT_NOT_REACHED();
3774 }
3775 }
3776
    void compilePutById()
    {
        // Lowers PutById / PutByIdDirect via a patchpoint that hosts a
        // JITPutByIdGenerator inline cache, with a late-emitted slow path that
        // calls the IC's slow-path function.
        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == CellUse, m_node->child1().useKind());

        Node* node = m_node;
        LValue base = lowCell(node->child1());
        LValue value = lowJSValue(node->child2());
        auto uid = m_graph.identifiers()[node->identifierNumber()];

        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->appendSomeRegister(base);
        patchpoint->appendSomeRegister(value);
        // The IC's generated code relies on the tag registers holding their
        // canonical values.
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        // FIXME: If this is a PutByIdFlush, we might want to late-clobber volatile registers.
        // https://bugs.webkit.org/show_bug.cgi?id=152848

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        ECMAMode ecmaMode = m_graph.executableFor(node->origin.semantic)->ecmaMode();

        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // JS setter call ICs generated by the PutById IC will need this.
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                auto generator = Box<JITPutByIdGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), GPRInfo::patchpointScratchRegister, ecmaMode,
                    node->op() == PutByIdDirect ? Direct : NotDirect);

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                // The slow path is emitted out-of-line after the main code.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), generator->slowPathFunction(), InvalidGPRReg,
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                            params[0].gpr(), CCallHelpers::TrustedImmPtr(uid)).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });
    }
3846
3847 void compileGetButterfly()
3848 {
3849 LValue butterfly = m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSObject_butterfly);
3850 setStorage(butterfly);
3851 }
3852
3853 void compileConstantStoragePointer()
3854 {
3855 setStorage(m_out.constIntPtr(m_node->storagePointer()));
3856 }
3857
void compileGetIndexedPropertyStorage()
{
    // Lowers GetIndexedPropertyStorage: produces the backing-store pointer used
    // for indexed access on the child cell — a string's character data, or a
    // typed array's (caged) vector.
    LValue cell = lowCell(m_node->child1());

    if (m_node->arrayMode().type() == Array::String) {
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Fast path: a non-rope string already has its StringImpl pointer set.
        LValue fastResultValue = m_out.loadPtr(cell, m_heaps.JSString_value);
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);

        m_out.branch(isRopeString(cell, m_node->child1()), rarely(slowPath), usually(continuation));

        LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);

        // Slow path: flatten the rope with a VM call; the call returns the
        // resolved StringImpl pointer.
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(pointerType(), m_out.operation(operationResolveRope), m_callFrame, cell));

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        // Either way, the storage is the StringImpl's character-data pointer.
        setStorage(m_out.loadPtr(m_out.phi(pointerType(), fastResult, slowResult), m_heaps.StringImpl_data));
        return;
    }

    // Only typed-array views remain; anything else is a planning bug.
    DFG_ASSERT(m_graph, m_node, isTypedView(m_node->arrayMode().typedArrayType()), m_node->arrayMode().typedArrayType());
    // Cage the vector pointer against the primitive gigacage before exposing
    // it as storage.
    LValue vector = m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector);
    setStorage(caged(Gigacage::Primitive, vector, cell));
}
3888
3889 void compileCheckArray()
3890 {
3891 Edge edge = m_node->child1();
3892 LValue cell = lowCell(edge);
3893
3894 if (m_node->arrayMode().alreadyChecked(m_graph, m_node, abstractValue(edge)))
3895 return;
3896
3897 speculate(
3898 BadIndexingType, jsValueValue(cell), 0,
3899 m_out.logicalNot(isArrayTypeForCheckArray(cell, m_node->arrayMode())));
3900 }
3901
void compileGetTypedArrayByteOffset()
{
    // Lowers GetTypedArrayByteOffset: the byte offset is 0 for non-wasteful
    // views; for wasteful views it is the distance between the ArrayBuffer's
    // data pointer and the view's vector pointer.
    LValue basePtr = lowCell(m_node->child1());

    LBasicBlock simpleCase = m_out.newBlock();
    LBasicBlock wastefulCase = m_out.newBlock();
    LBasicBlock notNull = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    LValue mode = m_out.load32(basePtr, m_heaps.JSArrayBufferView_mode);
    m_out.branch(
        m_out.notEqual(mode, m_out.constInt32(WastefulTypedArray)),
        unsure(simpleCase), unsure(wastefulCase));

    LBasicBlock lastNext = m_out.appendTo(simpleCase, wastefulCase);

    // Non-wasteful views always report offset 0.
    ValueFromBlock simpleOut = m_out.anchor(m_out.constIntPtr(0));

    m_out.jump(continuation);

    m_out.appendTo(wastefulCase, notNull);

    // If the vector is null we skip straight to the continuation, anchoring
    // the (null, i.e. zero) vector value itself as the result.
    LValue vector = m_out.loadPtr(basePtr, m_heaps.JSArrayBufferView_vector);
    ValueFromBlock nullVectorOut = m_out.anchor(vector);
    m_out.branch(vector, unsure(notNull), unsure(continuation));

    m_out.appendTo(notNull, continuation);

    // Walk butterfly -> ArrayBuffer, caging the butterfly pointer first.
    LValue butterflyPtr = caged(Gigacage::JSValue, m_out.loadPtr(basePtr, m_heaps.JSObject_butterfly), basePtr);
    LValue arrayBufferPtr = m_out.loadPtr(butterflyPtr, m_heaps.Butterfly_arrayBuffer);

    LValue vectorPtr = caged(Gigacage::Primitive, vector, basePtr);

    // FIXME: This needs caging.
    // https://bugs.webkit.org/show_bug.cgi?id=175515
    LValue dataPtr = m_out.loadPtr(arrayBufferPtr, m_heaps.ArrayBuffer_data);
    dataPtr = removeArrayPtrTag(dataPtr);

    // Wasteful-view offset = vector - data.
    ValueFromBlock wastefulOut = m_out.anchor(m_out.sub(vectorPtr, dataPtr));

    m_out.jump(continuation);
    m_out.appendTo(continuation, lastNext);

    setInt32(m_out.castToInt32(m_out.phi(pointerType(), simpleOut, nullVectorOut, wastefulOut)));
}
3947
void compileGetPrototypeOf()
{
    // Lowers GetPrototypeOf. For speculated object kinds we read the prototype
    // directly out of the structure (or, for poly-proto objects, out of inline
    // storage); for everything else we defer to a VM call.
    switch (m_node->child1().useKind()) {
    case ArrayUse:
    case FunctionUse:
    case FinalObjectUse: {
        LValue object = lowCell(m_node->child1());
        // Emit the type check corresponding to the speculated use kind.
        switch (m_node->child1().useKind()) {
        case ArrayUse:
            speculateArray(m_node->child1(), object);
            break;
        case FunctionUse:
            speculateFunction(m_node->child1(), object);
            break;
        case FinalObjectUse:
            speculateFinalObject(m_node->child1(), object);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }

        LValue structure = loadStructure(object);

        // If the abstract interpreter proved a finite set of object structures
        // that all agree on mono- vs poly-proto, we can emit a single load
        // with no runtime branch.
        AbstractValue& value = m_state.forNode(m_node->child1());
        if ((value.m_type && !(value.m_type & ~SpecObject)) && value.m_structure.isFinite()) {
            bool hasPolyProto = false;
            bool hasMonoProto = false;
            value.m_structure.forEach([&] (RegisteredStructure structure) {
                if (structure->hasPolyProto())
                    hasPolyProto = true;
                else
                    hasMonoProto = true;
            });

            if (hasMonoProto && !hasPolyProto) {
                // All mono-proto: prototype lives on the structure.
                setJSValue(m_out.load64(structure, m_heaps.Structure_prototype));
                return;
            }

            if (hasPolyProto && !hasMonoProto) {
                // All poly-proto: prototype lives at a known offset in the
                // object's inline storage.
                setJSValue(m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), object, m_out.constInt64(knownPolyProtoOffset), ScaleEight, JSObject::offsetOfInlineStorage())));
                return;
            }
        }

        // Mixed or unknown: decide at run time. A zero prototype slot in the
        // structure signals the poly-proto case.
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock loadPolyProto = m_out.newBlock();

        LValue prototypeBits = m_out.load64(structure, m_heaps.Structure_prototype);
        ValueFromBlock directPrototype = m_out.anchor(prototypeBits);
        m_out.branch(m_out.isZero64(prototypeBits), unsure(loadPolyProto), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(loadPolyProto, continuation);
        ValueFromBlock polyProto = m_out.anchor(
            m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), object, m_out.constInt64(knownPolyProtoOffset), ScaleEight, JSObject::offsetOfInlineStorage())));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, directPrototype, polyProto));
        return;
    }
    case ObjectUse: {
        // Proven object, but not a kind we can inline: call the object-typed
        // VM operation.
        setJSValue(vmCall(Int64, m_out.operation(operationGetPrototypeOfObject), m_callFrame, lowObject(m_node->child1())));
        return;
    }
    default: {
        // Fully generic: the operation handles any JSValue.
        setJSValue(vmCall(Int64, m_out.operation(operationGetPrototypeOf), m_callFrame, lowJSValue(m_node->child1())));
        return;
    }
    }
}
4020
void compileGetArrayLength()
{
    // Lowers GetArrayLength. Where the length lives depends on the array
    // mode's storage shape.
    switch (m_node->arrayMode().type()) {
    case Array::Undecided:
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous: {
        // Contiguous shapes keep publicLength in the butterfly header.
        setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.Butterfly_publicLength));
        return;
    }

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        // The stored length may not be representable as a non-negative int32;
        // OSR-exit if it reads as negative.
        LValue length = m_out.load32(lowStorage(m_node->child2()), m_heaps.ArrayStorage_publicLength);
        speculate(Uncountable, noValue(), nullptr, m_out.lessThan(length, m_out.int32Zero));
        setInt32(length);
        return;
    }

    case Array::String: {
        LValue string = lowCell(m_node->child1());

        LBasicBlock ropePath = m_out.newBlock();
        LBasicBlock nonRopePath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Ropes store their length on the JSRopeString itself; resolved
        // strings get it from their StringImpl.
        m_out.branch(isRopeString(string, m_node->child1()), rarely(ropePath), usually(nonRopePath));

        LBasicBlock lastNext = m_out.appendTo(ropePath, nonRopePath);
        ValueFromBlock ropeLength = m_out.anchor(m_out.load32NonNegative(string, m_heaps.JSRopeString_length));
        m_out.jump(continuation);

        m_out.appendTo(nonRopePath, continuation);
        ValueFromBlock nonRopeLength = m_out.anchor(m_out.load32NonNegative(m_out.loadPtr(string, m_heaps.JSString_value), m_heaps.StringImpl_length));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setInt32(m_out.phi(Int32, ropeLength, nonRopeLength));
        return;
    }

    case Array::DirectArguments: {
        LValue arguments = lowCell(m_node->child1());
        // The fast length read is only valid while mappedArguments is null;
        // otherwise take the exotic-object exit.
        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.notNull(m_out.loadPtr(arguments, m_heaps.DirectArguments_mappedArguments)));
        setInt32(m_out.load32NonNegative(arguments, m_heaps.DirectArguments_length));
        return;
    }

    case Array::ScopedArguments: {
        LValue arguments = lowCell(m_node->child1());
        LValue storage = m_out.loadPtr(arguments, m_heaps.ScopedArguments_storage);
        // If the storage's overrodeThings flag is set, the cached totalLength
        // cannot be trusted; exit instead.
        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.notZero32(m_out.load8ZeroExt32(storage, m_heaps.ScopedArguments_Storage_overrodeThings)));
        setInt32(m_out.load32NonNegative(storage, m_heaps.ScopedArguments_Storage_totalLength));
        return;
    }

    default:
        // All typed-array views share a single length field.
        if (m_node->arrayMode().isSomeTypedArrayView()) {
            setInt32(
                m_out.load32NonNegative(lowCell(m_node->child1()), m_heaps.JSArrayBufferView_length));
            return;
        }

        DFG_CRASH(m_graph, m_node, "Bad array type");
        return;
    }
}
4092
4093 void compileGetVectorLength()
4094 {
4095 switch (m_node->arrayMode().type()) {
4096 case Array::ArrayStorage:
4097 case Array::SlowPutArrayStorage:
4098 setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.ArrayStorage_vectorLength));
4099 return;
4100 default:
4101 return;
4102 }
4103 }
4104
4105 void compileCheckInBounds()
4106 {
4107 speculate(
4108 OutOfBounds, noValue(), 0,
4109 m_out.aboveOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
4110
4111 // Even though we claim to have JSValue result, no user of us should
4112 // depend on our value. Users of this node just need to maintain that
4113 // we dominate them.
4114 }
4115
void compileGetByVal()
{
    // Lowers GetByVal for every array mode. Fast paths load directly from the
    // lowered storage; out-of-bounds and hole cases fall back to VM calls or
    // OSR exits depending on the mode.
    switch (m_node->arrayMode().type()) {
    case Array::Int32:
    case Array::Contiguous: {
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

        IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
            m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;

        LValue base = lowCell(m_graph.varArgChild(m_node, 0));

        if (m_node->arrayMode().isInBounds()) {
            // In bounds: a zero (empty) slot marks a hole. Sane-chain mode
            // maps holes to undefined; otherwise we exit.
            LValue result = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
            LValue isHole = m_out.isZero64(result);
            if (m_node->arrayMode().isSaneChain()) {
                DFG_ASSERT(
                    m_graph, m_node, m_node->arrayMode().type() == Array::Contiguous, m_node->arrayMode().type());
                result = m_out.select(
                    isHole, m_out.constInt64(JSValue::encode(jsUndefined())), result);
            } else
                speculate(LoadFromHole, noValue(), 0, isHole);
            setJSValue(result);
            return;
        }

        LBasicBlock fastCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Out-of-bounds indices go to the slow (VM call) path.
        m_out.branch(
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
            rarely(slowCase), usually(fastCase));

        LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);

        LValue fastResultValue = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        // Holes (zero) also take the slow path.
        m_out.branch(
            m_out.isZero64(fastResultValue), rarely(slowCase), usually(continuation));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
        return;
    }

    case Array::Double: {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

        IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;

        if (m_node->arrayMode().isInBounds()) {
            // Double arrays mark holes with a value that compares unequal to
            // itself (NaN-like); exit on that unless in sane-chain mode.
            LValue result = m_out.loadDouble(
                baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));

            if (!m_node->arrayMode().isSaneChain()) {
                speculate(
                    LoadFromHole, noValue(), 0,
                    m_out.doubleNotEqualOrUnordered(result, result));
            }
            setDouble(result);
            break;
        }

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock boxPath = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
            rarely(slowCase), usually(inBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, boxPath);
        LValue doubleValue = m_out.loadDouble(
            baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
        // Holes (self-unequal doubles) divert to the slow path.
        m_out.branch(
            m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue),
            rarely(slowCase), usually(boxPath));

        m_out.appendTo(boxPath, slowCase);
        ValueFromBlock fastResult = m_out.anchor(boxDouble(doubleValue));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
        return;
    }

    case Array::Undecided: {
        // Undecided arrays have no storage; any non-negative index yields
        // undefined, and negative indices exit.
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

        speculate(OutOfBounds, noValue(), m_node, m_out.lessThan(index, m_out.int32Zero));
        setJSValue(m_out.constInt64(ValueUndefined));
        return;
    }

    case Array::DirectArguments: {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

        // The fast path is only valid while mappedArguments is null.
        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.notNull(m_out.loadPtr(base, m_heaps.DirectArguments_mappedArguments)));

        LValue length = m_out.load32NonNegative(base, m_heaps.DirectArguments_length);
        auto isOutOfBounds = m_out.aboveOrEqual(index, length);
        if (m_node->arrayMode().isInBounds()) {
            // In-bounds mode: exit on out-of-bounds, then load straight from
            // the inline storage.
            speculate(OutOfBounds, noValue(), nullptr, isOutOfBounds);
            TypedPointer address = m_out.baseIndex(
                m_heaps.DirectArguments_storage, base, m_out.zeroExtPtr(index));
            setJSValue(m_out.load64(address));
            return;
        }

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isOutOfBounds, rarely(slowCase), usually(inBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, slowCase);
        TypedPointer address = m_out.baseIndex(
            m_heaps.DirectArguments_storage,
            base,
            m_out.zeroExt(index, pointerType()));
        ValueFromBlock fastResult = m_out.anchor(m_out.load64(address));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
        return;
    }

    case Array::ScopedArguments: {
        // Scoped arguments split storage: named parameters resolve through
        // the table into the scope; overflow arguments live in the storage.
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

        LValue storage = m_out.loadPtr(base, m_heaps.ScopedArguments_storage);
        LValue totalLength = m_out.load32NonNegative(
            storage, m_heaps.ScopedArguments_Storage_totalLength);
        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.aboveOrEqual(index, totalLength));

        LValue table = m_out.loadPtr(base, m_heaps.ScopedArguments_table);
        LValue namedLength = m_out.load32(table, m_heaps.ScopedArgumentsTable_length);

        LBasicBlock namedCase = m_out.newBlock();
        LBasicBlock overflowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            m_out.aboveOrEqual(index, namedLength), unsure(overflowCase), unsure(namedCase));

        LBasicBlock lastNext = m_out.appendTo(namedCase, overflowCase);

        LValue scope = m_out.loadPtr(base, m_heaps.ScopedArguments_scope);
        LValue arguments = m_out.loadPtr(table, m_heaps.ScopedArgumentsTable_arguments);

        // Map the index to a scope offset via the table.
        TypedPointer address = m_out.baseIndex(
            m_heaps.scopedArgumentsTableArguments, arguments, m_out.zeroExtPtr(index));
        LValue scopeOffset = m_out.load32(address);

        // An invalid scope offset means this slot cannot be read here.
        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.equal(scopeOffset, m_out.constInt32(ScopeOffset::invalidOffset)));

        address = m_out.baseIndex(
            m_heaps.JSLexicalEnvironment_variables, scope, m_out.zeroExtPtr(scopeOffset));
        ValueFromBlock namedResult = m_out.anchor(m_out.load64(address));
        m_out.jump(continuation);

        m_out.appendTo(overflowCase, continuation);

        address = m_out.baseIndex(
            m_heaps.ScopedArguments_Storage_storage, storage,
            m_out.zeroExtPtr(m_out.sub(index, namedLength)));
        LValue overflowValue = m_out.load64(address);
        // Zero marks a hole in the overflow storage.
        speculate(ExoticObjectMode, noValue(), nullptr, m_out.isZero64(overflowValue));
        ValueFromBlock overflowResult = m_out.anchor(overflowValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        LValue result = m_out.phi(Int64, namedResult, overflowResult);
        // Mask the loaded value against the bounds check (presumably a
        // speculative-execution hardening — see preciseIndexMask32's
        // definition to confirm).
        result = preciseIndexMask32(result, index, totalLength);

        setJSValue(result);
        return;
    }

    case Array::Generic: {
        // Generic mode: pick the most specific VM operation the proven use
        // kinds allow.
        if (m_graph.varArgChild(m_node, 0).useKind() == ObjectUse) {
            if (m_graph.varArgChild(m_node, 1).useKind() == StringUse) {
                setJSValue(vmCall(
                    Int64, m_out.operation(operationGetByValObjectString), m_callFrame,
                    lowObject(m_graph.varArgChild(m_node, 0)), lowString(m_graph.varArgChild(m_node, 1))));
                return;
            }

            if (m_graph.varArgChild(m_node, 1).useKind() == SymbolUse) {
                setJSValue(vmCall(
                    Int64, m_out.operation(operationGetByValObjectSymbol), m_callFrame,
                    lowObject(m_graph.varArgChild(m_node, 0)), lowSymbol(m_graph.varArgChild(m_node, 1))));
                return;
            }
        }
        setJSValue(vmCall(
            Int64, m_out.operation(operationGetByVal), m_callFrame,
            lowJSValue(m_graph.varArgChild(m_node, 0)), lowJSValue(m_graph.varArgChild(m_node, 1))));
        return;
    }

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

        IndexedAbstractHeap& heap = m_heaps.ArrayStorage_vector;

        if (m_node->arrayMode().isInBounds()) {
            // In bounds: zero marks a hole; exit on it.
            LValue result = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
            speculate(LoadFromHole, noValue(), 0, m_out.isZero64(result));
            setJSValue(result);
            break;
        }

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Bounds are checked against the vector length here, not the public
        // length.
        m_out.branch(
            m_out.aboveOrEqual(index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength)),
            rarely(slowCase), usually(inBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, slowCase);
        LValue result = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
        ValueFromBlock fastResult = m_out.anchor(result);
        // Holes (zero) also take the slow path.
        m_out.branch(
            m_out.isZero64(result),
            rarely(slowCase), usually(continuation));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
        return;
    }

    case Array::String: {
        // Delegate string indexing to the shared char-at lowering.
        compileStringCharAt();
        return;
    }

    case Array::Int8Array:
    case Array::Int16Array:
    case Array::Int32Array:
    case Array::Uint8Array:
    case Array::Uint8ClampedArray:
    case Array::Uint16Array:
    case Array::Uint32Array:
    case Array::Float32Array:
    case Array::Float64Array: {
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

        TypedArrayType type = m_node->arrayMode().typedArrayType();
        ASSERT(isTypedView(type));
        {
            TypedPointer pointer = pointerIntoTypedArray(storage, index, type);

            if (isInt(type)) {
                LValue result = loadFromIntTypedArray(pointer, type);
                bool canSpeculate = true;
                setIntTypedArrayLoadResult(result, type, canSpeculate);
                return;
            }

            ASSERT(isFloat(type));

            LValue result;
            switch (type) {
            case TypeFloat32:
                // Float32 loads widen to double before entering the JS value
                // domain.
                result = m_out.floatToDouble(m_out.loadFloat(pointer));
                break;
            case TypeFloat64:
                result = m_out.loadDouble(pointer);
                break;
            default:
                DFG_CRASH(m_graph, m_node, "Bad typed array type");
            }

            setDouble(result);
            return;
        }
    }

    case Array::AnyTypedArray:
    case Array::ForceExit:
    case Array::SelectUsingArguments:
    case Array::SelectUsingPredictions:
    case Array::Unprofiled:
        // These modes should have been resolved before FTL lowering.
        DFG_CRASH(m_graph, m_node, "Bad array type");
        return;
    }
}
4447
void compileGetMyArgumentByVal()
{
    // Lowers GetMyArgumentByVal / GetMyArgumentByValOutOfBounds: read argument
    // #index of the (possibly inlined) call frame, accounting for
    // numberOfArgumentsToSkip. The OutOfBounds variant yields undefined instead
    // of exiting when the index is invalid.
    InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();

    LValue originalIndex = lowInt32(m_node->child2());

    // Argument count is a compile-time constant for non-varargs inline
    // frames; otherwise it must be loaded from the frame slot.
    LValue numberOfArgsIncludingThis;
    if (inlineCallFrame && !inlineCallFrame->isVarargs())
        numberOfArgsIncludingThis = m_out.constInt32(inlineCallFrame->argumentCountIncludingThis);
    else {
        VirtualRegister argumentCountRegister = AssemblyHelpers::argumentCount(inlineCallFrame);
        numberOfArgsIncludingThis = m_out.load32(payloadFor(argumentCountRegister));
    }

    LValue numberOfArgs = m_out.sub(numberOfArgsIncludingThis, m_out.int32One);
    LValue indexToCheck = originalIndex;
    LValue numberOfArgumentsToSkip = m_out.int32Zero;
    if (m_node->numberOfArgumentsToSkip()) {
        // Skipped arguments shift the effective index; the add must not
        // overflow int32, so it is a speculated add.
        numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
        CheckValue* check = m_out.speculateAdd(indexToCheck, numberOfArgumentsToSkip);
        blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
        indexToCheck = check;
    }

    // Out of bounds if past the last argument or within the skipped prefix.
    LValue isOutOfBounds = m_out.bitOr(m_out.aboveOrEqual(indexToCheck, numberOfArgs), m_out.below(indexToCheck, numberOfArgumentsToSkip));
    LBasicBlock continuation = nullptr;
    LBasicBlock lastNext = nullptr;
    ValueFromBlock slowResult;
    if (m_node->op() == GetMyArgumentByValOutOfBounds) {
        // OutOfBounds variant: branch to the continuation with undefined
        // rather than exiting.
        LBasicBlock normalCase = m_out.newBlock();
        continuation = m_out.newBlock();

        slowResult = m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined())));
        m_out.branch(isOutOfBounds, unsure(continuation), unsure(normalCase));

        lastNext = m_out.appendTo(normalCase, continuation);
    } else
        speculate(OutOfBounds, noValue(), nullptr, isOutOfBounds);

    // +1 steps past the |this| slot when indexing argument registers.
    LValue index = m_out.add(indexToCheck, m_out.int32One);

    // base stays null when an inline frame provably has no non-this
    // arguments, in which case the result is always undefined.
    TypedPointer base;
    if (inlineCallFrame) {
        if (inlineCallFrame->argumentCountIncludingThis > 1)
            base = addressFor(inlineCallFrame->argumentsWithFixup[0].virtualRegister());
    } else
        base = addressFor(virtualRegisterForArgument(0));

    LValue result;
    if (base) {
        LValue pointer = m_out.baseIndex(
            base.value(), m_out.zeroExt(index, pointerType()), ScaleEight);
        result = m_out.load64(TypedPointer(m_heaps.variables.atAnyIndex(), pointer));
        // Mask the loaded value against the bounds check (presumably a
        // speculative-execution hardening — see preciseIndexMask32's
        // definition to confirm).
        result = preciseIndexMask32(result, indexToCheck, numberOfArgs);
    } else
        result = m_out.constInt64(JSValue::encode(jsUndefined()));

    if (m_node->op() == GetMyArgumentByValOutOfBounds) {
        ValueFromBlock normalResult = m_out.anchor(result);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        result = m_out.phi(Int64, slowResult, normalResult);
    }

    setJSValue(result);
}
4515
4516 void compilePutByVal()
4517 {
4518 Edge child1 = m_graph.varArgChild(m_node, 0);
4519 Edge child2 = m_graph.varArgChild(m_node, 1);
4520 Edge child3 = m_graph.varArgChild(m_node, 2);
4521 Edge child4 = m_graph.varArgChild(m_node, 3);
4522 Edge child5 = m_graph.varArgChild(m_node, 4);
4523
4524 ArrayMode arrayMode = m_node->arrayMode().modeForPut();
4525 switch (arrayMode.type()) {
4526 case Array::Generic: {
4527 if (child1.useKind() == CellUse) {
4528 V_JITOperation_ECCJ operation = nullptr;
4529 if (child2.useKind() == StringUse) {
4530 if (m_node->op() == PutByValDirect) {
4531 if (m_graph.isStrictModeFor(m_node->origin.semantic))
4532 operation = operationPutByValDirectCellStringStrict;
4533 else
4534 operation = operationPutByValDirectCellStringNonStrict;
4535 } else {
4536 if (m_graph.isStrictModeFor(m_node->origin.semantic))
4537 operation = operationPutByValCellStringStrict;
4538 else
4539 operation = operationPutByValCellStringNonStrict;
4540 }
4541 vmCall(Void, m_out.operation(operation), m_callFrame, lowCell(child1), lowString(child2), lowJSValue(child3));
4542 return;
4543 }
4544
4545 if (child2.useKind() == SymbolUse) {
4546 if (m_node->op() == PutByValDirect) {
4547 if (m_graph.isStrictModeFor(m_node->origin.semantic))
4548 operation = operationPutByValDirectCellSymbolStrict;
4549 else
4550 operation = operationPutByValDirectCellSymbolNonStrict;
4551 } else {
4552 if (m_graph.isStrictModeFor(m_node->origin.semantic))
4553 operation = operationPutByValCellSymbolStrict;
4554 else
4555 operation = operationPutByValCellSymbolNonStrict;
4556 }
4557 vmCall(Void, m_out.operation(operation), m_callFrame, lowCell(child1), lowSymbol(child2), lowJSValue(child3));
4558 return;
4559 }
4560 }
4561
4562 V_JITOperation_EJJJ operation;
4563 if (m_node->op() == PutByValDirect) {
4564 if (m_graph.isStrictModeFor(m_node->origin.semantic))
4565 operation = operationPutByValDirectStrict;
4566 else
4567 operation = operationPutByValDirectNonStrict;
4568 } else {
4569 if (m_graph.isStrictModeFor(m_node->origin.semantic))
4570 operation = operationPutByValStrict;
4571 else
4572 operation = operationPutByValNonStrict;
4573 }
4574
4575 vmCall(
4576 Void, m_out.operation(operation), m_callFrame,
4577 lowJSValue(child1), lowJSValue(child2), lowJSValue(child3));
4578 return;
4579 }
4580
4581 default:
4582 break;
4583 }
4584
4585 LValue base = lowCell(child1);
4586 LValue index = lowInt32(child2);
4587 LValue storage = lowStorage(child4);
4588
4589 switch (arrayMode.type()) {
4590 case Array::Int32:
4591 case Array::Double:
4592 case Array::Contiguous: {
4593 LBasicBlock continuation = m_out.newBlock();
4594 LBasicBlock outerLastNext = m_out.appendTo(m_out.m_block, continuation);
4595
4596 switch (arrayMode.type()) {
4597 case Array::Int32:
4598 case Array::Contiguous: {
4599 LValue value = lowJSValue(child3, ManualOperandSpeculation);
4600
4601 if (arrayMode.type() == Array::Int32)
4602 FTL_TYPE_CHECK(jsValueValue(value), child3, SpecInt32Only, isNotInt32(value));
4603
4604 TypedPointer elementPointer = m_out.baseIndex(
4605 arrayMode.type() == Array::Int32 ?
4606 m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties,
4607 storage, m_out.zeroExtPtr(index), provenValue(child2));
4608
4609 if (m_node->op() == PutByValAlias) {
4610 m_out.store64(value, elementPointer);
4611 break;
4612 }
4613
4614 contiguousPutByValOutOfBounds(
4615 m_graph.isStrictModeFor(m_node->origin.semantic)
4616 ? (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsStrict)
4617 : (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsNonStrict : operationPutByValBeyondArrayBoundsNonStrict),
4618 base, storage, index, value, continuation);
4619
4620 m_out.store64(value, elementPointer);
4621 break;
4622 }
4623
4624 case Array::Double: {
4625 LValue value = lowDouble(child3);
4626
4627 FTL_TYPE_CHECK(
4628 doubleValue(value), child3, SpecDoubleReal,
4629 m_out.doubleNotEqualOrUnordered(value, value));
4630
4631 TypedPointer elementPointer = m_out.baseIndex(
4632 m_heaps.indexedDoubleProperties, storage, m_out.zeroExtPtr(index),
4633 provenValue(child2));
4634
4635 if (m_node->op() == PutByValAlias) {
4636 m_out.storeDouble(value, elementPointer);
4637 break;
4638 }
4639
4640 contiguousPutByValOutOfBounds(
4641 m_graph.isStrictModeFor(m_node->origin.semantic)
4642 ? (m_node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsStrict)
4643 : (m_node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsNonStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict),
4644 base, storage, index, value, continuation);
4645
4646 m_out.storeDouble(value, elementPointer);
4647 break;
4648 }
4649
4650 default:
4651 DFG_CRASH(m_graph, m_node, "Bad array type");
4652 }
4653
4654 m_out.jump(continuation);
4655 m_out.appendTo(continuation, outerLastNext);
4656 return;
4657 }
4658
4659 case Array::ArrayStorage:
4660 case Array::SlowPutArrayStorage: {
4661 LValue value = lowJSValue(child3);
4662
4663 TypedPointer elementPointer = m_out.baseIndex(
4664 m_heaps.ArrayStorage_vector, storage, m_out.zeroExtPtr(index),
4665 provenValue(child2));
4666
4667 if (m_node->op() == PutByValAlias) {
4668 m_out.store64(value, elementPointer);
4669 return;
4670 }
4671
4672 if (arrayMode.isInBounds()) {
4673 speculate(StoreToHole, noValue(), 0, m_out.isZero64(m_out.load64(elementPointer)));
4674 m_out.store64(value, elementPointer);
4675 return;
4676 }
4677
4678 LValue isOutOfBounds = m_out.aboveOrEqual(
4679 index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength));
4680
4681 auto slowPathFunction = m_graph.isStrictModeFor(m_node->origin.semantic)
4682 ? (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsStrict)
4683 : (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsNonStrict : operationPutByValBeyondArrayBoundsNonStrict);
4684 if (!arrayMode.isOutOfBounds()) {
4685 speculate(OutOfBounds, noValue(), 0, isOutOfBounds);
4686 isOutOfBounds = m_out.booleanFalse;
4687 }
4688
4689 LBasicBlock inBoundCase = m_out.newBlock();
4690 LBasicBlock slowCase = m_out.newBlock();
4691 LBasicBlock holeCase = m_out.newBlock();
4692 LBasicBlock doStoreCase = m_out.newBlock();
4693 LBasicBlock lengthUpdateCase = m_out.newBlock();
4694 LBasicBlock continuation = m_out.newBlock();
4695
4696 m_out.branch(isOutOfBounds, rarely(slowCase), usually(inBoundCase));
4697
4698 LBasicBlock lastNext = m_out.appendTo(slowCase, inBoundCase);
4699 vmCall(
4700 Void, m_out.operation(slowPathFunction),
4701 m_callFrame, base, index, value);
4702 m_out.jump(continuation);
4703
4704
4705 if (arrayMode.isSlowPut()) {
4706 m_out.appendTo(inBoundCase, doStoreCase);
4707 m_out.branch(m_out.isZero64(m_out.load64(elementPointer)), rarely(slowCase), usually(doStoreCase));
4708 } else {
4709 m_out.appendTo(inBoundCase, holeCase);
4710 m_out.branch(m_out.isZero64(m_out.load64(elementPointer)), rarely(holeCase), usually(doStoreCase));
4711
4712 m_out.appendTo(holeCase, lengthUpdateCase);
4713 m_out.store32(
4714 m_out.add(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.int32One),
4715 storage, m_heaps.ArrayStorage_numValuesInVector);
4716 m_out.branch(
4717 m_out.below(
4718 index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_publicLength)),
4719 unsure(doStoreCase), unsure(lengthUpdateCase));
4720
4721 m_out.appendTo(lengthUpdateCase, doStoreCase);
4722 m_out.store32(
4723 m_out.add(index, m_out.int32One),
4724 storage, m_heaps.ArrayStorage_publicLength);
4725 m_out.jump(doStoreCase);
4726 }
4727
4728 m_out.appendTo(doStoreCase, continuation);
4729 m_out.store64(value, elementPointer);
4730 m_out.jump(continuation);
4731
4732 m_out.appendTo(continuation, lastNext);
4733 return;
4734 }
4735
4736 case Array::Int8Array:
4737 case Array::Int16Array:
4738 case Array::Int32Array:
4739 case Array::Uint8Array:
4740 case Array::Uint8ClampedArray:
4741 case Array::Uint16Array:
4742 case Array::Uint32Array:
4743 case Array::Float32Array:
4744 case Array::Float64Array: {
4745 TypedArrayType type = arrayMode.typedArrayType();
4746
4747 ASSERT(isTypedView(type));
4748 {
4749 TypedPointer pointer = TypedPointer(
4750 m_heaps.typedArrayProperties,
4751 m_out.add(
4752 storage,
4753 m_out.shl(
4754 m_out.zeroExt(index, pointerType()),
4755 m_out.constIntPtr(logElementSize(type)))));
4756
4757 LValue valueToStore;
4758
4759 if (isInt(type)) {
4760 LValue intValue = getIntTypedArrayStoreOperand(child3, isClamped(type));
4761
4762 valueToStore = intValue;
4763 } else /* !isInt(type) */ {
4764 LValue value = lowDouble(child3);
4765 switch (type) {
4766 case TypeFloat32:
4767 valueToStore = m_out.doubleToFloat(value);
4768 break;
4769 case TypeFloat64:
4770 valueToStore = value;
4771 break;
4772 default:
4773 DFG_CRASH(m_graph, m_node, "Bad typed array type");
4774 }
4775 }
4776
4777 if (arrayMode.isInBounds() || m_node->op() == PutByValAlias)
4778 m_out.store(valueToStore, pointer, storeType(type));
4779 else {
4780 LBasicBlock isInBounds = m_out.newBlock();
4781 LBasicBlock isOutOfBounds = m_out.newBlock();
4782 LBasicBlock continuation = m_out.newBlock();
4783
4784 m_out.branch(
4785 m_out.aboveOrEqual(index, lowInt32(child5)),
4786 unsure(isOutOfBounds), unsure(isInBounds));
4787
4788 LBasicBlock lastNext = m_out.appendTo(isInBounds, isOutOfBounds);
4789 m_out.store(valueToStore, pointer, storeType(type));
4790 m_out.jump(continuation);
4791
4792 m_out.appendTo(isOutOfBounds, continuation);
4793 speculateTypedArrayIsNotNeutered(base);
4794 m_out.jump(continuation);
4795
4796 m_out.appendTo(continuation, lastNext);
4797 }
4798
4799 return;
4800 }
4801 }
4802
4803 case Array::AnyTypedArray:
4804 case Array::String:
4805 case Array::DirectArguments:
4806 case Array::ForceExit:
4807 case Array::Generic:
4808 case Array::ScopedArguments:
4809 case Array::SelectUsingArguments:
4810 case Array::SelectUsingPredictions:
4811 case Array::Undecided:
4812 case Array::Unprofiled:
4813 DFG_CRASH(m_graph, m_node, "Bad array type");
4814 break;
4815 }
4816 }
4817
4818 void compilePutAccessorById()
4819 {
4820 LValue base = lowCell(m_node->child1());
4821 LValue accessor = lowCell(m_node->child2());
4822 auto uid = m_graph.identifiers()[m_node->identifierNumber()];
4823 vmCall(
4824 Void,
4825 m_out.operation(m_node->op() == PutGetterById ? operationPutGetterById : operationPutSetterById),
4826 m_callFrame, base, m_out.constIntPtr(uid), m_out.constInt32(m_node->accessorAttributes()), accessor);
4827 }
4828
4829 void compilePutGetterSetterById()
4830 {
4831 LValue base = lowCell(m_node->child1());
4832 LValue getter = lowJSValue(m_node->child2());
4833 LValue setter = lowJSValue(m_node->child3());
4834 auto uid = m_graph.identifiers()[m_node->identifierNumber()];
4835 vmCall(
4836 Void, m_out.operation(operationPutGetterSetter),
4837 m_callFrame, base, m_out.constIntPtr(uid), m_out.constInt32(m_node->accessorAttributes()), getter, setter);
4838
4839 }
4840
4841 void compilePutAccessorByVal()
4842 {
4843 LValue base = lowCell(m_node->child1());
4844 LValue subscript = lowJSValue(m_node->child2());
4845 LValue accessor = lowCell(m_node->child3());
4846 vmCall(
4847 Void,
4848 m_out.operation(m_node->op() == PutGetterByVal ? operationPutGetterByVal : operationPutSetterByVal),
4849 m_callFrame, base, subscript, m_out.constInt32(m_node->accessorAttributes()), accessor);
4850 }
4851
4852 void compileDeleteById()
4853 {
4854 LValue base = lowJSValue(m_node->child1());
4855 auto uid = m_graph.identifiers()[m_node->identifierNumber()];
4856 setBoolean(m_out.notZero64(vmCall(Int64, m_out.operation(operationDeleteById), m_callFrame, base, m_out.constIntPtr(uid))));
4857 }
4858
4859 void compileDeleteByVal()
4860 {
4861 LValue base = lowJSValue(m_node->child1());
4862 LValue subscript = lowJSValue(m_node->child2());
4863 setBoolean(m_out.notZero64(vmCall(Int64, m_out.operation(operationDeleteByVal), m_callFrame, base, subscript)));
4864 }
4865
    void compileArrayPush()
    {
        // Lowers ArrayPush. Vararg children: 0 = storage (butterfly), 1 = the base
        // array cell, 2.. = the element(s) being pushed. The node's result is the
        // array's new length, boxed as an int32 JSValue.
        LValue base = lowCell(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 0));
        unsigned elementOffset = 2;
        unsigned elementCount = m_node->numChildren() - elementOffset;

        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Contiguous:
        case Array::Double: {
            IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());

            if (elementCount == 1) {
                // Single-element push: if publicLength is still below vectorLength,
                // store at index publicLength and bump it; otherwise call out to
                // the VM (which handles reallocation etc.).
                LValue value;
                Output::StoreType storeType;

                Edge& element = m_graph.varArgChild(m_node, elementOffset);
                speculate(element);
                if (m_node->arrayMode().type() != Array::Double) {
                    value = lowJSValue(element, ManualOperandSpeculation);
                    storeType = Output::Store64;
                } else {
                    value = lowDouble(element);
                    storeType = Output::StoreDouble;
                }

                LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);

                LBasicBlock fastPath = m_out.newBlock();
                LBasicBlock slowPath = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.aboveOrEqual(
                        prevLength, m_out.load32(storage, m_heaps.Butterfly_vectorLength)),
                    unsure(slowPath), unsure(fastPath));

                LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
                m_out.store(
                    value, m_out.baseIndex(heap, storage, m_out.zeroExtPtr(prevLength)), storeType);
                LValue newLength = m_out.add(prevLength, m_out.int32One);
                m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);

                ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));
                m_out.jump(continuation);

                m_out.appendTo(slowPath, continuation);
                LValue operation;
                if (m_node->arrayMode().type() != Array::Double)
                    operation = m_out.operation(operationArrayPush);
                else
                    operation = m_out.operation(operationArrayPushDouble);
                ValueFromBlock slowResult = m_out.anchor(
                    vmCall(Int64, operation, m_callFrame, value, base));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setJSValue(m_out.phi(Int64, fastResult, slowResult));
                return;
            }

            // Multi-element push. Speculate on every element up front so any OSR
            // exit happens before we mutate the array.
            for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
                Edge element = m_graph.varArgChild(m_node, elementIndex + elementOffset);
                speculate(element);
            }

            LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);
            LValue newLength = m_out.add(prevLength, m_out.constInt32(elementCount));

            LBasicBlock fastPath = m_out.newBlock();
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock setup = m_out.newBlock();
            LBasicBlock slowCallPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue beyondVectorLength = m_out.above(newLength, m_out.load32(storage, m_heaps.Butterfly_vectorLength));

            m_out.branch(beyondVectorLength, unsure(slowPath), unsure(fastPath));

            // Fast path: all elements fit. Bump publicLength and point `buffer`
            // (via the phi in `setup`) at the first new butterfly slot.
            LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
            m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
            ValueFromBlock fastBufferResult = m_out.anchor(m_out.baseIndex(storage, m_out.zeroExtPtr(prevLength), ScaleEight));
            m_out.jump(setup);

            // Slow path: stage the elements in a VM scratch buffer instead; the
            // active-length store keeps the buffer contents visible to GC until
            // the slow call below clears it.
            m_out.appendTo(slowPath, setup);
            size_t scratchSize = sizeof(EncodedJSValue) * elementCount;
            static_assert(sizeof(EncodedJSValue) == sizeof(double), "");
            ASSERT(scratchSize);
            ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
            m_out.storePtr(m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            ValueFromBlock slowBufferResult = m_out.anchor(m_out.constIntPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())));
            m_out.jump(setup);

            // Both paths converge: fill `buffer` (butterfly slots or scratch
            // buffer) with the pushed values.
            m_out.appendTo(setup, slowCallPath);
            LValue buffer = m_out.phi(pointerType(), fastBufferResult, slowBufferResult);
            for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
                Edge& element = m_graph.varArgChild(m_node, elementIndex + elementOffset);

                LValue value;
                Output::StoreType storeType;
                if (m_node->arrayMode().type() != Array::Double) {
                    value = lowJSValue(element, ManualOperandSpeculation);
                    storeType = Output::Store64;
                } else {
                    value = lowDouble(element);
                    storeType = Output::StoreDouble;
                }

                m_out.store(value, m_out.baseIndex(heap, buffer, m_out.constInt32(elementIndex), jsNumber(elementIndex)), storeType);
            }
            ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));

            // Re-test the same condition to decide whether the slow VM call is
            // needed to consume the scratch buffer.
            m_out.branch(beyondVectorLength, unsure(slowCallPath), unsure(continuation));

            m_out.appendTo(slowCallPath, continuation);
            LValue operation;
            if (m_node->arrayMode().type() != Array::Double)
                operation = m_out.operation(operationArrayPushMultiple);
            else
                operation = m_out.operation(operationArrayPushDoubleMultiple);
            ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, operation, m_callFrame, base, buffer, m_out.constInt32(elementCount)));
            m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        case Array::ArrayStorage: {
            // This ensures that the result of ArrayPush is Int32 in AI.
            int32_t largestPositiveInt32Length = 0x7fffffff - elementCount;

            LValue prevLength = m_out.load32(storage, m_heaps.ArrayStorage_publicLength);
            // Refuse to handle bizarre lengths.
            speculate(Uncountable, noValue(), nullptr, m_out.above(prevLength, m_out.constInt32(largestPositiveInt32Length)));

            if (elementCount == 1) {
                Edge& element = m_graph.varArgChild(m_node, elementOffset);

                LValue value = lowJSValue(element);

                LBasicBlock fastPath = m_out.newBlock();
                LBasicBlock slowPath = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.aboveOrEqual(
                        prevLength, m_out.load32(storage, m_heaps.ArrayStorage_vectorLength)),
                    rarely(slowPath), usually(fastPath));

                // Fast path: ArrayStorage tracks both publicLength and
                // numValuesInVector, so update both after the store.
                LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
                m_out.store64(
                    value, m_out.baseIndex(m_heaps.ArrayStorage_vector, storage, m_out.zeroExtPtr(prevLength)));
                LValue newLength = m_out.add(prevLength, m_out.int32One);
                m_out.store32(newLength, storage, m_heaps.ArrayStorage_publicLength);
                m_out.store32(
                    m_out.add(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.int32One),
                    storage, m_heaps.ArrayStorage_numValuesInVector);

                ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));
                m_out.jump(continuation);

                m_out.appendTo(slowPath, continuation);
                ValueFromBlock slowResult = m_out.anchor(
                    vmCall(Int64, m_out.operation(operationArrayPush), m_callFrame, value, base));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setJSValue(m_out.phi(Int64, fastResult, slowResult));
                return;
            }

            // Multi-element ArrayStorage push: same scratch-buffer scheme as the
            // contiguous case above, but indexed off ArrayStorage::vectorOffset().
            LValue newLength = m_out.add(prevLength, m_out.constInt32(elementCount));

            LBasicBlock fastPath = m_out.newBlock();
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock setup = m_out.newBlock();
            LBasicBlock slowCallPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue beyondVectorLength = m_out.above(newLength, m_out.load32(storage, m_heaps.ArrayStorage_vectorLength));

            m_out.branch(beyondVectorLength, rarely(slowPath), usually(fastPath));

            LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
            m_out.store32(newLength, storage, m_heaps.ArrayStorage_publicLength);
            m_out.store32(
                m_out.add(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.constInt32(elementCount)),
                storage, m_heaps.ArrayStorage_numValuesInVector);
            ValueFromBlock fastBufferResult = m_out.anchor(m_out.baseIndex(storage, m_out.zeroExtPtr(prevLength), ScaleEight, ArrayStorage::vectorOffset()));
            m_out.jump(setup);

            m_out.appendTo(slowPath, setup);
            size_t scratchSize = sizeof(EncodedJSValue) * elementCount;
            ASSERT(scratchSize);
            ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
            m_out.storePtr(m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            ValueFromBlock slowBufferResult = m_out.anchor(m_out.constIntPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())));
            m_out.jump(setup);

            m_out.appendTo(setup, slowCallPath);
            LValue buffer = m_out.phi(pointerType(), fastBufferResult, slowBufferResult);
            for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
                Edge& element = m_graph.varArgChild(m_node, elementIndex + elementOffset);

                LValue value = lowJSValue(element);
                m_out.store64(value, m_out.baseIndex(m_heaps.ArrayStorage_vector.atAnyIndex(), buffer, m_out.constIntPtr(elementIndex), ScaleEight));
            }
            ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));

            m_out.branch(beyondVectorLength, rarely(slowCallPath), usually(continuation));

            m_out.appendTo(slowCallPath, continuation);
            ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationArrayPushMultiple), m_callFrame, base, buffer, m_out.constInt32(elementCount)));
            m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        }
    }
5095
5096 std::pair<LValue, LValue> populateSliceRange(LValue start, LValue end, LValue length)
5097 {
5098 // end can be nullptr.
5099 ASSERT(start);
5100 ASSERT(length);
5101
5102 auto pickIndex = [&] (LValue index) {
5103 return m_out.select(m_out.greaterThanOrEqual(index, m_out.int32Zero),
5104 m_out.select(m_out.above(index, length), length, index),
5105 m_out.select(m_out.lessThan(m_out.add(length, index), m_out.int32Zero), m_out.int32Zero, m_out.add(length, index)));
5106 };
5107
5108 LValue endBoundary = length;
5109 if (end)
5110 endBoundary = pickIndex(end);
5111 LValue startIndex = pickIndex(start);
5112 return std::make_pair(startIndex, endBoundary);
5113 }
5114
    void compileArraySlice()
    {
        // Lowers ArraySlice. Vararg children: 0 = base array cell, optional 1/2 =
        // start/end indices, last = the source storage. Allocates the result
        // array inline and copies the selected range with an element-wise loop.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        LValue sourceStorage = lowStorage(m_graph.varArgChild(m_node, m_node->numChildren() - 1));
        LValue inputLength = m_out.load32(sourceStorage, m_heaps.Butterfly_publicLength);

        // With only (base, storage) children there are no start/end arguments:
        // slice the whole array.
        LValue startIndex = nullptr;
        LValue resultLength = nullptr;
        if (m_node->numChildren() == 2) {
            startIndex = m_out.constInt32(0);
            resultLength = inputLength;
        } else {
            LValue start = lowInt32(m_graph.varArgChild(m_node, 1));
            LValue end = nullptr;
            if (m_node->numChildren() != 3)
                end = lowInt32(m_graph.varArgChild(m_node, 2));

            auto range = populateSliceRange(start, end, inputLength);
            startIndex = range.first;
            LValue endBoundary = range.second;

            // An inverted range (start past end) produces an empty result.
            resultLength = m_out.select(m_out.belowOrEqual(startIndex, endBoundary),
                m_out.sub(endBoundary, startIndex),
                m_out.constInt32(0));
        }

        ArrayValues arrayResult;
        {
            LValue indexingType = m_out.load8ZeroExt32(lowCell(m_graph.varArgChild(m_node, 0)), m_heaps.JSCell_indexingTypeAndMisc);
            // We can ignore the writability of the cell since we won't write to the source.
            indexingType = m_out.bitAnd(indexingType, m_out.constInt32(AllWritableArrayTypesAndHistory));
            // When we emit an ArraySlice, we dominate the use of the array by a CheckStructure
            // to ensure the incoming array is one to be one of the original array structures
            // with one of the following indexing shapes: Int32, Contiguous, Double.
            LValue structure = m_out.select(
                m_out.equal(indexingType, m_out.constInt32(ArrayWithInt32)),
                weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithInt32))),
                m_out.select(m_out.equal(indexingType, m_out.constInt32(ArrayWithContiguous)),
                    weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithContiguous))),
                    weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithDouble)))));
            arrayResult = allocateJSArray(resultLength, resultLength, structure, indexingType, false, false);
        }

        // Copy loop: walks [startIndex, startIndex + resultLength) of the source
        // butterfly into [0, resultLength) of the freshly allocated butterfly,
        // one 64-bit element per iteration.
        LBasicBlock loop = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        resultLength = m_out.zeroExtPtr(resultLength);
        ValueFromBlock startLoadIndex = m_out.anchor(m_out.zeroExtPtr(startIndex));
        ValueFromBlock startStoreIndex = m_out.anchor(m_out.constIntPtr(0));

        // Skip the loop entirely when the result is empty.
        m_out.branch(
            m_out.below(m_out.constIntPtr(0), resultLength), unsure(loop), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(loop, continuation);
        LValue storeIndex = m_out.phi(pointerType(), startStoreIndex);
        LValue loadIndex = m_out.phi(pointerType(), startLoadIndex);
        LValue value = m_out.load64(m_out.baseIndex(m_heaps.root, sourceStorage, loadIndex, ScaleEight));
        m_out.store64(value, m_out.baseIndex(m_heaps.root, arrayResult.butterfly, storeIndex, ScaleEight));
        LValue nextStoreIndex = m_out.add(storeIndex, m_out.constIntPtr(1));
        m_out.addIncomingToPhi(storeIndex, m_out.anchor(nextStoreIndex));
        m_out.addIncomingToPhi(loadIndex, m_out.anchor(m_out.add(loadIndex, m_out.constIntPtr(1))));
        m_out.branch(
            m_out.below(nextStoreIndex, resultLength), unsure(loop), unsure(continuation));

        m_out.appendTo(continuation, lastNext);

        // Fence before publishing the new array so its initialized contents are
        // visible to the concurrent collector.
        mutatorFence();
        setJSValue(arrayResult.array);
    }
5185
    void compileArrayIndexOf()
    {
        // Lowers ArrayIndexOf. Vararg children: 0 = base array, 1 = the search
        // element, optional 2 = start index, last = storage. Result is the found
        // index as an int32, or -1.
        LValue storage = lowStorage(m_node->numChildren() == 3 ? m_graph.varArgChild(m_node, 2) : m_graph.varArgChild(m_node, 3));
        LValue length = m_out.load32(storage, m_heaps.Butterfly_publicLength);

        // Resolve the optional start index: negative values count back from the
        // end, and everything is clamped to [0, length].
        LValue startIndex;
        if (m_node->numChildren() == 4) {
            startIndex = lowInt32(m_graph.varArgChild(m_node, 2));
            startIndex = m_out.select(m_out.greaterThanOrEqual(startIndex, m_out.int32Zero),
                m_out.select(m_out.above(startIndex, length), length, startIndex),
                m_out.select(m_out.lessThan(m_out.add(length, startIndex), m_out.int32Zero), m_out.int32Zero, m_out.add(length, startIndex)));
        } else
            startIndex = m_out.int32Zero;

        Edge& searchElementEdge = m_graph.varArgChild(m_node, 1);
        switch (searchElementEdge.useKind()) {
        case Int32Use:
        case ObjectUse:
        case SymbolUse:
        case OtherUse:
        case DoubleRepUse: {
            // These use kinds can be compared with a raw bit (or double) compare,
            // so emit an inline scan loop over the butterfly.
            LBasicBlock loopHeader = m_out.newBlock();
            LBasicBlock loopBody = m_out.newBlock();
            LBasicBlock loopNext = m_out.newBlock();
            LBasicBlock notFound = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Lower the search element according to its use kind; the asserts
            // document which array mode each use kind is paired with.
            LValue searchElement;
            switch (searchElementEdge.useKind()) {
            case Int32Use:
                ASSERT(m_node->arrayMode().type() == Array::Int32);
                speculate(searchElementEdge);
                searchElement = lowJSValue(searchElementEdge, ManualOperandSpeculation);
                break;
            case ObjectUse:
                ASSERT(m_node->arrayMode().type() == Array::Contiguous);
                searchElement = lowObject(searchElementEdge);
                break;
            case SymbolUse:
                ASSERT(m_node->arrayMode().type() == Array::Contiguous);
                searchElement = lowSymbol(searchElementEdge);
                break;
            case OtherUse:
                ASSERT(m_node->arrayMode().type() == Array::Contiguous);
                speculate(searchElementEdge);
                searchElement = lowJSValue(searchElementEdge, ManualOperandSpeculation);
                break;
            case DoubleRepUse:
                ASSERT(m_node->arrayMode().type() == Array::Double);
                searchElement = lowDouble(searchElementEdge);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            startIndex = m_out.zeroExtPtr(startIndex);
            length = m_out.zeroExtPtr(length);

            ValueFromBlock initialStartIndex = m_out.anchor(startIndex);
            m_out.jump(loopHeader);

            LBasicBlock lastNext = m_out.appendTo(loopHeader, loopBody);
            LValue index = m_out.phi(pointerType(), initialStartIndex);
            m_out.branch(m_out.notEqual(index, length), unsure(loopBody), unsure(notFound));

            m_out.appendTo(loopBody, loopNext);
            ValueFromBlock foundResult = m_out.anchor(index);
            switch (searchElementEdge.useKind()) {
            case Int32Use: {
                // Empty value is ignored because of TagTypeNumber.
                LValue value = m_out.load64(m_out.baseIndex(m_heaps.indexedInt32Properties, storage, index));
                m_out.branch(m_out.equal(value, searchElement), unsure(continuation), unsure(loopNext));
                break;
            }
            case ObjectUse:
            case SymbolUse:
            case OtherUse: {
                // Empty value never matches against non-empty JS values.
                LValue value = m_out.load64(m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, index));
                m_out.branch(m_out.equal(value, searchElement), unsure(continuation), unsure(loopNext));
                break;
            }
            case DoubleRepUse: {
                // Empty value is ignored because of NaN.
                LValue value = m_out.loadDouble(m_out.baseIndex(m_heaps.indexedDoubleProperties, storage, index));
                m_out.branch(m_out.doubleEqual(value, searchElement), unsure(continuation), unsure(loopNext));
                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            m_out.appendTo(loopNext, notFound);
            LValue nextIndex = m_out.add(index, m_out.intPtrOne);
            m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
            m_out.jump(loopHeader);

            m_out.appendTo(notFound, continuation);
            ValueFromBlock notFoundResult = m_out.anchor(m_out.constIntPtr(-1));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setInt32(m_out.castToInt32(m_out.phi(pointerType(), notFoundResult, foundResult)));
            break;
        }

        case StringUse:
            // String comparison needs content equality, not bit equality, so
            // delegate to the VM.
            ASSERT(m_node->arrayMode().type() == Array::Contiguous);
            setInt32(vmCall(Int32, m_out.operation(operationArrayIndexOfString), m_callFrame, storage, lowString(searchElementEdge), startIndex));
            break;

        case UntypedUse:
            // Fully generic search element: call the VM helper that matches the
            // array's storage shape.
            switch (m_node->arrayMode().type()) {
            case Array::Double:
                setInt32(vmCall(Int32, m_out.operation(operationArrayIndexOfValueDouble), m_callFrame, storage, lowJSValue(searchElementEdge), startIndex));
                break;
            case Array::Int32:
            case Array::Contiguous:
                setInt32(vmCall(Int32, m_out.operation(operationArrayIndexOfValueInt32OrContiguous), m_callFrame, storage, lowJSValue(searchElementEdge), startIndex));
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            break;

        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
5319
5320
    void compileArrayPop()
    {
        // Lowers ArrayPop. child1 = base array cell, child2 = storage. Result is
        // the popped value, or undefined when the array is empty. Up to three
        // results feed the final phi: undefined, the fast-path value, and the
        // slow-path call result.
        LValue base = lowCell(m_node->child1());
        LValue storage = lowStorage(m_node->child2());

        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous: {
            IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());

            LBasicBlock fastCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);

            // Empty array: result is undefined, no storage mutation.
            Vector<ValueFromBlock, 3> results;
            results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
            m_out.branch(
                m_out.isZero32(prevLength), rarely(continuation), usually(fastCase));

            // Fast case: shrink publicLength, read the last element, and write a
            // hole in its place. If the slot held a hole (empty value / NaN for
            // doubles), fall through to the slow call.
            LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
            LValue newLength = m_out.sub(prevLength, m_out.int32One);
            m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
            TypedPointer pointer = m_out.baseIndex(heap, storage, m_out.zeroExtPtr(newLength));
            if (m_node->arrayMode().type() != Array::Double) {
                LValue result = m_out.load64(pointer);
                m_out.store64(m_out.int64Zero, pointer);
                results.append(m_out.anchor(result));
                m_out.branch(
                    m_out.notZero64(result), usually(continuation), rarely(slowCase));
            } else {
                // Double arrays use PNaN as the hole; a self-unequal load means
                // the slot was a hole.
                LValue result = m_out.loadDouble(pointer);
                m_out.store64(m_out.constInt64(bitwise_cast<int64_t>(PNaN)), pointer);
                results.append(m_out.anchor(boxDouble(result)));
                m_out.branch(
                    m_out.doubleEqual(result, result),
                    usually(continuation), rarely(slowCase));
            }

            m_out.appendTo(slowCase, continuation);
            results.append(m_out.anchor(vmCall(
                Int64, m_out.operation(operationArrayPopAndRecoverLength), m_callFrame, base)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, results));
            return;
        }

        case Array::ArrayStorage: {
            LBasicBlock vectorLengthCheckCase = m_out.newBlock();
            LBasicBlock popCheckCase = m_out.newBlock();
            LBasicBlock fastCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue prevLength = m_out.load32(storage, m_heaps.ArrayStorage_publicLength);

            // Empty array: result is undefined.
            Vector<ValueFromBlock, 3> results;
            results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
            m_out.branch(
                m_out.isZero32(prevLength), rarely(continuation), usually(vectorLengthCheckCase));

            // The last index must be inside the vector for the inline pop.
            LBasicBlock lastNext = m_out.appendTo(vectorLengthCheckCase, popCheckCase);
            LValue newLength = m_out.sub(prevLength, m_out.int32One);
            m_out.branch(
                m_out.aboveOrEqual(newLength, m_out.load32(storage, m_heaps.ArrayStorage_vectorLength)), rarely(slowCase), usually(popCheckCase));

            // A zero (empty value) in the slot is a hole; defer to the VM.
            m_out.appendTo(popCheckCase, fastCase);
            TypedPointer pointer = m_out.baseIndex(m_heaps.ArrayStorage_vector, storage, m_out.zeroExtPtr(newLength));
            LValue result = m_out.load64(pointer);
            m_out.branch(m_out.notZero64(result), usually(fastCase), rarely(slowCase));

            // Fast case: shrink publicLength, clear the slot, and decrement
            // numValuesInVector to keep ArrayStorage bookkeeping consistent.
            m_out.appendTo(fastCase, slowCase);
            m_out.store32(newLength, storage, m_heaps.ArrayStorage_publicLength);
            m_out.store64(m_out.int64Zero, pointer);
            m_out.store32(
                m_out.sub(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.int32One),
                storage, m_heaps.ArrayStorage_numValuesInVector);
            results.append(m_out.anchor(result));
            m_out.jump(continuation);

            m_out.appendTo(slowCase, continuation);
            results.append(m_out.anchor(vmCall(
                Int64, m_out.operation(operationArrayPop), m_callFrame, base)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, results));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        }
    }
5420
5421 void compilePushWithScope()
5422 {
5423 LValue parentScope = lowCell(m_node->child1());
5424 auto objectEdge = m_node->child2();
5425 if (objectEdge.useKind() == ObjectUse) {
5426 LValue object = lowNonNullObject(objectEdge);
5427 LValue result = vmCall(Int64, m_out.operation(operationPushWithScopeObject), m_callFrame, parentScope, object);
5428 setJSValue(result);
5429 } else {
5430 ASSERT(objectEdge.useKind() == UntypedUse);
5431 LValue object = lowJSValue(m_node->child2());
5432 LValue result = vmCall(Int64, m_out.operation(operationPushWithScope), m_callFrame, parentScope, object);
5433 setJSValue(result);
5434 }
5435 }
5436
    void compileCreateActivation()
    {
        // Lowers CreateActivation. child1 = the enclosing scope; the symbol
        // table and the variables' initialization value come from the node.
        LValue scope = lowCell(m_node->child1());
        SymbolTable* table = m_node->castOperand<SymbolTable*>();
        RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure());
        JSValue initializationValue = m_node->initializationValueForActivation();
        ASSERT(initializationValue.isUndefined() || initializationValue == jsTDZValue());
        // While the symbol table's singleton watchpoint is still valid, the VM
        // must observe the creation, so always take the VM call.
        if (table->singleton().isStillValid()) {
            LValue callResult = vmCall(
                Int64,
                m_out.operation(operationCreateActivationDirect), m_callFrame, weakStructure(structure),
                scope, weakPointer(table), m_out.constInt64(JSValue::encode(initializationValue)));
            setJSValue(callResult);
            return;
        }

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        // Fast path: inline-allocate the JSLexicalEnvironment; allocation
        // failure branches to slowPath.
        LValue fastObject = allocateObject<JSLexicalEnvironment>(
            JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);

        // We don't need memory barriers since we just fast-created the activation, so the
        // activation must be young.
        m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
        m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);

        // Initialize every scope variable (to undefined or the TDZ value).
        for (unsigned i = 0; i < table->scopeSize(); ++i) {
            m_out.store64(
                m_out.constInt64(JSValue::encode(initializationValue)),
                fastObject, m_heaps.JSLexicalEnvironment_variables[i]);
        }

        mutatorFence();

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        // Slow path: a lazily-generated call to operationCreateActivationDirect.
        m_out.appendTo(slowPath, continuation);
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationCreateActivationDirect, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(table),
                    CCallHelpers::TrustedImm64(JSValue::encode(initializationValue)));
            },
            scope);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
5494
    void compileNewFunction()
    {
        // Materializes a function object for NewFunction and its generator/async
        // variants. Fast path: inline-allocate the function cell and initialize its
        // fields directly; slow path: call the matching runtime operation.
        ASSERT(m_node->op() == NewFunction || m_node->op() == NewGeneratorFunction || m_node->op() == NewAsyncGeneratorFunction || m_node->op() == NewAsyncFunction);
        bool isGeneratorFunction = m_node->op() == NewGeneratorFunction;
        bool isAsyncFunction = m_node->op() == NewAsyncFunction;
        bool isAsyncGeneratorFunction = m_node->op() == NewAsyncGeneratorFunction;

        LValue scope = lowCell(m_node->child1());

        FunctionExecutable* executable = m_node->castOperand<FunctionExecutable*>();
        // While the executable's singleton structure watchpoint is still valid we
        // cannot use the inline allocation path; go straight to the runtime.
        if (executable->singleton().isStillValid()) {
            LValue callResult =
                isGeneratorFunction ? vmCall(Int64, m_out.operation(operationNewGeneratorFunction), m_callFrame, scope, weakPointer(executable)) :
                isAsyncFunction ? vmCall(Int64, m_out.operation(operationNewAsyncFunction), m_callFrame, scope, weakPointer(executable)) :
                isAsyncGeneratorFunction ? vmCall(Int64, m_out.operation(operationNewAsyncGeneratorFunction), m_callFrame, scope, weakPointer(executable)) :
                vmCall(Int64, m_out.operation(operationNewFunction), m_callFrame, scope, weakPointer(executable));
            setJSValue(callResult);
            return;
        }

        // Pick the structure matching the node's opcode; immediately-invoked
        // lambda keeps the switch local to the initializer.
        RegisteredStructure structure = m_graph.registerStructure(
            [&] () {
                JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
                switch (m_node->op()) {
                case NewGeneratorFunction:
                    return globalObject->generatorFunctionStructure();
                case NewAsyncFunction:
                    return globalObject->asyncFunctionStructure();
                case NewAsyncGeneratorFunction:
                    return globalObject->asyncGeneratorFunctionStructure();
                case NewFunction:
                    return JSFunction::selectStructureForNewFuncExp(globalObject, m_node->castOperand<FunctionExecutable*>());
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                }
            }());

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        // Inline allocation of the concrete function subclass; branches to
        // slowPath if the allocator fails.
        LValue fastObject =
            isGeneratorFunction ? allocateObject<JSGeneratorFunction>(structure, m_out.intPtrZero, slowPath) :
            isAsyncFunction ? allocateObject<JSAsyncFunction>(structure, m_out.intPtrZero, slowPath) :
            isAsyncGeneratorFunction ? allocateObject<JSAsyncGeneratorFunction>(structure, m_out.intPtrZero, slowPath) :
            allocateObject<JSFunction>(structure, m_out.intPtrZero, slowPath);


        // We don't need memory barriers since we just fast-created the function, so it
        // must be young.
        m_out.storePtr(scope, fastObject, m_heaps.JSFunction_scope);
        m_out.storePtr(weakPointer(executable), fastObject, m_heaps.JSFunction_executable);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.JSFunction_rareData);

        // Publish the initialized object before it becomes visible to the GC/mutator.
        mutatorFence();

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        Vector<LValue> slowPathArguments;
        slowPathArguments.append(scope);
        VM& vm = this->vm();
        // Lazily-generated slow path: selects the ...WithInvalidatedReallocationWatchpoint
        // operation matching the opcode flags captured above.
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                auto* operation = operationNewFunctionWithInvalidatedReallocationWatchpoint;
                if (isGeneratorFunction)
                    operation = operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint;
                else if (isAsyncFunction)
                    operation = operationNewAsyncFunctionWithInvalidatedReallocationWatchpoint;
                else if (isAsyncGeneratorFunction)
                    operation = operationNewAsyncGeneratorFunctionWithInvalidatedReallocationWatchpoint;

                return createLazyCallGenerator(vm, operation,
                    locations[0].directGPR(), locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(executable));
            },
            slowPathArguments);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        // Merge fast and slow results.
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
5581
    void compileCreateDirectArguments()
    {
        // FIXME: A more effective way of dealing with the argument count and callee is to have
        // them be explicit arguments to this node.
        // https://bugs.webkit.org/show_bug.cgi?id=142207

        RegisteredStructure structure =
            m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->directArgumentsStructure());

        // Minimum storage capacity: declared parameter count minus `this`.
        unsigned minCapacity = m_graph.baselineCodeBlockFor(m_node->origin.semantic)->numParameters() - 1;

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        ArgumentsLength length = getArgumentsLength();

        LValue fastObject;
        if (length.isKnown) {
            // Statically-known length: allocation size is a compile-time constant.
            fastObject = allocateObject<DirectArguments>(
                DirectArguments::allocationSize(std::max(length.known, minCapacity)), structure,
                m_out.intPtrZero, slowPath);
        } else {
            // Dynamic length: size = storageOffset + length * 8 (shift by 3 is
            // sizeof(EncodedJSValue)), clamped below by allocationSize(minCapacity).
            LValue size = m_out.add(
                m_out.shl(length.value, m_out.constInt32(3)),
                m_out.constInt32(DirectArguments::storageOffset()));

            size = m_out.select(
                m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
                size, m_out.constInt32(DirectArguments::allocationSize(minCapacity)));

            fastObject = allocateVariableSizedObject<DirectArguments>(
                m_out.zeroExtPtr(size), structure, m_out.intPtrZero, slowPath);
        }

        // Initialize header fields of the freshly allocated object.
        m_out.store32(length.value, fastObject, m_heaps.DirectArguments_length);
        m_out.store32(m_out.constInt32(minCapacity), fastObject, m_heaps.DirectArguments_minCapacity);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_mappedArguments);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_modifiedArgumentsDescriptor);

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationCreateDirectArguments, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                    CCallHelpers::TrustedImm32(minCapacity));
            }, length.value);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(pointerType(), fastResult, slowResult);

        // The callee is stored on both paths (the slow path leaves it to us too).
        m_out.storePtr(getCurrentCallee(), result, m_heaps.DirectArguments_callee);

        if (length.isKnown) {
            // Known length: fully unroll the argument copy.
            VirtualRegister start = AssemblyHelpers::argumentsStart(m_node->origin.semantic);
            for (unsigned i = 0; i < std::max(length.known, minCapacity); ++i) {
                m_out.store64(
                    m_out.load64(addressFor(start + i)),
                    result, m_heaps.DirectArguments_storage[i]);
            }
        } else {
            // Unknown length: copy arguments in a loop that counts down from
            // max(length, minCapacity) to zero.
            LValue stackBase = getArgumentsStart();

            LBasicBlock loop = m_out.newBlock();
            LBasicBlock end = m_out.newBlock();

            ValueFromBlock originalLength;
            if (minCapacity) {
                // Count is at least minCapacity, hence nonzero: enter the loop
                // unconditionally.
                LValue capacity = m_out.select(
                    m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
                    length.value,
                    m_out.constInt32(minCapacity));
                LValue originalLengthValue = m_out.zeroExtPtr(capacity);
                originalLength = m_out.anchor(originalLengthValue);
                m_out.jump(loop);
            } else {
                // Count may be zero: skip the loop entirely in that case.
                LValue originalLengthValue = m_out.zeroExtPtr(length.value);
                originalLength = m_out.anchor(originalLengthValue);
                m_out.branch(m_out.isNull(originalLengthValue), unsure(end), unsure(loop));
            }

            lastNext = m_out.appendTo(loop, end);
            // Loop variable counts down; index = previousIndex - 1 is the slot copied
            // on this iteration.
            LValue previousIndex = m_out.phi(pointerType(), originalLength);
            LValue index = m_out.sub(previousIndex, m_out.intPtrOne);
            m_out.store64(
                m_out.load64(m_out.baseIndex(m_heaps.variables, stackBase, index)),
                m_out.baseIndex(m_heaps.DirectArguments_storage, result, index));
            ValueFromBlock nextIndex = m_out.anchor(index);
            m_out.addIncomingToPhi(previousIndex, nextIndex);
            m_out.branch(m_out.isNull(index), unsure(end), unsure(loop));

            m_out.appendTo(end, lastNext);
        }

        // Publish the fully-initialized object.
        mutatorFence();

        setJSValue(result);
    }
5688
5689 void compileCreateScopedArguments()
5690 {
5691 LValue scope = lowCell(m_node->child1());
5692
5693 LValue result = vmCall(
5694 Int64, m_out.operation(operationCreateScopedArguments), m_callFrame,
5695 weakPointer(
5696 m_graph.globalObjectFor(m_node->origin.semantic)->scopedArgumentsStructure()),
5697 getArgumentsStart(), getArgumentsLength().value, getCurrentCallee(), scope);
5698
5699 setJSValue(result);
5700 }
5701
5702 void compileCreateClonedArguments()
5703 {
5704 LValue result = vmCall(
5705 Int64, m_out.operation(operationCreateClonedArguments), m_callFrame,
5706 weakPointer(
5707 m_graph.globalObjectFor(m_node->origin.semantic)->clonedArgumentsStructure()),
5708 getArgumentsStart(), getArgumentsLength().value, getCurrentCallee());
5709
5710 setJSValue(result);
5711 }
5712
    void compileCreateRest()
    {
        // Fast path only when the global object is not "having a bad time":
        // allocate a contiguous JSArray and copy the trailing arguments in.
        if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
            LBasicBlock continuation = m_out.newBlock();
            LValue arrayLength = lowInt32(m_node->child1());
            LBasicBlock loopStart = m_out.newBlock();
            JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
            RegisteredStructure structure = m_graph.registerStructure(globalObject->originalRestParameterStructure());
            ArrayValues arrayValues = allocateUninitializedContiguousJSArray(arrayLength, structure);
            LValue array = arrayValues.array;
            LValue butterfly = arrayValues.butterfly;
            ValueFromBlock startLength = m_out.anchor(arrayLength);
            // First argument included in the rest array: argumentsStart plus the
            // skipped register slots.
            LValue argumentRegion = m_out.add(getArgumentsStart(), m_out.constInt64(sizeof(Register) * m_node->numberOfArgumentsToSkip()));
            // Zero-length rest arrays skip the copy loop.
            m_out.branch(m_out.equal(arrayLength, m_out.constInt32(0)),
                unsure(continuation), unsure(loopStart));

            LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);
            // Copy loop counts down: offset goes arrayLength-1 ... 0.
            LValue phiOffset = m_out.phi(Int32, startLength);
            LValue currentOffset = m_out.sub(phiOffset, m_out.int32One);
            m_out.addIncomingToPhi(phiOffset, m_out.anchor(currentOffset));
            LValue loadedValue = m_out.load64(m_out.baseIndex(m_heaps.variables, argumentRegion, m_out.zeroExtPtr(currentOffset)));
            IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
            m_out.store64(loadedValue, m_out.baseIndex(heap, butterfly, m_out.zeroExtPtr(currentOffset)));
            m_out.branch(m_out.equal(currentOffset, m_out.constInt32(0)), unsure(continuation), unsure(loopStart));

            m_out.appendTo(continuation, lastNext);
            // Publish the initialized array before it escapes.
            mutatorFence();
            setJSValue(array);
            return;
        }

        // Slow path: let the runtime build the rest array.
        LValue arrayLength = lowInt32(m_node->child1());
        LValue argumentStart = getArgumentsStart();
        LValue numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
        setJSValue(vmCall(
            Int64, m_out.operation(operationCreateRest), m_callFrame, argumentStart, numberOfArgumentsToSkip, arrayLength));
    }
5750
5751 void compileGetRestLength()
5752 {
5753 LBasicBlock nonZeroLength = m_out.newBlock();
5754 LBasicBlock continuation = m_out.newBlock();
5755
5756 ValueFromBlock zeroLengthResult = m_out.anchor(m_out.constInt32(0));
5757
5758 LValue numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
5759 LValue argumentsLength = getArgumentsLength().value;
5760 m_out.branch(m_out.above(argumentsLength, numberOfArgumentsToSkip),
5761 unsure(nonZeroLength), unsure(continuation));
5762
5763 LBasicBlock lastNext = m_out.appendTo(nonZeroLength, continuation);
5764 ValueFromBlock nonZeroLengthResult = m_out.anchor(m_out.sub(argumentsLength, numberOfArgumentsToSkip));
5765 m_out.jump(continuation);
5766
5767 m_out.appendTo(continuation, lastNext);
5768 setInt32(m_out.phi(Int32, zeroLengthResult, nonZeroLengthResult));
5769 }
5770
    void compileObjectKeys()
    {
        switch (m_node->child1().useKind()) {
        case ObjectUse: {
            // Fast path: if the structure's rare data caches its own keys, wrap the
            // cached JSImmutableButterfly in a copy-on-write array without calling out.
            if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
                LBasicBlock notNullCase = m_out.newBlock();
                LBasicBlock rareDataCase = m_out.newBlock();
                LBasicBlock useCacheCase = m_out.newBlock();
                LBasicBlock slowButArrayBufferCase = m_out.newBlock();
                LBasicBlock slowCase = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                LValue object = lowObject(m_node->child1());
                LValue structure = loadStructure(object);
                LValue previousOrRareData = m_out.loadPtr(structure, m_heaps.Structure_previousOrRareData);
                m_out.branch(m_out.notNull(previousOrRareData), unsure(notNullCase), unsure(slowCase));

                // previousOrRareData is a union: it is rare data only when its
                // structure ID is NOT the Structure structure's ID.
                LBasicBlock lastNext = m_out.appendTo(notNullCase, rareDataCase);
                m_out.branch(
                    m_out.notEqual(m_out.load32(previousOrRareData, m_heaps.JSCell_structureID), m_out.constInt32(m_graph.m_vm.structureStructure->structureID())),
                    unsure(rareDataCase), unsure(slowCase));

                m_out.appendTo(rareDataCase, useCacheCase);
                // The sentinel (value 1) and null both mean "no usable cache"; a
                // single unsigned <= compare rejects both.
                ASSERT(bitwise_cast<uintptr_t>(StructureRareData::cachedOwnKeysSentinel()) == 1);
                LValue cachedOwnKeys = m_out.loadPtr(previousOrRareData, m_heaps.StructureRareData_cachedOwnKeys);
                m_out.branch(m_out.belowOrEqual(cachedOwnKeys, m_out.constIntPtr(bitwise_cast<void*>(StructureRareData::cachedOwnKeysSentinel()))), unsure(slowCase), unsure(useCacheCase));

                // Cache hit: allocate a CoW array whose butterfly points into the
                // cached immutable butterfly's data.
                m_out.appendTo(useCacheCase, slowButArrayBufferCase);
                JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
                RegisteredStructure arrayStructure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(CopyOnWriteArrayWithContiguous));
                LValue fastArray = allocateObject<JSArray>(arrayStructure, m_out.addPtr(cachedOwnKeys, JSImmutableButterfly::offsetOfData()), slowButArrayBufferCase);
                ValueFromBlock fastResult = m_out.anchor(fastArray);
                m_out.jump(continuation);

                // Allocation failed but the cache is still usable: build the array
                // via operationNewArrayBuffer.
                m_out.appendTo(slowButArrayBufferCase, slowCase);
                LValue slowArray = vmCall(Int64, m_out.operation(operationNewArrayBuffer), m_callFrame, weakStructure(arrayStructure), cachedOwnKeys);
                ValueFromBlock slowButArrayBufferResult = m_out.anchor(slowArray);
                m_out.jump(continuation);

                // No cache: full runtime Object.keys on the object.
                m_out.appendTo(slowCase, continuation);
                VM& vm = this->vm();
                LValue slowResultValue = lazySlowPath(
                    [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                        return createLazyCallGenerator(vm,
                            operationObjectKeysObject, locations[0].directGPR(), locations[1].directGPR());
                    },
                    object);
                ValueFromBlock slowResult = m_out.anchor(slowResultValue);
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setJSValue(m_out.phi(pointerType(), fastResult, slowButArrayBufferResult, slowResult));
                break;
            }
            // Not watching the watchpoint: always call the runtime.
            setJSValue(vmCall(Int64, m_out.operation(operationObjectKeysObject), m_callFrame, lowObject(m_node->child1())));
            break;
        }
        case UntypedUse:
            setJSValue(vmCall(Int64, m_out.operation(operationObjectKeys), m_callFrame, lowJSValue(m_node->child1())));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
5836
5837 void compileObjectCreate()
5838 {
5839 switch (m_node->child1().useKind()) {
5840 case ObjectUse:
5841 setJSValue(vmCall(Int64, m_out.operation(operationObjectCreateObject), m_callFrame, lowObject(m_node->child1())));
5842 break;
5843 case UntypedUse:
5844 setJSValue(vmCall(Int64, m_out.operation(operationObjectCreate), m_callFrame, lowJSValue(m_node->child1())));
5845 break;
5846 default:
5847 RELEASE_ASSERT_NOT_REACHED();
5848 break;
5849 }
5850 }
5851
5852 void compileNewObject()
5853 {
5854 setJSValue(allocateObject(m_node->structure()));
5855 mutatorFence();
5856 }
5857
    void compileNewStringObject()
    {
        // new String(s): fast path inline-allocates the StringObject and fills in
        // its class info and wrapped string; slow path calls the runtime.
        RegisteredStructure structure = m_node->structure();
        LValue string = lowString(m_node->child1());

        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowCase);

        LValue fastResultValue = allocateObject<StringObject>(structure, m_out.intPtrZero, slowCase);
        m_out.storePtr(m_out.constIntPtr(StringObject::info()), fastResultValue, m_heaps.JSDestructibleObject_classInfo);
        m_out.store64(string, fastResultValue, m_heaps.JSWrapperObject_internalValue);
        // Publish the initialized object before it becomes observable.
        mutatorFence();
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        VM& vm = this->vm();
        // Lazy slow path: operationNewStringObject(result, string, structure).
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNewStringObject, locations[0].directGPR(), locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()));
            },
            string);
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
5890
5891 void compileNewSymbol()
5892 {
5893 if (!m_node->child1()) {
5894 setJSValue(vmCall(pointerType(), m_out.operation(operationNewSymbol), m_callFrame));
5895 return;
5896 }
5897 ASSERT(m_node->child1().useKind() == KnownStringUse);
5898 setJSValue(vmCall(pointerType(), m_out.operation(operationNewSymbolWithDescription), m_callFrame, lowString(m_node->child1())));
5899 }
5900
    void compileNewArray()
    {
        // First speculate appropriately on all of the children. Do this unconditionally up here
        // because some of the slow paths may otherwise forget to do it. It's sort of arguable
        // that doing the speculations up here might be unprofitable for RA - so we can consider
        // sinking this to below the allocation fast path if we find that this has a lot of
        // register pressure.
        for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex)
            speculate(m_graph.varArgChild(m_node, operandIndex));

        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
            m_node->indexingType()));

        // Fast path: inline-allocate an uninitialized contiguous array and store
        // each element directly into the butterfly.
        if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
            unsigned numElements = m_node->numChildren();
            unsigned vectorLengthHint = m_node->vectorLengthHint();
            ASSERT(vectorLengthHint >= numElements);

            ArrayValues arrayValues =
                allocateUninitializedContiguousJSArray(numElements, vectorLengthHint, structure);

            for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
                Edge edge = m_graph.varArgChild(m_node, operandIndex);

                // The store form depends on the array's indexing type: unboxed
                // doubles vs. boxed JSValues.
                switch (m_node->indexingType()) {
                case ALL_BLANK_INDEXING_TYPES:
                case ALL_UNDECIDED_INDEXING_TYPES:
                    DFG_CRASH(m_graph, m_node, "Bad indexing type");
                    break;

                case ALL_DOUBLE_INDEXING_TYPES:
                    m_out.storeDouble(
                        lowDouble(edge),
                        arrayValues.butterfly, m_heaps.indexedDoubleProperties[operandIndex]);
                    break;

                case ALL_INT32_INDEXING_TYPES:
                case ALL_CONTIGUOUS_INDEXING_TYPES:
                    // ManualOperandSpeculation: speculation already happened in the
                    // loop at the top of this function.
                    m_out.store64(
                        lowJSValue(edge, ManualOperandSpeculation),
                        arrayValues.butterfly,
                        m_heaps.forIndexingType(m_node->indexingType())->at(operandIndex));
                    break;

                default:
                    DFG_CRASH(m_graph, m_node, "Corrupt indexing type");
                    break;
                }
            }

            setJSValue(arrayValues.array);
            // Publish the initialized array.
            mutatorFence();
            return;
        }

        // No elements: the runtime can build an empty array directly.
        if (!m_node->numChildren()) {
            setJSValue(vmCall(
                Int64, m_out.operation(operationNewEmptyArray), m_callFrame,
                weakStructure(structure)));
            return;
        }

        // General slow path: spill all element values into a scratch buffer and
        // hand the buffer to operationNewArray.
        size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren();
        ASSERT(scratchSize);
        ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());

        for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
            Edge edge = m_graph.varArgChild(m_node, operandIndex);
            LValue valueToStore;
            switch (m_node->indexingType()) {
            case ALL_DOUBLE_INDEXING_TYPES:
                // The buffer holds EncodedJSValues, so doubles must be boxed.
                valueToStore = boxDouble(lowDouble(edge));
                break;
            default:
                valueToStore = lowJSValue(edge, ManualOperandSpeculation);
                break;
            }
            m_out.store64(valueToStore, m_out.absolute(buffer + operandIndex));
        }

        // Mark the scratch buffer active so the GC scans it across the call,
        // then deactivate it once the call is done.
        m_out.storePtr(
            m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));

        LValue result = vmCall(
            Int64, m_out.operation(operationNewArray), m_callFrame,
            weakStructure(structure), m_out.constIntPtr(buffer),
            m_out.constIntPtr(m_node->numChildren()));

        m_out.storePtr(m_out.intPtrZero, m_out.absolute(scratchBuffer->addressOfActiveLength()));

        setJSValue(result);
    }
5995
    void compileNewArrayWithSpread()
    {
        // Fast path: with the having-a-bad-time watchpoint intact, compute the
        // total length up front, allocate one contiguous array, and copy every
        // child (spread or plain) into it.
        if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
            // Statically-known portion of the length: plain (non-spread) children
            // plus spreads of PhantomNewArrayBuffer whose lengths are constants.
            CheckedInt32 startLength = 0;
            BitVector* bitVector = m_node->bitVector();
            HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;

            for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                // bitVector marks which children are spreads.
                if (!bitVector->get(i))
                    ++startLength;
                else {
                    Edge& child = m_graph.varArgChild(m_node, i);
                    if (child->op() == PhantomSpread && child->child1()->op() == PhantomNewArrayBuffer)
                        startLength += child->child1()->castOperand<JSImmutableButterfly*>()->length();
                }
            }

            if (startLength.hasOverflowed()) {
                terminate(Overflow);
                return;
            }

            LValue length = m_out.constInt32(startLength.unsafeGet());

            // Add the dynamic lengths (rest spreads and fixed-array spreads) with
            // overflow checks; spread lengths per inline call frame are cached so
            // we only compute each once.
            for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                if (bitVector->get(i)) {
                    Edge use = m_graph.varArgChild(m_node, i);
                    CheckValue* lengthCheck = nullptr;
                    if (use->op() == PhantomSpread) {
                        if (use->child1()->op() == PhantomCreateRest) {
                            InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame();
                            unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();
                            LValue spreadLength = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                                return getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
                            }).iterator->value;
                            lengthCheck = m_out.speculateAdd(length, spreadLength);
                        }
                    } else {
                        LValue fixedArray = lowCell(use);
                        lengthCheck = m_out.speculateAdd(length, m_out.load32(fixedArray, m_heaps.JSFixedArray_size));
                    }

                    if (lengthCheck) {
                        blessSpeculation(lengthCheck, Overflow, noValue(), nullptr, m_origin);
                        length = lengthCheck;
                    }
                }
            }

            // Contiguous arrays cannot exceed this length; bail to the OSR exit
            // (as Overflow) if they would.
            LValue exceedsMaxAllowedLength = m_out.aboveOrEqual(length, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
            blessSpeculation(m_out.speculate(exceedsMaxAllowedLength), Overflow, noValue(), nullptr, m_origin);

            RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->originalArrayStructureForIndexingType(ArrayWithContiguous));
            ArrayValues arrayValues = allocateUninitializedContiguousJSArray(length, structure);
            LValue result = arrayValues.array;
            LValue storage = arrayValues.butterfly;
            // Running write index into the destination butterfly.
            LValue index = m_out.constIntPtr(0);

            for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                Edge use = m_graph.varArgChild(m_node, i);
                if (bitVector->get(i)) {
                    if (use->op() == PhantomSpread) {
                        if (use->child1()->op() == PhantomNewArrayBuffer) {
                            // Constant spread: emit an unrolled sequence of stores
                            // of the buffer's constant values.
                            IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
                            auto* array = use->child1()->castOperand<JSImmutableButterfly*>();
                            for (unsigned i = 0; i < array->length(); ++i) {
                                // Because resulted array from NewArrayWithSpread is always contiguous, we should not generate value
                                // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                                int64_t value = JSValue::encode(array->get(i));
                                m_out.store64(m_out.constInt64(value), m_out.baseIndex(heap, storage, index, JSValue(), (Checked<int32_t>(sizeof(JSValue)) * i).unsafeGet()));
                            }
                            index = m_out.add(index, m_out.constIntPtr(array->length()));
                        } else {
                            // Rest spread: loop over the inline call frame's
                            // arguments, copying into the destination.
                            RELEASE_ASSERT(use->child1()->op() == PhantomCreateRest);
                            InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame();
                            unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();

                            LValue length = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));
                            LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);

                            LBasicBlock loopStart = m_out.newBlock();
                            LBasicBlock continuation = m_out.newBlock();

                            ValueFromBlock loadIndexStart = m_out.anchor(m_out.constIntPtr(0));
                            ValueFromBlock arrayIndexStart = m_out.anchor(index);
                            ValueFromBlock arrayIndexStartForFinish = m_out.anchor(index);

                            m_out.branch(
                                m_out.isZero64(length),
                                unsure(continuation), unsure(loopStart));

                            LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);

                            // Two phis: write index into the array, read index
                            // into the stack arguments.
                            LValue arrayIndex = m_out.phi(pointerType(), arrayIndexStart);
                            LValue loadIndex = m_out.phi(pointerType(), loadIndexStart);

                            LValue item = m_out.load64(m_out.baseIndex(m_heaps.variables, sourceStart, loadIndex));
                            m_out.store64(item, m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, arrayIndex));

                            LValue nextArrayIndex = m_out.add(arrayIndex, m_out.constIntPtr(1));
                            LValue nextLoadIndex = m_out.add(loadIndex, m_out.constIntPtr(1));
                            ValueFromBlock arrayIndexLoopForFinish = m_out.anchor(nextArrayIndex);

                            m_out.addIncomingToPhi(loadIndex, m_out.anchor(nextLoadIndex));
                            m_out.addIncomingToPhi(arrayIndex, m_out.anchor(nextArrayIndex));

                            m_out.branch(
                                m_out.below(nextLoadIndex, length),
                                unsure(loopStart), unsure(continuation));

                            m_out.appendTo(continuation, lastNext);
                            // New write index: either unchanged (empty spread) or
                            // the loop's final index.
                            index = m_out.phi(pointerType(), arrayIndexStartForFinish, arrayIndexLoopForFinish);
                        }
                    } else {
                        // Spread of a materialized JSFixedArray: copy its buffer
                        // element by element.
                        LBasicBlock loopStart = m_out.newBlock();
                        LBasicBlock continuation = m_out.newBlock();

                        LValue fixedArray = lowCell(use);

                        ValueFromBlock fixedIndexStart = m_out.anchor(m_out.constIntPtr(0));
                        ValueFromBlock arrayIndexStart = m_out.anchor(index);
                        ValueFromBlock arrayIndexStartForFinish = m_out.anchor(index);

                        LValue fixedArraySize = m_out.zeroExtPtr(m_out.load32(fixedArray, m_heaps.JSFixedArray_size));

                        m_out.branch(
                            m_out.isZero64(fixedArraySize),
                            unsure(continuation), unsure(loopStart));

                        LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);

                        LValue arrayIndex = m_out.phi(pointerType(), arrayIndexStart);
                        LValue fixedArrayIndex = m_out.phi(pointerType(), fixedIndexStart);

                        LValue item = m_out.load64(m_out.baseIndex(m_heaps.JSFixedArray_buffer, fixedArray, fixedArrayIndex));
                        m_out.store64(item, m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, arrayIndex));

                        LValue nextArrayIndex = m_out.add(arrayIndex, m_out.constIntPtr(1));
                        LValue nextFixedArrayIndex = m_out.add(fixedArrayIndex, m_out.constIntPtr(1));
                        ValueFromBlock arrayIndexLoopForFinish = m_out.anchor(nextArrayIndex);

                        m_out.addIncomingToPhi(fixedArrayIndex, m_out.anchor(nextFixedArrayIndex));
                        m_out.addIncomingToPhi(arrayIndex, m_out.anchor(nextArrayIndex));

                        m_out.branch(
                            m_out.below(nextFixedArrayIndex, fixedArraySize),
                            unsure(loopStart), unsure(continuation));

                        m_out.appendTo(continuation, lastNext);
                        index = m_out.phi(pointerType(), arrayIndexStartForFinish, arrayIndexLoopForFinish);
                    }
                } else {
                    // Plain (non-spread) child: store one value and bump the index.
                    IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
                    LValue item = lowJSValue(use);
                    m_out.store64(item, m_out.baseIndex(heap, storage, index));
                    index = m_out.add(index, m_out.constIntPtr(1));
                }
            }

            // Publish the fully-populated array.
            mutatorFence();
            setJSValue(result);
            return;
        }

        // Slow path: spill all children to a scratch buffer (cells for spreads,
        // JSValues otherwise) and let the runtime assemble the array.
        ASSERT(m_node->numChildren());
        size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren();
        ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
        BitVector* bitVector = m_node->bitVector();
        for (unsigned i = 0; i < m_node->numChildren(); ++i) {
            Edge use = m_graph.m_varArgChildren[m_node->firstChild() + i];
            LValue value;
            if (bitVector->get(i))
                value = lowCell(use);
            else
                value = lowJSValue(use);
            m_out.store64(value, m_out.absolute(&buffer[i]));
        }

        // Mark the scratch buffer active across the call so the GC scans it,
        // then deactivate it.
        m_out.storePtr(m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));
        LValue result = vmCall(Int64, m_out.operation(operationNewArrayWithSpreadSlow), m_callFrame, m_out.constIntPtr(buffer), m_out.constInt32(m_node->numChildren()));
        m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));

        setJSValue(result);
    }
6181
    void compileCreateThis()
    {
        // |this| creation for a constructor call. Fast path requires the callee to
        // be a JSFunction whose rare data already carries a cached allocator and
        // structure; otherwise fall back to operationCreateThis.
        LValue callee = lowCell(m_node->child1());

        LBasicBlock isFunctionBlock = m_out.newBlock();
        LBasicBlock hasRareData = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isFunction(callee, provenType(m_node->child1())), usually(isFunctionBlock), rarely(slowPath));

        // Rare data may be null if the function has never been used as a constructor.
        LBasicBlock lastNext = m_out.appendTo(isFunctionBlock, hasRareData);
        LValue rareData = m_out.loadPtr(callee, m_heaps.JSFunction_rareData);
        m_out.branch(m_out.isZero64(rareData), rarely(slowPath), usually(hasRareData));

        // Inline-allocate using the cached allocator/structure; allocateObject
        // itself branches to slowPath if the allocator is unusable.
        m_out.appendTo(hasRareData, slowPath);
        LValue allocator = m_out.loadPtr(rareData, m_heaps.FunctionRareData_allocator);
        LValue structure = m_out.loadPtr(rareData, m_heaps.FunctionRareData_structure);
        LValue butterfly = m_out.constIntPtr(0);
        ValueFromBlock fastResult = m_out.anchor(allocateObject(allocator, structure, butterfly, slowPath));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        ValueFromBlock slowResult = m_out.anchor(vmCall(
            Int64, m_out.operation(operationCreateThis), m_callFrame, callee, m_out.constInt32(m_node->inlineCapacity())));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(Int64, fastResult, slowResult);

        // Publish the initialized object.
        mutatorFence();
        setJSValue(result);
    }
6215
6216 void compileSpread()
6217 {
6218 if (m_node->child1()->op() == PhantomNewArrayBuffer) {
6219 LBasicBlock slowAllocation = m_out.newBlock();
6220 LBasicBlock continuation = m_out.newBlock();
6221
6222 auto* immutableButterfly = m_node->child1()->castOperand<JSImmutableButterfly*>();
6223
6224 LValue fastFixedArrayValue = allocateVariableSizedCell<JSFixedArray>(
6225 m_out.constIntPtr(JSFixedArray::allocationSize(immutableButterfly->length()).unsafeGet()),
6226 m_graph.m_vm.fixedArrayStructure.get(), slowAllocation);
6227 m_out.store32(m_out.constInt32(immutableButterfly->length()), fastFixedArrayValue, m_heaps.JSFixedArray_size);
6228 ValueFromBlock fastFixedArray = m_out.anchor(fastFixedArrayValue);
6229 m_out.jump(continuation);
6230
6231 LBasicBlock lastNext = m_out.appendTo(slowAllocation, continuation);
6232 ValueFromBlock slowFixedArray = m_out.anchor(vmCall(pointerType(), m_out.operation(operationCreateFixedArray), m_callFrame, m_out.constInt32(immutableButterfly->length())));
6233 m_out.jump(continuation);
6234
6235 m_out.appendTo(continuation, lastNext);
6236 LValue fixedArray = m_out.phi(pointerType(), fastFixedArray, slowFixedArray);
6237 for (unsigned i = 0; i < immutableButterfly->length(); i++) {
6238 // Because forwarded values are drained as JSValue, we should not generate value
6239 // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
6240 int64_t value = JSValue::encode(immutableButterfly->get(i));
6241 m_out.store64(
6242 m_out.constInt64(value),
6243 m_out.baseIndex(m_heaps.JSFixedArray_buffer, fixedArray, m_out.constIntPtr(i), jsNumber(i)));
6244 }
6245 mutatorFence();
6246 setJSValue(fixedArray);
6247 return;
6248 }
6249
6250 if (m_node->child1()->op() == PhantomCreateRest) {
6251 // This IR is rare to generate since it requires escaping the Spread
6252 // but not the CreateRest. In bytecode, we have only few operations that
6253 // accept Spread's result as input. This usually leads to the Spread node not
6254 // escaping. However, this can happen if for example we generate a PutStack on
6255 // the Spread but nothing escapes the CreateRest.
6256 LBasicBlock loopHeader = m_out.newBlock();
6257 LBasicBlock loopBody = m_out.newBlock();
6258 LBasicBlock slowAllocation = m_out.newBlock();
6259 LBasicBlock continuation = m_out.newBlock();
6260 LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopHeader);
6261
6262 InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();
6263 unsigned numberOfArgumentsToSkip = m_node->child1()->numberOfArgumentsToSkip();
6264 LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
6265 LValue length = getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
6266 static_assert(sizeof(JSValue) == 8 && 1 << 3 == 8, "Assumed in the code below.");
6267 LValue size = m_out.add(
6268 m_out.shl(m_out.zeroExtPtr(length), m_out.constInt32(3)),
6269 m_out.constIntPtr(JSFixedArray::offsetOfData()));
6270
6271 LValue fastArrayValue = allocateVariableSizedCell<JSFixedArray>(size, m_graph.m_vm.fixedArrayStructure.get(), slowAllocation);
6272 m_out.store32(length, fastArrayValue, m_heaps.JSFixedArray_size);
6273 ValueFromBlock fastArray = m_out.anchor(fastArrayValue);
6274 m_out.jump(loopHeader);
6275
6276 m_out.appendTo(slowAllocation, loopHeader);
6277 ValueFromBlock slowArray = m_out.anchor(vmCall(pointerType(), m_out.operation(operationCreateFixedArray), m_callFrame, length));
6278 m_out.jump(loopHeader);
6279
6280 m_out.appendTo(loopHeader, loopBody);
6281 LValue fixedArray = m_out.phi(pointerType(), fastArray, slowArray);
6282 ValueFromBlock startIndex = m_out.anchor(m_out.constIntPtr(0));
6283 m_out.branch(m_out.isZero32(length), unsure(continuation), unsure(loopBody));
6284
6285 m_out.appendTo(loopBody, continuation);
6286 LValue index = m_out.phi(pointerType(), startIndex);
6287 LValue value = m_out.load64(
6288 m_out.baseIndex(m_heaps.variables, sourceStart, index));
6289 m_out.store64(value, m_out.baseIndex(m_heaps.JSFixedArray_buffer, fixedArray, index));
6290 LValue nextIndex = m_out.add(m_out.constIntPtr(1), index);
6291 m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
6292 m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)), unsure(loopBody), unsure(continuation));
6293
6294 m_out.appendTo(continuation, lastNext);
6295 mutatorFence();
6296 setJSValue(fixedArray);
6297 return;
6298 }
6299
6300 LValue argument = lowCell(m_node->child1());
6301
6302 LValue result;
6303
6304 if (m_node->child1().useKind() == ArrayUse)
6305 speculateArray(m_node->child1());
6306
6307 if (m_graph.canDoFastSpread(m_node, m_state.forNode(m_node->child1()))) {
6308 LBasicBlock preLoop = m_out.newBlock();
6309 LBasicBlock loopSelection = m_out.newBlock();
6310 LBasicBlock contiguousLoopStart = m_out.newBlock();
6311 LBasicBlock doubleLoopStart = m_out.newBlock();
6312 LBasicBlock slowPath = m_out.newBlock();
6313 LBasicBlock continuation = m_out.newBlock();
6314
6315 LValue indexingShape = m_out.load8ZeroExt32(argument, m_heaps.JSCell_indexingTypeAndMisc);
6316 indexingShape = m_out.bitAnd(indexingShape, m_out.constInt32(IndexingShapeMask));
6317 LValue isOKIndexingType = m_out.belowOrEqual(
6318 m_out.sub(indexingShape, m_out.constInt32(Int32Shape)),
6319 m_out.constInt32(ContiguousShape - Int32Shape));
6320
6321 m_out.branch(isOKIndexingType, unsure(preLoop), unsure(slowPath));
6322 LBasicBlock lastNext = m_out.appendTo(preLoop, loopSelection);
6323
6324 LValue butterfly = m_out.loadPtr(argument, m_heaps.JSObject_butterfly);
6325 LValue length = m_out.load32NonNegative(butterfly, m_heaps.Butterfly_publicLength);
6326 static_assert(sizeof(JSValue) == 8 && 1 << 3 == 8, "Assumed in the code below.");
6327 LValue size = m_out.add(
6328 m_out.shl(m_out.zeroExtPtr(length), m_out.constInt32(3)),
6329 m_out.constIntPtr(JSFixedArray::offsetOfData()));
6330
6331 LValue fastAllocation = allocateVariableSizedCell<JSFixedArray>(size, m_graph.m_vm.fixedArrayStructure.get(), slowPath);
6332 ValueFromBlock fastResult = m_out.anchor(fastAllocation);
6333 m_out.store32(length, fastAllocation, m_heaps.JSFixedArray_size);
6334
6335 ValueFromBlock startIndexForContiguous = m_out.anchor(m_out.constIntPtr(0));
6336 ValueFromBlock startIndexForDouble = m_out.anchor(m_out.constIntPtr(0));
6337
6338 m_out.branch(m_out.isZero32(length), unsure(continuation), unsure(loopSelection));
6339
6340 m_out.appendTo(loopSelection, contiguousLoopStart);
6341 m_out.branch(m_out.equal(indexingShape, m_out.constInt32(DoubleShape)),
6342 unsure(doubleLoopStart), unsure(contiguousLoopStart));
6343
6344 {
6345 m_out.appendTo(contiguousLoopStart, doubleLoopStart);
6346 LValue index = m_out.phi(pointerType(), startIndexForContiguous);
6347
6348 TypedPointer loadSite = m_out.baseIndex(m_heaps.root, butterfly, index, ScaleEight); // We read TOP here since we can be reading either int32 or contiguous properties.
6349 LValue value = m_out.load64(loadSite);
6350 value = m_out.select(m_out.isZero64(value), m_out.constInt64(JSValue::encode(jsUndefined())), value);
6351 m_out.store64(value, m_out.baseIndex(m_heaps.JSFixedArray_buffer, fastAllocation, index));
6352
6353 LValue nextIndex = m_out.add(index, m_out.constIntPtr(1));
6354 m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
6355
6356 m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)),
6357 unsure(contiguousLoopStart), unsure(continuation));
6358 }
6359
6360 {
6361 m_out.appendTo(doubleLoopStart, slowPath);
6362 LValue index = m_out.phi(pointerType(), startIndexForDouble);
6363
6364 LValue value = m_out.loadDouble(m_out.baseIndex(m_heaps.indexedDoubleProperties, butterfly, index));
6365 LValue isNaN = m_out.doubleNotEqualOrUnordered(value, value);
6366 LValue holeResult = m_out.constInt64(JSValue::encode(jsUndefined()));
6367 LValue normalResult = boxDouble(value);
6368 value = m_out.select(isNaN, holeResult, normalResult);
6369 m_out.store64(value, m_out.baseIndex(m_heaps.JSFixedArray_buffer, fastAllocation, index));
6370
6371 LValue nextIndex = m_out.add(index, m_out.constIntPtr(1));
6372 m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
6373
6374 m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)),
6375 unsure(doubleLoopStart), unsure(continuation));
6376 }
6377
6378 m_out.appendTo(slowPath, continuation);
6379 ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), m_out.operation(operationSpreadFastArray), m_callFrame, argument));
6380 m_out.jump(continuation);
6381
6382 m_out.appendTo(continuation, lastNext);
6383 result = m_out.phi(pointerType(), fastResult, slowResult);
6384 mutatorFence();
6385 } else
6386 result = vmCall(pointerType(), m_out.operation(operationSpreadGeneric), m_callFrame, argument);
6387
6388 setJSValue(result);
6389 }
6390
    // Lowers NewArrayBuffer: creates a JSArray whose backing store is the
    // node's compile-time-constant JSImmutableButterfly cell operand.
    void compileNewArrayBuffer()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
            m_node->indexingMode()));
        auto* immutableButterfly = m_node->castOperand<JSImmutableButterfly*>();

        if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingMode())) {
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Fast path: inline-allocate the JSArray cell and point its
            // butterfly at the shared immutable butterfly — no element copy.
            LValue fastArray = allocateObject<JSArray>(structure, m_out.constIntPtr(immutableButterfly->toButterfly()), slowPath);
            ValueFromBlock fastResult = m_out.anchor(fastArray);
            m_out.jump(continuation);

            // Slow path: allocation failed inline; call into the runtime.
            m_out.appendTo(slowPath, continuation);
            LValue slowArray = vmCall(Int64, m_out.operation(operationNewArrayBuffer), m_callFrame, weakStructure(structure), m_out.weakPointer(m_node->cellOperand()));
            ValueFromBlock slowResult = m_out.anchor(slowArray);
            m_out.jump(continuation);

            m_out.appendTo(continuation);

            mutatorFence();
            setJSValue(m_out.phi(pointerType(), slowResult, fastResult));
            return;
        }

        // The global object is "having a bad time" or the indexing mode needs
        // ArrayStorage: defer entirely to the runtime operation.
        setJSValue(vmCall(
            Int64, m_out.operation(operationNewArrayBuffer), m_callFrame,
            weakStructure(structure), m_out.weakPointer(m_node->cellOperand())));
    }
6422
6423 void compileNewArrayWithSize()
6424 {
6425 LValue publicLength = lowInt32(m_node->child1());
6426
6427 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
6428 RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
6429 m_node->indexingType()));
6430
6431 if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
6432 IndexingType indexingType = m_node->indexingType();
6433 setJSValue(
6434 allocateJSArray(
6435 publicLength, publicLength, weakPointer(globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType)), m_out.constInt32(indexingType)).array);
6436 mutatorFence();
6437 return;
6438 }
6439
6440 LValue structureValue = m_out.select(
6441 m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)),
6442 weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))),
6443 weakStructure(structure));
6444 setJSValue(vmCall(Int64, m_out.operation(operationNewArrayWithSize), m_callFrame, structureValue, publicLength, m_out.intPtrZero));
6445 }
6446
    // Lowers NewTypedArray. Int32Use sizes get an inline fast path (allocate
    // zeroed backing storage plus the view cell); everything else calls the
    // runtime.
    void compileNewTypedArray()
    {
        TypedArrayType typedArrayType = m_node->typedArrayType();
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        switch (m_node->child1().useKind()) {
        case Int32Use: {
            RegisteredStructure structure = m_graph.registerStructure(globalObject->typedArrayStructureConcurrently(typedArrayType));

            LValue size = lowInt32(m_node->child1());

            LBasicBlock smallEnoughCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // If we reach slowCase before allocating storage, the phi below
            // sees a null storage pointer.
            ValueFromBlock noStorage = m_out.anchor(m_out.intPtrZero);

            // Sizes above the fast limit always take the slow path.
            m_out.branch(
                m_out.above(size, m_out.constInt32(JSArrayBufferView::fastSizeLimit)),
                rarely(slowCase), usually(smallEnoughCase));

            LBasicBlock lastNext = m_out.appendTo(smallEnoughCase, slowCase);

            // byteSize = size << logElementSize, rounded up to a multiple of 8
            // for sub-8-byte element types so splatWords can zero in 64-bit
            // chunks.
            LValue byteSize =
                m_out.shl(m_out.zeroExtPtr(size), m_out.constInt32(logElementSize(typedArrayType)));
            if (elementSize(typedArrayType) < 8) {
                byteSize = m_out.bitAnd(
                    m_out.add(byteSize, m_out.constIntPtr(7)),
                    m_out.constIntPtr(~static_cast<intptr_t>(7)));
            }

            LValue allocator = allocatorForSize(vm().primitiveGigacageAuxiliarySpace, byteSize, slowCase);
            LValue storage = allocateHeapCell(allocator, slowCase);

            // Zero-fill the freshly allocated backing store, 8 bytes at a time.
            splatWords(
                storage,
                m_out.int32Zero,
                m_out.castToInt32(m_out.lShr(byteSize, m_out.constIntPtr(3))),
                m_out.int64Zero,
                m_heaps.typedArrayProperties);

#if CPU(ARM64E)
            // On ARM64E the vector pointer is signed with the array length;
            // this patchpoint applies the tag before we store it into the view.
            {
                LValue sizePtr = m_out.zeroExtPtr(size);
                PatchpointValue* authenticate = m_out.patchpoint(pointerType());
                authenticate->appendSomeRegister(storage);
                authenticate->append(sizePtr, B3::ValueRep(B3::ValueRep::SomeLateRegister));
                authenticate->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                    jit.move(params[1].gpr(), params[0].gpr());
                    jit.tagArrayPtr(params[2].gpr(), params[0].gpr());
                });
                storage = authenticate;
            }
#endif

            ValueFromBlock haveStorage = m_out.anchor(storage);

            LValue fastResultValue =
                allocateObject<JSArrayBufferView>(structure, m_out.intPtrZero, slowCase);

            m_out.storePtr(storage, fastResultValue, m_heaps.JSArrayBufferView_vector);
            m_out.store32(size, fastResultValue, m_heaps.JSArrayBufferView_length);
            m_out.store32(m_out.constInt32(FastTypedArray), fastResultValue, m_heaps.JSArrayBufferView_mode);

            mutatorFence();
            ValueFromBlock fastResult = m_out.anchor(fastResultValue);
            m_out.jump(continuation);

            // Slow case: storageValue is either null (no storage allocated yet)
            // or the storage we already obtained; the runtime handles both.
            m_out.appendTo(slowCase, continuation);
            LValue storageValue = m_out.phi(pointerType(), noStorage, haveStorage);

            VM& vm = this->vm();
            LValue slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationNewTypedArrayWithSizeForType(typedArrayType), locations[0].directGPR(),
                        CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                        locations[2].directGPR());
                },
                size, storageValue);
            ValueFromBlock slowResult = m_out.anchor(slowResultValue);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
            return;
        }

        case UntypedUse: {
            // Generic one-argument constructor path: the runtime sorts out
            // whether the argument is a length, buffer, or iterable.
            LValue argument = lowJSValue(m_node->child1());

            LValue result = vmCall(
                pointerType(), m_out.operation(operationNewTypedArrayWithOneArgumentForType(typedArrayType)),
                m_callFrame, weakPointer(globalObject->typedArrayStructureConcurrently(typedArrayType)), argument);

            setJSValue(result);
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return;
        }
    }
6551
6552 void compileAllocatePropertyStorage()
6553 {
6554 LValue object = lowCell(m_node->child1());
6555 setStorage(allocatePropertyStorage(object, m_node->transition()->previous.get()));
6556 }
6557
6558 void compileReallocatePropertyStorage()
6559 {
6560 Transition* transition = m_node->transition();
6561 LValue object = lowCell(m_node->child1());
6562 LValue oldStorage = lowStorage(m_node->child2());
6563
6564 setStorage(
6565 reallocatePropertyStorage(
6566 object, oldStorage, transition->previous.get(), transition->next.get()));
6567 }
6568
    // NukeStructureAndSetButterfly: nukes the object's structure ID and
    // installs the new butterfly pointer, as one step of a storage transition.
    // child1 is the object cell, child2 the new storage.
    void compileNukeStructureAndSetButterfly()
    {
        nukeStructureAndSetButterfly(lowStorage(m_node->child2()), lowCell(m_node->child1()));
    }
6573
    // Lowers ToNumber. If the input is proven to never be a number, we call
    // the runtime unconditionally; otherwise numbers pass through and only
    // non-numbers take the call.
    void compileToNumber()
    {
        LValue value = lowJSValue(m_node->child1());

        if (!(abstractValue(m_node->child1()).m_type & SpecBytecodeNumber))
            setJSValue(vmCall(Int64, m_out.operation(operationToNumber), m_callFrame, value));
        else {
            LBasicBlock notNumber = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Fast result: the value is already a number, so it is its own
            // ToNumber result.
            ValueFromBlock fastResult = m_out.anchor(value);
            m_out.branch(isNumber(value, provenType(m_node->child1())), unsure(continuation), unsure(notNumber));

            // notNumber case.
            LBasicBlock lastNext = m_out.appendTo(notNumber, continuation);
            // We have several attempts to remove ToNumber. But ToNumber still exists.
            // It means that converting non-numbers to numbers by this ToNumber is not rare.
            // Instead of the lazy slow path generator, we call the operation here.
            ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationToNumber), m_callFrame, value));
            m_out.jump(continuation);

            // continuation case.
            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
        }
    }
6600
    // Lowers ToString, CallStringConstructor, and StringValueOf, dispatching on
    // the child's use kind. String-like cells are unwrapped inline; everything
    // else calls the appropriate runtime operation.
    void compileToStringOrCallStringConstructorOrStringValueOf()
    {
        ASSERT(m_node->op() != StringValueOf || m_node->child1().useKind() == UntypedUse);
        switch (m_node->child1().useKind()) {
        case StringObjectUse: {
            // Proven StringObject: speculate, then read the wrapped string.
            LValue cell = lowCell(m_node->child1());
            speculateStringObjectForCell(m_node->child1(), cell);
            setJSValue(m_out.loadPtr(cell, m_heaps.JSWrapperObject_internalValue));
            return;
        }

        case StringOrStringObjectUse: {
            // Either a string (returned as-is) or a StringObject (unwrapped).
            // Any other type triggers a BadType speculation failure.
            LValue cell = lowCell(m_node->child1());
            LValue type = m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType);

            LBasicBlock notString = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            ValueFromBlock simpleResult = m_out.anchor(cell);
            m_out.branch(
                m_out.equal(type, m_out.constInt32(StringType)),
                unsure(continuation), unsure(notString));

            LBasicBlock lastNext = m_out.appendTo(notString, continuation);
            speculate(
                BadType, jsValueValue(cell), m_node->child1().node(),
                m_out.notEqual(type, m_out.constInt32(StringObjectType)));
            ValueFromBlock unboxedResult = m_out.anchor(
                m_out.loadPtr(cell, m_heaps.JSWrapperObject_internalValue));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, simpleResult, unboxedResult));

            // Tell the abstract interpreter what we just proved.
            m_interpreter.filter(m_node->child1(), SpecString | SpecStringObject);
            return;
        }

        case CellUse:
        case NotCellUse:
        case UntypedUse: {
            // Generic path: strings pass through; everything else calls the
            // operation matching the node op and use kind.
            LValue value;
            if (m_node->child1().useKind() == CellUse)
                value = lowCell(m_node->child1());
            else if (m_node->child1().useKind() == NotCellUse)
                value = lowNotCell(m_node->child1());
            else
                value = lowJSValue(m_node->child1());

            LBasicBlock isCell = m_out.newBlock();
            LBasicBlock notString = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // The cell check is statically decided for CellUse/NotCellUse.
            LValue isCellPredicate;
            if (m_node->child1().useKind() == CellUse)
                isCellPredicate = m_out.booleanTrue;
            else if (m_node->child1().useKind() == NotCellUse)
                isCellPredicate = m_out.booleanFalse;
            else
                isCellPredicate = this->isCell(value, provenType(m_node->child1()));
            m_out.branch(isCellPredicate, unsure(isCell), unsure(notString));

            LBasicBlock lastNext = m_out.appendTo(isCell, notString);
            ValueFromBlock simpleResult = m_out.anchor(value);
            // Only emit the string check if the profile says strings occur.
            LValue isStringPredicate;
            if (m_node->child1()->prediction() & SpecString) {
                isStringPredicate = isString(value, provenType(m_node->child1()));
            } else
                isStringPredicate = m_out.booleanFalse;
            m_out.branch(isStringPredicate, unsure(continuation), unsure(notString));

            m_out.appendTo(notString, continuation);
            LValue operation;
            if (m_node->child1().useKind() == CellUse) {
                ASSERT(m_node->op() != StringValueOf);
                operation = m_out.operation(m_node->op() == ToString ? operationToStringOnCell : operationCallStringConstructorOnCell);
            } else {
                operation = m_out.operation(m_node->op() == ToString
                    ? operationToString : m_node->op() == StringValueOf
                    ? operationStringValueOf : operationCallStringConstructor);
            }
            ValueFromBlock convertedResult = m_out.anchor(vmCall(Int64, operation, m_callFrame, value));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, simpleResult, convertedResult));
            return;
        }

        // Numeric use kinds: converting with radix 10 via the "valid radix"
        // operations.
        case Int32Use:
            setJSValue(vmCall(Int64, m_out.operation(operationInt32ToStringWithValidRadix), m_callFrame, lowInt32(m_node->child1()), m_out.constInt32(10)));
            return;

        case Int52RepUse:
            setJSValue(vmCall(Int64, m_out.operation(operationInt52ToStringWithValidRadix), m_callFrame, lowStrictInt52(m_node->child1()), m_out.constInt32(10)));
            return;

        case DoubleRepUse:
            setJSValue(vmCall(Int64, m_out.operation(operationDoubleToStringWithValidRadix), m_callFrame, lowDouble(m_node->child1()), m_out.constInt32(10)));
            return;

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
6707
    // Lowers ToPrimitive: non-cells and non-object cells are already primitive
    // and pass through; only objects call the runtime operation.
    void compileToPrimitive()
    {
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock isObjectCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Vector<ValueFromBlock, 3> results;

        // Not a cell: the value itself is the primitive result.
        results.append(m_out.anchor(value));
        m_out.branch(
            isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));

        // Cell but not an object (e.g. a string): also already primitive.
        LBasicBlock lastNext = m_out.appendTo(isCellCase, isObjectCase);
        results.append(m_out.anchor(value));
        m_out.branch(
            isObject(value, provenType(m_node->child1())),
            unsure(isObjectCase), unsure(continuation));

        // Object: the runtime runs the ToPrimitive protocol.
        m_out.appendTo(isObjectCase, continuation);
        results.append(m_out.anchor(vmCall(
            Int64, m_out.operation(operationToPrimitive), m_callFrame, value)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, results));
    }
6736
    // Lowers MakeRope: inline-allocates a JSRopeString over two or three child
    // strings, packing fibers, flags, and length into the three fiber words.
    // Falls back to the runtime on allocation failure, and to the empty string
    // when the combined length is zero.
    void compileMakeRope()
    {
        struct FlagsAndLength {
            LValue flags;
            LValue length;
        };

        Edge edges[3] = {
            m_node->child1(),
            m_node->child2(),
            m_node->child3(),
        };
        LValue kids[3];
        unsigned numKids;
        kids[0] = lowCell(edges[0]);
        kids[1] = lowCell(edges[1]);
        if (edges[2]) {
            kids[2] = lowCell(edges[2]);
            numKids = 3;
        } else {
            kids[2] = 0;
            numKids = 2;
        }

        LBasicBlock emptyCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Allocator allocator = allocatorForNonVirtualConcurrently<JSRopeString>(vm(), sizeof(JSRopeString), AllocatorForMode::AllocatorIfExists);

        LValue result = allocateCell(
            m_out.constIntPtr(allocator.localAllocator()), vm().stringStructure.get(), slowPath);

        // This puts nullptr for the first fiber. It makes visitChildren safe even if this JSRopeString is discarded due to the speculation failure in the following path.
        m_out.storePtr(m_out.constIntPtr(JSString::isRopeInPointer), result, m_heaps.JSRopeString_fiber0);

        // Produces the is8Bit flag and length of one child. Constant strings
        // are folded; otherwise we read from the rope header or StringImpl.
        auto getFlagsAndLength = [&] (Edge& edge, LValue child) {
            if (JSString* string = edge->dynamicCastConstant<JSString*>(vm())) {
                return FlagsAndLength {
                    m_out.constInt32(string->is8Bit() ? StringImpl::flagIs8Bit() : 0),
                    m_out.constInt32(string->length())
                };
            }

            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock ropeCase = m_out.newBlock();
            LBasicBlock notRopeCase = m_out.newBlock();

            m_out.branch(isRopeString(child, edge), unsure(ropeCase), unsure(notRopeCase));

            // Rope child: flags/length live in the JSRopeString header.
            LBasicBlock lastNext = m_out.appendTo(ropeCase, notRopeCase);
            ValueFromBlock flagsForRope = m_out.anchor(m_out.load32NonNegative(child, m_heaps.JSRopeString_flags));
            ValueFromBlock lengthForRope = m_out.anchor(m_out.load32NonNegative(child, m_heaps.JSRopeString_length));
            m_out.jump(continuation);

            // Resolved child: read from the underlying StringImpl.
            m_out.appendTo(notRopeCase, continuation);
            LValue stringImpl = m_out.loadPtr(child, m_heaps.JSString_value);
            ValueFromBlock flagsForNonRope = m_out.anchor(m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_hashAndFlags));
            ValueFromBlock lengthForNonRope = m_out.anchor(m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return FlagsAndLength {
                m_out.phi(Int32, flagsForRope, flagsForNonRope),
                m_out.phi(Int32, lengthForRope, lengthForNonRope)
            };
        };

        // Combine children: flags are AND-ed (8-bit only if all children are
        // 8-bit); lengths are added with an overflow speculation check.
        FlagsAndLength flagsAndLength = getFlagsAndLength(edges[0], kids[0]);
        for (unsigned i = 1; i < numKids; ++i) {
            auto mergeFlagsAndLength = [&] (Edge& edge, LValue child, FlagsAndLength previousFlagsAndLength) {
                FlagsAndLength flagsAndLength = getFlagsAndLength(edge, child);
                LValue flags = m_out.bitAnd(previousFlagsAndLength.flags, flagsAndLength.flags);
                CheckValue* lengthCheck = m_out.speculateAdd(previousFlagsAndLength.length, flagsAndLength.length);
                blessSpeculation(lengthCheck, Uncountable, noValue(), nullptr, m_origin);
                return FlagsAndLength {
                    flags,
                    lengthCheck
                };
            };
            flagsAndLength = mergeFlagsAndLength(edges[i], kids[i], flagsAndLength);
        }

        // Pack the fibers: fiber0 holds kid0 | rope bit | 8-bit bit; fiber1
        // holds length plus the low half of kid1; fiber2 holds the rest of
        // kid1 and (for three kids) kid2.
        m_out.storePtr(
            m_out.bitOr(
                m_out.bitOr(kids[0], m_out.constIntPtr(JSString::isRopeInPointer)),
                m_out.bitAnd(m_out.constIntPtr(JSRopeString::is8BitInPointer), m_out.zeroExtPtr(flagsAndLength.flags))),
            result, m_heaps.JSRopeString_fiber0);
        m_out.storePtr(
            m_out.bitOr(m_out.zeroExtPtr(flagsAndLength.length), m_out.shl(kids[1], m_out.constInt32(32))),
            result, m_heaps.JSRopeString_fiber1);
        if (numKids == 2)
            m_out.storePtr(m_out.lShr(kids[1], m_out.constInt32(32)), result, m_heaps.JSRopeString_fiber2);
        else
            m_out.storePtr(m_out.bitOr(m_out.lShr(kids[1], m_out.constInt32(32)), m_out.shl(kids[2], m_out.constInt32(16))), result, m_heaps.JSRopeString_fiber2);

        mutatorFence();
        ValueFromBlock fastResult = m_out.anchor(result);
        // A zero total length means the result must be the shared empty string.
        m_out.branch(m_out.isZero32(flagsAndLength.length), rarely(emptyCase), usually(continuation));

        LBasicBlock lastNext = m_out.appendTo(emptyCase, slowPath);
        ValueFromBlock emptyResult = m_out.anchor(weakPointer(jsEmptyString(&m_graph.m_vm)));
        m_out.jump(continuation);

        // Slow path: cell allocation failed; build the rope in the runtime.
        m_out.appendTo(slowPath, continuation);
        LValue slowResultValue;
        VM& vm = this->vm();
        switch (numKids) {
        case 2:
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationMakeRope2, locations[0].directGPR(), locations[1].directGPR(),
                        locations[2].directGPR());
                }, kids[0], kids[1]);
            break;
        case 3:
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationMakeRope3, locations[0].directGPR(), locations[1].directGPR(),
                        locations[2].directGPR(), locations[3].directGPR());
                }, kids[0], kids[1], kids[2]);
            break;
        default:
            DFG_CRASH(m_graph, m_node, "Bad number of children");
            break;
        }
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, emptyResult, slowResult));
    }
6871
    // Lowers StringCharAt: in-bounds characters are fetched from the 8-bit or
    // 16-bit character buffer and mapped to the single-character string cache;
    // big characters and out-of-bounds indices take runtime calls.
    void compileStringCharAt()
    {
        LValue base = lowString(m_graph.child(m_node, 0));
        LValue index = lowInt32(m_graph.child(m_node, 1));
        LValue storage = lowStorage(m_graph.child(m_node, 2));

        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Bounds check against the StringImpl length (unsigned compare also
        // catches negative indices).
        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);
        m_out.branch(
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length)),
            rarely(slowPath), usually(fastPath));

        LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);

        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock bitsContinuation = m_out.newBlock();
        LBasicBlock bigCharacter = m_out.newBlock();

        // Dispatch on the StringImpl's 8-bit flag.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        m_out.appendTo(is8Bit, is16Bit);

        // FIXME: Need to cage strings!
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        ValueFromBlock char8Bit = m_out.anchor(
            m_out.load8ZeroExt32(m_out.baseIndex(
                m_heaps.characters8, storage, m_out.zeroExtPtr(index),
                provenValue(m_graph.child(m_node, 1)))));
        m_out.jump(bitsContinuation);

        m_out.appendTo(is16Bit, bigCharacter);

        LValue char16BitValue = m_out.load16ZeroExt32(
            m_out.baseIndex(
                m_heaps.characters16, storage, m_out.zeroExtPtr(index),
                provenValue(m_graph.child(m_node, 1))));
        ValueFromBlock char16Bit = m_out.anchor(char16BitValue);
        // Characters beyond the single-character cache limit go to the runtime.
        m_out.branch(
            m_out.above(char16BitValue, m_out.constInt32(maxSingleCharacterString)),
            rarely(bigCharacter), usually(bitsContinuation));

        m_out.appendTo(bigCharacter, bitsContinuation);

        Vector<ValueFromBlock, 4> results;
        results.append(m_out.anchor(vmCall(
            Int64, m_out.operation(operationSingleCharacterString),
            m_callFrame, char16BitValue)));
        m_out.jump(continuation);

        m_out.appendTo(bitsContinuation, slowPath);

        LValue character = m_out.phi(Int32, char8Bit, char16Bit);

        // Small characters map to VM-interned single-character strings.
        LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());

        results.append(m_out.anchor(m_out.loadPtr(m_out.baseIndex(
            m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(character)))));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        if (m_node->arrayMode().isInBounds()) {
            // In-bounds mode: reaching here is a speculation failure.
            speculate(OutOfBounds, noValue(), 0, m_out.booleanTrue);
            results.append(m_out.anchor(m_out.intPtrZero));
        } else {
            JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

            bool prototypeChainIsSane = false;
            if (globalObject->stringPrototypeChainIsSane()) {
                // FIXME: This could be captured using a Speculation mode that means
                // "out-of-bounds loads return a trivial value", something like
                // SaneChainOutOfBounds.
                // https://bugs.webkit.org/show_bug.cgi?id=144668

                m_graph.registerAndWatchStructureTransition(globalObject->stringPrototype()->structure(vm()));
                m_graph.registerAndWatchStructureTransition(globalObject->objectPrototype()->structure(vm()));

                prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
            }
            if (prototypeChainIsSane) {
                // Sane chain: non-negative out-of-bounds reads are undefined;
                // only negative indices need the generic runtime lookup.
                LBasicBlock negativeIndex = m_out.newBlock();

                results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
                m_out.branch(
                    m_out.lessThan(index, m_out.int32Zero),
                    rarely(negativeIndex), usually(continuation));

                m_out.appendTo(negativeIndex, continuation);
            }

            results.append(m_out.anchor(vmCall(
                Int64, m_out.operation(operationGetByValStringInt), m_callFrame, base, index)));
        }

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, results));
    }
6980
    // Lowers StringCharCodeAt: bounds-checks via speculation, then reads the
    // character code from the 8-bit or 16-bit buffer.
    void compileStringCharCodeAt()
    {
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue base = lowString(m_node->child1());
        LValue index = lowInt32(m_node->child2());
        LValue storage = lowStorage(m_node->child3());

        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);

        // Out-of-bounds indices are a speculation failure (unsigned compare
        // also catches negative indices).
        speculate(
            Uncountable, noValue(), 0,
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length)));

        // Dispatch on the StringImpl's 8-bit flag.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        LBasicBlock lastNext = m_out.appendTo(is8Bit, is16Bit);

        // FIXME: need to cage strings!
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        ValueFromBlock char8Bit = m_out.anchor(
            m_out.load8ZeroExt32(m_out.baseIndex(
                m_heaps.characters8, storage, m_out.zeroExtPtr(index),
                provenValue(m_node->child2()))));
        m_out.jump(continuation);

        m_out.appendTo(is16Bit, continuation);

        ValueFromBlock char16Bit = m_out.anchor(
            m_out.load16ZeroExt32(m_out.baseIndex(
                m_heaps.characters16, storage, m_out.zeroExtPtr(index),
                provenValue(m_node->child2()))));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        setInt32(m_out.phi(Int32, char8Bit, char16Bit));
    }
7026
    // Lowers StringFromCharCode: small codes hit the VM's single-character
    // string cache; large codes and untyped inputs call the runtime.
    void compileStringFromCharCode()
    {
        Edge childEdge = m_node->child1();

        if (childEdge.useKind() == UntypedUse) {
            LValue result = vmCall(
                Int64, m_out.operation(operationStringFromCharCodeUntyped), m_callFrame,
                lowJSValue(childEdge));
            setJSValue(result);
            return;
        }

        DFG_ASSERT(m_graph, m_node, childEdge.useKind() == Int32Use, childEdge.useKind());

        LValue value = lowInt32(childEdge);

        LBasicBlock smallIntCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Codes above the cache limit take the slow call.
        m_out.branch(
            m_out.above(value, m_out.constInt32(maxSingleCharacterString)),
            rarely(slowCase), usually(smallIntCase));

        LBasicBlock lastNext = m_out.appendTo(smallIntCase, slowCase);

        // Fast path: index into the interned single-character string table.
        LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());
        LValue fastResultValue = m_out.loadPtr(
            m_out.baseIndex(m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(value)));
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);

        LValue slowResultValue = vmCall(
            pointerType(), m_out.operation(operationStringFromCharCode), m_callFrame, value);
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        setJSValue(m_out.phi(Int64, fastResult, slowResult));
    }
7070
7071 void compileGetByOffset()
7072 {
7073 StorageAccessData& data = m_node->storageAccessData();
7074
7075 setJSValue(loadProperty(
7076 lowStorage(m_node->child1()), data.identifierNumber, data.offset));
7077 }
7078
7079 void compileGetGetter()
7080 {
7081 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_getter));
7082 }
7083
7084 void compileGetSetter()
7085 {
7086 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_setter));
7087 }
7088
    void compileMultiGetByOffset()
    {
        // Lowers a polymorphic property load: switch on the base cell's structure
        // and use the load strategy recorded for whichever case matches.
        LValue base = lowCell(m_node->child1());

        MultiGetByOffsetData& data = m_node->multiGetByOffsetData();

        // One basic block per case; every structure in a case's set routes to
        // that case's block.
        Vector<LBasicBlock, 2> blocks(data.cases.size());
        for (unsigned i = data.cases.size(); i--;)
            blocks[i] = m_out.newBlock();
        LBasicBlock exit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (unsigned i = data.cases.size(); i--;) {
            MultiGetByOffsetCase getCase = data.cases[i];
            for (unsigned j = getCase.set().size(); j--;) {
                RegisteredStructure structure = getCase.set()[j];
                baseSet.add(structure);
                cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
            }
        }
        // If abstract interpretation proves the base's structure is covered by
        // our cases, the exit block is unreachable and needs no speculation.
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exit);

        LBasicBlock lastNext = m_out.m_nextBlock;

        Vector<ValueFromBlock, 2> results;
        for (unsigned i = data.cases.size(); i--;) {
            MultiGetByOffsetCase getCase = data.cases[i];
            GetByOffsetMethod method = getCase.method();

            m_out.appendTo(blocks[i], i + 1 < data.cases.size() ? blocks[i + 1] : exit);

            LValue result;

            switch (method.kind()) {
            case GetByOffsetMethod::Invalid:
                RELEASE_ASSERT_NOT_REACHED();
                break;

            case GetByOffsetMethod::Constant:
                // The property's value is a known constant; no load required.
                result = m_out.constInt64(JSValue::encode(method.constant()->value()));
                break;

            case GetByOffsetMethod::Load:
            case GetByOffsetMethod::LoadFromPrototype: {
                // Load from the base itself, or from a known prototype cell.
                // Out-of-line properties require chasing the butterfly first.
                LValue propertyBase;
                if (method.kind() == GetByOffsetMethod::Load)
                    propertyBase = base;
                else
                    propertyBase = weakPointer(method.prototype()->value().asCell());
                if (!isInlineOffset(method.offset()))
                    propertyBase = m_out.loadPtr(propertyBase, m_heaps.JSObject_butterfly);
                result = loadProperty(
                    propertyBase, data.identifierNumber, method.offset());
                break;
            } }

            results.append(m_out.anchor(result));
            m_out.jump(continuation);
        }

        // Default switch target: if the structures weren't fully checked, exit to
        // the baseline with BadCache; otherwise this block is provably dead.
        m_out.appendTo(exit, continuation);
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, results));
    }
7160
7161 void compilePutByOffset()
7162 {
7163 StorageAccessData& data = m_node->storageAccessData();
7164
7165 storeProperty(
7166 lowJSValue(m_node->child3()),
7167 lowStorage(m_node->child1()), data.identifierNumber, data.offset);
7168 }
7169
    void compileMultiPutByOffset()
    {
        // Lowers a polymorphic property store: switch on the base's structure and
        // either replace an existing property in place or transition the object
        // to a new structure before storing.
        LValue base = lowCell(m_node->child1());
        LValue value = lowJSValue(m_node->child2());

        MultiPutByOffsetData& data = m_node->multiPutByOffsetData();

        Vector<LBasicBlock, 2> blocks(data.variants.size());
        for (unsigned i = data.variants.size(); i--;)
            blocks[i] = m_out.newBlock();
        LBasicBlock exit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Every old structure of a variant routes to that variant's block.
        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (unsigned i = data.variants.size(); i--;) {
            PutByIdVariant variant = data.variants[i];
            for (unsigned j = variant.oldStructure().size(); j--;) {
                RegisteredStructure structure = m_graph.registerStructure(variant.oldStructure()[j]);
                baseSet.add(structure);
                cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
            }
        }
        // No exit-path speculation needed when the proven structure set is a
        // subset of the variants we handle.
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exit);

        LBasicBlock lastNext = m_out.m_nextBlock;

        for (unsigned i = data.variants.size(); i--;) {
            m_out.appendTo(blocks[i], i + 1 < data.variants.size() ? blocks[i + 1] : exit);

            PutByIdVariant variant = data.variants[i];

            LValue storage;
            if (variant.kind() == PutByIdVariant::Replace) {
                // Replacing an existing property: storage is either inline in the
                // object or out-of-line behind the butterfly.
                if (isInlineOffset(variant.offset()))
                    storage = base;
                else
                    storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
            } else {
                DFG_ASSERT(m_graph, m_node, variant.kind() == PutByIdVariant::Transition, variant.kind());
                // Register the structure transition with the compilation plan,
                // then obtain (possibly reallocated) storage for the new shape.
                m_graph.m_plan.transitions().addLazily(
                    codeBlock(), m_node->origin.semantic.codeOriginOwner(),
                    variant.oldStructureForTransition(), variant.newStructure());

                storage = storageForTransition(
                    base, variant.offset(),
                    variant.oldStructureForTransition(), variant.newStructure());
            }

            storeProperty(value, storage, data.identifierNumber, variant.offset());

            if (variant.kind() == PutByIdVariant::Transition) {
                // The transition may only change property layout: indexing type
                // and inline type info must be identical. The new structure ID is
                // published only after the value has been stored.
                ASSERT(variant.oldStructureForTransition()->indexingType() == variant.newStructure()->indexingType());
                ASSERT(variant.oldStructureForTransition()->typeInfo().inlineTypeFlags() == variant.newStructure()->typeInfo().inlineTypeFlags());
                ASSERT(variant.oldStructureForTransition()->typeInfo().type() == variant.newStructure()->typeInfo().type());
                m_out.store32(
                    weakStructureID(m_graph.registerStructure(variant.newStructure())), base, m_heaps.JSCell_structureID);
            }

            m_out.jump(continuation);
        }

        m_out.appendTo(exit, continuation);
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();

        m_out.appendTo(continuation, lastNext);
    }
7240
    void compileMatchStructure()
    {
        // Produces a boolean by matching the base cell's structure against a list
        // of (structure -> bool) variants.
        LValue base = lowCell(m_node->child1());

        MatchStructureData& data = m_node->matchStructureData();

        LBasicBlock trueBlock = m_out.newBlock();
        LBasicBlock falseBlock = m_out.newBlock();
        LBasicBlock exitBlock = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(trueBlock);

        // All true-result variants share trueBlock; false-result ones share
        // falseBlock.
        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (MatchStructureVariant& variant : data.variants) {
            baseSet.add(variant.structure);
            cases.append(SwitchCase(
                weakStructureID(variant.structure),
                variant.result ? trueBlock : falseBlock, Weight(1)));
        }
        // If the proven structure set is covered by the variants, the exit block
        // is unreachable and needs no speculation check.
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exitBlock);

        m_out.appendTo(trueBlock, falseBlock);
        ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);

        m_out.appendTo(falseBlock, exitBlock);
        ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);

        m_out.appendTo(exitBlock, continuation);
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, trueResult, falseResult));
    }
7281
7282 void compileGetGlobalVariable()
7283 {
7284 setJSValue(m_out.load64(m_out.absolute(m_node->variablePointer())));
7285 }
7286
7287 void compilePutGlobalVariable()
7288 {
7289 m_out.store64(
7290 lowJSValue(m_node->child2()), m_out.absolute(m_node->variablePointer()));
7291 }
7292
    void compileNotifyWrite()
    {
        // Notify a WatchpointSet that a write happened. Fast path: if the set is
        // already invalidated there is nothing left to fire, so skip the call.
        WatchpointSet* set = m_node->watchpointSet();

        LBasicBlock isNotInvalidated = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue state = m_out.load8ZeroExt32(m_out.absolute(set->addressOfState()));
        m_out.branch(
            m_out.equal(state, m_out.constInt32(IsInvalidated)),
            usually(continuation), rarely(isNotInvalidated));

        LBasicBlock lastNext = m_out.appendTo(isNotInvalidated, continuation);

        // Slow path: call operationNotifyWrite. Emitted lazily since this path
        // is expected to be rarely taken.
        VM& vm = this->vm();
        lazySlowPath(
            [=, &vm] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNotifyWrite, InvalidGPRReg, CCallHelpers::TrustedImmPtr(set));
            });
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
7317
7318 void compileGetCallee()
7319 {
7320 setJSValue(m_out.loadPtr(addressFor(CallFrameSlot::callee)));
7321 }
7322
7323 void compileSetCallee()
7324 {
7325 auto callee = lowCell(m_node->child1());
7326 m_out.storePtr(callee, payloadFor(CallFrameSlot::callee));
7327 }
7328
7329 void compileGetArgumentCountIncludingThis()
7330 {
7331 VirtualRegister argumentCountRegister;
7332 if (InlineCallFrame* inlineCallFrame = m_node->argumentsInlineCallFrame())
7333 argumentCountRegister = inlineCallFrame->argumentCountRegister;
7334 else
7335 argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCount);
7336 setInt32(m_out.load32(payloadFor(argumentCountRegister)));
7337 }
7338
7339 void compileSetArgumentCountIncludingThis()
7340 {
7341 m_out.store32(m_out.constInt32(m_node->argumentCountIncludingThis()), payloadFor(CallFrameSlot::argumentCount));
7342 }
7343
7344 void compileGetScope()
7345 {
7346 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSFunction_scope));
7347 }
7348
7349 void compileSkipScope()
7350 {
7351 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSScope_next));
7352 }
7353
7354 void compileGetGlobalObject()
7355 {
7356 LValue structure = loadStructure(lowCell(m_node->child1()));
7357 setJSValue(m_out.loadPtr(structure, m_heaps.Structure_globalObject));
7358 }
7359
7360 void compileGetGlobalThis()
7361 {
7362 auto* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
7363 setJSValue(m_out.loadPtr(m_out.absolute(globalObject->addressOfGlobalThis())));
7364 }
7365
7366 void compileGetClosureVar()
7367 {
7368 setJSValue(
7369 m_out.load64(
7370 lowCell(m_node->child1()),
7371 m_heaps.JSLexicalEnvironment_variables[m_node->scopeOffset().offset()]));
7372 }
7373
7374 void compilePutClosureVar()
7375 {
7376 m_out.store64(
7377 lowJSValue(m_node->child2()),
7378 lowCell(m_node->child1()),
7379 m_heaps.JSLexicalEnvironment_variables[m_node->scopeOffset().offset()]);
7380 }
7381
7382 void compileGetFromArguments()
7383 {
7384 setJSValue(
7385 m_out.load64(
7386 lowCell(m_node->child1()),
7387 m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()]));
7388 }
7389
7390 void compilePutToArguments()
7391 {
7392 m_out.store64(
7393 lowJSValue(m_node->child2()),
7394 lowCell(m_node->child1()),
7395 m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()]);
7396 }
7397
    void compileGetArgument()
    {
        // Load argument #argumentIndex() from the call frame, producing undefined
        // when the caller passed fewer arguments than that.
        LValue argumentCount = m_out.load32(payloadFor(AssemblyHelpers::argumentCount(m_node->origin.semantic)));

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock outOfBounds = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // count <= index is out of bounds; argumentIndex() appears to be 1-based
        // (note the "- 1" when computing the slot below) -- consistent with the
        // count including |this|.
        m_out.branch(m_out.lessThanOrEqual(argumentCount, m_out.constInt32(m_node->argumentIndex())), unsure(outOfBounds), unsure(inBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
        VirtualRegister arg = AssemblyHelpers::argumentsStart(m_node->origin.semantic) + m_node->argumentIndex() - 1;
        ValueFromBlock inBoundsResult = m_out.anchor(m_out.load64(addressFor(arg)));
        m_out.jump(continuation);

        m_out.appendTo(outOfBounds, continuation);
        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueUndefined));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
    }
7420
    void compileCompareEq()
    {
        // Loose equality (==). For use kinds where loose and strict equality
        // coincide, reuse the strict-equality lowering.
        if (m_node->isBinaryUseKind(Int32Use)
            || m_node->isBinaryUseKind(Int52RepUse)
            || m_node->isBinaryUseKind(DoubleRepUse)
            || m_node->isBinaryUseKind(ObjectUse)
            || m_node->isBinaryUseKind(BooleanUse)
            || m_node->isBinaryUseKind(SymbolUse)
            || m_node->isBinaryUseKind(StringIdentUse)
            || m_node->isBinaryUseKind(StringUse)) {
            compileCompareStrictEq();
            return;
        }

        // Object vs object-or-null/undefined: helper takes (objectOrOther, object).
        if (m_node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
            compareEqObjectOrOtherToObject(m_node->child2(), m_node->child1());
            return;
        }

        if (m_node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
            compareEqObjectOrOtherToObject(m_node->child1(), m_node->child2());
            return;
        }

        // One side statically known to be null/undefined: the result is whether
        // the other side is loosely equal to null/undefined.
        if (m_node->child1().useKind() == KnownOtherUse) {
            ASSERT(!m_interpreter.needsTypeCheck(m_node->child1(), SpecOther));
            setBoolean(equalNullOrUndefined(m_node->child2(), AllCellsAreFalse, EqualNullOrUndefined, ManualOperandSpeculation));
            return;
        }

        if (m_node->child2().useKind() == KnownOtherUse) {
            ASSERT(!m_interpreter.needsTypeCheck(m_node->child2(), SpecOther));
            setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualNullOrUndefined, ManualOperandSpeculation));
            return;
        }

        // Fully untyped fallback: fast path for identical bits, runtime otherwise.
        DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse), m_node->child1().useKind(), m_node->child2().useKind());
        nonSpeculativeCompare(
            [&] (LValue left, LValue right) {
                return m_out.equal(left, right);
            },
            operationCompareEq);
    }
7464
    void compileCompareStrictEq()
    {
        // Strict equality (===), dispatched on the proven use kinds of the two
        // children. Cases are ordered from cheapest to most general.
        if (m_node->isBinaryUseKind(Int32Use)) {
            setBoolean(
                m_out.equal(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(Int52RepUse)) {
            // lowWhicheverInt52 picks whichever Int52 representation child1
            // already has; child2 is then lowered in the same representation.
            Int52Kind kind;
            LValue left = lowWhicheverInt52(m_node->child1(), kind);
            LValue right = lowInt52(m_node->child2(), kind);
            setBoolean(m_out.equal(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(DoubleRepUse)) {
            setBoolean(
                m_out.doubleEqual(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(StringIdentUse)) {
            // Atomized string impls compare by pointer identity.
            setBoolean(
                m_out.equal(lowStringIdent(m_node->child1()), lowStringIdent(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(StringUse)) {
            LValue left = lowCell(m_node->child1());
            LValue right = lowCell(m_node->child2());

            LBasicBlock notTriviallyEqualCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            speculateString(m_node->child1(), left);

            // Fast path: pointer-identical strings are equal. Note that child2 is
            // only speculated on the not-trivially-equal path.
            ValueFromBlock fastResult = m_out.anchor(m_out.booleanTrue);
            m_out.branch(
                m_out.equal(left, right), unsure(continuation), unsure(notTriviallyEqualCase));

            LBasicBlock lastNext = m_out.appendTo(notTriviallyEqualCase, continuation);

            speculateString(m_node->child2(), right);

            // Slow path: full content comparison.
            ValueFromBlock slowResult = m_out.anchor(stringsEqual(left, right, m_node->child1(), m_node->child2()));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, fastResult, slowResult));
            return;
        }

        // Object vs untyped (and the symmetric case): objects compare by pointer.
        if (m_node->isBinaryUseKind(ObjectUse, UntypedUse)) {
            setBoolean(
                m_out.equal(
                    lowNonNullObject(m_node->child1()),
                    lowJSValue(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(UntypedUse, ObjectUse)) {
            setBoolean(
                m_out.equal(
                    lowNonNullObject(m_node->child2()),
                    lowJSValue(m_node->child1())));
            return;
        }

        if (m_node->isBinaryUseKind(ObjectUse)) {
            setBoolean(
                m_out.equal(
                    lowNonNullObject(m_node->child1()),
                    lowNonNullObject(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(BooleanUse)) {
            setBoolean(
                m_out.equal(lowBoolean(m_node->child1()), lowBoolean(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(SymbolUse)) {
            // Symbols compare by cell pointer identity.
            LValue leftSymbol = lowSymbol(m_node->child1());
            LValue rightSymbol = lowSymbol(m_node->child2());
            setBoolean(m_out.equal(leftSymbol, rightSymbol));
            return;
        }

        if (m_node->isBinaryUseKind(BigIntUse)) {
            // FIXME: [ESNext][BigInt] Create specialized version of strict equals for BigIntUse
            // https://bugs.webkit.org/show_bug.cgi?id=182895
            LValue left = lowBigInt(m_node->child1());
            LValue right = lowBigInt(m_node->child2());

            LBasicBlock notTriviallyEqualCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Fast path: same cell implies equal. Otherwise fall back to the
            // generic runtime comparison.
            ValueFromBlock fastResult = m_out.anchor(m_out.booleanTrue);
            m_out.branch(m_out.equal(left, right), rarely(continuation), usually(notTriviallyEqualCase));

            LBasicBlock lastNext = m_out.appendTo(notTriviallyEqualCase, continuation);

            ValueFromBlock slowResult = m_out.anchor(m_out.notNull(vmCall(
                pointerType(), m_out.operation(operationCompareStrictEq), m_callFrame, left, right)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, fastResult, slowResult));
            return;
        }

        if (m_node->isBinaryUseKind(SymbolUse, UntypedUse)
            || m_node->isBinaryUseKind(UntypedUse, SymbolUse)) {
            // Normalize so symbolEdge really is the symbol side.
            Edge symbolEdge = m_node->child1();
            Edge untypedEdge = m_node->child2();
            if (symbolEdge.useKind() != SymbolUse)
                std::swap(symbolEdge, untypedEdge);

            LValue leftSymbol = lowSymbol(symbolEdge);
            LValue untypedValue = lowJSValue(untypedEdge);

            setBoolean(m_out.equal(leftSymbol, untypedValue));
            return;
        }

        if (m_node->isBinaryUseKind(MiscUse, UntypedUse)
            || m_node->isBinaryUseKind(UntypedUse, MiscUse)) {
            // After speculation, comparing the raw encoded values suffices.
            speculate(m_node->child1());
            speculate(m_node->child2());
            LValue left = lowJSValue(m_node->child1(), ManualOperandSpeculation);
            LValue right = lowJSValue(m_node->child2(), ManualOperandSpeculation);
            setBoolean(m_out.equal(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(StringIdentUse, NotStringVarUse)
            || m_node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
            Edge leftEdge = m_node->childFor(StringIdentUse);
            Edge rightEdge = m_node->childFor(NotStringVarUse);

            LValue left = lowStringIdent(leftEdge);
            LValue rightValue = lowJSValue(rightEdge, ManualOperandSpeculation);

            LBasicBlock isCellCase = m_out.newBlock();
            LBasicBlock isStringCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Three-way filter: non-cells and non-strings are trivially unequal;
            // strings compare by atomized impl pointer.
            ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
            m_out.branch(
                isCell(rightValue, provenType(rightEdge)),
                unsure(isCellCase), unsure(continuation));

            LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
            ValueFromBlock notStringResult = m_out.anchor(m_out.booleanFalse);
            m_out.branch(
                isString(rightValue, provenType(rightEdge)),
                unsure(isStringCase), unsure(continuation));

            m_out.appendTo(isStringCase, continuation);
            LValue right = m_out.loadPtr(rightValue, m_heaps.JSString_value);
            speculateStringIdent(rightEdge, rightValue, right);
            ValueFromBlock isStringResult = m_out.anchor(m_out.equal(left, right));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, notCellResult, notStringResult, isStringResult));
            return;
        }

        if (m_node->isBinaryUseKind(StringUse, UntypedUse)) {
            compileStringToUntypedStrictEquality(m_node->child1(), m_node->child2());
            return;
        }
        if (m_node->isBinaryUseKind(UntypedUse, StringUse)) {
            compileStringToUntypedStrictEquality(m_node->child2(), m_node->child1());
            return;
        }

        // Fully untyped fallback.
        DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse), m_node->child1().useKind(), m_node->child2().useKind());
        nonSpeculativeCompare(
            [&] (LValue left, LValue right) {
                return m_out.equal(left, right);
            },
            operationCompareStrictEq);
    }
7652
    // Strict equality where one side is known to be a String and the other is
    // untyped. Result: true iff the untyped side is a string with equal contents.
    void compileStringToUntypedStrictEquality(Edge stringEdge, Edge untypedEdge)
    {
        ASSERT(stringEdge.useKind() == StringUse);
        ASSERT(untypedEdge.useKind() == UntypedUse);

        LValue leftString = lowCell(stringEdge);
        LValue rightValue = lowJSValue(untypedEdge);
        SpeculatedType rightValueType = provenType(untypedEdge);

        // Verify left is string.
        speculateString(stringEdge, leftString);

        LBasicBlock testUntypedEdgeIsCell = m_out.newBlock();
        LBasicBlock testUntypedEdgeIsString = m_out.newBlock();
        LBasicBlock testStringEquality = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Given left is string. If the value are strictly equal, rightValue has to be the same string.
        ValueFromBlock fastTrue = m_out.anchor(m_out.booleanTrue);
        m_out.branch(m_out.equal(leftString, rightValue), unsure(continuation), unsure(testUntypedEdgeIsCell));

        // Non-cell values can't be strings, so the result is false.
        LBasicBlock lastNext = m_out.appendTo(testUntypedEdgeIsCell, testUntypedEdgeIsString);
        ValueFromBlock fastFalse = m_out.anchor(m_out.booleanFalse);
        m_out.branch(isNotCell(rightValue, rightValueType), unsure(continuation), unsure(testUntypedEdgeIsString));

        // Check if the untyped edge is a string.
        m_out.appendTo(testUntypedEdgeIsString, testStringEquality);
        m_out.branch(isNotString(rightValue, rightValueType), unsure(continuation), unsure(testStringEquality));

        // Full String compare.
        m_out.appendTo(testStringEquality, continuation);
        ValueFromBlock slowResult = m_out.anchor(stringsEqual(leftString, rightValue, stringEdge, untypedEdge));
        m_out.jump(continuation);

        // Continuation.
        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, fastTrue, fastFalse, slowResult));
    }
7691
7692 void compileCompareEqPtr()
7693 {
7694 setBoolean(
7695 m_out.equal(
7696 lowJSValue(m_node->child1()),
7697 weakPointer(m_node->cellOperand()->cell())));
7698 }
7699
7700 void compileCompareLess()
7701 {
7702 compare(
7703 [&] (LValue left, LValue right) {
7704 return m_out.lessThan(left, right);
7705 },
7706 [&] (LValue left, LValue right) {
7707 return m_out.doubleLessThan(left, right);
7708 },
7709 operationCompareStringImplLess,
7710 operationCompareStringLess,
7711 operationCompareLess);
7712 }
7713
7714 void compileCompareLessEq()
7715 {
7716 compare(
7717 [&] (LValue left, LValue right) {
7718 return m_out.lessThanOrEqual(left, right);
7719 },
7720 [&] (LValue left, LValue right) {
7721 return m_out.doubleLessThanOrEqual(left, right);
7722 },
7723 operationCompareStringImplLessEq,
7724 operationCompareStringLessEq,
7725 operationCompareLessEq);
7726 }
7727
7728 void compileCompareGreater()
7729 {
7730 compare(
7731 [&] (LValue left, LValue right) {
7732 return m_out.greaterThan(left, right);
7733 },
7734 [&] (LValue left, LValue right) {
7735 return m_out.doubleGreaterThan(left, right);
7736 },
7737 operationCompareStringImplGreater,
7738 operationCompareStringGreater,
7739 operationCompareGreater);
7740 }
7741
7742 void compileCompareGreaterEq()
7743 {
7744 compare(
7745 [&] (LValue left, LValue right) {
7746 return m_out.greaterThanOrEqual(left, right);
7747 },
7748 [&] (LValue left, LValue right) {
7749 return m_out.doubleGreaterThanOrEqual(left, right);
7750 },
7751 operationCompareStringImplGreaterEq,
7752 operationCompareStringGreaterEq,
7753 operationCompareGreaterEq);
7754 }
7755
7756 void compileCompareBelow()
7757 {
7758 setBoolean(m_out.below(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
7759 }
7760
7761 void compileCompareBelowEq()
7762 {
7763 setBoolean(m_out.belowOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
7764 }
7765
    void compileSameValue()
    {
        // Implements Object.is-style SameValue. For doubles we first compare raw
        // bit patterns (which distinguishes +0 from -0 and matches identical NaN
        // encodings), then fall back to treating any (NaN, NaN) pair as equal.
        if (m_node->isBinaryUseKind(DoubleRepUse)) {
            LValue arg1 = lowDouble(m_node->child1());
            LValue arg2 = lowDouble(m_node->child2());

            LBasicBlock numberCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Patchpoint: move both doubles to GPRs and compare the 64-bit bit
            // patterns; result is 1 when the bits are identical.
            PatchpointValue* patchpoint = m_out.patchpoint(Int32);
            patchpoint->append(arg1, ValueRep::SomeRegister);
            patchpoint->append(arg2, ValueRep::SomeRegister);
            patchpoint->numGPScratchRegisters = 1;
            patchpoint->setGenerator(
                [] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                    GPRReg scratchGPR = params.gpScratch(0);
                    jit.moveDoubleTo64(params[1].fpr(), scratchGPR);
                    jit.moveDoubleTo64(params[2].fpr(), params[0].gpr());
                    jit.compare64(CCallHelpers::Equal, scratchGPR, params[0].gpr(), params[0].gpr());
                });
            patchpoint->effects = Effects::none();
            ValueFromBlock compareResult = m_out.anchor(patchpoint);
            m_out.branch(patchpoint, unsure(continuation), unsure(numberCase));

            // Bits differ: the values are still SameValue iff both are NaN.
            LBasicBlock lastNext = m_out.appendTo(numberCase, continuation);
            LValue isArg1NaN = m_out.doubleNotEqualOrUnordered(arg1, arg1);
            LValue isArg2NaN = m_out.doubleNotEqualOrUnordered(arg2, arg2);
            ValueFromBlock nanResult = m_out.anchor(m_out.bitAnd(isArg1NaN, isArg2NaN));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, compareResult, nanResult));
            return;
        }

        // Generic case: defer to the runtime's SameValue implementation.
        ASSERT(m_node->isBinaryUseKind(UntypedUse));
        setBoolean(vmCall(Int32, m_out.operation(operationSameValue), m_callFrame, lowJSValue(m_node->child1()), lowJSValue(m_node->child2())));
    }
7804
7805 void compileLogicalNot()
7806 {
7807 setBoolean(m_out.logicalNot(boolify(m_node->child1())));
7808 }
7809
    void compileCallOrConstruct()
    {
        // Lowers Call/Construct to a patchpoint that embeds an inline call IC:
        // a patchable pointer check with a near fast call, and a slow path that
        // goes through the link-call thunk.
        Node* node = m_node;
        unsigned numArgs = node->numChildren() - 1;

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        unsigned frameSize = (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue);
        unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);

        // JS->JS calling convention requires that the caller allows this much space on top of stack to
        // get trashed by the callee, even if not all of that space is used to pass arguments. We tell
        // B3 this explicitly for two reasons:
        //
        // - We will only pass frameSize worth of stuff.
        // - The trashed stack guarantee is logically separate from the act of passing arguments, so we
        //   shouldn't rely on Air to infer the trashed stack property based on the arguments it ends
        //   up seeing.
        m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);

        // Collect the arguments, since this can generate code and we want to generate it before we emit
        // the call.
        Vector<ConstrainedValue> arguments;

        // Make sure that the callee goes into GPR0 because that's where the slow path thunks expect the
        // callee to be.
        arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));

        // Pin each outgoing value to its slot in the callee frame being built on
        // top of our stack.
        auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
            intptr_t offsetFromSP =
                (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
            arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
        };

        addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
        addArgument(m_out.constInt32(numArgs), VirtualRegister(CallFrameSlot::argumentCount), PayloadOffset);
        for (unsigned i = 0; i < numArgs; ++i)
            addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendVector(arguments);

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);

        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // Record this call site's index in the argument-count tag slot of
                // our frame.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();

                // Fast path: patchable compare of the callee against the linked
                // target; falls through to a near call when it matches.
                CCallHelpers::DataLabelPtr targetToCheck;
                CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
                    CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
                    CCallHelpers::TrustedImmPtr(nullptr));

                CCallHelpers::Call fastCall = jit.nearCall();
                CCallHelpers::Jump done = jit.jump();

                slowPath.link(&jit);

                // Slow path: the link-call thunk expects the CallLinkInfo in regT2.
                jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
                CCallHelpers::Call slowCall = jit.nearCall();
                done.link(&jit);

                callLinkInfo->setUpCall(
                    node->op() == Construct ? CallLinkInfo::Construct : CallLinkInfo::Call,
                    node->origin.semantic, GPRInfo::regT0);

                // Re-establish our stack pointer from the frame pointer after the call.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-params.proc().frameSize()),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

                // Final addresses are only known at link time; wire up the slow
                // call to the thunk and record the IC's patch locations.
                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
                        linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));

                        callLinkInfo->setCallLocations(
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
                            linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
                    });
            });

        setJSValue(patchpoint);
    }
7913
    // Compiles DirectCall, DirectConstruct, and DirectTailCall: call nodes whose
    // callee Executable is a compile-time constant (node->castOperand<ExecutableBase*>()),
    // so the call can be linked directly to the callee rather than going through the
    // generic virtual-call inline cache.
    void compileDirectCallOrConstruct()
    {
        Node* node = m_node;
        bool isTail = node->op() == DirectTailCall;
        bool isConstruct = node->op() == DirectConstruct;
        
        ExecutableBase* executable = node->castOperand<ExecutableBase*>();
        FunctionExecutable* functionExecutable = jsDynamicCast<FunctionExecutable*>(vm(), executable);
        
        // Child 0 is the callee; children 1..numChildren-1 are the arguments.
        unsigned numPassedArgs = node->numChildren() - 1;
        unsigned numAllocatedArgs = numPassedArgs;
        
        if (functionExecutable) {
            // Allocate frame slots up to the callee's declared parameter count (+1 for
            // `this`), capped by Options::maximumDirectCallStackSize() to bound stack
            // growth. The extra slots are filled with undefined below.
            numAllocatedArgs = std::max(
                numAllocatedArgs,
                std::min(
                    static_cast<unsigned>(functionExecutable->parameterCount()) + 1,
                    Options::maximumDirectCallStackSize()));
        }
        
        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));
        
        if (!isTail) {
            // Ask B3 for enough call-arg area to hold the callee frame built below.
            unsigned frameSize = (CallFrame::headerSizeInRegisters + numAllocatedArgs) * sizeof(EncodedJSValue);
            unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);
            
            m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);
        }
        
        Vector<ConstrainedValue> arguments;
        
        arguments.append(ConstrainedValue(jsCallee, ValueRep::SomeRegister));
        if (!isTail) {
            // Normal call: have B3 place each value directly into its slot of the
            // callee frame, addressed relative to SP.
            auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
                intptr_t offsetFromSP = 
                    (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
                arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
            };
            
            addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
            addArgument(m_out.constInt32(numPassedArgs), VirtualRegister(CallFrameSlot::argumentCount), PayloadOffset);
            for (unsigned i = 0; i < numPassedArgs; ++i)
                addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0);
            // Pad the extra allocated slots with undefined so the callee sees valid JSValues.
            for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
                addArgument(m_out.constInt64(JSValue::encode(jsUndefined())), virtualRegisterForArgument(i), 0);
        } else {
            // Tail call: the CallFrameShuffler places the arguments, so let B3 put
            // them anywhere convenient; their recoveries are gathered in the generator.
            for (unsigned i = 0; i < numPassedArgs; ++i)
                arguments.append(ConstrainedValue(lowJSValue(m_graph.varArgChild(node, 1 + i)), ValueRep::WarmAny));
        }
        
        PatchpointValue* patchpoint = m_out.patchpoint(isTail ? Void : Int64);
        patchpoint->appendVector(arguments);
        
        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
        
        if (isTail) {
            // The shuffler needs tags.
            patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
            patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        }
        
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        if (!isTail) {
            patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
            patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
        }
        
        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
                
                // For the non-tail case the patchpoint has an Int64 result, so
                // params[0] is the result rep and the callee is params[1]; for the
                // tail case (Void result) the callee is params[0].
                GPRReg calleeGPR = params[!isTail].gpr();
                
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
                
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);
                
                if (isTail) {
                    CallFrameShuffleData shuffleData;
                    shuffleData.numLocals = state->jitCode->common.frameRegisterCount;
                    
                    // Record where the callee and each argument currently live, and
                    // collect every register they occupy so the slow path preserves them.
                    RegisterSet toSave = params.unavailableRegisters();
                    shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatCell);
                    toSave.set(calleeGPR);
                    for (unsigned i = 0; i < numPassedArgs; ++i) {
                        ValueRecovery recovery = params[1 + i].recoveryForJSValue();
                        shuffleData.args.append(recovery);
                        recovery.forEachReg(
                            [&] (Reg reg) {
                                toSave.set(reg);
                            });
                    }
                    for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
                        shuffleData.args.append(ValueRecovery::constant(jsUndefined()));
                    shuffleData.numPassedArgs = numPassedArgs;
                    shuffleData.setupCalleeSaveRegisters(jit.codeBlock());
                    
                    CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
                    
                    // Until the call is linked, this patchable jump routes execution to
                    // the slow path below, which links and then re-enters the main path.
                    CCallHelpers::PatchableJump patchableJump = jit.patchableJump();
                    CCallHelpers::Label mainPath = jit.label();
                    
                    jit.store32(
                        CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                        CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
                    
                    callLinkInfo->setFrameShuffleData(shuffleData);
                    CallFrameShuffler(jit, shuffleData).prepareForTailCall();
                    
                    CCallHelpers::Call call = jit.nearTailCall();
                    
                    // A tail call must never return here.
                    jit.abortWithReason(JITDidReturnFromTailCall);
                    
                    CCallHelpers::Label slowPath = jit.label();
                    patchableJump.m_jump.linkTo(slowPath, &jit);
                    callOperation(
                        *state, toSave, jit,
                        node->origin.semantic, exceptions.get(), operationLinkDirectCall,
                        InvalidGPRReg, CCallHelpers::TrustedImmPtr(callLinkInfo), calleeGPR).call();
                    jit.jump().linkTo(mainPath, &jit);
                    
                    callLinkInfo->setUpCall(
                        CallLinkInfo::DirectTailCall, node->origin.semantic, InvalidGPRReg);
                    callLinkInfo->setExecutableDuringCompilation(executable);
                    if (numAllocatedArgs > numPassedArgs)
                        callLinkInfo->setMaxNumArguments(numAllocatedArgs);
                    
                    jit.addLinkTask(
                        [=] (LinkBuffer& linkBuffer) {
                            CodeLocationLabel<JSInternalPtrTag> patchableJumpLocation = linkBuffer.locationOf<JSInternalPtrTag>(patchableJump);
                            CodeLocationNearCall<JSInternalPtrTag> callLocation = linkBuffer.locationOfNearCall<JSInternalPtrTag>(call);
                            CodeLocationLabel<JSInternalPtrTag> slowPathLocation = linkBuffer.locationOf<JSInternalPtrTag>(slowPath);
                            
                            callLinkInfo->setCallLocations(
                                patchableJumpLocation,
                                slowPathLocation,
                                callLocation);
                        });
                    return;
                }
                
                // Non-tail direct call / construct.
                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
                
                CCallHelpers::Label mainPath = jit.label();
                
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
                
                CCallHelpers::Call call = jit.nearCall();
                // Restore SP after the call; the frame size is an FP-relative offset.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-params.proc().frameSize()),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
                
                callLinkInfo->setUpCall(
                    isConstruct ? CallLinkInfo::DirectConstruct : CallLinkInfo::DirectCall,
                    node->origin.semantic, InvalidGPRReg);
                callLinkInfo->setExecutableDuringCompilation(executable);
                if (numAllocatedArgs > numPassedArgs)
                    callLinkInfo->setMaxNumArguments(numAllocatedArgs);
                
                // The near call above is initially linked to this out-of-line slow path
                // (see linkBuffer.link(call, slowPathLocation) in the link task below).
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);
                        
                        CCallHelpers::Label slowPath = jit.label();
                        // On x86 the near call pushed a return address; pop it into a
                        // scratch register to discard it before re-entering mainPath.
                        if (isX86())
                            jit.pop(CCallHelpers::selectScratchGPR(calleeGPR));
                        
                        callOperation(
                            *state, params.unavailableRegisters(), jit,
                            node->origin.semantic, exceptions.get(), operationLinkDirectCall,
                            InvalidGPRReg, CCallHelpers::TrustedImmPtr(callLinkInfo),
                            calleeGPR).call();
                        jit.jump().linkTo(mainPath, &jit);
                        
                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                CodeLocationNearCall<JSInternalPtrTag> callLocation = linkBuffer.locationOfNearCall<JSInternalPtrTag>(call);
                                CodeLocationLabel<JSInternalPtrTag> slowPathLocation = linkBuffer.locationOf<JSInternalPtrTag>(slowPath);
                                
                                linkBuffer.link(call, slowPathLocation);
                                
                                callLinkInfo->setCallLocations(
                                    CodeLocationLabel<JSInternalPtrTag>(),
                                    slowPathLocation,
                                    callLocation);
                            });
                    });
            });
        
        // A tail call never returns to this frame; otherwise the patchpoint's Int64
        // result is the call's return value.
        if (isTail)
            patchpoint->effects.terminal = true;
        else
            setJSValue(patchpoint);
    }
8114
    // Compiles TailCall: shuffles the arguments over our own call frame and jumps
    // to the callee through a patchable inline cache. The patchpoint is terminal —
    // control never returns to this frame.
    void compileTailCall()
    {
        Node* node = m_node;
        // Child 0 is the callee; children 1..numChildren-1 are the arguments.
        unsigned numArgs = node->numChildren() - 1;
        
        // It seems counterintuitive that this is needed given that tail calls don't create a new frame
        // on the stack. However, the tail call slow path builds the frame at SP instead of FP before
        // calling into the slow path C code. This slow path may decide to throw an exception because
        // the callee we're trying to call is not callable. Throwing an exception will cause us to walk
        // the stack, which may read, for the sake of the correctness of this code, arbitrary slots on the
        // stack to recover state. This call arg area ensures the call frame shuffler does not overwrite
        // any of the slots the stack walking code requires when on the slow path.
        m_proc.requestCallArgAreaSizeInBytes(
            WTF::roundUpToMultipleOf(stackAlignmentBytes(), (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue)));
        
        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));
        
        // We want B3 to give us all of the arguments using whatever mechanism it thinks is
        // convenient. The generator then shuffles those arguments into our own call frame,
        // destroying our frame in the process.
        
        // Note that we don't have to do anything special for exceptions. A tail call is only a
        // tail call if it is not inside a try block.
        
        Vector<ConstrainedValue> arguments;
        
        // The inline cache below compares the callee in regT0, so pin it there.
        arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));
        
        for (unsigned i = 0; i < numArgs; ++i) {
            // Note: we could let the shuffler do boxing for us, but it's not super clear that this
            // would be better. Also, if we wanted to do that, then we'd have to teach the shuffler
            // that 32-bit values could land at 4-byte alignment but not 8-byte alignment.
            
            ConstrainedValue constrainedValue(
                lowJSValue(m_graph.varArgChild(node, 1 + i)),
                ValueRep::WarmAny);
            arguments.append(constrainedValue);
        }
        
        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->appendVector(arguments);
        
        // The shuffler needs the tag registers to stay live.
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        
        // Prevent any of the arguments from using the scratch register.
        patchpoint->clobberEarly(RegisterSet::macroScratchRegisters());
        
        patchpoint->effects.terminal = true;
        
        // We don't have to tell the patchpoint that we will clobber registers, since we won't return
        // anyway.
        
        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
                
                // Yes, this is really necessary. You could throw an exception in a host call on the
                // slow path. That'll route us to lookupExceptionHandler(), which unwinds starting
                // with the call site index of our frame. Bad things happen if it's not set.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
                
                // Record where the callee and each argument currently live so the
                // shuffler can move them into place.
                CallFrameShuffleData shuffleData;
                shuffleData.numLocals = state->jitCode->common.frameRegisterCount;
                shuffleData.callee = ValueRecovery::inGPR(GPRInfo::regT0, DataFormatJS);
                
                for (unsigned i = 0; i < numArgs; ++i)
                    shuffleData.args.append(params[1 + i].recoveryForJSValue());
                
                shuffleData.numPassedArgs = numArgs;
                
                shuffleData.setupCalleeSaveRegisters(jit.codeBlock());
                
                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
                
                // Inline cache: compare the callee against a patchable constant;
                // mismatch takes the link-call slow path.
                CCallHelpers::DataLabelPtr targetToCheck;
                CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
                    CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
                    CCallHelpers::TrustedImmPtr(nullptr));
                
                // Fast path: overwrite our frame with the callee's and jump to it.
                callLinkInfo->setFrameShuffleData(shuffleData);
                CallFrameShuffler(jit, shuffleData).prepareForTailCall();
                
                CCallHelpers::Call fastCall = jit.nearTailCall();
                
                slowPath.link(&jit);
                
                // Slow path: build the callee frame at SP instead and call the
                // link-call thunk, passing the callee in regT0 and the CallLinkInfo
                // in regT2 (see the link task below).
                CallFrameShuffler slowPathShuffler(jit, shuffleData);
                slowPathShuffler.setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
                slowPathShuffler.prepareForSlowPath();
                
                jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
                CCallHelpers::Call slowCall = jit.nearCall();
                
                // Neither path may return here.
                jit.abortWithReason(JITDidReturnFromTailCall);
                
                callLinkInfo->setUpCall(CallLinkInfo::TailCall, codeOrigin, GPRInfo::regT0);
                
                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
                        linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));
                        
                        callLinkInfo->setCallLocations(
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
                            linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
                    });
            });
    }
8231
    // Compiles the varargs call/construct/tail-call forms whose arguments node is a
    // phantom spread shape (PhantomNewArrayWithSpread, PhantomSpread, or
    // PhantomNewArrayBuffer): the argument list is written directly into the callee
    // frame at code-generation time instead of materializing an array.
    void compileCallOrConstructVarargsSpread()
    {
        Node* node = m_node;
        Node* arguments = node->child3().node();
        
        LValue jsCallee = lowJSValue(m_node->child1());
        LValue thisArg = lowJSValue(m_node->child2());
        
        RELEASE_ASSERT(arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomSpread || arguments->op() == PhantomNewArrayBuffer);
        
        // Walk the phantom argument tree, counting statically-known arguments and
        // collecting the LValues (plus, for PhantomCreateRest, the dynamic lengths)
        // that the patchpoint will consume.
        unsigned staticArgumentCount = 0;
        Vector<LValue, 2> spreadLengths;
        Vector<LValue, 8> patchpointArguments;
        HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;
        auto pushAndCountArgumentsFromRightToLeft = recursableLambda([&](auto self, Node* target) -> void {
            if (target->op() == PhantomSpread) {
                self(target->child1().node());
                return;
            }
            
            if (target->op() == PhantomNewArrayWithSpread) {
                // bitVector marks which children are themselves spreads (recurse)
                // versus plain elements (lower now).
                BitVector* bitVector = target->bitVector();
                for (unsigned i = target->numChildren(); i--; ) {
                    if (bitVector->get(i))
                        self(m_graph.varArgChild(target, i).node());
                    else {
                        ++staticArgumentCount;
                        LValue argument = this->lowJSValue(m_graph.varArgChild(target, i));
                        patchpointArguments.append(argument);
                    }
                }
                return;
            }
            
            if (target->op() == PhantomNewArrayBuffer) {
                // The buffer's contents are compile-time constants; only its length
                // matters for the count. The values are emitted as immediates below.
                staticArgumentCount += target->castOperand<JSImmutableButterfly*>()->length();
                return;
            }
            
            RELEASE_ASSERT(target->op() == PhantomCreateRest);
            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();
            unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
            // Cache the length per inline call frame so it is only loaded once.
            LValue length = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                return m_out.zeroExtPtr(this->getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip));
            }).iterator->value;
            patchpointArguments.append(length);
            spreadLengths.append(length);
        });
        
        pushAndCountArgumentsFromRightToLeft(arguments);
        // Total count = static args + `this` + the sum of the dynamic spread lengths.
        LValue argumentCountIncludingThis = m_out.constIntPtr(staticArgumentCount + 1);
        for (LValue length : spreadLengths)
            argumentCountIncludingThis = m_out.add(length, argumentCountIncludingThis);
        
        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        
        // Stackmap layout consumed by the generator: params[0] is the result rep,
        // [1] callee (pinned to regT0), [2] this, [3] argument count, then the
        // collected arguments starting at index 4 (paramsOffset below).
        patchpoint->append(jsCallee, ValueRep::reg(GPRInfo::regT0));
        patchpoint->append(thisArg, ValueRep::WarmAny);
        patchpoint->append(argumentCountIncludingThis, ValueRep::WarmAny);
        patchpoint->appendVectorWithRep(patchpointArguments, ValueRep::WarmAny);
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        
        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
        
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobber(RegisterSet::volatileRegistersForJSCall()); // No inputs will be in a volatile register.
        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
        
        patchpoint->numGPScratchRegisters = 0;
        
        // This is the minimum amount of call arg area stack space that all JS->JS calls always have.
        unsigned minimumJSCallAreaSize =
            sizeof(CallerFrameAndPC) +
            WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(EncodedJSValue));
        
        m_proc.requestCallArgAreaSizeInBytes(minimumJSCallAreaSize);
        
        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
                
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);
                
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
                
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
                
                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
                
                // Scratch registers are drawn from the volatile-for-JS-call set
                // (clobbered by the call anyway), minus the pinned callee register.
                RegisterSet usedRegisters = RegisterSet::allRegisters();
                usedRegisters.exclude(RegisterSet::volatileRegistersForJSCall());
                GPRReg calleeGPR = params[1].gpr();
                usedRegisters.set(calleeGPR);
                
                ScratchRegisterAllocator allocator(usedRegisters);
                GPRReg scratchGPR1 = allocator.allocateScratchGPR();
                GPRReg scratchGPR2 = allocator.allocateScratchGPR();
                GPRReg scratchGPR3 = allocator.allocateScratchGPR();
                GPRReg scratchGPR4 = allocator.allocateScratchGPR();
                RELEASE_ASSERT(!allocator.numberOfReusedRegisters());
                
                // Materialize a stackmap rep (constant, stack slot, or register)
                // into the given result GPR.
                auto getValueFromRep = [&] (B3::ValueRep rep, GPRReg result) {
                    ASSERT(!usedRegisters.get(result));
                    
                    if (rep.isConstant()) {
                        jit.move(CCallHelpers::Imm64(rep.value()), result);
                        return;
                    }
                    
                    // Note: in this function, we only request 64 bit values.
                    if (rep.isStack()) {
                        jit.load64(
                            CCallHelpers::Address(GPRInfo::callFrameRegister, rep.offsetFromFP()),
                            result);
                        return;
                    }
                    
                    RELEASE_ASSERT(rep.isGPR());
                    ASSERT(usedRegisters.get(rep.gpr()));
                    jit.move(rep.gpr(), result);
                };
                
                auto callWithExceptionCheck = [&] (void* callee) {
                    jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(callee)), GPRInfo::nonPreservedNonArgumentGPR0);
                    jit.call(GPRInfo::nonPreservedNonArgumentGPR0, OperationPtrTag);
                    exceptions->append(jit.emitExceptionCheck(*vm, AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
                };
                
                CCallHelpers::JumpList slowCase;
                unsigned originalStackHeight = params.proc().frameSize();
                
                {
                    // Compute the callee frame's base and fill it right to left.
                    // scratchGPR1 = frame base, scratchGPR2 = remaining argument
                    // count, used as a descending store index.
                    unsigned numUsedSlots = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), originalStackHeight / sizeof(EncodedJSValue));
                    B3::ValueRep argumentCountIncludingThisRep = params[3];
                    getValueFromRep(argumentCountIncludingThisRep, scratchGPR2);
                    // Too many arguments: bail to the stack-overflow throw path below.
                    slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR2, CCallHelpers::TrustedImm32(JSC::maxArguments + 1)));
                    
                    jit.move(scratchGPR2, scratchGPR1);
                    jit.addPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(numUsedSlots + CallFrame::headerSizeInRegisters)), scratchGPR1);
                    // scratchGPR1 now has the required frame size in Register units
                    // Round scratchGPR1 to next multiple of stackAlignmentRegisters()
                    jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), scratchGPR1);
                    jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), scratchGPR1);
                    jit.negPtr(scratchGPR1);
                    jit.getEffectiveAddress(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight), scratchGPR1);
                    
                    // Before touching stack values, we should update the stack pointer to protect them from signal stack.
                    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), scratchGPR1, CCallHelpers::stackPointerRegister);
                    
                    jit.store32(scratchGPR2, CCallHelpers::Address(scratchGPR1, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));
                    
                    int storeOffset = CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register));
                    
                    // First collected argument rep lives at params[4]; see the append
                    // order above.
                    unsigned paramsOffset = 4;
                    unsigned index = 0;
                    auto emitArgumentsFromRightToLeft = recursableLambda([&](auto self, Node* target) -> void {
                        if (target->op() == PhantomSpread) {
                            self(target->child1().node());
                            return;
                        }
                        
                        if (target->op() == PhantomNewArrayWithSpread) {
                            BitVector* bitVector = target->bitVector();
                            for (unsigned i = target->numChildren(); i--; ) {
                                if (bitVector->get(i))
                                    self(state->graph.varArgChild(target, i).node());
                                else {
                                    // Plain element: decrement the index and store.
                                    jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR2);
                                    getValueFromRep(params[paramsOffset + (index++)], scratchGPR3);
                                    jit.store64(scratchGPR3,
                                        CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, storeOffset));
                                }
                            }
                            return;
                        }
                        
                        if (target->op() == PhantomNewArrayBuffer) {
                            // Constant buffer: emit each element as an immediate store.
                            auto* array = target->castOperand<JSImmutableButterfly*>();
                            Checked<int32_t> offsetCount { 1 };
                            for (unsigned i = array->length(); i--; ++offsetCount) {
                                // Because varargs values are drained as JSValue, we should not generate value
                                // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                                int64_t value = JSValue::encode(array->get(i));
                                jit.move(CCallHelpers::TrustedImm64(value), scratchGPR3);
                                Checked<int32_t> currentStoreOffset { storeOffset };
                                currentStoreOffset -= (offsetCount * static_cast<int32_t>(sizeof(Register)));
                                jit.store64(scratchGPR3,
                                    CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, currentStoreOffset.unsafeGet()));
                            }
                            jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(array->length())), scratchGPR2);
                            return;
                        }
                        
                        RELEASE_ASSERT(target->op() == PhantomCreateRest);
                        InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();
                        
                        unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
                        
                        // Dynamic rest arguments: copy them from the caller's argument
                        // area in a loop, highest index first. scratchGPR3 = remaining
                        // source count, scratchGPR4 = value being copied.
                        B3::ValueRep numArgumentsToCopy = params[paramsOffset + (index++)];
                        getValueFromRep(numArgumentsToCopy, scratchGPR3);
                        int loadOffset = (AssemblyHelpers::argumentsStart(inlineCallFrame).offset() + numberOfArgumentsToSkip) * static_cast<int>(sizeof(Register));
                        
                        auto done = jit.branchTestPtr(MacroAssembler::Zero, scratchGPR3);
                        auto loopStart = jit.label();
                        jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR3);
                        jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR2);
                        jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR3, CCallHelpers::TimesEight, loadOffset), scratchGPR4);
                        jit.store64(scratchGPR4,
                            CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, storeOffset));
                        jit.branchTestPtr(CCallHelpers::NonZero, scratchGPR3).linkTo(loopStart, &jit);
                        done.link(&jit);
                    });
                    emitArgumentsFromRightToLeft(arguments);
                }
                
                {
                    // Out-of-line throw for the too-many-arguments check above.
                    CCallHelpers::Jump dontThrow = jit.jump();
                    slowCase.link(&jit);
                    jit.setupArguments<decltype(operationThrowStackOverflowForVarargs)>();
                    callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
                    jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
                    
                    dontThrow.link(&jit);
                }
                
                // Store callee and `this` into the completed frame.
                ASSERT(calleeGPR == GPRInfo::regT0);
                jit.store64(calleeGPR, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
                getValueFromRep(params[2], scratchGPR3);
                jit.store64(scratchGPR3, CCallHelpers::calleeArgumentSlot(0));
                
                CallLinkInfo::CallType callType;
                if (node->op() == ConstructVarargs || node->op() == ConstructForwardVarargs)
                    callType = CallLinkInfo::ConstructVarargs;
                else if (node->op() == TailCallVarargs || node->op() == TailCallForwardVarargs)
                    callType = CallLinkInfo::TailCallVarargs;
                else
                    callType = CallLinkInfo::CallVarargs;
                
                bool isTailCall = CallLinkInfo::callModeFor(callType) == CallMode::Tail;
                
                // Inline cache: compare the callee (regT0) against a patchable
                // constant; mismatch takes the link-call slow path.
                CCallHelpers::DataLabelPtr targetToCheck;
                CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
                    CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
                    CCallHelpers::TrustedImmPtr(nullptr));
                
                CCallHelpers::Call fastCall;
                CCallHelpers::Jump done;
                
                if (isTailCall) {
                    jit.emitRestoreCalleeSaves();
                    jit.prepareForTailCallSlow();
                    fastCall = jit.nearTailCall();
                } else {
                    fastCall = jit.nearCall();
                    done = jit.jump();
                }
                
                slowPath.link(&jit);
                
                if (isTailCall)
                    jit.emitRestoreCalleeSaves();
                // Pass the CallLinkInfo to the link-call thunk in regT2 (linked below).
                ASSERT(!usedRegisters.get(GPRInfo::regT2));
                jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
                CCallHelpers::Call slowCall = jit.nearCall();
                
                if (isTailCall)
                    jit.abortWithReason(JITDidReturnFromTailCall);
                else
                    done.link(&jit);
                
                callLinkInfo->setUpCall(callType, node->origin.semantic, GPRInfo::regT0);
                
                // Restore SP after the call; the frame size is an FP-relative offset.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-originalStackHeight),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
                
                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
                        linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));
                        
                        callLinkInfo->setCallLocations(
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
                            linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
                    });
            });
        
        switch (node->op()) {
        case TailCallForwardVarargs:
            // A tail call never returns to this block.
            m_out.unreachable();
            break;
            
        default:
            setJSValue(patchpoint);
            break;
        }
    }
8539
8540 void compileCallOrConstructVarargs()
8541 {
8542 Node* node = m_node;
8543 LValue jsCallee = lowJSValue(m_node->child1());
8544 LValue thisArg = lowJSValue(m_node->child2());
8545
8546 LValue jsArguments = nullptr;
8547 bool forwarding = false;
8548
8549 switch (node->op()) {
8550 case CallVarargs:
8551 case TailCallVarargs:
8552 case TailCallVarargsInlinedCaller:
8553 case ConstructVarargs:
8554 jsArguments = lowJSValue(node->child3());
8555 break;
8556 case CallForwardVarargs:
8557 case TailCallForwardVarargs:
8558 case TailCallForwardVarargsInlinedCaller:
8559 case ConstructForwardVarargs:
8560 forwarding = true;
8561 break;
8562 default:
8563 DFG_CRASH(m_graph, node, "bad node type");
8564 break;
8565 }
8566
8567 if (forwarding && m_node->child3()) {
8568 Node* arguments = m_node->child3().node();
8569 if (arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread) {
8570 compileCallOrConstructVarargsSpread();
8571 return;
8572 }
8573 }
8574
8575
8576 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
8577
8578 // Append the forms of the arguments that we will use before any clobbering happens.
8579 patchpoint->append(jsCallee, ValueRep::reg(GPRInfo::regT0));
8580 if (jsArguments)
8581 patchpoint->appendSomeRegister(jsArguments);
8582 patchpoint->appendSomeRegister(thisArg);
8583
8584 if (!forwarding) {
8585 // Now append them again for after clobbering. Note that the compiler may ask us to use a
8586 // different register for the late for the post-clobbering version of the value. This gives
8587 // the compiler a chance to spill these values without having to burn any callee-saves.
8588 patchpoint->append(jsCallee, ValueRep::LateColdAny);
8589 patchpoint->append(jsArguments, ValueRep::LateColdAny);
8590 patchpoint->append(thisArg, ValueRep::LateColdAny);
8591 }
8592
8593 RefPtr<PatchpointExceptionHandle> exceptionHandle =
8594 preparePatchpointForExceptions(patchpoint);
8595
8596 patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
8597 patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
8598
8599 patchpoint->clobber(RegisterSet::macroScratchRegisters());
8600 patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
8601 patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
8602
8603 // This is the minimum amount of call arg area stack space that all JS->JS calls always have.
8604 unsigned minimumJSCallAreaSize =
8605 sizeof(CallerFrameAndPC) +
8606 WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(EncodedJSValue));
8607
8608 m_proc.requestCallArgAreaSizeInBytes(minimumJSCallAreaSize);
8609
8610 CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
8611 State* state = &m_ftlState;
8612 VM* vm = &this->vm();
8613 patchpoint->setGenerator(
8614 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
8615 AllowMacroScratchRegisterUsage allowScratch(jit);
8616 CallSiteIndex callSiteIndex =
8617 state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
8618
8619 Box<CCallHelpers::JumpList> exceptions =
8620 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
8621
8622 exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
8623
8624 jit.store32(
8625 CCallHelpers::TrustedImm32(callSiteIndex.bits()),
8626 CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
8627
8628 CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
8629 CallVarargsData* data = node->callVarargsData();
8630
8631 unsigned argIndex = 1;
8632 GPRReg calleeGPR = params[argIndex++].gpr();
8633 ASSERT(calleeGPR == GPRInfo::regT0);
8634 GPRReg argumentsGPR = jsArguments ? params[argIndex++].gpr() : InvalidGPRReg;
8635 GPRReg thisGPR = params[argIndex++].gpr();
8636
8637 B3::ValueRep calleeLateRep;
8638 B3::ValueRep argumentsLateRep;
8639 B3::ValueRep thisLateRep;
8640 if (!forwarding) {
8641 // If we're not forwarding then we'll need callee, arguments, and this after we
8642 // have potentially clobbered calleeGPR, argumentsGPR, and thisGPR. Our technique
8643 // for this is to supply all of those operands as late uses in addition to
8644 // specifying them as early uses. It's possible that the late use uses a spill
8645 // while the early use uses a register, and it's possible for the late and early
8646 // uses to use different registers. We do know that the late uses interfere with
8647 // all volatile registers and so won't use those, but the early uses may use
8648 // volatile registers and in the case of calleeGPR, it's pinned to regT0 so it
8649 // definitely will.
8650 //
8651 // Note that we have to be super careful with these. It's possible that these
8652 // use a shuffling of the registers used for calleeGPR, argumentsGPR, and
8653 // thisGPR. If that happens and we do for example:
8654 //
8655 // calleeLateRep.emitRestore(jit, calleeGPR);
8656 // argumentsLateRep.emitRestore(jit, calleeGPR);
8657 //
8658 // Then we might end up with garbage if calleeLateRep.gpr() == argumentsGPR and
8659 // argumentsLateRep.gpr() == calleeGPR.
8660 //
8661 // We do a variety of things to prevent this from happening. For example, we use
8662 // argumentsLateRep before needing the other two and after we've already stopped
8663 // using the *GPRs. Also, we pin calleeGPR to regT0, and rely on the fact that
8664 // the *LateReps cannot use volatile registers (so they cannot be regT0, so
8665 // calleeGPR != argumentsLateRep.gpr() and calleeGPR != thisLateRep.gpr()).
8666 //
8667 // An alternative would have been to just use early uses and early-clobber all
8668 // volatile registers. But that would force callee, arguments, and this into
8669 // callee-save registers even if we have to spill them. We don't want spilling to
8670 // use up three callee-saves.
8671 //
8672 // TL;DR: The way we use LateReps here is dangerous and barely works but achieves
8673 // some desirable performance properties, so don't mistake the cleverness for
8674 // elegance.
8675 calleeLateRep = params[argIndex++];
8676 argumentsLateRep = params[argIndex++];
8677 thisLateRep = params[argIndex++];
8678 }
8679
8680 // Get some scratch registers.
8681 RegisterSet usedRegisters;
8682 usedRegisters.merge(RegisterSet::stackRegisters());
8683 usedRegisters.merge(RegisterSet::reservedHardwareRegisters());
8684 usedRegisters.merge(RegisterSet::calleeSaveRegisters());
8685 usedRegisters.set(calleeGPR);
8686 if (argumentsGPR != InvalidGPRReg)
8687 usedRegisters.set(argumentsGPR);
8688 usedRegisters.set(thisGPR);
8689 if (calleeLateRep.isReg())
8690 usedRegisters.set(calleeLateRep.reg());
8691 if (argumentsLateRep.isReg())
8692 usedRegisters.set(argumentsLateRep.reg());
8693 if (thisLateRep.isReg())
8694 usedRegisters.set(thisLateRep.reg());
8695 ScratchRegisterAllocator allocator(usedRegisters);
8696 GPRReg scratchGPR1 = allocator.allocateScratchGPR();
8697 GPRReg scratchGPR2 = allocator.allocateScratchGPR();
8698 GPRReg scratchGPR3 = forwarding ? allocator.allocateScratchGPR() : InvalidGPRReg;
8699 RELEASE_ASSERT(!allocator.numberOfReusedRegisters());
8700
8701 auto callWithExceptionCheck = [&] (void* callee) {
8702 jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(callee)), GPRInfo::nonPreservedNonArgumentGPR0);
8703 jit.call(GPRInfo::nonPreservedNonArgumentGPR0, OperationPtrTag);
8704 exceptions->append(jit.emitExceptionCheck(*vm, AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
8705 };
8706
8707 unsigned originalStackHeight = params.proc().frameSize();
8708
8709 if (forwarding) {
8710 jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2);
8711
8712 CCallHelpers::JumpList slowCase;
8713 InlineCallFrame* inlineCallFrame;
8714 if (node->child3())
8715 inlineCallFrame = node->child3()->origin.semantic.inlineCallFrame();
8716 else
8717 inlineCallFrame = node->origin.semantic.inlineCallFrame();
8718
8719 // emitSetupVarargsFrameFastCase modifies the stack pointer if it succeeds.
8720 emitSetupVarargsFrameFastCase(*vm, jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
8721
8722 CCallHelpers::Jump done = jit.jump();
8723 slowCase.link(&jit);
8724 jit.setupArguments<decltype(operationThrowStackOverflowForVarargs)>();
8725 callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
8726 jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
8727
8728 done.link(&jit);
8729 } else {
8730 jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR1);
8731 jit.setupArguments<decltype(operationSizeFrameForVarargs)>(argumentsGPR, scratchGPR1, CCallHelpers::TrustedImm32(data->firstVarArgOffset));
8732 callWithExceptionCheck(bitwise_cast<void*>(operationSizeFrameForVarargs));
8733
8734 jit.move(GPRInfo::returnValueGPR, scratchGPR1);
8735 jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2);
8736 argumentsLateRep.emitRestore(jit, argumentsGPR);
8737 emitSetVarargsFrame(jit, scratchGPR1, false, scratchGPR2, scratchGPR2);
8738 jit.addPtr(CCallHelpers::TrustedImm32(-minimumJSCallAreaSize), scratchGPR2, CCallHelpers::stackPointerRegister);
8739 jit.setupArguments<decltype(operationSetupVarargsFrame)>(scratchGPR2, argumentsGPR, CCallHelpers::TrustedImm32(data->firstVarArgOffset), scratchGPR1);
8740 callWithExceptionCheck(bitwise_cast<void*>(operationSetupVarargsFrame));
8741
8742 jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::returnValueGPR, CCallHelpers::stackPointerRegister);
8743
8744 calleeLateRep.emitRestore(jit, GPRInfo::regT0);
8745
8746 // This may not emit code if thisGPR got a callee-save. Also, we're guaranteed
8747 // that thisGPR != GPRInfo::regT0 because regT0 interferes with it.
8748 thisLateRep.emitRestore(jit, thisGPR);
8749 }
8750
8751 jit.store64(GPRInfo::regT0, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
8752 jit.store64(thisGPR, CCallHelpers::calleeArgumentSlot(0));
8753
8754 CallLinkInfo::CallType callType;
8755 if (node->op() == ConstructVarargs || node->op() == ConstructForwardVarargs)
8756 callType = CallLinkInfo::ConstructVarargs;
8757 else if (node->op() == TailCallVarargs || node->op() == TailCallForwardVarargs)
8758 callType = CallLinkInfo::TailCallVarargs;
8759 else
8760 callType = CallLinkInfo::CallVarargs;
8761
8762 bool isTailCall = CallLinkInfo::callModeFor(callType) == CallMode::Tail;
8763
8764 CCallHelpers::DataLabelPtr targetToCheck;
8765 CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
8766 CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
8767 CCallHelpers::TrustedImmPtr(nullptr));
8768
8769 CCallHelpers::Call fastCall;
8770 CCallHelpers::Jump done;
8771
8772 if (isTailCall) {
8773 jit.emitRestoreCalleeSaves();
8774 jit.prepareForTailCallSlow();
8775 fastCall = jit.nearTailCall();
8776 } else {
8777 fastCall = jit.nearCall();
8778 done = jit.jump();
8779 }
8780
8781 slowPath.link(&jit);
8782
8783 if (isTailCall)
8784 jit.emitRestoreCalleeSaves();
8785 jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
8786 CCallHelpers::Call slowCall = jit.nearCall();
8787
8788 if (isTailCall)
8789 jit.abortWithReason(JITDidReturnFromTailCall);
8790 else
8791 done.link(&jit);
8792
8793 callLinkInfo->setUpCall(callType, node->origin.semantic, GPRInfo::regT0);
8794
8795 jit.addPtr(
8796 CCallHelpers::TrustedImm32(-originalStackHeight),
8797 GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
8798
8799 jit.addLinkTask(
8800 [=] (LinkBuffer& linkBuffer) {
8801 MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
8802 linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));
8803
8804 callLinkInfo->setCallLocations(
8805 CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
8806 CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
8807 linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
8808 });
8809 });
8810
8811 switch (node->op()) {
8812 case TailCallVarargs:
8813 case TailCallForwardVarargs:
8814 m_out.unreachable();
8815 break;
8816
8817 default:
8818 setJSValue(patchpoint);
8819 break;
8820 }
8821 }
8822
    // Lowers a CallEval node. Eval call sites cannot be linked like ordinary
    // calls, so we always materialize a full callee frame on the stack and hand
    // it to operationCallEval. If that operation returns a non-zero (non-empty)
    // value we are done; otherwise we fall back to a virtual call of the callee
    // stored in the frame (see the branchTest64 in the generator below).
    void compileCallEval()
    {
        Node* node = m_node;
        // varArgChild(0) is the callee; the remaining children are the arguments.
        unsigned numArgs = node->numChildren() - 1;

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        // Reserve stack space for the callee frame (header + arguments), rounded
        // up to the required stack alignment.
        unsigned frameSize = (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue);
        unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);

        m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);

        Vector<ConstrainedValue> arguments;
        // Pin the callee to regT0; the generator below relies on this.
        arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));

        // Constrains |value| to live in the callee-frame slot for |reg|,
        // expressed as a byte offset from the stack pointer.
        auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
            intptr_t offsetFromSP =
                (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
            arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
        };

        // Populate the callee slot, the argument-count payload, and each argument.
        addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
        addArgument(m_out.constInt32(numArgs), VirtualRegister(CallFrameSlot::argumentCount), PayloadOffset);
        for (unsigned i = 0; i < numArgs; ++i)
            addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendVector(arguments);

        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);

        // The tag registers must hold their canonical values across the patchpoint.
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);

        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM& vm = this->vm();
        patchpoint->setGenerator(
            [=, &vm] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);

                Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // Record the call-site index in the tag of the argument-count
                // slot so the runtime can identify this call site.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
                callLinkInfo->setUpCall(CallLinkInfo::Call, node->origin.semantic, GPRInfo::regT0);

                // regT1 = the callee frame we just populated (it sits just below
                // the current stack pointer); link its caller-frame pointer.
                jit.addPtr(CCallHelpers::TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), CCallHelpers::stackPointerRegister, GPRInfo::regT1);
                jit.storePtr(GPRInfo::callFrameRegister, CCallHelpers::Address(GPRInfo::regT1, CallFrame::callerFrameOffset()));

                // Now we need to make room for:
                // - The caller frame and PC for a call to operationCallEval.
                // - Potentially two arguments on the stack.
                unsigned requiredBytes = sizeof(CallerFrameAndPC) + sizeof(ExecState*) * 2;
                requiredBytes = WTF::roundUpToMultipleOf(stackAlignmentBytes(), requiredBytes);
                jit.subPtr(CCallHelpers::TrustedImm32(requiredBytes), CCallHelpers::stackPointerRegister);
                jit.setupArguments<decltype(operationCallEval)>(GPRInfo::regT1);
                jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationCallEval)), GPRInfo::nonPreservedNonArgumentGPR0);
                jit.call(GPRInfo::nonPreservedNonArgumentGPR0, OperationPtrTag);
                exceptions->append(jit.emitExceptionCheck(state->vm(), AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));

                // Non-zero result: operationCallEval produced a value, we are done.
                // Zero result: fall through to a regular virtual call below.
                CCallHelpers::Jump done = jit.branchTest64(CCallHelpers::NonZero, GPRInfo::returnValueGPR);

                // Slow path: pop the scratch area, reload the callee from the
                // frame slot, and perform an unlinked virtual call.
                jit.addPtr(CCallHelpers::TrustedImm32(requiredBytes), CCallHelpers::stackPointerRegister);
                jit.load64(CCallHelpers::calleeFrameSlot(CallFrameSlot::callee), GPRInfo::regT0);
                jit.emitDumbVirtualCall(vm, callLinkInfo);

                done.link(&jit);
                // Restore the stack pointer to the top of our own frame.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-params.proc().frameSize()),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
            });

        setJSValue(patchpoint);
    }
8907
    // Lowers LoadVarargs: asks the runtime how many arguments the varargs
    // object supplies, speculates that they fit into the statically reserved
    // frame area, then calls operationLoadVarargs to copy them into place.
    void compileLoadVarargs()
    {
        LoadVarargsData* data = m_node->loadVarargsData();
        LValue jsArguments = lowJSValue(m_node->child1());

        // Number of elements starting at data->offset; per the FIXME below this
        // may invoke an effectful length property.
        LValue length = vmCall(
            Int32, m_out.operation(operationSizeOfVarargs), m_callFrame, jsArguments,
            m_out.constInt32(data->offset));

        // FIXME: There is a chance that we will call an effectful length property twice. This is safe
        // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance
        // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right
        // past the sizing.
        // https://bugs.webkit.org/show_bug.cgi?id=141448

        LValue lengthIncludingThis = m_out.add(length, m_out.int32One);

        // If the +1 wrapped around (length > length + 1), the count overflowed.
        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(length, lengthIncludingThis));

        // The arguments must also fit within the reserved limit.
        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));

        m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));

        // FIXME: This computation is rather silly. If operationLoadVarargs just took a pointer instead
        // of a VirtualRegister, we wouldn't have to do this.
        // https://bugs.webkit.org/show_bug.cgi?id=141660
        // Convert the machine stack address back into a virtual-register index:
        // (address - callFrame) >> 3, i.e. divided by the 8-byte register size.
        LValue machineStart = m_out.lShr(
            m_out.sub(addressFor(data->machineStart.offset()).value(), m_callFrame),
            m_out.constIntPtr(3));

        vmCall(
            Void, m_out.operation(operationLoadVarargs), m_callFrame,
            m_out.castToInt32(machineStart), jsArguments, m_out.constInt32(data->offset),
            length, m_out.constInt32(data->mandatoryMinimum));
    }
8947
    // Lowers ForwardVarargs: forwards the arguments of an inlined frame
    // directly into the callee frame slots without materializing an arguments
    // object. Dispatches to the spread-aware path when the phantom arguments
    // node involves spreads or immutable array buffers.
    void compileForwardVarargs()
    {
        if (m_node->child1()) {
            Node* arguments = m_node->child1().node();
            if (arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread) {
                compileForwardVarargsWithSpread();
                return;
            }
        }

        LoadVarargsData* data = m_node->loadVarargsData();
        // The frame whose arguments we forward: the phantom arguments node's
        // semantic frame if present, otherwise this node's own.
        InlineCallFrame* inlineCallFrame;
        if (m_node->child1())
            inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();
        else
            inlineCallFrame = m_node->origin.semantic.inlineCallFrame();

        // Compute length = max(argumentsLength - offset, 0), statically when
        // possible, and lengthIncludingThis = length + 1.
        LValue length = nullptr;
        LValue lengthIncludingThis = nullptr;
        ArgumentsLength argumentsLength = getArgumentsLength(inlineCallFrame);
        if (argumentsLength.isKnown) {
            unsigned knownLength = argumentsLength.known;
            if (knownLength >= data->offset)
                knownLength = knownLength - data->offset;
            else
                knownLength = 0;
            length = m_out.constInt32(knownLength);
            lengthIncludingThis = m_out.constInt32(knownLength + 1);
        } else {
            // We need to perform the same logical operation as the code above, but through dynamic operations.
            if (!data->offset)
                length = argumentsLength.value;
            else {
                LBasicBlock isLarger = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                // Result is 0 unless the dynamic length exceeds the offset.
                ValueFromBlock smallerOrEqualLengthResult = m_out.anchor(m_out.constInt32(0));
                m_out.branch(
                    m_out.above(argumentsLength.value, m_out.constInt32(data->offset)), unsure(isLarger), unsure(continuation));
                LBasicBlock lastNext = m_out.appendTo(isLarger, continuation);
                ValueFromBlock largerLengthResult = m_out.anchor(m_out.sub(argumentsLength.value, m_out.constInt32(data->offset)));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                length = m_out.phi(Int32, smallerOrEqualLengthResult, largerLengthResult);
            }
            lengthIncludingThis = m_out.add(length, m_out.constInt32(1));
        }

        // The forwarded arguments must fit into the reserved frame area.
        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));

        m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));

        unsigned numberOfArgumentsToSkip = data->offset;
        LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
        LValue targetStart = addressFor(data->machineStart).value();

        LBasicBlock undefinedLoop = m_out.newBlock();
        LBasicBlock mainLoopEntry = m_out.newBlock();
        LBasicBlock mainLoop = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // First loop: if mandatoryMinimum > length, fill target slots
        // [length, mandatoryMinimum) with jsUndefined(), counting downward.
        LValue lengthAsPtr = m_out.zeroExtPtr(length);
        LValue loopBoundValue = m_out.constIntPtr(data->mandatoryMinimum);
        ValueFromBlock loopBound = m_out.anchor(loopBoundValue);
        m_out.branch(
            m_out.above(loopBoundValue, lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry));

        LBasicBlock lastNext = m_out.appendTo(undefinedLoop, mainLoopEntry);
        LValue previousIndex = m_out.phi(pointerType(), loopBound);
        LValue currentIndex = m_out.sub(previousIndex, m_out.intPtrOne);
        m_out.store64(
            m_out.constInt64(JSValue::encode(jsUndefined())),
            m_out.baseIndex(m_heaps.variables, targetStart, currentIndex));
        ValueFromBlock nextIndex = m_out.anchor(currentIndex);
        m_out.addIncomingToPhi(previousIndex, nextIndex);
        m_out.branch(
            m_out.above(currentIndex, lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry));

        // Second loop: copy the |length| actual arguments from the source
        // frame into the target frame, counting down from length-1 to 0.
        m_out.appendTo(mainLoopEntry, mainLoop);
        loopBound = m_out.anchor(lengthAsPtr);
        m_out.branch(m_out.notNull(lengthAsPtr), unsure(mainLoop), unsure(continuation));

        m_out.appendTo(mainLoop, continuation);
        previousIndex = m_out.phi(pointerType(), loopBound);
        currentIndex = m_out.sub(previousIndex, m_out.intPtrOne);
        LValue value = m_out.load64(
            m_out.baseIndex(m_heaps.variables, sourceStart, currentIndex));
        m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, currentIndex));
        nextIndex = m_out.anchor(currentIndex);
        m_out.addIncomingToPhi(previousIndex, nextIndex);
        m_out.branch(m_out.isNull(currentIndex), unsure(continuation), unsure(mainLoop));

        m_out.appendTo(continuation, lastNext);
    }
9045
    // Returns an Int32 LValue holding the number of arguments the given inline
    // call frame contributes to a spread: max(argumentsLength -
    // numberOfArgumentsToSkip, 0). Emits control flow only when the length is
    // not statically known and there are arguments to skip.
    LValue getSpreadLengthFromInlineCallFrame(InlineCallFrame* inlineCallFrame, unsigned numberOfArgumentsToSkip)
    {
        ArgumentsLength argumentsLength = getArgumentsLength(inlineCallFrame);
        if (argumentsLength.isKnown) {
            // Static case: clamp at zero and emit a constant.
            unsigned knownLength = argumentsLength.known;
            if (knownLength >= numberOfArgumentsToSkip)
                knownLength = knownLength - numberOfArgumentsToSkip;
            else
                knownLength = 0;
            return m_out.constInt32(knownLength);
        }


        // We need to perform the same logical operation as the code above, but through dynamic operations.
        if (!numberOfArgumentsToSkip)
            return argumentsLength.value;

        // Dynamic clamp: the result is 0 unless argumentsLength > skip count.
        LBasicBlock isLarger = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        ValueFromBlock smallerOrEqualLengthResult = m_out.anchor(m_out.constInt32(0));
        m_out.branch(
            m_out.above(argumentsLength.value, m_out.constInt32(numberOfArgumentsToSkip)), unsure(isLarger), unsure(continuation));
        LBasicBlock lastNext = m_out.appendTo(isLarger, continuation);
        ValueFromBlock largerLengthResult = m_out.anchor(m_out.sub(argumentsLength.value, m_out.constInt32(numberOfArgumentsToSkip)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, smallerOrEqualLengthResult, largerLengthResult);
    }
9076
    // Spread-aware variant of compileForwardVarargs. Walks the phantom
    // spread/array tree twice: once to compute the total argument count
    // (static arguments plus the dynamic length of each PhantomCreateRest),
    // and once to store each argument into the callee frame. Per-frame spread
    // lengths are cached so each inline call frame's length is computed once.
    void compileForwardVarargsWithSpread()
    {
        HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;

        Node* arguments = m_node->child1().node();
        RELEASE_ASSERT(arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread);

        unsigned numberOfStaticArguments = 0;
        Vector<LValue, 2> spreadLengths;

        // Pass 1: count static arguments and collect the (possibly dynamic)
        // length contributed by each PhantomCreateRest.
        auto collectArgumentCount = recursableLambda([&](auto self, Node* target) -> void {
            if (target->op() == PhantomSpread) {
                self(target->child1().node());
                return;
            }

            if (target->op() == PhantomNewArrayWithSpread) {
                // bitVector marks which children are themselves spreads.
                BitVector* bitVector = target->bitVector();
                for (unsigned i = 0; i < target->numChildren(); i++) {
                    if (bitVector->get(i))
                        self(m_graph.varArgChild(target, i).node());
                    else
                        ++numberOfStaticArguments;
                }
                return;
            }

            if (target->op() == PhantomNewArrayBuffer) {
                numberOfStaticArguments += target->castOperand<JSImmutableButterfly*>()->length();
                return;
            }

            ASSERT(target->op() == PhantomCreateRest);
            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();
            unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
            spreadLengths.append(cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                return this->getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
            }).iterator->value);
        });

        collectArgumentCount(arguments);
        // Total count = 1 (for |this|) + static args + each spread length.
        LValue lengthIncludingThis = m_out.constInt32(1 + numberOfStaticArguments);
        for (LValue length : spreadLengths)
            lengthIncludingThis = m_out.add(lengthIncludingThis, length);

        LoadVarargsData* data = m_node->loadVarargsData();
        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));

        m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));

        LValue targetStart = addressFor(data->machineStart).value();

        // Pass 2: store each argument; returns the updated running store index.
        auto forwardSpread = recursableLambda([this, &cachedSpreadLengths, &targetStart](auto self, Node* target, LValue storeIndex) -> LValue {
            if (target->op() == PhantomSpread)
                return self(target->child1().node(), storeIndex);

            if (target->op() == PhantomNewArrayWithSpread) {
                BitVector* bitVector = target->bitVector();
                for (unsigned i = 0; i < target->numChildren(); i++) {
                    if (bitVector->get(i))
                        storeIndex = self(m_graph.varArgChild(target, i).node(), storeIndex);
                    else {
                        LValue value = this->lowJSValue(m_graph.varArgChild(target, i));
                        m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, storeIndex));
                        storeIndex = m_out.add(m_out.constIntPtr(1), storeIndex);
                    }
                }
                return storeIndex;
            }

            if (target->op() == PhantomNewArrayBuffer) {
                auto* array = target->castOperand<JSImmutableButterfly*>();
                for (unsigned i = 0; i < array->length(); i++) {
                    // Because forwarded values are drained as JSValue, we should not generate value
                    // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                    int64_t value = JSValue::encode(array->get(i));
                    m_out.store64(m_out.constInt64(value), m_out.baseIndex(m_heaps.variables, targetStart, storeIndex, JSValue(), (Checked<int32_t>(sizeof(Register)) * i).unsafeGet()));
                }
                return m_out.add(m_out.constIntPtr(array->length()), storeIndex);
            }

            RELEASE_ASSERT(target->op() == PhantomCreateRest);
            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();

            LValue sourceStart = this->getArgumentsStart(inlineCallFrame, target->numberOfArgumentsToSkip());
            LValue spreadLength = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));

            // Copy loop: parallel load/store indices advance together until
            // spreadLength values have been moved; skipped entirely if empty.
            LBasicBlock loop = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            ValueFromBlock startLoadIndex = m_out.anchor(m_out.constIntPtr(0));
            ValueFromBlock startStoreIndex = m_out.anchor(storeIndex);
            ValueFromBlock startStoreIndexForEnd = m_out.anchor(storeIndex);

            m_out.branch(m_out.isZero64(spreadLength), unsure(continuation), unsure(loop));

            LBasicBlock lastNext = m_out.appendTo(loop, continuation);
            LValue loopStoreIndex = m_out.phi(Int64, startStoreIndex);
            LValue loadIndex = m_out.phi(Int64, startLoadIndex);
            LValue value = m_out.load64(
                m_out.baseIndex(m_heaps.variables, sourceStart, loadIndex));
            m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, loopStoreIndex));
            LValue nextLoadIndex = m_out.add(m_out.constIntPtr(1), loadIndex);
            m_out.addIncomingToPhi(loadIndex, m_out.anchor(nextLoadIndex));
            LValue nextStoreIndex = m_out.add(m_out.constIntPtr(1), loopStoreIndex);
            m_out.addIncomingToPhi(loopStoreIndex, m_out.anchor(nextStoreIndex));
            ValueFromBlock loopStoreIndexForEnd = m_out.anchor(nextStoreIndex);
            m_out.branch(m_out.below(nextLoadIndex, spreadLength), unsure(loop), unsure(continuation));

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int64, startStoreIndexForEnd, loopStoreIndexForEnd);
        });

        LValue storeIndex = forwardSpread(arguments, m_out.constIntPtr(0));

        // Finally, pad with jsUndefined() up to the mandatory minimum argument
        // count, counting upward from the last stored index.
        LBasicBlock undefinedLoop = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        ValueFromBlock startStoreIndex = m_out.anchor(storeIndex);
        LValue loopBoundValue = m_out.constIntPtr(data->mandatoryMinimum);
        m_out.branch(m_out.below(storeIndex, loopBoundValue),
            unsure(undefinedLoop), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(undefinedLoop, continuation);
        LValue loopStoreIndex = m_out.phi(Int64, startStoreIndex);
        m_out.store64(
            m_out.constInt64(JSValue::encode(jsUndefined())),
            m_out.baseIndex(m_heaps.variables, targetStart, loopStoreIndex));
        LValue nextIndex = m_out.add(loopStoreIndex, m_out.constIntPtr(1));
        m_out.addIncomingToPhi(loopStoreIndex, m_out.anchor(nextIndex));
        m_out.branch(
            m_out.below(nextIndex, loopBoundValue), unsure(undefinedLoop), unsure(continuation));

        m_out.appendTo(continuation, lastNext);
    }
9213
9214 void compileJump()
9215 {
9216 m_out.jump(lowBlock(m_node->targetBlock()));
9217 }
9218
9219 void compileBranch()
9220 {
9221 m_out.branch(
9222 boolify(m_node->child1()),
9223 WeightedTarget(
9224 lowBlock(m_node->branchData()->taken.block),
9225 m_node->branchData()->taken.count),
9226 WeightedTarget(
9227 lowBlock(m_node->branchData()->notTaken.block),
9228 m_node->branchData()->notTaken.count));
9229 }
9230
    // Lowers a Switch node. Each switch kind first normalizes the scrutinee to
    // a comparable value (an int32, a single character, a StringImpl, or a
    // cell pointer) — branching to the fallThrough block whenever the value
    // cannot match any case — and then emits the actual multi-way dispatch.
    void compileSwitch()
    {
        SwitchData* data = m_node->switchData();
        switch (data->kind) {
        case SwitchImm: {
            // Collect int32 values from each entry path, then switch on the phi.
            Vector<ValueFromBlock, 2> intValues;
            LBasicBlock switchOnInts = m_out.newBlock();

            LBasicBlock lastNext = m_out.appendTo(m_out.m_block, switchOnInts);

            switch (m_node->child1().useKind()) {
            case Int32Use: {
                intValues.append(m_out.anchor(lowInt32(m_node->child1())));
                m_out.jump(switchOnInts);
                break;
            }

            case UntypedUse: {
                // Accept boxed int32s directly; accept doubles only if they
                // round-trip to the same double through int32 conversion.
                LBasicBlock isInt = m_out.newBlock();
                LBasicBlock isNotInt = m_out.newBlock();
                LBasicBlock isDouble = m_out.newBlock();

                LValue boxedValue = lowJSValue(m_node->child1());
                m_out.branch(isNotInt32(boxedValue), unsure(isNotInt), unsure(isInt));

                LBasicBlock innerLastNext = m_out.appendTo(isInt, isNotInt);

                intValues.append(m_out.anchor(unboxInt32(boxedValue)));
                m_out.jump(switchOnInts);

                // Cells and miscellaneous immediates can never match an
                // immediate case: go straight to fallThrough.
                m_out.appendTo(isNotInt, isDouble);
                m_out.branch(
                    isCellOrMisc(boxedValue, provenType(m_node->child1())),
                    usually(lowBlock(data->fallThrough.block)), rarely(isDouble));

                m_out.appendTo(isDouble, innerLastNext);
                LValue doubleValue = unboxDouble(boxedValue);
                LValue intInDouble = m_out.doubleToInt(doubleValue);
                intValues.append(m_out.anchor(intInDouble));
                m_out.branch(
                    m_out.doubleEqual(m_out.intToDouble(intInDouble), doubleValue),
                    unsure(switchOnInts), unsure(lowBlock(data->fallThrough.block)));
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                break;
            }

            m_out.appendTo(switchOnInts, lastNext);
            buildSwitch(data, Int32, m_out.phi(Int32, intValues));
            return;
        }

        case SwitchChar: {
            LValue stringValue;

            // FIXME: We should use something other than unsure() for the branch weight
            // of the fallThrough block. The main challenge is just that we have multiple
            // branches to fallThrough but a single count, so we would need to divvy it up
            // among the different lowered branches.
            // https://bugs.webkit.org/show_bug.cgi?id=129082

            // Step 1: establish that we have a JSString in stringValue.
            switch (m_node->child1().useKind()) {
            case StringUse: {
                stringValue = lowString(m_node->child1());
                break;
            }

            case UntypedUse: {
                // Anything that is not a string cell goes to fallThrough.
                LValue unboxedValue = lowJSValue(m_node->child1());

                LBasicBlock isCellCase = m_out.newBlock();
                LBasicBlock isStringCase = m_out.newBlock();

                m_out.branch(
                    isNotCell(unboxedValue, provenType(m_node->child1())),
                    unsure(lowBlock(data->fallThrough.block)), unsure(isCellCase));

                LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
                LValue cellValue = unboxedValue;
                m_out.branch(
                    isNotString(cellValue, provenType(m_node->child1())),
                    unsure(lowBlock(data->fallThrough.block)), unsure(isStringCase));

                m_out.appendTo(isStringCase, lastNext);
                stringValue = cellValue;
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                break;
            }

            // Step 2: resolve ropes, require length == 1, load the single
            // character (8- or 16-bit), and switch on it.
            LBasicBlock lengthIs1 = m_out.newBlock();
            LBasicBlock needResolution = m_out.newBlock();
            LBasicBlock resolved = m_out.newBlock();
            LBasicBlock is8Bit = m_out.newBlock();
            LBasicBlock is16Bit = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            ValueFromBlock fastValue = m_out.anchor(m_out.loadPtr(stringValue, m_heaps.JSString_value));
            m_out.branch(
                isRopeString(stringValue, m_node->child1()),
                rarely(needResolution), usually(resolved));

            LBasicBlock lastNext = m_out.appendTo(needResolution, resolved);
            ValueFromBlock slowValue = m_out.anchor(
                vmCall(pointerType(), m_out.operation(operationResolveRope), m_callFrame, stringValue));
            m_out.jump(resolved);

            m_out.appendTo(resolved, lengthIs1);
            LValue value = m_out.phi(pointerType(), fastValue, slowValue);
            m_out.branch(
                m_out.notEqual(
                    m_out.load32NonNegative(value, m_heaps.StringImpl_length),
                    m_out.int32One),
                unsure(lowBlock(data->fallThrough.block)), unsure(lengthIs1));

            m_out.appendTo(lengthIs1, is8Bit);
            LValue characterData = m_out.loadPtr(value, m_heaps.StringImpl_data);
            m_out.branch(
                m_out.testNonZero32(
                    m_out.load32(value, m_heaps.StringImpl_hashAndFlags),
                    m_out.constInt32(StringImpl::flagIs8Bit())),
                unsure(is8Bit), unsure(is16Bit));

            Vector<ValueFromBlock, 2> characters;
            m_out.appendTo(is8Bit, is16Bit);
            characters.append(m_out.anchor(m_out.load8ZeroExt32(characterData, m_heaps.characters8[0])));
            m_out.jump(continuation);

            m_out.appendTo(is16Bit, continuation);
            characters.append(m_out.anchor(m_out.load16ZeroExt32(characterData, m_heaps.characters16[0])));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            buildSwitch(data, Int32, m_out.phi(Int32, characters));
            return;
        }

        case SwitchString: {
            switch (m_node->child1().useKind()) {
            case StringIdentUse: {
                // Atomized strings: compare StringImpl pointers directly.
                LValue stringImpl = lowStringIdent(m_node->child1());

                Vector<SwitchCase> cases;
                for (unsigned i = 0; i < data->cases.size(); ++i) {
                    LValue value = m_out.constIntPtr(data->cases[i].value.stringImpl());
                    LBasicBlock block = lowBlock(data->cases[i].target.block);
                    Weight weight = Weight(data->cases[i].target.count);
                    cases.append(SwitchCase(value, block, weight));
                }

                m_out.switchInstruction(
                    stringImpl, cases, lowBlock(data->fallThrough.block),
                    Weight(data->fallThrough.count));
                return;
            }

            case StringUse: {
                switchString(data, lowString(m_node->child1()), m_node->child1());
                return;
            }

            case UntypedUse: {
                // Filter to string cells, then do content-based dispatch.
                LValue value = lowJSValue(m_node->child1());

                LBasicBlock isCellBlock = m_out.newBlock();
                LBasicBlock isStringBlock = m_out.newBlock();

                m_out.branch(
                    isCell(value, provenType(m_node->child1())),
                    unsure(isCellBlock), unsure(lowBlock(data->fallThrough.block)));

                LBasicBlock lastNext = m_out.appendTo(isCellBlock, isStringBlock);

                m_out.branch(
                    isString(value, provenType(m_node->child1())),
                    unsure(isStringBlock), unsure(lowBlock(data->fallThrough.block)));

                m_out.appendTo(isStringBlock, lastNext);

                switchString(data, value, m_node->child1());
                return;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                return;
            }
            return;
        }

        case SwitchCell: {
            // Switch on the cell pointer itself; non-cells go to fallThrough.
            LValue cell;
            switch (m_node->child1().useKind()) {
            case CellUse: {
                cell = lowCell(m_node->child1());
                break;
            }

            case UntypedUse: {
                LValue value = lowJSValue(m_node->child1());
                LBasicBlock cellCase = m_out.newBlock();
                m_out.branch(
                    isCell(value, provenType(m_node->child1())),
                    unsure(cellCase), unsure(lowBlock(data->fallThrough.block)));
                m_out.appendTo(cellCase);
                cell = value;
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                return;
            }

            buildSwitch(m_node->switchData(), pointerType(), cell);
            return;
        } }

        DFG_CRASH(m_graph, m_node, "Bad switch kind");
    }
9457
9458 void compileEntrySwitch()
9459 {
9460 Vector<LBasicBlock> successors;
9461 for (DFG::BasicBlock* successor : m_node->entrySwitchData()->cases)
9462 successors.append(lowBlock(successor));
9463 m_out.entrySwitch(successors);
9464 }
9465
9466 void compileReturn()
9467 {
9468 m_out.ret(lowJSValue(m_node->child1()));
9469 }
9470
    // Lowers ForceOSRExit: unconditionally terminate this code path with an
    // OSR exit, recording InadequateCoverage as the exit reason.
    void compileForceOSRExit()
    {
        terminate(InadequateCoverage);
    }
9475
    // Lowers a CPUIntrinsic node to the raw x86-64 instruction it names, using
    // a B3 patchpoint whose generator emits the instruction directly. On
    // non-X86_64 targets this function emits nothing -- presumably CPUIntrinsic
    // nodes are only created on x86-64 (TODO confirm where the node is made).
    void compileCPUIntrinsic()
    {
#if CPU(X86_64)
        Intrinsic intrinsic = m_node->intrinsic();
        switch (intrinsic) {
        case CPUMfenceIntrinsic:
        case CPUCpuidIntrinsic:
        case CPUPauseIntrinsic: {
            PatchpointValue* patchpoint = m_out.patchpoint(Void);
            // Pretend this is a call so B3 won't reorder or eliminate it.
            patchpoint->effects = Effects::forCall();
            // cpuid writes eax/ebx/ecx/edx; declare them clobbered.
            if (intrinsic == CPUCpuidIntrinsic)
                patchpoint->clobber(RegisterSet { X86Registers::eax, X86Registers::ebx, X86Registers::ecx, X86Registers::edx });

            patchpoint->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) {
                switch (intrinsic) {
                case CPUMfenceIntrinsic:
                    jit.mfence();
                    break;
                case CPUCpuidIntrinsic:
                    jit.cpuid();
                    break;
                case CPUPauseIntrinsic:
                    jit.pause();
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                }
            });
            // These intrinsics produce no meaningful value; result is undefined.
            setJSValue(m_out.constInt64(JSValue::encode(jsUndefined())));
            break;
        }
        case CPURdtscIntrinsic: {
            PatchpointValue* patchpoint = m_out.patchpoint(Int32);
            patchpoint->effects = Effects::forCall();
            patchpoint->clobber(RegisterSet { X86Registers::eax, X86Registers::edx });
            // The low 32-bits of rdtsc go into rax.
            patchpoint->resultConstraint = ValueRep::reg(X86Registers::eax);
            patchpoint->setGenerator( [=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) {
                jit.rdtsc();
            });
            // Only the low 32 bits are kept; box them as an int32 JSValue.
            setJSValue(boxInt32(patchpoint));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();

        }
#endif
    }
9525
9526 void compileThrow()
9527 {
9528 LValue error = lowJSValue(m_node->child1());
9529 vmCall(Void, m_out.operation(operationThrowDFG), m_callFrame, error);
9530 // vmCall() does an exception check so we should never reach this.
9531 m_out.unreachable();
9532 }
9533
9534 void compileThrowStaticError()
9535 {
9536 LValue errorMessage = lowString(m_node->child1());
9537 LValue errorType = m_out.constInt32(m_node->errorType());
9538 vmCall(Void, m_out.operation(operationThrowStaticError), m_callFrame, errorMessage, errorType);
9539 // vmCall() does an exception check so we should never reach this.
9540 m_out.unreachable();
9541 }
9542
    // Emits an invalidation point: a label (padded by watchpointLabel() with a
    // nop shadow only when needed) that can later be overwritten with a jump
    // to an OSR exit when the code is invalidated. The patchpoint captures the
    // exit state and registers a JumpReplacement at link time.
    void compileInvalidationPoint()
    {
        if (verboseCompilationEnabled())
            dataLog("    Invalidation point with availability: ", availabilityMap(), "\n");

        DFG_ASSERT(m_graph, m_node, m_origin.exitOK);

        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        OSRExitDescriptor* descriptor = appendOSRExitDescriptor(noValue(), nullptr);
        NodeOrigin origin = m_origin;
        // Cold arguments: the OSR exit state, only needed if the jump is taken.
        patchpoint->appendColdAnys(buildExitArguments(descriptor, origin.forExit, noValue()));

        State* state = &m_ftlState;

        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
                // The MacroAssembler knows more about this than B3 does. The watchpointLabel() method
                // will ensure that this is followed by a nop shadow but only when this is actually
                // necessary.
                CCallHelpers::Label label = jit.watchpointLabel();

                RefPtr<OSRExitHandle> handle = descriptor->emitOSRExitLater(
                    *state, UncountableInvalidation, origin, params);

                RefPtr<JITCode> jitCode = state->jitCode.get();

                // At link time, record that the watchpoint label can be replaced
                // with a jump to the OSR exit emitted above.
                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        JumpReplacement jumpReplacement(
                            linkBuffer.locationOf<JSInternalPtrTag>(label),
                            linkBuffer.locationOf<OSRExitPtrTag>(handle->label));
                        jitCode->common.jumpReplacements.append(jumpReplacement);
                    });
            });

        // Set some obvious things.
        patchpoint->effects.terminal = false;
        patchpoint->effects.writesLocalState = false;
        patchpoint->effects.readsLocalState = false;

        // This is how we tell B3 about the possibility of jump replacement.
        patchpoint->effects.exitsSideways = true;

        // It's not possible for some prior branch to determine the safety of this operation. It's always
        // fine to execute this on some path that wouldn't have originally executed it before
        // optimization.
        patchpoint->effects.controlDependent = false;

        // If this falls through then it won't write anything.
        patchpoint->effects.writes = HeapRange();

        // When this abruptly terminates, it could read any heap location.
        patchpoint->effects.reads = HeapRange::top();
    }
9597
9598 void compileIsEmpty()
9599 {
9600 setBoolean(m_out.isZero64(lowJSValue(m_node->child1())));
9601 }
9602
    // Lowers IsUndefined via the shared null/undefined comparison helper,
    // asking for equality with undefined; AllCellsAreFalse selects the cell
    // handling (see equalNullOrUndefined for its exact semantics).
    void compileIsUndefined()
    {
        setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualUndefined));
    }
9607
9608 void compileIsUndefinedOrNull()
9609 {
9610 setBoolean(isOther(lowJSValue(m_node->child1()), provenType(m_node->child1())));
9611 }
9612
9613 void compileIsBoolean()
9614 {
9615 setBoolean(isBoolean(lowJSValue(m_node->child1()), provenType(m_node->child1())));
9616 }
9617
9618 void compileIsNumber()
9619 {
9620 setBoolean(isNumber(lowJSValue(m_node->child1()), provenType(m_node->child1())));
9621 }
9622
    // Lowers NumberIsInteger (Number.isInteger): int32s are trivially true,
    // non-numbers trivially false. For doubles we first reject NaN/Infinity by
    // testing for an all-ones exponent, then check whether truncating toward
    // zero leaves the value unchanged.
    void compileNumberIsInteger()
    {
        LBasicBlock notInt32 = m_out.newBlock();
        LBasicBlock doubleCase = m_out.newBlock();
        LBasicBlock doubleNotNanOrInf = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue input = lowJSValue(m_node->child1());

        ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(
            isInt32(input, provenType(m_node->child1())), unsure(continuation), unsure(notInt32));

        LBasicBlock lastNext = m_out.appendTo(notInt32, doubleCase);
        ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isNotNumber(input, provenType(m_node->child1())), unsure(continuation), unsure(doubleCase));

        m_out.appendTo(doubleCase, doubleNotNanOrInf);
        LValue doubleAsInt;
        LValue asDouble = unboxDouble(input, &doubleAsInt);
        // Extract the 11-bit IEEE-754 exponent (bits 52..62). An exponent of
        // 0x7ff means NaN or +/-Infinity, neither of which is an integer.
        LValue expBits = m_out.bitAnd(m_out.lShr(doubleAsInt, m_out.constInt32(52)), m_out.constInt64(0x7ff));
        m_out.branch(
            m_out.equal(expBits, m_out.constInt64(0x7ff)),
            unsure(continuation), unsure(doubleNotNanOrInf));

        m_out.appendTo(doubleNotNanOrInf, continuation);
        // Round toward zero and compare with the original: equal iff integral.
        // Done in a patchpoint so we can use the CPU's round/compare directly.
        PatchpointValue* patchpoint = m_out.patchpoint(Int32);
        patchpoint->appendSomeRegister(asDouble);
        patchpoint->numFPScratchRegisters = 1;
        patchpoint->effects = Effects::none();
        patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            GPRReg result = params[0].gpr();
            FPRReg input = params[1].fpr();
            FPRReg temp = params.fpScratch(0);
            jit.roundTowardZeroDouble(input, temp);
            jit.compareDouble(MacroAssembler::DoubleEqual, input, temp, result);
        });
        ValueFromBlock patchpointResult = m_out.anchor(patchpoint);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, trueResult, falseResult, patchpointResult));
    }
9667
9668 void compileIsCellWithType()
9669 {
9670 if (m_node->child1().useKind() == UntypedUse) {
9671 LValue value = lowJSValue(m_node->child1());
9672
9673 LBasicBlock isCellCase = m_out.newBlock();
9674 LBasicBlock continuation = m_out.newBlock();
9675
9676 ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
9677 m_out.branch(
9678 isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
9679
9680 LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
9681 ValueFromBlock cellResult = m_out.anchor(isCellWithType(value, m_node->queriedType(), m_node->speculatedTypeForQuery(), provenType(m_node->child1())));
9682 m_out.jump(continuation);
9683
9684 m_out.appendTo(continuation, lastNext);
9685 setBoolean(m_out.phi(Int32, notCellResult, cellResult));
9686 } else {
9687 ASSERT(m_node->child1().useKind() == CellUse);
9688 setBoolean(isCellWithType(lowCell(m_node->child1()), m_node->queriedType(), m_node->speculatedTypeForQuery(), provenType(m_node->child1())));
9689 }
9690 }
9691
9692 void compileIsObject()
9693 {
9694 LValue value = lowJSValue(m_node->child1());
9695
9696 LBasicBlock isCellCase = m_out.newBlock();
9697 LBasicBlock continuation = m_out.newBlock();
9698
9699 ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
9700 m_out.branch(
9701 isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
9702
9703 LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
9704 ValueFromBlock cellResult = m_out.anchor(isObject(value, provenType(m_node->child1())));
9705 m_out.jump(continuation);
9706
9707 m_out.appendTo(continuation, lastNext);
9708 setBoolean(m_out.phi(Int32, notCellResult, cellResult));
9709 }
9710
9711 LValue wangsInt64Hash(LValue input)
9712 {
9713 // key += ~(key << 32);
9714 LValue key = input;
9715 LValue temp = key;
9716 temp = m_out.shl(temp, m_out.constInt32(32));
9717 temp = m_out.bitNot(temp);
9718 key = m_out.add(key, temp);
9719 // key ^= (key >> 22);
9720 temp = key;
9721 temp = m_out.lShr(temp, m_out.constInt32(22));
9722 key = m_out.bitXor(key, temp);
9723 // key += ~(key << 13);
9724 temp = key;
9725 temp = m_out.shl(temp, m_out.constInt32(13));
9726 temp = m_out.bitNot(temp);
9727 key = m_out.add(key, temp);
9728 // key ^= (key >> 8);
9729 temp = key;
9730 temp = m_out.lShr(temp, m_out.constInt32(8));
9731 key = m_out.bitXor(key, temp);
9732 // key += (key << 3);
9733 temp = key;
9734 temp = m_out.shl(temp, m_out.constInt32(3));
9735 key = m_out.add(key, temp);
9736 // key ^= (key >> 15);
9737 temp = key;
9738 temp = m_out.lShr(temp, m_out.constInt32(15));
9739 key = m_out.bitXor(key, temp);
9740 // key += ~(key << 27);
9741 temp = key;
9742 temp = m_out.shl(temp, m_out.constInt32(27));
9743 temp = m_out.bitNot(temp);
9744 key = m_out.add(key, temp);
9745 // key ^= (key >> 31);
9746 temp = key;
9747 temp = m_out.lShr(temp, m_out.constInt32(31));
9748 key = m_out.bitXor(key, temp);
9749 key = m_out.castToInt32(key);
9750
9751 return key;
9752 }
9753
    // Computes the Map/Set hash of a JSString. Rope strings (no contiguous
    // StringImpl) and strings whose StringImpl hash has not been computed yet
    // (stored hash field is zero) go through operationMapHash; otherwise we
    // read the cached hash out of StringImpl's hashAndFlags word, shifting off
    // the flag bits.
    LValue mapHashString(LValue string, Edge& edge)
    {
        LBasicBlock nonEmptyStringCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isRopeString(string, edge), rarely(slowCase), usually(nonEmptyStringCase));

        LBasicBlock lastNext = m_out.appendTo(nonEmptyStringCase, slowCase);
        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
        // hashAndFlags packs the hash above s_flagCount flag bits.
        LValue hash = m_out.lShr(m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
        ValueFromBlock nonEmptyStringHashResult = m_out.anchor(hash);
        // A zero hash means "not yet computed"; fall back to the runtime.
        m_out.branch(m_out.equal(hash, m_out.constInt32(0)),
            unsure(slowCase), unsure(continuation));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int32, m_out.operation(operationMapHash), m_callFrame, string));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, slowResult, nonEmptyStringHashResult);
    }
9777
    // Lowers MapHash: computes the hash used by JSMap/JSSet for child1.
    // Non-string keys are hashed by mixing their 64-bit encoding with
    // wangsInt64Hash; strings use mapHashString (cached StringImpl hash or a
    // runtime call). The UntypedUse path (after the switch) re-does the
    // cell/string dispatch dynamically.
    void compileMapHash()
    {
        switch (m_node->child1().useKind()) {
        case BooleanUse:
        case Int32Use:
        case SymbolUse:
        case ObjectUse: {
            // These kinds never hold strings: hash the bits directly.
            LValue key = lowJSValue(m_node->child1(), ManualOperandSpeculation);
            speculate(m_node->child1());
            setInt32(wangsInt64Hash(key));
            return;
        }

        case CellUse: {
            // A cell might be a string; check the JSType to pick the path.
            LBasicBlock isString = m_out.newBlock();
            LBasicBlock notString = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue value = lowCell(m_node->child1());
            LValue isStringValue = m_out.equal(m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoType), m_out.constInt32(StringType));
            m_out.branch(
                isStringValue, unsure(isString), unsure(notString));

            LBasicBlock lastNext = m_out.appendTo(isString, notString);
            ValueFromBlock stringResult = m_out.anchor(mapHashString(value, m_node->child1()));
            m_out.jump(continuation);

            m_out.appendTo(notString, continuation);
            ValueFromBlock notStringResult = m_out.anchor(wangsInt64Hash(value));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setInt32(m_out.phi(Int32, stringResult, notStringResult));
            return;
        }

        case StringUse: {
            LValue string = lowString(m_node->child1());
            setInt32(mapHashString(string, m_node->child1()));
            return;
        }

        default:
            RELEASE_ASSERT(m_node->child1().useKind() == UntypedUse);
            break;
        }

        // UntypedUse: dispatch dynamically on cell-ness and string-ness.
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock straightHash = m_out.newBlock();
        LBasicBlock isStringCase = m_out.newBlock();
        LBasicBlock nonEmptyStringCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(straightHash));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
        LValue isString = m_out.equal(m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoType), m_out.constInt32(StringType));
        m_out.branch(
            isString, unsure(isStringCase), unsure(straightHash));

        // Rope strings have no flat StringImpl; punt to the runtime.
        m_out.appendTo(isStringCase, nonEmptyStringCase);
        m_out.branch(isRopeString(value, m_node->child1()), rarely(slowCase), usually(nonEmptyStringCase));

        // Non-rope string: use the cached StringImpl hash unless it's still 0
        // (not yet computed), in which case the runtime computes it.
        m_out.appendTo(nonEmptyStringCase, straightHash);
        LValue stringImpl = m_out.loadPtr(value, m_heaps.JSString_value);
        LValue hash = m_out.lShr(m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
        ValueFromBlock nonEmptyStringHashResult = m_out.anchor(hash);
        m_out.branch(m_out.equal(hash, m_out.constInt32(0)),
            unsure(slowCase), unsure(continuation));

        m_out.appendTo(straightHash, slowCase);
        ValueFromBlock fastResult = m_out.anchor(wangsInt64Hash(value));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int32, m_out.operation(operationMapHash), m_callFrame, value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setInt32(m_out.phi(Int32, fastResult, slowResult, nonEmptyStringHashResult));
    }
9864
    // Lowers NormalizeMapKey: canonicalizes a key before it is used by
    // Map/Set. Non-numbers and int32s pass through unchanged; any NaN is
    // replaced by the canonical NaN encoding; a double that round-trips
    // exactly through int32 is re-boxed as an int32; all other doubles pass
    // through unchanged.
    void compileNormalizeMapKey()
    {
        ASSERT(m_node->child1().useKind() == UntypedUse);

        LBasicBlock isNumberCase = m_out.newBlock();
        LBasicBlock notInt32NumberCase = m_out.newBlock();
        LBasicBlock notNaNCase = m_out.newBlock();
        LBasicBlock convertibleCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(isNumberCase);

        LValue key = lowJSValue(m_node->child1());
        // Non-numbers are already normalized.
        ValueFromBlock fastResult = m_out.anchor(key);
        m_out.branch(isNotNumber(key), unsure(continuation), unsure(isNumberCase));

        m_out.appendTo(isNumberCase, notInt32NumberCase);
        m_out.branch(isInt32(key), unsure(continuation), unsure(notInt32NumberCase));

        m_out.appendTo(notInt32NumberCase, notNaNCase);
        LValue doubleValue = unboxDouble(key);
        // NaN never equals itself; normalize every NaN bit pattern to jsNaN().
        ValueFromBlock normalizedNaNResult = m_out.anchor(m_out.constInt64(JSValue::encode(jsNaN())));
        m_out.branch(m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue), unsure(continuation), unsure(notNaNCase));

        m_out.appendTo(notNaNCase, convertibleCase);
        // Convert to int and back; if the round trip is exact, the double can
        // be represented (and is re-boxed) as an int32. Out-of-range doubles
        // fail the equality check below and keep their double encoding --
        // TODO confirm doubleToInt's out-of-range result is benign here.
        LValue integerValue = m_out.doubleToInt(doubleValue);
        LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue);
        ValueFromBlock doubleResult = m_out.anchor(key);
        m_out.branch(m_out.doubleNotEqualOrUnordered(doubleValue, integerValueConvertedToDouble), unsure(continuation), unsure(convertibleCase));

        m_out.appendTo(convertibleCase, continuation);
        ValueFromBlock boxedIntResult = m_out.anchor(boxInt32(integerValue));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, normalizedNaNResult, doubleResult, boxedIntResult));
    }
9902
    // Lowers GetMapBucket: finds the hash-map bucket for a key in a JSMap or
    // JSSet by linear probing. child1 is the map/set, child2 the key, child3
    // the precomputed hash. The table buffer holds bucket pointers; the index
    // is masked with capacity - 1 (which assumes a power-of-two capacity --
    // consistent with HashMapImpl; TODO confirm). An empty slot ends the probe
    // with the sentinel bucket; a deleted slot continues probing. Key equality
    // follows Object.is(); string comparisons that need a deep compare go to
    // the runtime slow path.
    void compileGetMapBucket()
    {
        LBasicBlock loopStart = m_out.newBlock();
        LBasicBlock loopAround = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock notPresentInTable = m_out.newBlock();
        LBasicBlock notEmptyValue = m_out.newBlock();
        LBasicBlock notDeletedValue = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);

        LValue map;
        if (m_node->child1().useKind() == MapObjectUse)
            map = lowMapObject(m_node->child1());
        else if (m_node->child1().useKind() == SetObjectUse)
            map = lowSetObject(m_node->child1());
        else
            RELEASE_ASSERT_NOT_REACHED();

        LValue key = lowJSValue(m_node->child2(), ManualOperandSpeculation);
        if (m_node->child2().useKind() != UntypedUse)
            speculate(m_node->child2());

        LValue hash = lowInt32(m_node->child3());

        LValue buffer = m_out.loadPtr(map, m_heaps.HashMapImpl_buffer);
        LValue mask = m_out.sub(m_out.load32(map, m_heaps.HashMapImpl_capacity), m_out.int32One);

        ValueFromBlock indexStart = m_out.anchor(hash);
        m_out.jump(loopStart);

        // Probe loop: load the bucket pointer at (hash + probe count) & mask.
        m_out.appendTo(loopStart, notEmptyValue);
        LValue unmaskedIndex = m_out.phi(Int32, indexStart);
        LValue index = m_out.bitAnd(mask, unmaskedIndex);
        // FIXME: I think these buffers are caged?
        // https://bugs.webkit.org/show_bug.cgi?id=174925
        LValue hashMapBucket = m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), buffer, m_out.zeroExt(index, Int64), ScaleEight));
        ValueFromBlock bucketResult = m_out.anchor(hashMapBucket);
        // Empty slot: the key is not in the table.
        m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(bitwise_cast<intptr_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::emptyValue()))),
            unsure(notPresentInTable), unsure(notEmptyValue));

        // Deleted slot: keep probing past it.
        m_out.appendTo(notEmptyValue, notDeletedValue);
        m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(bitwise_cast<intptr_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::deletedValue()))),
            unsure(loopAround), unsure(notDeletedValue));

        m_out.appendTo(notDeletedValue, loopAround);
        LValue bucketKey = m_out.load64(hashMapBucket, m_heaps.HashMapBucket_key);

        // Perform Object.is()
        switch (m_node->child2().useKind()) {
        case BooleanUse:
        case Int32Use:
        case SymbolUse:
        case ObjectUse: {
            // These kinds compare by bits alone.
            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(loopAround));
            break;
        }
        case StringUse: {
            // The key is a string: bit-equal is a hit; otherwise only a string
            // bucket key could still match, via a deep compare in the runtime.
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, loopAround);
            m_out.branch(isString(bucketKey),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        case CellUse: {
            // A cell key might be a string; a deep compare is only needed when
            // both the key and the bucket key are strings.
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();
            LBasicBlock bucketKeyIsString = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, bucketKeyIsString);
            m_out.branch(isString(bucketKey),
                unsure(bucketKeyIsString), unsure(loopAround));

            m_out.appendTo(bucketKeyIsString, loopAround);
            m_out.branch(isString(key),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        case UntypedUse: {
            // Fully dynamic: only two string cells can require a deep compare.
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();
            LBasicBlock bothAreCells = m_out.newBlock();
            LBasicBlock bucketKeyIsString = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, bothAreCells);
            m_out.branch(isCell(key),
                unsure(bothAreCells), unsure(loopAround));

            m_out.appendTo(bothAreCells, bucketKeyIsString);
            m_out.branch(isString(bucketKey),
                unsure(bucketKeyIsString), unsure(loopAround));

            m_out.appendTo(bucketKeyIsString, loopAround);
            m_out.branch(isString(key),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        // Advance the probe index and loop.
        m_out.appendTo(loopAround, slowPath);
        m_out.addIncomingToPhi(unmaskedIndex, m_out.anchor(m_out.add(index, m_out.int32One)));
        m_out.jump(loopStart);

        m_out.appendTo(slowPath, notPresentInTable);
        ValueFromBlock slowPathResult = m_out.anchor(vmCall(pointerType(),
            m_out.operation(m_node->child1().useKind() == MapObjectUse ? operationJSMapFindBucket : operationJSSetFindBucket), m_callFrame, map, key, hash));
        m_out.jump(continuation);

        // Miss: produce the VM's per-type sentinel bucket.
        m_out.appendTo(notPresentInTable, continuation);
        ValueFromBlock notPresentResult;
        if (m_node->child1().useKind() == MapObjectUse)
            notPresentResult = m_out.anchor(weakPointer(vm().sentinelMapBucket()));
        else if (m_node->child1().useKind() == SetObjectUse)
            notPresentResult = m_out.anchor(weakPointer(vm().sentinelSetBucket()));
        else
            RELEASE_ASSERT_NOT_REACHED();
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), bucketResult, slowPathResult, notPresentResult));
    }
10051
10052 void compileGetMapBucketHead()
10053 {
10054 LValue map;
10055 if (m_node->child1().useKind() == MapObjectUse)
10056 map = lowMapObject(m_node->child1());
10057 else if (m_node->child1().useKind() == SetObjectUse)
10058 map = lowSetObject(m_node->child1());
10059 else
10060 RELEASE_ASSERT_NOT_REACHED();
10061
10062 ASSERT(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::offsetOfHead() == HashMapImpl<HashMapBucket<HashMapBucketDataKeyValue>>::offsetOfHead());
10063 setJSValue(m_out.loadPtr(map, m_heaps.HashMapImpl_head));
10064 }
10065
    // Lowers GetMapBucketNext: advance a map/set bucket iterator by following
    // ->next pointers, skipping buckets whose key slot is zero (presumably
    // cleared/deleted entries -- TODO confirm against HashMapBucket). Reaching
    // the end of the chain (null) yields the owner-type sentinel bucket.
    void compileGetMapBucketNext()
    {
        LBasicBlock loopStart = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock noBucket = m_out.newBlock();
        LBasicBlock hasBucket = m_out.newBlock();
        LBasicBlock nextBucket = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);

        // next/key offsets are layout-independent; checked here.
        ASSERT(HashMapBucket<HashMapBucketDataKey>::offsetOfNext() == HashMapBucket<HashMapBucketDataKeyValue>::offsetOfNext());
        ASSERT(HashMapBucket<HashMapBucketDataKey>::offsetOfKey() == HashMapBucket<HashMapBucketDataKeyValue>::offsetOfKey());
        LValue mapBucketPrev = lowCell(m_node->child1());
        ValueFromBlock mapBucketStart = m_out.anchor(m_out.loadPtr(mapBucketPrev, m_heaps.HashMapBucket_next));
        m_out.jump(loopStart);

        m_out.appendTo(loopStart, noBucket);
        LValue mapBucket = m_out.phi(pointerType(), mapBucketStart);
        m_out.branch(m_out.isNull(mapBucket), unsure(noBucket), unsure(hasBucket));

        // End of chain: return the sentinel for the owning container type.
        m_out.appendTo(noBucket, hasBucket);
        ValueFromBlock noBucketResult;
        if (m_node->bucketOwnerType() == BucketOwnerType::Map)
            noBucketResult = m_out.anchor(weakPointer(vm().sentinelMapBucket()));
        else {
            ASSERT(m_node->bucketOwnerType() == BucketOwnerType::Set);
            noBucketResult = m_out.anchor(weakPointer(vm().sentinelSetBucket()));
        }
        m_out.jump(continuation);

        // A bucket with a zero key is skipped; otherwise it's the result.
        m_out.appendTo(hasBucket, nextBucket);
        ValueFromBlock bucketResult = m_out.anchor(mapBucket);
        m_out.branch(m_out.isZero64(m_out.load64(mapBucket, m_heaps.HashMapBucket_key)), unsure(nextBucket), unsure(continuation));

        m_out.appendTo(nextBucket, continuation);
        m_out.addIncomingToPhi(mapBucket, m_out.anchor(m_out.loadPtr(mapBucket, m_heaps.HashMapBucket_next)));
        m_out.jump(loopStart);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), noBucketResult, bucketResult));
    }
10107
10108 void compileLoadValueFromMapBucket()
10109 {
10110 LValue mapBucket = lowCell(m_node->child1());
10111 setJSValue(m_out.load64(mapBucket, m_heaps.HashMapBucket_value));
10112 }
10113
10114 void compileExtractValueFromWeakMapGet()
10115 {
10116 LValue value = lowJSValue(m_node->child1());
10117 setJSValue(m_out.select(m_out.isZero64(value),
10118 m_out.constInt64(JSValue::encode(jsUndefined())),
10119 value));
10120 }
10121
10122 void compileLoadKeyFromMapBucket()
10123 {
10124 LValue mapBucket = lowCell(m_node->child1());
10125 setJSValue(m_out.load64(mapBucket, m_heaps.HashMapBucket_key));
10126 }
10127
10128 void compileSetAdd()
10129 {
10130 LValue set = lowSetObject(m_node->child1());
10131 LValue key = lowJSValue(m_node->child2());
10132 LValue hash = lowInt32(m_node->child3());
10133
10134 setJSValue(vmCall(pointerType(), m_out.operation(operationSetAdd), m_callFrame, set, key, hash));
10135 }
10136
10137 void compileMapSet()
10138 {
10139 LValue map = lowMapObject(m_graph.varArgChild(m_node, 0));
10140 LValue key = lowJSValue(m_graph.varArgChild(m_node, 1));
10141 LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
10142 LValue hash = lowInt32(m_graph.varArgChild(m_node, 3));
10143
10144 setJSValue(vmCall(pointerType(), m_out.operation(operationMapSet), m_callFrame, map, key, value, hash));
10145 }
10146
    // Lowers WeakMapGet: probe the open-addressed WeakMap/WeakSet table for an
    // object key. Buckets are stored inline in the buffer (bucket size is a
    // power of two, checked by the static_asserts); the probe index is masked
    // with capacity - 1. The probe stops on a bit-equal key (hit) or a null
    // key (empty slot => miss). For WeakMapObjectUse the result is the
    // bucket's value slot; for WeakSetObjectUse it is the key itself. On a
    // miss the result is the empty bucket's contents; presumably zero, later
    // normalized to undefined by ExtractValueFromWeakMapGet -- TODO confirm.
    void compileWeakMapGet()
    {
        LBasicBlock loopStart = m_out.newBlock();
        LBasicBlock loopAround = m_out.newBlock();
        LBasicBlock notEqualValue = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);

        LValue weakMap;
        if (m_node->child1().useKind() == WeakMapObjectUse)
            weakMap = lowWeakMapObject(m_node->child1());
        else if (m_node->child1().useKind() == WeakSetObjectUse)
            weakMap = lowWeakSetObject(m_node->child1());
        else
            RELEASE_ASSERT_NOT_REACHED();
        LValue key = lowObject(m_node->child2());
        LValue hash = lowInt32(m_node->child3());

        LValue buffer = m_out.loadPtr(weakMap, m_heaps.WeakMapImpl_buffer);
        LValue mask = m_out.sub(m_out.load32(weakMap, m_heaps.WeakMapImpl_capacity), m_out.int32One);

        ValueFromBlock indexStart = m_out.anchor(hash);
        m_out.jump(loopStart);

        m_out.appendTo(loopStart, notEqualValue);
        LValue unmaskedIndex = m_out.phi(Int32, indexStart);
        LValue index = m_out.bitAnd(mask, unmaskedIndex);

        LValue bucket;

        // bucket = buffer + index * sizeof(bucket), computed with a shift
        // since the bucket size is a power of two.
        if (m_node->child1().useKind() == WeakMapObjectUse) {
            static_assert(hasOneBitSet(sizeof(WeakMapBucket<WeakMapBucketDataKeyValue>)), "Should be a power of 2");
            bucket = m_out.add(buffer, m_out.shl(m_out.zeroExt(index, Int64), m_out.constInt32(getLSBSet(sizeof(WeakMapBucket<WeakMapBucketDataKeyValue>)))));
        } else {
            static_assert(hasOneBitSet(sizeof(WeakMapBucket<WeakMapBucketDataKey>)), "Should be a power of 2");
            bucket = m_out.add(buffer, m_out.shl(m_out.zeroExt(index, Int64), m_out.constInt32(getLSBSet(sizeof(WeakMapBucket<WeakMapBucketDataKey>)))));
        }

        LValue bucketKey = m_out.load64(bucket, m_heaps.WeakMapBucket_key);
        m_out.branch(m_out.equal(key, bucketKey), unsure(continuation), unsure(notEqualValue));

        // Null bucket key: empty slot, the key is not in the table.
        m_out.appendTo(notEqualValue, loopAround);
        m_out.branch(m_out.isNull(bucketKey), unsure(continuation), unsure(loopAround));

        // Occupied by a different key: advance the probe index.
        m_out.appendTo(loopAround, continuation);
        m_out.addIncomingToPhi(unmaskedIndex, m_out.anchor(m_out.add(index, m_out.int32One)));
        m_out.jump(loopStart);

        m_out.appendTo(continuation, lastNext);
        LValue result;
        if (m_node->child1().useKind() == WeakMapObjectUse)
            result = m_out.load64(bucket, m_heaps.WeakMapBucket_value);
        else
            result = bucketKey;
        setJSValue(result);
    }
10204
10205 void compileWeakSetAdd()
10206 {
10207 LValue set = lowWeakSetObject(m_node->child1());
10208 LValue key = lowObject(m_node->child2());
10209 LValue hash = lowInt32(m_node->child3());
10210
10211 vmCall(Void, m_out.operation(operationWeakSetAdd), m_callFrame, set, key, hash);
10212 }
10213
10214 void compileWeakMapSet()
10215 {
10216 LValue map = lowWeakMapObject(m_graph.varArgChild(m_node, 0));
10217 LValue key = lowObject(m_graph.varArgChild(m_node, 1));
10218 LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
10219 LValue hash = lowInt32(m_graph.varArgChild(m_node, 3));
10220
10221 vmCall(Void, m_out.operation(operationWeakMapSet), m_callFrame, map, key, value, hash);
10222 }
10223
    // Lowers IsObjectOrNull (the typeof-style "object" test): null is true;
    // function cells are false; non-object cells are false; ordinary objects
    // are true. Objects that are "exotic for typeof" (presumably
    // masquerades-as-undefined and similar -- see isExoticForTypeof) take a
    // slow path that asks the runtime via operationObjectIsObject. Non-cell,
    // non-null values are false.
    void compileIsObjectOrNull()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        Edge child = m_node->child1();
        LValue value = lowJSValue(child);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock notFunctionCase = m_out.newBlock();
        LBasicBlock objectCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock notCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase));

        // Functions answer false even though they are objects.
        LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase);
        ValueFromBlock isFunctionResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isFunction(value, provenType(child)),
            unsure(continuation), unsure(notFunctionCase));

        m_out.appendTo(notFunctionCase, objectCase);
        ValueFromBlock notObjectResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isObject(value, provenType(child)),
            unsure(objectCase), unsure(continuation));

        m_out.appendTo(objectCase, slowPath);
        ValueFromBlock objectResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(
            isExoticForTypeof(value, provenType(child)),
            rarely(slowPath), usually(continuation));

        m_out.appendTo(slowPath, notCellCase);
        VM& vm = this->vm();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationObjectIsObject, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
            }, value);
        ValueFromBlock slowResult = m_out.anchor(m_out.notZero64(slowResultValue));
        m_out.jump(continuation);

        // Non-cell: only null answers true.
        m_out.appendTo(notCellCase, continuation);
        LValue notCellResultValue = m_out.equal(value, m_out.constInt64(JSValue::encode(jsNull())));
        ValueFromBlock notCellResult = m_out.anchor(notCellResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(
            Int32,
            isFunctionResult, notObjectResult, objectResult, slowResult, notCellResult);
        setBoolean(result);
    }
10280
// Lowers IsFunction: produces a boolean saying whether child1 is a function.
// Fast paths: non-cells are false; cells that pass isFunction are true;
// non-function, non-exotic cells are false. Only cells that are "exotic for
// typeof" take the slow path call to decide.
void compileIsFunction()
{
    JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

    Edge child = m_node->child1();
    LValue value = lowJSValue(child);

    LBasicBlock cellCase = m_out.newBlock();
    LBasicBlock notFunctionCase = m_out.newBlock();
    LBasicBlock slowPath = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    // Non-cells can never be functions.
    ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
    m_out.branch(
        isCell(value, provenType(child)), unsure(cellCase), unsure(continuation));

    LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase);
    ValueFromBlock functionResult = m_out.anchor(m_out.booleanTrue);
    m_out.branch(
        isFunction(value, provenType(child)),
        unsure(continuation), unsure(notFunctionCase));

    // Not an ordinary function: usually false, but exotic-for-typeof cells
    // (NOTE(review): presumably masquerades-as-undefined / certain host
    // objects — confirm against isExoticForTypeof) must ask the runtime.
    m_out.appendTo(notFunctionCase, slowPath);
    ValueFromBlock objectResult = m_out.anchor(m_out.booleanFalse);
    m_out.branch(
        isExoticForTypeof(value, provenType(child)),
        rarely(slowPath), usually(continuation));

    m_out.appendTo(slowPath, continuation);
    VM& vm = this->vm();
    // Lazily-emitted call: operationObjectIsFunction(globalObject, value).
    LValue slowResultValue = lazySlowPath(
        [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
            return createLazyCallGenerator(vm,
                operationObjectIsFunction, locations[0].directGPR(),
                CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
        }, value);
    ValueFromBlock slowResult = m_out.anchor(m_out.notNull(slowResultValue));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    LValue result = m_out.phi(
        Int32, notCellResult, functionResult, objectResult, slowResult);
    setBoolean(result);
}
10325
10326 void compileIsTypedArrayView()
10327 {
10328 LValue value = lowJSValue(m_node->child1());
10329
10330 LBasicBlock isCellCase = m_out.newBlock();
10331 LBasicBlock continuation = m_out.newBlock();
10332
10333 ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
10334 m_out.branch(isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
10335
10336 LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
10337 ValueFromBlock cellResult = m_out.anchor(isTypedArrayView(value, provenType(m_node->child1())));
10338 m_out.jump(continuation);
10339
10340 m_out.appendTo(continuation, lastNext);
10341 setBoolean(m_out.phi(Int32, notCellResult, cellResult));
10342 }
10343
10344 void compileTypeOf()
10345 {
10346 Edge child = m_node->child1();
10347 LValue value = lowJSValue(child);
10348
10349 LBasicBlock continuation = m_out.newBlock();
10350 LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
10351
10352 Vector<ValueFromBlock> results;
10353
10354 buildTypeOf(
10355 child, value,
10356 [&] (TypeofType type) {
10357 results.append(m_out.anchor(weakPointer(vm().smallStrings.typeString(type))));
10358 m_out.jump(continuation);
10359 });
10360
10361 m_out.appendTo(continuation, lastNext);
10362 setJSValue(m_out.phi(Int64, results));
10363 }
10364
10365 void compileInByVal()
10366 {
10367 setJSValue(vmCall(Int64, m_out.operation(operationInByVal), m_callFrame, lowCell(m_node->child1()), lowJSValue(m_node->child2())));
10368 }
10369
// Lowers InById via a patchpoint that embeds a JITInByIdGenerator inline
// cache. Cache misses take an out-of-line path that calls
// operationInByIdOptimize, which can repatch the fast path.
void compileInById()
{
    Node* node = m_node;
    UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()];
    LValue base = lowCell(m_node->child1());

    PatchpointValue* patchpoint = m_out.patchpoint(Int64);
    patchpoint->appendSomeRegister(base);
    // Keep the tag registers live/materialized across the patchpoint.
    patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
    patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));

    patchpoint->clobber(RegisterSet::macroScratchRegisters());

    RefPtr<PatchpointExceptionHandle> exceptionHandle =
        preparePatchpointForExceptions(patchpoint);

    State* state = &m_ftlState;
    patchpoint->setGenerator(
        [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            AllowMacroScratchRegisterUsage allowScratch(jit);

            CallSiteIndex callSiteIndex =
                state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

            // This is the direct exit target for operation calls.
            Box<CCallHelpers::JumpList> exceptions =
                exceptionHandle->scheduleExitCreation(params)->jumps(jit);

            // params[0] holds the result JSValue; params[1] holds the base cell.
            auto generator = Box<JITInByIdGenerator>::create(
                jit.codeBlock(), node->origin.semantic, callSiteIndex,
                params.unavailableRegisters(), uid, JSValueRegs(params[1].gpr()),
                JSValueRegs(params[0].gpr()));

            generator->generateFastPath(jit);
            CCallHelpers::Label done = jit.label();

            // Out-of-line slow path: call the optimizing operation, then jump
            // back to `done` so the fast-path result flow is shared.
            params.addLatePath(
                [=] (CCallHelpers& jit) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);

                    generator->slowPathJump().link(&jit);
                    CCallHelpers::Label slowPathBegin = jit.label();
                    CCallHelpers::Call slowPathCall = callOperation(
                        *state, params.unavailableRegisters(), jit, node->origin.semantic,
                        exceptions.get(), operationInByIdOptimize, params[0].gpr(),
                        CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                        CCallHelpers::TrustedImmPtr(uid)).call();
                    jit.jump().linkTo(done, &jit);

                    generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                    jit.addLinkTask(
                        [=] (LinkBuffer& linkBuffer) {
                            generator->finalize(linkBuffer, linkBuffer);
                        });
                });
        });

    setJSValue(patchpoint);
}
10430
// Lowers HasOwnProperty. Fast path: obtain a UniquedStringImpl* for the key
// (from a string or symbol), then probe the VM's HasOwnPropertyCache keyed by
// (structureID, impl). On a hit, use the cached boolean; otherwise call
// operationHasOwnProperty.
void compileHasOwnProperty()
{
    LBasicBlock slowCase = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();
    LBasicBlock lastNext = nullptr;

    LValue object = lowObject(m_node->child1());
    LValue uniquedStringImpl;
    LValue keyAsValue = nullptr;
    switch (m_node->child2().useKind()) {
    case StringUse: {
        // String key: must be a non-rope, atomic string to use its impl
        // pointer as the cache key; anything else takes the slow case.
        LBasicBlock isNonEmptyString = m_out.newBlock();
        LBasicBlock isAtomString = m_out.newBlock();

        keyAsValue = lowString(m_node->child2());
        m_out.branch(isNotRopeString(keyAsValue, m_node->child2()), usually(isNonEmptyString), rarely(slowCase));

        lastNext = m_out.appendTo(isNonEmptyString, isAtomString);
        uniquedStringImpl = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
        LValue isNotAtomic = m_out.testIsZero32(m_out.load32(uniquedStringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtomic()));
        m_out.branch(isNotAtomic, rarely(slowCase), usually(isAtomString));

        m_out.appendTo(isAtomString, slowCase);
        break;
    }
    case SymbolUse: {
        // Symbol key: its SymbolImpl is already uniqued; no checks needed.
        keyAsValue = lowSymbol(m_node->child2());
        uniquedStringImpl = m_out.loadPtr(keyAsValue, m_heaps.Symbol_symbolImpl);
        lastNext = m_out.insertNewBlocksBefore(slowCase);
        break;
    }
    case UntypedUse: {
        // Untyped key: dynamically distinguish atomic strings and symbols;
        // everything else (non-cells, ropes, non-atomic strings, other cells)
        // goes to the slow case.
        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock isStringCase = m_out.newBlock();
        LBasicBlock notStringCase = m_out.newBlock();
        LBasicBlock isNonEmptyString = m_out.newBlock();
        LBasicBlock isSymbolCase = m_out.newBlock();
        LBasicBlock hasUniquedStringImpl = m_out.newBlock();

        keyAsValue = lowJSValue(m_node->child2());
        m_out.branch(isCell(keyAsValue), usually(isCellCase), rarely(slowCase));

        lastNext = m_out.appendTo(isCellCase, isStringCase);
        m_out.branch(isString(keyAsValue), unsure(isStringCase), unsure(notStringCase));

        m_out.appendTo(isStringCase, isNonEmptyString);
        m_out.branch(isNotRopeString(keyAsValue, m_node->child2()), usually(isNonEmptyString), rarely(slowCase));

        m_out.appendTo(isNonEmptyString, notStringCase);
        LValue implFromString = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
        ValueFromBlock stringResult = m_out.anchor(implFromString);
        LValue isNotAtomic = m_out.testIsZero32(m_out.load32(implFromString, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtomic()));
        m_out.branch(isNotAtomic, rarely(slowCase), usually(hasUniquedStringImpl));

        m_out.appendTo(notStringCase, isSymbolCase);
        m_out.branch(isSymbol(keyAsValue), unsure(isSymbolCase), unsure(slowCase));

        m_out.appendTo(isSymbolCase, hasUniquedStringImpl);
        ValueFromBlock symbolResult = m_out.anchor(m_out.loadPtr(keyAsValue, m_heaps.Symbol_symbolImpl));
        m_out.jump(hasUniquedStringImpl);

        m_out.appendTo(hasUniquedStringImpl, slowCase);
        uniquedStringImpl = m_out.phi(pointerType(), stringResult, symbolResult);
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }

    ASSERT(keyAsValue);

    // Note that we don't test if the hash is zero here. AtomStringImpl's can't have a zero
    // hash, however, a SymbolImpl may. But, because this is a cache, we don't care. We only
    // ever load the result from the cache if the cache entry matches what we are querying for.
    // So we either get super lucky and use zero for the hash and somehow collide with the entity
    // we're looking for, or we realize we're comparing against another entity, and go to the
    // slow path anyways.
    LValue hash = m_out.lShr(m_out.load32(uniquedStringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));

    // Cache index = (hash + structureID) & mask; a hit requires both the
    // structureID and the impl pointer of the entry to match.
    LValue structureID = m_out.load32(object, m_heaps.JSCell_structureID);
    LValue index = m_out.add(hash, structureID);
    index = m_out.zeroExtPtr(m_out.bitAnd(index, m_out.constInt32(HasOwnPropertyCache::mask)));
    ASSERT(vm().hasOwnPropertyCache());
    LValue cache = m_out.constIntPtr(vm().hasOwnPropertyCache());

    IndexedAbstractHeap& heap = m_heaps.HasOwnPropertyCache;
    LValue sameStructureID = m_out.equal(structureID, m_out.load32(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfStructureID())));
    LValue sameImpl = m_out.equal(uniquedStringImpl, m_out.loadPtr(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfImpl())));
    // Load the cached boolean result unconditionally; it is only used if the
    // cache-hit branch below is taken.
    ValueFromBlock fastResult = m_out.anchor(m_out.load8ZeroExt32(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfResult())));
    LValue cacheHit = m_out.bitAnd(sameStructureID, sameImpl);

    m_out.branch(m_out.notZero32(cacheHit), usually(continuation), rarely(slowCase));

    m_out.appendTo(slowCase, continuation);
    ValueFromBlock slowResult;
    slowResult = m_out.anchor(vmCall(Int32, m_out.operation(operationHasOwnProperty), m_callFrame, object, keyAsValue));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    setBoolean(m_out.phi(Int32, fastResult, slowResult));
}
10532
10533 void compileParseInt()
10534 {
10535 RELEASE_ASSERT(m_node->child1().useKind() == UntypedUse || m_node->child1().useKind() == StringUse);
10536 LValue result;
10537 if (m_node->child2()) {
10538 LValue radix = lowInt32(m_node->child2());
10539 if (m_node->child1().useKind() == UntypedUse)
10540 result = vmCall(Int64, m_out.operation(operationParseIntGeneric), m_callFrame, lowJSValue(m_node->child1()), radix);
10541 else
10542 result = vmCall(Int64, m_out.operation(operationParseIntString), m_callFrame, lowString(m_node->child1()), radix);
10543 } else {
10544 if (m_node->child1().useKind() == UntypedUse)
10545 result = vmCall(Int64, m_out.operation(operationParseIntNoRadixGeneric), m_callFrame, lowJSValue(m_node->child1()));
10546 else
10547 result = vmCall(Int64, m_out.operation(operationParseIntStringNoRadix), m_callFrame, lowString(m_node->child1()));
10548 }
10549 setJSValue(result);
10550 }
10551
// Lowers OverridesHasInstance: answers true if the hasInstance value differs
// from the frozen default hasInstance function (the node's cellOperand), or
// if the constructor's type-info flags lack ImplementsDefaultHasInstance.
void compileOverridesHasInstance()
{
    FrozenValue* defaultHasInstanceFunction = m_node->cellOperand();
    ASSERT(defaultHasInstanceFunction->cell()->inherits<JSFunction>(vm()));

    LValue constructor = lowCell(m_node->child1());
    LValue hasInstance = lowJSValue(m_node->child2());

    LBasicBlock defaultHasInstance = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    // Unlike in the DFG, we don't worry about cleaning this code up for the case where we have proven the hasInstanceValue is a constant as B3 should fix it for us.

    // A non-default hasInstance always counts as overriding.
    ValueFromBlock notDefaultHasInstanceResult = m_out.anchor(m_out.booleanTrue);
    m_out.branch(m_out.notEqual(hasInstance, frozenPointer(defaultHasInstanceFunction)), unsure(continuation), unsure(defaultHasInstance));

    // Default hasInstance: overriding iff the ImplementsDefaultHasInstance
    // type-info bit is clear on the constructor.
    LBasicBlock lastNext = m_out.appendTo(defaultHasInstance, continuation);
    ValueFromBlock implementsDefaultHasInstanceResult = m_out.anchor(m_out.testIsZero32(
        m_out.load8ZeroExt32(constructor, m_heaps.JSCell_typeInfoFlags),
        m_out.constInt32(ImplementsDefaultHasInstance)));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    setBoolean(m_out.phi(Int32, implementsDefaultHasInstanceResult, notDefaultHasInstanceResult));
}
10577
10578 void compileCheckTypeInfoFlags()
10579 {
10580 speculate(
10581 BadTypeInfoFlags, noValue(), 0,
10582 m_out.testIsZero32(
10583 m_out.load8ZeroExt32(lowCell(m_node->child1()), m_heaps.JSCell_typeInfoFlags),
10584 m_out.constInt32(m_node->typeInfoOperand())));
10585 }
10586
// Lowers InstanceOf via a patchpoint embedding a JITInstanceOfGenerator
// inline cache. Static knowledge from abstract interpretation (value/
// prototype known to be cells, prototype known to be an object) elides the
// corresponding dynamic checks. Misses call operationInstanceOfOptimize.
void compileInstanceOf()
{
    Node* node = m_node;
    State* state = &m_ftlState;

    LValue value;
    LValue prototype;
    bool valueIsCell;
    bool prototypeIsCell;
    if (m_node->child1().useKind() == CellUse
        && m_node->child2().useKind() == CellUse) {
        value = lowCell(m_node->child1());
        prototype = lowCell(m_node->child2());

        valueIsCell = true;
        prototypeIsCell = true;
    } else {
        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
        DFG_ASSERT(m_graph, m_node, m_node->child2().useKind() == UntypedUse);

        value = lowJSValue(m_node->child1());
        prototype = lowJSValue(m_node->child2());

        valueIsCell = abstractValue(m_node->child1()).isType(SpecCell);
        prototypeIsCell = abstractValue(m_node->child2()).isType(SpecCell);
    }

    bool prototypeIsObject = abstractValue(m_node->child2()).isType(SpecObject | ~SpecCell);

    PatchpointValue* patchpoint = m_out.patchpoint(Int64);
    patchpoint->appendSomeRegister(value);
    patchpoint->appendSomeRegister(prototype);
    // Keep the tag registers live/materialized across the patchpoint.
    patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
    patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
    patchpoint->numGPScratchRegisters = 2;
    patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
    patchpoint->clobber(RegisterSet::macroScratchRegisters());

    RefPtr<PatchpointExceptionHandle> exceptionHandle =
        preparePatchpointForExceptions(patchpoint);

    patchpoint->setGenerator(
        [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            AllowMacroScratchRegisterUsage allowScratch(jit);

            GPRReg resultGPR = params[0].gpr();
            GPRReg valueGPR = params[1].gpr();
            GPRReg prototypeGPR = params[2].gpr();
            GPRReg scratchGPR = params.gpScratch(0);
            GPRReg scratch2GPR = params.gpScratch(1);

            // If the value might not be a cell, answer false for non-cells
            // without consulting the IC.
            CCallHelpers::Jump doneJump;
            if (!valueIsCell) {
                CCallHelpers::Jump isCell = jit.branchIfCell(valueGPR);
                jit.boxBooleanPayload(false, resultGPR);
                doneJump = jit.jump();
                isCell.link(&jit);
            }

            // A non-cell prototype always takes the slow path.
            CCallHelpers::JumpList slowCases;
            if (!prototypeIsCell)
                slowCases.append(jit.branchIfNotCell(prototypeGPR));

            CallSiteIndex callSiteIndex =
                state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

            // This is the direct exit target for operation calls.
            Box<CCallHelpers::JumpList> exceptions =
                exceptionHandle->scheduleExitCreation(params)->jumps(jit);

            auto generator = Box<JITInstanceOfGenerator>::create(
                jit.codeBlock(), node->origin.semantic, callSiteIndex,
                params.unavailableRegisters(), resultGPR, valueGPR, prototypeGPR, scratchGPR,
                scratch2GPR, prototypeIsObject);
            generator->generateFastPath(jit);
            CCallHelpers::Label done = jit.label();

            // Out-of-line slow path: call the optimizing operation, then
            // rejoin at `done`.
            params.addLatePath(
                [=] (CCallHelpers& jit) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);

                    J_JITOperation_ESsiJJ optimizationFunction = operationInstanceOfOptimize;

                    slowCases.link(&jit);
                    CCallHelpers::Label slowPathBegin = jit.label();
                    CCallHelpers::Call slowPathCall = callOperation(
                        *state, params.unavailableRegisters(), jit, node->origin.semantic,
                        exceptions.get(), optimizationFunction, resultGPR,
                        CCallHelpers::TrustedImmPtr(generator->stubInfo()), valueGPR,
                        prototypeGPR).call();
                    jit.jump().linkTo(done, &jit);

                    generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                    jit.addLinkTask(
                        [=] (LinkBuffer& linkBuffer) {
                            generator->finalize(linkBuffer, linkBuffer);
                        });
                });

            if (doneJump.isSet())
                doneJump.link(&jit);
        });

    // This returns a boxed boolean.
    setJSValue(patchpoint);
}
10694
10695 void compileInstanceOfCustom()
10696 {
10697 LValue value = lowJSValue(m_node->child1());
10698 LValue constructor = lowCell(m_node->child2());
10699 LValue hasInstance = lowJSValue(m_node->child3());
10700
10701 setBoolean(m_out.logicalNot(m_out.equal(m_out.constInt32(0), vmCall(Int32, m_out.operation(operationInstanceOfCustom), m_callFrame, value, constructor, hasInstance))));
10702 }
10703
10704 void compileCountExecution()
10705 {
10706 TypedPointer counter = m_out.absolute(m_node->executionCounter()->address());
10707 m_out.store64(m_out.add(m_out.load64(counter), m_out.constInt64(1)), counter);
10708 }
10709
10710 void compileSuperSamplerBegin()
10711 {
10712 TypedPointer counter = m_out.absolute(bitwise_cast<void*>(&g_superSamplerCount));
10713 m_out.store32(m_out.add(m_out.load32(counter), m_out.constInt32(1)), counter);
10714 }
10715
10716 void compileSuperSamplerEnd()
10717 {
10718 TypedPointer counter = m_out.absolute(bitwise_cast<void*>(&g_superSamplerCount));
10719 m_out.store32(m_out.sub(m_out.load32(counter), m_out.constInt32(1)), counter);
10720 }
10721
10722 void compileStoreBarrier()
10723 {
10724 emitStoreBarrier(lowCell(m_node->child1()), m_node->op() == FencedStoreBarrier);
10725 }
10726
// Lowers HasIndexedProperty. For Int32/Contiguous/Double/ArrayStorage array
// modes, checks bounds (when not proven in-bounds) and then tests the slot
// for a hole (zero for JSValue slots, NaN for double slots); out-of-bounds
// or hole takes the slow call to operationHasIndexedPropertyByInt. All other
// array modes call the operation unconditionally.
void compileHasIndexedProperty()
{
    LValue base = lowCell(m_graph.varArgChild(m_node, 0));
    LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

    switch (m_node->arrayMode().type()) {
    case Array::Int32:
    case Array::Contiguous: {
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
        LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));

        IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
            m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;

        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock lastNext = nullptr;

        // Bounds check against publicLength unless proven in-bounds.
        if (!m_node->arrayMode().isInBounds()) {
            LBasicBlock checkHole = m_out.newBlock();
            m_out.branch(
                m_out.aboveOrEqual(
                    index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
                rarely(slowCase), usually(checkHole));
            lastNext = m_out.appendTo(checkHole, slowCase);
        } else
            lastNext = m_out.insertNewBlocksBefore(slowCase);

        // A zero slot encodes a hole; non-zero means the property exists.
        LValue checkHoleResultValue =
            m_out.notZero64(m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1))));
        ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
        m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
        return;
    }
    case Array::Double: {
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
        LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));

        IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;

        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock lastNext = nullptr;

        // Bounds check against publicLength unless proven in-bounds.
        if (!m_node->arrayMode().isInBounds()) {
            LBasicBlock checkHole = m_out.newBlock();
            m_out.branch(
                m_out.aboveOrEqual(
                    index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
                rarely(slowCase), usually(checkHole));
            lastNext = m_out.appendTo(checkHole, slowCase);
        } else
            lastNext = m_out.insertNewBlocksBefore(slowCase);

        // Double arrays mark holes as NaN; x == x is false only for NaN.
        LValue doubleValue = m_out.loadDouble(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
        LValue checkHoleResultValue = m_out.doubleEqual(doubleValue, doubleValue);
        ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
        m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
        return;
    }

    case Array::ArrayStorage: {
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
        LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));

        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock lastNext = nullptr;

        // ArrayStorage bounds-checks against vectorLength (not publicLength).
        if (!m_node->arrayMode().isInBounds()) {
            LBasicBlock checkHole = m_out.newBlock();
            m_out.branch(
                m_out.aboveOrEqual(
                    index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength)),
                rarely(slowCase), usually(checkHole));
            lastNext = m_out.appendTo(checkHole, slowCase);
        } else
            lastNext = m_out.insertNewBlocksBefore(slowCase);

        LValue checkHoleResultValue =
            m_out.notZero64(m_out.load64(baseIndex(m_heaps.ArrayStorage_vector, storage, index, m_graph.varArgChild(m_node, 1))));
        ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
        m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
        break;
    }

    default: {
        // No fast path for other array modes: always call the operation.
        LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));
        setBoolean(m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
        break;
    }
    }
}
10844
10845 void compileHasGenericProperty()
10846 {
10847 LValue base = lowJSValue(m_node->child1());
10848 LValue property = lowCell(m_node->child2());
10849 setJSValue(vmCall(Int64, m_out.operation(operationHasGenericProperty), m_callFrame, base, property));
10850 }
10851
// Lowers HasStructureProperty: if the base's structureID matches the
// enumerator's cached structureID, the property is known present (true);
// otherwise fall back to operationHasGenericProperty and compare the boxed
// result against jsBoolean(true).
void compileHasStructureProperty()
{
    LValue base = lowJSValue(m_node->child1());
    LValue property = lowString(m_node->child2());
    LValue enumerator = lowCell(m_node->child3());

    LBasicBlock correctStructure = m_out.newBlock();
    LBasicBlock wrongStructure = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    m_out.branch(m_out.notEqual(
        m_out.load32(base, m_heaps.JSCell_structureID),
        m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)),
        rarely(wrongStructure), usually(correctStructure));

    // Matching structure: the enumerated property must exist on the object.
    LBasicBlock lastNext = m_out.appendTo(correctStructure, wrongStructure);
    ValueFromBlock correctStructureResult = m_out.anchor(m_out.booleanTrue);
    m_out.jump(continuation);

    m_out.appendTo(wrongStructure, continuation);
    ValueFromBlock wrongStructureResult = m_out.anchor(
        m_out.equal(
            m_out.constInt64(JSValue::encode(jsBoolean(true))),
            vmCall(Int64, m_out.operation(operationHasGenericProperty), m_callFrame, base, property)));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    setBoolean(m_out.phi(Int32, correctStructureResult, wrongStructureResult));
}
10881
// Lowers GetDirectPname: loads an enumerated property by its index. If the
// base's structure matches the enumerator's cached structure, the property
// is loaded directly — inline storage when index < cachedInlineCapacity,
// else out-of-line butterfly storage; otherwise falls back to a generic
// operationGetByVal call.
void compileGetDirectPname()
{
    LValue base = lowCell(m_graph.varArgChild(m_node, 0));
    LValue property = lowCell(m_graph.varArgChild(m_node, 1));
    LValue index = lowInt32(m_graph.varArgChild(m_node, 2));
    LValue enumerator = lowCell(m_graph.varArgChild(m_node, 3));

    LBasicBlock checkOffset = m_out.newBlock();
    LBasicBlock inlineLoad = m_out.newBlock();
    LBasicBlock outOfLineLoad = m_out.newBlock();
    LBasicBlock slowCase = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    // Structure mismatch means the cached offsets are not usable.
    m_out.branch(m_out.notEqual(
        m_out.load32(base, m_heaps.JSCell_structureID),
        m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)),
        rarely(slowCase), usually(checkOffset));

    LBasicBlock lastNext = m_out.appendTo(checkOffset, inlineLoad);
    m_out.branch(m_out.aboveOrEqual(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity)),
        unsure(outOfLineLoad), unsure(inlineLoad));

    m_out.appendTo(inlineLoad, outOfLineLoad);
    ValueFromBlock inlineResult = m_out.anchor(
        m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(),
            base, m_out.zeroExt(index, Int64), ScaleEight, JSObject::offsetOfInlineStorage())));
    m_out.jump(continuation);

    // Out-of-line: butterfly properties are indexed with a negated offset
    // past the inline capacity, relative to the first out-of-line slot.
    m_out.appendTo(outOfLineLoad, slowCase);
    LValue storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
    LValue realIndex = m_out.signExt32To64(
        m_out.neg(m_out.sub(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity))));
    int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
    ValueFromBlock outOfLineResult = m_out.anchor(
        m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), storage, realIndex, ScaleEight, offsetOfFirstProperty)));
    m_out.jump(continuation);

    m_out.appendTo(slowCase, continuation);
    ValueFromBlock slowCaseResult = m_out.anchor(
        vmCall(Int64, m_out.operation(operationGetByVal), m_callFrame, base, property));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    setJSValue(m_out.phi(Int64, inlineResult, outOfLineResult, slowCaseResult));
}
10927
10928 void compileGetEnumerableLength()
10929 {
10930 LValue enumerator = lowCell(m_node->child1());
10931 setInt32(m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_indexLength));
10932 }
10933
10934 void compileGetPropertyEnumerator()
10935 {
10936 if (m_node->child1().useKind() == CellUse)
10937 setJSValue(vmCall(Int64, m_out.operation(operationGetPropertyEnumeratorCell), m_callFrame, lowCell(m_node->child1())));
10938 else
10939 setJSValue(vmCall(Int64, m_out.operation(operationGetPropertyEnumerator), m_callFrame, lowJSValue(m_node->child1())));
10940 }
10941
// Lowers GetEnumeratorStructurePname: returns the cached structure-property
// name at `index` from the enumerator's names vector, or jsNull() when the
// index has passed endStructurePropertyIndex.
void compileGetEnumeratorStructurePname()
{
    LValue enumerator = lowCell(m_node->child1());
    LValue index = lowInt32(m_node->child2());

    LBasicBlock inBounds = m_out.newBlock();
    LBasicBlock outOfBounds = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endStructurePropertyIndex)),
        usually(inBounds), rarely(outOfBounds));

    LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
    LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector);
    ValueFromBlock inBoundsResult = m_out.anchor(
        m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index))));
    m_out.jump(continuation);

    // Past the structure properties: signal the end with null.
    m_out.appendTo(outOfBounds, continuation);
    ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
}
10967
// Lowers GetEnumeratorGenericPname: same shape as the structure-pname
// variant, but bounded by endGenericPropertyIndex. Returns the cached name
// at `index`, or jsNull() when out of bounds.
void compileGetEnumeratorGenericPname()
{
    LValue enumerator = lowCell(m_node->child1());
    LValue index = lowInt32(m_node->child2());

    LBasicBlock inBounds = m_out.newBlock();
    LBasicBlock outOfBounds = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endGenericPropertyIndex)),
        usually(inBounds), rarely(outOfBounds));

    LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
    LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector);
    ValueFromBlock inBoundsResult = m_out.anchor(
        m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index))));
    m_out.jump(continuation);

    // Past the generic properties: signal the end with null.
    m_out.appendTo(outOfBounds, continuation);
    ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
}
10993
10994 void compileToIndexString()
10995 {
10996 LValue index = lowInt32(m_node->child1());
10997 setJSValue(vmCall(Int64, m_out.operation(operationToIndexString), m_callFrame, index));
10998 }
10999
11000 void compileCheckStructureImmediate()
11001 {
11002 LValue structure = lowCell(m_node->child1());
11003 checkStructure(
11004 structure, noValue(), BadCache, m_node->structureSet(),
11005 [this] (RegisteredStructure structure) {
11006 return weakStructure(structure);
11007 });
11008 }
11009
11010 void compileMaterializeNewObject()
11011 {
11012 ObjectMaterializationData& data = m_node->objectMaterializationData();
11013
11014 // Lower the values first, to avoid creating values inside a control flow diamond.
11015
11016 Vector<LValue, 8> values;
11017 for (unsigned i = 0; i < data.m_properties.size(); ++i) {
11018 Edge edge = m_graph.varArgChild(m_node, 1 + i);
11019 switch (data.m_properties[i].kind()) {
11020 case PublicLengthPLoc:
11021 case VectorLengthPLoc:
11022 values.append(lowInt32(edge));
11023 break;
11024 default:
11025 values.append(lowJSValue(edge));
11026 break;
11027 }
11028 }
11029
11030 RegisteredStructureSet set = m_node->structureSet();
11031
11032 Vector<LBasicBlock, 1> blocks(set.size());
11033 for (unsigned i = set.size(); i--;)
11034 blocks[i] = m_out.newBlock();
11035 LBasicBlock dummyDefault = m_out.newBlock();
11036 LBasicBlock outerContinuation = m_out.newBlock();
11037
11038 Vector<SwitchCase, 1> cases(set.size());
11039 for (unsigned i = set.size(); i--;)
11040 cases[i] = SwitchCase(weakStructure(set.at(i)), blocks[i], Weight(1));
11041 m_out.switchInstruction(
11042 lowCell(m_graph.varArgChild(m_node, 0)), cases, dummyDefault, Weight(0));
11043
11044 LBasicBlock outerLastNext = m_out.m_nextBlock;
11045
11046 Vector<ValueFromBlock, 1> results;
11047
11048 for (unsigned i = set.size(); i--;) {
11049 m_out.appendTo(blocks[i], i + 1 < set.size() ? blocks[i + 1] : dummyDefault);
11050
11051 RegisteredStructure structure = set.at(i);
11052
11053 LValue object;
11054 LValue butterfly;
11055
11056 if (structure->outOfLineCapacity() || hasIndexedProperties(structure->indexingType())) {
11057 size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
11058 Allocator cellAllocator = allocatorForNonVirtualConcurrently<JSFinalObject>(vm(), allocationSize, AllocatorForMode::AllocatorIfExists);
11059
11060 bool hasIndexingHeader = hasIndexedProperties(structure->indexingType());
11061 unsigned indexingHeaderSize = 0;
11062 LValue indexingPayloadSizeInBytes = m_out.intPtrZero;
11063 LValue vectorLength = m_out.int32Zero;
11064 LValue publicLength = m_out.int32Zero;
11065 if (hasIndexingHeader) {
11066 indexingHeaderSize = sizeof(IndexingHeader);
11067 for (unsigned i = data.m_properties.size(); i--;) {
11068 PromotedLocationDescriptor descriptor = data.m_properties[i];
11069 switch (descriptor.kind()) {
11070 case PublicLengthPLoc:
11071 publicLength = values[i];
11072 break;
11073 case VectorLengthPLoc:
11074 vectorLength = values[i];
11075 break;
11076 default:
11077 break;
11078 }
11079 }
11080 indexingPayloadSizeInBytes =
11081 m_out.mul(m_out.zeroExtPtr(vectorLength), m_out.intPtrEight);
11082 }
11083
11084 LValue butterflySize = m_out.add(
11085 m_out.constIntPtr(
11086 structure->outOfLineCapacity() * sizeof(JSValue) + indexingHeaderSize),
11087 indexingPayloadSizeInBytes);
11088
11089 LBasicBlock slowPath = m_out.newBlock();
11090 LBasicBlock continuation = m_out.newBlock();
11091
11092 LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
11093
11094 ValueFromBlock noButterfly = m_out.anchor(m_out.intPtrZero);
11095
11096 LValue startOfStorage = allocateHeapCell(
11097 allocatorForSize(vm().jsValueGigacageAuxiliarySpace, butterflySize, slowPath),
11098 slowPath);
11099
11100 LValue fastButterflyValue = m_out.add(
11101 startOfStorage,
11102 m_out.constIntPtr(
11103 structure->outOfLineCapacity() * sizeof(JSValue) + sizeof(IndexingHeader)));
11104
11105 ValueFromBlock haveButterfly = m_out.anchor(fastButterflyValue);
11106
11107 splatWords(
11108 fastButterflyValue,
11109 m_out.constInt32(-structure->outOfLineCapacity() - 1),
11110 m_out.constInt32(-1),
11111 m_out.int64Zero, m_heaps.properties.atAnyNumber());
11112
11113 m_out.store32(vectorLength, fastButterflyValue, m_heaps.Butterfly_vectorLength);
11114
11115 LValue fastObjectValue = allocateObject(
11116 m_out.constIntPtr(cellAllocator.localAllocator()), structure, fastButterflyValue,
11117 slowPath);
11118
11119 ValueFromBlock fastObject = m_out.anchor(fastObjectValue);
11120 ValueFromBlock fastButterfly = m_out.anchor(fastButterflyValue);
11121 m_out.jump(continuation);
11122
11123 m_out.appendTo(slowPath, continuation);
11124
11125 LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);
11126
11127 VM& vm = this->vm();
11128 LValue slowObjectValue;
11129 if (hasIndexingHeader) {
11130 slowObjectValue = lazySlowPath(
11131 [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
11132 return createLazyCallGenerator(vm,
11133 operationNewObjectWithButterflyWithIndexingHeaderAndVectorLength,
11134 locations[0].directGPR(), CCallHelpers::TrustedImmPtr(structure.get()),
11135 locations[1].directGPR(), locations[2].directGPR());
11136 },
11137 vectorLength, butterflyValue);
11138 } else {
11139 slowObjectValue = lazySlowPath(
11140 [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
11141 return createLazyCallGenerator(vm,
11142 operationNewObjectWithButterfly, locations[0].directGPR(),
11143 CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR());
11144 },
11145 butterflyValue);
11146 }
11147 ValueFromBlock slowObject = m_out.anchor(slowObjectValue);
11148 ValueFromBlock slowButterfly = m_out.anchor(
11149 m_out.loadPtr(slowObjectValue, m_heaps.JSObject_butterfly));
11150
11151 m_out.jump(continuation);
11152
11153 m_out.appendTo(continuation, lastNext);
11154
11155 object = m_out.phi(pointerType(), fastObject, slowObject);
11156 butterfly = m_out.phi(pointerType(), fastButterfly, slowButterfly);
11157
11158 m_out.store32(publicLength, butterfly, m_heaps.Butterfly_publicLength);
11159
11160 initializeArrayElements(m_out.constInt32(structure->indexingType()), m_out.int32Zero, vectorLength, butterfly);
11161
11162 HashMap<int32_t, LValue, DefaultHash<int32_t>::Hash, WTF::UnsignedWithZeroKeyHashTraits<int32_t>> indexMap;
11163 Vector<int32_t> indices;
11164 for (unsigned i = data.m_properties.size(); i--;) {
11165 PromotedLocationDescriptor descriptor = data.m_properties[i];
11166 if (descriptor.kind() != IndexedPropertyPLoc)
11167 continue;
11168 int32_t index = static_cast<int32_t>(descriptor.info());
11169
11170 auto result = indexMap.add(index, values[i]);
11171 DFG_ASSERT(m_graph, m_node, result); // Duplicates are illegal.
11172
11173 indices.append(index);
11174 }
11175
11176 if (!indices.isEmpty()) {
11177 std::sort(indices.begin(), indices.end());
11178
11179 Vector<LBasicBlock> blocksWithStores(indices.size());
11180 Vector<LBasicBlock> blocksWithChecks(indices.size());
11181
11182 for (unsigned i = indices.size(); i--;) {
11183 blocksWithStores[i] = m_out.newBlock();
11184 blocksWithChecks[i] = m_out.newBlock(); // blocksWithChecks[0] is the continuation.
11185 }
11186
11187 LBasicBlock indexLastNext = m_out.m_nextBlock;
11188
11189 for (unsigned i = indices.size(); i--;) {
11190 int32_t index = indices[i];
11191 LValue value = indexMap.get(index);
11192
11193 m_out.branch(
11194 m_out.below(m_out.constInt32(index), publicLength),
11195 unsure(blocksWithStores[i]), unsure(blocksWithChecks[i]));
11196
11197 m_out.appendTo(blocksWithStores[i], blocksWithChecks[i]);
11198
11199 // This has to type-check and convert its inputs, but it cannot do so in a
11200 // way that updates AI. That's a bit annoying, but if you think about how
11201 // sinking works, it's actually not a bad thing. We are virtually guaranteed
11202 // that these type checks will not fail, since the type checks that guarded
11203 // the original stores to the array are still somewhere above this point.
11204 Output::StoreType storeType;
11205 IndexedAbstractHeap* heap;
11206 switch (structure->indexingType()) {
11207 case ALL_INT32_INDEXING_TYPES:
11208 // FIXME: This could use the proven type if we had the Edge for the
11209 // value. https://bugs.webkit.org/show_bug.cgi?id=155311
11210 speculate(BadType, noValue(), nullptr, isNotInt32(value));
11211 storeType = Output::Store64;
11212 heap = &m_heaps.indexedInt32Properties;
11213 break;
11214
11215 case ALL_DOUBLE_INDEXING_TYPES: {
11216 // FIXME: If the source is ValueRep, we should avoid emitting any
11217 // checks. We could also avoid emitting checks if we had the Edge of
11218 // this value. https://bugs.webkit.org/show_bug.cgi?id=155311
11219
11220 LBasicBlock intCase = m_out.newBlock();
11221 LBasicBlock doubleCase = m_out.newBlock();
11222 LBasicBlock continuation = m_out.newBlock();
11223
11224 m_out.branch(isInt32(value), unsure(intCase), unsure(doubleCase));
11225
11226 LBasicBlock lastNext = m_out.appendTo(intCase, doubleCase);
11227
11228 ValueFromBlock intResult =
11229 m_out.anchor(m_out.intToDouble(unboxInt32(value)));
11230 m_out.jump(continuation);
11231
11232 m_out.appendTo(doubleCase, continuation);
11233
11234 speculate(BadType, noValue(), nullptr, isNumber(value));
11235 ValueFromBlock doubleResult = m_out.anchor(unboxDouble(value));
11236 m_out.jump(continuation);
11237
11238 m_out.appendTo(continuation, lastNext);
11239 value = m_out.phi(Double, intResult, doubleResult);
11240 storeType = Output::StoreDouble;
11241 heap = &m_heaps.indexedDoubleProperties;
11242 break;
11243 }
11244
11245 case ALL_CONTIGUOUS_INDEXING_TYPES:
11246 storeType = Output::Store64;
11247 heap = &m_heaps.indexedContiguousProperties;
11248 break;
11249
11250 default:
11251 DFG_CRASH(m_graph, m_node, "Invalid indexing type");
11252 break;
11253 }
11254
11255 m_out.store(value, m_out.address(butterfly, heap->at(index)), storeType);
11256
11257 m_out.jump(blocksWithChecks[i]);
11258 m_out.appendTo(
11259 blocksWithChecks[i], i ? blocksWithStores[i - 1] : indexLastNext);
11260 }
11261 }
11262 } else {
11263 // In the easy case where we can do a one-shot allocation, we simply allocate the
11264 // object to directly have the desired structure.
11265 object = allocateObject(structure);
11266 butterfly = nullptr; // Don't have one, don't need one.
11267 }
11268
11269 BitVector setInlineOffsets;
11270 for (PropertyMapEntry entry : structure->getPropertiesConcurrently()) {
11271 for (unsigned i = data.m_properties.size(); i--;) {
11272 PromotedLocationDescriptor descriptor = data.m_properties[i];
11273 if (descriptor.kind() != NamedPropertyPLoc)
11274 continue;
11275 if (m_graph.identifiers()[descriptor.info()] != entry.key)
11276 continue;
11277
11278 LValue base;
11279 if (isInlineOffset(entry.offset)) {
11280 setInlineOffsets.set(entry.offset);
11281 base = object;
11282 } else
11283 base = butterfly;
11284 storeProperty(values[i], base, descriptor.info(), entry.offset);
11285 break;
11286 }
11287 }
11288 for (unsigned i = structure->inlineCapacity(); i--;) {
11289 if (!setInlineOffsets.get(i))
11290 m_out.store64(m_out.int64Zero, m_out.address(m_heaps.properties.atAnyNumber(), object, offsetRelativeToBase(i)));
11291 }
11292
11293 results.append(m_out.anchor(object));
11294 m_out.jump(outerContinuation);
11295 }
11296
11297 m_out.appendTo(dummyDefault, outerContinuation);
11298 m_out.unreachable();
11299
11300 m_out.appendTo(outerContinuation, outerLastNext);
11301 setJSValue(m_out.phi(pointerType(), results));
11302 mutatorFence();
11303 }
11304
    // Materializes a JSLexicalEnvironment (activation) that allocation sinking had
    // elided: allocates it (inline fast path, operationCreateActivationDirect slow
    // path) and stores the sunken closure-variable values into its slots.
    void compileMaterializeCreateActivation()
    {
        ObjectMaterializationData& data = m_node->objectMaterializationData();

        // Lower all values up front so none are created inside the control-flow
        // diamond below.
        Vector<LValue, 8> values;
        for (unsigned i = 0; i < data.m_properties.size(); ++i)
            values.append(lowJSValue(m_graph.varArgChild(m_node, 2 + i)));

        LValue scope = lowCell(m_graph.varArgChild(m_node, 1));
        SymbolTable* table = m_node->castOperand<SymbolTable*>();
        ASSERT(table == m_graph.varArgChild(m_node, 0)->castConstant<SymbolTable*>(vm()));
        RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure());

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        // Fast path: inline allocation, then fill in the scope chain and symbol
        // table header fields.
        LValue fastObject = allocateObject<JSLexicalEnvironment>(
            JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);

        m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
        m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);


        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        // We ensure allocation sinking explictly sets bottom values for all field members.
        // Therefore, it doesn't matter what JSValue we pass in as the initialization value
        // because all fields will be overwritten.
        // FIXME: It may be worth creating an operation that calls a constructor on JSLexicalEnvironment that
        // doesn't initialize every slot because we are guaranteed to do that here.
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationCreateActivationDirect, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(table),
                    CCallHelpers::TrustedImm64(JSValue::encode(jsUndefined())));
            }, scope);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue activation = m_out.phi(pointerType(), fastResult, slowResult);
        RELEASE_ASSERT(data.m_properties.size() == table->scopeSize());
        // Store each sunken value into its closure-variable slot.
        for (unsigned i = 0; i < data.m_properties.size(); ++i) {
            PromotedLocationDescriptor descriptor = data.m_properties[i];
            ASSERT(descriptor.kind() == ClosureVarPLoc);
            m_out.store64(
                values[i], activation,
                m_heaps.JSLexicalEnvironment_variables[descriptor.info()]);
        }

        if (validationEnabled()) {
            // Validate to make sure every slot in the scope has one value.
            ConcurrentJSLocker locker(table->m_lock);
            for (auto iter = table->begin(locker), end = table->end(locker); iter != end; ++iter) {
                bool found = false;
                for (unsigned i = 0; i < data.m_properties.size(); ++i) {
                    PromotedLocationDescriptor descriptor = data.m_properties[i];
                    ASSERT(descriptor.kind() == ClosureVarPLoc);
                    if (iter->value.scopeOffset().offset() == descriptor.info()) {
                        found = true;
                        break;
                    }
                }
                ASSERT_UNUSED(found, found);
            }
        }

        // Fence before the activation becomes visible to the concurrent GC.
        mutatorFence();
        setJSValue(activation);
    }
11382
    // Emits the VM traps poll: if the VM's needTrapHandling flag is set, take a
    // lazy slow path that calls operationHandleTraps (termination, debugger, etc.).
    void compileCheckTraps()
    {
        // This node is only emitted when polling-based (as opposed to
        // signal-based) traps are in use.
        ASSERT(Options::usePollingTraps());
        LBasicBlock needTrapHandling = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Load the trap flag byte; zero (no trap pending) is the common case.
        LValue state = m_out.load8ZeroExt32(m_out.absolute(vm().needTrapHandlingAddress()));
        m_out.branch(m_out.isZero32(state),
            usually(continuation), rarely(needTrapHandling));

        LBasicBlock lastNext = m_out.appendTo(needTrapHandling, continuation);

        VM& vm = this->vm();
        lazySlowPath(
            [=, &vm] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm, operationHandleTraps, InvalidGPRReg);
            });
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
11404
11405 void compileRegExpExec()
11406 {
11407 LValue globalObject = lowCell(m_node->child1());
11408
11409 if (m_node->child2().useKind() == RegExpObjectUse) {
11410 LValue base = lowRegExpObject(m_node->child2());
11411
11412 if (m_node->child3().useKind() == StringUse) {
11413 LValue argument = lowString(m_node->child3());
11414 LValue result = vmCall(
11415 Int64, m_out.operation(operationRegExpExecString), m_callFrame, globalObject,
11416 base, argument);
11417 setJSValue(result);
11418 return;
11419 }
11420
11421 LValue argument = lowJSValue(m_node->child3());
11422 LValue result = vmCall(
11423 Int64, m_out.operation(operationRegExpExec), m_callFrame, globalObject, base,
11424 argument);
11425 setJSValue(result);
11426 return;
11427 }
11428
11429 LValue base = lowJSValue(m_node->child2());
11430 LValue argument = lowJSValue(m_node->child3());
11431 LValue result = vmCall(
11432 Int64, m_out.operation(operationRegExpExecGeneric), m_callFrame, globalObject, base,
11433 argument);
11434 setJSValue(result);
11435 }
11436
11437 void compileRegExpExecNonGlobalOrSticky()
11438 {
11439 LValue globalObject = lowCell(m_node->child1());
11440 LValue argument = lowString(m_node->child2());
11441 LValue result = vmCall(
11442 Int64, m_out.operation(operationRegExpExecNonGlobalOrSticky), m_callFrame, globalObject, frozenPointer(m_node->cellOperand()), argument);
11443 setJSValue(result);
11444 }
11445
11446 void compileRegExpMatchFastGlobal()
11447 {
11448 LValue globalObject = lowCell(m_node->child1());
11449 LValue argument = lowString(m_node->child2());
11450 LValue result = vmCall(
11451 Int64, m_out.operation(operationRegExpMatchFastGlobalString), m_callFrame, globalObject, frozenPointer(m_node->cellOperand()), argument);
11452 setJSValue(result);
11453 }
11454
11455 void compileRegExpTest()
11456 {
11457 LValue globalObject = lowCell(m_node->child1());
11458
11459 if (m_node->child2().useKind() == RegExpObjectUse) {
11460 LValue base = lowRegExpObject(m_node->child2());
11461
11462 if (m_node->child3().useKind() == StringUse) {
11463 LValue argument = lowString(m_node->child3());
11464 LValue result = vmCall(
11465 Int32, m_out.operation(operationRegExpTestString), m_callFrame, globalObject,
11466 base, argument);
11467 setBoolean(result);
11468 return;
11469 }
11470
11471 LValue argument = lowJSValue(m_node->child3());
11472 LValue result = vmCall(
11473 Int32, m_out.operation(operationRegExpTest), m_callFrame, globalObject, base,
11474 argument);
11475 setBoolean(result);
11476 return;
11477 }
11478
11479 LValue base = lowJSValue(m_node->child2());
11480 LValue argument = lowJSValue(m_node->child3());
11481 LValue result = vmCall(
11482 Int32, m_out.operation(operationRegExpTestGeneric), m_callFrame, globalObject, base,
11483 argument);
11484 setBoolean(result);
11485 }
11486
11487 void compileRegExpMatchFast()
11488 {
11489 LValue globalObject = lowCell(m_node->child1());
11490 LValue base = lowRegExpObject(m_node->child2());
11491 LValue argument = lowString(m_node->child3());
11492 LValue result = vmCall(
11493 Int64, m_out.operation(operationRegExpMatchFastString), m_callFrame, globalObject,
11494 base, argument);
11495 setJSValue(result);
11496 }
11497
    // Materializes a RegExpObject for a regexp literal: inline fast-path
    // allocation, falling back to operationNewRegexpWithLastIndex.
    void compileNewRegexp()
    {
        FrozenValue* regexp = m_node->cellOperand();
        LValue lastIndex = lowJSValue(m_node->child1());
        ASSERT(regexp->cell()->inherits<RegExp>(vm()));

        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowCase);

        auto structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->regExpStructure());
        LValue fastResultValue = allocateObject<RegExpObject>(structure, m_out.intPtrZero, slowCase);
        // The regExp field doubles as the lastIndexIsNotWritable flag; storing the
        // plain pointer leaves the flag bit clear.
        m_out.storePtr(frozenPointer(regexp), fastResultValue, m_heaps.RegExpObject_regExpAndLastIndexIsNotWritableFlag);
        m_out.store64(lastIndex, fastResultValue, m_heaps.RegExpObject_lastIndex);
        // Fence before the object becomes visible to the concurrent GC.
        mutatorFence();
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        VM& vm = this->vm();
        RegExp* regexpCell = regexp->cast<RegExp*>();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNewRegexpWithLastIndex, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(regexpCell), locations[1].directGPR());
            }, lastIndex);
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
11532
11533 void compileSetFunctionName()
11534 {
11535 vmCall(Void, m_out.operation(operationSetFunctionName), m_callFrame,
11536 lowCell(m_node->child1()), lowJSValue(m_node->child2()));
11537 }
11538
    // Lowers StringReplace via runtime calls, specializing on the children's use
    // kinds and on a constant-empty replacement string.
    void compileStringReplace()
    {
        if (m_node->child1().useKind() == StringUse
            && m_node->child2().useKind() == RegExpObjectUse
            && m_node->child3().useKind() == StringUse) {

            // Replacing with a known-empty string has a dedicated operation.
            if (JSString* replace = m_node->child3()->dynamicCastConstant<JSString*>(vm())) {
                if (!replace->length()) {
                    LValue string = lowString(m_node->child1());
                    LValue regExp = lowRegExpObject(m_node->child2());

                    LValue result = vmCall(
                        pointerType(), m_out.operation(operationStringProtoFuncReplaceRegExpEmptyStr),
                        m_callFrame, string, regExp);

                    setJSValue(result);
                    return;
                }
            }

            // String.prototype.replace with a RegExp search and string replacement.
            LValue string = lowString(m_node->child1());
            LValue regExp = lowRegExpObject(m_node->child2());
            LValue replace = lowString(m_node->child3());

            LValue result = vmCall(
                pointerType(), m_out.operation(operationStringProtoFuncReplaceRegExpString),
                m_callFrame, string, regExp, replace);

            setJSValue(result);
            return;
        }

        // Generic case: the search value may be a string or an arbitrary JSValue.
        LValue search;
        if (m_node->child2().useKind() == StringUse)
            search = lowString(m_node->child2());
        else
            search = lowJSValue(m_node->child2());

        LValue result = vmCall(
            pointerType(), m_out.operation(operationStringProtoFuncReplaceGeneric), m_callFrame,
            lowJSValue(m_node->child1()), search,
            lowJSValue(m_node->child3()));

        setJSValue(result);
    }
11584
11585 void compileGetRegExpObjectLastIndex()
11586 {
11587 setJSValue(m_out.load64(lowRegExpObject(m_node->child1()), m_heaps.RegExpObject_lastIndex));
11588 }
11589
    // Stores a RegExpObject's lastIndex. Unless the node proves lastIndex is
    // writable, first speculate that the lastIndexIsNotWritable flag (kept in the
    // low bits of the regExp pointer field) is clear.
    void compileSetRegExpObjectLastIndex()
    {
        if (!m_node->ignoreLastIndexIsWritable()) {
            LValue regExp = lowRegExpObject(m_node->child1());
            LValue value = lowJSValue(m_node->child2());

            // OSR-exit if lastIndex has been made non-writable.
            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.testNonZeroPtr(
                    m_out.loadPtr(regExp, m_heaps.RegExpObject_regExpAndLastIndexIsNotWritableFlag),
                    m_out.constIntPtr(RegExpObject::lastIndexIsNotWritableFlag)));

            m_out.store64(value, regExp, m_heaps.RegExpObject_lastIndex);
            return;
        }

        // Writability is proven: store unconditionally.
        m_out.store64(lowJSValue(m_node->child2()), lowCell(m_node->child1()), m_heaps.RegExpObject_lastIndex);
    }
11608
    // Logs a ShadowChicken packet at function entry so the debugger can
    // reconstruct the shadow stack (including tail-deleted frames).
    void compileLogShadowChickenPrologue()
    {
        LValue packet = ensureShadowChickenPacket();
        LValue scope = lowCell(m_node->child1());

        m_out.storePtr(m_callFrame, packet, m_heaps.ShadowChicken_Packet_frame);
        // Call frame slot 0 holds the caller frame pointer.
        m_out.storePtr(m_out.loadPtr(addressFor(0)), packet, m_heaps.ShadowChicken_Packet_callerFrame);
        m_out.storePtr(m_out.loadPtr(payloadFor(CallFrameSlot::callee)), packet, m_heaps.ShadowChicken_Packet_callee);
        m_out.storePtr(scope, packet, m_heaps.ShadowChicken_Packet_scope);
    }
11619
    // Logs a ShadowChicken "tail" packet just before a tail call, recording the
    // frame state that the tail call is about to destroy.
    void compileLogShadowChickenTail()
    {
        LValue packet = ensureShadowChickenPacket();
        LValue thisValue = lowJSValue(m_node->child1());
        LValue scope = lowCell(m_node->child2());
        CallSiteIndex callSiteIndex = m_ftlState.jitCode->common.addCodeOrigin(m_node->origin.semantic);

        m_out.storePtr(m_callFrame, packet, m_heaps.ShadowChicken_Packet_frame);
        // The tail marker in the callee slot identifies this as a tail packet.
        m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(ShadowChicken::Packet::tailMarker())), packet, m_heaps.ShadowChicken_Packet_callee);
        m_out.store64(thisValue, packet, m_heaps.ShadowChicken_Packet_thisValue);
        m_out.storePtr(scope, packet, m_heaps.ShadowChicken_Packet_scope);
        // We don't want the CodeBlock to have a weak pointer to itself because
        // that would cause it to always get collected.
        m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), packet, m_heaps.ShadowChicken_Packet_codeBlock);
        m_out.store32(m_out.constInt32(callSiteIndex.bits()), packet, m_heaps.ShadowChicken_Packet_callSiteIndex);
    }
11636
11637 void compileRecordRegExpCachedResult()
11638 {
11639 Edge globalObjectEdge = m_graph.varArgChild(m_node, 0);
11640 Edge regExpEdge = m_graph.varArgChild(m_node, 1);
11641 Edge stringEdge = m_graph.varArgChild(m_node, 2);
11642 Edge startEdge = m_graph.varArgChild(m_node, 3);
11643 Edge endEdge = m_graph.varArgChild(m_node, 4);
11644
11645 LValue globalObject = lowCell(globalObjectEdge);
11646 LValue regExp = lowCell(regExpEdge);
11647 LValue string = lowCell(stringEdge);
11648 LValue start = lowInt32(startEdge);
11649 LValue end = lowInt32(endEdge);
11650
11651 m_out.storePtr(regExp, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_lastRegExp);
11652 m_out.storePtr(string, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_lastInput);
11653 m_out.store32(start, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_result_start);
11654 m_out.store32(end, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_result_end);
11655 m_out.store32As8(
11656 m_out.constInt32(0),
11657 m_out.address(globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_reified));
11658 }
11659
11660 struct ArgumentsLength {
11661 ArgumentsLength()
11662 : isKnown(false)
11663 , known(UINT_MAX)
11664 , value(nullptr)
11665 {
11666 }
11667
11668 bool isKnown;
11669 unsigned known;
11670 LValue value;
11671 };
    // Computes arguments.length for the given frame. For a non-varargs inline
    // frame the count is a compile-time constant; otherwise it is loaded from the
    // frame's argument-count slot.
    ArgumentsLength getArgumentsLength(InlineCallFrame* inlineCallFrame)
    {
        ArgumentsLength length;

        if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
            // Subtract one to exclude `this` from the count.
            length.known = inlineCallFrame->argumentCountIncludingThis - 1;
            length.isKnown = true;
            length.value = m_out.constInt32(length.known);
        } else {
            length.known = UINT_MAX;
            length.isKnown = false;

            VirtualRegister argumentCountRegister;
            if (!inlineCallFrame)
                argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCount);
            else
                argumentCountRegister = inlineCallFrame->argumentCountRegister;
            // Load the runtime count and subtract one for `this`.
            length.value = m_out.sub(m_out.load32(payloadFor(argumentCountRegister)), m_out.int32One);
        }

        return length;
    }
11694
11695 ArgumentsLength getArgumentsLength()
11696 {
11697 return getArgumentsLength(m_node->origin.semantic.inlineCallFrame());
11698 }
11699
    // Returns the callee of the current (possibly inlined) frame: a constant weak
    // pointer when it is statically known, otherwise a load from the frame.
    LValue getCurrentCallee()
    {
        if (InlineCallFrame* frame = m_node->origin.semantic.inlineCallFrame()) {
            // Closure calls have a different callee per invocation, so load it
            // from the recovery slot.
            if (frame->isClosureCall)
                return m_out.loadPtr(addressFor(frame->calleeRecovery.virtualRegister()));
            return weakPointer(frame->calleeRecovery.constant().asCell());
        }
        // Not inlined: the callee lives in the machine call frame.
        return m_out.loadPtr(addressFor(CallFrameSlot::callee));
    }
11709
11710 LValue getArgumentsStart(InlineCallFrame* inlineCallFrame, unsigned offset = 0)
11711 {
11712 VirtualRegister start = AssemblyHelpers::argumentsStart(inlineCallFrame) + offset;
11713 return addressFor(start).value();
11714 }
11715
11716 LValue getArgumentsStart()
11717 {
11718 return getArgumentsStart(m_node->origin.semantic.inlineCallFrame());
11719 }
11720
    // Speculates that structureDiscriminant matches one of the structures in
    // `set`, OSR-exiting with `exitKind` otherwise. `weakStructureDiscriminant`
    // maps a RegisteredStructure to the LValue to compare against.
    template<typename Functor>
    void checkStructure(
        LValue structureDiscriminant, const FormattedValue& formattedValue, ExitKind exitKind,
        const RegisteredStructureSet& set, const Functor& weakStructureDiscriminant)
    {
        // Empty set: this code path can never pass the check.
        if (set.isEmpty()) {
            terminate(exitKind);
            return;
        }

        // Single structure: one inequality speculation suffices.
        if (set.size() == 1) {
            speculate(
                exitKind, formattedValue, 0,
                m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set[0])));
            return;
        }

        // Multiple structures: chain of compare-and-branch blocks; only the last
        // candidate speculates instead of branching.
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
        for (unsigned i = 0; i < set.size() - 1; ++i) {
            LBasicBlock nextStructure = m_out.newBlock();
            m_out.branch(
                m_out.equal(structureDiscriminant, weakStructureDiscriminant(set[i])),
                unsure(continuation), unsure(nextStructure));
            m_out.appendTo(nextStructure);
        }

        speculate(
            exitKind, formattedValue, 0,
            m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set.last())));

        m_out.jump(continuation);
        m_out.appendTo(continuation, lastNext);
    }
11756
    // Converts a boxed JSValue to int32 under either NumberUse or NotCellUse
    // semantics. Int32s unbox directly; doubles truncate via doubleToInt32; under
    // NotCellUse, remaining non-cell values map to 1 for true and 0 otherwise.
    LValue numberOrNotCellToInt32(Edge edge, LValue value)
    {
        LBasicBlock intCase = m_out.newBlock();
        LBasicBlock notIntCase = m_out.newBlock();
        LBasicBlock doubleCase = 0;
        LBasicBlock notNumberCase = 0;
        // Extra blocks are only needed when non-number non-cells are possible.
        if (edge.useKind() == NotCellUse) {
            doubleCase = m_out.newBlock();
            notNumberCase = m_out.newBlock();
        }
        LBasicBlock continuation = m_out.newBlock();

        Vector<ValueFromBlock> results;

        m_out.branch(isNotInt32(value), unsure(notIntCase), unsure(intCase));

        LBasicBlock lastNext = m_out.appendTo(intCase, notIntCase);
        results.append(m_out.anchor(unboxInt32(value)));
        m_out.jump(continuation);

        if (edge.useKind() == NumberUse) {
            // NumberUse: a non-int32 value must be a double, modulo the type check.
            m_out.appendTo(notIntCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBytecodeNumber, isCellOrMisc(value));
            results.append(m_out.anchor(doubleToInt32(unboxDouble(value))));
            m_out.jump(continuation);
        } else {
            // NotCellUse: distinguish doubles from other non-cell values.
            m_out.appendTo(notIntCase, doubleCase);
            m_out.branch(
                isCellOrMisc(value, provenType(edge)), unsure(notNumberCase), unsure(doubleCase));

            m_out.appendTo(doubleCase, notNumberCase);
            results.append(m_out.anchor(doubleToInt32(unboxDouble(value))));
            m_out.jump(continuation);

            m_out.appendTo(notNumberCase, continuation);

            // Still speculate that the value is not a cell.
            FTL_TYPE_CHECK(jsValueValue(value), edge, ~SpecCellCheck, isCell(value));

            // ToInt32 of the remaining values: true -> 1; false/null/undefined -> 0.
            LValue specialResult = m_out.select(
                m_out.equal(value, m_out.constInt64(JSValue::encode(jsBoolean(true)))),
                m_out.int32One, m_out.int32Zero);
            results.append(m_out.anchor(specialResult));
            m_out.jump(continuation);
        }

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, results);
    }
11805
11806 LValue loadProperty(LValue storage, unsigned identifierNumber, PropertyOffset offset)
11807 {
11808 return m_out.load64(addressOfProperty(storage, identifierNumber, offset));
11809 }
11810
11811 void storeProperty(
11812 LValue value, LValue storage, unsigned identifierNumber, PropertyOffset offset)
11813 {
11814 m_out.store64(value, addressOfProperty(storage, identifierNumber, offset));
11815 }
11816
11817 TypedPointer addressOfProperty(
11818 LValue storage, unsigned identifierNumber, PropertyOffset offset)
11819 {
11820 return m_out.address(
11821 m_heaps.properties[identifierNumber], storage, offsetRelativeToBase(offset));
11822 }
11823
    // Returns the storage base into which a property should be written when
    // transitioning from previousStructure to nextStructure, (re)allocating
    // out-of-line storage and installing the new butterfly when capacity grows.
    LValue storageForTransition(
        LValue object, PropertyOffset offset,
        Structure* previousStructure, Structure* nextStructure)
    {
        // Inline properties live directly in the object cell.
        if (isInlineOffset(offset))
            return object;

        // Same out-of-line capacity: reuse the existing butterfly.
        if (previousStructure->outOfLineCapacity() == nextStructure->outOfLineCapacity())
            return m_out.loadPtr(object, m_heaps.JSObject_butterfly);

        LValue result;
        if (!previousStructure->outOfLineCapacity())
            result = allocatePropertyStorage(object, previousStructure);
        else {
            result = reallocatePropertyStorage(
                object, m_out.loadPtr(object, m_heaps.JSObject_butterfly),
                previousStructure, nextStructure);
        }

        // Install the new butterfly with the structure nuked so a concurrent
        // reader never sees a mismatched structure/butterfly pair.
        nukeStructureAndSetButterfly(result, object);
        return result;
    }
11846
    // Fills butterfly elements [begin, end) with the hole value for the given
    // indexing type: PNaN for double storage, empty JSValue otherwise. Undecided
    // storage needs no initialization.
    void initializeArrayElements(LValue indexingType, LValue begin, LValue end, LValue butterfly)
    {

        if (begin == end)
            return;

        if (indexingType->hasInt32()) {
            // Indexing type known at compile time: pick the hole constant and the
            // precise abstract heap statically.
            IndexingType rawIndexingType = static_cast<IndexingType>(indexingType->asInt32());
            if (hasUndecided(rawIndexingType))
                return;
            IndexedAbstractHeap* heap = m_heaps.forIndexingType(rawIndexingType);
            DFG_ASSERT(m_graph, m_node, heap);

            LValue hole;
            if (hasDouble(rawIndexingType))
                hole = m_out.constInt64(bitwise_cast<int64_t>(PNaN));
            else
                hole = m_out.constInt64(JSValue::encode(JSValue()));

            splatWords(butterfly, begin, end, hole, heap->atAnyIndex());
        } else {
            // Indexing type known only at run time: select the hole dynamically
            // and use the root heap since the exact target heap is unknown.
            LValue hole = m_out.select(
                m_out.equal(m_out.bitAnd(indexingType, m_out.constInt32(IndexingShapeMask)), m_out.constInt32(DoubleShape)),
                m_out.constInt64(bitwise_cast<int64_t>(PNaN)),
                m_out.constInt64(JSValue::encode(JSValue())));
            splatWords(butterfly, begin, end, hole, m_heaps.root);
        }
    }
11875
    // Stores |value| into the 64-bit words of |base| at indices [begin, end).
    // Small, fully-constant ranges are unrolled; otherwise a loop is emitted
    // whose index counts down from |end| to |begin| while the store pointer
    // walks upward from base + begin * 8.
    void splatWords(LValue base, LValue begin, LValue end, LValue value, const AbstractHeap& heap)
    {
        const uint64_t unrollingLimit = 10;
        if (begin->hasInt() && end->hasInt()) {
            uint64_t beginConst = static_cast<uint64_t>(begin->asInt());
            uint64_t endConst = static_cast<uint64_t>(end->asInt());

            // Note: begin/end may be negative (callers pass negated property
            // slot indices); the unsigned subtraction still yields the
            // iteration count, and i * 8 wraps to the right negative offset.
            if (endConst - beginConst <= unrollingLimit) {
                for (uint64_t i = beginConst; i < endConst; ++i) {
                    LValue pointer = m_out.add(base, m_out.constIntPtr(i * sizeof(uint64_t)));
                    m_out.store64(value, TypedPointer(heap, pointer));
                }
                return;
            }
        }

        LBasicBlock initLoop = m_out.newBlock();
        LBasicBlock initDone = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(initLoop);

        // The index phi starts at |end| and is decremented each iteration;
        // it only counts iterations. The pointer phi does the addressing.
        ValueFromBlock originalIndex = m_out.anchor(end);
        ValueFromBlock originalPointer = m_out.anchor(
            m_out.add(base, m_out.shl(m_out.signExt32ToPtr(begin), m_out.constInt32(3))));
        m_out.branch(m_out.notEqual(end, begin), unsure(initLoop), unsure(initDone));

        m_out.appendTo(initLoop, initDone);
        LValue index = m_out.phi(Int32, originalIndex);
        LValue pointer = m_out.phi(pointerType(), originalPointer);

        m_out.store64(value, TypedPointer(heap, pointer));

        // Loop until the countdown reaches |begin|, i.e. after exactly
        // (end - begin) stores.
        LValue nextIndex = m_out.sub(index, m_out.int32One);
        m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
        m_out.addIncomingToPhi(pointer, m_out.anchor(m_out.add(pointer, m_out.intPtrEight)));
        m_out.branch(
            m_out.notEqual(nextIndex, begin), unsure(initLoop), unsure(initDone));

        m_out.appendTo(initDone, lastNext);
    }
11916
    // Allocates initial out-of-line property storage for an object that has
    // none. Structures that could have an indexing header must take the
    // runtime path; otherwise we inline-allocate and zero the new slots.
    LValue allocatePropertyStorage(LValue object, Structure* previousStructure)
    {
        if (previousStructure->couldHaveIndexingHeader()) {
            return vmCall(
                pointerType(),
                m_out.operation(operationAllocateComplexPropertyStorageWithInitialCapacity),
                m_callFrame, object);
        }

        LValue result = allocatePropertyStorageWithSizeImpl(initialOutOfLineCapacity);

        // Property slots live at negative indices relative to the butterfly
        // pointer; zero the freshly allocated ones.
        splatWords(
            result,
            m_out.constInt32(-initialOutOfLineCapacity - 1), m_out.constInt32(-1),
            m_out.int64Zero, m_heaps.properties.atAnyNumber());

        return result;
    }
11935
    // Grows an object's out-of-line property storage by outOfLineGrowthFactor,
    // copying the existing out-of-line properties into the new butterfly and
    // zeroing the newly added slots. The old storage is left to the GC.
    LValue reallocatePropertyStorage(
        LValue object, LValue oldStorage, Structure* previous, Structure* next)
    {
        size_t oldSize = previous->outOfLineCapacity();
        size_t newSize = oldSize * outOfLineGrowthFactor;

        ASSERT_UNUSED(next, newSize == next->outOfLineCapacity());

        if (previous->couldHaveIndexingHeader()) {
            LValue newAllocSize = m_out.constIntPtr(newSize);
            return vmCall(pointerType(), m_out.operation(operationAllocateComplexPropertyStorage), m_callFrame, object, newAllocSize);
        }

        LValue result = allocatePropertyStorageWithSizeImpl(newSize);

        // Out-of-line properties live at negative offsets below the butterfly
        // pointer; copy the old ones word by word. Both subtractions here
        // deliberately wrap to negative ptrdiff_t values.
        ptrdiff_t headerSize = -sizeof(IndexingHeader) - sizeof(void*);
        ptrdiff_t endStorage = headerSize - static_cast<ptrdiff_t>(oldSize * sizeof(JSValue));

        for (ptrdiff_t offset = headerSize; offset > endStorage; offset -= sizeof(void*)) {
            LValue loaded =
                m_out.loadPtr(m_out.address(m_heaps.properties.atAnyNumber(), oldStorage, offset));
            m_out.storePtr(loaded, m_out.address(m_heaps.properties.atAnyNumber(), result, offset));
        }

        // Zero the slots that exist in the new storage but not the old.
        splatWords(
            result,
            m_out.constInt32(-newSize - 1), m_out.constInt32(-oldSize - 1),
            m_out.int64Zero, m_heaps.properties.atAnyNumber());

        return result;
    }
11967
    // Allocates |sizeInValues| out-of-line property slots from the JSValue
    // gigacage auxiliary space, returning a butterfly pointer (which points
    // past the end of the allocation; slots are addressed at negative
    // offsets). Falls back to a lazy slow path that calls into the runtime.
    LValue allocatePropertyStorageWithSizeImpl(size_t sizeInValues)
    {
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        size_t sizeInBytes = sizeInValues * sizeof(JSValue);
        Allocator allocator = vm().jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(sizeInBytes, AllocatorForMode::AllocatorIfExists);
        LValue startOfStorage = allocateHeapCell(
            m_out.constIntPtr(allocator.localAllocator()), slowPath);
        // Convert the allocation's start into a butterfly pointer: skip past
        // the payload and the indexing header.
        ValueFromBlock fastButterfly = m_out.anchor(
            m_out.add(m_out.constIntPtr(sizeInBytes + sizeof(IndexingHeader)), startOfStorage));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        LValue slowButterflyValue;
        VM& vm = this->vm();
        if (sizeInValues == initialOutOfLineCapacity) {
            // Initial capacity has a specialized operation that needs no size
            // argument.
            slowButterflyValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationAllocateSimplePropertyStorageWithInitialCapacity,
                        locations[0].directGPR());
                });
        } else {
            slowButterflyValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationAllocateSimplePropertyStorage, locations[0].directGPR(),
                        CCallHelpers::TrustedImmPtr(sizeInValues));
                });
        }
        ValueFromBlock slowButterfly = m_out.anchor(slowButterflyValue);

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        return m_out.phi(pointerType(), fastButterfly, slowButterfly);
    }
12010
    // Emits an inline cache for a property load (the GetById family). A
    // patchpoint carries the base value and pins the tag registers; the IC
    // fast path is generated inline and the slow path, emitted late, calls
    // the appropriate optimizing thunk which can repatch the IC.
    LValue getById(LValue base, AccessType type)
    {
        Node* node = m_node;
        UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()];

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));

        // FIXME: If this is a GetByIdFlush/GetByIdDirectFlush, we might get some performance boost if we claim that it
        // clobbers volatile registers late. It's not necessary for correctness, though, since the
        // IC code is super smart about saving registers.
        // https://bugs.webkit.org/show_bug.cgi?id=152848

        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // This is the exit for call IC's created by the getById for getters. We don't have
                // to do anything weird other than call this, since it will associate the exit with
                // the callsite index.
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // params[0] holds the result; params[1] holds the base.
                auto generator = Box<JITGetByIdGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), uid, JSValueRegs(params[1].gpr()),
                    JSValueRegs(params[0].gpr()), type);

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        J_JITOperation_ESsiJI optimizationFunction = appropriateOptimizingGetByIdFunction(type);

                        // Slow path: call the optimizing thunk, then jump back
                        // to the instruction after the fast path.
                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), optimizationFunction, params[0].gpr(),
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                            CCallHelpers::TrustedImmPtr(uid)).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });

        return patchpoint;
    }
12082
    // Like getById(), but for GetByIdWithThis: the IC receives both the base
    // and the explicit |this| value, and always uses AccessType::GetWithThis.
    LValue getByIdWithThis(LValue base, LValue thisValue)
    {
        Node* node = m_node;
        UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()];

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        patchpoint->appendSomeRegister(thisValue);
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));

        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // This is the exit for call IC's created by the getById for getters. We don't have
                // to do anything weird other than call this, since it will associate the exit with
                // the callsite index.
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // params[0] holds the result; params[1] the base; params[2]
                // the explicit this value.
                auto generator = Box<JITGetByIdWithThisGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), uid, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), AccessType::GetWithThis);

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        J_JITOperation_ESsiJJI optimizationFunction = operationGetByIdWithThisOptimize;

                        // Slow path: call the optimizing thunk, then resume
                        // after the fast path.
                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), optimizationFunction, params[0].gpr(),
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                            params[2].gpr(), CCallHelpers::TrustedImmPtr(uid)).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });

        return patchpoint;
    }
12150
12151 LValue isFastTypedArray(LValue object)
12152 {
12153 return m_out.equal(
12154 m_out.load32(object, m_heaps.JSArrayBufferView_mode),
12155 m_out.constInt32(FastTypedArray));
12156 }
12157
12158 TypedPointer baseIndex(IndexedAbstractHeap& heap, LValue storage, LValue index, Edge edge, ptrdiff_t offset = 0)
12159 {
12160 return m_out.baseIndex(
12161 heap, storage, m_out.zeroExtPtr(index), provenValue(edge), offset);
12162 }
12163
    // Lowers a comparison node by dispatching on the proven use kinds of its
    // operands: inline machine comparisons for int32/int52/double, a
    // side-effect-free call for atomic (identifier) strings, a full VM call
    // for general strings, and the generic path for untyped operands.
    template<typename IntFunctor, typename DoubleFunctor>
    void compare(
        const IntFunctor& intFunctor, const DoubleFunctor& doubleFunctor,
        C_JITOperation_TT stringIdentFunction,
        C_JITOperation_B_EJssJss stringFunction,
        S_JITOperation_EJJ fallbackFunction)
    {
        if (m_node->isBinaryUseKind(Int32Use)) {
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());
            setBoolean(intFunctor(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(Int52RepUse)) {
            // Lower child1 in whichever Int52 representation it already has,
            // then force child2 into the same kind so the compare is valid.
            Int52Kind kind;
            LValue left = lowWhicheverInt52(m_node->child1(), kind);
            LValue right = lowInt52(m_node->child2(), kind);
            setBoolean(intFunctor(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(DoubleRepUse)) {
            LValue left = lowDouble(m_node->child1());
            LValue right = lowDouble(m_node->child2());
            setBoolean(doubleFunctor(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(StringIdentUse)) {
            // Atomic strings can be compared by a pure helper; no VM call
            // bookkeeping needed.
            LValue left = lowStringIdent(m_node->child1());
            LValue right = lowStringIdent(m_node->child2());
            setBoolean(m_out.callWithoutSideEffects(Int32, stringIdentFunction, left, right));
            return;
        }

        if (m_node->isBinaryUseKind(StringUse)) {
            LValue left = lowCell(m_node->child1());
            LValue right = lowCell(m_node->child2());
            speculateString(m_node->child1(), left);
            speculateString(m_node->child2(), right);

            LValue result = vmCall(
                Int32, m_out.operation(stringFunction),
                m_callFrame, left, right);
            setBoolean(result);
            return;
        }

        // Anything else must be the untyped case.
        DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse), m_node->child1().useKind(), m_node->child2().useKind());
        nonSpeculativeCompare(intFunctor, fallbackFunction);
    }
12216
    // Lowers StringSlice. Fast paths: an empty result returns the shared
    // empty string; a one-character result of an 8/16-bit non-rope string is
    // fetched from the VM's single-character string cache (characters above
    // maxSingleCharacterString go to the runtime). Longer substrings and rope
    // strings fall back to VM calls.
    void compileStringSlice()
    {
        LBasicBlock lengthCheckCase = m_out.newBlock();
        LBasicBlock emptyCase = m_out.newBlock();
        LBasicBlock notEmptyCase = m_out.newBlock();
        LBasicBlock oneCharCase = m_out.newBlock();
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock bitsContinuation = m_out.newBlock();
        LBasicBlock bigCharacter = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock ropeSlowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue string = lowString(m_node->child1());
        LValue start = lowInt32(m_node->child2());
        LValue end = nullptr;
        if (m_node->child3())
            end = lowInt32(m_node->child3());
        else
            end = m_out.constInt32(std::numeric_limits<int32_t>::max());
        // Rope strings have no flat buffer; use the runtime.
        m_out.branch(isRopeString(string, m_node->child1()), rarely(ropeSlowCase), usually(lengthCheckCase));

        LBasicBlock lastNext = m_out.appendTo(lengthCheckCase, emptyCase);
        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
        LValue length = m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length);
        // Clamp start/end to [0, length], resolving negative indices.
        auto range = populateSliceRange(start, end, length);
        LValue from = range.first;
        LValue to = range.second;
        LValue span = m_out.sub(to, from);
        m_out.branch(m_out.lessThanOrEqual(span, m_out.int32Zero), unsure(emptyCase), unsure(notEmptyCase));

        Vector<ValueFromBlock, 5> results;

        m_out.appendTo(emptyCase, notEmptyCase);
        results.append(m_out.anchor(weakPointer(jsEmptyString(&vm()))));
        m_out.jump(continuation);

        m_out.appendTo(notEmptyCase, oneCharCase);
        m_out.branch(m_out.equal(span, m_out.int32One), unsure(oneCharCase), unsure(slowCase));

        m_out.appendTo(oneCharCase, is8Bit);
        LValue storage = m_out.loadPtr(stringImpl, m_heaps.StringImpl_data);
        // Dispatch on the impl's is8Bit flag to pick the character width.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        m_out.appendTo(is8Bit, is16Bit);
        ValueFromBlock char8Bit = m_out.anchor(m_out.load8ZeroExt32(m_out.baseIndex(m_heaps.characters8, storage, m_out.zeroExtPtr(from))));
        m_out.jump(bitsContinuation);

        m_out.appendTo(is16Bit, bigCharacter);
        LValue char16BitValue = m_out.load16ZeroExt32(m_out.baseIndex(m_heaps.characters16, storage, m_out.zeroExtPtr(from)));
        ValueFromBlock char16Bit = m_out.anchor(char16BitValue);
        // Characters beyond the single-character cache go to the runtime.
        m_out.branch(
            m_out.above(char16BitValue, m_out.constInt32(maxSingleCharacterString)),
            rarely(bigCharacter), usually(bitsContinuation));

        m_out.appendTo(bigCharacter, bitsContinuation);
        results.append(m_out.anchor(vmCall(
            Int64, m_out.operation(operationSingleCharacterString),
            m_callFrame, char16BitValue)));
        m_out.jump(continuation);

        // Cached single-character strings are looked up by code unit.
        m_out.appendTo(bitsContinuation, slowCase);
        LValue character = m_out.phi(Int32, char8Bit, char16Bit);
        LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());
        results.append(m_out.anchor(m_out.loadPtr(m_out.baseIndex(
            m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(character)))));
        m_out.jump(continuation);

        // General substring: the range is already resolved, so call substr.
        m_out.appendTo(slowCase, ropeSlowCase);
        results.append(m_out.anchor(vmCall(pointerType(), m_out.operation(operationStringSubstr), m_callFrame, string, from, span)));
        m_out.jump(continuation);

        // Rope path: pass the raw start/end; the runtime does the clamping.
        m_out.appendTo(ropeSlowCase, continuation);
        results.append(m_out.anchor(vmCall(pointerType(), m_out.operation(operationStringSlice), m_callFrame, string, start, end)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), results));
    }
12301
    // Lowers ToLowerCase. Fast path: an 8-bit, non-rope string that contains
    // only ASCII characters with no upper-case letters can be returned
    // unchanged. The loop scans for the first character violating that; the
    // slow path receives the index reached so the runtime can resume there.
    void compileToLowerCase()
    {
        LBasicBlock notRope = m_out.newBlock();
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock loopTop = m_out.newBlock();
        LBasicBlock loopBody = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue string = lowString(m_node->child1());
        ValueFromBlock startIndex = m_out.anchor(m_out.constInt32(0));
        ValueFromBlock startIndexForCall = m_out.anchor(m_out.constInt32(0));
        // Ropes have no flat buffer to scan; go straight to the runtime.
        m_out.branch(isRopeString(string, m_node->child1()),
            unsure(slowPath), unsure(notRope));

        LBasicBlock lastNext = m_out.appendTo(notRope, is8Bit);
        LValue impl = m_out.loadPtr(string, m_heaps.JSString_value);
        // 16-bit strings also take the slow path.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(impl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(slowPath), unsure(is8Bit));

        m_out.appendTo(is8Bit, loopTop);
        LValue length = m_out.load32(impl, m_heaps.StringImpl_length);
        LValue buffer = m_out.loadPtr(impl, m_heaps.StringImpl_data);
        ValueFromBlock fastResult = m_out.anchor(string);
        m_out.jump(loopTop);

        m_out.appendTo(loopTop, loopBody);
        LValue index = m_out.phi(Int32, startIndex);
        ValueFromBlock indexFromBlock = m_out.anchor(index);
        m_out.branch(m_out.below(index, length),
            unsure(loopBody), unsure(continuation));

        m_out.appendTo(loopBody, slowPath);

        // FIXME: Strings needs to be caged.
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        LValue byte = m_out.load8ZeroExt32(m_out.baseIndex(m_heaps.characters8, buffer, m_out.zeroExtPtr(index)));
        // A character forces the slow path if it is non-ASCII (high bit set)
        // or an upper-case ASCII letter.
        LValue isInvalidAsciiRange = m_out.bitAnd(byte, m_out.constInt32(~0x7F));
        LValue isUpperCase = m_out.belowOrEqual(m_out.sub(byte, m_out.constInt32('A')), m_out.constInt32('Z' - 'A'));
        LValue isBadCharacter = m_out.bitOr(isInvalidAsciiRange, isUpperCase);
        m_out.addIncomingToPhi(index, m_out.anchor(m_out.add(index, m_out.int32One)));
        m_out.branch(isBadCharacter, unsure(slowPath), unsure(loopTop));

        // Slow path index: 0 when we never scanned, otherwise where we stopped.
        m_out.appendTo(slowPath, continuation);
        LValue slowPathIndex = m_out.phi(Int32, startIndexForCall, indexFromBlock);
        ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), m_out.operation(operationToLowerCase), m_callFrame, string, slowPathIndex));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
12356
12357 void compileNumberToStringWithRadix()
12358 {
12359 bool validRadixIsGuaranteed = false;
12360 if (m_node->child2()->isInt32Constant()) {
12361 int32_t radix = m_node->child2()->asInt32();
12362 if (radix >= 2 && radix <= 36)
12363 validRadixIsGuaranteed = true;
12364 }
12365
12366 switch (m_node->child1().useKind()) {
12367 case Int32Use:
12368 setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationInt32ToStringWithValidRadix : operationInt32ToString), m_callFrame, lowInt32(m_node->child1()), lowInt32(m_node->child2())));
12369 break;
12370 case Int52RepUse:
12371 setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationInt52ToStringWithValidRadix : operationInt52ToString), m_callFrame, lowStrictInt52(m_node->child1()), lowInt32(m_node->child2())));
12372 break;
12373 case DoubleRepUse:
12374 setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationDoubleToStringWithValidRadix : operationDoubleToString), m_callFrame, lowDouble(m_node->child1()), lowInt32(m_node->child2())));
12375 break;
12376 default:
12377 RELEASE_ASSERT_NOT_REACHED();
12378 }
12379 }
12380
12381 void compileNumberToStringWithValidRadixConstant()
12382 {
12383 switch (m_node->child1().useKind()) {
12384 case Int32Use:
12385 setJSValue(vmCall(pointerType(), m_out.operation(operationInt32ToStringWithValidRadix), m_callFrame, lowInt32(m_node->child1()), m_out.constInt32(m_node->validRadixConstant())));
12386 break;
12387 case Int52RepUse:
12388 setJSValue(vmCall(pointerType(), m_out.operation(operationInt52ToStringWithValidRadix), m_callFrame, lowStrictInt52(m_node->child1()), m_out.constInt32(m_node->validRadixConstant())));
12389 break;
12390 case DoubleRepUse:
12391 setJSValue(vmCall(pointerType(), m_out.operation(operationDoubleToStringWithValidRadix), m_callFrame, lowDouble(m_node->child1()), m_out.constInt32(m_node->validRadixConstant())));
12392 break;
12393 default:
12394 RELEASE_ASSERT_NOT_REACHED();
12395 }
12396 }
12397
12398 void compileResolveScopeForHoistingFuncDeclInEval()
12399 {
12400 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
12401 setJSValue(vmCall(pointerType(), m_out.operation(operationResolveScopeForHoistingFuncDeclInEval), m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(uid)));
12402 }
12403
12404 void compileResolveScope()
12405 {
12406 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
12407 setJSValue(vmCall(pointerType(), m_out.operation(operationResolveScope),
12408 m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(uid)));
12409 }
12410
12411 void compileGetDynamicVar()
12412 {
12413 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
12414 setJSValue(vmCall(Int64, m_out.operation(operationGetDynamicVar),
12415 m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(uid), m_out.constInt32(m_node->getPutInfo())));
12416 }
12417
12418 void compilePutDynamicVar()
12419 {
12420 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
12421 setJSValue(vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutDynamicVarStrict : operationPutDynamicVarNonStrict),
12422 m_callFrame, lowCell(m_node->child1()), lowJSValue(m_node->child2()), m_out.constIntPtr(uid), m_out.constInt32(m_node->getPutInfo())));
12423 }
12424
12425 void compileUnreachable()
12426 {
12427 // It's so tempting to assert that AI has proved that this is unreachable. But that's
12428 // simply not a requirement of the Unreachable opcode at all. If you emit an opcode that
12429 // *you* know will not return, then it's fine to end the basic block with Unreachable
12430 // after that opcode. You don't have to also prove to AI that your opcode does not return.
12431 // Hence, there is nothing to do here but emit code that will crash, so that we catch
12432 // cases where you said Unreachable but you lied.
12433 //
12434 // It's also also worth noting that some clients emit this opcode because they're not 100% sure
12435 // if the code is unreachable, but they would really prefer if we crashed rather than kept going
12436 // if it did turn out to be reachable. Hence, this needs to deterministically crash.
12437
12438 crash();
12439 }
12440
12441 void compileCheckSubClass()
12442 {
12443 LValue cell = lowCell(m_node->child1());
12444
12445 const ClassInfo* classInfo = m_node->classInfo();
12446 if (!classInfo->checkSubClassSnippet) {
12447 LBasicBlock loop = m_out.newBlock();
12448 LBasicBlock parentClass = m_out.newBlock();
12449 LBasicBlock continuation = m_out.newBlock();
12450
12451 LValue structure = loadStructure(cell);
12452 LValue classInfo = m_out.loadPtr(structure, m_heaps.Structure_classInfo);
12453 ValueFromBlock otherAtStart = m_out.anchor(classInfo);
12454 m_out.jump(loop);
12455
12456 LBasicBlock lastNext = m_out.appendTo(loop, parentClass);
12457 LValue other = m_out.phi(pointerType(), otherAtStart);
12458 m_out.branch(m_out.equal(other, m_out.constIntPtr(classInfo)), unsure(continuation), unsure(parentClass));
12459
12460 m_out.appendTo(parentClass, continuation);
12461 LValue parent = m_out.loadPtr(other, m_heaps.ClassInfo_parentClass);
12462 speculate(BadType, jsValueValue(cell), m_node->child1().node(), m_out.isNull(parent));
12463 m_out.addIncomingToPhi(other, m_out.anchor(parent));
12464 m_out.jump(loop);
12465
12466 m_out.appendTo(continuation, lastNext);
12467 return;
12468 }
12469
12470 RefPtr<Snippet> domJIT = classInfo->checkSubClassSnippet();
12471 PatchpointValue* patchpoint = m_out.patchpoint(Void);
12472 patchpoint->appendSomeRegister(cell);
12473 patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
12474 patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
12475
12476 NodeOrigin origin = m_origin;
12477 unsigned osrExitArgumentOffset = patchpoint->numChildren();
12478 OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(jsValueValue(cell), m_node->child1().node());
12479 patchpoint->appendColdAnys(buildExitArguments(exitDescriptor, origin.forExit, jsValueValue(cell)));
12480
12481 patchpoint->numGPScratchRegisters = domJIT->numGPScratchRegisters;
12482 patchpoint->numFPScratchRegisters = domJIT->numFPScratchRegisters;
12483 patchpoint->clobber(RegisterSet::macroScratchRegisters());
12484
12485 State* state = &m_ftlState;
12486 Node* node = m_node;
12487 JSValue child1Constant = m_state.forNode(m_node->child1()).value();
12488
12489 patchpoint->setGenerator(
12490 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
12491 AllowMacroScratchRegisterUsage allowScratch(jit);
12492
12493 Vector<GPRReg> gpScratch;
12494 Vector<FPRReg> fpScratch;
12495 Vector<SnippetParams::Value> regs;
12496
12497 regs.append(SnippetParams::Value(params[0].gpr(), child1Constant));
12498
12499 for (unsigned i = 0; i < domJIT->numGPScratchRegisters; ++i)
12500 gpScratch.append(params.gpScratch(i));
12501
12502 for (unsigned i = 0; i < domJIT->numFPScratchRegisters; ++i)
12503 fpScratch.append(params.fpScratch(i));
12504
12505 RefPtr<OSRExitHandle> handle = exitDescriptor->emitOSRExitLater(*state, BadType, origin, params, osrExitArgumentOffset);
12506
12507 SnippetParams domJITParams(*state, params, node, nullptr, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
12508 CCallHelpers::JumpList failureCases = domJIT->generator()->run(jit, domJITParams);
12509
12510 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
12511 linkBuffer.link(failureCases, linkBuffer.locationOf<NoPtrTag>(handle->label));
12512 });
12513 });
12514 patchpoint->effects = Effects::forCheck();
12515 }
12516
    // Lowers CallDOM: calls the DOM function described by the node's DOMJIT
    // signature. The first child is always the |this| cell; subsequent
    // children are lowered according to signature->arguments. The switch
    // below supports one to three arguments including |this|.
    void compileCallDOM()
    {
        const DOMJIT::Signature* signature = m_node->signature();

        // FIXME: We should have a way to call functions with the vector of registers.
        // https://bugs.webkit.org/show_bug.cgi?id=163099
        Vector<LValue, JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS> operands;

        unsigned index = 0;
        DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, [&](Node*, Edge edge) {
            if (!index)
                operands.append(lowCell(edge));
            else {
                // Arguments after |this| are lowered per the signature's
                // declared speculated type.
                switch (signature->arguments[index - 1]) {
                case SpecString:
                    operands.append(lowString(edge));
                    break;
                case SpecInt32Only:
                    operands.append(lowInt32(edge));
                    break;
                case SpecBoolean:
                    operands.append(lowBoolean(edge));
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                    break;
                }
            }
            ++index;
        });

        unsigned argumentCountIncludingThis = signature->argumentCount + 1;
        LValue result;
        assertIsTaggedWith(reinterpret_cast<void*>(signature->unsafeFunction), CFunctionPtrTag);
        // Dispatch on arity, casting the raw function pointer to the matching
        // JIT-operation signature.
        switch (argumentCountIncludingThis) {
        case 1:
            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EP>(signature->unsafeFunction)), m_callFrame, operands[0]);
            break;
        case 2:
            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EPP>(signature->unsafeFunction)), m_callFrame, operands[0], operands[1]);
            break;
        case 3:
            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EPPP>(signature->unsafeFunction)), m_callFrame, operands[0], operands[1], operands[2]);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }

        setJSValue(result);
    }
12568
    // Lowers CallDOMGetter. When no DOMJIT snippet is available we call the
    // custom accessor getter directly (setting topCallFrame ourselves, since
    // the getter is not a JIT operation). Otherwise the snippet runs inside a
    // patchpoint with exception handling wired up.
    void compileCallDOMGetter()
    {
        DOMJIT::CallDOMGetterSnippet* domJIT = m_node->callDOMGetterData()->snippet;
        if (!domJIT) {
            // The following function is not an operation: we directly call a custom accessor getter.
            // Since the getter does not have code setting topCallFrame, As is the same to IC, we should set topCallFrame in caller side.
            m_out.storePtr(m_callFrame, m_out.absolute(&vm().topCallFrame));
            setJSValue(
                vmCall(Int64, m_out.operation(m_node->callDOMGetterData()->customAccessorGetter.retaggedExecutableAddress<CFunctionPtrTag>()),
                    m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(m_graph.identifiers()[m_node->callDOMGetterData()->identifierNumber])));
            return;
        }

        Edge& baseEdge = m_node->child1();
        LValue base = lowCell(baseEdge);
        JSValue baseConstant = m_state.forNode(baseEdge).value();

        // Some snippets also need the global object (child2).
        LValue globalObject;
        JSValue globalObjectConstant;
        if (domJIT->requireGlobalObject) {
            Edge& globalObjectEdge = m_node->child2();
            globalObject = lowCell(globalObjectEdge);
            globalObjectConstant = m_state.forNode(globalObjectEdge).value();
        }

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        if (domJIT->requireGlobalObject)
            patchpoint->appendSomeRegister(globalObject);
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->numGPScratchRegisters = domJIT->numGPScratchRegisters;
        patchpoint->numFPScratchRegisters = domJIT->numFPScratchRegisters;
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;

        State* state = &m_ftlState;
        Node* node = m_node;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Vector<GPRReg> gpScratch;
                Vector<FPRReg> fpScratch;
                Vector<SnippetParams::Value> regs;

                // params[0] is the result; params[1] the base; params[2] the
                // global object when required.
                regs.append(JSValueRegs(params[0].gpr()));
                regs.append(SnippetParams::Value(params[1].gpr(), baseConstant));
                if (domJIT->requireGlobalObject)
                    regs.append(SnippetParams::Value(params[2].gpr(), globalObjectConstant));

                for (unsigned i = 0; i < domJIT->numGPScratchRegisters; ++i)
                    gpScratch.append(params.gpScratch(i));

                for (unsigned i = 0; i < domJIT->numFPScratchRegisters; ++i)
                    fpScratch.append(params.fpScratch(i));

                Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                SnippetParams domJITParams(*state, params, node, exceptions, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
                domJIT->generator()->run(jit, domJITParams);
            });
        patchpoint->effects = Effects::forCall();
        setJSValue(patchpoint);
    }
12635
// Forwards the node to the abstract interpreter's IC-status filtering;
// emits no B3 code of its own.
void compileFilterICStatus()
{
    m_interpreter.filterICStatus(m_node);
}
12640
12641 LValue byteSwap32(LValue value)
12642 {
12643 // FIXME: teach B3 byteswap
12644 // https://bugs.webkit.org/show_bug.cgi?id=188759
12645
12646 RELEASE_ASSERT(value->type() == Int32);
12647 PatchpointValue* patchpoint = m_out.patchpoint(Int32);
12648 patchpoint->appendSomeRegister(value);
12649 patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
12650 jit.move(params[1].gpr(), params[0].gpr());
12651 jit.byteSwap32(params[0].gpr());
12652 });
12653 patchpoint->effects = Effects::none();
12654 return patchpoint;
12655 }
12656
12657 LValue byteSwap64(LValue value)
12658 {
12659 // FIXME: teach B3 byteswap
12660 // https://bugs.webkit.org/show_bug.cgi?id=188759
12661
12662 RELEASE_ASSERT(value->type() == Int64);
12663 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
12664 patchpoint->appendSomeRegister(value);
12665 patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
12666 jit.move(params[1].gpr(), params[0].gpr());
12667 jit.byteSwap64(params[0].gpr());
12668 });
12669 patchpoint->effects = Effects::none();
12670 return patchpoint;
12671 }
12672
// Emits a runtime branch on |isLittleEndian| (bit 0 selects endianness) and
// runs one generator on each side. Both generators must either produce
// values of the same type (merged with a phi) or both produce nullptr
// (pure side effects), in which case nullptr is returned.
template <typename F1, typename F2>
LValue emitCodeBasedOnEndiannessBranch(LValue isLittleEndian, const F1& emitLittleEndianCode, const F2& emitBigEndianCode)
{
    LType type;

    LBasicBlock bigEndianCase = m_out.newBlock();
    LBasicBlock littleEndianCase = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    // Bit 0 clear means big-endian.
    m_out.branch(m_out.testIsZero32(isLittleEndian, m_out.constInt32(1)),
        unsure(bigEndianCase), unsure(littleEndianCase));

    LBasicBlock lastNext = m_out.appendTo(bigEndianCase, littleEndianCase);
    LValue bigEndianValue = emitBigEndianCode();
    type = bigEndianValue ? bigEndianValue->type() : Void;
    ValueFromBlock bigEndianResult = bigEndianValue ? m_out.anchor(bigEndianValue) : ValueFromBlock();
    m_out.jump(continuation);

    m_out.appendTo(littleEndianCase, continuation);
    LValue littleEndianValue = emitLittleEndianCode();
    ValueFromBlock littleEndianResult = littleEndianValue ? m_out.anchor(littleEndianValue) : ValueFromBlock();
    // Either both generators produced no value, or their types must agree.
    RELEASE_ASSERT((!littleEndianValue && !bigEndianValue) || type == littleEndianValue->type());
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    RELEASE_ASSERT(!!bigEndianResult == !!littleEndianResult);
    if (bigEndianResult)
        return m_out.phi(type, bigEndianResult, littleEndianResult);
    return nullptr;
}
12703
// Lowers DataViewGetInt / DataViewGetFloat. Bounds-checks index + byteSize
// against the view's length, cages the backing vector, then loads with the
// endianness chosen either statically (isLittleEndian is True/FalseTriState)
// or at runtime via child3 (MixedTriState).
void compileDataViewGet()
{
    LValue dataView = lowDataViewObject(m_node->child1());
    LValue index = lowInt32(m_node->child2());
    LValue isLittleEndian = nullptr;
    if (m_node->child3())
        isLittleEndian = lowBoolean(m_node->child3());

    DataViewData data = m_node->dataViewData();

    LValue length = m_out.zeroExtPtr(m_out.load32NonNegative(dataView, m_heaps.JSArrayBufferView_length));
    LValue indexToCheck = m_out.zeroExtPtr(index);
    // For multi-byte accesses, check the last byte touched. The arithmetic is
    // done on zero-extended 64-bit values, so the add cannot wrap.
    if (data.byteSize > 1)
        indexToCheck = m_out.add(indexToCheck, m_out.constInt64(data.byteSize - 1));
    speculate(OutOfBounds, noValue(), nullptr, m_out.aboveOrEqual(indexToCheck, length));

    LValue vector = caged(Gigacage::Primitive, m_out.loadPtr(dataView, m_heaps.JSArrayBufferView_vector), dataView);

    TypedPointer pointer(m_heaps.typedArrayProperties, m_out.add(vector, m_out.zeroExtPtr(index)));

    if (m_node->op() == DataViewGetInt) {
        switch (data.byteSize) {
        case 1:
            // Single bytes have no endianness.
            if (data.isSigned)
                setInt32(m_out.load8SignExt32(pointer));
            else
                setInt32(m_out.load8ZeroExt32(pointer));
            break;
        case 2: {
            auto emitLittleEndianLoad = [&] {
                if (data.isSigned)
                    return m_out.load16SignExt32(pointer);
                return m_out.load16ZeroExt32(pointer);
            };

            auto emitBigEndianLoad = [&] {
                // Load zero-extended, byte-swap in a patchpoint (B3 has no
                // byteswap), then sign-extend if required.
                LValue val = m_out.load16ZeroExt32(pointer);

                PatchpointValue* patchpoint = m_out.patchpoint(Int32);
                patchpoint->appendSomeRegister(val);
                patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                    jit.move(params[1].gpr(), params[0].gpr());
                    jit.byteSwap16(params[0].gpr());
                    if (data.isSigned)
                        jit.signExtend16To32(params[0].gpr(), params[0].gpr());
                });
                patchpoint->effects = Effects::none();

                return patchpoint;
            };

            if (data.isLittleEndian == FalseTriState)
                setInt32(emitBigEndianLoad());
            else if (data.isLittleEndian == TrueTriState)
                setInt32(emitLittleEndianLoad());
            else
                setInt32(emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianLoad, emitBigEndianLoad));

            break;
        }
        case 4: {
            LValue loadedValue = m_out.load32(pointer);

            if (data.isLittleEndian == FalseTriState)
                loadedValue = byteSwap32(loadedValue);
            else if (data.isLittleEndian == MixedTriState) {
                auto emitLittleEndianCode = [&] {
                    return loadedValue;
                };
                auto emitBigEndianCode = [&] {
                    return byteSwap32(loadedValue);
                };

                loadedValue = emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);
            }

            // An unsigned 32-bit value may not fit in an int32, so represent
            // it as a zero-extended strict Int52.
            if (data.isSigned)
                setInt32(loadedValue);
            else
                setStrictInt52(m_out.zeroExt(loadedValue, Int64));

            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    } else {
        switch (data.byteSize) {
        case 4: {
            auto emitLittleEndianCode = [&] {
                return m_out.floatToDouble(m_out.loadFloat(pointer));
            };

            auto emitBigEndianCode = [&] {
                // Byte-swap in a GP scratch register, move the bits to an FP
                // register, then widen float -> double.
                LValue loadedValue = m_out.load32(pointer);
                PatchpointValue* patchpoint = m_out.patchpoint(Double);
                patchpoint->appendSomeRegister(loadedValue);
                patchpoint->numGPScratchRegisters = 1;
                patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                    jit.move(params[1].gpr(), params.gpScratch(0));
                    jit.byteSwap32(params.gpScratch(0));
                    jit.move32ToFloat(params.gpScratch(0), params[0].fpr());
                    jit.convertFloatToDouble(params[0].fpr(), params[0].fpr());
                });
                patchpoint->effects = Effects::none();
                return patchpoint;
            };

            if (data.isLittleEndian == TrueTriState)
                setDouble(emitLittleEndianCode());
            else if (data.isLittleEndian == FalseTriState)
                setDouble(emitBigEndianCode());
            else
                setDouble(emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode));

            break;
        }
        case 8: {
            auto emitLittleEndianCode = [&] {
                return m_out.loadDouble(pointer);
            };

            auto emitBigEndianCode = [&] {
                // Swap the raw 64 bits, then reinterpret them as a double.
                LValue loadedValue = m_out.load64(pointer);
                loadedValue = byteSwap64(loadedValue);
                return m_out.bitCast(loadedValue, Double);
            };

            if (data.isLittleEndian == TrueTriState)
                setDouble(emitLittleEndianCode());
            else if (data.isLittleEndian == FalseTriState)
                setDouble(emitBigEndianCode());
            else
                setDouble(emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode));

            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
}
12846
// Lowers DataViewSet. Bounds-checks index + byteSize against the view's
// length, lowers the value according to its use kind, cages the backing
// vector, then stores with the endianness chosen statically or at runtime
// (varArgChild 3), mirroring compileDataViewGet.
void compileDataViewSet()
{
    LValue dataView = lowDataViewObject(m_graph.varArgChild(m_node, 0));
    LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
    LValue isLittleEndian = nullptr;
    if (m_graph.varArgChild(m_node, 3))
        isLittleEndian = lowBoolean(m_graph.varArgChild(m_node, 3));

    DataViewData data = m_node->dataViewData();

    LValue length = m_out.zeroExtPtr(m_out.load32NonNegative(dataView, m_heaps.JSArrayBufferView_length));
    LValue indexToCheck = m_out.zeroExtPtr(index);
    // Check the last byte touched; 64-bit math, so no wraparound.
    if (data.byteSize > 1)
        indexToCheck = m_out.add(indexToCheck, m_out.constInt64(data.byteSize - 1));
    speculate(OutOfBounds, noValue(), nullptr, m_out.aboveOrEqual(indexToCheck, length));

    Edge& valueEdge = m_graph.varArgChild(m_node, 2);
    LValue valueToStore;
    switch (valueEdge.useKind()) {
    case Int32Use:
        valueToStore = lowInt32(valueEdge);
        break;
    case DoubleRepUse:
        valueToStore = lowDouble(valueEdge);
        break;
    case Int52RepUse:
        valueToStore = lowStrictInt52(valueEdge);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }

    LValue vector = caged(Gigacage::Primitive, m_out.loadPtr(dataView, m_heaps.JSArrayBufferView_vector), dataView);
    TypedPointer pointer(m_heaps.typedArrayProperties, m_out.add(vector, m_out.zeroExtPtr(index)));

    if (data.isFloatingPoint) {
        if (data.byteSize == 4) {
            // Float32 store: narrow double -> float first.
            valueToStore = m_out.doubleToFloat(valueToStore);

            auto emitLittleEndianCode = [&] () -> LValue {
                m_out.storeFloat(valueToStore, pointer);
                return nullptr;
            };

            auto emitBigEndianCode = [&] () -> LValue {
                // Move the float bits into a GPR and byte-swap there, then
                // store the swapped 32-bit pattern.
                PatchpointValue* patchpoint = m_out.patchpoint(Int32);
                patchpoint->appendSomeRegister(valueToStore);
                patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                    jit.moveFloatTo32(params[1].fpr(), params[0].gpr());
                    jit.byteSwap32(params[0].gpr());
                });
                patchpoint->effects = Effects::none();
                m_out.store32(patchpoint, pointer);
                return nullptr;
            };

            if (data.isLittleEndian == FalseTriState)
                emitBigEndianCode();
            else if (data.isLittleEndian == TrueTriState)
                emitLittleEndianCode();
            else
                emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);

        } else {
            RELEASE_ASSERT(data.byteSize == 8);
            auto emitLittleEndianCode = [&] () -> LValue {
                m_out.storeDouble(valueToStore, pointer);
                return nullptr;
            };
            auto emitBigEndianCode = [&] () -> LValue {
                // Reinterpret the double as raw bits, swap, store as Int64.
                m_out.store64(byteSwap64(m_out.bitCast(valueToStore, Int64)), pointer);
                return nullptr;
            };

            if (data.isLittleEndian == FalseTriState)
                emitBigEndianCode();
            else if (data.isLittleEndian == TrueTriState)
                emitLittleEndianCode();
            else
                emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);
        }
    } else {
        switch (data.byteSize) {
        case 1:
            // Single bytes have no endianness.
            RELEASE_ASSERT(valueEdge.useKind() == Int32Use);
            m_out.store32As8(valueToStore, pointer);
            break;
        case 2: {
            RELEASE_ASSERT(valueEdge.useKind() == Int32Use);

            auto emitLittleEndianCode = [&] () -> LValue {
                m_out.store32As16(valueToStore, pointer);
                return nullptr;
            };
            auto emitBigEndianCode = [&] () -> LValue {
                // Swap the low 16 bits in a patchpoint before storing.
                PatchpointValue* patchpoint = m_out.patchpoint(Int32);
                patchpoint->appendSomeRegister(valueToStore);
                patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                    jit.move(params[1].gpr(), params[0].gpr());
                    jit.byteSwap16(params[0].gpr());
                });
                patchpoint->effects = Effects::none();

                m_out.store32As16(patchpoint, pointer);
                return nullptr;
            };

            if (data.isLittleEndian == FalseTriState)
                emitBigEndianCode();
            else if (data.isLittleEndian == TrueTriState)
                emitLittleEndianCode();
            else
                emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);
            break;
        }
        case 4: {
            RELEASE_ASSERT(valueEdge.useKind() == Int32Use || valueEdge.useKind() == Int52RepUse);

            // An Int52 value is truncated to its low 32 bits for the store.
            if (valueEdge.useKind() == Int52RepUse)
                valueToStore = m_out.castToInt32(valueToStore);

            auto emitLittleEndianCode = [&] () -> LValue {
                m_out.store32(valueToStore, pointer);
                return nullptr;
            };
            auto emitBigEndianCode = [&] () -> LValue {
                m_out.store32(byteSwap32(valueToStore), pointer);
                return nullptr;
            };

            if (data.isLittleEndian == FalseTriState)
                emitBigEndianCode();
            else if (data.isLittleEndian == TrueTriState)
                emitLittleEndianCode();
            else
                emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);

            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
}
12991
12992 void emitSwitchForMultiByOffset(LValue base, bool structuresChecked, Vector<SwitchCase, 2>& cases, LBasicBlock exit)
12993 {
12994 if (cases.isEmpty()) {
12995 m_out.jump(exit);
12996 return;
12997 }
12998
12999 if (structuresChecked) {
13000 std::sort(
13001 cases.begin(), cases.end(),
13002 [&] (const SwitchCase& a, const SwitchCase& b) -> bool {
13003 return a.value()->asInt() < b.value()->asInt();
13004 });
13005 SwitchCase last = cases.takeLast();
13006 m_out.switchInstruction(
13007 m_out.load32(base, m_heaps.JSCell_structureID), cases, last.target(), Weight(0));
13008 return;
13009 }
13010
13011 m_out.switchInstruction(
13012 m_out.load32(base, m_heaps.JSCell_structureID), cases, exit, Weight(0));
13013 }
13014
// Lowers an equality comparison where the right child is known to be an
// object and the left child is an object-or-other JSValue. If the left value
// is a cell, compare pointers; otherwise speculate it is "other"
// (null/undefined) and the result is false.
void compareEqObjectOrOtherToObject(Edge leftChild, Edge rightChild)
{
    LValue rightCell = lowCell(rightChild);
    LValue leftValue = lowJSValue(leftChild, ManualOperandSpeculation);

    speculateTruthyObject(rightChild, rightCell, SpecObject);

    LBasicBlock leftCellCase = m_out.newBlock();
    LBasicBlock leftNotCellCase = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    m_out.branch(
        isCell(leftValue, provenType(leftChild)),
        unsure(leftCellCase), unsure(leftNotCellCase));

    // Left is a cell: speculate it is a truthy object and compare pointers.
    LBasicBlock lastNext = m_out.appendTo(leftCellCase, leftNotCellCase);
    speculateTruthyObject(leftChild, leftValue, SpecObject | (~SpecCellCheck));
    ValueFromBlock cellResult = m_out.anchor(m_out.equal(rightCell, leftValue));
    m_out.jump(continuation);

    // Left is not a cell: it must be "other"; an object never equals it.
    m_out.appendTo(leftNotCellCase, continuation);
    FTL_TYPE_CHECK(
        jsValueValue(leftValue), leftChild, SpecOther | SpecCellCheck, isNotOther(leftValue));
    ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    setBoolean(m_out.phi(Int32, cellResult, notCellResult));
}
13044
// Speculates that |cell| is an object that is truthy. The object type check
// is identical on both paths; when the masquerades-as-undefined watchpoint
// is still valid we can skip the extra check, otherwise we also speculate
// that the MasqueradesAsUndefined type-info flag is clear.
void speculateTruthyObject(Edge edge, LValue cell, SpeculatedType filter)
{
    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, filter, isNotObject(cell));
        return;
    }

    FTL_TYPE_CHECK(jsValueValue(cell), edge, filter, isNotObject(cell));
    speculate(
        BadType, jsValueValue(cell), edge.node(),
        m_out.testNonZero32(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
            m_out.constInt32(MasqueradesAsUndefined)));
}
13059
// Lowers an untyped comparison of child1 and child2. Fast path: if both
// operands are boxed int32s, compare the unboxed values with |intFunctor|.
// Slow path: call |helperFunction| and treat its nonzero return as true.
template<typename IntFunctor>
void nonSpeculativeCompare(const IntFunctor& intFunctor, S_JITOperation_EJJ helperFunction)
{
    LValue left = lowJSValue(m_node->child1());
    LValue right = lowJSValue(m_node->child2());

    LBasicBlock leftIsInt = m_out.newBlock();
    LBasicBlock fastPath = m_out.newBlock();
    LBasicBlock slowPath = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    m_out.branch(isNotInt32(left, provenType(m_node->child1())), rarely(slowPath), usually(leftIsInt));

    LBasicBlock lastNext = m_out.appendTo(leftIsInt, fastPath);
    m_out.branch(isNotInt32(right, provenType(m_node->child2())), rarely(slowPath), usually(fastPath));

    m_out.appendTo(fastPath, slowPath);
    ValueFromBlock fastResult = m_out.anchor(intFunctor(unboxInt32(left), unboxInt32(right)));
    m_out.jump(continuation);

    m_out.appendTo(slowPath, continuation);
    // notNull turns the helper's pointer-sized boolean into a 0/1 value.
    ValueFromBlock slowResult = m_out.anchor(m_out.notNull(vmCall(
        pointerType(), m_out.operation(helperFunction), m_callFrame, left, right)));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    setBoolean(m_out.phi(Int32, fastResult, slowResult));
}
13088
// Emits an inline equality check of two JSStrings. The inline path handles
// resolved (non-rope) 8-bit strings: different lengths are trivially
// unequal, empty strings trivially equal, otherwise the bytes are compared
// in a backwards loop. Ropes and 16-bit strings fall back to
// operationCompareStringEq. Returns an Int32 boolean.
LValue stringsEqual(LValue leftJSString, LValue rightJSString, Edge leftJSStringEdge = Edge(), Edge rightJSStringEdge = Edge())
{
    LBasicBlock notTriviallyUnequalCase = m_out.newBlock();
    LBasicBlock notEmptyCase = m_out.newBlock();
    LBasicBlock leftReadyCase = m_out.newBlock();
    LBasicBlock rightReadyCase = m_out.newBlock();
    LBasicBlock left8BitCase = m_out.newBlock();
    LBasicBlock right8BitCase = m_out.newBlock();
    LBasicBlock loop = m_out.newBlock();
    LBasicBlock bytesEqual = m_out.newBlock();
    LBasicBlock trueCase = m_out.newBlock();
    LBasicBlock falseCase = m_out.newBlock();
    LBasicBlock slowCase = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    // Ropes have no flat buffer to compare; take the slow path.
    m_out.branch(isRopeString(leftJSString, leftJSStringEdge), rarely(slowCase), usually(leftReadyCase));

    LBasicBlock lastNext = m_out.appendTo(leftReadyCase, rightReadyCase);
    m_out.branch(isRopeString(rightJSString, rightJSStringEdge), rarely(slowCase), usually(rightReadyCase));

    // Different lengths mean trivially unequal.
    m_out.appendTo(rightReadyCase, notTriviallyUnequalCase);
    LValue left = m_out.loadPtr(leftJSString, m_heaps.JSString_value);
    LValue right = m_out.loadPtr(rightJSString, m_heaps.JSString_value);
    LValue length = m_out.load32(left, m_heaps.StringImpl_length);
    m_out.branch(
        m_out.notEqual(length, m_out.load32(right, m_heaps.StringImpl_length)),
        unsure(falseCase), unsure(notTriviallyUnequalCase));

    // Equal lengths of zero mean trivially equal.
    m_out.appendTo(notTriviallyUnequalCase, notEmptyCase);
    m_out.branch(m_out.isZero32(length), unsure(trueCase), unsure(notEmptyCase));

    // The byte loop only handles 8-bit strings; otherwise go slow.
    m_out.appendTo(notEmptyCase, left8BitCase);
    m_out.branch(
        m_out.testIsZero32(
            m_out.load32(left, m_heaps.StringImpl_hashAndFlags),
            m_out.constInt32(StringImpl::flagIs8Bit())),
        unsure(slowCase), unsure(left8BitCase));

    m_out.appendTo(left8BitCase, right8BitCase);
    m_out.branch(
        m_out.testIsZero32(
            m_out.load32(right, m_heaps.StringImpl_hashAndFlags),
            m_out.constInt32(StringImpl::flagIs8Bit())),
        unsure(slowCase), unsure(right8BitCase));

    m_out.appendTo(right8BitCase, loop);

    LValue leftData = m_out.loadPtr(left, m_heaps.StringImpl_data);
    LValue rightData = m_out.loadPtr(right, m_heaps.StringImpl_data);

    // The loop index starts at length and counts down; each iteration
    // compares the byte at index - 1.
    ValueFromBlock indexAtStart = m_out.anchor(length);

    m_out.jump(loop);

    m_out.appendTo(loop, bytesEqual);

    LValue indexAtLoopTop = m_out.phi(Int32, indexAtStart);
    LValue indexInLoop = m_out.sub(indexAtLoopTop, m_out.int32One);

    LValue leftByte = m_out.load8ZeroExt32(
        m_out.baseIndex(m_heaps.characters8, leftData, m_out.zeroExtPtr(indexInLoop)));
    LValue rightByte = m_out.load8ZeroExt32(
        m_out.baseIndex(m_heaps.characters8, rightData, m_out.zeroExtPtr(indexInLoop)));

    m_out.branch(m_out.notEqual(leftByte, rightByte), unsure(falseCase), unsure(bytesEqual));

    m_out.appendTo(bytesEqual, trueCase);

    // Wire the decremented index back into the loop-top phi.
    ValueFromBlock indexForNextIteration = m_out.anchor(indexInLoop);
    m_out.addIncomingToPhi(indexAtLoopTop, indexForNextIteration);
    m_out.branch(m_out.notZero32(indexInLoop), unsure(loop), unsure(trueCase));

    m_out.appendTo(trueCase, falseCase);

    ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
    m_out.jump(continuation);

    m_out.appendTo(falseCase, slowCase);

    ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
    m_out.jump(continuation);

    m_out.appendTo(slowCase, continuation);

    LValue slowResultValue = vmCall(
        Int64, m_out.operation(operationCompareStringEq), m_callFrame,
        leftJSString, rightJSString);
    ValueFromBlock slowResult = m_out.anchor(unboxBoolean(slowResultValue));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    return m_out.phi(Int32, trueResult, falseResult, slowResult);
}
13182
// Whether the binary arith snippet generator needs an extra FP scratch
// register beyond the two always reserved.
enum ScratchFPRUsage {
    DontNeedScratchFPR,
    NeedScratchFPR
};

// Lowers a binary arithmetic op via a JIT snippet generator. The snippet's
// fast path runs inside a patchpoint; its slow-path jumps are linked to a
// late path that calls |slowPathFunction|. If the snippet emits no fast
// path, the call is emitted inline instead.
template<typename BinaryArithOpGenerator, ScratchFPRUsage scratchFPRUsage = DontNeedScratchFPR>
void emitBinarySnippet(J_JITOperation_EJJ slowPathFunction)
{
    Node* node = m_node;

    LValue left = lowJSValue(node->child1());
    LValue right = lowJSValue(node->child2());

    SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
    SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

    PatchpointValue* patchpoint = m_out.patchpoint(Int64);
    patchpoint->appendSomeRegister(left);
    patchpoint->appendSomeRegister(right);
    // Keep the tag registers live through the snippet (late use: they must
    // still hold their values at the end of the patchpoint).
    patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
    patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
    RefPtr<PatchpointExceptionHandle> exceptionHandle =
        preparePatchpointForExceptions(patchpoint);
    patchpoint->numGPScratchRegisters = 1;
    patchpoint->numFPScratchRegisters = 2;
    if (scratchFPRUsage == NeedScratchFPR)
        patchpoint->numFPScratchRegisters++;
    patchpoint->clobber(RegisterSet::macroScratchRegisters());
    patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
    State* state = &m_ftlState;
    patchpoint->setGenerator(
        [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            AllowMacroScratchRegisterUsage allowScratch(jit);

            Box<CCallHelpers::JumpList> exceptions =
                exceptionHandle->scheduleExitCreation(params)->jumps(jit);

            // params[0] is the result; params[1]/params[2] are left/right.
            auto generator = Box<BinaryArithOpGenerator>::create(
                leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()),
                params.fpScratch(0), params.fpScratch(1), params.gpScratch(0),
                scratchFPRUsage == NeedScratchFPR ? params.fpScratch(2) : InvalidFPRReg);

            generator->generateFastPath(jit);

            if (generator->didEmitFastPath()) {
                generator->endJumpList().link(&jit);
                CCallHelpers::Label done = jit.label();

                // Slow path is emitted out-of-line and jumps back to done.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJumpList().link(&jit);
                        callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), slowPathFunction, params[0].gpr(),
                            params[1].gpr(), params[2].gpr());
                        jit.jump().linkTo(done, &jit);
                    });
            } else {
                // No fast path was emitted; just call the operation inline.
                callOperation(
                    *state, params.unavailableRegisters(), jit, node->origin.semantic,
                    exceptions.get(), slowPathFunction, params[0].gpr(), params[1].gpr(),
                    params[2].gpr());
            }
        });

    setJSValue(patchpoint);
}
13252
// Lowers a binary bitwise op via a JIT snippet generator. Unlike
// emitBinarySnippet, the snippet is assumed to always emit a fast path; its
// slow-path jumps are linked to a late path that calls |slowPathFunction|.
template<typename BinaryBitOpGenerator>
void emitBinaryBitOpSnippet(J_JITOperation_EJJ slowPathFunction)
{
    Node* node = m_node;

    LValue left = lowJSValue(node->child1());
    LValue right = lowJSValue(node->child2());

    SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
    SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

    PatchpointValue* patchpoint = m_out.patchpoint(Int64);
    patchpoint->appendSomeRegister(left);
    patchpoint->appendSomeRegister(right);
    // Tag registers stay live through the snippet (late use).
    patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
    patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
    RefPtr<PatchpointExceptionHandle> exceptionHandle =
        preparePatchpointForExceptions(patchpoint);
    patchpoint->numGPScratchRegisters = 1;
    patchpoint->clobber(RegisterSet::macroScratchRegisters());
    patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
    State* state = &m_ftlState;
    patchpoint->setGenerator(
        [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            AllowMacroScratchRegisterUsage allowScratch(jit);

            Box<CCallHelpers::JumpList> exceptions =
                exceptionHandle->scheduleExitCreation(params)->jumps(jit);

            // params[0] is the result; params[1]/params[2] are left/right.
            auto generator = Box<BinaryBitOpGenerator>::create(
                leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.gpScratch(0));

            generator->generateFastPath(jit);
            generator->endJumpList().link(&jit);
            CCallHelpers::Label done = jit.label();

            // Out-of-line slow path; jumps back to done when finished.
            params.addLatePath(
                [=] (CCallHelpers& jit) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);

                    generator->slowPathJumpList().link(&jit);
                    callOperation(
                        *state, params.unavailableRegisters(), jit, node->origin.semantic,
                        exceptions.get(), slowPathFunction, params[0].gpr(),
                        params[1].gpr(), params[2].gpr());
                    jit.jump().linkTo(done, &jit);
                });
        });

    setJSValue(patchpoint);
}
13305
// Lowers a right shift (signed or unsigned per |shiftType|) via the
// JITRightShiftGenerator snippet, with an out-of-line slow path that calls
// the matching operation.
void emitRightShiftSnippet(JITRightShiftGenerator::ShiftType shiftType)
{
    Node* node = m_node;

    // FIXME: Make this do exceptions.
    // https://bugs.webkit.org/show_bug.cgi?id=151686

    LValue left = lowJSValue(node->child1());
    LValue right = lowJSValue(node->child2());

    SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
    SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

    PatchpointValue* patchpoint = m_out.patchpoint(Int64);
    patchpoint->appendSomeRegister(left);
    patchpoint->appendSomeRegister(right);
    // Tag registers stay live through the snippet (late use).
    patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
    patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
    RefPtr<PatchpointExceptionHandle> exceptionHandle =
        preparePatchpointForExceptions(patchpoint);
    patchpoint->numGPScratchRegisters = 1;
    patchpoint->numFPScratchRegisters = 1;
    patchpoint->clobber(RegisterSet::macroScratchRegisters());
    patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
    State* state = &m_ftlState;
    patchpoint->setGenerator(
        [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            AllowMacroScratchRegisterUsage allowScratch(jit);

            Box<CCallHelpers::JumpList> exceptions =
                exceptionHandle->scheduleExitCreation(params)->jumps(jit);

            // params[0] is the result; params[1]/params[2] are left/right.
            auto generator = Box<JITRightShiftGenerator>::create(
                leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()),
                params.fpScratch(0), params.gpScratch(0), InvalidFPRReg, shiftType);

            generator->generateFastPath(jit);
            generator->endJumpList().link(&jit);
            CCallHelpers::Label done = jit.label();

            params.addLatePath(
                [=] (CCallHelpers& jit) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);

                    generator->slowPathJumpList().link(&jit);

                    // Pick the operation matching the requested shift kind.
                    J_JITOperation_EJJ slowPathFunction =
                        shiftType == JITRightShiftGenerator::SignedShift
                        ? operationValueBitRShift : operationValueBitURShift;

                    callOperation(
                        *state, params.unavailableRegisters(), jit, node->origin.semantic,
                        exceptions.get(), slowPathFunction, params[0].gpr(),
                        params[1].gpr(), params[2].gpr());
                    jit.jump().linkTo(done, &jit);
                });
        });

    setJSValue(patchpoint);
}
13367
// Emits the inline heap-cell fast allocation path for |allocator| (either a
// compile-time-constant LocalAllocator* or a runtime allocator value).
// Branches to |slowPath| when the allocator is null or the inline bump
// allocation fails; otherwise returns the allocated cell. The emitting block
// ends in a terminal patchpoint with two successors: continuation and
// slowPath.
LValue allocateHeapCell(LValue allocator, LBasicBlock slowPath)
{
    JITAllocator actualAllocator;
    if (allocator->hasIntPtr())
        actualAllocator = JITAllocator::constant(Allocator(bitwise_cast<LocalAllocator*>(allocator->asIntPtr())));
    else
        actualAllocator = JITAllocator::variable();

    if (actualAllocator.isConstant()) {
        // A known-null constant allocator always takes the slow path; emit
        // an unconditional jump and a dead continuation block.
        if (!actualAllocator.allocator()) {
            LBasicBlock haveAllocator = m_out.newBlock();
            LBasicBlock lastNext = m_out.insertNewBlocksBefore(haveAllocator);
            m_out.jump(slowPath);
            m_out.appendTo(haveAllocator, lastNext);
            return m_out.intPtrZero;
        }
    } else {
        // This means that either we know that the allocator is null or we don't know what the
        // allocator is. In either case, we need the null check.
        LBasicBlock haveAllocator = m_out.newBlock();
        LBasicBlock lastNext = m_out.insertNewBlocksBefore(haveAllocator);
        m_out.branch(
            m_out.notEqual(allocator, m_out.intPtrZero),
            usually(haveAllocator), rarely(slowPath));
        m_out.appendTo(haveAllocator, lastNext);
    }

    LBasicBlock continuation = m_out.newBlock();

    LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);

    PatchpointValue* patchpoint = m_out.patchpoint(pointerType());
    if (isARM64()) {
        // emitAllocateWithNonNullAllocator uses the scratch registers on ARM.
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
    }
    patchpoint->effects.terminal = true;
    // Constant allocator: two GP scratches total (gpScratch(1) materializes
    // the allocator, gpScratch(0) is the general scratch). Variable
    // allocator: the allocator comes in as params[1], plus one scratch.
    if (actualAllocator.isConstant())
        patchpoint->numGPScratchRegisters++;
    else
        patchpoint->appendSomeRegisterWithClobber(allocator);
    patchpoint->numGPScratchRegisters++;
    patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;

    // Successor 0 is the continuation, successor 1 is the slow path.
    m_out.appendSuccessor(usually(continuation));
    m_out.appendSuccessor(rarely(slowPath));

    patchpoint->setGenerator(
        [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            AllowMacroScratchRegisterUsageIf allowScratchIf(jit, isARM64());
            CCallHelpers::JumpList jumpToSlowPath;

            GPRReg allocatorGPR;
            if (actualAllocator.isConstant())
                allocatorGPR = params.gpScratch(1);
            else
                allocatorGPR = params[1].gpr();

            // We use a patchpoint to emit the allocation path because whenever we mess with
            // allocation paths, we already reason about them at the machine code level. We know
            // exactly what instruction sequence we want. We're confident that no compiler
            // optimization could make this code better. So, it's best to have the code in
            // AssemblyHelpers::emitAllocate(). That way, the same optimized path is shared by
            // all of the compiler tiers.
            jit.emitAllocateWithNonNullAllocator(
                params[0].gpr(), actualAllocator, allocatorGPR, params.gpScratch(0),
                jumpToSlowPath);

            CCallHelpers::Jump jumpToSuccess;
            if (!params.fallsThroughToSuccessor(0))
                jumpToSuccess = jit.jump();

            Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();

            // Link the failure jumps to the slow-path successor and, when
            // needed, the success jump to the continuation, once successor
            // labels are known.
            params.addLatePath(
                [=] (CCallHelpers& jit) {
                    jumpToSlowPath.linkTo(*labels[1], &jit);
                    if (jumpToSuccess.isSet())
                        jumpToSuccess.linkTo(*labels[0], &jit);
                });
        });

    m_out.appendTo(continuation, lastNext);
    return patchpoint;
}
13453
    // Writes the cell-header fields for a statically-known Structure: first
    // the 32-bit structure ID, then the structure's "initialization blob"
    // into the remaining header bytes (JSCell_usefulBytes). Both stores are
    // compile-time constants here since the Structure* is known.
    void storeStructure(LValue object, Structure* structure)
    {
        m_out.store32(m_out.constInt32(structure->id()), object, m_heaps.JSCell_structureID);
        m_out.store32(
            m_out.constInt32(structure->objectInitializationBlob()),
            object, m_heaps.JSCell_usefulBytes);
    }
13461
    // Same as above, but for a structure that is an arbitrary LValue. If the
    // LValue is actually a constant pointer, constant-fold into the
    // Structure* overload. Otherwise, copy the structure ID and the
    // indexingModeIncludingHistory blob out of the Structure at runtime
    // (presumably covering the same header bytes as objectInitializationBlob()
    // in the constant path -- note the asymmetry if auditing this code).
    void storeStructure(LValue object, LValue structure)
    {
        if (structure->hasIntPtr()) {
            storeStructure(object, bitwise_cast<Structure*>(structure->asIntPtr()));
            return;
        }

        LValue id = m_out.load32(structure, m_heaps.Structure_structureID);
        m_out.store32(id, object, m_heaps.JSCell_structureID);

        LValue blob = m_out.load32(structure, m_heaps.Structure_indexingModeIncludingHistory);
        m_out.store32(blob, object, m_heaps.JSCell_usefulBytes);
    }
13475
13476 template <typename StructureType>
13477 LValue allocateCell(LValue allocator, StructureType structure, LBasicBlock slowPath)
13478 {
13479 LValue result = allocateHeapCell(allocator, slowPath);
13480 storeStructure(result, structure);
13481 return result;
13482 }
13483
13484 LValue allocateObject(LValue allocator, RegisteredStructure structure, LValue butterfly, LBasicBlock slowPath)
13485 {
13486 return allocateObject(allocator, weakStructure(structure), butterfly, slowPath);
13487 }
13488
    // Allocates a JSObject: allocates the cell, stores the structure, zeroes
    // the inline property storage, and installs the given butterfly pointer.
    // Splat bounds are in 8-byte-word units (hence the / 8 on byte offsets).
    LValue allocateObject(LValue allocator, LValue structure, LValue butterfly, LBasicBlock slowPath)
    {
        LValue result = allocateCell(allocator, structure, slowPath);
        if (structure->hasIntPtr()) {
            // Constant structure: the inline capacity is known statically, so
            // both splat bounds are compile-time constants.
            splatWords(
                result,
                m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
                m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8 + bitwise_cast<Structure*>(structure->asIntPtr())->inlineCapacity()),
                m_out.int64Zero,
                m_heaps.properties.atAnyNumber());
        } else {
            // Dynamic structure: compute the end of the inline storage from
            // the structure's inlineCapacity byte at runtime.
            LValue end = m_out.add(
                m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
                m_out.load8ZeroExt32(structure, m_heaps.Structure_inlineCapacity));
            splatWords(
                result,
                m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
                end,
                m_out.int64Zero,
                m_heaps.properties.atAnyNumber());
        }

        m_out.storePtr(butterfly, result, m_heaps.JSObject_butterfly);
        return result;
    }
13514
13515 template<typename ClassType, typename StructureType>
13516 LValue allocateObject(
13517 size_t size, StructureType structure, LValue butterfly, LBasicBlock slowPath)
13518 {
13519 Allocator allocator = allocatorForNonVirtualConcurrently<ClassType>(vm(), size, AllocatorForMode::AllocatorIfExists);
13520 return allocateObject(
13521 m_out.constIntPtr(allocator.localAllocator()), structure, butterfly, slowPath);
13522 }
13523
13524 template<typename ClassType, typename StructureType>
13525 LValue allocateObject(StructureType structure, LValue butterfly, LBasicBlock slowPath)
13526 {
13527 return allocateObject<ClassType>(
13528 ClassType::allocationSize(0), structure, butterfly, slowPath);
13529 }
13530
    // Returns the LocalAllocator to use for an allocation of |size| bytes out
    // of |subspace|, or branches to |slowPath| when the size is too large for
    // any per-size-class allocator. If both arguments are compile-time
    // constants the table lookup is folded away entirely.
    LValue allocatorForSize(LValue subspace, LValue size, LBasicBlock slowPath)
    {
        static_assert(!(MarkedSpace::sizeStep & (MarkedSpace::sizeStep - 1)), "MarkedSpace::sizeStep must be a power of two.");

        // Try to do some constant-folding here.
        if (subspace->hasIntPtr() && size->hasIntPtr()) {
            CompleteSubspace* actualSubspace = bitwise_cast<CompleteSubspace*>(subspace->asIntPtr());
            size_t actualSize = size->asIntPtr();

            Allocator actualAllocator = actualSubspace->allocatorForNonVirtual(actualSize, AllocatorForMode::AllocatorIfExists);
            if (!actualAllocator) {
                // No allocator for this size: everything goes to the slow
                // path. We still emit a (dead) continuation block so the
                // emission cursor ends up in a valid block, and return a
                // null allocator value.
                LBasicBlock continuation = m_out.newBlock();
                LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
                m_out.jump(slowPath);
                m_out.appendTo(continuation, lastNext);
                return m_out.intPtrZero;
            }

            return m_out.constIntPtr(actualAllocator.localAllocator());
        }

        unsigned stepShift = getLSBSet(MarkedSpace::sizeStep);

        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);

        // sizeClassIndex = (size + sizeStep - 1) >> log2(sizeStep), i.e.
        // round the size up to the next size step.
        LValue sizeClassIndex = m_out.lShr(
            m_out.add(size, m_out.constIntPtr(MarkedSpace::sizeStep - 1)),
            m_out.constInt32(stepShift));

        // Sizes above largeCutoff have no per-size-class allocator.
        m_out.branch(
            m_out.above(sizeClassIndex, m_out.constIntPtr(MarkedSpace::largeCutoff >> stepShift)),
            rarely(slowPath), usually(continuation));

        m_out.appendTo(continuation, lastNext);

        // Index into the subspace's allocator-for-size-step table.
        return m_out.loadPtr(
            m_out.baseIndex(
                m_heaps.CompleteSubspace_allocatorForSizeStep,
                subspace, sizeClassIndex));
    }
13573
13574 LValue allocatorForSize(CompleteSubspace& subspace, LValue size, LBasicBlock slowPath)
13575 {
13576 return allocatorForSize(m_out.constIntPtr(&subspace), size, slowPath);
13577 }
13578
13579 template<typename ClassType>
13580 LValue allocateVariableSizedObject(
13581 LValue size, RegisteredStructure structure, LValue butterfly, LBasicBlock slowPath)
13582 {
13583 CompleteSubspace* subspace = subspaceForConcurrently<ClassType>(vm());
13584 RELEASE_ASSERT_WITH_MESSAGE(subspace, "CompleteSubspace is always allocated");
13585 LValue allocator = allocatorForSize(*subspace, size, slowPath);
13586 return allocateObject(allocator, structure, butterfly, slowPath);
13587 }
13588
13589 template<typename ClassType>
13590 LValue allocateVariableSizedCell(
13591 LValue size, Structure* structure, LBasicBlock slowPath)
13592 {
13593 CompleteSubspace* subspace = subspaceForConcurrently<ClassType>(vm());
13594 RELEASE_ASSERT_WITH_MESSAGE(subspace, "CompleteSubspace is always allocated");
13595 LValue allocator = allocatorForSize(*subspace, size, slowPath);
13596 return allocateCell(allocator, structure, slowPath);
13597 }
13598
    // Allocates an empty JSFinalObject with the given structure. The fast
    // path inlines allocation (with a null butterfly) using the allocator for
    // the structure's inline capacity; the slow path calls operationNewObject
    // through a lazily-generated stub.
    LValue allocateObject(RegisteredStructure structure)
    {
        size_t allocationSize = JSFinalObject::allocationSize(structure.get()->inlineCapacity());
        Allocator allocator = allocatorForNonVirtualConcurrently<JSFinalObject>(vm(), allocationSize, AllocatorForMode::AllocatorIfExists);

        // FIXME: If the allocator is null, we could simply emit a normal C call to the allocator
        // instead of putting it on the slow path.
        // https://bugs.webkit.org/show_bug.cgi?id=161062

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        // Fast path: inline allocation; jumps to slowPath on failure.
        ValueFromBlock fastResult = m_out.anchor(allocateObject(
            m_out.constIntPtr(allocator.localAllocator()), structure, m_out.intPtrZero, slowPath));

        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        VM& vm = this->vm();
        // Slow path: runtime call, generated lazily (see lazySlowPath).
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNewObject, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()));
            });
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(pointerType(), fastResult, slowResult);
    }
13633
13634 struct ArrayValues {
13635 ArrayValues()
13636 : array(0)
13637 , butterfly(0)
13638 {
13639 }
13640
13641 ArrayValues(LValue array, LValue butterfly)
13642 : array(array)
13643 , butterfly(butterfly)
13644 {
13645 }
13646
13647 LValue array;
13648 LValue butterfly;
13649 };
13650
    // Allocates a JSArray with the given public length, vector length,
    // structure, and indexing type (all LValues; any of them may be
    // constants, which enables several folds below). Emits a fast path that
    // inline-allocates the butterfly and the array cell, a "large" path that
    // switches to ArrayWithArrayStorage for big arrays (when
    // shouldLargeArraySizeCreateArrayStorage), and a shared slow path that
    // calls into the runtime.
    ArrayValues allocateJSArray(LValue publicLength, LValue vectorLength, LValue structure, LValue indexingType, bool shouldInitializeElements = true, bool shouldLargeArraySizeCreateArrayStorage = true)
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        if (indexingType->hasInt32()) {
            // Sanity-check that a constant indexing type is one this fast
            // path can handle.
            IndexingType type = static_cast<IndexingType>(indexingType->asInt32());
            ASSERT_UNUSED(type,
                hasUndecided(type)
                || hasInt32(type)
                || hasDouble(type)
                || hasContiguous(type));
        }

        LBasicBlock fastCase = m_out.newBlock();
        LBasicBlock largeCase = m_out.newBlock();
        LBasicBlock failCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(fastCase);

        // When lengths are constants, round the vector length up to the
        // optimal contiguous vector length at compile time.
        Optional<unsigned> staticVectorLength;
        Optional<unsigned> staticVectorLengthFromPublicLength;
        if (structure->hasIntPtr()) {
            if (publicLength->hasInt32()) {
                unsigned publicLengthConst = static_cast<unsigned>(publicLength->asInt32());
                if (publicLengthConst <= MAX_STORAGE_VECTOR_LENGTH) {
                    publicLengthConst = Butterfly::optimalContiguousVectorLength(
                        bitwise_cast<Structure*>(structure->asIntPtr())->outOfLineCapacity(), publicLengthConst);
                    staticVectorLengthFromPublicLength = publicLengthConst;
                }

            }
            if (vectorLength->hasInt32()) {
                unsigned vectorLengthConst = static_cast<unsigned>(vectorLength->asInt32());
                if (vectorLengthConst <= MAX_STORAGE_VECTOR_LENGTH) {
                    vectorLengthConst = Butterfly::optimalContiguousVectorLength(
                        bitwise_cast<Structure*>(structure->asIntPtr())->outOfLineCapacity(), vectorLengthConst);
                    vectorLength = m_out.constInt32(vectorLengthConst);
                    staticVectorLength = vectorLengthConst;
                }
            }
        } else {
            // We don't compute the optimal vector length for new Array(blah) where blah is not
            // statically known, since the compute effort of doing it here is probably not worth it.
        }

        ValueFromBlock noButterfly = m_out.anchor(m_out.intPtrZero);

        // Large arrays are diverted to ArrayStorage construction (on the
        // slow path) when requested.
        LValue predicate;
        if (shouldLargeArraySizeCreateArrayStorage)
            predicate = m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
        else
            predicate = m_out.booleanFalse;

        m_out.branch(predicate, rarely(largeCase), usually(fastCase));

        m_out.appendTo(fastCase, largeCase);

        // Butterfly size = vectorLength * 8 + sizeof(IndexingHeader).
        LValue payloadSize =
            m_out.shl(m_out.zeroExt(vectorLength, pointerType()), m_out.constIntPtr(3));

        LValue butterflySize = m_out.add(
            payloadSize, m_out.constIntPtr(sizeof(IndexingHeader)));

        LValue allocator = allocatorForSize(vm().jsValueGigacageAuxiliarySpace, butterflySize, failCase);
        LValue startOfStorage = allocateHeapCell(allocator, failCase);

        // The butterfly pointer points just past the indexing header.
        LValue butterfly = m_out.add(startOfStorage, m_out.constIntPtr(sizeof(IndexingHeader)));

        m_out.store32(publicLength, butterfly, m_heaps.Butterfly_publicLength);
        m_out.store32(vectorLength, butterfly, m_heaps.Butterfly_vectorLength);

        // Initialize either the whole vector, or just the tail beyond
        // publicLength when the caller will fill the rest itself.
        initializeArrayElements(
            indexingType,
            shouldInitializeElements ? m_out.int32Zero : publicLength, vectorLength,
            butterfly);

        ValueFromBlock haveButterfly = m_out.anchor(butterfly);

        LValue object = allocateObject<JSArray>(structure, butterfly, failCase);

        ValueFromBlock fastResult = m_out.anchor(object);
        ValueFromBlock fastButterfly = m_out.anchor(butterfly);
        m_out.jump(continuation);

        // Large case: switch the structure to ArrayWithArrayStorage and go
        // to the slow path.
        m_out.appendTo(largeCase, failCase);
        ValueFromBlock largeStructure = m_out.anchor(
            weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))));
        m_out.jump(slowCase);

        m_out.appendTo(failCase, slowCase);
        ValueFromBlock failStructure = m_out.anchor(structure);
        m_out.jump(slowCase);

        m_out.appendTo(slowCase, continuation);
        LValue structureValue = m_out.phi(pointerType(), largeStructure, failStructure);
        LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);

        VM& vm = this->vm();
        LValue slowResultValue = nullptr;
        // If the vector length carries no extra information beyond the public
        // length, call the simpler operation; otherwise pass the vector
        // length as a hint.
        if (vectorLength == publicLength
            || (staticVectorLengthFromPublicLength && staticVectorLength && staticVectorLength.value() == staticVectorLengthFromPublicLength.value())) {
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationNewArrayWithSize, locations[0].directGPR(),
                        locations[1].directGPR(), locations[2].directGPR(), locations[3].directGPR());
                },
                structureValue, publicLength, butterflyValue);
        } else {
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationNewArrayWithSizeAndHint, locations[0].directGPR(),
                        locations[1].directGPR(), locations[2].directGPR(), locations[3].directGPR(), locations[4].directGPR());
                },
                structureValue, publicLength, vectorLength, butterflyValue);
        }

        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        ValueFromBlock slowButterfly = m_out.anchor(
            m_out.loadPtr(slowResultValue, m_heaps.JSObject_butterfly));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return ArrayValues(
            m_out.phi(pointerType(), fastResult, slowResult),
            m_out.phi(pointerType(), fastButterfly, slowButterfly));
    }
13780
13781 ArrayValues allocateUninitializedContiguousJSArrayInternal(LValue publicLength, LValue vectorLength, RegisteredStructure structure)
13782 {
13783 bool shouldInitializeElements = false;
13784 bool shouldLargeArraySizeCreateArrayStorage = false;
13785 return allocateJSArray(
13786 publicLength, vectorLength, weakStructure(structure), m_out.constInt32(structure->indexingType()), shouldInitializeElements,
13787 shouldLargeArraySizeCreateArrayStorage);
13788 }
13789
13790 ArrayValues allocateUninitializedContiguousJSArray(LValue publicLength, RegisteredStructure structure)
13791 {
13792 return allocateUninitializedContiguousJSArrayInternal(publicLength, publicLength, structure);
13793 }
13794
13795 ArrayValues allocateUninitializedContiguousJSArray(unsigned publicLength, unsigned vectorLength, RegisteredStructure structure)
13796 {
13797 ASSERT(vectorLength >= publicLength);
13798 return allocateUninitializedContiguousJSArrayInternal(m_out.constInt32(publicLength), m_out.constInt32(vectorLength), structure);
13799 }
13800
    // Returns a pointer to the next free ShadowChicken log packet, calling
    // operationProcessShadowChickenLog to drain the log when the cursor has
    // reached the end. Also advances the log cursor past the returned packet.
    LValue ensureShadowChickenPacket()
    {
        ShadowChicken* shadowChicken = vm().shadowChicken();
        RELEASE_ASSERT(shadowChicken);
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        TypedPointer addressOfLogCursor = m_out.absolute(shadowChicken->addressOfLogCursor());
        LValue logCursor = m_out.loadPtr(addressOfLogCursor);

        ValueFromBlock fastResult = m_out.anchor(logCursor);

        // Fast case: there is room in the log.
        m_out.branch(
            m_out.below(logCursor, m_out.constIntPtr(shadowChicken->logEnd())),
            usually(continuation), rarely(slowCase));

        LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);

        // Slow case: drain the log, then re-read the (reset) cursor.
        vmCall(Void, m_out.operation(operationProcessShadowChickenLog), m_callFrame);

        ValueFromBlock slowResult = m_out.anchor(m_out.loadPtr(addressOfLogCursor));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(pointerType(), fastResult, slowResult);

        // Bump the cursor past the packet we are handing out.
        m_out.storePtr(
            m_out.add(result, m_out.constIntPtr(sizeof(ShadowChicken::Packet))),
            addressOfLogCursor);

        return result;
    }
13833
    // Converts the value of |edge| to a boolean (an Int32 LValue), following
    // JS truthiness rules specialized per use kind. Typed use kinds emit a
    // direct comparison; ObjectOrOtherUse/StringOrOtherUse/UntypedUse emit
    // control flow with type checks as needed.
    LValue boolify(Edge edge)
    {
        switch (edge.useKind()) {
        case BooleanUse:
        case KnownBooleanUse:
            return lowBoolean(edge);
        case Int32Use:
            // Truthy iff non-zero.
            return m_out.notZero32(lowInt32(edge));
        case DoubleRepUse:
            // Truthy iff non-zero and not NaN ("AndOrdered").
            return m_out.doubleNotEqualAndOrdered(lowDouble(edge), m_out.doubleZero);
        case ObjectOrOtherUse:
            return m_out.logicalNot(
                equalNullOrUndefined(
                    edge, CellCaseSpeculatesObject, SpeculateNullOrUndefined,
                    ManualOperandSpeculation));
        case StringUse:
            // A string is truthy iff it is not the (unique) empty string.
            return m_out.notEqual(lowString(edge), weakPointer(jsEmptyString(&m_graph.m_vm)));
        case StringOrOtherUse: {
            LValue value = lowJSValue(edge, ManualOperandSpeculation);

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));

            // Cell case: speculate string, then compare to the empty string.
            LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
            FTL_TYPE_CHECK(jsValueValue(value), edge, (~SpecCellCheck) | SpecString, isNotString(value));
            ValueFromBlock stringResult = m_out.anchor(m_out.notEqual(value, weakPointer(jsEmptyString(&m_graph.m_vm))));
            m_out.jump(continuation);

            // Non-cell case: speculate null/undefined, which are falsy.
            m_out.appendTo(notCellCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
            ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int32, stringResult, notCellResult);
        }
        case UntypedUse: {
            LValue value = lowJSValue(edge);

            // Implements the following control flow structure:
            // if (value is cell) {
            //     if (value is string or value is BigInt)
            //         result = !!value->length
            //     else {
            //         do evil things for masquerades-as-undefined
            //         result = true
            //     }
            // } else if (value is int32) {
            //     result = !!unboxInt32(value)
            // } else if (value is number) {
            //     result = !!unboxDouble(value)
            // } else {
            //     result = value == jsTrue
            // }

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notStringCase = m_out.newBlock();
            LBasicBlock stringCase = m_out.newBlock();
            LBasicBlock bigIntCase = m_out.newBlock();
            LBasicBlock notStringOrBigIntCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock int32Case = m_out.newBlock();
            LBasicBlock notInt32Case = m_out.newBlock();
            LBasicBlock doubleCase = m_out.newBlock();
            LBasicBlock notDoubleCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            Vector<ValueFromBlock> results;

            m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(cellCase, notStringCase);
            m_out.branch(
                isString(value, provenType(edge) & SpecCell),
                unsure(stringCase), unsure(notStringCase));

            m_out.appendTo(notStringCase, stringCase);
            m_out.branch(
                isBigInt(value, provenType(edge) & (SpecCell - SpecString)),
                unsure(bigIntCase), unsure(notStringOrBigIntCase));

            // String: truthy iff not the empty string.
            m_out.appendTo(stringCase, bigIntCase);
            results.append(m_out.anchor(m_out.notEqual(value, weakPointer(jsEmptyString(&m_graph.m_vm)))));
            m_out.jump(continuation);

            // BigInt: truthy iff its digit length is non-zero.
            m_out.appendTo(bigIntCase, notStringOrBigIntCase);
            LValue nonZeroBigInt = m_out.notZero32(
                m_out.load32NonNegative(value, m_heaps.JSBigInt_length));
            results.append(m_out.anchor(nonZeroBigInt));
            m_out.jump(continuation);

            // Other cell (object): truthy, except for MasqueradesAsUndefined
            // objects whose structure's global object matches this one.
            m_out.appendTo(notStringOrBigIntCase, notCellCase);
            LValue isTruthyObject;
            if (masqueradesAsUndefinedWatchpointIsStillValid())
                isTruthyObject = m_out.booleanTrue;
            else {
                LBasicBlock masqueradesCase = m_out.newBlock();

                results.append(m_out.anchor(m_out.booleanTrue));

                m_out.branch(
                    m_out.testIsZero32(
                        m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
                        m_out.constInt32(MasqueradesAsUndefined)),
                    usually(continuation), rarely(masqueradesCase));

                m_out.appendTo(masqueradesCase);

                isTruthyObject = m_out.notEqual(
                    weakPointer(m_graph.globalObjectFor(m_node->origin.semantic)),
                    m_out.loadPtr(loadStructure(value), m_heaps.Structure_globalObject));
            }
            results.append(m_out.anchor(isTruthyObject));
            m_out.jump(continuation);

            m_out.appendTo(notCellCase, int32Case);
            m_out.branch(
                isInt32(value, provenType(edge) & ~SpecCell),
                unsure(int32Case), unsure(notInt32Case));

            m_out.appendTo(int32Case, notInt32Case);
            results.append(m_out.anchor(m_out.notZero32(unboxInt32(value))));
            m_out.jump(continuation);

            m_out.appendTo(notInt32Case, doubleCase);
            m_out.branch(
                isNumber(value, provenType(edge) & ~SpecCell),
                unsure(doubleCase), unsure(notDoubleCase));

            m_out.appendTo(doubleCase, notDoubleCase);
            LValue doubleIsTruthy = m_out.doubleNotEqualAndOrdered(
                unboxDouble(value), m_out.constDouble(0));
            results.append(m_out.anchor(doubleIsTruthy));
            m_out.jump(continuation);

            // Remaining values (booleans, null, undefined): only true is
            // truthy, so compare against the encoded jsBoolean(true).
            m_out.appendTo(notDoubleCase, continuation);
            LValue miscIsTruthy = m_out.equal(
                value, m_out.constInt64(JSValue::encode(jsBoolean(true))));
            results.append(m_out.anchor(miscIsTruthy));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int32, results);
        }
        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return 0;
        }
    }
13986
    // How equalNullOrUndefined() treats the cell case: either all cells
    // compare false, or the cell case speculates that the cell is an object.
    enum StringOrObjectMode {
        AllCellsAreFalse,
        CellCaseSpeculatesObject
    };
    // Which comparison equalNullOrUndefined() performs on the non-cell case.
    enum EqualNullOrUndefinedMode {
        EqualNull,
        EqualUndefined,
        EqualNullOrUndefined,
        SpeculateNullOrUndefined
    };
    // Emits a comparison of |edge|'s value against null/undefined per the
    // given modes. Cells normally compare false, except for
    // MasqueradesAsUndefined objects belonging to this global object (only
    // checked when the masquerades watchpoint is no longer valid).
    LValue equalNullOrUndefined(
        Edge edge, StringOrObjectMode cellMode, EqualNullOrUndefinedMode primitiveMode,
        OperandSpeculationMode operandMode = AutomaticOperandSpeculation)
    {
        bool validWatchpoint = masqueradesAsUndefinedWatchpointIsStillValid();

        LValue value = lowJSValue(edge, operandMode);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock primitiveCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);

        Vector<ValueFromBlock, 3> results;

        switch (cellMode) {
        case AllCellsAreFalse:
            break;
        case CellCaseSpeculatesObject:
            // Speculate that any cell we see here is an object.
            FTL_TYPE_CHECK(
                jsValueValue(value), edge, (~SpecCellCheck) | SpecObject, isNotObject(value));
            break;
        }

        if (validWatchpoint) {
            // No object can masquerade as undefined: cells are simply false.
            results.append(m_out.anchor(m_out.booleanFalse));
            m_out.jump(continuation);
        } else {
            LBasicBlock masqueradesCase =
                m_out.newBlock();

            results.append(m_out.anchor(m_out.booleanFalse));

            m_out.branch(
                m_out.testNonZero32(
                    m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
                    m_out.constInt32(MasqueradesAsUndefined)),
                rarely(masqueradesCase), usually(continuation));

            m_out.appendTo(masqueradesCase, primitiveCase);

            LValue structure = loadStructure(value);

            // A masquerader equals undefined only within its own global
            // object.
            results.append(m_out.anchor(
                m_out.equal(
                    weakPointer(m_graph.globalObjectFor(m_node->origin.semantic)),
                    m_out.loadPtr(structure, m_heaps.Structure_globalObject))));
            m_out.jump(continuation);
        }

        m_out.appendTo(primitiveCase, continuation);

        LValue primitiveResult;
        switch (primitiveMode) {
        case EqualNull:
            primitiveResult = m_out.equal(value, m_out.constInt64(ValueNull));
            break;
        case EqualUndefined:
            primitiveResult = m_out.equal(value, m_out.constInt64(ValueUndefined));
            break;
        case EqualNullOrUndefined:
            primitiveResult = isOther(value, provenType(edge));
            break;
        case SpeculateNullOrUndefined:
            // Speculate that the non-cell is null/undefined; then the answer
            // is trivially true.
            FTL_TYPE_CHECK(
                jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
            primitiveResult = m_out.booleanTrue;
            break;
        }
        results.append(m_out.anchor(primitiveResult));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        return m_out.phi(Int32, results);
    }
14076
    // Emits the out-of-bounds handling for a contiguous put-by-val, leaving
    // the emission cursor in a block where the actual store may proceed.
    // In-bounds array modes emit nothing. Otherwise: a store past
    // publicLength but within vectorLength grows publicLength (fills a
    // hole); a store past vectorLength either deoptimizes (mode not
    // out-of-bounds) or calls |slowPathFunction| and jumps to |continuation|.
    template<typename FunctionType>
    void contiguousPutByValOutOfBounds(
        FunctionType slowPathFunction, LValue base, LValue storage, LValue index, LValue value,
        LBasicBlock continuation)
    {
        if (!m_node->arrayMode().isInBounds()) {
            LBasicBlock notInBoundsCase =
                m_out.newBlock();
            LBasicBlock performStore =
                m_out.newBlock();

            LValue isNotInBounds = m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength));
            m_out.branch(isNotInBounds, unsure(notInBoundsCase), unsure(performStore));

            LBasicBlock lastNext = m_out.appendTo(notInBoundsCase, performStore);

            LValue isOutOfBounds = m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_vectorLength));

            if (!m_node->arrayMode().isOutOfBounds())
                speculate(OutOfBounds, noValue(), 0, isOutOfBounds);
            else {
                LBasicBlock outOfBoundsCase =
                    m_out.newBlock();
                LBasicBlock holeCase =
                    m_out.newBlock();

                m_out.branch(isOutOfBounds, rarely(outOfBoundsCase), usually(holeCase));

                LBasicBlock innerLastNext = m_out.appendTo(outOfBoundsCase, holeCase);

                // Truly out of bounds: hand the store to the runtime and
                // skip the inline store entirely.
                vmCall(
                    Void, m_out.operation(slowPathFunction),
                    m_callFrame, base, index, value);

                m_out.jump(continuation);

                m_out.appendTo(holeCase, innerLastNext);
            }

            // Within vectorLength: extend publicLength to cover the index.
            m_out.store32(
                m_out.add(index, m_out.int32One),
                storage, m_heaps.Butterfly_publicLength);

            m_out.jump(performStore);
            m_out.appendTo(performStore, lastNext);
        }
    }
14126
    // On ARM64E, authenticates/untags a typed-array data pointer using its
    // length (via a patchpoint calling CCallHelpers::untagArrayPtr);
    // elsewhere this is an identity function.
    LValue untagArrayPtr(LValue ptr, LValue size)
    {
#if CPU(ARM64E)
        PatchpointValue* authenticate = m_out.patchpoint(pointerType());
        authenticate->appendSomeRegister(ptr);
        // The size must live past the move below, hence SomeLateRegister.
        authenticate->append(size, B3::ValueRep(B3::ValueRep::SomeLateRegister));
        authenticate->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            jit.move(params[1].gpr(), params[0].gpr());
            jit.untagArrayPtr(params[2].gpr(), params[0].gpr());
        });
        return authenticate;
#else
        UNUSED_PARAM(size);
        return ptr;
#endif
    }
14143
    // On ARM64E, strips the pointer tag from an array data pointer (via
    // CCallHelpers::removeArrayPtrTag); elsewhere an identity function.
    // Note: on ARM64E the trailing "return ptr" is unreachable.
    LValue removeArrayPtrTag(LValue ptr)
    {
#if CPU(ARM64E)
        PatchpointValue* authenticate = m_out.patchpoint(pointerType());
        authenticate->appendSomeRegister(ptr);
        authenticate->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            jit.move(params[1].gpr(), params[0].gpr());
            jit.removeArrayPtrTag(params[0].gpr());
        });
        return authenticate;
#endif
        return ptr;
    }
14157
    // Applies Gigacage pointer caging to |ptr| (whose owning object is
    // |base|). On ARM64E, primitive-gigacage pointers are first
    // authenticated against the view's length. When the gigacage is enabled
    // for |kind|, masks the pointer and rebases it into the cage; otherwise
    // returns the pointer unchanged.
    LValue caged(Gigacage::Kind kind, LValue ptr, LValue base)
    {
#if CPU(ARM64E)
        if (kind == Gigacage::Primitive) {
            LValue size = m_out.load32(base, m_heaps.JSArrayBufferView_length);
            ptr = untagArrayPtr(ptr, size);
        }
#else
        UNUSED_PARAM(kind);
        UNUSED_PARAM(base);
#endif

#if GIGACAGE_ENABLED
        UNUSED_PARAM(base);
        if (!Gigacage::isEnabled(kind))
            return ptr;

        // If the primitive gigacage can be disabled, only cage while the
        // "still enabled" watchpoint holds; otherwise skip caging.
        if (kind == Gigacage::Primitive && Gigacage::canPrimitiveGigacageBeDisabled()) {
            if (vm().primitiveGigacageEnabled().isStillValid())
                m_graph.watchpoints().addLazily(vm().primitiveGigacageEnabled());
            else
                return ptr;
        }

        LValue basePtr = m_out.constIntPtr(Gigacage::basePtr(kind));
        LValue mask = m_out.constIntPtr(Gigacage::mask(kind));

        LValue masked = m_out.bitAnd(ptr, mask);
        LValue result = m_out.add(masked, basePtr);

#if CPU(ARM64E)
        {
            // Reinsert the PAC bits from the original pointer into the caged
            // result.
            PatchpointValue* merge = m_out.patchpoint(pointerType());
            merge->append(result, B3::ValueRep(B3::ValueRep::SomeLateRegister));
            merge->appendSomeRegister(ptr);
            merge->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                jit.move(params[2].gpr(), params[0].gpr());
                jit.bitFieldInsert64(params[1].gpr(), 0, 64 - MacroAssembler::numberOfPACBits, params[0].gpr());
            });
            result = merge;
        }
#endif
        // Make sure that B3 doesn't try to do smart reassociation of these pointer bits.
        // FIXME: In an ideal world, B3 would not do harmful reassociations, and if it did, it would be able
        // to undo them during constant hoisting and regalloc. As it stands, if you remove this then Octane
        // gets 1.6% slower and Kraken gets 5% slower. It's all because the basePtr, which is a constant,
        // gets reassociated out of the add above and into the address arithmetic. This disables hoisting of
        // the basePtr constant. Hoisting that constant is worth a lot more perf than the reassociation. One
        // way to make this all work happily is to combine offset legalization with constant hoisting, and
        // then teach it reassociation. So, Add(Add(a, b), const) where a is loop-invariant while b isn't
        // will turn into Add(Add(a, const), b) by the constant hoister. We would have to teach B3 to do this
        // and possibly other smart things if we want to be able to remove this opaque.
        // https://bugs.webkit.org/show_bug.cgi?id=175493
        return m_out.opaque(result);
#endif
        return ptr;
    }
14215
14216 void buildSwitch(SwitchData* data, LType type, LValue switchValue)
14217 {
14218 ASSERT(type == pointerType() || type == Int32);
14219
14220 Vector<SwitchCase> cases;
14221 for (unsigned i = 0; i < data->cases.size(); ++i) {
14222 SwitchCase newCase;
14223
14224 if (type == pointerType()) {
14225 newCase = SwitchCase(m_out.constIntPtr(data->cases[i].value.switchLookupValue(data->kind)),
14226 lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count));
14227 } else if (type == Int32) {
14228 newCase = SwitchCase(m_out.constInt32(data->cases[i].value.switchLookupValue(data->kind)),
14229 lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count));
14230 } else
14231 CRASH();
14232
14233 cases.append(newCase);
14234 }
14235
14236 m_out.switchInstruction(
14237 switchValue, cases,
14238 lowBlock(data->fallThrough.block), Weight(data->fallThrough.count));
14239 }
14240
// Lowers a switch-on-string. If every case label is an 8-bit string of bounded
// length, emits an inline character-comparison trie (switchStringRecurse);
// otherwise falls back to a runtime table lookup (switchStringSlow).
void switchString(SwitchData* data, LValue string, Edge& edge)
{
    // Decide whether the inline binary switch is possible and profitable.
    bool canDoBinarySwitch = true;
    unsigned totalLength = 0;

    for (DFG::SwitchCase myCase : data->cases) {
        StringImpl* string = myCase.value.stringImpl();
        // The inline path compares raw 8-bit characters, so any 16-bit case
        // label forces the slow path.
        if (!string->is8Bit()) {
            canDoBinarySwitch = false;
            break;
        }
        if (string->length() > Options::maximumBinaryStringSwitchCaseLength()) {
            canDoBinarySwitch = false;
            break;
        }
        totalLength += string->length();
    }

    if (!canDoBinarySwitch || totalLength > Options::maximumBinaryStringSwitchTotalLength()) {
        switchStringSlow(data, string);
        return;
    }

    LBasicBlock hasImplBlock = m_out.newBlock();
    LBasicBlock is8BitBlock = m_out.newBlock();
    LBasicBlock slowBlock = m_out.newBlock();

    // Rope strings have no contiguous buffer to scan; take the slow path.
    m_out.branch(isRopeString(string, edge), unsure(slowBlock), unsure(hasImplBlock));

    LBasicBlock lastNext = m_out.appendTo(hasImplBlock, is8BitBlock);

    LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
    LValue length = m_out.load32(stringImpl, m_heaps.StringImpl_length);

    // The subject string must itself be 8-bit for byte-wise comparison.
    m_out.branch(
        m_out.testIsZero32(
            m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
            m_out.constInt32(StringImpl::flagIs8Bit())),
        unsure(slowBlock), unsure(is8BitBlock));

    m_out.appendTo(is8BitBlock, slowBlock);

    LValue buffer = m_out.loadPtr(stringImpl, m_heaps.StringImpl_data);

    // FIXME: We should propagate branch weight data to the cases of this switch.
    // https://bugs.webkit.org/show_bug.cgi?id=144368

    // Sort the cases lexicographically so the recursion can binary-partition them.
    Vector<StringSwitchCase> cases;
    for (DFG::SwitchCase myCase : data->cases)
        cases.append(StringSwitchCase(myCase.value.stringImpl(), lowBlock(myCase.target.block)));
    std::sort(cases.begin(), cases.end());
    switchStringRecurse(data, buffer, length, cases, 0, 0, cases.size(), 0, false);

    m_out.appendTo(slowBlock, lastNext);
    switchStringSlow(data, string);
}
14297
14298 // The code for string switching is based closely on the same code in the DFG backend. While it
14299 // would be nice to reduce the amount of similar-looking code, it seems like this is one of
14300 // those algorithms where factoring out the common bits would result in more code than just
14301 // duplicating.
14302
14303 struct StringSwitchCase {
14304 StringSwitchCase() { }
14305
14306 StringSwitchCase(StringImpl* string, LBasicBlock target)
14307 : string(string)
14308 , target(target)
14309 {
14310 }
14311
14312 bool operator<(const StringSwitchCase& other) const
14313 {
14314 return stringLessThan(*string, *other.string);
14315 }
14316
14317 StringImpl* string;
14318 LBasicBlock target;
14319 };
14320
14321 struct CharacterCase {
14322 CharacterCase()
14323 : character(0)
14324 , begin(0)
14325 , end(0)
14326 {
14327 }
14328
14329 CharacterCase(LChar character, unsigned begin, unsigned end)
14330 : character(character)
14331 , begin(begin)
14332 , end(end)
14333 {
14334 }
14335
14336 bool operator<(const CharacterCase& other) const
14337 {
14338 return character < other.character;
14339 }
14340
14341 LChar character;
14342 unsigned begin;
14343 unsigned end;
14344 };
14345
// Recursively emits the inline string-switch trie over the sorted case range
// [begin, end). buffer/length describe the (8-bit) subject string. numChecked
// characters have already been verified equal to the cases' common prefix;
// alreadyCheckedLength is the length lower bound already established, and
// checkedExactLength indicates the length is known exactly. Any mismatch
// branches to data->fallThrough.
void switchStringRecurse(
    SwitchData* data, LValue buffer, LValue length, const Vector<StringSwitchCase>& cases,
    unsigned numChecked, unsigned begin, unsigned end, unsigned alreadyCheckedLength,
    unsigned checkedExactLength)
{
    LBasicBlock fallThrough = lowBlock(data->fallThrough.block);

    // Empty range: nothing can match.
    if (begin == end) {
        m_out.jump(fallThrough);
        return;
    }

    // Compute, across the range: the shortest case length, the number of
    // leading characters (from numChecked on) shared by all cases, and whether
    // every case has the same length.
    unsigned minLength = cases[begin].string->length();
    unsigned commonChars = minLength;
    bool allLengthsEqual = true;
    for (unsigned i = begin + 1; i < end; ++i) {
        unsigned myCommonChars = numChecked;
        unsigned limit = std::min(cases[begin].string->length(), cases[i].string->length());
        for (unsigned j = numChecked; j < limit; ++j) {
            if (cases[begin].string->at(j) != cases[i].string->at(j))
                break;
            myCommonChars++;
        }
        commonChars = std::min(commonChars, myCommonChars);
        if (minLength != cases[i].string->length())
            allLengthsEqual = false;
        minLength = std::min(minLength, cases[i].string->length());
    }

    if (checkedExactLength) {
        DFG_ASSERT(m_graph, m_node, alreadyCheckedLength == minLength, alreadyCheckedLength, minLength);
        DFG_ASSERT(m_graph, m_node, allLengthsEqual);
    }

    DFG_ASSERT(m_graph, m_node, minLength >= commonChars, minLength, commonChars);

    // Establish the length precondition for reading up to minLength characters.
    if (!allLengthsEqual && alreadyCheckedLength < minLength)
        m_out.check(m_out.below(length, m_out.constInt32(minLength)), unsure(fallThrough));
    if (allLengthsEqual && (alreadyCheckedLength < minLength || !checkedExactLength))
        m_out.check(m_out.notEqual(length, m_out.constInt32(minLength)), unsure(fallThrough));

    // Verify the shared prefix one character at a time.
    for (unsigned i = numChecked; i < commonChars; ++i) {
        m_out.check(
            m_out.notEqual(
                m_out.load8ZeroExt32(buffer, m_heaps.characters8[i]),
                m_out.constInt32(static_cast<uint16_t>(cases[begin].string->at(i)))),
            unsure(fallThrough));
    }

    if (minLength == commonChars) {
        // This is the case where one of the cases is a prefix of all of the other cases.
        // We've already checked that the input string is a prefix of all of the cases,
        // so we just check length to jump to that case.

        DFG_ASSERT(m_graph, m_node, cases[begin].string->length() == commonChars, cases[begin].string->length(), commonChars);
        for (unsigned i = begin + 1; i < end; ++i)
            DFG_ASSERT(m_graph, m_node, cases[i].string->length() > commonChars, cases[i].string->length(), commonChars);

        if (allLengthsEqual) {
            DFG_ASSERT(m_graph, m_node, end == begin + 1, end, begin);
            m_out.jump(cases[begin].target);
            return;
        }

        m_out.check(
            m_out.equal(length, m_out.constInt32(commonChars)),
            unsure(cases[begin].target));

        // We've checked if the length is >= minLength, and then we checked if the length is
        // == commonChars. We get to this point if it is >= minLength but not == commonChars.
        // Hence we know that it now must be > minLength, i.e. that it's >= minLength + 1.
        switchStringRecurse(
            data, buffer, length, cases, commonChars, begin + 1, end, minLength + 1, false);
        return;
    }

    // At this point we know that the string is longer than commonChars, and we've only verified
    // commonChars. Use a binary switch on the next unchecked character, i.e.
    // string[commonChars].

    DFG_ASSERT(m_graph, m_node, end >= begin + 2, end, begin);

    LValue uncheckedChar = m_out.load8ZeroExt32(buffer, m_heaps.characters8[commonChars]);

    // Partition the (sorted) cases into runs sharing the same character at
    // position commonChars; each run becomes one switch case.
    Vector<CharacterCase> characterCases;
    CharacterCase currentCase(cases[begin].string->at(commonChars), begin, begin + 1);
    for (unsigned i = begin + 1; i < end; ++i) {
        LChar currentChar = cases[i].string->at(commonChars);
        if (currentChar != currentCase.character) {
            currentCase.end = i;
            characterCases.append(currentCase);
            currentCase = CharacterCase(currentChar, i, i + 1);
        } else
            currentCase.end = i + 1;
    }
    characterCases.append(currentCase);

    Vector<LBasicBlock> characterBlocks;
    for (unsigned i = characterCases.size(); i--;)
        characterBlocks.append(m_out.newBlock());

    Vector<SwitchCase> switchCases;
    for (unsigned i = 0; i < characterCases.size(); ++i) {
        if (i)
            DFG_ASSERT(m_graph, m_node, characterCases[i - 1].character < characterCases[i].character);
        switchCases.append(SwitchCase(
            m_out.constInt32(characterCases[i].character), characterBlocks[i], Weight()));
    }
    m_out.switchInstruction(uncheckedChar, switchCases, fallThrough, Weight());

    // Recurse into each partition, emitting its blocks in order.
    LBasicBlock lastNext = m_out.m_nextBlock;
    characterBlocks.append(lastNext); // Makes it convenient to set nextBlock.
    for (unsigned i = 0; i < characterCases.size(); ++i) {
        m_out.appendTo(characterBlocks[i], characterBlocks[i + 1]);
        switchStringRecurse(
            data, buffer, length, cases, commonChars + 1,
            characterCases[i].begin, characterCases[i].end, minLength, allLengthsEqual);
    }

    DFG_ASSERT(m_graph, m_node, m_out.m_nextBlock == lastNext);
}
14467
// Slow-path string switch: calls into the runtime to look up the bytecode's
// string jump table, which returns a branch offset; then switches on that
// offset to dispatch to the lowered target blocks.
void switchStringSlow(SwitchData* data, LValue string)
{
    // FIXME: We ought to be able to use computed gotos here. We would save the labels of the
    // blocks we want to jump to, and then request their addresses after compilation completes.
    // https://bugs.webkit.org/show_bug.cgi?id=144369

    LValue branchOffset = vmCall(
        Int32, m_out.operation(operationSwitchStringAndGetBranchOffset),
        m_callFrame, m_out.constIntPtr(data->switchTableIndex), string);

    StringJumpTable& table = codeBlock()->stringSwitchJumpTable(data->switchTableIndex);

    Vector<SwitchCase> cases;
    // These may be negative, or zero, or probably other stuff, too. We don't want to mess with HashSet's corner cases and we don't really care about throughput here.
    StdUnorderedSet<int32_t> alreadyHandled;
    for (unsigned i = 0; i < data->cases.size(); ++i) {
        // FIXME: The fact that we're using the bytecode's switch table means that the
        // following DFG IR transformation would be invalid.
        //
        // Original code:
        //     switch (v) {
        //     case "foo":
        //     case "bar":
        //         things();
        //         break;
        //     default:
        //         break;
        //     }
        //
        // New code:
        //     switch (v) {
        //     case "foo":
        //         instrumentFoo();
        //         goto _things;
        //     case "bar":
        //         instrumentBar();
        //     _things:
        //         things();
        //         break;
        //     default:
        //         break;
        //     }
        //
        // Luckily, we don't currently do any such transformation. But it's kind of silly that
        // this is an issue.
        // https://bugs.webkit.org/show_bug.cgi?id=144635

        DFG::SwitchCase myCase = data->cases[i];
        StringJumpTable::StringOffsetTable::iterator iter =
            table.offsetTable.find(myCase.value.stringImpl());
        DFG_ASSERT(m_graph, m_node, iter != table.offsetTable.end());

        // Multiple DFG cases can map to the same bytecode branch offset; B3's
        // switch requires distinct case values, so only emit the first.
        if (!alreadyHandled.insert(iter->value.branchOffset).second)
            continue;

        cases.append(SwitchCase(
            m_out.constInt32(iter->value.branchOffset),
            lowBlock(myCase.target.block), Weight(myCase.target.count)));
    }

    m_out.switchInstruction(
        branchOffset, cases, lowBlock(data->fallThrough.block),
        Weight(data->fallThrough.count));
}
14532
14533 // Calls the functor at the point of code generation where we know what the result type is.
14534 // You can emit whatever code you like at that point. Expects you to terminate the basic block.
14535 // When buildTypeOf() returns, it will have terminated all basic blocks that it created. So, if
14536 // you aren't using this as the terminator of a high-level block, you should create your own
// continuation and set it as the nextBlock (m_out.insertNewBlocksBefore(continuation)) before
14538 // calling this. For example:
14539 //
14540 // LBasicBlock continuation = m_out.newBlock();
14541 // LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
14542 // buildTypeOf(
14543 // child, value,
14544 // [&] (TypeofType type) {
14545 // do things;
14546 // m_out.jump(continuation);
14547 // });
14548 // m_out.appendTo(continuation, lastNext);
// Lowers typeof-style dispatch on `value` (see the usage comment above): for
// each possible TypeofType it invokes `functor`, which must emit terminal code
// for that outcome. All blocks created here are terminated before returning.
template<typename Functor>
void buildTypeOf(Edge child, LValue value, const Functor& functor)
{
    JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

    // Implements the following branching structure:
    //
    // if (is cell) {
    //     if (is object) {
    //         if (is function) {
    //             return function;
    //         } else if (doesn't have call trap and doesn't masquerade as undefined) {
    //             return object
    //         } else {
    //             return slowPath();
    //         }
    //     } else if (is string) {
    //         return string
    //     } else if (is bigint) {
    //         return bigint
    //     } else {
    //         return symbol
    //     }
    // } else if (is number) {
    //     return number
    // } else if (is null) {
    //     return object
    // } else if (is boolean) {
    //     return boolean
    // } else {
    //     return undefined
    // }
    //
    // FIXME: typeof Symbol should be more frequently seen than BigInt.
    // We should change the order of type detection based on this frequency.
    // https://bugs.webkit.org/show_bug.cgi?id=192650

    LBasicBlock cellCase = m_out.newBlock();
    LBasicBlock objectCase = m_out.newBlock();
    LBasicBlock functionCase = m_out.newBlock();
    LBasicBlock notFunctionCase = m_out.newBlock();
    LBasicBlock reallyObjectCase = m_out.newBlock();
    LBasicBlock slowPath = m_out.newBlock();
    LBasicBlock unreachable = m_out.newBlock();
    LBasicBlock notObjectCase = m_out.newBlock();
    LBasicBlock stringCase = m_out.newBlock();
    LBasicBlock notStringCase = m_out.newBlock();
    LBasicBlock bigIntCase = m_out.newBlock();
    LBasicBlock symbolCase = m_out.newBlock();
    LBasicBlock notCellCase = m_out.newBlock();
    LBasicBlock numberCase = m_out.newBlock();
    LBasicBlock notNumberCase = m_out.newBlock();
    LBasicBlock notNullCase = m_out.newBlock();
    LBasicBlock booleanCase = m_out.newBlock();
    LBasicBlock undefinedCase = m_out.newBlock();

    // provenType() lets each check be elided/narrowed using what the abstract
    // interpreter has already proven about `child`.
    m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase));

    LBasicBlock lastNext = m_out.appendTo(cellCase, objectCase);
    m_out.branch(isObject(value, provenType(child)), unsure(objectCase), unsure(notObjectCase));

    m_out.appendTo(objectCase, functionCase);
    m_out.branch(
        isFunction(value, provenType(child) & SpecObject),
        unsure(functionCase), unsure(notFunctionCase));

    m_out.appendTo(functionCase, notFunctionCase);
    functor(TypeofType::Function);

    // "Exotic" objects (call traps, masquerades-as-undefined) need a runtime
    // call to classify correctly.
    m_out.appendTo(notFunctionCase, reallyObjectCase);
    m_out.branch(
        isExoticForTypeof(value, provenType(child) & (SpecObject - SpecFunction)),
        rarely(slowPath), usually(reallyObjectCase));

    m_out.appendTo(reallyObjectCase, slowPath);
    functor(TypeofType::Object);

    m_out.appendTo(slowPath, unreachable);
    VM& vm = this->vm();
    LValue result = lazySlowPath(
        [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
            return createLazyCallGenerator(vm,
                operationTypeOfObjectAsTypeofType, locations[0].directGPR(),
                CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
        }, value);
    // Re-route the runtime's answer back into the corresponding fast-path case.
    Vector<SwitchCase, 3> cases;
    cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Undefined)), undefinedCase));
    cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Object)), reallyObjectCase));
    cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Function)), functionCase));
    m_out.switchInstruction(m_out.castToInt32(result), cases, unreachable, Weight());

    // The runtime only ever answers one of the three cases above.
    m_out.appendTo(unreachable, notObjectCase);
    m_out.unreachable();

    m_out.appendTo(notObjectCase, stringCase);
    m_out.branch(
        isString(value, provenType(child) & (SpecCell - SpecObject)),
        unsure(stringCase), unsure(notStringCase));

    m_out.appendTo(stringCase, notStringCase);
    functor(TypeofType::String);

    m_out.appendTo(notStringCase, bigIntCase);
    m_out.branch(
        isBigInt(value, provenType(child) & (SpecCell - SpecObject - SpecString)),
        unsure(bigIntCase), unsure(symbolCase));

    m_out.appendTo(bigIntCase, symbolCase);
    functor(TypeofType::BigInt);

    m_out.appendTo(symbolCase, notCellCase);
    functor(TypeofType::Symbol);

    m_out.appendTo(notCellCase, numberCase);
    m_out.branch(
        isNumber(value, provenType(child) & ~SpecCell),
        unsure(numberCase), unsure(notNumberCase));

    m_out.appendTo(numberCase, notNumberCase);
    functor(TypeofType::Number);

    // typeof null is "object", so null funnels into reallyObjectCase.
    m_out.appendTo(notNumberCase, notNullCase);
    LValue isNull;
    if (provenType(child) & SpecOther)
        isNull = m_out.equal(value, m_out.constInt64(ValueNull));
    else
        isNull = m_out.booleanFalse;
    m_out.branch(isNull, unsure(reallyObjectCase), unsure(notNullCase));

    m_out.appendTo(notNullCase, booleanCase);
    m_out.branch(
        isBoolean(value, provenType(child) & ~(SpecCell | SpecFullNumber)),
        unsure(booleanCase), unsure(undefinedCase));

    m_out.appendTo(booleanCase, undefinedCase);
    functor(TypeofType::Boolean);

    m_out.appendTo(undefinedCase, lastNext);
    functor(TypeofType::Undefined);
}
14689
14690 TypedPointer pointerIntoTypedArray(LValue storage, LValue index, TypedArrayType type)
14691 {
14692 LValue offset = m_out.shl(m_out.zeroExtPtr(index), m_out.constIntPtr(logElementSize(type)));
14693
14694 return TypedPointer(
14695 m_heaps.typedArrayProperties,
14696 m_out.add(
14697 storage,
14698 offset
14699 ));
14700 }
14701
14702 LValue loadFromIntTypedArray(TypedPointer pointer, TypedArrayType type)
14703 {
14704 switch (elementSize(type)) {
14705 case 1:
14706 return isSigned(type) ? m_out.load8SignExt32(pointer) : m_out.load8ZeroExt32(pointer);
14707 case 2:
14708 return isSigned(type) ? m_out.load16SignExt32(pointer) : m_out.load16ZeroExt32(pointer);
14709 case 4:
14710 return m_out.load32(pointer);
14711 default:
14712 DFG_CRASH(m_graph, m_node, "Bad element size");
14713 }
14714 }
14715
14716 Output::StoreType storeType(TypedArrayType type)
14717 {
14718 if (isInt(type)) {
14719 switch (elementSize(type)) {
14720 case 1:
14721 return Output::Store32As8;
14722 case 2:
14723 return Output::Store32As16;
14724 case 4:
14725 return Output::Store32;
14726 default:
14727 DFG_CRASH(m_graph, m_node, "Bad element size");
14728 return Output::Store32;
14729 }
14730 }
14731 switch (type) {
14732 case TypeFloat32:
14733 return Output::StoreFloat;
14734 case TypeFloat64:
14735 return Output::StoreDouble;
14736 default:
14737 DFG_CRASH(m_graph, m_node, "Bad typed array type");
14738 }
14739 }
14740
// Materializes the result of an integer typed-array load. Small or signed
// element types always fit in int32. An unsigned 32-bit load can exceed
// INT32_MAX, so we either speculate it is non-negative (when profiling allows),
// widen to Int52, or fall back to a double.
void setIntTypedArrayLoadResult(LValue result, TypedArrayType type, bool canSpeculate = false)
{
    if (elementSize(type) < 4 || isSigned(type)) {
        setInt32(result);
        return;
    }

    if (m_node->shouldSpeculateInt32() && canSpeculate) {
        // OSR exit if the unsigned value doesn't fit in a signed int32
        // (i.e. its sign bit is set).
        speculate(
            Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
        setInt32(result);
        return;
    }

    if (m_node->shouldSpeculateInt52()) {
        // Zero-extend: the loaded value is an unsigned 32-bit quantity.
        setStrictInt52(m_out.zeroExt(result, Int64));
        return;
    }

    setDouble(m_out.unsignedToDouble(result));
}
14762
// Produces the int32 value to store into an integer typed array from `edge`.
// When isClamped is set (Uint8Clamped-style stores), the value is clamped to
// [0, 255] instead of being truncated.
LValue getIntTypedArrayStoreOperand(Edge edge, bool isClamped = false)
{
    LValue intValue;
    switch (edge.useKind()) {
    case Int52RepUse:
    case Int32Use: {
        if (edge.useKind() == Int32Use)
            intValue = lowInt32(edge);
        else
            intValue = m_out.castToInt32(lowStrictInt52(edge));

        if (isClamped) {
            // Clamp to [0, 255]: negatives become 0 (via the anchored zero on
            // the branch to continuation), values > 255 become 255.
            LBasicBlock atLeastZero = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            Vector<ValueFromBlock, 2> intValues;
            intValues.append(m_out.anchor(m_out.int32Zero));
            m_out.branch(
                m_out.lessThan(intValue, m_out.int32Zero),
                unsure(continuation), unsure(atLeastZero));

            LBasicBlock lastNext = m_out.appendTo(atLeastZero, continuation);

            intValues.append(m_out.anchor(m_out.select(
                m_out.greaterThan(intValue, m_out.constInt32(255)),
                m_out.constInt32(255),
                intValue)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            intValue = m_out.phi(Int32, intValues);
        }
        break;
    }

    case DoubleRepUse: {
        LValue doubleValue = lowDouble(edge);

        if (isClamped) {
            // Clamp to [0, 255]. Note that the first branch also routes NaN
            // (unordered compare) to the zero result.
            LBasicBlock atLeastZero = m_out.newBlock();
            LBasicBlock withinRange = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            Vector<ValueFromBlock, 3> intValues;
            intValues.append(m_out.anchor(m_out.int32Zero));
            m_out.branch(
                m_out.doubleLessThanOrUnordered(doubleValue, m_out.doubleZero),
                unsure(continuation), unsure(atLeastZero));

            LBasicBlock lastNext = m_out.appendTo(atLeastZero, withinRange);
            intValues.append(m_out.anchor(m_out.constInt32(255)));
            m_out.branch(
                m_out.doubleGreaterThan(doubleValue, m_out.constDouble(255)),
                unsure(continuation), unsure(withinRange));

            m_out.appendTo(withinRange, continuation);
            intValues.append(m_out.anchor(m_out.doubleToInt(doubleValue)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            intValue = m_out.phi(Int32, intValues);
        } else
            intValue = doubleToInt32(doubleValue);
        break;
    }

    default:
        DFG_CRASH(m_graph, m_node, "Bad use kind");
    }

    return intValue;
}
14835
// Converts a double to int32. Values within [low, high] use the fast hardware
// conversion (signed or unsigned per isSigned); anything outside that range
// (including NaN, which fails both ordered compares) takes the slow C call
// implementing the full JS ToInt32 semantics.
LValue doubleToInt32(LValue doubleValue, double low, double high, bool isSigned = true)
{
    LBasicBlock greatEnough = m_out.newBlock();
    LBasicBlock withinRange = m_out.newBlock();
    LBasicBlock slowPath = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    Vector<ValueFromBlock, 2> results;

    m_out.branch(
        m_out.doubleGreaterThanOrEqual(doubleValue, m_out.constDouble(low)),
        unsure(greatEnough), unsure(slowPath));

    LBasicBlock lastNext = m_out.appendTo(greatEnough, withinRange);
    m_out.branch(
        m_out.doubleLessThanOrEqual(doubleValue, m_out.constDouble(high)),
        unsure(withinRange), unsure(slowPath));

    m_out.appendTo(withinRange, slowPath);
    LValue fastResult;
    if (isSigned)
        fastResult = m_out.doubleToInt(doubleValue);
    else
        fastResult = m_out.doubleToUInt(doubleValue);
    results.append(m_out.anchor(fastResult));
    m_out.jump(continuation);

    m_out.appendTo(slowPath, continuation);
    results.append(m_out.anchor(m_out.call(Int32, m_out.operation(operationToInt32), doubleValue)));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    return m_out.phi(Int32, results);
}
14870
14871 LValue doubleToInt32(LValue doubleValue)
14872 {
14873#if CPU(ARM64)
14874 if (MacroAssemblerARM64::supportsDoubleToInt32ConversionUsingJavaScriptSemantics()) {
14875 PatchpointValue* patchpoint = m_out.patchpoint(Int32);
14876 patchpoint->append(ConstrainedValue(doubleValue, B3::ValueRep::SomeRegister));
14877 patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
14878 jit.convertDoubleToInt32UsingJavaScriptSemantics(params[1].fpr(), params[0].gpr());
14879 });
14880 patchpoint->effects = Effects::none();
14881 return patchpoint;
14882 }
14883#endif
14884
14885 if (hasSensibleDoubleToInt())
14886 return sensibleDoubleToInt32(doubleValue);
14887
14888 double limit = pow(2, 31) - 1;
14889 return doubleToInt32(doubleValue, -limit, limit);
14890 }
14891
// Double-to-int32 conversion for CPUs whose truncating convert produces a
// recognizable sentinel on failure. If the fast conversion yields 0x80000000 —
// presumably the hardware's out-of-range/NaN result (e.g. x86 cvttsd2si); the
// value could also legitimately be INT32_MIN, which the slow call recomputes
// correctly — take the rarely-executed slow path.
LValue sensibleDoubleToInt32(LValue doubleValue)
{
    LBasicBlock slowPath = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    LValue fastResultValue = m_out.doubleToInt(doubleValue);
    ValueFromBlock fastResult = m_out.anchor(fastResultValue);
    m_out.branch(
        m_out.equal(fastResultValue, m_out.constInt32(0x80000000)),
        rarely(slowPath), usually(continuation));

    LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
    ValueFromBlock slowResult = m_out.anchor(
        m_out.call(Int32, m_out.operation(operationToInt32SensibleSlow), doubleValue));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    return m_out.phi(Int32, fastResult, slowResult);
}
14911
14912 // This is a mechanism for creating a code generator that fills in a gap in the code using our
14913 // own MacroAssembler. This is useful for slow paths that involve a lot of code and we don't want
14914 // to pay the price of B3 optimizing it. A lazy slow path will only be generated if it actually
14915 // executes. On the other hand, a lazy slow path always incurs the cost of two additional jumps.
14916 // Also, the lazy slow path's register allocation state is slaved to whatever B3 did, so you
14917 // have to use a ScratchRegisterAllocator to try to use some unused registers and you may have
14918 // to spill to top of stack if there aren't enough registers available.
14919 //
14920 // Lazy slow paths involve three different stages of execution. Each stage has unique
14921 // capabilities and knowledge. The stages are:
14922 //
14923 // 1) DFG->B3 lowering, i.e. code that runs in this phase. Lowering is the last time you will
14924 // have access to LValues. If there is an LValue that needs to be fed as input to a lazy slow
14925 // path, then you must pass it as an argument here (as one of the varargs arguments after the
14926 // functor). But, lowering doesn't know which registers will be used for those LValues. Hence
14927 // you pass a lambda to lazySlowPath() and that lambda will run during stage (2):
14928 //
14929 // 2) FTLCompile.cpp's fixFunctionBasedOnStackMaps. This code is the only stage at which we know
14930 // the mapping from arguments passed to this method in (1) and the registers that B3
14931 // selected for those arguments. You don't actually want to generate any code here, since then
14932 // the slow path wouldn't actually be lazily generated. Instead, you want to save the
14933 // registers being used for the arguments and defer code generation to stage (3) by creating
14934 // and returning a LazySlowPath::Generator:
14935 //
14936 // 3) LazySlowPath's generate() method. This code runs in response to the lazy slow path
14937 // executing for the first time. It will call the generator you created in stage (2).
14938 //
14939 // Note that each time you invoke stage (1), stage (2) may be invoked zero, one, or many times.
14940 // Stage (2) will usually be invoked once for stage (1). But, B3 may kill the code, in which
14941 // case stage (2) won't run. B3 may duplicate the code (for example via tail duplication),
14942 // leading to many calls to your stage (2) lambda. Stage (3) may be called zero or once for each
14943 // stage (2). It will be called zero times if the slow path never runs. This is what you hope for
14944 // whenever you use the lazySlowPath() mechanism.
14945 //
14946 // A typical use of lazySlowPath() will look like the example below, which just creates a slow
14947 // path that adds some value to the input and returns it.
14948 //
14949 // // Stage (1) is here. This is your last chance to figure out which LValues to use as inputs.
14950 // // Notice how we pass "input" as an argument to lazySlowPath().
14951 // LValue input = ...;
14952 // int addend = ...;
14953 // LValue output = lazySlowPath(
14954 // [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
14955 // // Stage (2) is here. This is your last chance to figure out which registers are used
14956 // // for which values. Location zero is always the return value. You can ignore it if
14957 // // you don't want to return anything. Location 1 is the register for the first
14958 // // argument to the lazySlowPath(), i.e. "input". Note that the Location object could
14959 // // also hold an FPR, if you are passing a double.
14960 // GPRReg outputGPR = locations[0].directGPR();
14961 // GPRReg inputGPR = locations[1].directGPR();
14962 // return LazySlowPath::createGenerator(
14963 // [=] (CCallHelpers& jit, LazySlowPath::GenerationParams& params) {
14964 // // Stage (3) is here. This is when you generate code. You have access to the
14965 // // registers you collected in stage (2) because this lambda closes over those
14966 // // variables (outputGPR and inputGPR). You also have access to whatever extra
14967 // // data you collected in stage (1), such as the addend in this case.
14968 // jit.add32(TrustedImm32(addend), inputGPR, outputGPR);
14969 // // You have to end by jumping to done. There is nothing to fall through to.
14970 // // You can also jump to the exception handler (see LazySlowPath.h for more
14971 // // info). Note that currently you cannot OSR exit.
14972 // params.doneJumps.append(jit.jump());
14973 // });
14974 // },
14975 // input);
14976 //
14977 // You can basically pass as many inputs as you like, either using this varargs form, or by
14978 // passing a Vector of LValues.
14979 //
14980 // Note that if your slow path is only doing a call, you can use the createLazyCallGenerator()
14981 // helper. For example:
14982 //
14983 // LValue input = ...;
14984 // LValue output = lazySlowPath(
14985 // [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
14986 // return createLazyCallGenerator(
14987 // operationDoThings, locations[0].directGPR(), locations[1].directGPR());
14988 // }, input);
14989 //
14990 // Finally, note that all of the lambdas - both the stage (2) lambda and the stage (3) lambda -
14991 // run after the function that created them returns. Hence, you should not use by-reference
14992 // capture (i.e. [&]) in any of these lambdas.
// Varargs convenience overload: packs the LValue arguments into a Vector and
// forwards to the Vector-taking lazySlowPath().
template<typename Functor, typename... ArgumentTypes>
PatchpointValue* lazySlowPath(const Functor& functor, ArgumentTypes... arguments)
{
    return lazySlowPath(functor, Vector<LValue>{ arguments... });
}
14998
// Creates a lazily-generated slow path (see the large comment above for the
// three-stage protocol). Emits a patchpoint whose generator records argument
// locations, plants a patchable jump, and registers a late path + link task
// that wire the jump to the lazy-slow-path generation thunk. `functor` runs at
// stage (2) and must return the stage-(3) code generator.
template<typename Functor>
PatchpointValue* lazySlowPath(const Functor& functor, const Vector<LValue>& userArguments)
{
    CodeOrigin origin = m_node->origin.semantic;

    PatchpointValue* result = m_out.patchpoint(B3::Int64);
    for (LValue arg : userArguments)
        result->append(ConstrainedValue(arg, B3::ValueRep::SomeRegister));

    RefPtr<PatchpointExceptionHandle> exceptionHandle =
        preparePatchpointForExceptions(result);

    result->clobber(RegisterSet::macroScratchRegisters());
    State* state = &m_ftlState;

    result->setGenerator(
        [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            // Stage (2): translate B3's value reps into Locations and hand
            // them to the user's functor.
            Vector<Location> locations;
            for (const B3::ValueRep& rep : params)
                locations.append(Location::forValueRep(rep));

            RefPtr<LazySlowPath::Generator> generator = functor(locations);

            // The patchable jump is initially linked (below, via a late path)
            // to the generation thunk; once the slow path is generated it gets
            // repatched to jump straight there. `done` is where the slow path
            // returns to.
            CCallHelpers::PatchableJump patchableJump = jit.patchableJump();
            CCallHelpers::Label done = jit.label();

            RegisterSet usedRegisters = params.unavailableRegisters();

            RefPtr<ExceptionTarget> exceptionTarget =
                exceptionHandle->scheduleExitCreation(params);

            // FIXME: As part of handling exceptions, we need to create a concrete OSRExit here.
            // Doing so should automagically register late paths that emit exit thunks.

            params.addLatePath(
                [=] (CCallHelpers& jit) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);
                    patchableJump.m_jump.link(&jit);
                    // Reserve a slot for this lazy slow path; the index is
                    // pushed so the thunk knows which one to generate.
                    unsigned index = state->jitCode->lazySlowPaths.size();
                    state->jitCode->lazySlowPaths.append(nullptr);
                    jit.pushToSaveImmediateWithoutTouchingRegisters(
                        CCallHelpers::TrustedImm32(index));
                    CCallHelpers::Jump generatorJump = jit.jump();

                    // Note that so long as we're here, we don't really know if our late path
                    // runs before or after any other late paths that we might depend on, like
                    // the exception thunk.

                    RefPtr<JITCode> jitCode = state->jitCode;
                    VM* vm = &state->graph.m_vm;

                    jit.addLinkTask(
                        [=] (LinkBuffer& linkBuffer) {
                            linkBuffer.link(generatorJump,
                                CodeLocationLabel<JITThunkPtrTag>(vm->getCTIStub(lazySlowPathGenerationThunkGenerator).code()));

                            std::unique_ptr<LazySlowPath> lazySlowPath = std::make_unique<LazySlowPath>();

                            auto linkedPatchableJump = CodeLocationJump<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(patchableJump));

                            CodeLocationLabel<JSInternalPtrTag> linkedDone = linkBuffer.locationOf<JSInternalPtrTag>(done);

                            CallSiteIndex callSiteIndex =
                                jitCode->common.addUniqueCallSiteIndex(origin);

                            lazySlowPath->initialize(
                                linkedPatchableJump, linkedDone,
                                exceptionTarget->label(linkBuffer), usedRegisters,
                                callSiteIndex, generator);

                            jitCode->lazySlowPaths[index] = WTFMove(lazySlowPath);
                        });
                });
        });
    return result;
}
15075
    // Emits a speculation check: an OSR exit of the given kind, taken when
    // failCondition evaluates to true at runtime. lowValue/highValue describe
    // the value being speculated on, for exit-state reconstruction.
    void speculate(
        ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition)
    {
        appendOSRExit(kind, lowValue, highValue, failCondition, m_origin);
    }

    // Overload identifying the speculated-on value by a value-profile locator
    // rather than a DFG node.
    void speculate(
        ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, LValue failCondition)
    {
        appendOSRExit(kind, lowValue, profile, failCondition, m_origin);
    }

    // Unconditionally OSR-exits (the exit condition is the constant true) and
    // marks the abstract state dead so no further code is lowered for this path.
    void terminate(ExitKind kind)
    {
        speculate(kind, noValue(), nullptr, m_out.booleanTrue);
        didAlreadyTerminate();
    }

    // Invalidates the abstract interpreter state for the current block,
    // signalling that execution cannot continue past this point.
    void didAlreadyTerminate()
    {
        m_state.setIsValid(false);
    }
15098
    // Narrows the abstract interpreter's view of highValue to typesPassedThrough
    // without emitting any runtime check — used when the check is implied by
    // surrounding code (e.g. a constant already known to satisfy it).
    void simulatedTypeCheck(Edge highValue, SpeculatedType typesPassedThrough)
    {
        m_interpreter.filter(highValue, typesPassedThrough);
    }

    // Public entry point for a type speculation; see appendTypeCheck below.
    void typeCheck(
        FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough,
        LValue failCondition, ExitKind exitKind = BadType)
    {
        appendTypeCheck(lowValue, highValue, typesPassedThrough, failCondition, exitKind);
    }

    // Emits an OSR exit taken when failCondition holds, then tells the abstract
    // interpreter that only typesPassedThrough can flow past this point. Skipped
    // entirely when the interpreter already proves the check unnecessary.
    void appendTypeCheck(
        FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough,
        LValue failCondition, ExitKind exitKind)
    {
        if (!m_interpreter.needsTypeCheck(highValue, typesPassedThrough))
            return;
        ASSERT(mayHaveTypeCheck(highValue.useKind()));
        appendOSRExit(exitKind, lowValue, highValue.node(), failCondition, m_origin);
        m_interpreter.filter(highValue, typesPassedThrough);
    }
15121
    // Lowers @edge to an unboxed int32 LValue, speculating (via OSR exit) that
    // the value is an int32. With AutomaticOperandSpeculation the edge must be
    // Int32Use/KnownInt32Use; ManualOperandSpeculation lets the caller arrange
    // its own checks.
    LValue lowInt32(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));

        if (edge->hasConstant()) {
            JSValue value = edge->asJSValue();
            simulatedTypeCheck(edge, SpecInt32Only);
            if (!value.isInt32()) {
                // Contradictory speculation: this constant can never pass an
                // int32 check, so any path reaching here must exit.
                if (mayHaveTypeCheck(edge.useKind()))
                    terminate(Uncountable);
                return m_out.int32Zero;
            }
            LValue result = m_out.constInt32(value.asInt32());
            result->setOrigin(B3::Origin(edge.node()));
            return result;
        }

        // Reuse an already-lowered int32 representation of this node if any.
        LoweredNodeValue value = m_int32Values.get(edge.node());
        if (isValid(value)) {
            simulatedTypeCheck(edge, SpecInt32Only);
            return value.value();
        }

        // Otherwise convert from an Int52 representation, checking that the
        // value actually fits in 32 bits.
        value = m_strictInt52Values.get(edge.node());
        if (isValid(value))
            return strictInt52ToInt32(edge, value.value());

        value = m_int52Values.get(edge.node());
        if (isValid(value))
            return strictInt52ToInt32(edge, int52ToStrictInt52(value.value()));

        // Or unbox from a boxed JSValue, speculating that it holds an int32.
        value = m_jsValueValues.get(edge.node());
        if (isValid(value)) {
            LValue boxedResult = value.value();
            FTL_TYPE_CHECK(
                jsValueValue(boxedResult), edge, SpecInt32Only, isNotInt32(boxedResult));
            LValue result = unboxInt32(boxedResult);
            setInt32(edge.node(), result);
            return result;
        }

        // No usable representation: only reachable when the value is proven to
        // never be an int32, so exit.
        DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecInt32Only), provenType(edge));
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.int32Zero;
    }
15168
    // Int52 values come in two machine representations: StrictInt52 holds the
    // integer directly in the low bits; Int52 holds it shifted left by
    // JSValue::int52ShiftAmount (see strictInt52ToInt52/int52ToStrictInt52).
    enum Int52Kind { StrictInt52, Int52 };
    // Lowers @edge (which must be Int52RepUse) to the requested Int52
    // representation, converting between the shifted and strict forms as needed.
    LValue lowInt52(Edge edge, Int52Kind kind)
    {
        DFG_ASSERT(m_graph, m_node, edge.useKind() == Int52RepUse, edge.useKind());

        LoweredNodeValue value;

        switch (kind) {
        case Int52:
            value = m_int52Values.get(edge.node());
            if (isValid(value))
                return value.value();

            value = m_strictInt52Values.get(edge.node());
            if (isValid(value))
                return strictInt52ToInt52(value.value());
            break;

        case StrictInt52:
            value = m_strictInt52Values.get(edge.node());
            if (isValid(value))
                return value.value();

            value = m_int52Values.get(edge.node());
            if (isValid(value))
                return int52ToStrictInt52(value.value());
            break;
        }

        // No Int52 representation exists; only reachable on proven-dead paths.
        DFG_ASSERT(m_graph, m_node, !provenType(edge), provenType(edge));
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.int64Zero;
    }

    // Convenience wrapper: lower to the shifted (Int52) representation.
    LValue lowInt52(Edge edge)
    {
        return lowInt52(edge, Int52);
    }

    // Convenience wrapper: lower to the unshifted (StrictInt52) representation.
    LValue lowStrictInt52(Edge edge)
    {
        return lowInt52(edge, StrictInt52);
    }
15213
    // Prefer the strict representation unless a shifted Int52 value for this
    // node already exists (in which case reusing it avoids a conversion).
    bool betterUseStrictInt52(Node* node)
    {
        return !isValid(m_int52Values.get(node));
    }
    bool betterUseStrictInt52(Edge edge)
    {
        return betterUseStrictInt52(edge.node());
    }
    template<typename T>
    Int52Kind bestInt52Kind(T node)
    {
        return betterUseStrictInt52(node) ? StrictInt52 : Int52;
    }
    // Returns the other Int52 representation kind.
    Int52Kind opposite(Int52Kind kind)
    {
        switch (kind) {
        case Int52:
            return StrictInt52;
        case StrictInt52:
            return Int52;
        }
        DFG_CRASH(m_graph, m_node, "Bad use kind");
        return Int52;
    }

    // Lowers @edge in whichever Int52 representation is cheapest, reporting the
    // chosen kind through @kind.
    LValue lowWhicheverInt52(Edge edge, Int52Kind& kind)
    {
        kind = bestInt52Kind(edge);
        return lowInt52(edge, kind);
    }
15244
    // Lowers @edge to a cell pointer, speculating that the value is a cell.
    LValue lowCell(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || DFG::isCell(edge.useKind()), edge.useKind());

        if (edge->op() == JSConstant) {
            FrozenValue* value = edge->constant();
            simulatedTypeCheck(edge, SpecCellCheck);
            if (!value->value().isCell()) {
                // Contradictory speculation: a non-cell constant can never pass.
                if (mayHaveTypeCheck(edge.useKind()))
                    terminate(Uncountable);
                return m_out.intPtrZero;
            }
            LValue result = frozenPointer(value);
            result->setOrigin(B3::Origin(edge.node()));
            return result;
        }

        // Unbox from a boxed JSValue; a cell's boxed form is its pointer, so the
        // checked value can be returned directly.
        LoweredNodeValue value = m_jsValueValues.get(edge.node());
        if (isValid(value)) {
            LValue uncheckedValue = value.value();
            FTL_TYPE_CHECK(
                jsValueValue(uncheckedValue), edge, SpecCellCheck, isNotCell(uncheckedValue));
            return uncheckedValue;
        }

        // No usable representation: only reachable on proven-non-cell paths.
        DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecCellCheck), provenType(edge));
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.intPtrZero;
    }
15275
    // The lowXXX wrappers below each lower the edge to a cell pointer and then
    // speculate (OSR-exit-checked) that the cell is of the named type.
    LValue lowObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse);

        LValue result = lowCell(edge, mode);
        speculateObject(edge, result);
        return result;
    }

    LValue lowRegExpObject(Edge edge)
    {
        LValue result = lowCell(edge);
        speculateRegExpObject(edge, result);
        return result;
    }

    LValue lowMapObject(Edge edge)
    {
        LValue result = lowCell(edge);
        speculateMapObject(edge, result);
        return result;
    }

    LValue lowSetObject(Edge edge)
    {
        LValue result = lowCell(edge);
        speculateSetObject(edge, result);
        return result;
    }

    LValue lowWeakMapObject(Edge edge)
    {
        LValue result = lowCell(edge);
        speculateWeakMapObject(edge, result);
        return result;
    }

    LValue lowWeakSetObject(Edge edge)
    {
        LValue result = lowCell(edge);
        speculateWeakSetObject(edge, result);
        return result;
    }

    LValue lowDataViewObject(Edge edge)
    {
        LValue result = lowCell(edge);
        speculateDataViewObject(edge, result);
        return result;
    }
15326
    // Lowers @edge to a JSString* cell, speculating that it is a string.
    LValue lowString(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == StringUse || edge.useKind() == KnownStringUse || edge.useKind() == StringIdentUse);

        LValue result = lowCell(edge, mode);
        speculateString(edge, result);
        return result;
    }

    // Lowers @edge to the string's StringImpl* (loaded from JSString_value),
    // additionally speculating that the string has an atomized/identified impl.
    LValue lowStringIdent(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == StringIdentUse);

        LValue string = lowString(edge, mode);
        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
        speculateStringIdent(edge, string, stringImpl);
        return stringImpl;
    }

    LValue lowSymbol(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == SymbolUse);

        LValue result = lowCell(edge, mode);
        speculateSymbol(edge, result);
        return result;
    }

    LValue lowBigInt(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BigIntUse);

        LValue result = lowCell(edge, mode);
        speculateBigInt(edge, result);
        return result;
    }

    LValue lowNonNullObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse);

        LValue result = lowCell(edge, mode);
        speculateNonNullObject(edge, result);
        return result;
    }
15372
    // Lowers @edge to an unboxed boolean LValue, speculating that the value is
    // a boolean. Mirrors the structure of lowInt32: constant, cached boolean,
    // then unbox-from-JSValue, then dead-path fallback.
    LValue lowBoolean(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse || edge.useKind() == KnownBooleanUse);

        if (edge->hasConstant()) {
            JSValue value = edge->asJSValue();
            simulatedTypeCheck(edge, SpecBoolean);
            if (!value.isBoolean()) {
                // Contradictory speculation: this constant can never pass.
                if (mayHaveTypeCheck(edge.useKind()))
                    terminate(Uncountable);
                return m_out.booleanFalse;
            }
            LValue result = m_out.constBool(value.asBoolean());
            result->setOrigin(B3::Origin(edge.node()));
            return result;
        }

        LoweredNodeValue value = m_booleanValues.get(edge.node());
        if (isValid(value)) {
            simulatedTypeCheck(edge, SpecBoolean);
            return value.value();
        }

        value = m_jsValueValues.get(edge.node());
        if (isValid(value)) {
            LValue unboxedResult = value.value();
            FTL_TYPE_CHECK(
                jsValueValue(unboxedResult), edge, SpecBoolean, isNotBoolean(unboxedResult));
            LValue result = unboxBoolean(unboxedResult);
            setBoolean(edge.node(), result);
            return result;
        }

        // Only reachable when the value is proven to never be a boolean.
        DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecBoolean), provenType(edge));
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.booleanFalse;
    }
15411
    // Lowers @edge to an unboxed double. The edge must already use a double
    // representation; there is no on-demand conversion here.
    LValue lowDouble(Edge edge)
    {
        DFG_ASSERT(m_graph, m_node, isDouble(edge.useKind()), edge.useKind());

        LoweredNodeValue value = m_doubleValues.get(edge.node());
        if (isValid(value))
            return value.value();
        // Only reachable on proven-dead paths.
        DFG_ASSERT(m_graph, m_node, !provenType(edge), provenType(edge));
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.doubleZero;
    }
15424
15425 LValue lowJSValue(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15426 {
15427 DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse, m_node->op(), edge.useKind());
15428 DFG_ASSERT(m_graph, m_node, !isDouble(edge.useKind()), m_node->op(), edge.useKind());
15429 DFG_ASSERT(m_graph, m_node, edge.useKind() != Int52RepUse, m_node->op(), edge.useKind());
15430
15431 if (edge->hasConstant()) {
15432 LValue result = m_out.constInt64(JSValue::encode(edge->asJSValue()));
15433 result->setOrigin(B3::Origin(edge.node()));
15434 return result;
15435 }
15436
15437 LoweredNodeValue value = m_jsValueValues.get(edge.node());
15438 if (isValid(value))
15439 return value.value();
15440
15441 value = m_int32Values.get(edge.node());
15442 if (isValid(value)) {
15443 LValue result = boxInt32(value.value());
15444 setJSValue(edge.node(), result);
15445 return result;
15446 }
15447
15448 value = m_booleanValues.get(edge.node());
15449 if (isValid(value)) {
15450 LValue result = boxBoolean(value.value());
15451 setJSValue(edge.node(), result);
15452 return result;
15453 }
15454
15455 DFG_CRASH(m_graph, m_node, makeString("Value not defined: ", String::number(edge.node()->index())).ascii().data());
15456 return 0;
15457 }
15458
    // Lowers @edge as a boxed JSValue while speculating that it is NOT a cell.
    LValue lowNotCell(Edge edge)
    {
        LValue result = lowJSValue(edge, ManualOperandSpeculation);
        FTL_TYPE_CHECK(jsValueValue(result), edge, ~SpecCellCheck, isCell(result));
        return result;
    }

    // Lowers @edge to a storage (butterfly/backing-store) pointer, caching the
    // result. Falls back to lowCell for nodes not already lowered as storage.
    LValue lowStorage(Edge edge)
    {
        LoweredNodeValue value = m_storageValues.get(edge.node());
        if (isValid(value))
            return value.value();

        LValue result = lowCell(edge);
        setStorage(edge.node(), result);
        return result;
    }
15476
    // Truncates a StrictInt52 to int32, speculating (by sign-extend round-trip
    // comparison) that no information is lost; caches the int32 on the node.
    LValue strictInt52ToInt32(Edge edge, LValue value)
    {
        LValue result = m_out.castToInt32(value);
        FTL_TYPE_CHECK(
            noValue(), edge, SpecInt32Only,
            m_out.notEqual(m_out.signExt32To64(result), value));
        setInt32(edge.node(), result);
        return result;
    }

    LValue strictInt52ToDouble(LValue value)
    {
        return m_out.intToDouble(value);
    }

    // Boxes a StrictInt52 as a JSValue: box as int32 when the value fits in 32
    // bits (checked by sign-extend round trip), otherwise box as a double.
    LValue strictInt52ToJSValue(LValue value)
    {
        LBasicBlock isInt32 = m_out.newBlock();
        LBasicBlock isDouble = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Vector<ValueFromBlock, 2> results;

        LValue int32Value = m_out.castToInt32(value);
        m_out.branch(
            m_out.equal(m_out.signExt32To64(int32Value), value),
            unsure(isInt32), unsure(isDouble));

        LBasicBlock lastNext = m_out.appendTo(isInt32, isDouble);

        results.append(m_out.anchor(boxInt32(int32Value)));
        m_out.jump(continuation);

        m_out.appendTo(isDouble, continuation);

        results.append(m_out.anchor(boxDouble(m_out.intToDouble(value))));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int64, results);
    }

    // The shifted Int52 form is the strict form shifted left by
    // JSValue::int52ShiftAmount; converting back uses an arithmetic shift.
    LValue strictInt52ToInt52(LValue value)
    {
        return m_out.shl(value, m_out.constInt64(JSValue::int52ShiftAmount));
    }

    LValue int52ToStrictInt52(LValue value)
    {
        return m_out.aShr(value, m_out.constInt64(JSValue::int52ShiftAmount));
    }
15528
    // JSVALUE64 encoding helpers. Boxed int32s are formed by adding
    // m_tagTypeNumber (see boxInt32), which makes them the numerically largest
    // encodings — so an unsigned compare against m_tagTypeNumber tests int32-ness.
    // Each predicate first tries to fold statically via isProvenValue.
    LValue isInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecInt32Only))
            return proven;
        return m_out.aboveOrEqual(jsValue, m_tagTypeNumber);
    }
    LValue isNotInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecInt32Only))
            return proven;
        return m_out.below(jsValue, m_tagTypeNumber);
    }
    // Unboxing an int32 is just truncation; the tag lives in the high bits.
    LValue unboxInt32(LValue jsValue)
    {
        return m_out.castToInt32(jsValue);
    }
    LValue boxInt32(LValue value)
    {
        return m_out.add(m_out.zeroExt(value, Int64), m_tagTypeNumber);
    }

    // Doubles are the only values with some-but-not-all m_tagTypeNumber bits
    // set, so "no tag bits set" means cell-or-misc (non-number).
    LValue isCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecCellCheck | SpecMisc))
            return proven;
        return m_out.testIsZero64(jsValue, m_tagTypeNumber);
    }
    LValue isNotCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~(SpecCellCheck | SpecMisc)))
            return proven;
        return m_out.testNonZero64(jsValue, m_tagTypeNumber);
    }

    // Boxed doubles are the double's bit pattern offset by -m_tagTypeNumber;
    // unboxing adds the offset back and bit-casts. Optionally reports the raw
    // integer form through @unboxedAsInt.
    LValue unboxDouble(LValue jsValue, LValue* unboxedAsInt = nullptr)
    {
        LValue asInt = m_out.add(jsValue, m_tagTypeNumber);
        if (unboxedAsInt)
            *unboxedAsInt = asInt;
        return m_out.bitCast(asInt, Double);
    }
    LValue boxDouble(LValue doubleValue)
    {
        return m_out.sub(m_out.bitCast(doubleValue, Int64), m_tagTypeNumber);
    }
15574
    // Converts a boxed JSValue to StrictInt52, speculating that it is an int32
    // or a double with an exact integer value in Int52 range. The int32 path is
    // a sign extension; the double path calls out to a conversion operation
    // that returns JSValue::notInt52 on failure, which triggers an OSR exit.
    LValue jsValueToStrictInt52(Edge edge, LValue boxedValue)
    {
        LBasicBlock intCase = m_out.newBlock();
        LBasicBlock doubleCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Fold the int32 test statically when the abstract interpreter already
        // proves the value is (or is not) an int32.
        LValue isNotInt32;
        if (!m_interpreter.needsTypeCheck(edge, SpecInt32Only))
            isNotInt32 = m_out.booleanFalse;
        else if (!m_interpreter.needsTypeCheck(edge, ~SpecInt32Only))
            isNotInt32 = m_out.booleanTrue;
        else
            isNotInt32 = this->isNotInt32(boxedValue);
        m_out.branch(isNotInt32, unsure(doubleCase), unsure(intCase));

        LBasicBlock lastNext = m_out.appendTo(intCase, doubleCase);

        ValueFromBlock intToInt52 = m_out.anchor(
            m_out.signExt32To64(unboxInt32(boxedValue)));
        m_out.jump(continuation);

        m_out.appendTo(doubleCase, continuation);

        LValue possibleResult = m_out.call(
            Int64, m_out.operation(operationConvertBoxedDoubleToInt52), boxedValue);
        FTL_TYPE_CHECK(
            jsValueValue(boxedValue), edge, SpecInt32Only | SpecAnyIntAsDouble,
            m_out.equal(possibleResult, m_out.constInt64(JSValue::notInt52)));

        ValueFromBlock doubleToInt52 = m_out.anchor(possibleResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        return m_out.phi(Int64, intToInt52, doubleToInt52);
    }

    // Converts an unboxed double to StrictInt52 via an out-of-line operation;
    // exits with Int52Overflow if the double is not an exact Int52-range integer.
    LValue doubleToStrictInt52(Edge edge, LValue value)
    {
        LValue possibleResult = m_out.call(
            Int64, m_out.operation(operationConvertDoubleToInt52), value);
        FTL_TYPE_CHECK_WITH_EXIT_KIND(Int52Overflow,
            doubleValue(value), edge, SpecAnyIntAsDouble,
            m_out.equal(possibleResult, m_out.constInt64(JSValue::notInt52)));

        return possibleResult;
    }
15622
    // Converts a double to int32, exiting with Overflow if the conversion is
    // not exact (detected by converting back and comparing, with unordered
    // treated as a mismatch so NaN exits). When shouldCheckNegativeZero is set,
    // a result of 0 with the sign bit set in the original double (i.e. -0.0)
    // exits with NegativeZero, since -0.0 is not representable as int32.
    LValue convertDoubleToInt32(LValue value, bool shouldCheckNegativeZero)
    {
        LValue integerValue = m_out.doubleToInt(value);
        LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue);
        LValue valueNotConvertibleToInteger = m_out.doubleNotEqualOrUnordered(value, integerValueConvertedToDouble);
        speculate(Overflow, FormattedValue(DataFormatDouble, value), m_node, valueNotConvertibleToInteger);

        if (shouldCheckNegativeZero) {
            LBasicBlock valueIsZero = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            m_out.branch(m_out.isZero32(integerValue), unsure(valueIsZero), unsure(continuation));

            LBasicBlock lastNext = m_out.appendTo(valueIsZero, continuation);

            // The sign bit of the double is the sign bit of its Int64 bit pattern.
            LValue doubleBitcastToInt64 = m_out.bitCast(value, Int64);
            LValue signBitSet = m_out.lessThan(doubleBitcastToInt64, m_out.constInt64(0));

            speculate(NegativeZero, FormattedValue(DataFormatDouble, value), m_node, signBitSet);
            m_out.jump(continuation);
            m_out.appendTo(continuation, lastNext);
        }
        return integerValue;
    }
15646
    // JSVALUE64 predicates over boxed values. Each first tries to fold the test
    // statically from the proven SpeculatedType via isProvenValue.
    LValue isNumber(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecFullNumber))
            return proven;
        return isNotCellOrMisc(jsValue);
    }
    LValue isNotNumber(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecFullNumber))
            return proven;
        return isCellOrMisc(jsValue);
    }

    // Cells are the only values with no m_tagMask bits set.
    LValue isNotCell(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecCellCheck))
            return proven;
        return m_out.testNonZero64(jsValue, m_tagMask);
    }

    LValue isCell(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecCellCheck))
            return proven;
        return m_out.testIsZero64(jsValue, m_tagMask);
    }

    // "Misc" values (booleans, null, undefined, the empty value) all encode at
    // or below TagBitTypeOther|TagBitBool|TagBitUndefined.
    LValue isNotMisc(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecMisc))
            return proven;
        return m_out.above(value, m_out.constInt64(TagBitTypeOther | TagBitBool | TagBitUndefined));
    }

    LValue isMisc(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecMisc))
            return proven;
        return m_out.logicalNot(isNotMisc(value));
    }

    // ValueFalse and ValueTrue differ only in the low bit, so xor with
    // ValueFalse and test all other bits.
    LValue isNotBoolean(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecBoolean))
            return proven;
        return m_out.testNonZero64(
            m_out.bitXor(jsValue, m_out.constInt64(ValueFalse)),
            m_out.constInt64(~1));
    }
    LValue isBoolean(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecBoolean))
            return proven;
        return m_out.logicalNot(isNotBoolean(jsValue));
    }
    LValue unboxBoolean(LValue jsValue)
    {
        // We want to use a cast that guarantees that B3 knows that even the integer
        // value is just 0 or 1. But for now we do it the dumb way.
        return m_out.notZero64(m_out.bitAnd(jsValue, m_out.constInt64(1)));
    }
    LValue boxBoolean(LValue value)
    {
        return m_out.select(
            value, m_out.constInt64(ValueTrue), m_out.constInt64(ValueFalse));
    }

    // Null and undefined differ only in TagBitUndefined; masking it off and
    // comparing against ValueNull matches both ("other" values).
    LValue isNotOther(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecOther))
            return proven;
        return m_out.notEqual(
            m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
            m_out.constInt64(ValueNull));
    }
    LValue isOther(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecOther))
            return proven;
        return m_out.equal(
            m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
            m_out.constInt64(ValueNull));
    }
15730
15731 LValue isProvenValue(SpeculatedType provenType, SpeculatedType wantedType)
15732 {
15733 if (!(provenType & ~wantedType))
15734 return m_out.booleanTrue;
15735 if (!(provenType & wantedType))
15736 return m_out.booleanFalse;
15737 return nullptr;
15738 }
15739
    // Emits whatever speculation checks @edge's use kind requires. Untyped uses
    // need nothing; the Known* and Rep uses assert that the abstract interpreter
    // has already proven the type; every other use dispatches to its dedicated
    // speculateXXX helper.
    void speculate(Edge edge)
    {
        switch (edge.useKind()) {
        case UntypedUse:
            break;
        case KnownInt32Use:
        case KnownStringUse:
        case KnownPrimitiveUse:
        case KnownOtherUse:
        case DoubleRepUse:
        case Int52RepUse:
        case KnownCellUse:
        case KnownBooleanUse:
            ASSERT(!m_interpreter.needsTypeCheck(edge));
            break;
        case Int32Use:
            speculateInt32(edge);
            break;
        case CellUse:
            speculateCell(edge);
            break;
        case CellOrOtherUse:
            speculateCellOrOther(edge);
            break;
        case AnyIntUse:
            speculateAnyInt(edge);
            break;
        case ObjectUse:
            speculateObject(edge);
            break;
        case ArrayUse:
            speculateArray(edge);
            break;
        case FunctionUse:
            speculateFunction(edge);
            break;
        case ObjectOrOtherUse:
            speculateObjectOrOther(edge);
            break;
        case FinalObjectUse:
            speculateFinalObject(edge);
            break;
        case RegExpObjectUse:
            speculateRegExpObject(edge);
            break;
        case ProxyObjectUse:
            speculateProxyObject(edge);
            break;
        case DerivedArrayUse:
            speculateDerivedArray(edge);
            break;
        case MapObjectUse:
            speculateMapObject(edge);
            break;
        case SetObjectUse:
            speculateSetObject(edge);
            break;
        case WeakMapObjectUse:
            speculateWeakMapObject(edge);
            break;
        case WeakSetObjectUse:
            speculateWeakSetObject(edge);
            break;
        case DataViewObjectUse:
            speculateDataViewObject(edge);
            break;
        case StringUse:
            speculateString(edge);
            break;
        case StringOrOtherUse:
            speculateStringOrOther(edge);
            break;
        case StringIdentUse:
            speculateStringIdent(edge);
            break;
        case SymbolUse:
            speculateSymbol(edge);
            break;
        case StringObjectUse:
            speculateStringObject(edge);
            break;
        case StringOrStringObjectUse:
            speculateStringOrStringObject(edge);
            break;
        case NumberUse:
            speculateNumber(edge);
            break;
        case RealNumberUse:
            speculateRealNumber(edge);
            break;
        case DoubleRepRealUse:
            speculateDoubleRepReal(edge);
            break;
        case DoubleRepAnyIntUse:
            speculateDoubleRepAnyInt(edge);
            break;
        case BooleanUse:
            speculateBoolean(edge);
            break;
        case BigIntUse:
            speculateBigInt(edge);
            break;
        case NotStringVarUse:
            speculateNotStringVar(edge);
            break;
        case NotSymbolUse:
            speculateNotSymbol(edge);
            break;
        case OtherUse:
            speculateOther(edge);
            break;
        case MiscUse:
            speculateMisc(edge);
            break;
        case NotCellUse:
            speculateNotCell(edge);
            break;
        default:
            DFG_CRASH(m_graph, m_node, "Unsupported speculation use kind");
        }
    }

    // Convenience overload; the owning node is not needed for dispatch.
    void speculate(Node*, Edge edge)
    {
        speculate(edge);
    }
15866
    // Speculation helpers that perform a check purely for its side effects
    // (OSR exit + abstract-state filtering), discarding the lowered value.
    void speculateInt32(Edge edge)
    {
        lowInt32(edge);
    }

    void speculateCell(Edge edge)
    {
        lowCell(edge);
    }

    void speculateNotCell(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;
        lowNotCell(edge);
    }

    // Speculates that the value is either a cell or an "other" (null/undefined)
    // value: branch around the check when it is a cell, otherwise require other.
    void speculateCellOrOther(Edge edge)
    {
        if (shouldNotHaveTypeCheck(edge.useKind()))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock isNotCell = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(edge)), unsure(continuation), unsure(isNotCell));

        LBasicBlock lastNext = m_out.appendTo(isNotCell, continuation);
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }

    // Speculates that the value is representable as Int52; the conversion's
    // built-in checks do the speculating, and its result is discarded.
    void speculateAnyInt(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        jsValueToStrictInt52(edge, lowJSValue(edge, ManualOperandSpeculation));
    }
15910
    // Cell-type predicates based on the JSType byte in the cell header.
    // Each first tries to fold statically via isProvenValue (masked to SpecCell
    // since these take an already-proven cell).
    LValue isCellWithType(LValue cell, JSType queriedType, SpeculatedType speculatedTypeForQuery, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, speculatedTypeForQuery))
            return proven;
        return m_out.equal(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(queriedType));
    }

    // Typed-array views occupy a contiguous JSType range starting at
    // FirstTypedArrayType; subtract and do one unsigned range compare
    // (DataView is excluded by the range length).
    LValue isTypedArrayView(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecTypedArrayView))
            return proven;
        LValue jsType = m_out.sub(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(FirstTypedArrayType));
        return m_out.below(
            jsType,
            m_out.constInt32(NumberOfTypedArrayTypesExcludingDataView));
    }

    // Objects are the cells whose JSType is at or above ObjectType.
    LValue isObject(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecObject))
            return proven;
        return m_out.aboveOrEqual(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(ObjectType));
    }

    LValue isNotObject(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecObject))
            return proven;
        return m_out.below(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(ObjectType));
    }
15949
    // String-ness is tested by comparing the cell's structure ID against the
    // VM's singleton string structure.
    LValue isNotString(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecString))
            return proven;
        return m_out.notEqual(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().stringStructure->id()));
    }

    LValue isString(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecString))
            return proven;
        return m_out.equal(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().stringStructure->id()));
    }

    // Tests whether a JSString is a rope (unresolved concatenation). The rope
    // flag lives in the JSString's value-pointer field (JSString::isRopeInPointer).
    // When an edge is supplied, several statically-known-non-rope cases fold to
    // a constant: proven StringIdent, a known resolved string constant, or a
    // lazily-materialized string (which always materializes resolved).
    LValue isRopeString(LValue string, Edge edge = Edge())
    {
        if (edge) {
            if (!((provenType(edge) & SpecString) & ~SpecStringIdent))
                return m_out.booleanFalse;
            if (JSValue value = provenValue(edge)) {
                if (value.isCell() && value.asCell()->type() == StringType && !asString(value)->isRope())
                    return m_out.booleanFalse;
            }
            String value = edge->tryGetString(m_graph);
            if (!value.isNull()) {
                // If this value is LazyValue, it will be converted to JSString, and the result must be non-rope string.
                return m_out.booleanFalse;
            }
        }

        return m_out.testNonZeroPtr(m_out.loadPtr(string, m_heaps.JSString_value), m_out.constIntPtr(JSString::isRopeInPointer));
    }

    // Negated form of isRopeString with the same constant-folding cases.
    LValue isNotRopeString(LValue string, Edge edge = Edge())
    {
        if (edge) {
            if (!((provenType(edge) & SpecString) & ~SpecStringIdent))
                return m_out.booleanTrue;
            if (JSValue value = provenValue(edge)) {
                if (value.isCell() && value.asCell()->type() == StringType && !asString(value)->isRope())
                    return m_out.booleanTrue;
            }
            String value = edge->tryGetString(m_graph);
            if (!value.isNull()) {
                // If this value is LazyValue, it will be converted to JSString, and the result must be non-rope string.
                return m_out.booleanTrue;
            }
        }

        return m_out.testIsZeroPtr(m_out.loadPtr(string, m_heaps.JSString_value), m_out.constIntPtr(JSString::isRopeInPointer));
    }
16005
    // Symbol and BigInt tests, like the string tests above, compare the cell's
    // structure ID against the corresponding VM singleton structure.
    LValue isNotSymbol(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecSymbol))
            return proven;
        return m_out.notEqual(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().symbolStructure->id()));
    }

    LValue isSymbol(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecSymbol))
            return proven;
        return m_out.equal(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().symbolStructure->id()));
    }

    LValue isNotBigInt(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecBigInt))
            return proven;
        return m_out.notEqual(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().bigIntStructure->id()));
    }

    LValue isBigInt(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecBigInt))
            return proven;
        return m_out.equal(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().bigIntStructure->id()));
    }
16041
    // Emits a boolean LValue that is true when the cell's indexing type already
    // matches what the given ArrayMode requires, i.e. no Arrayify transition is
    // needed. Only valid for the array-storage-like modes listed below; any
    // other mode crashes.
    LValue isArrayTypeForArrayify(LValue cell, ArrayMode arrayMode)
    {
        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::Undecided:
        case Array::ArrayStorage: {
            // The mask selects the bits of the indexing byte we care about.
            // For writes we must also distinguish copy-on-write arrays, since
            // those need a transition before mutation.
            IndexingType indexingModeMask = IsArray | IndexingShapeMask;
            if (arrayMode.action() == Array::Write)
                indexingModeMask |= CopyOnWrite;

            IndexingType shape = arrayMode.shapeMask();
            LValue indexingType = m_out.load8ZeroExt32(cell, m_heaps.JSCell_indexingTypeAndMisc);

            switch (arrayMode.arrayClass()) {
            case Array::OriginalArray:
            case Array::OriginalCopyOnWriteArray:
                // Original-array modes are checked by structure elsewhere; they
                // should never reach this indexing-type-based check.
                DFG_CRASH(m_graph, m_node, "Unexpected original array");
                return nullptr;

            case Array::Array:
                // Must be an array (IsArray set) with exactly the wanted shape.
                return m_out.equal(
                    m_out.bitAnd(indexingType, m_out.constInt32(indexingModeMask)),
                    m_out.constInt32(IsArray | shape));

            case Array::NonArray:
            case Array::OriginalNonArray:
                // Must NOT be an array (IsArray clear) with the wanted shape.
                return m_out.equal(
                    m_out.bitAnd(indexingType, m_out.constInt32(indexingModeMask)),
                    m_out.constInt32(shape));

            case Array::PossiblyArray:
                // Don't care about IsArray; drop it from the mask and just
                // compare the shape (and CopyOnWrite bit when writing).
                return m_out.equal(
                    m_out.bitAnd(indexingType, m_out.constInt32(indexingModeMask & ~IsArray)),
                    m_out.constInt32(shape));
            }
            break;
        }

        case Array::SlowPutArrayStorage: {
            // SlowPut mode accepts either ArrayStorageShape or
            // SlowPutArrayStorageShape, so this needs control flow rather than
            // a single compare.
            ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
            LValue indexingType = m_out.load8ZeroExt32(cell, m_heaps.JSCell_indexingTypeAndMisc);

            LBasicBlock trueCase = m_out.newBlock();
            LBasicBlock checkCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            ValueFromBlock falseValue = m_out.anchor(m_out.booleanFalse);
            // Range check: shape is in [ArrayStorageShape, SlowPutArrayStorageShape]
            // via the usual (x - lo) <= (hi - lo) unsigned trick.
            LValue isAnArrayStorageShape = m_out.belowOrEqual(
                m_out.sub(
                    m_out.bitAnd(indexingType, m_out.constInt32(IndexingShapeMask)),
                    m_out.constInt32(ArrayStorageShape)),
                m_out.constInt32(SlowPutArrayStorageShape - ArrayStorageShape));
            m_out.branch(isAnArrayStorageShape, unsure(checkCase), unsure(continuation));

            LBasicBlock lastNext = m_out.appendTo(checkCase, trueCase);
            // Shape matched; now validate the IsArray bit per the array class.
            switch (arrayMode.arrayClass()) {
            case Array::OriginalArray:
            case Array::OriginalCopyOnWriteArray:
                DFG_CRASH(m_graph, m_node, "Unexpected original array");
                return nullptr;

            case Array::Array:
                m_out.branch(
                    m_out.testNonZero32(indexingType, m_out.constInt32(IsArray)),
                    unsure(trueCase), unsure(continuation));
                break;

            case Array::NonArray:
            case Array::OriginalNonArray:
                m_out.branch(
                    m_out.testIsZero32(indexingType, m_out.constInt32(IsArray)),
                    unsure(trueCase), unsure(continuation));
                break;

            case Array::PossiblyArray:
                // IsArray is irrelevant; the shape check alone decides.
                m_out.jump(trueCase);
                break;
            }

            m_out.appendTo(trueCase, continuation);
            ValueFromBlock trueValue = m_out.anchor(m_out.booleanTrue);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int32, falseValue, trueValue);
        }

        default:
            break;
        }
        DFG_CRASH(m_graph, m_node, "Corrupt array class");
    }
16136
16137 LValue isArrayTypeForCheckArray(LValue cell, ArrayMode arrayMode)
16138 {
16139 switch (arrayMode.type()) {
16140 case Array::Int32:
16141 case Array::Double:
16142 case Array::Contiguous:
16143 case Array::Undecided:
16144 case Array::ArrayStorage:
16145 case Array::SlowPutArrayStorage:
16146 return isArrayTypeForArrayify(cell, arrayMode);
16147
16148 case Array::DirectArguments:
16149 return m_out.equal(
16150 m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
16151 m_out.constInt32(DirectArgumentsType));
16152
16153 case Array::ScopedArguments:
16154 return m_out.equal(
16155 m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
16156 m_out.constInt32(ScopedArgumentsType));
16157
16158 default:
16159 return m_out.equal(
16160 m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
16161 m_out.constInt32(typeForTypedArrayType(arrayMode.typedArrayType())));
16162 }
16163 }
16164
16165 LValue isFunction(LValue cell, SpeculatedType type = SpecFullTop)
16166 {
16167 if (LValue proven = isProvenValue(type & SpecCell, SpecFunction))
16168 return proven;
16169 return isType(cell, JSFunctionType);
16170 }
16171 LValue isNotFunction(LValue cell, SpeculatedType type = SpecFullTop)
16172 {
16173 if (LValue proven = isProvenValue(type & SpecCell, ~SpecFunction))
16174 return proven;
16175 return isNotType(cell, JSFunctionType);
16176 }
16177
16178 LValue isExoticForTypeof(LValue cell, SpeculatedType type = SpecFullTop)
16179 {
16180 if (!(type & SpecObjectOther))
16181 return m_out.booleanFalse;
16182 return m_out.testNonZero32(
16183 m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
16184 m_out.constInt32(MasqueradesAsUndefined | OverridesGetCallData));
16185 }
16186
16187 LValue isType(LValue cell, JSType type)
16188 {
16189 return m_out.equal(
16190 m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
16191 m_out.constInt32(type));
16192 }
16193
    // Negation of isType(): true when the cell's JSType byte differs from type.
    LValue isNotType(LValue cell, JSType type)
    {
        return m_out.logicalNot(isType(cell, type));
    }
16198
    // Speculates that the already-lowered cell is an object; OSR-exits otherwise.
    void speculateObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell));
    }
16203
    // Convenience overload: lowers the edge to a cell, then applies the object check.
    void speculateObject(Edge edge)
    {
        speculateObject(edge, lowCell(edge));
    }
16208
    // Speculates that the cell is a JSArray (by JSType); OSR-exits otherwise.
    void speculateArray(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecArray, isNotType(cell, ArrayType));
    }
16214
    // Convenience overload: lowers the edge to a cell, then applies the array check.
    void speculateArray(Edge edge)
    {
        speculateArray(edge, lowCell(edge));
    }
16219
    // Speculates that the cell is a JSFunction; OSR-exits otherwise.
    void speculateFunction(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecFunction, isNotFunction(cell));
    }
16224
    // Convenience overload: lowers the edge to a cell, then applies the function check.
    void speculateFunction(Edge edge)
    {
        speculateFunction(edge, lowCell(edge));
    }
16229
    // Speculates that the edge's value is either an object or "other"
    // (null/undefined). Splits on cell-ness, then checks each side separately
    // so the filters stay precise for the abstract interpreter.
    void speculateObjectOrOther(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock primitiveCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);

        // Cell side: must be an object (non-cells were already excluded).
        FTL_TYPE_CHECK(
            jsValueValue(value), edge, (~SpecCellCheck) | SpecObject, isNotObject(value));

        m_out.jump(continuation);

        m_out.appendTo(primitiveCase, continuation);

        // Non-cell side: must be null/undefined.
        FTL_TYPE_CHECK(
            jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16259
    // Speculates that the cell is a final object (plain JS object); OSR-exits otherwise.
    void speculateFinalObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecFinalObject, isNotType(cell, FinalObjectType));
    }
16265
    // Convenience overload: lowers the edge to a cell, then applies the check.
    void speculateFinalObject(Edge edge)
    {
        speculateFinalObject(edge, lowCell(edge));
    }
16270
    // Speculates that the cell is a RegExp object; OSR-exits otherwise.
    void speculateRegExpObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecRegExpObject, isNotType(cell, RegExpObjectType));
    }
16276
    // Convenience overload: lowers the edge to a cell, then applies the check.
    void speculateRegExpObject(Edge edge)
    {
        speculateRegExpObject(edge, lowCell(edge));
    }
16281
    // Speculates that the cell is a Proxy object; OSR-exits otherwise.
    void speculateProxyObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecProxyObject, isNotType(cell, ProxyObjectType));
    }
16287
    // Convenience overload: lowers the edge to a cell, then applies the check.
    void speculateProxyObject(Edge edge)
    {
        speculateProxyObject(edge, lowCell(edge));
    }
16292
    // Speculates that the cell is a derived (subclassed) array; OSR-exits otherwise.
    void speculateDerivedArray(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecDerivedArray, isNotType(cell, DerivedArrayType));
    }
16298
    // Convenience overload: lowers the edge to a cell, then applies the check.
    void speculateDerivedArray(Edge edge)
    {
        speculateDerivedArray(edge, lowCell(edge));
    }
16303
    // Speculates that the cell is a JSMap; OSR-exits otherwise.
    void speculateMapObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecMapObject, isNotType(cell, JSMapType));
    }
16309
    // Convenience overload: lowers the edge to a cell, then applies the check.
    void speculateMapObject(Edge edge)
    {
        speculateMapObject(edge, lowCell(edge));
    }
16314
    // Speculates that the cell is a JSSet; OSR-exits otherwise.
    void speculateSetObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecSetObject, isNotType(cell, JSSetType));
    }
16320
    // Convenience overload: lowers the edge to a cell, then applies the check.
    void speculateSetObject(Edge edge)
    {
        speculateSetObject(edge, lowCell(edge));
    }
16325
    // Speculates that the cell is a JSWeakMap; OSR-exits otherwise.
    void speculateWeakMapObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecWeakMapObject, isNotType(cell, JSWeakMapType));
    }
16331
    // Convenience overload: lowers the edge to a cell, then applies the check.
    void speculateWeakMapObject(Edge edge)
    {
        speculateWeakMapObject(edge, lowCell(edge));
    }
16336
    // Speculates that the cell is a JSWeakSet; OSR-exits otherwise.
    void speculateWeakSetObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecWeakSetObject, isNotType(cell, JSWeakSetType));
    }
16342
    // Convenience overload: lowers the edge to a cell, then applies the check.
    void speculateWeakSetObject(Edge edge)
    {
        speculateWeakSetObject(edge, lowCell(edge));
    }
16347
    // Speculates that the cell is a DataView; OSR-exits otherwise.
    void speculateDataViewObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecDataViewObject, isNotType(cell, DataViewType));
    }
16353
    // Convenience overload: lowers the edge to a cell, then applies the check.
    void speculateDataViewObject(Edge edge)
    {
        speculateDataViewObject(edge, lowCell(edge));
    }
16358
    // Speculates that the cell is a JSString; OSR-exits otherwise.
    void speculateString(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecString, isNotString(cell));
    }
16363
    // Convenience overload: lowers the edge to a cell, then applies the string check.
    void speculateString(Edge edge)
    {
        speculateString(edge, lowCell(edge));
    }
16368
    // Speculates that the value is either a JSString or "other" (null/undefined).
    // Splits on cell-ness and checks each side with its own precise filter.
    void speculateStringOrOther(Edge edge, LValue value)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock notCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);

        // Cell side: must be a string.
        FTL_TYPE_CHECK(jsValueValue(value), edge, (~SpecCellCheck) | SpecString, isNotString(value));

        m_out.jump(continuation);
        m_out.appendTo(notCellCase, continuation);

        // Non-cell side: must be null/undefined.
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));

        m_out.jump(continuation);
        m_out.appendTo(continuation, lastNext);
    }
16392
    // Convenience overload: lowers the edge to a JSValue, then applies the check.
    void speculateStringOrOther(Edge edge)
    {
        speculateStringOrOther(edge, lowJSValue(edge, ManualOperandSpeculation));
    }
16397
    // Speculates that the string is an identifier-like string: not a rope, and
    // its StringImpl is atomic. OSR-exits (BadType) otherwise, then narrows the
    // abstract value accordingly.
    void speculateStringIdent(Edge edge, LValue string, LValue stringImpl)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecStringIdent | ~SpecString))
            return;

        // Ropes have no StringImpl to inspect, so rule them out first.
        speculate(BadType, jsValueValue(string), edge.node(), isRopeString(string));
        speculate(
            BadType, jsValueValue(string), edge.node(),
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIsAtomic())));
        m_interpreter.filter(edge, SpecStringIdent | ~SpecString);
    }
16411
    // Convenience overload: lowStringIdent() performs the full lowering plus
    // the string-ident speculation as a side effect.
    void speculateStringIdent(Edge edge)
    {
        lowStringIdent(edge);
    }
16416
    // Speculates that the edge is a StringObject (wrapper object), lowering the
    // edge to a cell first. No-op when the check is already proven.
    void speculateStringObject(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecStringObject))
            return;

        speculateStringObjectForCell(edge, lowCell(edge));
    }
16424
    // Speculates that the cell is either a JSString or a StringObject. Fast
    // path accepts StringType immediately; otherwise the type byte must equal
    // StringObjectType or we OSR-exit with BadType.
    void speculateStringOrStringObject(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecString | SpecStringObject))
            return;

        LValue cellBase = lowCell(edge);
        // lowCell() may itself refine the abstract value; re-check before
        // emitting any control flow.
        if (!m_interpreter.needsTypeCheck(edge, SpecString | SpecStringObject))
            return;

        LBasicBlock notString = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue type = m_out.load8ZeroExt32(cellBase, m_heaps.JSCell_typeInfoType);
        m_out.branch(
            m_out.equal(type, m_out.constInt32(StringType)),
            unsure(continuation), unsure(notString));

        LBasicBlock lastNext = m_out.appendTo(notString, continuation);
        speculate(
            BadType, jsValueValue(cellBase), edge.node(),
            m_out.notEqual(type, m_out.constInt32(StringObjectType)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        m_interpreter.filter(edge, SpecString | SpecStringObject);
    }
16451
    // Speculates that the already-lowered cell is a StringObject by comparing
    // its JSType byte; OSR-exits otherwise.
    void speculateStringObjectForCell(Edge edge, LValue cell)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecStringObject))
            return;

        LValue type = m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType);
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecStringObject, m_out.notEqual(type, m_out.constInt32(StringObjectType)));
    }
16460
    // Speculates that the cell is a Symbol; OSR-exits otherwise.
    void speculateSymbol(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecSymbol, isNotSymbol(cell));
    }
16465
    // Convenience overload: lowers the edge to a cell, then applies the check.
    void speculateSymbol(Edge edge)
    {
        speculateSymbol(edge, lowCell(edge));
    }
16470
    // Speculates that the cell is a BigInt; OSR-exits otherwise.
    void speculateBigInt(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecBigInt, isNotBigInt(cell));
    }
16475
    // Convenience overload: lowers the edge to a cell, then applies the check.
    void speculateBigInt(Edge edge)
    {
        speculateBigInt(edge, lowCell(edge));
    }
16480
    // Speculates that the cell is an object and, unless the watchpoint proves
    // it impossible, that it does not masquerade as undefined.
    void speculateNonNullObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell));
        // If the masquerades-as-undefined watchpoint is intact, no object can
        // masquerade, so the flag check below is unnecessary.
        if (masqueradesAsUndefinedWatchpointIsStillValid())
            return;

        speculate(
            BadType, jsValueValue(cell), edge.node(),
            m_out.testNonZero32(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
                m_out.constInt32(MasqueradesAsUndefined)));
    }
16493
    // Speculates that the edge's value is a bytecode number (int32 or double);
    // OSR-exits otherwise.
    void speculateNumber(Edge edge)
    {
        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBytecodeNumber, isNotNumber(value));
    }
16499
    // Speculates that the edge's value is a real (non-NaN) number. The double
    // self-compare succeeds for any non-NaN double; on the NaN-pattern path we
    // still accept boxed int32s, since unboxDouble of an int32 box is not a
    // meaningful double.
    void speculateRealNumber(Edge edge)
    {
        // Do an early return here because lowDouble() can create a lot of control flow.
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        LValue doubleValue = unboxDouble(value);

        LBasicBlock intCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // doubleEqual(x, x) is false only for NaN.
        m_out.branch(
            m_out.doubleEqual(doubleValue, doubleValue),
            usually(continuation), rarely(intCase));

        LBasicBlock lastNext = m_out.appendTo(intCase, continuation);

        // The NaN pattern may actually be a boxed int32; accept that, exit otherwise.
        typeCheck(
            jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
            isNotInt32(value, provenType(m_node->child1()) & ~SpecFullDouble));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16525
    // Speculates that the double-rep edge holds a real (non-NaN) double, using
    // a self-compare that is true only for NaN.
    void speculateDoubleRepReal(Edge edge)
    {
        // Do an early return here because lowDouble() can create a lot of control flow.
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowDouble(edge);
        FTL_TYPE_CHECK(
            doubleValue(value), edge, SpecDoubleReal,
            m_out.doubleNotEqualOrUnordered(value, value));
    }
16537
    // Speculates that the double-rep edge holds a value representable as a
    // strict Int52; doubleToStrictInt52() emits the conversion and its checks.
    void speculateDoubleRepAnyInt(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        doubleToStrictInt52(edge, lowDouble(edge));
    }
16545
    // Speculates boolean-ness as a side effect of lowering to a boolean.
    void speculateBoolean(Edge edge)
    {
        lowBoolean(edge);
    }
16550
    // Speculates that the value is not a "string variable": if it happens to
    // be a string, it must qualify as a string identifier (non-rope, atomic).
    // Non-cells and non-string cells pass through unchecked.
    void speculateNotStringVar(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, ~SpecStringVar))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock isStringCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(edge)), unsure(isCellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
        m_out.branch(isString(value, provenType(edge)), unsure(isStringCase), unsure(continuation));

        m_out.appendTo(isStringCase, continuation);
        // Strings must be identifier-like; pass the loaded StringImpl along.
        speculateStringIdent(edge, value, m_out.loadPtr(value, m_heaps.JSString_value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16573
    // Speculates that the value is not a Symbol. Only cells can be symbols, so
    // non-cells skip the check entirely.
    void speculateNotSymbol(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, ~SpecSymbol))
            return;

        ASSERT(mayHaveTypeCheck(edge.useKind()));
        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(edge)), unsure(isCellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
        speculate(BadType, jsValueValue(value), edge.node(), isSymbol(value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        m_interpreter.filter(edge, ~SpecSymbol);
    }
16595
    // Speculates that the value is "other" (null/undefined); OSR-exits otherwise.
    void speculateOther(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        typeCheck(jsValueValue(value), edge, SpecOther, isNotOther(value));
    }
16604
    // Speculates that the value is "misc" (bool/undefined/null); OSR-exits otherwise.
    void speculateMisc(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        typeCheck(jsValueValue(value), edge, SpecMisc, isNotMisc(value));
    }
16613
    // Speculates that a typed-array view has not been neutered (detached).
    // Only wasteful-mode views can be neutered; for those, a null vector
    // pointer (after stripping the pointer tag) means detached and we exit.
    void speculateTypedArrayIsNotNeutered(LValue base)
    {
        LBasicBlock isWasteful = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue mode = m_out.load32(base, m_heaps.JSArrayBufferView_mode);
        m_out.branch(m_out.equal(mode, m_out.constInt32(WastefulTypedArray)),
            unsure(isWasteful), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isWasteful, continuation);
        LValue vector = m_out.loadPtr(base, m_heaps.JSArrayBufferView_vector);
        // FIXME: We could probably make this a mask.
        // https://bugs.webkit.org/show_bug.cgi?id=197701
        vector = removeArrayPtrTag(vector);
        speculate(Uncountable, jsValueValue(vector), m_node, m_out.isZero64(vector));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16633
    // True when the graph's masquerades-as-undefined watchpoint still holds
    // for the current node's semantic origin.
    bool masqueradesAsUndefinedWatchpointIsStillValid()
    {
        return m_graph.masqueradesAsUndefinedWatchpointIsStillValid(m_node->origin.semantic);
    }
16638
    // Loads the cell's GC state byte, zero-extended to 32 bits.
    LValue loadCellState(LValue base)
    {
        return m_out.load8ZeroExt32(base, m_heaps.JSCell_cellState);
    }
16643
    // Emits a GC store barrier for `base`. Fast path: if the cell state is
    // above the barrier threshold, no barrier is needed. When `isFenced`, the
    // threshold is loaded from the VM and a fence plus re-check against
    // blackThreshold guards against racing with the collector before taking
    // the slow path. Slow path calls operationWriteBarrierSlowPath.
    void emitStoreBarrier(LValue base, bool isFenced)
    {
        LBasicBlock recheckPath = nullptr;
        if (isFenced)
            recheckPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(isFenced ? recheckPath : slowPath);

        LValue threshold;
        if (isFenced)
            threshold = m_out.load32(m_out.absolute(vm().heap.addressOfBarrierThreshold()));
        else
            threshold = m_out.constInt32(blackThreshold);

        m_out.branch(
            m_out.above(loadCellState(base), threshold),
            usually(continuation), rarely(isFenced ? recheckPath : slowPath));

        if (isFenced) {
            m_out.appendTo(recheckPath, slowPath);

            // Order the cell-state reload after any prior stores before
            // deciding whether the slow path is really needed.
            m_out.fence(&m_heaps.root, &m_heaps.JSCell_cellState);

            m_out.branch(
                m_out.above(loadCellState(base), m_out.constInt32(blackThreshold)),
                usually(continuation), rarely(slowPath));
        }

        m_out.appendTo(slowPath, continuation);

        LValue call = vmCall(Void, m_out.operation(operationWriteBarrierSlowPath), m_callFrame, base);
        // Tell the alias analysis what the runtime call reads and writes.
        m_heaps.decorateCCallRead(&m_heaps.root, call);
        m_heaps.decorateCCallWrite(&m_heaps.JSCell_cellState, call);

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16684
    // Emits a mutator fence. On x86 the fence is emitted unconditionally;
    // elsewhere it is guarded by the VM's mutatorShouldBeFenced flag so the
    // common (unfenced) case pays only a load and branch.
    void mutatorFence()
    {
        if (isX86()) {
            m_out.fence(&m_heaps.root, nullptr);
            return;
        }

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        m_out.branch(
            m_out.load8ZeroExt32(m_out.absolute(vm().heap.addressOfMutatorShouldBeFenced())),
            rarely(slowPath), usually(continuation));

        m_out.appendTo(slowPath, continuation);

        m_out.fence(&m_heaps.root, nullptr);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16708
    // Installs a new butterfly on `object`. When fencing is required (x86, or
    // the VM's mutatorShouldBeFenced flag is set), the structure ID is first
    // "nuked" (tagged with nukedStructureIDBit) with fences around the
    // butterfly store so a concurrent scanner never sees a fresh butterfly
    // paired with the old, un-nuked structure. Otherwise a plain store suffices.
    void nukeStructureAndSetButterfly(LValue butterfly, LValue object)
    {
        if (isX86()) {
            m_out.store32(
                m_out.bitOr(
                    m_out.load32(object, m_heaps.JSCell_structureID),
                    m_out.constInt32(nukedStructureIDBit())),
                object, m_heaps.JSCell_structureID);
            m_out.fence(&m_heaps.root, nullptr);
            m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
            m_out.fence(&m_heaps.root, nullptr);
            return;
        }

        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(fastPath);

        m_out.branch(
            m_out.load8ZeroExt32(m_out.absolute(vm().heap.addressOfMutatorShouldBeFenced())),
            rarely(slowPath), usually(fastPath));

        m_out.appendTo(fastPath, slowPath);

        // No concurrent scanner to worry about: plain butterfly store.
        m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        // Fenced protocol: nuke structure, fence, store butterfly, fence.
        m_out.store32(
            m_out.bitOr(
                m_out.load32(object, m_heaps.JSCell_structureID),
                m_out.constInt32(nukedStructureIDBit())),
            object, m_heaps.JSCell_structureID);
        m_out.fence(&m_heaps.root, nullptr);
        m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
        m_out.fence(&m_heaps.root, nullptr);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16752
16753 LValue preciseIndexMask64(LValue value, LValue index, LValue limit)
16754 {
16755 return m_out.bitAnd(
16756 value,
16757 m_out.aShr(
16758 m_out.sub(
16759 index,
16760 m_out.opaque(limit)),
16761 m_out.constInt32(63)));
16762 }
16763
    // 32-bit variant: zero-extends index and limit to 64 bits so the 64-bit
    // masking arithmetic in preciseIndexMask64 applies.
    LValue preciseIndexMask32(LValue value, LValue index, LValue limit)
    {
        return preciseIndexMask64(value, m_out.zeroExt(index, Int64), m_out.zeroExt(limit, Int64));
    }
16768
    // Calls a VM runtime function: records the call site (callPreflight), makes
    // the call, and then either emits an exception check (if this node may
    // exit) or, in debug builds, asserts that no exception was thrown.
    template<typename... Args>
    LValue vmCall(LType type, LValue function, Args&&... args)
    {
        callPreflight();
        LValue result = m_out.call(type, function, std::forward<Args>(args)...);
        if (mayExit(m_graph, m_node))
            callCheck();
        else {
            // We can't exit due to an exception, so we also can't throw an exception.
#ifndef NDEBUG
            LBasicBlock crash = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue exception = m_out.load64(m_out.absolute(vm().addressOfException()));
            LValue hadException = m_out.notZero64(exception);

            m_out.branch(
                hadException, rarely(crash), usually(continuation));

            LBasicBlock lastNext = m_out.appendTo(crash, continuation);
            // Debug-only trap: an exception here means the mayExit analysis lied.
            m_out.unreachable();

            m_out.appendTo(continuation, lastNext);
#endif
        }
        return result;
    }
16796
16797 void callPreflight(CodeOrigin codeOrigin)
16798 {
16799 CallSiteIndex callSiteIndex = m_ftlState.jitCode->common.addCodeOrigin(codeOrigin);
16800 m_out.store32(
16801 m_out.constInt32(callSiteIndex.bits()),
16802 tagFor(CallFrameSlot::argumentCount));
16803 }
16804
    // Convenience overload: uses the current node's call-site code origin.
    void callPreflight()
    {
        callPreflight(codeOriginDescriptionOfCallSite());
    }
16809
16810 CodeOrigin codeOriginDescriptionOfCallSite() const
16811 {
16812 CodeOrigin codeOrigin = m_node->origin.semantic;
16813 if (m_node->op() == TailCallInlinedCaller
16814 || m_node->op() == TailCallVarargsInlinedCaller
16815 || m_node->op() == TailCallForwardVarargsInlinedCaller
16816 || m_node->op() == DirectTailCallInlinedCaller) {
16817 // This case arises when you have a situation like this:
16818 // foo makes a call to bar, bar is inlined in foo. bar makes a call
16819 // to baz and baz is inlined in bar. And then baz makes a tail-call to jaz,
16820 // and jaz is inlined in baz. We want the callframe for jaz to appear to
16821 // have caller be bar.
16822 codeOrigin = *codeOrigin.inlineCallFrame()->getCallerSkippingTailCalls();
16823 }
16824
16825 return codeOrigin;
16826 }
16827
    // Emits the post-call exception check. If some machine frame will catch
    // the exception, emit an ExceptionCheck OSR exit targeting the catch
    // handler's origin; otherwise branch to the shared exception-handling
    // block.
    void callCheck()
    {
        if (Options::useExceptionFuzz())
            m_out.call(Void, m_out.operation(operationExceptionFuzz), m_callFrame);

        LValue exception = m_out.load64(m_out.absolute(vm().addressOfException()));
        LValue hadException = m_out.notZero64(exception);

        CodeOrigin opCatchOrigin;
        HandlerInfo* exceptionHandler;
        if (m_graph.willCatchExceptionInMachineFrame(m_origin.forExit, opCatchOrigin, exceptionHandler)) {
            bool exitOK = true;
            bool isExceptionHandler = true;
            appendOSRExit(
                ExceptionCheck, noValue(), nullptr, hadException,
                m_origin.withForExitAndExitOK(opCatchOrigin, exitOK), isExceptionHandler);
            return;
        }

        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            hadException, rarely(m_handleExceptions), usually(continuation));

        m_out.appendTo(continuation);
    }
16854
    // Prepares a patchpoint for exception handling. If no machine frame will
    // catch, a default handle suffices. Otherwise build an OSR exit descriptor
    // targeting the catch origin and append its exit arguments to the
    // patchpoint's stackmap, recording where in the stackmap they begin.
    RefPtr<PatchpointExceptionHandle> preparePatchpointForExceptions(PatchpointValue* value)
    {
        CodeOrigin opCatchOrigin;
        HandlerInfo* exceptionHandler;
        bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_origin.forExit, opCatchOrigin, exceptionHandler);
        if (!willCatchException)
            return PatchpointExceptionHandle::defaultHandle(m_ftlState);

        dataLogLnIf(verboseCompilationEnabled(), "    Patchpoint exception OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap());

        bool exitOK = true;
        NodeOrigin origin = m_origin.withForExitAndExitOK(opCatchOrigin, exitOK);

        OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(noValue(), nullptr);

        // Compute the offset into the StackmapGenerationParams where we will find the exit arguments
        // we are about to append. We need to account for both the children we've already added, and
        // for the possibility of a result value if the patchpoint is not void.
        unsigned offset = value->numChildren();
        if (value->type() != Void)
            offset++;

        // Use LateColdAny to ensure that the stackmap arguments interfere with the patchpoint's
        // result and with any late-clobbered registers.
        value->appendVectorWithRep(
            buildExitArguments(exitDescriptor, opCatchOrigin, noValue()),
            ValueRep::LateColdAny);

        return PatchpointExceptionHandle::create(
            m_ftlState, exitDescriptor, origin, offset, *exceptionHandler);
    }
16886
    // Maps a DFG basic block to its lowered B3 block.
    LBasicBlock lowBlock(DFG::BasicBlock* block)
    {
        return m_blocks.get(block);
    }
16891
    // Overload that derives the value profile from the high (DFG) value.
    OSRExitDescriptor* appendOSRExitDescriptor(FormattedValue lowValue, Node* highValue)
    {
        return appendOSRExitDescriptor(lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue));
    }
16896
    // Allocates a new OSR exit descriptor in the jit code, sized to the current
    // availability map's argument/local counts.
    OSRExitDescriptor* appendOSRExitDescriptor(FormattedValue lowValue, const MethodOfGettingAValueProfile& profile)
    {
        return &m_ftlState.jitCode->osrExitDescriptors.alloc(
            lowValue.format(), profile,
            availabilityMap().m_locals.numberOfArguments(),
            availabilityMap().m_locals.numberOfLocals());
    }
16904
    // Overload that derives the value profile from the high (DFG) value.
    void appendOSRExit(
        ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition,
        NodeOrigin origin, bool isExceptionHandler = false)
    {
        return appendOSRExit(kind, lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue),
            failCondition, origin, isExceptionHandler);
    }
16912
    // Appends an OSR exit that fires when failCondition is true. When OSR exit
    // fuzzing is enabled (and this is not an exception handler), the condition
    // is widened to also fire at the configured fuzz-check counts. A constant-
    // false condition emits nothing.
    void appendOSRExit(
        ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, LValue failCondition,
        NodeOrigin origin, bool isExceptionHandler = false)
    {
        dataLogLnIf(verboseCompilationEnabled(), "    OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap());

        DFG_ASSERT(m_graph, m_node, origin.exitOK);

        if (!isExceptionHandler
            && Options::useOSRExitFuzz()
            && canUseOSRExitFuzzing(m_graph.baselineCodeBlockFor(m_node->origin.semantic))
            && doOSRExitFuzzing()) {
            // Count this check globally so fuzzing can target the Nth check.
            LValue numberOfFuzzChecks = m_out.add(
                m_out.load32(m_out.absolute(&g_numberOfOSRExitFuzzChecks)),
                m_out.int32One);

            m_out.store32(numberOfFuzzChecks, m_out.absolute(&g_numberOfOSRExitFuzzChecks));

            if (unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter()) {
                failCondition = m_out.bitOr(
                    failCondition,
                    m_out.aboveOrEqual(numberOfFuzzChecks, m_out.constInt32(atOrAfter)));
            }
            if (unsigned at = Options::fireOSRExitFuzzAt()) {
                failCondition = m_out.bitOr(
                    failCondition,
                    m_out.equal(numberOfFuzzChecks, m_out.constInt32(at)));
            }
        }

        // Statically impossible exit: emit nothing.
        if (failCondition == m_out.booleanFalse)
            return;

        blessSpeculation(
            m_out.speculate(failCondition), kind, lowValue, profile, origin);
    }
16949
    // Overload that derives the value profile from the high (DFG) value.
    void blessSpeculation(CheckValue* value, ExitKind kind, FormattedValue lowValue, Node* highValue, NodeOrigin origin)
    {
        blessSpeculation(value, kind, lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue), origin);
    }
16954
    // Attaches OSR-exit metadata to a Check value: allocates an exit
    // descriptor, appends the exit's stackmap arguments, and installs a
    // generator that emits the actual OSR exit at code-generation time.
    void blessSpeculation(CheckValue* value, ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, NodeOrigin origin)
    {
        OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(lowValue, profile);

        value->appendColdAnys(buildExitArguments(exitDescriptor, origin.forExit, lowValue));

        // Capture a raw State pointer (by value) since the lambda outlives `this`'s frame.
        State* state = &m_ftlState;
        value->setGenerator(
            [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
                exitDescriptor->emitOSRExit(
                    *state, kind, origin, jit, params, 0);
            });
    }
16968
    // Value-returning convenience wrapper around the in-place overload below.
    StackmapArgumentList buildExitArguments(
        OSRExitDescriptor* exitDescriptor, CodeOrigin exitOrigin, FormattedValue lowValue,
        unsigned offsetOfExitArgumentsInStackmapLocations = 0)
    {
        StackmapArgumentList result;
        buildExitArguments(
            exitDescriptor, exitOrigin, result, lowValue, offsetOfExitArgumentsInStackmapLocations);
        return result;
    }
16978
    // Populates `arguments` (the stackmap argument list) and the descriptor's
    // exit values for an OSR exit at exitOrigin: prunes availability to what's
    // live at the exit, creates materializations for phantom allocations, and
    // records an ExitValue for every local and every tracked heap location.
    void buildExitArguments(
        OSRExitDescriptor* exitDescriptor, CodeOrigin exitOrigin, StackmapArgumentList& arguments, FormattedValue lowValue,
        unsigned offsetOfExitArgumentsInStackmapLocations = 0)
    {
        if (!!lowValue)
            arguments.append(lowValue.value());

        AvailabilityMap availabilityMap = this->availabilityMap();
        availabilityMap.pruneByLiveness(m_graph, exitOrigin);

        // One materialization per phantom allocation node, deduplicated.
        HashMap<Node*, ExitTimeObjectMaterialization*> map;
        availabilityMap.forEachAvailability(
            [&] (Availability availability) {
                if (!availability.shouldUseNode())
                    return;

                Node* node = availability.node();
                if (!node->isPhantomAllocation())
                    return;

                auto result = map.add(node, nullptr);
                if (result.isNewEntry) {
                    result.iterator->value =
                        exitDescriptor->m_materializations.add(node->op(), node->origin.semantic);
                }
            });

        for (unsigned i = 0; i < exitDescriptor->m_values.size(); ++i) {
            int operand = exitDescriptor->m_values.operandForIndex(i);

            Availability availability = availabilityMap.m_locals[i];

            if (Options::validateFTLOSRExitLiveness()
                && m_graph.m_plan.mode() != FTLForOSREntryMode) {

                // A bytecode-live local with no availability means we would
                // reconstruct garbage on exit; crash loudly in validation mode.
                if (availability.isDead() && m_graph.isLiveInBytecode(VirtualRegister(operand), exitOrigin))
                    DFG_CRASH(m_graph, m_node, toCString("Live bytecode local not available: operand = ", VirtualRegister(operand), ", availability = ", availability, ", origin = ", exitOrigin).data());
            }
            ExitValue exitValue = exitValueForAvailability(arguments, map, availability);
            if (exitValue.hasIndexInStackmapLocations())
                exitValue.adjustStackmapLocationsIndexByOffset(offsetOfExitArgumentsInStackmapLocations);
            exitDescriptor->m_values[i] = exitValue;
        }

        for (auto heapPair : availabilityMap.m_heap) {
            Node* node = heapPair.key.base();
            ExitTimeObjectMaterialization* materialization = map.get(node);
            if (!materialization)
                DFG_CRASH(m_graph, m_node, toCString("Could not find materialization for ", node, " in ", availabilityMap).data());
            ExitValue exitValue = exitValueForAvailability(arguments, map, heapPair.value);
            if (exitValue.hasIndexInStackmapLocations())
                exitValue.adjustStackmapLocationsIndexByOffset(offsetOfExitArgumentsInStackmapLocations);
            materialization->add(
                heapPair.key.descriptor(),
                exitValue);
        }

        if (verboseCompilationEnabled()) {
            dataLog("        Exit values: ", exitDescriptor->m_values, "\n");
            if (!exitDescriptor->m_materializations.isEmpty()) {
                dataLog("        Materializations: \n");
                for (ExitTimeObjectMaterialization* materialization : exitDescriptor->m_materializations)
                    dataLog("            ", pointerDump(materialization), "\n");
            }
        }
    }
17045
17046 ExitValue exitValueForAvailability(
17047 StackmapArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map,
17048 Availability availability)
17049 {
17050 FlushedAt flush = availability.flushedAt();
17051 switch (flush.format()) {
17052 case DeadFlush:
17053 case ConflictingFlush:
17054 if (availability.hasNode())
17055 return exitValueForNode(arguments, map, availability.node());
17056
17057 // This means that the value is dead. It could be dead in bytecode or it could have
17058 // been killed by our DCE, which can sometimes kill things even if they were live in
17059 // bytecode.
17060 return ExitValue::dead();
17061
17062 case FlushedJSValue:
17063 case FlushedCell:
17064 case FlushedBoolean:
17065 return ExitValue::inJSStack(flush.virtualRegister());
17066
17067 case FlushedInt32:
17068 return ExitValue::inJSStackAsInt32(flush.virtualRegister());
17069
17070 case FlushedInt52:
17071 return ExitValue::inJSStackAsInt52(flush.virtualRegister());
17072
17073 case FlushedDouble:
17074 return ExitValue::inJSStackAsDouble(flush.virtualRegister());
17075 }
17076
17077 DFG_CRASH(m_graph, m_node, "Invalid flush format");
17078 return ExitValue::dead();
17079 }
17080
17081 ExitValue exitValueForNode(
17082 StackmapArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map,
17083 Node* node)
17084 {
17085 // NOTE: In FTL->B3, we cannot generate code here, because m_output is positioned after the
17086 // stackmap value. Like all values, the stackmap value cannot use a child that is defined after
17087 // it.
17088
17089 ASSERT(node->shouldGenerate());
17090 ASSERT(node->hasResult());
17091
17092 if (node) {
17093 switch (node->op()) {
17094 case BottomValue:
17095 // This might arise in object materializations. I actually doubt that it would,
17096 // but it seems worthwhile to be conservative.
17097 return ExitValue::dead();
17098
17099 case JSConstant:
17100 case Int52Constant:
17101 case DoubleConstant:
17102 return ExitValue::constant(node->asJSValue());
17103
17104 default:
17105 if (node->isPhantomAllocation())
17106 return ExitValue::materializeNewObject(map.get(node));
17107 break;
17108 }
17109 }
17110
17111 LoweredNodeValue value = m_int32Values.get(node);
17112 if (isValid(value))
17113 return exitArgument(arguments, DataFormatInt32, value.value());
17114
17115 value = m_int52Values.get(node);
17116 if (isValid(value))
17117 return exitArgument(arguments, DataFormatInt52, value.value());
17118
17119 value = m_strictInt52Values.get(node);
17120 if (isValid(value))
17121 return exitArgument(arguments, DataFormatStrictInt52, value.value());
17122
17123 value = m_booleanValues.get(node);
17124 if (isValid(value))
17125 return exitArgument(arguments, DataFormatBoolean, value.value());
17126
17127 value = m_jsValueValues.get(node);
17128 if (isValid(value))
17129 return exitArgument(arguments, DataFormatJS, value.value());
17130
17131 value = m_doubleValues.get(node);
17132 if (isValid(value))
17133 return exitArgument(arguments, DataFormatDouble, value.value());
17134
17135 DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
17136 return ExitValue::dead();
17137 }
17138
17139 ExitValue exitArgument(StackmapArgumentList& arguments, DataFormat format, LValue value)
17140 {
17141 ExitValue result = ExitValue::exitArgument(ExitArgument(format, arguments.size()));
17142 arguments.append(value);
17143 return result;
17144 }
17145
17146 ExitValue exitValueForTailCall(StackmapArgumentList& arguments, Node* node)
17147 {
17148 ASSERT(node->shouldGenerate());
17149 ASSERT(node->hasResult());
17150
17151 switch (node->op()) {
17152 case JSConstant:
17153 case Int52Constant:
17154 case DoubleConstant:
17155 return ExitValue::constant(node->asJSValue());
17156
17157 default:
17158 break;
17159 }
17160
17161 LoweredNodeValue value = m_jsValueValues.get(node);
17162 if (isValid(value))
17163 return exitArgument(arguments, DataFormatJS, value.value());
17164
17165 value = m_int32Values.get(node);
17166 if (isValid(value))
17167 return exitArgument(arguments, DataFormatJS, boxInt32(value.value()));
17168
17169 value = m_booleanValues.get(node);
17170 if (isValid(value))
17171 return exitArgument(arguments, DataFormatJS, boxBoolean(value.value()));
17172
17173 // Doubles and Int52 have been converted by ValueRep()
17174 DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
17175 }
17176
    // The set* helpers record the B3 value that lowers |node| into the map
    // for that representation, tagged with the current high-level block so
    // that isValid() can later check dominance before reusing the value.
    void setInt32(Node* node, LValue value)
    {
        m_int32Values.set(node, LoweredNodeValue(value, m_highBlock));
    }
    void setInt52(Node* node, LValue value)
    {
        m_int52Values.set(node, LoweredNodeValue(value, m_highBlock));
    }
    void setStrictInt52(Node* node, LValue value)
    {
        m_strictInt52Values.set(node, LoweredNodeValue(value, m_highBlock));
    }
    // Dispatches to setInt52 or setStrictInt52 based on the Int52 flavor.
    void setInt52(Node* node, LValue value, Int52Kind kind)
    {
        switch (kind) {
        case Int52:
            setInt52(node, value);
            return;

        case StrictInt52:
            setStrictInt52(node, value);
            return;
        }

        DFG_CRASH(m_graph, m_node, "Corrupt int52 kind");
    }
    void setJSValue(Node* node, LValue value)
    {
        m_jsValueValues.set(node, LoweredNodeValue(value, m_highBlock));
    }
    void setBoolean(Node* node, LValue value)
    {
        m_booleanValues.set(node, LoweredNodeValue(value, m_highBlock));
    }
    void setStorage(Node* node, LValue value)
    {
        m_storageValues.set(node, LoweredNodeValue(value, m_highBlock));
    }
    void setDouble(Node* node, LValue value)
    {
        m_doubleValues.set(node, LoweredNodeValue(value, m_highBlock));
    }
17219
    // Convenience overloads that record the lowered value for the node
    // currently being lowered (m_node).
    void setInt32(LValue value)
    {
        setInt32(m_node, value);
    }
    void setInt52(LValue value)
    {
        setInt52(m_node, value);
    }
    void setStrictInt52(LValue value)
    {
        setStrictInt52(m_node, value);
    }
    void setInt52(LValue value, Int52Kind kind)
    {
        setInt52(m_node, value, kind);
    }
    void setJSValue(LValue value)
    {
        setJSValue(m_node, value);
    }
    void setBoolean(LValue value)
    {
        setBoolean(m_node, value);
    }
    void setStorage(LValue value)
    {
        setStorage(m_node, value);
    }
    void setDouble(LValue value)
    {
        setDouble(m_node, value);
    }
17252
17253 bool isValid(const LoweredNodeValue& value)
17254 {
17255 if (!value)
17256 return false;
17257 if (!m_graph.m_ssaDominators->dominates(value.block(), m_highBlock))
17258 return false;
17259 return true;
17260 }
17261
    // Registers |target| with the plan's weak-reference set so the
    // compilation's dependency on it is tracked.
    void addWeakReference(JSCell* target)
    {
        m_graph.m_plan.weakReferences().addLazily(target);
    }

    // Loads the decoded Structure* for a cell. The structure table stores
    // pointers XOR'd with entropy derived from the structure ID, so this
    // splits the ID into a table index and entropy bits, loads the encoded
    // pointer, and XORs the entropy back out. (Bit layout comes from
    // StructureIDTable's constants — see its definition for the scheme.)
    LValue loadStructure(LValue value)
    {
        LValue structureID = m_out.load32(value, m_heaps.JSCell_structureID);
        LValue tableBase = m_out.loadPtr(m_out.absolute(vm().heap.structureIDTable().base()));
        LValue tableIndex = m_out.aShr(structureID, m_out.constInt32(StructureIDTable::s_numberOfEntropyBits));
        LValue entropyBits = m_out.shl(m_out.zeroExtPtr(structureID), m_out.constInt32(StructureIDTable::s_entropyBitsShiftForStructurePointer));
        TypedPointer address = m_out.baseIndex(m_heaps.structureTable, tableBase, m_out.zeroExtPtr(tableIndex));
        LValue encodedStructureBits = m_out.loadPtr(address);
        return m_out.bitXor(encodedStructureBits, entropyBits);
    }
17277
    // Emits a pointer constant to |pointer| and records a weak reference so
    // the dependency is visible to GC.
    LValue weakPointer(JSCell* pointer)
    {
        addWeakReference(pointer);
        return m_out.weakPointer(m_graph, pointer);
    }

    // Frozen values need no addWeakReference here — presumably the graph
    // already tracks them; confirm against FrozenValue's ownership rules.
    LValue frozenPointer(FrozenValue* value)
    {
        return m_out.weakPointer(value);
    }

    // A RegisteredStructure is kept alive for this compilation, so its raw
    // 32-bit ID can be emitted directly as a constant.
    LValue weakStructureID(RegisteredStructure structure)
    {
        return m_out.constInt32(structure->id());
    }

    LValue weakStructure(RegisteredStructure structure)
    {
        ASSERT(!!structure.get());
        return m_out.weakPointer(m_graph, structure.get());
    }
17299
    // addressFor/payloadFor/tagFor compute TypedPointers to virtual-register
    // slots. The |base| overloads address relative to an explicit frame
    // pointer; the operand-only overloads route locals through m_captured and
    // everything else through m_callFrame. PayloadOffset/TagOffset select the
    // two halves of a JSValue slot.
    TypedPointer addressFor(LValue base, int operand, ptrdiff_t offset = 0)
    {
        return m_out.address(base, m_heaps.variables[operand], offset);
    }
    TypedPointer payloadFor(LValue base, int operand)
    {
        return addressFor(base, operand, PayloadOffset);
    }
    TypedPointer tagFor(LValue base, int operand)
    {
        return addressFor(base, operand, TagOffset);
    }
    TypedPointer addressFor(int operand, ptrdiff_t offset = 0)
    {
        return addressFor(VirtualRegister(operand), offset);
    }
    TypedPointer addressFor(VirtualRegister operand, ptrdiff_t offset = 0)
    {
        // Locals are addressed off m_captured rather than the call frame.
        if (operand.isLocal())
            return addressFor(m_captured, operand.offset(), offset);
        return addressFor(m_callFrame, operand.offset(), offset);
    }
    TypedPointer payloadFor(int operand)
    {
        return payloadFor(VirtualRegister(operand));
    }
    TypedPointer payloadFor(VirtualRegister operand)
    {
        return addressFor(operand, PayloadOffset);
    }
    TypedPointer tagFor(int operand)
    {
        return tagFor(VirtualRegister(operand));
    }
    TypedPointer tagFor(VirtualRegister operand)
    {
        return addressFor(operand, TagOffset);
    }
17338
    // Accessors for the abstract interpreter's knowledge about a node at the
    // current program point: the full AbstractValue, its proven speculated
    // type, its proven constant value (if any), and its structure set. The
    // Edge overloads simply forward to the Node overloads.
    AbstractValue abstractValue(Node* node)
    {
        return m_state.forNode(node);
    }
    AbstractValue abstractValue(Edge edge)
    {
        return abstractValue(edge.node());
    }

    SpeculatedType provenType(Node* node)
    {
        return abstractValue(node).m_type;
    }
    SpeculatedType provenType(Edge edge)
    {
        return provenType(edge.node());
    }

    JSValue provenValue(Node* node)
    {
        return abstractValue(node).m_value;
    }
    JSValue provenValue(Edge edge)
    {
        return provenValue(edge.node());
    }

    StructureAbstractValue abstractStructure(Node* node)
    {
        return abstractValue(node).m_structure;
    }
    StructureAbstractValue abstractStructure(Edge edge)
    {
        return abstractStructure(edge.node());
    }
17374
    // Emits an unconditional trap identifying the block/node currently being
    // lowered.
    void crash()
    {
        crash(m_highBlock, m_node);
    }
17379 void crash(DFG::BasicBlock* block, Node* node)
17380 {
17381 BlockIndex blockIndex = block->index;
17382 unsigned nodeIndex = node ? node->index() : UINT_MAX;
17383#if ASSERT_DISABLED
17384 m_out.patchpoint(Void)->setGenerator(
17385 [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
17386 AllowMacroScratchRegisterUsage allowScratch(jit);
17387
17388 jit.move(CCallHelpers::TrustedImm32(blockIndex), GPRInfo::regT0);
17389 jit.move(CCallHelpers::TrustedImm32(nodeIndex), GPRInfo::regT1);
17390 if (node)
17391 jit.move(CCallHelpers::TrustedImm32(node->op()), GPRInfo::regT2);
17392 jit.abortWithReason(FTLCrash);
17393 });
17394#else
17395 m_out.call(
17396 Void,
17397 m_out.constIntPtr(ftlUnreachable),
17398 // We don't want the CodeBlock to have a weak pointer to itself because
17399 // that would cause it to always get collected.
17400 m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), m_out.constInt32(blockIndex),
17401 m_out.constInt32(nodeIndex));
17402#endif
17403 m_out.unreachable();
17404 }
17405
    // Availability of values at the current lowering position.
    AvailabilityMap& availabilityMap() { return m_availabilityCalculator.m_availability; }

    VM& vm() { return m_graph.m_vm; }
    CodeBlock* codeBlock() { return m_graph.m_codeBlock; }
17410
    Graph& m_graph;
    State& m_ftlState;
    AbstractHeapRepository m_heaps;
    Output m_out; // B3 IR emission helper.
    Procedure& m_proc;

    LBasicBlock m_handleExceptions;
    // Mapping from DFG blocks to their lowered B3 blocks.
    HashMap<DFG::BasicBlock*, LBasicBlock> m_blocks;

    // Frequently used frame pointers and tag constants, materialized once.
    LValue m_callFrame;
    LValue m_captured;
    LValue m_tagTypeNumber;
    LValue m_tagMask;

    // Lowered values per node, one map per representation; a node can appear
    // in several maps if it has been converted between representations.
    // isValid() gates reuse via dominance of the defining block.
    HashMap<Node*, LoweredNodeValue> m_int32Values;
    HashMap<Node*, LoweredNodeValue> m_strictInt52Values;
    HashMap<Node*, LoweredNodeValue> m_int52Values;
    HashMap<Node*, LoweredNodeValue> m_jsValueValues;
    HashMap<Node*, LoweredNodeValue> m_booleanValues;
    HashMap<Node*, LoweredNodeValue> m_storageValues;
    HashMap<Node*, LoweredNodeValue> m_doubleValues;

    HashMap<Node*, LValue> m_phis;

    LocalOSRAvailabilityCalculator m_availabilityCalculator;

    // Abstract interpreter state, advanced as nodes are lowered.
    InPlaceAbstractState m_state;
    AbstractInterpreter<InPlaceAbstractState> m_interpreter;
    // Current lowering position: DFG block being lowered, its successor in
    // lowering order, and the next low-level block.
    DFG::BasicBlock* m_highBlock;
    DFG::BasicBlock* m_nextHighBlock;
    LBasicBlock m_nextLowBlock;

    enum IndexMaskingMode { IndexMaskingDisabled, IndexMaskingEnabled };

    IndexMaskingMode m_indexMaskingMode;

    NodeOrigin m_origin;
    unsigned m_nodeIndex;
    Node* m_node;

    // These are used for validating AI state.
    HashMap<Node*, NodeSet> m_liveInToNode;
    HashMap<Node*, AbstractValue> m_aiCheckedNodes;
    String m_graphDump;
17455};
17456
17457} // anonymous namespace
17458
17459void lowerDFGToB3(State& state)
17460{
17461 LowerDFGToB3 lowering(state);
17462 lowering.lower();
17463}
17464
17465} } // namespace JSC::FTL
17466
17467#endif // ENABLE(FTL_JIT)
17468
17469