/*
 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "AssemblyHelpers.h"

#if ENABLE(JIT)

#include "JITOperations.h"
#include "JSCInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "SuperSampler.h"
#include "ThunkGenerators.h"

#if ENABLE(WEBASSEMBLY)
#include "WasmContextInlines.h"
#include "WasmMemoryInformation.h"
#endif

namespace JSC {

ExecutableBase* AssemblyHelpers::executableFor(const CodeOrigin& codeOrigin)
{
    auto* inlineCallFrame = codeOrigin.inlineCallFrame();
    if (!inlineCallFrame)
        return m_codeBlock->ownerExecutable();
    return inlineCallFrame->baselineCodeBlock->ownerExecutable();
}

AssemblyHelpers::Jump AssemblyHelpers::branchIfFastTypedArray(GPRReg baseGPR)
{
    return branch32(
        Equal,
        Address(baseGPR, JSArrayBufferView::offsetOfMode()),
        TrustedImm32(FastTypedArray));
}

AssemblyHelpers::Jump AssemblyHelpers::branchIfNotFastTypedArray(GPRReg baseGPR)
{
    return branch32(
        NotEqual,
        Address(baseGPR, JSArrayBufferView::offsetOfMode()),
        TrustedImm32(FastTypedArray));
}

void AssemblyHelpers::incrementSuperSamplerCount()
{
    add32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<const void*>(&g_superSamplerCount)));
}

void AssemblyHelpers::decrementSuperSamplerCount()
{
    sub32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<const void*>(&g_superSamplerCount)));
}

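// Replaces any NaN in fpr with the canonical pure NaN (PNaN), whose bit pattern is safe for
// NaN-boxing as a JSValue.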
void AssemblyHelpers::purifyNaN(FPRReg fpr)
{
    MacroAssembler::Jump notNaN = branchIfNotNaN(fpr);
    static const double NaN = PNaN;
    loadDouble(TrustedImmPtr(&NaN), fpr);
    notNaN.link(this);
}

#if ENABLE(SAMPLING_FLAGS)
void AssemblyHelpers::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}

void AssemblyHelpers::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}
#endif

#if !ASSERT_DISABLED
#if USE(JSVALUE64)
void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
{
#if CPU(X86_64) || CPU(ARM64)
    Jump checkInt32 = branch64(BelowOrEqual, gpr, TrustedImm64(static_cast<uintptr_t>(0xFFFFFFFFu)));
    abortWithReason(AHIsNotInt32);
    checkInt32.link(this);
#else
    UNUSED_PARAM(gpr);
#endif
}

void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr)
{
    Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
    abortWithReason(AHIsNotJSInt32);
    checkJSInt32.link(this);
}

void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
{
    Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
    abortWithReason(AHIsNotJSNumber);
    checkJSNumber.link(this);
}

void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
{
    Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
    Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
    checkJSInt32.link(this);
    abortWithReason(AHIsNotJSDouble);
    checkJSNumber.link(this);
}

void AssemblyHelpers::jitAssertIsCell(GPRReg gpr)
{
    Jump checkCell = branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister);
    abortWithReason(AHIsNotCell);
    checkCell.link(this);
}

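// Verifies that the tag registers still hold their expected constants, TagTypeNumber and TagMask.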
void AssemblyHelpers::jitAssertTagsInPlace()
{
    Jump ok = branch64(Equal, GPRInfo::tagTypeNumberRegister, TrustedImm64(TagTypeNumber));
    abortWithReason(AHTagTypeNumberNotInPlace);
    breakpoint();
    ok.link(this);

    ok = branch64(Equal, GPRInfo::tagMaskRegister, TrustedImm64(TagMask));
    abortWithReason(AHTagMaskNotInPlace);
    ok.link(this);
}
#elif USE(JSVALUE32_64)
void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
{
    UNUSED_PARAM(gpr);
}

void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr)
{
    Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag));
    abortWithReason(AHIsNotJSInt32);
    checkJSInt32.link(this);
}

void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
{
    Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag));
    Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag));
    abortWithReason(AHIsNotJSNumber);
    checkJSInt32.link(this);
    checkJSDouble.link(this);
}

void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
{
    Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag));
    abortWithReason(AHIsNotJSDouble);
    checkJSDouble.link(this);
}

void AssemblyHelpers::jitAssertIsCell(GPRReg gpr)
{
    Jump checkCell = branch32(Equal, gpr, TrustedImm32(JSValue::CellTag));
    abortWithReason(AHIsNotCell);
    checkCell.link(this);
}

void AssemblyHelpers::jitAssertTagsInPlace()
{
}
#endif // USE(JSVALUE32_64)

void AssemblyHelpers::jitAssertHasValidCallFrame()
{
    Jump checkCFR = branchTestPtr(Zero, GPRInfo::callFrameRegister, TrustedImm32(7));
    abortWithReason(AHCallFrameMisaligned);
    checkCFR.link(this);
}

void AssemblyHelpers::jitAssertIsNull(GPRReg gpr)
{
    Jump checkNull = branchTestPtr(Zero, gpr);
    abortWithReason(AHIsNotNull);
    checkNull.link(this);
}

void AssemblyHelpers::jitAssertArgumentCountSane()
{
    Jump ok = branch32(Below, payloadFor(CallFrameSlot::argumentCount), TrustedImm32(10000000));
    abortWithReason(AHInsaneArgumentCount);
    ok.link(this);
}

#endif // !ASSERT_DISABLED

void AssemblyHelpers::jitReleaseAssertNoException(VM& vm)
{
    Jump noException;
#if USE(JSVALUE64)
    noException = branchTest64(Zero, AbsoluteAddress(vm.addressOfException()));
#elif USE(JSVALUE32_64)
    noException = branch32(Equal, AbsoluteAddress(vm.addressOfException()), TrustedImm32(0));
#endif
    abortWithReason(JITUncoughtExceptionAfterCall);
    noException.link(this);
}

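// With exception fuzzing enabled, spills every GPR and FPR to the VM's fuzzing buffer, calls
// operationExceptionFuzz (which may force an exception for testing), then restores all registers.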
void AssemblyHelpers::callExceptionFuzz(VM& vm)
{
    if (!Options::useExceptionFuzz())
        return;

    EncodedJSValue* buffer = vm.exceptionFuzzingBuffer(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters));

    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        store64(GPRInfo::toRegister(i), buffer + i);
#else
        store32(GPRInfo::toRegister(i), buffer + i);
#endif
    }
    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        storeDouble(FPRInfo::toRegister(i), Address(GPRInfo::regT0));
    }

    // Set up one argument.
#if CPU(X86)
    poke(GPRInfo::callFrameRegister, 0);
#else
    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
#endif
    move(TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationExceptionFuzz)), GPRInfo::nonPreservedNonReturnGPR);
    call(GPRInfo::nonPreservedNonReturnGPR, OperationPtrTag);

    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        loadDouble(Address(GPRInfo::regT0), FPRInfo::toRegister(i));
    }
    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        load64(buffer + i, GPRInfo::toRegister(i));
#else
        load32(buffer + i, GPRInfo::toRegister(i));
#endif
    }
}

AssemblyHelpers::Jump AssemblyHelpers::emitJumpIfException(VM& vm)
{
    return emitExceptionCheck(vm, NormalExceptionCheck);
}

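// Emits a branch that is taken when the VM has a pending exception. For FarJumpWidth the sense of
// the check is inverted so that a short branch can skip over the patchable far jump.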
AssemblyHelpers::Jump AssemblyHelpers::emitExceptionCheck(VM& vm, ExceptionCheckKind kind, ExceptionJumpWidth width)
{
    callExceptionFuzz(vm);

    if (width == FarJumpWidth)
        kind = (kind == NormalExceptionCheck ? InvertedExceptionCheck : NormalExceptionCheck);

    Jump result;
#if USE(JSVALUE64)
    result = branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm.addressOfException()));
#elif USE(JSVALUE32_64)
    result = branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(vm.addressOfException()), TrustedImm32(0));
#endif

    if (width == NormalJumpWidth)
        return result;

    PatchableJump realJump = patchableJump();
    result.link(this);

    return realJump.m_jump;
}

AssemblyHelpers::Jump AssemblyHelpers::emitNonPatchableExceptionCheck(VM& vm)
{
    callExceptionFuzz(vm);

    Jump result;
#if USE(JSVALUE64)
    result = branchTest64(NonZero, AbsoluteAddress(vm.addressOfException()));
#elif USE(JSVALUE32_64)
    result = branch32(NotEqual, AbsoluteAddress(vm.addressOfException()), TrustedImm32(0));
#endif

    return result;
}

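// Initializes a freshly allocated cell's header. On 64-bit this is one store of the structure's
// id blob (structure ID, indexing mode, cell type, and inline type flags); debug builds then
// re-check each field against the Structure.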
void AssemblyHelpers::emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest)
{
    const Structure* structurePtr = reinterpret_cast<const Structure*>(structure.m_value);
#if USE(JSVALUE64)
    jit.store64(TrustedImm64(structurePtr->idBlob()), MacroAssembler::Address(dest, JSCell::structureIDOffset()));
    if (!ASSERT_DISABLED) {
        Jump correctStructure = jit.branch32(Equal, MacroAssembler::Address(dest, JSCell::structureIDOffset()), TrustedImm32(structurePtr->id()));
        jit.abortWithReason(AHStructureIDIsValid);
        correctStructure.link(&jit);

        Jump correctIndexingType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::indexingTypeAndMiscOffset()), TrustedImm32(structurePtr->indexingModeIncludingHistory()));
        jit.abortWithReason(AHIndexingTypeIsValid);
        correctIndexingType.link(&jit);

        Jump correctType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoTypeOffset()), TrustedImm32(structurePtr->typeInfo().type()));
        jit.abortWithReason(AHTypeInfoIsValid);
        correctType.link(&jit);

        Jump correctFlags = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoFlagsOffset()), TrustedImm32(structurePtr->typeInfo().inlineTypeFlags()));
        jit.abortWithReason(AHTypeInfoInlineTypeFlagsAreValid);
        correctFlags.link(&jit);
    }
#else
    // Do a 32-bit wide store to initialize the cell's fields.
    jit.store32(TrustedImm32(structurePtr->objectInitializationBlob()), MacroAssembler::Address(dest, JSCell::indexingTypeAndMiscOffset()));
    jit.storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
#endif
}

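// Loads the property at the given property offset. Inline properties live directly after the
// JSObject header, while out-of-line properties are indexed backwards off the butterfly, which is
// why the offset is negated on that path.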
void AssemblyHelpers::loadProperty(GPRReg object, GPRReg offset, JSValueRegs result)
{
    Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));

    loadPtr(Address(object, JSObject::butterflyOffset()), result.payloadGPR());
    neg32(offset);
    signExtend32ToPtr(offset, offset);
    Jump ready = jump();

    isInline.link(this);
    addPtr(
        TrustedImm32(
            static_cast<int32_t>(sizeof(JSObject)) -
            (static_cast<int32_t>(firstOutOfLineOffset) - 2) * static_cast<int32_t>(sizeof(EncodedJSValue))),
        object, result.payloadGPR());

    ready.link(this);

    loadValue(
        BaseIndex(
            result.payloadGPR(), offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)),
        result);
}

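// Decodes a cell's Structure* from its 32-bit structure ID. On 64-bit, the high bits of the ID
// index the VM's StructureIDTable and the low entropy bits are XORed back out of the encoded
// pointer; on 32-bit the structure pointer is stored in the cell directly.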
void AssemblyHelpers::emitLoadStructure(VM& vm, RegisterID source, RegisterID dest, RegisterID scratch)
{
#if USE(JSVALUE64)
#if CPU(ARM64)
    RegisterID scratch2 = dataTempRegister;
#elif CPU(X86_64)
    RegisterID scratch2 = scratchRegister();
#else
#error "Unsupported cpu"
#endif

    ASSERT(dest != scratch);
    ASSERT(dest != scratch2);
    ASSERT(scratch != scratch2);

    load32(MacroAssembler::Address(source, JSCell::structureIDOffset()), scratch2);
    loadPtr(vm.heap.structureIDTable().base(), scratch);
    rshift32(scratch2, TrustedImm32(StructureIDTable::s_numberOfEntropyBits), dest);
    loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
    lshiftPtr(TrustedImm32(StructureIDTable::s_entropyBitsShiftForStructurePointer), scratch2);
    xorPtr(scratch2, dest);
#else // not USE(JSVALUE64)
    UNUSED_PARAM(scratch);
    UNUSED_PARAM(vm);
    loadPtr(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
#endif // not USE(JSVALUE64)
}

void AssemblyHelpers::makeSpaceOnStackForCCall()
{
    unsigned stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), maxFrameExtentForSlowPathCall);
    if (stackOffset)
        subPtr(TrustedImm32(stackOffset), stackPointerRegister);
}

void AssemblyHelpers::reclaimSpaceOnStackForCCall()
{
    unsigned stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), maxFrameExtentForSlowPathCall);
    if (stackOffset)
        addPtr(TrustedImm32(stackOffset), stackPointerRegister);
}

#if USE(JSVALUE64)
template<typename LoadFromHigh, typename StoreToHigh, typename LoadFromLow, typename StoreToLow>
void emitRandomThunkImpl(AssemblyHelpers& jit, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result, const LoadFromHigh& loadFromHigh, const StoreToHigh& storeToHigh, const LoadFromLow& loadFromLow, const StoreToLow& storeToLow)
{
    // Inlined WeakRandom::advance().
    // uint64_t x = m_low;
    loadFromLow(scratch0);
    // uint64_t y = m_high;
    loadFromHigh(scratch1);
    // m_low = y;
    storeToLow(scratch1);

    // x ^= x << 23;
    jit.move(scratch0, scratch2);
    jit.lshift64(AssemblyHelpers::TrustedImm32(23), scratch2);
    jit.xor64(scratch2, scratch0);

    // x ^= x >> 17;
    jit.move(scratch0, scratch2);
    jit.rshift64(AssemblyHelpers::TrustedImm32(17), scratch2);
    jit.xor64(scratch2, scratch0);

    // x ^= y ^ (y >> 26);
    jit.move(scratch1, scratch2);
    jit.rshift64(AssemblyHelpers::TrustedImm32(26), scratch2);
    jit.xor64(scratch1, scratch2);
    jit.xor64(scratch2, scratch0);

    // m_high = x;
    storeToHigh(scratch0);

    // return x + y;
    jit.add64(scratch1, scratch0);

    // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation.
    jit.move(AssemblyHelpers::TrustedImm64((1ULL << 53) - 1), scratch1);
    jit.and64(scratch1, scratch0);
    // Now, scratch0 is always in range of int64_t. Safe to convert it to double with cvtsi2sdq.
    jit.convertInt64ToDouble(scratch0, result);

    // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
    // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)).
    static const double scale = 1.0 / (1ULL << 53);

    // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer.
    // It just reduces the exp part of the given 53bit double integer.
    // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.)
    // Now we get 53bit precision random double value in [0, 1).
    jit.move(AssemblyHelpers::TrustedImmPtr(&scale), scratch1);
    jit.mulDouble(AssemblyHelpers::Address(scratch1), result);
}

void AssemblyHelpers::emitRandomThunk(JSGlobalObject* globalObject, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result)
{
    void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
    void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();

    auto loadFromHigh = [&](GPRReg high) {
        load64(highAddress, high);
    };
    auto storeToHigh = [&](GPRReg high) {
        store64(high, highAddress);
    };
    auto loadFromLow = [&](GPRReg low) {
        load64(lowAddress, low);
    };
    auto storeToLow = [&](GPRReg low) {
        store64(low, lowAddress);
    };

    emitRandomThunkImpl(*this, scratch0, scratch1, scratch2, result, loadFromHigh, storeToHigh, loadFromLow, storeToLow);
}

void AssemblyHelpers::emitRandomThunk(VM& vm, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result)
{
    emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, scratch3);
    emitLoadStructure(vm, scratch3, scratch3, scratch0);
    loadPtr(Address(scratch3, Structure::globalObjectOffset()), scratch3);
    // Now, scratch3 holds JSGlobalObject*.

    auto loadFromHigh = [&](GPRReg high) {
        load64(Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset()), high);
    };
    auto storeToHigh = [&](GPRReg high) {
        store64(high, Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset()));
    };
    auto loadFromLow = [&](GPRReg low) {
        load64(Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset()), low);
    };
    auto storeToLow = [&](GPRReg low) {
        store64(low, Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset()));
    };

    emitRandomThunkImpl(*this, scratch0, scratch1, scratch2, result, loadFromHigh, storeToHigh, loadFromLow, storeToLow);
}
#endif

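// Fast-path GC allocation: first try to bump-allocate out of the free list's remaining bytes;
// if those are exhausted, pop the scrambled free-list head, and if the list is empty fall through
// to the caller's slow path.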
void AssemblyHelpers::emitAllocateWithNonNullAllocator(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath)
{
    if (Options::forceGCSlowPaths()) {
        slowPath.append(jump());
        return;
    }

    // NOTE, some invariants of this function:
    // - When going to the slow path, we must leave resultGPR with zero in it.
    // - We *can not* use RegisterSet::macroScratchRegisters on x86.
    // - We *can* use RegisterSet::macroScratchRegisters on ARM.

    Jump popPath;
    Jump done;

    if (allocator.isConstant())
        move(TrustedImmPtr(allocator.allocator().localAllocator()), allocatorGPR);

    load32(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfRemaining()), resultGPR);
    popPath = branchTest32(Zero, resultGPR);
    if (allocator.isConstant())
        add32(TrustedImm32(-allocator.allocator().cellSize()), resultGPR, scratchGPR);
    else {
        move(resultGPR, scratchGPR);
        sub32(Address(allocatorGPR, LocalAllocator::offsetOfCellSize()), scratchGPR);
    }
    negPtr(resultGPR);
    store32(scratchGPR, Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfRemaining()));
    Address payloadEndAddr = Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfPayloadEnd());
    addPtr(payloadEndAddr, resultGPR);

    done = jump();

    popPath.link(this);

    loadPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfScrambledHead()), resultGPR);
    xorPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfSecret()), resultGPR);
    slowPath.append(branchTestPtr(Zero, resultGPR));

    // The object is half-allocated: we have what we know is a fresh object, but
    // it's still on the GC's free list.
    loadPtr(Address(resultGPR), scratchGPR);
    storePtr(scratchGPR, Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfScrambledHead()));

    done.link(this);
}

void AssemblyHelpers::emitAllocate(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath)
{
    if (allocator.isConstant()) {
        if (!allocator.allocator()) {
            slowPath.append(jump());
            return;
        }
    } else
        slowPath.append(branchTestPtr(Zero, allocatorGPR));
    emitAllocateWithNonNullAllocator(resultGPR, allocator, allocatorGPR, scratchGPR, slowPath);
}

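// Rounds the requested size up to a MarkedSpace size step, bails to the slow path for sizes above
// largeCutoff, and otherwise picks the subspace's allocator for that size class before allocating.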
void AssemblyHelpers::emitAllocateVariableSized(GPRReg resultGPR, CompleteSubspace& subspace, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
{
    static_assert(!(MarkedSpace::sizeStep & (MarkedSpace::sizeStep - 1)), "MarkedSpace::sizeStep must be a power of two.");

    unsigned stepShift = getLSBSet(MarkedSpace::sizeStep);

    add32(TrustedImm32(MarkedSpace::sizeStep - 1), allocationSize, scratchGPR1);
    urshift32(TrustedImm32(stepShift), scratchGPR1);
    slowPath.append(branch32(Above, scratchGPR1, TrustedImm32(MarkedSpace::largeCutoff >> stepShift)));
    move(TrustedImmPtr(subspace.allocatorForSizeStep()), scratchGPR2);
    loadPtr(BaseIndex(scratchGPR2, scratchGPR1, timesPtr()), scratchGPR1);

    emitAllocate(resultGPR, JITAllocator::variable(), scratchGPR1, scratchGPR2, slowPath);
}

void AssemblyHelpers::restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(EntryFrame*& topEntryFrame)
{
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
    RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
    RegisterSet dontRestoreRegisters = RegisterSet::stackRegisters();
    unsigned registerCount = allCalleeSaves->size();

    GPRReg scratch = InvalidGPRReg;
    unsigned scratchGPREntryIndex = 0;

    // Use the first GPR entry's register as our scratch.
    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset entry = allCalleeSaves->at(i);
        if (dontRestoreRegisters.get(entry.reg()))
            continue;
        if (entry.reg().isGPR()) {
            scratchGPREntryIndex = i;
            scratch = entry.reg().gpr();
            break;
        }
    }
    ASSERT(scratch != InvalidGPRReg);

    loadPtr(&topEntryFrame, scratch);
    addPtr(TrustedImm32(EntryFrame::calleeSaveRegistersBufferOffset()), scratch);

    // Restore all callee saves except for the scratch.
    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset entry = allCalleeSaves->at(i);
        if (dontRestoreRegisters.get(entry.reg()))
            continue;
        if (entry.reg().isGPR()) {
            if (i != scratchGPREntryIndex)
                loadPtr(Address(scratch, entry.offset()), entry.reg().gpr());
        } else
            loadDouble(Address(scratch, entry.offset()), entry.reg().fpr());
    }

    // Restore the callee save value of the scratch.
    RegisterAtOffset entry = allCalleeSaves->at(scratchGPREntryIndex);
    ASSERT(!dontRestoreRegisters.get(entry.reg()));
    ASSERT(entry.reg().isGPR());
    ASSERT(scratch == entry.reg().gpr());
    loadPtr(Address(scratch, entry.offset()), scratch);
#else
    UNUSED_PARAM(topEntryFrame);
#endif
}

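// Emits an unoptimized virtual call: regT2 carries the CallLinkInfo, and the near call gets linked
// to a newly generated virtual call thunk when the LinkBuffer is finalized.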
void AssemblyHelpers::emitDumbVirtualCall(VM& vm, CallLinkInfo* info)
{
    move(TrustedImmPtr(info), GPRInfo::regT2);
    Call call = nearCall();
    addLinkTask(
        [=, &vm] (LinkBuffer& linkBuffer) {
            MacroAssemblerCodeRef<JITStubRoutinePtrTag> virtualThunk = virtualThunkFor(&vm, *info);
            info->setSlowStub(createJITStubRoutine(virtualThunk, vm, nullptr, true));
            linkBuffer.link(call, CodeLocationLabel<JITStubRoutinePtrTag>(virtualThunk.code()));
        });
}

#if USE(JSVALUE64)
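// Thomas Wang's 64-bit integer hash, truncated to an unsigned 32-bit result.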
void AssemblyHelpers::wangsInt64Hash(GPRReg inputAndResult, GPRReg scratch)
{
    GPRReg input = inputAndResult;
    // key += ~(key << 32);
    move(input, scratch);
    lshift64(TrustedImm32(32), scratch);
    not64(scratch);
    add64(scratch, input);
    // key ^= (key >> 22);
    move(input, scratch);
    urshift64(TrustedImm32(22), scratch);
    xor64(scratch, input);
    // key += ~(key << 13);
    move(input, scratch);
    lshift64(TrustedImm32(13), scratch);
    not64(scratch);
    add64(scratch, input);
    // key ^= (key >> 8);
    move(input, scratch);
    urshift64(TrustedImm32(8), scratch);
    xor64(scratch, input);
    // key += (key << 3);
    move(input, scratch);
    lshift64(TrustedImm32(3), scratch);
    add64(scratch, input);
    // key ^= (key >> 15);
    move(input, scratch);
    urshift64(TrustedImm32(15), scratch);
    xor64(scratch, input);
    // key += ~(key << 27);
    move(input, scratch);
    lshift64(TrustedImm32(27), scratch);
    not64(scratch);
    add64(scratch, input);
    // key ^= (key >> 31);
    move(input, scratch);
    urshift64(TrustedImm32(31), scratch);
    xor64(scratch, input);

    // return static_cast<unsigned>(result)
    void* mask = bitwise_cast<void*>(static_cast<uintptr_t>(UINT_MAX));
    and64(TrustedImmPtr(mask), inputAndResult);
}
#endif // USE(JSVALUE64)

void AssemblyHelpers::emitConvertValueToBoolean(VM& vm, JSValueRegs value, GPRReg result, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg valueAsFPR, FPRReg tempFPR, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject* globalObject, bool invert)
{
    // Implements the following control flow structure:
    // if (value is cell) {
    //     if (value is string or value is BigInt)
    //         result = !!value->length
    //     else {
    //         do evil things for masquerades-as-undefined
    //         result = true
    //     }
    // } else if (value is int32) {
    //     result = !!unboxInt32(value)
    // } else if (value is number) {
    //     result = !!unboxDouble(value)
    // } else {
    //     result = value == jsTrue
    // }

    JumpList done;

    auto notCell = branchIfNotCell(value);
    auto isString = branchIfString(value.payloadGPR());
    auto isBigInt = branchIfBigInt(value.payloadGPR());

    if (shouldCheckMasqueradesAsUndefined) {
        ASSERT(scratchIfShouldCheckMasqueradesAsUndefined != InvalidGPRReg);
        JumpList isNotMasqueradesAsUndefined;
        isNotMasqueradesAsUndefined.append(branchTest8(Zero, Address(value.payloadGPR(), JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)));
        emitLoadStructure(vm, value.payloadGPR(), result, scratchIfShouldCheckMasqueradesAsUndefined);
        move(TrustedImmPtr(globalObject), scratchIfShouldCheckMasqueradesAsUndefined);
        isNotMasqueradesAsUndefined.append(branchPtr(NotEqual, Address(result, Structure::globalObjectOffset()), scratchIfShouldCheckMasqueradesAsUndefined));

        // We act like we are "undefined" here.
        move(invert ? TrustedImm32(1) : TrustedImm32(0), result);
        done.append(jump());
        isNotMasqueradesAsUndefined.link(this);
    }
    move(invert ? TrustedImm32(0) : TrustedImm32(1), result);
    done.append(jump());

    isString.link(this);
    move(TrustedImmPtr(jsEmptyString(&vm)), result);
    comparePtr(invert ? Equal : NotEqual, value.payloadGPR(), result, result);
    done.append(jump());

    isBigInt.link(this);
    load32(Address(value.payloadGPR(), JSBigInt::offsetOfLength()), result);
    compare32(invert ? Equal : NotEqual, result, TrustedImm32(0), result);
    done.append(jump());

    notCell.link(this);
    auto notInt32 = branchIfNotInt32(value);
    compare32(invert ? Equal : NotEqual, value.payloadGPR(), TrustedImm32(0), result);
    done.append(jump());

    notInt32.link(this);
    auto notDouble = branchIfNotDoubleKnownNotInt32(value);
#if USE(JSVALUE64)
    unboxDouble(value.gpr(), result, valueAsFPR);
#else
    unboxDouble(value, valueAsFPR, tempFPR);
#endif
    move(invert ? TrustedImm32(1) : TrustedImm32(0), result);
    done.append(branchDoubleZeroOrNaN(valueAsFPR, tempFPR));
    move(invert ? TrustedImm32(0) : TrustedImm32(1), result);
    done.append(jump());

    notDouble.link(this);
#if USE(JSVALUE64)
    static_assert(static_cast<int32_t>(ValueTrue) == ValueTrue, "");
    compare64(invert ? NotEqual : Equal, value.gpr(), TrustedImm32(ValueTrue), result);
#else
    move(invert ? TrustedImm32(1) : TrustedImm32(0), result);
    done.append(branchIfNotBoolean(value, InvalidGPRReg));
    compare32(invert ? Equal : NotEqual, value.payloadGPR(), TrustedImm32(0), result);
#endif

    done.link(this);
}

AssemblyHelpers::JumpList AssemblyHelpers::branchIfValue(VM& vm, JSValueRegs value, GPRReg scratch, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg valueAsFPR, FPRReg tempFPR, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject* globalObject, bool invert)
{
    // Implements the following control flow structure:
    // if (value is cell) {
    //     if (value is string or value is BigInt)
    //         result = !!value->length
    //     else {
    //         do evil things for masquerades-as-undefined
    //         result = true
    //     }
    // } else if (value is int32) {
    //     result = !!unboxInt32(value)
    // } else if (value is number) {
    //     result = !!unboxDouble(value)
    // } else {
    //     result = value == jsTrue
    // }

    JumpList done;
    JumpList truthy;

    auto notCell = branchIfNotCell(value);
    auto isString = branchIfString(value.payloadGPR());
    auto isBigInt = branchIfBigInt(value.payloadGPR());

    if (shouldCheckMasqueradesAsUndefined) {
        ASSERT(scratchIfShouldCheckMasqueradesAsUndefined != InvalidGPRReg);
        JumpList isNotMasqueradesAsUndefined;
        isNotMasqueradesAsUndefined.append(branchTest8(Zero, Address(value.payloadGPR(), JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)));
        emitLoadStructure(vm, value.payloadGPR(), scratch, scratchIfShouldCheckMasqueradesAsUndefined);
        move(TrustedImmPtr(globalObject), scratchIfShouldCheckMasqueradesAsUndefined);
        isNotMasqueradesAsUndefined.append(branchPtr(NotEqual, Address(scratch, Structure::globalObjectOffset()), scratchIfShouldCheckMasqueradesAsUndefined));

        // We act like we are "undefined" here.
        if (invert)
            truthy.append(jump());
        else
            done.append(jump());

        if (invert)
            done.append(isNotMasqueradesAsUndefined);
        else
            truthy.append(isNotMasqueradesAsUndefined);
    } else {
        if (invert)
            done.append(jump());
        else
            truthy.append(jump());
    }

    isString.link(this);
    truthy.append(branchPtr(invert ? Equal : NotEqual, value.payloadGPR(), TrustedImmPtr(jsEmptyString(&vm))));
    done.append(jump());

    isBigInt.link(this);
    truthy.append(branchTest32(invert ? Zero : NonZero, Address(value.payloadGPR(), JSBigInt::offsetOfLength())));
    done.append(jump());

    notCell.link(this);
    auto notInt32 = branchIfNotInt32(value);
    truthy.append(branchTest32(invert ? Zero : NonZero, value.payloadGPR()));
    done.append(jump());

    notInt32.link(this);
    auto notDouble = branchIfNotDoubleKnownNotInt32(value);
#if USE(JSVALUE64)
    unboxDouble(value.gpr(), scratch, valueAsFPR);
#else
    unboxDouble(value, valueAsFPR, tempFPR);
#endif
    if (invert) {
        truthy.append(branchDoubleZeroOrNaN(valueAsFPR, tempFPR));
        done.append(jump());
    } else {
        done.append(branchDoubleZeroOrNaN(valueAsFPR, tempFPR));
        truthy.append(jump());
    }

    notDouble.link(this);
#if USE(JSVALUE64)
    truthy.append(branch64(invert ? NotEqual : Equal, value.gpr(), TrustedImm64(JSValue::encode(jsBoolean(true)))));
#else
    auto notBoolean = branchIfNotBoolean(value, InvalidGPRReg);
    if (invert)
        truthy.append(notBoolean);
    else
        done.append(notBoolean);
    truthy.append(branch32(invert ? Equal : NotEqual, value.payloadGPR(), TrustedImm32(0)));
#endif

    done.link(this);

    return truthy;
}

#if ENABLE(WEBASSEMBLY)
void AssemblyHelpers::loadWasmContextInstance(GPRReg dst)
{
#if ENABLE(FAST_TLS_JIT)
    if (Wasm::Context::useFastTLS()) {
        loadFromTLSPtr(fastTLSOffsetForKey(WTF_WASM_CONTEXT_KEY), dst);
        return;
    }
#endif
    move(Wasm::PinnedRegisterInfo::get().wasmContextInstancePointer, dst);
}

void AssemblyHelpers::storeWasmContextInstance(GPRReg src)
{
#if ENABLE(FAST_TLS_JIT)
    if (Wasm::Context::useFastTLS()) {
        storeToTLSPtr(src, fastTLSOffsetForKey(WTF_WASM_CONTEXT_KEY));
        return;
    }
#endif
    move(src, Wasm::PinnedRegisterInfo::get().wasmContextInstancePointer);
}

bool AssemblyHelpers::loadWasmContextInstanceNeedsMacroScratchRegister()
{
#if ENABLE(FAST_TLS_JIT)
    if (Wasm::Context::useFastTLS())
        return loadFromTLSPtrNeedsMacroScratchRegister();
#endif
    return false;
}

bool AssemblyHelpers::storeWasmContextInstanceNeedsMacroScratchRegister()
{
#if ENABLE(FAST_TLS_JIT)
    if (Wasm::Context::useFastTLS())
        return storeToTLSPtrNeedsMacroScratchRegister();
#endif
    return false;
}

#endif // ENABLE(WEBASSEMBLY)

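// Spills every register to a VM scratch buffer (telling the GC how much of the buffer is active),
// calls the debug operation with the call frame, argument, and buffer, then restores all registers.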
void AssemblyHelpers::debugCall(VM& vm, V_DebugOperation_EPP function, void* argument)
{
    size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
    ScratchBuffer* scratchBuffer = vm.scratchBufferForSize(scratchSize);
    EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());

    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        store64(GPRInfo::toRegister(i), buffer + i);
#else
        store32(GPRInfo::toRegister(i), buffer + i);
#endif
    }

    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
    }

    // Tell GC mark phase how much of the scratch buffer is active during call.
    move(TrustedImmPtr(scratchBuffer->addressOfActiveLength()), GPRInfo::regT0);
    storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0);

#if CPU(X86_64) || CPU(ARM_THUMB2) || CPU(ARM64) || CPU(MIPS)
    move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2);
    move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
    GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);
#elif CPU(X86)
    poke(GPRInfo::callFrameRegister, 0);
    poke(TrustedImmPtr(argument), 1);
    poke(TrustedImmPtr(buffer), 2);
    GPRReg scratch = GPRInfo::regT0;
#else
#error "JIT not supported on this platform."
#endif
    move(TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(function)), scratch);
    call(scratch, OperationPtrTag);

    move(TrustedImmPtr(scratchBuffer->addressOfActiveLength()), GPRInfo::regT0);
    storePtr(TrustedImmPtr(nullptr), GPRInfo::regT0);

    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
    }
    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        load64(buffer + i, GPRInfo::toRegister(i));
#else
        load32(buffer + i, GPRInfo::toRegister(i));
#endif
    }
}

void AssemblyHelpers::copyCalleeSavesToEntryFrameCalleeSavesBufferImpl(GPRReg calleeSavesBuffer)
{
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
    addPtr(TrustedImm32(EntryFrame::calleeSaveRegistersBufferOffset()), calleeSavesBuffer);

    RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
    RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
    unsigned registerCount = allCalleeSaves->size();

    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset entry = allCalleeSaves->at(i);
        if (dontCopyRegisters.get(entry.reg()))
            continue;
        if (entry.reg().isGPR())
            storePtr(entry.reg().gpr(), Address(calleeSavesBuffer, entry.offset()));
        else
            storeDouble(entry.reg().fpr(), Address(calleeSavesBuffer, entry.offset()));
    }
#else
    UNUSED_PARAM(calleeSavesBuffer);
#endif
}

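// Zeroes the stale stack area between the VM's recorded last stack top and the current stack
// pointer, then records the new stack top.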
void AssemblyHelpers::sanitizeStackInline(VM& vm, GPRReg scratch)
{
    loadPtr(vm.addressOfLastStackTop(), scratch);
    Jump done = branchPtr(BelowOrEqual, stackPointerRegister, scratch);
    Label loop = label();
    storePtr(TrustedImmPtr(nullptr), scratch);
    addPtr(TrustedImmPtr(sizeof(void*)), scratch);
    branchPtr(Above, stackPointerRegister, scratch).linkTo(loop, this);
    done.link(this);
    move(stackPointerRegister, scratch);
    storePtr(scratch, vm.addressOfLastStackTop());
}

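// Computes index - length and arithmetic-shifts it into a mask that is all ones when index < length
// and zero otherwise, so a subsequent load can be masked off for out-of-bounds indices.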
void AssemblyHelpers::emitPreparePreciseIndexMask32(GPRReg index, GPRReg length, GPRReg result)
{
    if (length == result) {
        negPtr(length);
        addPtr(index, length);
    } else {
        move(index, result);
        subPtr(length, result);
    }
    rshiftPtr(TrustedImm32(preciseIndexMaskShift<void*>()), result);
}

} // namespace JSC

#endif // ENABLE(JIT)