/*
 * Copyright (C) 2010-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "DFGSpeculativeJIT.h"
#include "JITExceptions.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSBoundFunction.h"
#include "JSCInlines.h"
#include "MathCommon.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {

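// Debug-only helper: checks that pointerGPR plausibly holds a valid tagged code
// pointer before we call or jump through it. It verifies that the pointer is
// non-null, then saves the register, untags it, and loads a byte through it so
// that a bogus pointer crashes here, near the bug, rather than at the eventual
// indirect call; finally it restores the register.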
template<typename TagType>
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR, TagType tag)
{
    if (ASSERT_DISABLED)
        return;
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.abortWithReason(TGInvalidPointer);
    isNonZero.link(&jit);
    jit.pushToSave(pointerGPR);
    jit.untagPtr(tag, pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.popToRestore(pointerGPR);
}

// We will jump here if the JIT code tries to make a call, but the
// linking helper (C++ code) decides to throw an exception instead.
MacroAssemblerCodeRef<JITThunkPtrTag> throwExceptionFromCallSlowPathGenerator(VM* vm)
{
    CCallHelpers jit;

    // The call pushed a return address, so we need to pop it back off to re-align the stack,
    // even though we won't use it.
    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);

    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);

    jit.setupArguments<decltype(lookupExceptionHandler)>(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
    jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.jumpToExceptionHandler(*vm);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Throw exception from call slow path thunk");
}

static void slowPathFor(CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
{
    jit.sanitizeStackInline(*vm, GPRInfo::nonArgGPR0);
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
#if OS(WINDOWS) && CPU(X86_64)
    // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
    // Other argument values are shifted by 1. Use space on the stack for our two return values.
    // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
    // and space for the 16-byte return area.
    jit.addPtr(CCallHelpers::TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), CCallHelpers::stackPointerRegister);
    jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
    jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
    jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#else
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArguments<decltype(slowPathFunction)>(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag);
    jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#endif

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    // The second return value GPR will hold a non-zero value for tail calls.

    emitPointerValidation(jit, GPRInfo::returnValueGPR, JSEntryPtrTag);
    jit.emitFunctionEpilogue();
    jit.untagReturnAddress();

    RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
    CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);

    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
    jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);

    doNotTrash.link(&jit);
    jit.jump(GPRInfo::returnValueGPR, JSEntryPtrTag);
}

MacroAssemblerCodeRef<JITThunkPtrTag> linkCallThunkGenerator(VM* vm)
{
    // The return address is on the stack or in the link register. We will hence
    // save the return address to the call frame while we make a C++ function call
    // to perform linking and lazy compilation if necessary. We expect the callee
    // to be in regT0/regT1 (payload/tag), the CallFrame to have already
    // been adjusted, and all other registers to be available for use.
    CCallHelpers jit;

    slowPathFor(jit, vm, operationLinkCall);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Link call slow path thunk");
}

// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
MacroAssemblerCodeRef<JITThunkPtrTag> linkPolymorphicCallThunkGenerator(VM* vm)
{
    CCallHelpers jit;

    slowPathFor(jit, vm, operationLinkPolymorphicCall);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Link polymorphic call slow path thunk");
}

// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
// path virtual call so that we can enable fast tail calls for megamorphic
// virtual calls by using the shuffler.
// https://bugs.webkit.org/show_bug.cgi?id=148831
MacroAssemblerCodeRef<JITStubRoutinePtrTag> virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit;

    CCallHelpers::JumpList slowCase;

    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.

#if USE(JSVALUE64)
    if (callLinkInfo.isTailCall()) {
        // Tail calls could have clobbered GPRInfo::tagMaskRegister because they
        // restore callee-saved registers before getting here. So, let's materialize
        // the TagMask in a temp register and use the temp instead.
        slowCase.append(jit.branchIfNotCell(GPRInfo::regT0, DoNotHaveTagRegisters));
    } else
        slowCase.append(jit.branchIfNotCell(GPRInfo::regT0));
#else
    slowCase.append(jit.branchIfNotCell(GPRInfo::regT1));
#endif
    auto notJSFunction = jit.branchIfNotFunction(GPRInfo::regT0);

    // Now we know we have a JSFunction.

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
                callLinkInfo.specializationKind())),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));

    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.

    // Make a tail call. This will return back to JIT code.
    JSInterfaceJIT::Label callCode(jit.label());
    emitPointerValidation(jit, GPRInfo::regT4, JSEntryPtrTag);
    if (callLinkInfo.isTailCall()) {
        jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
        jit.prepareForTailCallSlow(GPRInfo::regT4);
    }
    jit.jump(GPRInfo::regT4, JSEntryPtrTag);

    notJSFunction.link(&jit);
    slowCase.append(jit.branchIfNotType(GPRInfo::regT0, InternalFunctionType));
    void* executableAddress = vm->getCTIInternalFunctionTrampolineFor(callLinkInfo.specializationKind()).executableAddress();
    jit.move(CCallHelpers::TrustedImmPtr(executableAddress), GPRInfo::regT4);
    jit.jump().linkTo(callCode, &jit);

    slowCase.link(&jit);

    // Here we don't know anything, so revert to the full slow path.
    slowPathFor(jit, vm, operationVirtualCall);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer, JITStubRoutinePtrTag,
        "Virtual %s slow path thunk",
        callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct");
}

enum ThunkEntryType { EnterViaCall, EnterViaJumpWithSavedTags, EnterViaJumpWithoutSavedTags };
enum class ThunkFunctionType { JSFunction, InternalFunction };
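
// EnterViaCall builds a fresh frame with a function prologue; the EnterViaJump
// variants are reached by a jump from a specialized thunk whose frame is
// already set up, and differ in whether that thunk saved the tag registers
// for us to restore.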

static MacroAssemblerCodeRef<JITThunkPtrTag> nativeForGenerator(VM* vm, ThunkFunctionType thunkFunctionType, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
    // FIXME: This should be able to log ShadowChicken prologue packets.
    // https://bugs.webkit.org/show_bug.cgi?id=155689

    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit(vm);

    switch (entryType) {
    case EnterViaCall:
        jit.emitFunctionPrologue();
        break;
    case EnterViaJumpWithSavedTags:
#if USE(JSVALUE64)
        // We're coming from a specialized thunk that has saved the prior tag registers' contents.
        // Restore them now.
        jit.popPair(JSInterfaceJIT::tagTypeNumberRegister, JSInterfaceJIT::tagMaskRegister);
#endif
        break;
    case EnterViaJumpWithoutSavedTags:
        jit.move(JSInterfaceJIT::framePointerRegister, JSInterfaceJIT::stackPointerRegister);
        break;
    }

    jit.emitPutToCallFrameHeader(0, CallFrameSlot::codeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Calling convention: f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.

    // call the function
    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, JSInterfaceJIT::regT1);
    if (thunkFunctionType == ThunkFunctionType::JSFunction) {
        jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
        jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction), JSEntryPtrTag);
    } else
        jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, InternalFunction::offsetOfNativeFunctionFor(kind)), JSEntryPtrTag);

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
#if !OS(WINDOWS)
    // Calling convention: f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, X86Registers::esi);
    if (thunkFunctionType == ThunkFunctionType::JSFunction) {
        jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
        jit.loadPtr(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction), X86Registers::r9);
    } else
        jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, InternalFunction::offsetOfNativeFunctionFor(kind)), X86Registers::r9);
    jit.call(X86Registers::r9, JSEntryPtrTag);

#else
    // Calling convention: f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses.
    // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, X86Registers::edx);
    if (thunkFunctionType == ThunkFunctionType::JSFunction) {
        jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
        jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction), JSEntryPtrTag);
    } else
        jit.call(JSInterfaceJIT::Address(X86Registers::edx, InternalFunction::offsetOfNativeFunctionFor(kind)), JSEntryPtrTag);

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, ARM64Registers::x1);
    if (thunkFunctionType == ThunkFunctionType::JSFunction) {
        jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
        jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction), ARM64Registers::x2);
    } else
        jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, InternalFunction::offsetOfNativeFunctionFor(kind)), ARM64Registers::x2);
    jit.call(ARM64Registers::x2, JSEntryPtrTag);

#elif CPU(ARM_THUMB2) || CPU(MIPS)
#if CPU(MIPS)
    // Allocate 16 bytes of stack space (8-byte aligned) for 4 unused argument slots.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif

    // Calling convention is f(argumentGPR0, argumentGPR1, ...).
    // Host function signature is f(ExecState*).
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);

    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, JSInterfaceJIT::argumentGPR1);
    if (thunkFunctionType == ThunkFunctionType::JSFunction) {
        jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
        jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction), JSEntryPtrTag);
    } else
        jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, InternalFunction::offsetOfNativeFunctionFor(kind)), JSEntryPtrTag);

#if CPU(MIPS)
    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
    abortWithReason(TGNotSupported);
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()),
        JSInterfaceJIT::TrustedImm32(0));
#endif

    jit.emitFunctionEpilogue();
    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86) && USE(JSVALUE32_64)
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4), JSInterfaceJIT::stackPointerRegister);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT0);
    jit.push(JSInterfaceJIT::regT0);
#else
#if OS(WINDOWS)
    // Allocate space on stack for the 4 parameter registers.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
#endif
    jit.move(JSInterfaceJIT::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationVMHandleException)), JSInterfaceJIT::regT3);
    jit.call(JSInterfaceJIT::regT3, OperationPtrTag);
#if CPU(X86) && USE(JSVALUE32_64)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
#elif OS(WINDOWS)
    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

    jit.jumpToExceptionHandler(*vm);

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "%s %s%s trampoline", thunkFunctionType == ThunkFunctionType::JSFunction ? "native" : "internal", entryType == EnterViaJumpWithSavedTags ? "Tail With Saved Tags " : entryType == EnterViaJumpWithoutSavedTags ? "Tail Without Saved Tags " : "", toCString(kind).data());
}

MacroAssemblerCodeRef<JITThunkPtrTag> nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall);
}

MacroAssemblerCodeRef<JITThunkPtrTag> nativeTailCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall, EnterViaJumpWithSavedTags);
}

MacroAssemblerCodeRef<JITThunkPtrTag> nativeTailCallWithoutSavedTagsGenerator(VM* vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall, EnterViaJumpWithoutSavedTags);
}

MacroAssemblerCodeRef<JITThunkPtrTag> nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForConstruct);
}

MacroAssemblerCodeRef<JITThunkPtrTag> internalFunctionCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::InternalFunction, CodeForCall);
}

MacroAssemblerCodeRef<JITThunkPtrTag> internalFunctionConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, ThunkFunctionType::InternalFunction, CodeForConstruct);
}

MacroAssemblerCodeRef<JITThunkPtrTag> arityFixupGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    // We enter with the fixup count in argumentGPR0.
    // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct. :-)
#if USE(JSVALUE64)
#if OS(WINDOWS)
    const GPRReg extraTemp = JSInterfaceJIT::regT0;
#else
    const GPRReg extraTemp = JSInterfaceJIT::regT5;
#endif
# if CPU(X86_64)
    jit.pop(JSInterfaceJIT::regT4);
# endif
    jit.tagReturnAddress();
#if CPU(ARM64E)
    jit.loadPtr(JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3);
    jit.addPtr(JSInterfaceJIT::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::callFrameRegister, extraTemp);
    jit.untagPtr(extraTemp, GPRInfo::regT3);
    PtrTag tempReturnPCTag = static_cast<PtrTag>(random());
    jit.move(JSInterfaceJIT::TrustedImmPtr(tempReturnPCTag), extraTemp);
    jit.tagPtr(extraTemp, GPRInfo::regT3);
    jit.storePtr(GPRInfo::regT3, JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
#endif
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::addressFor(CallFrameSlot::argumentCount), JSInterfaceJIT::argumentGPR2);
    jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);

    // Check to see if we have extra slots we can use
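    // The frame is kept stackAlignmentRegisters()-aligned, so a fixup count
    // that is not a multiple of the alignment leaves padding slots above the
    // argument area. We fill those with undefined in place, which may let us
    // avoid moving the frame at all.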
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
    JSInterfaceJIT::Label fillExtraSlots(jit.label());
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight));
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
    noExtraSlot.link(&jit);

    jit.neg64(JSInterfaceJIT::argumentGPR0);

    // Adjust the call frame register and the stack pointer to account for the missing args.
    // We must move the stack pointer before performing the copy/fill loops:
    // the OS considers stack space below the stack pointer unused, and may
    // corrupt it, for example when constructing a signal stack.
    jit.move(JSInterfaceJIT::argumentGPR0, extraTemp);
    jit.lshift64(JSInterfaceJIT::TrustedImm32(3), extraTemp);
    jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister);
    jit.untagReturnAddress();
    jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister);
    jit.tagReturnAddress();

    // Move current frame down argumentGPR0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load64(JSInterfaceJIT::regT3, extraTemp);
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);

    // Fill in argumentGPR0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);

    done.link(&jit);

#if CPU(ARM64E)
    jit.loadPtr(JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3);
    jit.move(JSInterfaceJIT::TrustedImmPtr(tempReturnPCTag), extraTemp);
    jit.untagPtr(extraTemp, GPRInfo::regT3);
    jit.addPtr(JSInterfaceJIT::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::callFrameRegister, extraTemp);
    jit.tagPtr(extraTemp, GPRInfo::regT3);
    jit.storePtr(GPRInfo::regT3, JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
#endif

# if CPU(X86_64)
    jit.push(JSInterfaceJIT::regT4);
# endif
    jit.ret();
#else // USE(JSVALUE64) section above, USE(JSVALUE32_64) section below.
# if CPU(X86)
    jit.pop(JSInterfaceJIT::regT4);
# endif
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
    jit.load32(JSInterfaceJIT::addressFor(CallFrameSlot::argumentCount), JSInterfaceJIT::argumentGPR2);
    jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);

    // Check to see if we have extra slots we can use
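    // Same padding-slot trick as the JSVALUE64 path above, except that each
    // undefined value is stored as separate 32-bit tag and payload halves.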
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
    jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
    JSInterfaceJIT::Label fillExtraSlots(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, TagOffset));
    jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
    jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
    jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
    JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
    noExtraSlot.link(&jit);

    jit.neg32(JSInterfaceJIT::argumentGPR0);

    // Adjust the call frame register and the stack pointer to account for the missing args.
    // We must move the stack pointer before performing the copy/fill loops:
    // the OS considers stack space below the stack pointer unused, and may
    // corrupt it, for example when constructing a signal stack.
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::regT5);
    jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT5);
    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister);
    jit.untagReturnAddress();
    jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister);
    jit.tagReturnAddress();

    // Move current frame down argumentGPR0 number of slots
    JSInterfaceJIT::Label copyLoop(jit.label());
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, PayloadOffset), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, TagOffset), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);

    // Fill in argumentGPR0 missing arg slots with undefined
    jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
    JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
    jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
    jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);

    done.link(&jit);

# if CPU(X86)
    jit.push(JSInterfaceJIT::regT4);
# endif
    jit.ret();
#endif // End of USE(JSVALUE32_64) section.

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "fixup arity");
}

MacroAssemblerCodeRef<JITThunkPtrTag> unreachableGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    jit.breakpoint();

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "unreachable thunk");
}

MacroAssemblerCodeRef<JITThunkPtrTag> stringGetByValGenerator(VM* vm)
{
    // regT0 holds the JSString*, and regT1 (64-bit) or regT2 (32-bit) holds the int index.
    // On success, returns the result JSString* in regT0; otherwise, returns 0 in regT0.
#if USE(JSVALUE64)
    GPRReg stringGPR = GPRInfo::regT0;
    GPRReg indexGPR = GPRInfo::regT1;
    GPRReg scratchGPR = GPRInfo::regT2;
#else
    GPRReg stringGPR = GPRInfo::regT0;
    GPRReg indexGPR = GPRInfo::regT2;
    GPRReg scratchGPR = GPRInfo::regT1;
#endif

    JSInterfaceJIT jit(vm);
    JSInterfaceJIT::JumpList failures;
    jit.tagReturnAddress();

    // Load the string length into scratchGPR, and start the process of loading the data pointer into stringGPR
    jit.loadPtr(JSInterfaceJIT::Address(stringGPR, JSString::offsetOfValue()), stringGPR);
    failures.append(jit.branchIfRopeStringImpl(stringGPR));
    jit.load32(JSInterfaceJIT::Address(stringGPR, StringImpl::lengthMemoryOffset()), scratchGPR);

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(JSInterfaceJIT::AboveOrEqual, indexGPR, scratchGPR));

    // Load the character
    JSInterfaceJIT::JumpList is16Bit;
    JSInterfaceJIT::JumpList cont8Bit;
    // Load the string flags
    jit.load32(JSInterfaceJIT::Address(stringGPR, StringImpl::flagsOffset()), scratchGPR);
    jit.loadPtr(JSInterfaceJIT::Address(stringGPR, StringImpl::dataOffset()), stringGPR);
    is16Bit.append(jit.branchTest32(JSInterfaceJIT::Zero, scratchGPR, JSInterfaceJIT::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(JSInterfaceJIT::BaseIndex(stringGPR, indexGPR, JSInterfaceJIT::TimesOne, 0), stringGPR);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(JSInterfaceJIT::BaseIndex(stringGPR, indexGPR, JSInterfaceJIT::TimesTwo, 0), stringGPR);
    cont8Bit.link(&jit);

    failures.append(jit.branch32(JSInterfaceJIT::Above, stringGPR, JSInterfaceJIT::TrustedImm32(maxSingleCharacterString)));
    jit.move(JSInterfaceJIT::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), indexGPR);
    jit.loadPtr(JSInterfaceJIT::BaseIndex(indexGPR, stringGPR, JSInterfaceJIT::ScalePtr, 0), stringGPR);
    jit.ret();

    failures.link(&jit);
    jit.move(JSInterfaceJIT::TrustedImm32(0), stringGPR);
    jit.ret();

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "String get_by_val stub");
}

static void stringCharLoad(SpecializedThunkJIT& jit)
{
    // load string
    jit.loadJSStringArgument(SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, JSString::offsetOfValue()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchIfRopeStringImpl(SpecializedThunkJIT::regT0));
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::lengthMemoryOffset()), SpecializedThunkJIT::regT2);

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));

    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}

static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::Above, src, MacroAssembler::TrustedImm32(maxSingleCharacterString)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef<JITThunkPtrTag> charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef<JITThunkPtrTag> charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    stringCharLoad(jit);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}

MacroAssemblerCodeRef<JITThunkPtrTag> fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    // load char code
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef<JITThunkPtrTag> clz32ThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntArgJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);

    SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
    jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnInt32(SpecializedThunkJIT::regT1);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArgJump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArgJump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");
}

MacroAssemblerCodeRef<JITThunkPtrTag> sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}


#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
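
// Each defineUnaryDoubleOpWrapper(function) below emits a small platform-specific
// trampoline: it shuffles the incoming double from the JIT's floating-point
// register into the C calling convention, calls the C math function, and moves
// the result back, so thunks can call into libm without building a full C frame.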

#if CPU(X86_64) && COMPILER(GCC_COMPATIBLE) && (OS(DARWIN) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushq %rax\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "popq %rcx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC_COMPATIBLE) && OS(LINUX) && defined(__PIC__)
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "pushl %ebx\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call __x86.get_pc_thunk.bx\n" \
        "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "popl %ebx\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC_COMPATIBLE) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $20, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $20, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC_COMPATIBLE) && PLATFORM(IOS_FAMILY)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
        ".previous" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)

// MSVC does not allow floor, etc., to be called directly from inline assembly, so we need to wrap these functions.
static double (_cdecl *floorFunction)(double) = floor;
static double (_cdecl *ceilFunction)(double) = ceil;
static double (_cdecl *truncFunction)(double) = trunc;
static double (_cdecl *expFunction)(double) = exp;
static double (_cdecl *logFunction)(double) = log;
static double (_cdecl *jsRoundFunction)(double) = jsRound;

#define defineUnaryDoubleOpWrapper(function) \
    extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
    { \
        __asm \
        { \
        __asm sub esp, 20 \
        __asm movsd mmword ptr [esp], xmm0 \
        __asm call function##Function \
        __asm fstp qword ptr [esp] \
        __asm movsd xmm0, mmword ptr [esp] \
        __asm add esp, 20 \
        __asm ret \
        } \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0
#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);
defineUnaryDoubleOpWrapper(trunc);

static const double halfConstant = 0.5;
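// roundThunkGenerator below loads this constant to add 0.5 to non-negative
// doubles before truncating; negative inputs take the slow path, so this
// implements Math.round's round-half-toward-positive-infinity semantics.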

MacroAssemblerCodeRef<JITThunkPtrTag> floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointRounding()) {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT0);
        return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
    }

    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}

MacroAssemblerCodeRef<JITThunkPtrTag> ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    if (jit.supportsFloatingPointRounding())
        jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    else
        jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));

    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
}

MacroAssemblerCodeRef<JITThunkPtrTag> truncThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(trunc) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    if (jit.supportsFloatingPointRounding())
        jit.roundTowardZeroDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    else
        jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(trunc));

    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "trunc");
}

MacroAssemblerCodeRef<JITThunkPtrTag> roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}

MacroAssemblerCodeRef<JITThunkPtrTag> expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}

MacroAssemblerCodeRef<JITThunkPtrTag> logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}

MacroAssemblerCodeRef<JITThunkPtrTag> absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

#if USE(JSVALUE64)
    unsigned virtualRegisterIndex = CallFrame::argumentOffset(0);
    jit.load64(AssemblyHelpers::addressFor(virtualRegisterIndex), GPRInfo::regT0);
    auto notInteger = jit.branchIfNotInt32(GPRInfo::regT0);

    // Abs Int32.
    jit.rshift32(GPRInfo::regT0, MacroAssembler::TrustedImm32(31), GPRInfo::regT1);
    jit.add32(GPRInfo::regT1, GPRInfo::regT0);
    jit.xor32(GPRInfo::regT1, GPRInfo::regT0);
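    // regT1 is now 0 for a non-negative input and -1 for a negative one, so
    // (x + regT1) ^ regT1 computes |x| branchlessly for every x except INT_MIN.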

    // IntMin cannot be inverted.
    MacroAssembler::Jump integerIsIntMin = jit.branchTest32(MacroAssembler::Signed, GPRInfo::regT0);

    // Box and finish.
    jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
    MacroAssembler::Jump doneWithIntegers = jit.jump();

    // Handle Doubles.
    notInteger.link(&jit);
    jit.appendFailure(jit.branchIfNotNumber(GPRInfo::regT0));
    jit.unboxDoubleWithoutAssertions(GPRInfo::regT0, GPRInfo::regT0, FPRInfo::fpRegT0);
    MacroAssembler::Label absFPR0Label = jit.label();
    jit.absDouble(FPRInfo::fpRegT0, FPRInfo::fpRegT1);
    jit.boxDouble(FPRInfo::fpRegT1, GPRInfo::regT0);

    // Tail.
    doneWithIntegers.link(&jit);
    jit.returnJSValue(GPRInfo::regT0);

    // We know the value of regT0 is IntMin. We could load that value from memory but
    // it is simpler to just convert it.
    integerIsIntMin.link(&jit);
    jit.convertInt32ToDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
    jit.jump().linkTo(absFPR0Label, &jit);
#else
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Signed, SpecializedThunkJIT::regT0));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
#endif
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}

MacroAssemblerCodeRef<JITThunkPtrTag> imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.appendFailure(jit.jump());
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
}

MacroAssemblerCodeRef<JITThunkPtrTag> randomThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(vm, 0);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

#if USE(JSVALUE64)
    jit.emitRandomThunk(*vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);

    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "random");
#else
    return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
#endif
}

MacroAssemblerCodeRef<JITThunkPtrTag> boundThisNoArgsFunctionCallGenerator(VM* vm)
{
    CCallHelpers jit;

    jit.emitFunctionPrologue();

    // Set up our call frame.
    jit.storePtr(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::addressFor(CallFrameSlot::codeBlock));
    jit.store32(CCallHelpers::TrustedImm32(0), CCallHelpers::tagFor(CallFrameSlot::argumentCount));

    unsigned extraStackNeeded = 0;
    if (unsigned stackMisalignment = sizeof(CallerFrameAndPC) % stackAlignmentBytes())
        extraStackNeeded = stackAlignmentBytes() - stackMisalignment;

    // We need to forward all of the arguments that we were passed. We aren't allowed to do a tail
    // call here as far as I can tell. At least not so long as the generic path doesn't do a tail
    // call, since that would be way too weird.

    // The formula for the number of stack bytes needed given some number of parameters (including
    // this) is:
    //
    //     stackAlign((numParams + CallFrameHeaderSize) * sizeof(Register) - sizeof(CallerFrameAndPC))
    //
    // Probably we want to write this as:
    //
    //     stackAlign((numParams + (CallFrameHeaderSize - CallerFrameAndPCSize)) * sizeof(Register))
    //
    // That's really all there is to this. We have all the registers we need to do it.
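    //
    // As a concrete illustration (assuming 8-byte Registers, a 5-register call
    // frame header, a 2-register CallerFrameAndPC, and 16-byte stack alignment,
    // as on JSVALUE64 platforms): a call with numParams == 3 needs
    // stackAlign((3 + (5 - 2)) * 8) == stackAlign(48) == 48 bytes.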

    jit.load32(CCallHelpers::payloadFor(CallFrameSlot::argumentCount), GPRInfo::regT1);
    jit.add32(CCallHelpers::TrustedImm32(CallFrame::headerSizeInRegisters - CallerFrameAndPC::sizeInRegisters), GPRInfo::regT1, GPRInfo::regT2);
    jit.lshift32(CCallHelpers::TrustedImm32(3), GPRInfo::regT2);
    jit.add32(CCallHelpers::TrustedImm32(stackAlignmentBytes() - 1), GPRInfo::regT2);
    jit.and32(CCallHelpers::TrustedImm32(-stackAlignmentBytes()), GPRInfo::regT2);

    if (extraStackNeeded)
        jit.add32(CCallHelpers::TrustedImm32(extraStackNeeded), GPRInfo::regT2);

    // At this point regT1 has the actual argument count and regT2 has the amount of stack we will
    // need.

    jit.subPtr(GPRInfo::regT2, CCallHelpers::stackPointerRegister);

    // Do basic callee frame setup, including 'this'.

    jit.loadCell(CCallHelpers::addressFor(CallFrameSlot::callee), GPRInfo::regT3);

    jit.store32(GPRInfo::regT1, CCallHelpers::calleeFramePayloadSlot(CallFrameSlot::argumentCount));

    JSValueRegs valueRegs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT2);
    jit.loadValue(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfBoundThis()), valueRegs);
    jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(0));

    jit.loadPtr(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfTargetFunction()), GPRInfo::regT3);
    jit.storeCell(GPRInfo::regT3, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));

    // OK, now we can start copying. This is a simple matter of copying parameters from the caller's
    // frame to the callee's frame. Note that we know that regT1 (the argument count) must be at
    // least 1.
    jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
    CCallHelpers::Jump done = jit.branchTest32(CCallHelpers::Zero, GPRInfo::regT1);

    CCallHelpers::Label loop = jit.label();
    jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
    jit.loadValue(CCallHelpers::addressFor(virtualRegisterForArgument(1)).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight), valueRegs);
    jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(1).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight));
    jit.branchTest32(CCallHelpers::NonZero, GPRInfo::regT1).linkTo(loop, &jit);

    done.link(&jit);

    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT3, JSFunction::offsetOfExecutable()),
        GPRInfo::regT0);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT0, ExecutableBase::offsetOfJITCodeWithArityCheckFor(CodeForCall)),
        GPRInfo::regT0);
    CCallHelpers::Jump noCode = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT0);

    emitPointerValidation(jit, GPRInfo::regT0, JSEntryPtrTag);
    jit.call(GPRInfo::regT0, JSEntryPtrTag);

    jit.emitFunctionEpilogue();
    jit.ret();

    LinkBuffer linkBuffer(jit, GLOBAL_THUNK_ID);
    linkBuffer.link(noCode, CodeLocationLabel<JITThunkPtrTag>(vm->jitStubs->ctiNativeTailCallWithoutSavedTags(vm)));
    return FINALIZE_CODE(
        linkBuffer, JITThunkPtrTag, "Specialized thunk for bound function calls with no arguments");
}

} // namespace JSC

#endif // ENABLE(JIT)