1 | /* |
2 | * Copyright (C) 2010-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' |
14 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, |
15 | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS |
17 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
18 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
19 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
20 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
21 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
22 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
23 | * THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "ThunkGenerators.h" |
28 | |
29 | #include "CodeBlock.h" |
30 | #include "DFGSpeculativeJIT.h" |
31 | #include "JITExceptions.h" |
32 | #include "JITOperations.h" |
33 | #include "JSArray.h" |
34 | #include "JSBoundFunction.h" |
35 | #include "JSCInlines.h" |
36 | #include "MathCommon.h" |
37 | #include "MaxFrameExtentForSlowPathCall.h" |
38 | #include "ProbeContext.h" |
39 | #include "SpecializedThunkJIT.h" |
40 | #include <wtf/InlineASM.h> |
41 | #include <wtf/StringPrintStream.h> |
42 | #include <wtf/text/StringImpl.h> |
43 | |
44 | #if ENABLE(JIT) |
45 | |
46 | namespace JSC { |
47 | |
48 | template<typename TagType> |
49 | inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR, TagType tag) |
50 | { |
51 | if (ASSERT_DISABLED) |
52 | return; |
53 | CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR); |
54 | jit.abortWithReason(TGInvalidPointer); |
55 | isNonZero.link(&jit); |
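    // Probe the pointer: strip its signature (a no-op where pointer signing is not in use) and load
    // a byte through it, so that a bogus or incorrectly signed pointer faults here, where it is easy
    // to diagnose, rather than at the eventual call or jump site.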
56 | jit.pushToSave(pointerGPR); |
57 | jit.untagPtr(tag, pointerGPR); |
58 | jit.load8(pointerGPR, pointerGPR); |
59 | jit.popToRestore(pointerGPR); |
60 | } |
61 | |
62 | // We will jump here if the JIT code tries to make a call, but the |
63 | // linking helper (C++ code) decides to throw an exception instead. |
64 | MacroAssemblerCodeRef<JITThunkPtrTag> throwExceptionFromCallSlowPathGenerator(VM& vm) |
65 | { |
66 | CCallHelpers jit; |
67 | |
68 | // The call pushed a return address, so we need to pop it back off to re-align the stack, |
69 | // even though we won't use it. |
70 | jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR); |
71 | |
72 | jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame); |
73 | |
74 | jit.setupArguments<decltype(operationLookupExceptionHandler)>(CCallHelpers::TrustedImmPtr(&vm)); |
75 | jit.prepareCallOperation(vm); |
76 | jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationLookupExceptionHandler)), GPRInfo::nonArgGPR0); |
77 | emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag); |
78 | jit.call(GPRInfo::nonArgGPR0, OperationPtrTag); |
79 | jit.jumpToExceptionHandler(vm); |
80 | |
81 | LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID); |
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Throw exception from call slow path thunk");
83 | } |
84 | |
85 | static void slowPathFor(CCallHelpers& jit, VM& vm, Sprt_JITOperation_EGCli slowPathFunction) |
86 | { |
87 | jit.sanitizeStackInline(vm, GPRInfo::nonArgGPR0); |
88 | jit.emitFunctionPrologue(); |
89 | jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame); |
90 | #if OS(WINDOWS) && CPU(X86_64) |
91 | // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits. |
    // Other argument values are shifted by 1. Use space on the stack for our two return values.
    // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
    // and space for the 16-byte return area.
95 | jit.addPtr(CCallHelpers::TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), CCallHelpers::stackPointerRegister); |
96 | static_assert(GPRInfo::regT2 != GPRInfo::argumentGPR0); |
97 | static_assert(GPRInfo::regT3 != GPRInfo::argumentGPR0); |
98 | jit.move(GPRInfo::regT2, GPRInfo::argumentGPR0); |
99 | jit.move(GPRInfo::regT3, GPRInfo::argumentGPR2); |
100 | jit.move(GPRInfo::argumentGPR0, GPRInfo::argumentGPR3); |
101 | jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0); |
102 | jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); |
103 | jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(slowPathFunction)), GPRInfo::nonArgGPR0); |
104 | emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag); |
105 | jit.call(GPRInfo::nonArgGPR0, OperationPtrTag); |
106 | jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2); |
107 | jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR); |
108 | jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister); |
109 | #else |
110 | if (maxFrameExtentForSlowPathCall) |
111 | jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister); |
112 | jit.setupArguments<decltype(slowPathFunction)>(GPRInfo::regT3, GPRInfo::regT2); |
113 | jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(slowPathFunction)), GPRInfo::nonArgGPR0); |
114 | emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag); |
115 | jit.call(GPRInfo::nonArgGPR0, OperationPtrTag); |
116 | if (maxFrameExtentForSlowPathCall) |
117 | jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister); |
118 | #endif |
119 | |
120 | // This slow call will return the address of one of the following: |
121 | // 1) Exception throwing thunk. |
122 | // 2) Host call return value returner thingy. |
123 | // 3) The function to call. |
124 | // The second return value GPR will hold a non-zero value for tail calls. |
125 | |
126 | emitPointerValidation(jit, GPRInfo::returnValueGPR, JSEntryPtrTag); |
127 | jit.emitFunctionEpilogue(); |
128 | jit.untagReturnAddress(); |
129 | |
130 | RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0)); |
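    // KeepTheFrame is zero, so a zero second return value means the callee is invoked with the
    // current frame intact; a non-zero value means the slow path requested a tail call, so we tear
    // down this frame and shuffle into the caller's frame before jumping.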
131 | CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2); |
132 | |
133 | jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR); |
134 | jit.prepareForTailCallSlow(GPRInfo::returnValueGPR); |
135 | |
136 | doNotTrash.link(&jit); |
137 | jit.farJump(GPRInfo::returnValueGPR, JSEntryPtrTag); |
138 | } |
139 | |
140 | MacroAssemblerCodeRef<JITThunkPtrTag> linkCallThunkGenerator(VM& vm) |
141 | { |
142 | // The return address is on the stack or in the link register. We will hence |
143 | // save the return address to the call frame while we make a C++ function call |
144 | // to perform linking and lazy compilation if necessary. We expect the callee |
145 | // to be in regT0/regT1 (payload/tag), the CallFrame to have already |
146 | // been adjusted, and all other registers to be available for use. |
147 | CCallHelpers jit; |
148 | |
149 | slowPathFor(jit, vm, operationLinkCall); |
150 | |
151 | LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID); |
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Link call slow path thunk");
153 | } |
154 | |
155 | // For closure optimizations, we only include calls, since if you're using closures for |
156 | // object construction then you're going to lose big time anyway. |
157 | MacroAssemblerCodeRef<JITThunkPtrTag> linkPolymorphicCallThunkGenerator(VM& vm) |
158 | { |
159 | CCallHelpers jit; |
160 | |
161 | slowPathFor(jit, vm, operationLinkPolymorphicCall); |
162 | |
163 | LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID); |
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Link polymorphic call slow path thunk");
165 | } |
166 | |
167 | // FIXME: We should distinguish between a megamorphic virtual call vs. a slow |
168 | // path virtual call so that we can enable fast tail calls for megamorphic |
169 | // virtual calls by using the shuffler. |
170 | // https://bugs.webkit.org/show_bug.cgi?id=148831 |
171 | MacroAssemblerCodeRef<JITStubRoutinePtrTag> virtualThunkFor(VM& vm, CallLinkInfo& callLinkInfo) |
172 | { |
173 | // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1). |
174 | // The return address is on the stack, or in the link register. We will hence |
175 | // jump to the callee, or save the return address to the call frame while we |
176 | // make a C++ function call to the appropriate JIT operation. |
177 | |
178 | CCallHelpers jit; |
179 | |
180 | CCallHelpers::JumpList slowCase; |
181 | |
182 | // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the |
183 | // slow path execution for the profiler. |
184 | jit.add32( |
185 | CCallHelpers::TrustedImm32(1), |
186 | CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount())); |
187 | |
188 | // FIXME: we should have a story for eliminating these checks. In many cases, |
189 | // the DFG knows that the value is definitely a cell, or definitely a function. |
190 | |
191 | #if USE(JSVALUE64) |
192 | if (callLinkInfo.isTailCall()) { |
193 | // Tail calls could have clobbered the GPRInfo::notCellMaskRegister because they |
        // restore callee saved registers before getting here. So, let's materialize
195 | // the NotCellMask in a temp register and use the temp instead. |
196 | slowCase.append(jit.branchIfNotCell(GPRInfo::regT0, DoNotHaveTagRegisters)); |
197 | } else |
198 | slowCase.append(jit.branchIfNotCell(GPRInfo::regT0)); |
199 | #else |
200 | slowCase.append(jit.branchIfNotCell(GPRInfo::regT1)); |
201 | #endif |
202 | auto notJSFunction = jit.branchIfNotFunction(GPRInfo::regT0); |
203 | |
204 | // Now we know we have a JSFunction. |
205 | |
206 | jit.loadPtr( |
207 | CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()), |
208 | GPRInfo::regT4); |
209 | jit.loadPtr( |
210 | CCallHelpers::Address( |
211 | GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor( |
212 | callLinkInfo.specializationKind())), |
213 | GPRInfo::regT4); |
214 | slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4)); |
215 | |
216 | // Now we know that we have a CodeBlock, and we're committed to making a fast |
217 | // call. |
218 | |
219 | // Make a tail call. This will return back to JIT code. |
220 | JSInterfaceJIT::Label callCode(jit.label()); |
221 | emitPointerValidation(jit, GPRInfo::regT4, JSEntryPtrTag); |
222 | if (callLinkInfo.isTailCall()) { |
223 | jit.preserveReturnAddressAfterCall(GPRInfo::regT0); |
224 | jit.prepareForTailCallSlow(GPRInfo::regT4); |
225 | } |
226 | jit.farJump(GPRInfo::regT4, JSEntryPtrTag); |
227 | |
228 | notJSFunction.link(&jit); |
229 | slowCase.append(jit.branchIfNotType(GPRInfo::regT0, InternalFunctionType)); |
230 | void* executableAddress = vm.getCTIInternalFunctionTrampolineFor(callLinkInfo.specializationKind()).executableAddress(); |
231 | jit.move(CCallHelpers::TrustedImmPtr(executableAddress), GPRInfo::regT4); |
232 | jit.jump().linkTo(callCode, &jit); |
233 | |
234 | slowCase.link(&jit); |
235 | |
236 | // Here we don't know anything, so revert to the full slow path. |
237 | slowPathFor(jit, vm, operationVirtualCall); |
238 | |
239 | LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID); |
    return FINALIZE_CODE(
        patchBuffer, JITStubRoutinePtrTag,
        "Virtual %s slow path thunk",
        callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct");
244 | } |
245 | |
246 | enum ThunkEntryType { EnterViaCall, EnterViaJumpWithSavedTags, EnterViaJumpWithoutSavedTags }; |
247 | enum class ThunkFunctionType { JSFunction, InternalFunction }; |
248 | |
249 | static MacroAssemblerCodeRef<JITThunkPtrTag> nativeForGenerator(VM& vm, ThunkFunctionType thunkFunctionType, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall) |
250 | { |
251 | // FIXME: This should be able to log ShadowChicken prologue packets. |
252 | // https://bugs.webkit.org/show_bug.cgi?id=155689 |
253 | |
254 | int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind); |
255 | |
256 | JSInterfaceJIT jit(&vm); |
257 | |
258 | switch (entryType) { |
259 | case EnterViaCall: |
260 | jit.emitFunctionPrologue(); |
261 | break; |
262 | case EnterViaJumpWithSavedTags: |
263 | #if USE(JSVALUE64) |
264 | // We're coming from a specialized thunk that has saved the prior tag registers' contents. |
265 | // Restore them now. |
266 | jit.popPair(JSInterfaceJIT::numberTagRegister, JSInterfaceJIT::notCellMaskRegister); |
267 | #endif |
268 | break; |
269 | case EnterViaJumpWithoutSavedTags: |
270 | jit.move(JSInterfaceJIT::framePointerRegister, JSInterfaceJIT::stackPointerRegister); |
271 | break; |
272 | } |
273 | |
274 | jit.emitPutToCallFrameHeader(0, CallFrameSlot::codeBlock); |
275 | jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame); |
276 | |
277 | // Host function signature: f(JSGlobalObject*, CallFrame*); |
278 | #if CPU(X86_64) && OS(WINDOWS) |
279 | // Leave space for the callee parameter home addresses. |
280 | // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it. |
281 | jit.subPtr(CCallHelpers::TrustedImm32(4 * sizeof(int64_t)), CCallHelpers::stackPointerRegister); |
282 | #elif CPU(MIPS) |
283 | // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments. |
284 | jit.subPtr(CCallHelpers::TrustedImm32(16), CCallHelpers::stackPointerRegister); |
285 | #endif |
286 | |
287 | jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); |
288 | jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, GPRInfo::argumentGPR2); |
289 | |
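    // argumentGPR2 holds the callee cell. Its global object becomes the first argument, and the
    // native entry point is loaded either from the callee's executable (JSFunction) or directly
    // from the cell (InternalFunction).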
290 | if (thunkFunctionType == ThunkFunctionType::JSFunction) { |
291 | jit.loadPtr(CCallHelpers::Address(GPRInfo::argumentGPR2, JSFunction::offsetOfGlobalObject()), GPRInfo::argumentGPR0); |
292 | jit.loadPtr(CCallHelpers::Address(GPRInfo::argumentGPR2, JSFunction::offsetOfExecutable()), GPRInfo::argumentGPR2); |
293 | jit.call(CCallHelpers::Address(GPRInfo::argumentGPR2, executableOffsetToFunction), JSEntryPtrTag); |
294 | } else { |
295 | ASSERT(thunkFunctionType == ThunkFunctionType::InternalFunction); |
296 | jit.loadPtr(CCallHelpers::Address(GPRInfo::argumentGPR2, InternalFunction::offsetOfGlobalObject()), GPRInfo::argumentGPR0); |
297 | jit.call(CCallHelpers::Address(GPRInfo::argumentGPR2, InternalFunction::offsetOfNativeFunctionFor(kind)), JSEntryPtrTag); |
298 | } |
299 | |
300 | #if CPU(X86_64) && OS(WINDOWS) |
301 | jit.addPtr(CCallHelpers::TrustedImm32(4 * sizeof(int64_t)), CCallHelpers::stackPointerRegister); |
302 | #elif CPU(MIPS) |
303 | jit.addPtr(CCallHelpers::TrustedImm32(16), CCallHelpers::stackPointerRegister); |
304 | #endif |
305 | |
306 | // Check for an exception |
307 | #if USE(JSVALUE64) |
308 | jit.load64(vm.addressOfException(), JSInterfaceJIT::regT2); |
309 | JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2); |
310 | #else |
311 | JSInterfaceJIT::Jump exceptionHandler = jit.branch32( |
312 | JSInterfaceJIT::NotEqual, |
313 | JSInterfaceJIT::AbsoluteAddress(vm.addressOfException()), |
314 | JSInterfaceJIT::TrustedImm32(0)); |
315 | #endif |
316 | |
317 | jit.emitFunctionEpilogue(); |
318 | // Return. |
319 | jit.ret(); |
320 | |
321 | // Handle an exception |
322 | exceptionHandler.link(&jit); |
323 | |
324 | jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame); |
325 | jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm.topCallFrame); |
326 | #if OS(WINDOWS) |
327 | // Allocate space on stack for the 4 parameter registers. |
328 | jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); |
329 | #endif |
330 | jit.move(CCallHelpers::TrustedImmPtr(&vm), JSInterfaceJIT::argumentGPR0); |
331 | jit.move(JSInterfaceJIT::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationVMHandleException)), JSInterfaceJIT::regT3); |
332 | jit.call(JSInterfaceJIT::regT3, OperationPtrTag); |
333 | #if OS(WINDOWS) |
334 | jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); |
335 | #endif |
336 | |
337 | jit.jumpToExceptionHandler(vm); |
338 | |
339 | LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID); |
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "%s %s%s trampoline", thunkFunctionType == ThunkFunctionType::JSFunction ? "native" : "internal", entryType == EnterViaJumpWithSavedTags ? "Tail With Saved Tags " : entryType == EnterViaJumpWithoutSavedTags ? "Tail Without Saved Tags " : "", toCString(kind).data());
341 | } |
342 | |
343 | MacroAssemblerCodeRef<JITThunkPtrTag> nativeCallGenerator(VM& vm) |
344 | { |
345 | return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall); |
346 | } |
347 | |
348 | MacroAssemblerCodeRef<JITThunkPtrTag> nativeTailCallGenerator(VM& vm) |
349 | { |
350 | return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall, EnterViaJumpWithSavedTags); |
351 | } |
352 | |
353 | MacroAssemblerCodeRef<JITThunkPtrTag> nativeTailCallWithoutSavedTagsGenerator(VM& vm) |
354 | { |
355 | return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForCall, EnterViaJumpWithoutSavedTags); |
356 | } |
357 | |
358 | MacroAssemblerCodeRef<JITThunkPtrTag> nativeConstructGenerator(VM& vm) |
359 | { |
360 | return nativeForGenerator(vm, ThunkFunctionType::JSFunction, CodeForConstruct); |
361 | } |
362 | |
363 | MacroAssemblerCodeRef<JITThunkPtrTag> internalFunctionCallGenerator(VM& vm) |
364 | { |
365 | return nativeForGenerator(vm, ThunkFunctionType::InternalFunction, CodeForCall); |
366 | } |
367 | |
368 | MacroAssemblerCodeRef<JITThunkPtrTag> internalFunctionConstructGenerator(VM& vm) |
369 | { |
370 | return nativeForGenerator(vm, ThunkFunctionType::InternalFunction, CodeForConstruct); |
371 | } |
372 | |
373 | MacroAssemblerCodeRef<JITThunkPtrTag> arityFixupGenerator(VM& vm) |
374 | { |
375 | JSInterfaceJIT jit(&vm); |
376 | |
377 | // We enter with fixup count in argumentGPR0 |
378 | // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct :-) |
379 | #if USE(JSVALUE64) |
380 | #if OS(WINDOWS) |
381 | const GPRReg extraTemp = JSInterfaceJIT::regT0; |
382 | #else |
    const GPRReg extraTemp = JSInterfaceJIT::regT5;
384 | #endif |
385 | # if CPU(X86_64) |
386 | jit.pop(JSInterfaceJIT::regT4); |
387 | # endif |
388 | jit.tagReturnAddress(); |
389 | #if CPU(ARM64E) |
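    // On ARM64E the return PC stored in the frame is signed against the frame's current location.
    // Arity fixup is about to move the frame, which would invalidate that signature, so strip it now
    // and re-sign the PC under a random temporary tag; it is re-signed against the new frame
    // location once fixup is done (see the matching block below).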
390 | jit.loadPtr(JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3); |
391 | jit.addPtr(JSInterfaceJIT::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::callFrameRegister, extraTemp); |
392 | jit.untagPtr(extraTemp, GPRInfo::regT3); |
393 | PtrTag tempReturnPCTag = static_cast<PtrTag>(random()); |
394 | jit.move(JSInterfaceJIT::TrustedImmPtr(tempReturnPCTag), extraTemp); |
395 | jit.tagPtr(extraTemp, GPRInfo::regT3); |
396 | jit.storePtr(GPRInfo::regT3, JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset())); |
397 | #endif |
398 | jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3); |
399 | jit.load32(JSInterfaceJIT::addressFor(CallFrameSlot::argumentCount), JSInterfaceJIT::argumentGPR2); |
400 | jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2); |
401 | |
402 | // Check to see if we have extra slots we can use |
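    // Because frames are stack-aligned, filling the non-aligned remainder of the fixup count with
    // undefined in place keeps the remaining frame move (below) a multiple of stackAlignmentRegisters(),
    // and may remove the need to move the frame at all.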
403 | jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1); |
404 | jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1); |
    JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
406 | jit.move(JSInterfaceJIT::TrustedImm64(JSValue::ValueUndefined), extraTemp); |
    JSInterfaceJIT::Label fillExtraSlots(jit.label());
408 | jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight)); |
409 | jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2); |
410 | jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit); |
411 | jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0); |
412 | JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0); |
413 | noExtraSlot.link(&jit); |
414 | |
415 | jit.neg64(JSInterfaceJIT::argumentGPR0); |
416 | |
417 | // Adjust call frame register and stack pointer to account for missing args. |
    // We need to change the stack pointer before performing the copy/fill loops.
    // This stack space below the stack pointer is considered unused by the OS. Therefore,
    // the OS may corrupt this space when constructing a signal stack.
421 | jit.move(JSInterfaceJIT::argumentGPR0, extraTemp); |
422 | jit.lshift64(JSInterfaceJIT::TrustedImm32(3), extraTemp); |
423 | jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister); |
424 | jit.untagReturnAddress(); |
425 | jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister); |
426 | jit.tagReturnAddress(); |
427 | |
428 | // Move current frame down argumentGPR0 number of slots |
429 | JSInterfaceJIT::Label copyLoop(jit.label()); |
430 | jit.load64(JSInterfaceJIT::regT3, extraTemp); |
431 | jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight)); |
432 | jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3); |
433 | jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit); |
434 | |
435 | // Fill in argumentGPR0 missing arg slots with undefined |
436 | jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2); |
437 | jit.move(JSInterfaceJIT::TrustedImm64(JSValue::ValueUndefined), extraTemp); |
438 | JSInterfaceJIT::Label fillUndefinedLoop(jit.label()); |
439 | jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight)); |
440 | jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3); |
441 | jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit); |
442 | |
443 | done.link(&jit); |
444 | |
445 | #if CPU(ARM64E) |
446 | jit.loadPtr(JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3); |
447 | jit.move(JSInterfaceJIT::TrustedImmPtr(tempReturnPCTag), extraTemp); |
448 | jit.untagPtr(extraTemp, GPRInfo::regT3); |
449 | jit.addPtr(JSInterfaceJIT::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::callFrameRegister, extraTemp); |
450 | jit.tagPtr(extraTemp, GPRInfo::regT3); |
451 | jit.storePtr(GPRInfo::regT3, JSInterfaceJIT::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset())); |
452 | #endif |
453 | |
454 | # if CPU(X86_64) |
455 | jit.push(JSInterfaceJIT::regT4); |
456 | # endif |
457 | jit.ret(); |
458 | #else // USE(JSVALUE64) section above, USE(JSVALUE32_64) section below. |
459 | jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3); |
460 | jit.load32(JSInterfaceJIT::addressFor(CallFrameSlot::argumentCount), JSInterfaceJIT::argumentGPR2); |
461 | jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2); |
462 | |
463 | // Check to see if we have extra slots we can use |
464 | jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1); |
465 | jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1); |
466 | JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1); |
467 | JSInterfaceJIT::Label fillExtraSlots(jit.label()); |
468 | jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5); |
469 | jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, PayloadOffset)); |
470 | jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5); |
471 | jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, TagOffset)); |
472 | jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2); |
473 | jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit); |
474 | jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0); |
475 | JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0); |
476 | noExtraSlot.link(&jit); |
477 | |
478 | jit.neg32(JSInterfaceJIT::argumentGPR0); |
479 | |
480 | // Adjust call frame register and stack pointer to account for missing args. |
    // We need to change the stack pointer before performing the copy/fill loops.
    // This stack space below the stack pointer is considered unused by the OS. Therefore,
    // the OS may corrupt this space when constructing a signal stack.
484 | jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::regT5); |
485 | jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT5); |
486 | jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister); |
487 | jit.untagReturnAddress(); |
488 | jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister); |
489 | jit.tagReturnAddress(); |
490 | |
491 | // Move current frame down argumentGPR0 number of slots |
492 | JSInterfaceJIT::Label copyLoop(jit.label()); |
493 | jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, PayloadOffset), JSInterfaceJIT::regT5); |
494 | jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset)); |
495 | jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, TagOffset), JSInterfaceJIT::regT5); |
496 | jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset)); |
497 | jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3); |
498 | jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit); |
499 | |
500 | // Fill in argumentGPR0 missing arg slots with undefined |
501 | jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2); |
502 | JSInterfaceJIT::Label fillUndefinedLoop(jit.label()); |
503 | jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5); |
504 | jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset)); |
505 | jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5); |
506 | jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset)); |
507 | |
508 | jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3); |
509 | jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit); |
510 | |
511 | done.link(&jit); |
512 | |
513 | jit.ret(); |
514 | #endif // End of USE(JSVALUE32_64) section. |
515 | |
516 | LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID); |
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "fixup arity");
518 | } |
519 | |
520 | MacroAssemblerCodeRef<JITThunkPtrTag> unreachableGenerator(VM& vm) |
521 | { |
522 | JSInterfaceJIT jit(&vm); |
523 | |
524 | jit.breakpoint(); |
525 | |
526 | LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID); |
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "unreachable thunk");
528 | } |
529 | |
530 | MacroAssemblerCodeRef<JITThunkPtrTag> stringGetByValGenerator(VM& vm) |
531 | { |
532 | // regT0 is JSString*, and regT1 (64bit) or regT2 (32bit) is int index. |
    // Returns the result JSString* in regT0 on success; otherwise returns regT0 = 0.
534 | #if USE(JSVALUE64) |
535 | GPRReg stringGPR = GPRInfo::regT0; |
536 | GPRReg indexGPR = GPRInfo::regT1; |
537 | GPRReg scratchGPR = GPRInfo::regT2; |
538 | #else |
539 | GPRReg stringGPR = GPRInfo::regT0; |
540 | GPRReg indexGPR = GPRInfo::regT2; |
541 | GPRReg scratchGPR = GPRInfo::regT1; |
542 | #endif |
543 | |
544 | JSInterfaceJIT jit(&vm); |
545 | JSInterfaceJIT::JumpList failures; |
546 | jit.tagReturnAddress(); |
547 | |
    // Load the string length into scratchGPR, and start the process of loading the data pointer into stringGPR.
549 | jit.loadPtr(JSInterfaceJIT::Address(stringGPR, JSString::offsetOfValue()), stringGPR); |
550 | failures.append(jit.branchIfRopeStringImpl(stringGPR)); |
551 | jit.load32(JSInterfaceJIT::Address(stringGPR, StringImpl::lengthMemoryOffset()), scratchGPR); |
552 | |
553 | // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large |
554 | failures.append(jit.branch32(JSInterfaceJIT::AboveOrEqual, indexGPR, scratchGPR)); |
555 | |
556 | // Load the character |
557 | JSInterfaceJIT::JumpList cont8Bit; |
558 | // Load the string flags |
559 | jit.load32(JSInterfaceJIT::Address(stringGPR, StringImpl::flagsOffset()), scratchGPR); |
560 | jit.loadPtr(JSInterfaceJIT::Address(stringGPR, StringImpl::dataOffset()), stringGPR); |
561 | auto is16Bit = jit.branchTest32(JSInterfaceJIT::Zero, scratchGPR, JSInterfaceJIT::TrustedImm32(StringImpl::flagIs8Bit())); |
562 | jit.load8(JSInterfaceJIT::BaseIndex(stringGPR, indexGPR, JSInterfaceJIT::TimesOne, 0), stringGPR); |
563 | cont8Bit.append(jit.jump()); |
564 | is16Bit.link(&jit); |
565 | jit.load16(JSInterfaceJIT::BaseIndex(stringGPR, indexGPR, JSInterfaceJIT::TimesTwo, 0), stringGPR); |
566 | cont8Bit.link(&jit); |
567 | |
568 | failures.append(jit.branch32(JSInterfaceJIT::Above, stringGPR, JSInterfaceJIT::TrustedImm32(maxSingleCharacterString))); |
569 | jit.move(JSInterfaceJIT::TrustedImmPtr(vm.smallStrings.singleCharacterStrings()), indexGPR); |
570 | jit.loadPtr(JSInterfaceJIT::BaseIndex(indexGPR, stringGPR, JSInterfaceJIT::ScalePtr, 0), stringGPR); |
571 | jit.ret(); |
572 | |
573 | failures.link(&jit); |
574 | jit.move(JSInterfaceJIT::TrustedImm32(0), stringGPR); |
575 | jit.ret(); |
576 | |
577 | LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID); |
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "String get_by_val stub");
579 | } |
580 | |
581 | static void stringCharLoad(SpecializedThunkJIT& jit) |
582 | { |
583 | // load string |
584 | jit.loadJSStringArgument(SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0); |
585 | |
586 | // Load string length to regT2, and start the process of loading the data pointer into regT0 |
587 | jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, JSString::offsetOfValue()), SpecializedThunkJIT::regT0); |
588 | jit.appendFailure(jit.branchIfRopeStringImpl(SpecializedThunkJIT::regT0)); |
589 | jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::lengthMemoryOffset()), SpecializedThunkJIT::regT2); |
590 | |
591 | // load index |
592 | jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index |
593 | |
594 | // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large |
595 | jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2)); |
596 | |
597 | // Load the character |
598 | SpecializedThunkJIT::JumpList is16Bit; |
599 | SpecializedThunkJIT::JumpList cont8Bit; |
600 | // Load the string flags |
601 | jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2); |
602 | jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0); |
603 | is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit()))); |
604 | jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0); |
605 | cont8Bit.append(jit.jump()); |
606 | is16Bit.link(&jit); |
607 | jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0); |
608 | cont8Bit.link(&jit); |
609 | } |
610 | |
611 | static void charToString(SpecializedThunkJIT& jit, VM& vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch) |
612 | { |
613 | jit.appendFailure(jit.branch32(MacroAssembler::Above, src, MacroAssembler::TrustedImm32(maxSingleCharacterString))); |
614 | jit.move(MacroAssembler::TrustedImmPtr(vm.smallStrings.singleCharacterStrings()), scratch); |
615 | jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst); |
616 | jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst)); |
617 | } |
618 | |
619 | MacroAssemblerCodeRef<JITThunkPtrTag> charCodeAtThunkGenerator(VM& vm) |
620 | { |
621 | SpecializedThunkJIT jit(vm, 1); |
622 | stringCharLoad(jit); |
623 | jit.returnInt32(SpecializedThunkJIT::regT0); |
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "charCodeAt");
625 | } |
626 | |
627 | MacroAssemblerCodeRef<JITThunkPtrTag> charAtThunkGenerator(VM& vm) |
628 | { |
629 | SpecializedThunkJIT jit(vm, 1); |
630 | stringCharLoad(jit); |
631 | charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1); |
632 | jit.returnJSCell(SpecializedThunkJIT::regT0); |
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "charAt");
634 | } |
635 | |
636 | MacroAssemblerCodeRef<JITThunkPtrTag> fromCharCodeThunkGenerator(VM& vm) |
637 | { |
638 | SpecializedThunkJIT jit(vm, 1); |
639 | // load char code |
640 | jit.loadInt32Argument(0, SpecializedThunkJIT::regT0); |
641 | charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1); |
642 | jit.returnJSCell(SpecializedThunkJIT::regT0); |
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "fromCharCode");
644 | } |
645 | |
646 | MacroAssemblerCodeRef<JITThunkPtrTag> stringPrototypeCodePointAtThunkGenerator(VM& vm) |
647 | { |
648 | SpecializedThunkJIT jit(vm, 1); |
649 | |
650 | // load string |
651 | jit.loadJSStringArgument(SpecializedThunkJIT::ThisArgument, GPRInfo::regT0); |
652 | |
653 | // Load string length to regT3, and start the process of loading the data pointer into regT2 |
654 | jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, JSString::offsetOfValue()), GPRInfo::regT0); |
655 | jit.appendFailure(jit.branchIfRopeStringImpl(GPRInfo::regT0)); |
656 | jit.load32(CCallHelpers::Address(GPRInfo::regT0, StringImpl::lengthMemoryOffset()), GPRInfo::regT3); |
657 | |
658 | // load index |
659 | jit.loadInt32Argument(0, GPRInfo::regT1); // regT1 contains the index |
660 | |
661 | // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large |
662 | jit.appendFailure(jit.branch32(CCallHelpers::AboveOrEqual, GPRInfo::regT1, GPRInfo::regT3)); |
663 | |
664 | // Load the character |
665 | CCallHelpers::JumpList done; |
666 | // Load the string flags |
667 | jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, StringImpl::dataOffset()), GPRInfo::regT2); |
668 | auto is16Bit = jit.branchTest32(CCallHelpers::Zero, CCallHelpers::Address(GPRInfo::regT0, StringImpl::flagsOffset()), CCallHelpers::TrustedImm32(StringImpl::flagIs8Bit())); |
669 | jit.load8(CCallHelpers::BaseIndex(GPRInfo::regT2, GPRInfo::regT1, CCallHelpers::TimesOne, 0), GPRInfo::regT0); |
670 | done.append(jit.jump()); |
671 | |
672 | is16Bit.link(&jit); |
673 | jit.load16(CCallHelpers::BaseIndex(GPRInfo::regT2, GPRInfo::regT1, CCallHelpers::TimesTwo, 0), GPRInfo::regT0); |
674 | // Original index is int32_t, and here, we ensure that it is positive. If we interpret it as uint32_t, adding 1 never overflows. |
675 | jit.add32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1); |
676 | done.append(jit.branch32(CCallHelpers::AboveOrEqual, GPRInfo::regT1, GPRInfo::regT3)); |
677 | jit.and32(CCallHelpers::TrustedImm32(0xfffffc00), GPRInfo::regT0, GPRInfo::regT3); |
678 | done.append(jit.branch32(CCallHelpers::NotEqual, GPRInfo::regT3, CCallHelpers::TrustedImm32(0xd800))); |
679 | jit.load16(CCallHelpers::BaseIndex(GPRInfo::regT2, GPRInfo::regT1, CCallHelpers::TimesTwo, 0), GPRInfo::regT2); |
680 | jit.and32(CCallHelpers::TrustedImm32(0xfffffc00), GPRInfo::regT2, GPRInfo::regT3); |
681 | done.append(jit.branch32(CCallHelpers::NotEqual, GPRInfo::regT3, CCallHelpers::TrustedImm32(0xdc00))); |
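    // Combine the surrogate pair: codePoint = (lead << 10) + trail - U16_SURROGATE_OFFSET, where
    // U16_SURROGATE_OFFSET = (0xD800 << 10) + 0xDC00 - 0x10000, which is equivalent to
    // 0x10000 + (lead - 0xD800) * 0x400 + (trail - 0xDC00).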
682 | jit.lshift32(CCallHelpers::TrustedImm32(10), GPRInfo::regT0); |
683 | jit.getEffectiveAddress(CCallHelpers::BaseIndex(GPRInfo::regT0, GPRInfo::regT2, CCallHelpers::TimesOne, -U16_SURROGATE_OFFSET), GPRInfo::regT0); |
684 | done.link(&jit); |
685 | |
686 | jit.returnInt32(GPRInfo::regT0); |
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "codePointAt");
688 | } |
689 | |
690 | MacroAssemblerCodeRef<JITThunkPtrTag> clz32ThunkGenerator(VM& vm) |
691 | { |
692 | SpecializedThunkJIT jit(vm, 1); |
693 | MacroAssembler::Jump nonIntArgJump; |
694 | jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump); |
695 | |
696 | SpecializedThunkJIT::Label convertedArgumentReentry(&jit); |
697 | jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1); |
698 | jit.returnInt32(SpecializedThunkJIT::regT1); |
699 | |
700 | if (jit.supportsFloatingPointTruncate()) { |
701 | nonIntArgJump.link(&jit); |
702 | jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); |
703 | jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit); |
704 | jit.appendFailure(jit.jump()); |
705 | } else |
706 | jit.appendFailure(nonIntArgJump); |
707 | |
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "clz32");
709 | } |
710 | |
711 | MacroAssemblerCodeRef<JITThunkPtrTag> sqrtThunkGenerator(VM& vm) |
712 | { |
713 | SpecializedThunkJIT jit(vm, 1); |
714 | if (!jit.supportsFloatingPointSqrt()) |
715 | return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm)); |
716 | |
717 | jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); |
718 | jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0); |
719 | jit.returnDouble(SpecializedThunkJIT::fpRegT0); |
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "sqrt");
721 | } |
722 | |
723 | |
724 | #define UnaryDoubleOpWrapper(function) function##Wrapper |
725 | enum MathThunkCallingConvention { }; |
726 | typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention); |
727 | |
728 | #if CPU(X86_64) && COMPILER(GCC_COMPATIBLE) && (OS(DARWIN) || OS(LINUX)) |
729 | |
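// The libm callee takes and returns its double in %xmm0, matching the calling convention, so the
// wrapper only needs to keep the stack 16-byte aligned: the pushq re-establishes alignment after the
// call into this thunk pushed a return address, and the popq simply discards that scratch slot.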
730 | #define defineUnaryDoubleOpWrapper(function) \ |
731 | asm( \ |
732 | ".text\n" \ |
733 | ".globl " SYMBOL_STRING(function##Thunk) "\n" \ |
734 | HIDE_SYMBOL(function##Thunk) "\n" \ |
735 | SYMBOL_STRING(function##Thunk) ":" "\n" \ |
736 | "pushq %rax\n" \ |
737 | "call " GLOBAL_REFERENCE(function) "\n" \ |
738 | "popq %rcx\n" \ |
739 | "ret\n" \ |
740 | );\ |
741 | extern "C" { \ |
742 | MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \ |
743 | } \ |
744 | static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk; |
745 | |
746 | #elif CPU(X86) && COMPILER(GCC_COMPATIBLE) && OS(LINUX) && defined(__PIC__) |
747 | #define defineUnaryDoubleOpWrapper(function) \ |
748 | asm( \ |
749 | ".text\n" \ |
750 | ".globl " SYMBOL_STRING(function##Thunk) "\n" \ |
751 | HIDE_SYMBOL(function##Thunk) "\n" \ |
752 | SYMBOL_STRING(function##Thunk) ":" "\n" \ |
753 | "pushl %ebx\n" \ |
754 | "subl $20, %esp\n" \ |
755 | "movsd %xmm0, (%esp) \n" \ |
756 | "call __x86.get_pc_thunk.bx\n" \ |
757 | "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \ |
758 | "call " GLOBAL_REFERENCE(function) "\n" \ |
759 | "fstpl (%esp) \n" \ |
760 | "movsd (%esp), %xmm0 \n" \ |
761 | "addl $20, %esp\n" \ |
762 | "popl %ebx\n" \ |
763 | "ret\n" \ |
764 | );\ |
765 | extern "C" { \ |
766 | MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \ |
767 | } \ |
768 | static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk; |
769 | |
770 | #elif CPU(X86) && COMPILER(GCC_COMPATIBLE) && (OS(DARWIN) || OS(LINUX)) |
771 | #define defineUnaryDoubleOpWrapper(function) \ |
772 | asm( \ |
773 | ".text\n" \ |
774 | ".globl " SYMBOL_STRING(function##Thunk) "\n" \ |
775 | HIDE_SYMBOL(function##Thunk) "\n" \ |
776 | SYMBOL_STRING(function##Thunk) ":" "\n" \ |
777 | "subl $20, %esp\n" \ |
778 | "movsd %xmm0, (%esp) \n" \ |
779 | "call " GLOBAL_REFERENCE(function) "\n" \ |
780 | "fstpl (%esp) \n" \ |
781 | "movsd (%esp), %xmm0 \n" \ |
782 | "addl $20, %esp\n" \ |
783 | "ret\n" \ |
784 | );\ |
785 | extern "C" { \ |
786 | MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \ |
787 | } \ |
788 | static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk; |
789 | |
790 | #elif CPU(ARM_THUMB2) && COMPILER(GCC_COMPATIBLE) && PLATFORM(IOS_FAMILY) |
791 | |
792 | #define defineUnaryDoubleOpWrapper(function) \ |
793 | asm( \ |
794 | ".text\n" \ |
795 | ".align 2\n" \ |
796 | ".globl " SYMBOL_STRING(function##Thunk) "\n" \ |
797 | HIDE_SYMBOL(function##Thunk) "\n" \ |
798 | ".thumb\n" \ |
799 | ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \ |
800 | SYMBOL_STRING(function##Thunk) ":" "\n" \ |
801 | "push {lr}\n" \ |
802 | "vmov r0, r1, d0\n" \ |
803 | "blx " GLOBAL_REFERENCE(function) "\n" \ |
804 | "vmov d0, r0, r1\n" \ |
805 | "pop {lr}\n" \ |
806 | "bx lr\n" \ |
807 | ); \ |
808 | extern "C" { \ |
809 | MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \ |
810 | } \ |
811 | static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk; |
812 | |
813 | #elif CPU(ARM64) |
814 | |
815 | #define defineUnaryDoubleOpWrapper(function) \ |
816 | asm( \ |
817 | ".text\n" \ |
818 | ".align 2\n" \ |
819 | ".globl " SYMBOL_STRING(function##Thunk) "\n" \ |
820 | HIDE_SYMBOL(function##Thunk) "\n" \ |
821 | SYMBOL_STRING(function##Thunk) ":" "\n" \ |
822 | "b " GLOBAL_REFERENCE(function) "\n" \ |
823 | ".previous" \ |
824 | ); \ |
825 | extern "C" { \ |
826 | MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \ |
827 | } \ |
828 | static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk; |
829 | |
830 | #elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS) |
831 | |
832 | // MSVC does not accept floor, etc, to be called directly from inline assembly, so we need to wrap these functions. |
833 | static double (_cdecl *floorFunction)(double) = floor; |
834 | static double (_cdecl *ceilFunction)(double) = ceil; |
835 | static double (_cdecl *truncFunction)(double) = trunc; |
836 | static double (_cdecl *expFunction)(double) = exp; |
837 | static double (_cdecl *logFunction)(double) = log; |
838 | static double (_cdecl *jsRoundFunction)(double) = jsRound; |
839 | |
840 | #define defineUnaryDoubleOpWrapper(function) \ |
841 | extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \ |
842 | { \ |
843 | __asm \ |
844 | { \ |
845 | __asm sub esp, 20 \ |
846 | __asm movsd mmword ptr [esp], xmm0 \ |
847 | __asm call function##Function \ |
848 | __asm fstp qword ptr [esp] \ |
849 | __asm movsd xmm0, mmword ptr [esp] \ |
850 | __asm add esp, 20 \ |
851 | __asm ret \ |
852 | } \ |
853 | } \ |
854 | static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk; |
855 | |
856 | #else |
857 | |
858 | #define defineUnaryDoubleOpWrapper(function) \ |
859 | static MathThunk UnaryDoubleOpWrapper(function) = 0 |
860 | #endif |
861 | |
862 | defineUnaryDoubleOpWrapper(jsRound); |
863 | defineUnaryDoubleOpWrapper(exp); |
864 | defineUnaryDoubleOpWrapper(log); |
865 | defineUnaryDoubleOpWrapper(floor); |
866 | defineUnaryDoubleOpWrapper(ceil); |
867 | defineUnaryDoubleOpWrapper(trunc); |
868 | |
869 | MacroAssemblerCodeRef<JITThunkPtrTag> floorThunkGenerator(VM& vm) |
870 | { |
871 | SpecializedThunkJIT jit(vm, 1); |
872 | MacroAssembler::Jump nonIntJump; |
873 | if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint()) |
874 | return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm)); |
875 | jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump); |
876 | jit.returnInt32(SpecializedThunkJIT::regT0); |
877 | nonIntJump.link(&jit); |
878 | jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); |
879 | |
880 | if (jit.supportsFloatingPointRounding()) { |
881 | SpecializedThunkJIT::JumpList doubleResult; |
882 | jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0); |
883 | jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1); |
884 | jit.returnInt32(SpecializedThunkJIT::regT0); |
885 | doubleResult.link(&jit); |
886 | jit.returnDouble(SpecializedThunkJIT::fpRegT0); |
        return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "floor");
888 | } |
889 | |
890 | SpecializedThunkJIT::Jump intResult; |
891 | SpecializedThunkJIT::JumpList doubleResult; |
892 | if (jit.supportsFloatingPointTruncate()) { |
893 | jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1); |
894 | doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1)); |
895 | SpecializedThunkJIT::JumpList slowPath; |
896 | // Handle the negative doubles in the slow path for now. |
897 | slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1)); |
898 | slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0)); |
899 | intResult = jit.jump(); |
900 | slowPath.link(&jit); |
901 | } |
902 | jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor)); |
903 | jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1); |
904 | if (jit.supportsFloatingPointTruncate()) |
905 | intResult.link(&jit); |
906 | jit.returnInt32(SpecializedThunkJIT::regT0); |
907 | doubleResult.link(&jit); |
908 | jit.returnDouble(SpecializedThunkJIT::fpRegT0); |
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "floor");
910 | } |
911 | |
912 | MacroAssemblerCodeRef<JITThunkPtrTag> ceilThunkGenerator(VM& vm) |
913 | { |
914 | SpecializedThunkJIT jit(vm, 1); |
915 | if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint()) |
916 | return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm)); |
917 | MacroAssembler::Jump nonIntJump; |
918 | jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump); |
919 | jit.returnInt32(SpecializedThunkJIT::regT0); |
920 | nonIntJump.link(&jit); |
921 | jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); |
922 | if (jit.supportsFloatingPointRounding()) |
923 | jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0); |
924 | else |
925 | jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil)); |
926 | |
927 | SpecializedThunkJIT::JumpList doubleResult; |
928 | jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1); |
929 | jit.returnInt32(SpecializedThunkJIT::regT0); |
930 | doubleResult.link(&jit); |
931 | jit.returnDouble(SpecializedThunkJIT::fpRegT0); |
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "ceil");
933 | } |
934 | |
935 | MacroAssemblerCodeRef<JITThunkPtrTag> truncThunkGenerator(VM& vm) |
936 | { |
937 | SpecializedThunkJIT jit(vm, 1); |
938 | if (!UnaryDoubleOpWrapper(trunc) || !jit.supportsFloatingPoint()) |
939 | return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm)); |
940 | MacroAssembler::Jump nonIntJump; |
941 | jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump); |
942 | jit.returnInt32(SpecializedThunkJIT::regT0); |
943 | nonIntJump.link(&jit); |
944 | jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); |
945 | if (jit.supportsFloatingPointRounding()) |
946 | jit.roundTowardZeroDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0); |
947 | else |
948 | jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(trunc)); |
949 | |
950 | SpecializedThunkJIT::JumpList doubleResult; |
951 | jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1); |
952 | jit.returnInt32(SpecializedThunkJIT::regT0); |
953 | doubleResult.link(&jit); |
954 | jit.returnDouble(SpecializedThunkJIT::fpRegT0); |
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "trunc");
956 | } |
957 | |
958 | MacroAssemblerCodeRef<JITThunkPtrTag> roundThunkGenerator(VM& vm) |
959 | { |
960 | SpecializedThunkJIT jit(vm, 1); |
961 | if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint()) |
962 | return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm)); |
963 | MacroAssembler::Jump nonIntJump; |
964 | jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump); |
965 | jit.returnInt32(SpecializedThunkJIT::regT0); |
966 | nonIntJump.link(&jit); |
967 | jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); |
968 | SpecializedThunkJIT::JumpList doubleResult; |
969 | if (jit.supportsFloatingPointRounding()) { |
970 | jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1); |
971 | doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1)); |
972 | |
973 | jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1); |
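        // Math.round rounds halfway cases up: take ceil(x), and if ceil(x) - 0.5 > x (the fractional
        // part of x was below one half), subtract 1 to round down instead.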
974 | static constexpr double halfConstant = -0.5; |
975 | jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT2); |
976 | jit.addDouble(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::fpRegT2); |
977 | MacroAssembler::Jump shouldRoundDown = jit.branchDouble(MacroAssembler::DoubleGreaterThan, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT0); |
978 | |
979 | jit.moveDouble(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::fpRegT0); |
980 | MacroAssembler::Jump continuation = jit.jump(); |
981 | |
982 | shouldRoundDown.link(&jit); |
983 | static constexpr double oneConstant = 1.0; |
984 | jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT2); |
985 | jit.subDouble(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT0); |
986 | |
987 | continuation.link(&jit); |
988 | } else |
989 | jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound)); |
990 | jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1); |
991 | jit.returnInt32(SpecializedThunkJIT::regT0); |
992 | doubleResult.link(&jit); |
993 | jit.returnDouble(SpecializedThunkJIT::fpRegT0); |
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "round");
995 | } |
996 | |
997 | MacroAssemblerCodeRef<JITThunkPtrTag> expThunkGenerator(VM& vm) |
998 | { |
999 | if (!UnaryDoubleOpWrapper(exp)) |
1000 | return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm)); |
1001 | SpecializedThunkJIT jit(vm, 1); |
1002 | if (!jit.supportsFloatingPoint()) |
1003 | return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm)); |
1004 | jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); |
1005 | jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp)); |
1006 | jit.returnDouble(SpecializedThunkJIT::fpRegT0); |
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "exp");
1008 | } |
1009 | |
1010 | MacroAssemblerCodeRef<JITThunkPtrTag> logThunkGenerator(VM& vm) |
1011 | { |
1012 | if (!UnaryDoubleOpWrapper(log)) |
1013 | return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm)); |
1014 | SpecializedThunkJIT jit(vm, 1); |
1015 | if (!jit.supportsFloatingPoint()) |
1016 | return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm)); |
1017 | jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); |
1018 | jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log)); |
1019 | jit.returnDouble(SpecializedThunkJIT::fpRegT0); |
    return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "log");
1021 | } |
1022 | |
1023 | MacroAssemblerCodeRef<JITThunkPtrTag> absThunkGenerator(VM& vm) |
1024 | { |
1025 | SpecializedThunkJIT jit(vm, 1); |
1026 | if (!jit.supportsFloatingPointAbs()) |
1027 | return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm)); |
1028 | |
1029 | #if USE(JSVALUE64) |
1030 | unsigned virtualRegisterIndex = CallFrame::argumentOffset(0); |
1031 | jit.load64(AssemblyHelpers::addressFor(virtualRegisterIndex), GPRInfo::regT0); |
1032 | auto notInteger = jit.branchIfNotInt32(GPRInfo::regT0); |
1033 | |
1034 | // Abs Int32. |
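    // Branchless abs: mask = x >> 31 is all ones for a negative x and zero otherwise, so
    // (x + mask) ^ mask negates a negative value and leaves a non-negative one unchanged.
    // INT_MIN overflows this trick and is handled separately below.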
1035 | jit.rshift32(GPRInfo::regT0, MacroAssembler::TrustedImm32(31), GPRInfo::regT1); |
1036 | jit.add32(GPRInfo::regT1, GPRInfo::regT0); |
1037 | jit.xor32(GPRInfo::regT1, GPRInfo::regT0); |
1038 | |
1039 | // IntMin cannot be inverted. |
1040 | MacroAssembler::Jump integerIsIntMin = jit.branchTest32(MacroAssembler::Signed, GPRInfo::regT0); |
1041 | |
1042 | // Box and finish. |
1043 | jit.or64(GPRInfo::numberTagRegister, GPRInfo::regT0); |
1044 | MacroAssembler::Jump doneWithIntegers = jit.jump(); |
1045 | |
1046 | // Handle Doubles. |
1047 | notInteger.link(&jit); |
1048 | jit.appendFailure(jit.branchIfNotNumber(GPRInfo::regT0)); |
1049 | jit.unboxDoubleWithoutAssertions(GPRInfo::regT0, GPRInfo::regT0, FPRInfo::fpRegT0); |
1050 | MacroAssembler::Label absFPR0Label = jit.label(); |
1051 | jit.absDouble(FPRInfo::fpRegT0, FPRInfo::fpRegT1); |
1052 | jit.boxDouble(FPRInfo::fpRegT1, GPRInfo::regT0); |
1053 | |
1054 | // Tail. |
1055 | doneWithIntegers.link(&jit); |
1056 | jit.returnJSValue(GPRInfo::regT0); |
1057 | |
1058 | // We know the value of regT0 is IntMin. We could load that value from memory but |
1059 | // it is simpler to just convert it. |
1060 | integerIsIntMin.link(&jit); |
1061 | jit.convertInt32ToDouble(GPRInfo::regT0, FPRInfo::fpRegT0); |
1062 | jit.jump().linkTo(absFPR0Label, &jit); |
1063 | #else |
1064 | MacroAssembler::Jump nonIntJump; |
1065 | jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump); |
1066 | jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1); |
1067 | jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0); |
1068 | jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0); |
1069 | jit.appendFailure(jit.branchTest32(MacroAssembler::Signed, SpecializedThunkJIT::regT0)); |
1070 | jit.returnInt32(SpecializedThunkJIT::regT0); |
1071 | nonIntJump.link(&jit); |
1072 | // Shame about the double int conversion here. |
1073 | jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); |
1074 | jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1); |
1075 | jit.returnDouble(SpecializedThunkJIT::fpRegT1); |
1076 | #endif |
1077 | return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "abs" ); |
1078 | } |
1079 | |
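// Math.imul() thunk: multiplies the two arguments as int32s. Non-int32 arguments are
// truncated from double when the target supports it; any failure falls back to the
// generic native call.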
1080 | MacroAssemblerCodeRef<JITThunkPtrTag> imulThunkGenerator(VM& vm) |
1081 | { |
1082 | SpecializedThunkJIT jit(vm, 2); |
1083 | MacroAssembler::Jump nonIntArg0Jump; |
1084 | jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump); |
1085 | SpecializedThunkJIT::Label doneLoadingArg0(&jit); |
1086 | MacroAssembler::Jump nonIntArg1Jump; |
1087 | jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump); |
1088 | SpecializedThunkJIT::Label doneLoadingArg1(&jit); |
1089 | jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0); |
1090 | jit.returnInt32(SpecializedThunkJIT::regT0); |
1091 | |
1092 | if (jit.supportsFloatingPointTruncate()) { |
1093 | nonIntArg0Jump.link(&jit); |
1094 | jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); |
1095 | jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit); |
1096 | jit.appendFailure(jit.jump()); |
1097 | } else |
1098 | jit.appendFailure(nonIntArg0Jump); |
1099 | |
1100 | if (jit.supportsFloatingPointTruncate()) { |
1101 | nonIntArg1Jump.link(&jit); |
1102 | jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1); |
1103 | jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit); |
1104 | jit.appendFailure(jit.jump()); |
1105 | } else |
1106 | jit.appendFailure(nonIntArg1Jump); |
1107 | |
1108 | return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "imul" ); |
1109 | } |
1110 | |
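// Math.random() thunk: only specialized on JSVALUE64 targets with floating-point
// support; all other configurations use the generic native call.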
1111 | MacroAssemblerCodeRef<JITThunkPtrTag> randomThunkGenerator(VM& vm) |
1112 | { |
1113 | SpecializedThunkJIT jit(vm, 0); |
1114 | if (!jit.supportsFloatingPoint()) |
1115 | return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm)); |
1116 | |
1117 | #if USE(JSVALUE64) |
1118 | jit.emitRandomThunk(vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0); |
1119 | jit.returnDouble(SpecializedThunkJIT::fpRegT0); |
1120 | |
1121 | return jit.finalize(vm.jitStubs->ctiNativeTailCall(vm), "random" ); |
1122 | #else |
1123 | return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(vm.jitStubs->ctiNativeCall(vm)); |
1124 | #endif |
1125 | } |
1126 | |
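// Call thunk for a JSBoundFunction that binds only 'this' (no bound arguments): it
// builds a fresh callee frame, stores the bound this as argument 0, copies the
// remaining arguments from the caller's frame, and calls the target function's code.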
1127 | MacroAssemblerCodeRef<JITThunkPtrTag> boundThisNoArgsFunctionCallGenerator(VM& vm) |
1128 | { |
1129 | CCallHelpers jit; |
1130 | |
1131 | jit.emitFunctionPrologue(); |
1132 | |
1133 | // Set up our call frame. |
1134 | jit.storePtr(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::addressFor(CallFrameSlot::codeBlock)); |
1135 | jit.store32(CCallHelpers::TrustedImm32(0), CCallHelpers::tagFor(CallFrameSlot::argumentCount)); |
1136 | |
    unsigned extraStackNeeded = 0;
1138 | if (unsigned stackMisalignment = sizeof(CallerFrameAndPC) % stackAlignmentBytes()) |
1139 | extraStackNeeded = stackAlignmentBytes() - stackMisalignment; |
1140 | |
1141 | // We need to forward all of the arguments that we were passed. We aren't allowed to do a tail |
1142 | // call here as far as I can tell. At least not so long as the generic path doesn't do a tail |
1143 | // call, since that would be way too weird. |
1144 | |
1145 | // The formula for the number of stack bytes needed given some number of parameters (including |
1146 | // this) is: |
1147 | // |
1148 | // stackAlign((numParams + CallFrameHeaderSize) * sizeof(Register) - sizeof(CallerFrameAndPC)) |
1149 | // |
1150 | // Probably we want to write this as: |
1151 | // |
1152 | // stackAlign((numParams + (CallFrameHeaderSize - CallerFrameAndPCSize)) * sizeof(Register)) |
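    //
    // For example, on a typical 64-bit configuration (sizeof(Register) == 8, a 2-register
    // CallerFrameAndPC, a 5-register call frame header, 16-byte stack alignment; these
    // constants are illustrative), a call with just 'this' (numParams == 1) needs
    // stackAlign((1 + (5 - 2)) * 8) = stackAlign(32) = 32 bytes.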
1153 | // |
1154 | // That's really all there is to this. We have all the registers we need to do it. |
1155 | |
1156 | jit.load32(CCallHelpers::payloadFor(CallFrameSlot::argumentCount), GPRInfo::regT1); |
1157 | jit.add32(CCallHelpers::TrustedImm32(CallFrame::headerSizeInRegisters - CallerFrameAndPC::sizeInRegisters), GPRInfo::regT1, GPRInfo::regT2); |
    jit.lshift32(CCallHelpers::TrustedImm32(3), GPRInfo::regT2); // Scale the register count to bytes (<< 3, i.e. * sizeof(Register)).
1159 | jit.add32(CCallHelpers::TrustedImm32(stackAlignmentBytes() - 1), GPRInfo::regT2); |
    jit.and32(CCallHelpers::TrustedImm32(-stackAlignmentBytes()), GPRInfo::regT2); // Together with the add above, rounds the byte count up to a multiple of the stack alignment.
1161 | |
1162 | if (extraStackNeeded) |
1163 | jit.add32(CCallHelpers::TrustedImm32(extraStackNeeded), GPRInfo::regT2); |
1164 | |
1165 | // At this point regT1 has the actual argument count and regT2 has the amount of stack we will need. |
1166 | // Check to see if we have enough stack space. |
1167 | |
1168 | jit.negPtr(GPRInfo::regT2); |
1169 | jit.addPtr(CCallHelpers::stackPointerRegister, GPRInfo::regT2); |
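    // Load the callee (the JSBoundFunction) now: the overflow path below needs its
    // global object, and the call path needs its bound this and target function.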
1170 | jit.loadCell(CCallHelpers::addressFor(CallFrameSlot::callee), GPRInfo::regT3); |
1171 | CCallHelpers::Jump haveStackSpace = jit.branchPtr(CCallHelpers::BelowOrEqual, CCallHelpers::AbsoluteAddress(vm.addressOfSoftStackLimit()), GPRInfo::regT2); |
1172 | |
    // Throw a stack overflow exception.
1174 | jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame); |
1175 | jit.loadPtr(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfGlobalObject()), GPRInfo::regT3); |
1176 | jit.setupArguments<decltype(operationThrowStackOverflowErrorFromThunk)>(GPRInfo::regT3); |
1177 | jit.prepareCallOperation(vm); |
1178 | jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationThrowStackOverflowErrorFromThunk)), GPRInfo::nonArgGPR0); |
1179 | emitPointerValidation(jit, GPRInfo::nonArgGPR0, OperationPtrTag); |
1180 | jit.call(GPRInfo::nonArgGPR0, OperationPtrTag); |
1181 | jit.jumpToExceptionHandler(vm); |
1182 | |
1183 | haveStackSpace.link(&jit); |
1184 | jit.move(GPRInfo::regT2, CCallHelpers::stackPointerRegister); |
1185 | |
1186 | // Do basic callee frame setup, including 'this'. |
1187 | |
1188 | jit.store32(GPRInfo::regT1, CCallHelpers::calleeFramePayloadSlot(CallFrameSlot::argumentCount)); |
1189 | |
1190 | JSValueRegs valueRegs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT2); |
1191 | jit.loadValue(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfBoundThis()), valueRegs); |
1192 | jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(0)); |
1193 | |
1194 | jit.loadPtr(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfTargetFunction()), GPRInfo::regT3); |
1195 | jit.storeCell(GPRInfo::regT3, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee)); |
1196 | |
1197 | // OK, now we can start copying. This is a simple matter of copying parameters from the caller's |
1198 | // frame to the callee's frame. Note that we know that regT1 (the argument count) must be at |
1199 | // least 1. |
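    // regT1 currently holds argumentCountIncludingThis; drop the 'this' slot (argument 0
    // already holds the bound this) and, if any arguments remain, copy them from the
    // highest index down.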
1200 | jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1); |
1201 | CCallHelpers::Jump done = jit.branchTest32(CCallHelpers::Zero, GPRInfo::regT1); |
1202 | |
1203 | CCallHelpers::Label loop = jit.label(); |
1204 | jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1); |
1205 | jit.loadValue(CCallHelpers::addressFor(virtualRegisterForArgument(1)).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight), valueRegs); |
1206 | jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(1).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight)); |
1207 | jit.branchTest32(CCallHelpers::NonZero, GPRInfo::regT1).linkTo(loop, &jit); |
1208 | |
1209 | done.link(&jit); |
1210 | |
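    // Fetch the target function's executable and its JIT entry point (with arity check);
    // if there is no code pointer, jump to the native tail call stub linked below.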
1211 | jit.loadPtr( |
1212 | CCallHelpers::Address(GPRInfo::regT3, JSFunction::offsetOfExecutable()), |
1213 | GPRInfo::regT0); |
1214 | jit.loadPtr( |
1215 | CCallHelpers::Address( |
1216 | GPRInfo::regT0, ExecutableBase::offsetOfJITCodeWithArityCheckFor(CodeForCall)), |
1217 | GPRInfo::regT0); |
1218 | CCallHelpers::Jump noCode = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT0); |
1219 | |
1220 | emitPointerValidation(jit, GPRInfo::regT0, JSEntryPtrTag); |
1221 | jit.call(GPRInfo::regT0, JSEntryPtrTag); |
1222 | |
1223 | jit.emitFunctionEpilogue(); |
1224 | jit.ret(); |
1225 | |
1226 | LinkBuffer linkBuffer(jit, GLOBAL_THUNK_ID); |
1227 | linkBuffer.link(noCode, CodeLocationLabel<JITThunkPtrTag>(vm.jitStubs->ctiNativeTailCallWithoutSavedTags(vm))); |
1228 | return FINALIZE_CODE( |
        linkBuffer, JITThunkPtrTag, "Specialized thunk for bound function calls with no arguments");
1230 | } |
1231 | |
1232 | } // namespace JSC |
1233 | |
1234 | #endif // ENABLE(JIT) |
1235 | |