/*
 * Copyright (C) 2013-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "FTLThunks.h"

#if ENABLE(FTL_JIT)

#include "AssemblyHelpers.h"
#include "DFGOSRExitCompilerCommon.h"
#include "FPRInfo.h"
#include "FTLOSRExitCompiler.h"
#include "FTLOperations.h"
#include "FTLSaveRestore.h"
#include "GPRInfo.h"
#include "LinkBuffer.h"

namespace JSC { namespace FTL {

using namespace DFG;

enum class FrameAndStackAdjustmentRequirement {
    Needed,
    NotNeeded
};
static MacroAssemblerCodeRef<JITThunkPtrTag> genericGenerationThunkGenerator(
    VM& vm, FunctionPtr<CFunctionPtrTag> generationFunction, PtrTag resultTag, const char* name, unsigned extraPopsToRestore, FrameAndStackAdjustmentRequirement frameAndStackAdjustmentRequirement)
{
    AssemblyHelpers jit(nullptr);

    if (frameAndStackAdjustmentRequirement == FrameAndStackAdjustmentRequirement::Needed) {
        // This needs to happen before we use the scratch buffer because this function also uses the scratch buffer.
        adjustFrameAndStackInOSRExitCompilerThunk<FTL::JITCode>(jit, vm, JITType::FTLJIT);
    }

    // Note that the "return address" will be the ID that we pass to the generation function.

    ptrdiff_t stackMisalignment = MacroAssembler::pushToSaveByteOffset();

    // Pretend that we're a C call frame.
    jit.pushToSave(MacroAssembler::framePointerRegister);
    jit.move(MacroAssembler::stackPointerRegister, MacroAssembler::framePointerRegister);
    stackMisalignment += MacroAssembler::pushToSaveByteOffset();

    // Now create ourselves enough stack space to give saveAllRegisters() a scratch slot.
    unsigned numberOfRequiredPops = 0;
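    // Keep pushing until the total adjustment is a multiple of stackAlignmentBytes(), and
    // remember how many pushes we will have to undo before the tail call.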
    do {
        jit.pushToSave(GPRInfo::regT0);
        stackMisalignment += MacroAssembler::pushToSaveByteOffset();
        numberOfRequiredPops++;
    } while (stackMisalignment % stackAlignmentBytes());

    ScratchBuffer* scratchBuffer = vm.scratchBufferForSize(requiredScratchMemorySizeInBytes());
    char* buffer = static_cast<char*>(scratchBuffer->dataBuffer());

    saveAllRegisters(jit, buffer);

    // Tell GC mark phase how much of the scratch buffer is active during call.
    jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->addressOfActiveLength()), GPRInfo::nonArgGPR0);
    jit.storePtr(MacroAssembler::TrustedImmPtr(requiredScratchMemorySizeInBytes()), GPRInfo::nonArgGPR0);

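    // Pass the call frame as the first argument and, as the second argument, the ID that our
    // caller left in the "return address" slot (see the note above).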
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
    jit.peek(
        GPRInfo::argumentGPR1,
        (stackMisalignment - MacroAssembler::pushToSaveByteOffset()) / sizeof(void*));
    jit.prepareCallOperation(vm);
    MacroAssembler::Call functionCall = jit.call(OperationPtrTag);

    // At this point we want to make a tail call to what was returned to us in the
    // returnValueGPR. But at the same time as we do this, we must restore all registers.
    // The way we will accomplish this is by arranging to have the tail call target in the
    // return address "slot" (be it a register or the stack).

    jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0);

    // Make sure we tell the GC that we're not using the scratch buffer anymore.
    jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->addressOfActiveLength()), GPRInfo::regT1);
    jit.storePtr(MacroAssembler::TrustedImmPtr(nullptr), GPRInfo::regT1);

    // Prepare for tail call.
    while (numberOfRequiredPops--)
        jit.popToRestore(GPRInfo::regT1);
    jit.popToRestore(MacroAssembler::framePointerRegister);

    // When we came in here, there was an additional thing pushed to the stack. Some clients want it
    // popped before proceeding.
    while (extraPopsToRestore--)
        jit.popToRestore(GPRInfo::regT1);

    // Put the return address wherever the return instruction wants it. On all platforms, this
    // ensures that the return address is out of the way of register restoration.
    jit.restoreReturnAddressBeforeReturn(GPRInfo::regT0);

    restoreAllRegisters(jit, buffer);

#if CPU(ARM64E)
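    // The generation function returned a pointer signed with resultTag. Strip that signature
    // from the link register and re-sign it as a return address so that the ret below
    // authenticates and branches to it.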
    jit.untagPtr(resultTag, AssemblyHelpers::linkRegister);
    jit.tagReturnAddress();
#else
    UNUSED_PARAM(resultTag);
#endif
    jit.ret();

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    patchBuffer.link(functionCall, generationFunction.retagged<OperationPtrTag>());
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "%s", name);
}

MacroAssemblerCodeRef<JITThunkPtrTag> osrExitGenerationThunkGenerator(VM& vm)
{
    unsigned extraPopsToRestore = 0;
    return genericGenerationThunkGenerator(
        vm, operationCompileFTLOSRExit, OSRExitPtrTag, "FTL OSR exit generation thunk", extraPopsToRestore, FrameAndStackAdjustmentRequirement::Needed);
}

MacroAssemblerCodeRef<JITThunkPtrTag> lazySlowPathGenerationThunkGenerator(VM& vm)
{
    unsigned extraPopsToRestore = 1;
    return genericGenerationThunkGenerator(
        vm, operationCompileFTLLazySlowPath, JITStubRoutinePtrTag, "FTL lazy slow path generation thunk", extraPopsToRestore, FrameAndStackAdjustmentRequirement::NotNeeded);
}

static void registerClobberCheck(AssemblyHelpers& jit, RegisterSet dontClobber)
{
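    // Debugging aid: when Options::clobberAllRegsInFTLICSlowPath() is set, scribble over every
    // register the slow path is permitted to clobber, to flush out code that wrongly assumes a
    // register survives the call.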
    if (!Options::clobberAllRegsInFTLICSlowPath())
        return;

    RegisterSet clobber = RegisterSet::allRegisters();
    clobber.exclude(RegisterSet::reservedHardwareRegisters());
    clobber.exclude(RegisterSet::stackRegisters());
    clobber.exclude(RegisterSet::calleeSaveRegisters());
    clobber.exclude(dontClobber);

    GPRReg someGPR;
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        if (!clobber.get(reg) || !reg.isGPR())
            continue;

        jit.move(AssemblyHelpers::TrustedImm32(0x1337beef), reg.gpr());
        someGPR = reg.gpr();
    }

    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        if (!clobber.get(reg) || !reg.isFPR())
            continue;

        jit.move64ToDouble(someGPR, reg.fpr());
    }
}

MacroAssemblerCodeRef<JITThunkPtrTag> slowPathCallThunkGenerator(VM& vm, const SlowPathCallKey& key)
{
    AssemblyHelpers jit(nullptr);
    jit.tagReturnAddress();

    // We want to save the given registers just past the given offset, stash the old return
    // address at the offset itself, and then finally make the call.

    size_t currentOffset = key.offset() + sizeof(void*);

#if CPU(X86_64)
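    // Presumably this compensates for the return address that the call into this thunk pushed:
    // on x86-64 the stack pointer is one word lower here than it was when the caller computed
    // key.offset().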
    currentOffset += sizeof(void*);
#endif

    for (MacroAssembler::RegisterID reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = static_cast<MacroAssembler::RegisterID>(reg + 1)) {
        if (!key.usedRegisters().get(reg))
            continue;
        jit.storePtr(reg, AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, currentOffset));
        currentOffset += sizeof(void*);
    }

    for (MacroAssembler::FPRegisterID reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = static_cast<MacroAssembler::FPRegisterID>(reg + 1)) {
        if (!key.usedRegisters().get(reg))
            continue;
        jit.storeDouble(reg, AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, currentOffset));
        currentOffset += sizeof(double);
    }

    jit.preserveReturnAddressAfterCall(GPRInfo::nonArgGPR0);
    jit.storePtr(GPRInfo::nonArgGPR0, AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, key.offset()));
    jit.prepareCallOperation(vm);

    registerClobberCheck(jit, key.argumentRegisters());

    AssemblyHelpers::Call call = jit.call(OperationPtrTag);

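    // Reload the return address we stashed at key.offset() and put it back where the return
    // instruction expects it, then restore the saved registers in reverse order.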
    jit.loadPtr(AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, key.offset()), GPRInfo::nonPreservedNonReturnGPR);
    jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR);

    for (MacroAssembler::FPRegisterID reg = MacroAssembler::lastFPRegister(); ; reg = static_cast<MacroAssembler::FPRegisterID>(reg - 1)) {
        if (key.usedRegisters().get(reg)) {
            currentOffset -= sizeof(double);
            jit.loadDouble(AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, currentOffset), reg);
        }
        if (reg == MacroAssembler::firstFPRegister())
            break;
    }

    for (MacroAssembler::RegisterID reg = MacroAssembler::lastRegister(); ; reg = static_cast<MacroAssembler::RegisterID>(reg - 1)) {
        if (key.usedRegisters().get(reg)) {
            currentOffset -= sizeof(void*);
            jit.loadPtr(AssemblyHelpers::Address(MacroAssembler::stackPointerRegister, currentOffset), reg);
        }
        if (reg == MacroAssembler::firstRegister())
            break;
    }

    jit.ret();

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
    patchBuffer.link(call, key.callTarget());
    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "FTL slow path call thunk for %s", toCString(key).data());
}

} } // namespace JSC::FTL

#endif // ENABLE(FTL_JIT)