1 | /* |
2 | * Copyright (C) 2016-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "WasmB3IRGenerator.h" |
28 | |
29 | #if ENABLE(WEBASSEMBLY) |
30 | |
31 | #include "AllowMacroScratchRegisterUsageIf.h" |
32 | #include "B3BasicBlockInlines.h" |
33 | #include "B3CCallValue.h" |
34 | #include "B3Compile.h" |
35 | #include "B3ConstPtrValue.h" |
36 | #include "B3FixSSA.h" |
37 | #include "B3Generate.h" |
38 | #include "B3InsertionSet.h" |
39 | #include "B3SlotBaseValue.h" |
40 | #include "B3StackmapGenerationParams.h" |
41 | #include "B3SwitchValue.h" |
42 | #include "B3UpsilonValue.h" |
43 | #include "B3Validate.h" |
44 | #include "B3ValueInlines.h" |
45 | #include "B3ValueKey.h" |
46 | #include "B3Variable.h" |
47 | #include "B3VariableValue.h" |
48 | #include "B3WasmAddressValue.h" |
49 | #include "B3WasmBoundsCheckValue.h" |
50 | #include "DisallowMacroScratchRegisterUsage.h" |
51 | #include "JSCInlines.h" |
52 | #include "JSWebAssemblyInstance.h" |
53 | #include "ScratchRegisterAllocator.h" |
54 | #include "VirtualRegister.h" |
55 | #include "WasmCallingConvention.h" |
56 | #include "WasmContextInlines.h" |
57 | #include "WasmExceptionType.h" |
58 | #include "WasmFunctionParser.h" |
59 | #include "WasmInstance.h" |
60 | #include "WasmMemory.h" |
61 | #include "WasmOMGPlan.h" |
62 | #include "WasmOSREntryData.h" |
63 | #include "WasmOpcodeOrigin.h" |
64 | #include "WasmOperations.h" |
65 | #include "WasmSignatureInlines.h" |
66 | #include "WasmThunks.h" |
67 | #include <limits> |
68 | #include <wtf/Optional.h> |
69 | #include <wtf/StdLibExtras.h> |
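
// Editorial note: dumpProcedure below sits at file scope, outside any namespace,
// and is never called from this file; presumably this is so it can be invoked by
// name from a debugger, e.g. `call dumpProcedure(&proc)` under lldb.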
70 | |
71 | void dumpProcedure(void* ptr) |
72 | { |
73 | JSC::B3::Procedure* proc = static_cast<JSC::B3::Procedure*>(ptr); |
74 | proc->dump(WTF::dataFile()); |
75 | } |
76 | |
77 | namespace JSC { namespace Wasm { |
78 | |
79 | using namespace B3; |
80 | |
81 | namespace { |
82 | namespace WasmB3IRGeneratorInternal { |
83 | static constexpr bool verbose = false; |
84 | } |
85 | } |
86 | |
87 | class B3IRGenerator { |
88 | public: |
89 | using ResultList = Vector<Value*, 1>; // Value must be a Phi |
90 | |
91 | struct ControlData { |
92 | ControlData(Procedure& proc, Origin origin, BlockSignature signature, BlockType type, BasicBlock* continuation, BasicBlock* special = nullptr) |
93 | : controlBlockType(type) |
94 | , signature(signature) |
95 | , continuation(continuation) |
96 | , special(special) |
97 | { |
98 | if (type == BlockType::Loop) { |
99 | for (unsigned i = 0; i < signature->argumentCount(); ++i) |
100 | phis.append(proc.add<Value>(Phi, toB3Type(signature->argument(i)), origin)); |
101 | } else { |
102 | for (unsigned i = 0; i < signature->returnCount(); ++i) |
103 | phis.append(proc.add<Value>(Phi, toB3Type(signature->returnType(i)), origin)); |
104 | } |
105 | } |
106 | |
107 | ControlData() |
108 | { |
109 | } |
110 | |
111 | void dump(PrintStream& out) const |
112 | { |
            switch (blockType()) {
            case BlockType::If:
                out.print("If: ");
                break;
            case BlockType::Block:
                out.print("Block: ");
                break;
            case BlockType::Loop:
                out.print("Loop: ");
                break;
            case BlockType::TopLevel:
                out.print("TopLevel: ");
                break;
            }
            out.print("Continuation: ", *continuation, ", Special: ");
            if (special)
                out.print(*special);
            else
                out.print("None");
132 | } |
133 | |
134 | BlockType blockType() const { return controlBlockType; } |
135 | |
        bool hasNonVoidResult() const { return !signature->returnsVoid(); }
137 | |
138 | BasicBlock* targetBlockForBranch() |
139 | { |
140 | if (blockType() == BlockType::Loop) |
141 | return special; |
142 | return continuation; |
143 | } |
144 | |
145 | void convertIfToBlock() |
146 | { |
147 | ASSERT(blockType() == BlockType::If); |
148 | controlBlockType = BlockType::Block; |
149 | special = nullptr; |
150 | } |
151 | |
152 | private: |
153 | friend class B3IRGenerator; |
154 | BlockType controlBlockType; |
155 | BlockSignature signature; |
156 | BasicBlock* continuation; |
157 | BasicBlock* special; |
158 | ResultList phis; |
159 | }; |
160 | |
161 | using ExpressionType = Value*; |
162 | using ExpressionList = Vector<ExpressionType, 1>; |
163 | using Stack = ExpressionList; |
164 | |
165 | using ControlType = ControlData; |
166 | using ControlEntry = FunctionParser<B3IRGenerator>::ControlEntry; |
167 | |
168 | static constexpr ExpressionType emptyExpression() { return nullptr; } |
169 | bool isControlTypeIf(const ControlType& control) { return control.blockType() == BlockType::If; } |
170 | |
171 | typedef String ErrorType; |
172 | typedef Unexpected<ErrorType> UnexpectedResult; |
173 | typedef Expected<std::unique_ptr<InternalFunction>, ErrorType> Result; |
174 | typedef Expected<void, ErrorType> PartialResult; |
175 | template <typename ...Args> |
176 | NEVER_INLINE UnexpectedResult WARN_UNUSED_RETURN fail(Args... args) const |
177 | { |
178 | using namespace FailureHelper; // See ADL comment in WasmParser.h. |
        return UnexpectedResult(makeString("WebAssembly.Module failed compiling: "_s, makeString(args)...));
180 | } |
181 | #define WASM_COMPILE_FAIL_IF(condition, ...) do { \ |
182 | if (UNLIKELY(condition)) \ |
183 | return fail(__VA_ARGS__); \ |
184 | } while (0) |
185 | |
186 | B3IRGenerator(const ModuleInformation&, Procedure&, InternalFunction*, Vector<UnlinkedWasmToWasmCall>&, unsigned& osrEntryScratchBufferSize, MemoryMode, CompilationMode, unsigned functionIndex, unsigned loopIndexForOSREntry, TierUpCount*, ThrowWasmException); |
187 | |
188 | PartialResult WARN_UNUSED_RETURN addArguments(const Signature&); |
189 | PartialResult WARN_UNUSED_RETURN addLocal(Type, uint32_t); |
190 | ExpressionType addConstant(Type, uint64_t); |
191 | |
192 | // References |
193 | PartialResult WARN_UNUSED_RETURN addRefIsNull(ExpressionType& value, ExpressionType& result); |
194 | PartialResult WARN_UNUSED_RETURN addRefFunc(uint32_t index, ExpressionType& result); |
195 | |
196 | // Tables |
197 | PartialResult WARN_UNUSED_RETURN addTableGet(unsigned, ExpressionType& index, ExpressionType& result); |
198 | PartialResult WARN_UNUSED_RETURN addTableSet(unsigned, ExpressionType& index, ExpressionType& value); |
199 | PartialResult WARN_UNUSED_RETURN addTableSize(unsigned, ExpressionType& result); |
200 | PartialResult WARN_UNUSED_RETURN addTableGrow(unsigned, ExpressionType& fill, ExpressionType& delta, ExpressionType& result); |
201 | PartialResult WARN_UNUSED_RETURN addTableFill(unsigned, ExpressionType& offset, ExpressionType& fill, ExpressionType& count); |
202 | // Locals |
203 | PartialResult WARN_UNUSED_RETURN getLocal(uint32_t index, ExpressionType& result); |
204 | PartialResult WARN_UNUSED_RETURN setLocal(uint32_t index, ExpressionType value); |
205 | |
206 | // Globals |
207 | PartialResult WARN_UNUSED_RETURN getGlobal(uint32_t index, ExpressionType& result); |
208 | PartialResult WARN_UNUSED_RETURN setGlobal(uint32_t index, ExpressionType value); |
209 | |
210 | // Memory |
211 | PartialResult WARN_UNUSED_RETURN load(LoadOpType, ExpressionType pointer, ExpressionType& result, uint32_t offset); |
212 | PartialResult WARN_UNUSED_RETURN store(StoreOpType, ExpressionType pointer, ExpressionType value, uint32_t offset); |
213 | PartialResult WARN_UNUSED_RETURN addGrowMemory(ExpressionType delta, ExpressionType& result); |
214 | PartialResult WARN_UNUSED_RETURN addCurrentMemory(ExpressionType& result); |
215 | |
216 | // Basic operators |
217 | template<OpType> |
218 | PartialResult WARN_UNUSED_RETURN addOp(ExpressionType arg, ExpressionType& result); |
219 | template<OpType> |
220 | PartialResult WARN_UNUSED_RETURN addOp(ExpressionType left, ExpressionType right, ExpressionType& result); |
221 | PartialResult WARN_UNUSED_RETURN addSelect(ExpressionType condition, ExpressionType nonZero, ExpressionType zero, ExpressionType& result); |
222 | |
223 | |
224 | // Control flow |
225 | ControlData WARN_UNUSED_RETURN addTopLevel(BlockSignature); |
226 | PartialResult WARN_UNUSED_RETURN addBlock(BlockSignature, Stack& enclosingStack, ControlType& newBlock, Stack& newStack); |
227 | PartialResult WARN_UNUSED_RETURN addLoop(BlockSignature, Stack& enclosingStack, ControlType& block, Stack& newStack, uint32_t loopIndex); |
228 | PartialResult WARN_UNUSED_RETURN addIf(ExpressionType condition, BlockSignature, Stack& enclosingStack, ControlType& result, Stack& newStack); |
229 | PartialResult WARN_UNUSED_RETURN addElse(ControlData&, const Stack&); |
230 | PartialResult WARN_UNUSED_RETURN addElseToUnreachable(ControlData&); |
231 | |
232 | PartialResult WARN_UNUSED_RETURN addReturn(const ControlData&, const ExpressionList& returnValues); |
233 | PartialResult WARN_UNUSED_RETURN addBranch(ControlData&, ExpressionType condition, const Stack& returnValues); |
234 | PartialResult WARN_UNUSED_RETURN addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTargets, const Stack& expressionStack); |
235 | PartialResult WARN_UNUSED_RETURN endBlock(ControlEntry&, Stack& expressionStack); |
236 | PartialResult WARN_UNUSED_RETURN addEndToUnreachable(ControlEntry&, const Stack& = { }); |
237 | |
238 | // Calls |
239 | PartialResult WARN_UNUSED_RETURN addCall(uint32_t calleeIndex, const Signature&, Vector<ExpressionType>& args, ResultList& results); |
240 | PartialResult WARN_UNUSED_RETURN addCallIndirect(unsigned tableIndex, const Signature&, Vector<ExpressionType>& args, ResultList& results); |
241 | PartialResult WARN_UNUSED_RETURN addUnreachable(); |
242 | B3::Value* createCallPatchpoint(BasicBlock*, Origin, const Signature&, Vector<ExpressionType>& args, const ScopedLambda<void(PatchpointValue*)>& patchpointFunctor); |
243 | |
244 | void dump(const Vector<ControlEntry>& controlStack, const Stack* expressionStack); |
    void setParser(FunctionParser<B3IRGenerator>* parser) { m_parser = parser; }
246 | void didFinishParsingLocals() { } |
247 | |
248 | Value* constant(B3::Type, uint64_t bits, Optional<Origin> = WTF::nullopt); |
249 | Value* framePointer(); |
250 | void insertConstants(); |
251 | |
252 | B3::Type toB3ResultType(BlockSignature); |
253 | |
254 | private: |
255 | void emitExceptionCheck(CCallHelpers&, ExceptionType); |
256 | |
257 | void emitEntryTierUpCheck(); |
258 | void emitLoopTierUpCheck(uint32_t loopIndex); |
259 | |
260 | void emitWriteBarrierForJSWrapper(); |
261 | ExpressionType emitCheckAndPreparePointer(ExpressionType pointer, uint32_t offset, uint32_t sizeOfOp); |
262 | B3::Kind memoryKind(B3::Opcode memoryOp); |
263 | ExpressionType emitLoadOp(LoadOpType, ExpressionType pointer, uint32_t offset); |
264 | void emitStoreOp(StoreOpType, ExpressionType pointer, ExpressionType value, uint32_t offset); |
265 | |
266 | void unify(const ExpressionType phi, const ExpressionType source); |
267 | void unifyValuesWithBlock(const Stack& resultStack, const ResultList& stack); |
268 | |
269 | void emitChecksForModOrDiv(B3::Opcode, ExpressionType left, ExpressionType right); |
270 | |
271 | int32_t WARN_UNUSED_RETURN fixupPointerPlusOffset(ExpressionType&, uint32_t); |
272 | |
273 | void restoreWasmContextInstance(Procedure&, BasicBlock*, Value*); |
274 | enum class RestoreCachedStackLimit { No, Yes }; |
275 | void restoreWebAssemblyGlobalState(RestoreCachedStackLimit, const MemoryInformation&, Value* instance, Procedure&, BasicBlock*); |
276 | |
277 | Origin origin(); |
278 | |
279 | uint32_t outerLoopIndex() const |
280 | { |
281 | if (m_outerLoops.isEmpty()) |
282 | return UINT32_MAX; |
283 | return m_outerLoops.last(); |
284 | } |
285 | |
286 | FunctionParser<B3IRGenerator>* m_parser { nullptr }; |
287 | const ModuleInformation& m_info; |
288 | const MemoryMode m_mode { MemoryMode::BoundsChecking }; |
289 | const CompilationMode m_compilationMode { CompilationMode::BBQMode }; |
290 | const unsigned m_functionIndex { UINT_MAX }; |
291 | const unsigned m_loopIndexForOSREntry { UINT_MAX }; |
292 | TierUpCount* m_tierUp { nullptr }; |
293 | |
294 | Procedure& m_proc; |
295 | BasicBlock* m_rootBlock { nullptr }; |
296 | BasicBlock* m_currentBlock { nullptr }; |
297 | Vector<uint32_t> m_outerLoops; |
298 | Vector<Variable*> m_locals; |
299 | Vector<UnlinkedWasmToWasmCall>& m_unlinkedWasmToWasmCalls; // List each call site and the function index whose address it should be patched with. |
300 | unsigned& m_osrEntryScratchBufferSize; |
301 | HashMap<ValueKey, Value*> m_constantPool; |
302 | HashMap<BlockSignature, B3::Type> m_tupleMap; |
303 | InsertionSet m_constantInsertionValues; |
304 | Value* m_framePointer { nullptr }; |
305 | GPRReg m_memoryBaseGPR { InvalidGPRReg }; |
306 | GPRReg m_memorySizeGPR { InvalidGPRReg }; |
307 | GPRReg m_wasmContextInstanceGPR { InvalidGPRReg }; |
308 | bool m_makesCalls { false }; |
309 | |
310 | Value* m_instanceValue { nullptr }; // Always use the accessor below to ensure the instance value is materialized when used. |
311 | bool m_usesInstanceValue { false }; |
312 | Value* instanceValue() |
313 | { |
314 | m_usesInstanceValue = true; |
315 | return m_instanceValue; |
316 | } |
317 | |
318 | uint32_t m_maxNumJSCallArguments { 0 }; |
319 | unsigned m_numImportFunctions; |
320 | }; |
321 | |
322 | // Memory accesses in WebAssembly have unsigned 32-bit offsets, whereas they have signed 32-bit offsets in B3. |
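// For example (illustrative values): a constant offset of 0x80000000 exceeds
// INT32_MAX, so it cannot live in a B3 MemoryValue's signed offset field; we fold
// it into the pointer with an explicit Add and return 0. A small offset such as 16
// is returned unchanged for the MemoryValue itself to carry.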
323 | int32_t B3IRGenerator::fixupPointerPlusOffset(ExpressionType& ptr, uint32_t offset) |
324 | { |
325 | if (static_cast<uint64_t>(offset) > static_cast<uint64_t>(std::numeric_limits<int32_t>::max())) { |
326 | ptr = m_currentBlock->appendNew<Value>(m_proc, Add, origin(), ptr, m_currentBlock->appendNew<Const64Value>(m_proc, origin(), offset)); |
327 | return 0; |
328 | } |
329 | return offset; |
330 | } |
331 | |
332 | void B3IRGenerator::restoreWasmContextInstance(Procedure& proc, BasicBlock* block, Value* arg) |
333 | { |
334 | if (Context::useFastTLS()) { |
335 | PatchpointValue* patchpoint = block->appendNew<PatchpointValue>(proc, B3::Void, Origin()); |
336 | if (CCallHelpers::storeWasmContextInstanceNeedsMacroScratchRegister()) |
337 | patchpoint->clobber(RegisterSet::macroScratchRegisters()); |
338 | patchpoint->append(ConstrainedValue(arg, ValueRep::SomeRegister)); |
339 | patchpoint->setGenerator( |
340 | [=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
341 | AllowMacroScratchRegisterUsageIf allowScratch(jit, CCallHelpers::storeWasmContextInstanceNeedsMacroScratchRegister()); |
342 | jit.storeWasmContextInstance(params[0].gpr()); |
343 | }); |
344 | return; |
345 | } |
346 | |
    // FIXME: Because a WasmToWasm call clobbers the wasmContextInstance register and does not restore it, we need to restore it on the caller side.
    // This prevents us from using ArgumentReg for this (logically) immutable pinned register.
349 | PatchpointValue* patchpoint = block->appendNew<PatchpointValue>(proc, B3::Void, Origin()); |
350 | Effects effects = Effects::none(); |
351 | effects.writesPinned = true; |
352 | effects.reads = B3::HeapRange::top(); |
353 | patchpoint->effects = effects; |
354 | patchpoint->clobberLate(RegisterSet(m_wasmContextInstanceGPR)); |
355 | patchpoint->append(arg, ValueRep::SomeRegister); |
356 | GPRReg wasmContextInstanceGPR = m_wasmContextInstanceGPR; |
357 | patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& param) { |
358 | jit.move(param[0].gpr(), wasmContextInstanceGPR); |
359 | }); |
360 | } |
361 | |
362 | B3IRGenerator::B3IRGenerator(const ModuleInformation& info, Procedure& procedure, InternalFunction* compilation, Vector<UnlinkedWasmToWasmCall>& unlinkedWasmToWasmCalls, unsigned& osrEntryScratchBufferSize, MemoryMode mode, CompilationMode compilationMode, unsigned functionIndex, unsigned loopIndexForOSREntry, TierUpCount* tierUp, ThrowWasmException throwWasmException) |
363 | : m_info(info) |
364 | , m_mode(mode) |
365 | , m_compilationMode(compilationMode) |
366 | , m_functionIndex(functionIndex) |
367 | , m_loopIndexForOSREntry(loopIndexForOSREntry) |
368 | , m_tierUp(tierUp) |
369 | , m_proc(procedure) |
370 | , m_unlinkedWasmToWasmCalls(unlinkedWasmToWasmCalls) |
371 | , m_osrEntryScratchBufferSize(osrEntryScratchBufferSize) |
372 | , m_constantInsertionValues(m_proc) |
373 | , m_numImportFunctions(info.importFunctionCount()) |
374 | { |
375 | m_rootBlock = m_proc.addBlock(); |
376 | m_currentBlock = m_rootBlock; |
377 | |
378 | // FIXME we don't really need to pin registers here if there's no memory. It makes wasm -> wasm thunks simpler for now. https://bugs.webkit.org/show_bug.cgi?id=166623 |
379 | const PinnedRegisterInfo& pinnedRegs = PinnedRegisterInfo::get(); |
380 | |
381 | m_memoryBaseGPR = pinnedRegs.baseMemoryPointer; |
382 | m_proc.pinRegister(m_memoryBaseGPR); |
383 | |
384 | m_wasmContextInstanceGPR = pinnedRegs.wasmContextInstancePointer; |
385 | if (!Context::useFastTLS()) |
386 | m_proc.pinRegister(m_wasmContextInstanceGPR); |
387 | |
388 | if (mode != MemoryMode::Signaling) { |
389 | m_memorySizeGPR = pinnedRegs.sizeRegister; |
390 | m_proc.pinRegister(m_memorySizeGPR); |
391 | } |
392 | |
393 | if (throwWasmException) |
394 | Thunks::singleton().setThrowWasmException(throwWasmException); |
395 | |
396 | if (info.memory) { |
397 | m_proc.setWasmBoundsCheckGenerator([=] (CCallHelpers& jit, GPRReg pinnedGPR) { |
398 | AllowMacroScratchRegisterUsage allowScratch(jit); |
399 | switch (m_mode) { |
400 | case MemoryMode::BoundsChecking: |
401 | ASSERT_UNUSED(pinnedGPR, m_memorySizeGPR == pinnedGPR); |
402 | break; |
403 | case MemoryMode::Signaling: |
404 | ASSERT_UNUSED(pinnedGPR, InvalidGPRReg == pinnedGPR); |
405 | break; |
406 | } |
407 | this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsMemoryAccess); |
408 | }); |
409 | |
410 | switch (m_mode) { |
411 | case MemoryMode::BoundsChecking: |
412 | break; |
413 | case MemoryMode::Signaling: |
            // Most memory accesses in signaling mode don't do an explicit
            // exception check because they can rely on fault handling to detect
            // out-of-bounds accesses. FaultSignalHandler nonetheless needs the
            // thunk to exist so that it can jump to it.
418 | if (UNLIKELY(!Thunks::singleton().stub(throwExceptionFromWasmThunkGenerator))) |
419 | CRASH(); |
420 | break; |
421 | } |
422 | } |
423 | |
424 | { |
425 | auto* calleeMoveLocation = &compilation->calleeMoveLocation; |
        static_assert(CallFrameSlot::codeBlock * sizeof(Register) < WasmCallingConvention::headerSizeInBytes, "We rely on this here for now.");
        static_assert(CallFrameSlot::callee * sizeof(Register) < WasmCallingConvention::headerSizeInBytes, "We rely on this here for now.");
428 | B3::PatchpointValue* getCalleePatchpoint = m_currentBlock->appendNew<B3::PatchpointValue>(m_proc, B3::Int64, Origin()); |
429 | getCalleePatchpoint->resultConstraints = { B3::ValueRep::SomeRegister }; |
430 | getCalleePatchpoint->effects = B3::Effects::none(); |
431 | getCalleePatchpoint->setGenerator( |
432 | [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) { |
433 | GPRReg result = params[0].gpr(); |
434 | MacroAssembler::DataLabelPtr moveLocation = jit.moveWithPatch(MacroAssembler::TrustedImmPtr(nullptr), result); |
435 | jit.addLinkTask([calleeMoveLocation, moveLocation] (LinkBuffer& linkBuffer) { |
436 | *calleeMoveLocation = linkBuffer.locationOf<WasmEntryPtrTag>(moveLocation); |
437 | }); |
438 | }); |
439 | |
440 | B3::Value* offsetOfCallee = m_currentBlock->appendNew<B3::Const64Value>(m_proc, Origin(), CallFrameSlot::callee * sizeof(Register)); |
441 | m_currentBlock->appendNew<B3::MemoryValue>(m_proc, B3::Store, Origin(), |
442 | getCalleePatchpoint, |
443 | m_currentBlock->appendNew<B3::Value>(m_proc, B3::Add, Origin(), framePointer(), offsetOfCallee)); |
444 | |
445 | // FIXME: We shouldn't have to store zero into the CodeBlock* spot in the call frame, |
446 | // but there are places that interpret non-null CodeBlock slot to mean a valid CodeBlock. |
447 | // When doing unwinding, we'll need to verify that the entire runtime is OK with a non-null |
448 | // CodeBlock not implying that the CodeBlock is valid. |
449 | // https://bugs.webkit.org/show_bug.cgi?id=165321 |
450 | B3::Value* offsetOfCodeBlock = m_currentBlock->appendNew<B3::Const64Value>(m_proc, Origin(), CallFrameSlot::codeBlock * sizeof(Register)); |
451 | m_currentBlock->appendNew<B3::MemoryValue>(m_proc, B3::Store, Origin(), |
452 | m_currentBlock->appendNew<B3::Const64Value>(m_proc, Origin(), 0), |
453 | m_currentBlock->appendNew<B3::Value>(m_proc, B3::Add, Origin(), framePointer(), offsetOfCodeBlock)); |
454 | } |
455 | |
456 | { |
457 | B3::PatchpointValue* stackOverflowCheck = m_currentBlock->appendNew<B3::PatchpointValue>(m_proc, pointerType(), Origin()); |
458 | m_instanceValue = stackOverflowCheck; |
459 | stackOverflowCheck->appendSomeRegister(framePointer()); |
460 | stackOverflowCheck->clobber(RegisterSet::macroScratchRegisters()); |
461 | if (!Context::useFastTLS()) { |
            // FIXME: Because a WasmToWasm call clobbers the wasmContextInstance register and does not restore it, we need to restore it on the caller side.
            // This prevents us from using ArgumentReg for this (logically) immutable pinned register.
464 | stackOverflowCheck->effects.writesPinned = false; |
465 | stackOverflowCheck->effects.readsPinned = true; |
466 | stackOverflowCheck->resultConstraints = { ValueRep::reg(m_wasmContextInstanceGPR) }; |
467 | } |
468 | stackOverflowCheck->numGPScratchRegisters = 2; |
469 | stackOverflowCheck->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) { |
470 | const Checked<int32_t> wasmFrameSize = params.proc().frameSize(); |
471 | const unsigned minimumParentCheckSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), 1024); |
            const unsigned extraFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), std::max<uint32_t>(
473 | // This allows us to elide stack checks for functions that are terminal nodes in the call |
474 | // tree, (e.g they don't make any calls) and have a small enough frame size. This works by |
475 | // having any such terminal node have its parent caller include some extra size in its |
476 | // own check for it. The goal here is twofold: |
477 | // 1. Emit less code. |
478 | // 2. Try to speed things up by skipping stack checks. |
479 | minimumParentCheckSize, |
480 | // This allows us to elide stack checks in the Wasm -> Embedder call IC stub. Since these will |
481 | // spill all arguments to the stack, we ensure that a stack check here covers the |
482 | // stack that such a stub would use. |
483 | (Checked<uint32_t>(m_maxNumJSCallArguments) * sizeof(Register) + JSCallingConvention::headerSizeInBytes).unsafeGet() |
484 | )); |
485 | const int32_t checkSize = m_makesCalls ? (wasmFrameSize + extraFrameSize).unsafeGet() : wasmFrameSize.unsafeGet(); |
486 | bool needUnderflowCheck = static_cast<unsigned>(checkSize) > Options::reservedZoneSize(); |
487 | bool needsOverflowCheck = m_makesCalls || wasmFrameSize >= minimumParentCheckSize || needUnderflowCheck; |
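            // Illustrative numbers (not from the source): a leaf function with
            // wasmFrameSize = 256 makes no calls, 256 < minimumParentCheckSize
            // (1024 here) and 256 < reservedZoneSize, so needsOverflowCheck is
            // false and the check is elided entirely; its caller already accounted
            // for it by folding minimumParentCheckSize into extraFrameSize above.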
488 | |
489 | GPRReg contextInstance = Context::useFastTLS() ? params[0].gpr() : m_wasmContextInstanceGPR; |
490 | |
491 | // This allows leaf functions to not do stack checks if their frame size is within |
492 | // certain limits since their caller would have already done the check. |
493 | if (needsOverflowCheck) { |
494 | AllowMacroScratchRegisterUsage allowScratch(jit); |
495 | GPRReg fp = params[1].gpr(); |
496 | GPRReg scratch1 = params.gpScratch(0); |
497 | GPRReg scratch2 = params.gpScratch(1); |
498 | |
499 | if (Context::useFastTLS()) |
500 | jit.loadWasmContextInstance(contextInstance); |
501 | |
502 | jit.loadPtr(CCallHelpers::Address(contextInstance, Instance::offsetOfCachedStackLimit()), scratch2); |
503 | jit.addPtr(CCallHelpers::TrustedImm32(-checkSize), fp, scratch1); |
504 | MacroAssembler::JumpList overflow; |
505 | if (UNLIKELY(needUnderflowCheck)) |
506 | overflow.append(jit.branchPtr(CCallHelpers::Above, scratch1, fp)); |
507 | overflow.append(jit.branchPtr(CCallHelpers::Below, scratch1, scratch2)); |
508 | jit.addLinkTask([overflow] (LinkBuffer& linkBuffer) { |
509 | linkBuffer.link(overflow, CodeLocationLabel<JITThunkPtrTag>(Thunks::singleton().stub(throwStackOverflowFromWasmThunkGenerator).code())); |
510 | }); |
511 | } else if (m_usesInstanceValue && Context::useFastTLS()) { |
                // No overflow check is needed, but the instance value still needs to be correct.
513 | AllowMacroScratchRegisterUsageIf allowScratch(jit, CCallHelpers::loadWasmContextInstanceNeedsMacroScratchRegister()); |
514 | jit.loadWasmContextInstance(contextInstance); |
515 | } else { |
516 | // We said we'd return a pointer. We don't actually need to because it isn't used, but the patchpoint conservatively said it had effects (potential stack check) which prevent it from getting removed. |
517 | } |
518 | }); |
519 | } |
520 | |
521 | emitEntryTierUpCheck(); |
522 | |
523 | if (m_compilationMode == CompilationMode::OMGForOSREntryMode) |
524 | m_currentBlock = m_proc.addBlock(); |
525 | } |
526 | |
527 | void B3IRGenerator::restoreWebAssemblyGlobalState(RestoreCachedStackLimit restoreCachedStackLimit, const MemoryInformation& memory, Value* instance, Procedure& proc, BasicBlock* block) |
528 | { |
529 | restoreWasmContextInstance(proc, block, instance); |
530 | |
531 | if (restoreCachedStackLimit == RestoreCachedStackLimit::Yes) { |
532 | // The Instance caches the stack limit, but also knows where its canonical location is. |
533 | Value* pointerToActualStackLimit = block->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), instanceValue(), safeCast<int32_t>(Instance::offsetOfPointerToActualStackLimit())); |
534 | Value* actualStackLimit = block->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), pointerToActualStackLimit); |
535 | block->appendNew<MemoryValue>(m_proc, Store, origin(), actualStackLimit, instanceValue(), safeCast<int32_t>(Instance::offsetOfCachedStackLimit())); |
536 | } |
537 | |
538 | if (!!memory) { |
539 | const PinnedRegisterInfo* pinnedRegs = &PinnedRegisterInfo::get(); |
540 | RegisterSet clobbers; |
541 | clobbers.set(pinnedRegs->baseMemoryPointer); |
542 | clobbers.set(pinnedRegs->sizeRegister); |
543 | if (!isARM64()) |
544 | clobbers.set(RegisterSet::macroScratchRegisters()); |
545 | |
546 | B3::PatchpointValue* patchpoint = block->appendNew<B3::PatchpointValue>(proc, B3::Void, origin()); |
547 | Effects effects = Effects::none(); |
548 | effects.writesPinned = true; |
549 | effects.reads = B3::HeapRange::top(); |
550 | patchpoint->effects = effects; |
551 | patchpoint->clobber(clobbers); |
552 | patchpoint->numGPScratchRegisters = Gigacage::isEnabled(Gigacage::Primitive) ? 1 : 0; |
553 | |
554 | patchpoint->append(instance, ValueRep::SomeRegister); |
555 | patchpoint->setGenerator([pinnedRegs] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) { |
556 | AllowMacroScratchRegisterUsage allowScratch(jit); |
557 | GPRReg baseMemory = pinnedRegs->baseMemoryPointer; |
558 | GPRReg scratchOrSize = Gigacage::isEnabled(Gigacage::Primitive) ? params.gpScratch(0) : pinnedRegs->sizeRegister; |
559 | |
560 | jit.loadPtr(CCallHelpers::Address(params[0].gpr(), Instance::offsetOfCachedMemorySize()), pinnedRegs->sizeRegister); |
561 | jit.loadPtr(CCallHelpers::Address(params[0].gpr(), Instance::offsetOfCachedMemory()), baseMemory); |
562 | |
563 | jit.cageConditionally(Gigacage::Primitive, baseMemory, pinnedRegs->sizeRegister, scratchOrSize); |
564 | }); |
565 | } |
566 | } |
567 | |
568 | void B3IRGenerator::emitExceptionCheck(CCallHelpers& jit, ExceptionType type) |
569 | { |
570 | jit.move(CCallHelpers::TrustedImm32(static_cast<uint32_t>(type)), GPRInfo::argumentGPR1); |
571 | auto jumpToExceptionStub = jit.jump(); |
572 | |
573 | jit.addLinkTask([jumpToExceptionStub] (LinkBuffer& linkBuffer) { |
574 | linkBuffer.link(jumpToExceptionStub, CodeLocationLabel<JITThunkPtrTag>(Thunks::singleton().stub(throwExceptionFromWasmThunkGenerator).code())); |
575 | }); |
576 | } |
577 | |
578 | Value* B3IRGenerator::constant(B3::Type type, uint64_t bits, Optional<Origin> maybeOrigin) |
579 | { |
580 | auto result = m_constantPool.ensure(ValueKey(opcodeForConstant(type), type, static_cast<int64_t>(bits)), [&] { |
581 | Value* result = m_proc.addConstant(maybeOrigin ? *maybeOrigin : origin(), type, bits); |
582 | m_constantInsertionValues.insertValue(0, result); |
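        // Deduplication: e.g. two uses of `i64.const 0` in one function share this
        // single Value, which insertConstants() later places at the top of the
        // root block.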
583 | return result; |
584 | }); |
585 | return result.iterator->value; |
586 | } |
587 | |
588 | Value* B3IRGenerator::framePointer() |
589 | { |
590 | if (!m_framePointer) { |
591 | m_framePointer = m_proc.add<B3::Value>(B3::FramePointer, Origin()); |
592 | ASSERT(m_framePointer); |
593 | m_constantInsertionValues.insertValue(0, m_framePointer); |
594 | } |
595 | return m_framePointer; |
596 | } |
597 | |
598 | void B3IRGenerator::insertConstants() |
599 | { |
600 | m_constantInsertionValues.execute(m_proc.at(0)); |
601 | } |
602 | |
603 | B3::Type B3IRGenerator::toB3ResultType(BlockSignature returnType) |
604 | { |
605 | if (returnType->returnsVoid()) |
606 | return B3::Void; |
607 | |
608 | if (returnType->returnCount() == 1) |
609 | return toB3Type(returnType->returnType(0)); |
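
    // For example (illustrative): a signature returning (i32, f64) maps to one B3
    // tuple type, created on first use and cached in m_tupleMap so later blocks
    // with the same signature reuse it.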
610 | |
611 | auto result = m_tupleMap.ensure(returnType, [&] { |
612 | Vector<B3::Type> result; |
613 | for (unsigned i = 0; i < returnType->returnCount(); ++i) |
614 | result.append(toB3Type(returnType->returnType(i))); |
615 | return m_proc.addTuple(WTFMove(result)); |
616 | }); |
617 | return result.iterator->value; |
618 | } |
619 | |
620 | auto B3IRGenerator::addLocal(Type type, uint32_t count) -> PartialResult |
621 | { |
622 | size_t newSize = m_locals.size() + count; |
623 | ASSERT(!(CheckedUint32(count) + m_locals.size()).hasOverflowed()); |
624 | ASSERT(newSize <= maxFunctionLocals); |
    WASM_COMPILE_FAIL_IF(!m_locals.tryReserveCapacity(newSize), "can't allocate memory for ", newSize, " locals");
626 | |
627 | for (uint32_t i = 0; i < count; ++i) { |
628 | Variable* local = m_proc.addVariable(toB3Type(type)); |
629 | m_locals.uncheckedAppend(local); |
630 | auto val = isSubtype(type, Anyref) ? JSValue::encode(jsNull()) : 0; |
631 | m_currentBlock->appendNew<VariableValue>(m_proc, Set, Origin(), local, constant(toB3Type(type), val, Origin())); |
632 | } |
633 | return { }; |
634 | } |
635 | |
636 | auto B3IRGenerator::addArguments(const Signature& signature) -> PartialResult |
637 | { |
638 | ASSERT(!m_locals.size()); |
    WASM_COMPILE_FAIL_IF(!m_locals.tryReserveCapacity(signature.argumentCount()), "can't allocate memory for ", signature.argumentCount(), " arguments");
640 | |
641 | m_locals.grow(signature.argumentCount()); |
642 | CallInformation wasmCallInfo = wasmCallingConvention().callInformationFor(signature, CallRole::Callee); |
643 | |
644 | for (size_t i = 0; i < signature.argumentCount(); ++i) { |
645 | B3::Type type = toB3Type(signature.argument(i)); |
646 | B3::Value* argument; |
647 | auto rep = wasmCallInfo.params[i]; |
648 | if (rep.isReg()) { |
649 | argument = m_currentBlock->appendNew<B3::ArgumentRegValue>(m_proc, Origin(), rep.reg()); |
650 | if (type == B3::Int32 || type == B3::Float) |
651 | argument = m_currentBlock->appendNew<B3::Value>(m_proc, B3::Trunc, Origin(), argument); |
652 | } else { |
653 | ASSERT(rep.isStack()); |
654 | B3::Value* address = m_currentBlock->appendNew<B3::Value>(m_proc, B3::Add, Origin(), framePointer(), |
655 | m_currentBlock->appendNew<B3::Const64Value>(m_proc, Origin(), rep.offsetFromFP())); |
656 | argument = m_currentBlock->appendNew<B3::MemoryValue>(m_proc, B3::Load, type, Origin(), address); |
657 | } |
658 | |
659 | Variable* argumentVariable = m_proc.addVariable(argument->type()); |
660 | m_locals[i] = argumentVariable; |
661 | m_currentBlock->appendNew<VariableValue>(m_proc, Set, Origin(), argumentVariable, argument); |
662 | } |
663 | |
664 | return { }; |
665 | } |
666 | |
667 | auto B3IRGenerator::addRefIsNull(ExpressionType& value, ExpressionType& result) -> PartialResult |
668 | { |
669 | result = m_currentBlock->appendNew<Value>(m_proc, B3::Equal, origin(), value, m_currentBlock->appendNew<Const64Value>(m_proc, origin(), JSValue::encode(jsNull()))); |
670 | return { }; |
671 | } |
672 | |
673 | auto B3IRGenerator::addTableGet(unsigned tableIndex, ExpressionType& index, ExpressionType& result) -> PartialResult |
674 | { |
675 | // FIXME: Emit this inline <https://bugs.webkit.org/show_bug.cgi?id=198506>. |
676 | result = m_currentBlock->appendNew<CCallValue>(m_proc, toB3Type(Anyref), origin(), |
677 | m_currentBlock->appendNew<ConstPtrValue>(m_proc, origin(), tagCFunctionPtr<void*>(&operationGetWasmTableElement, B3CCallPtrTag)), |
678 | instanceValue(), m_currentBlock->appendNew<Const32Value>(m_proc, origin(), tableIndex), index); |
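    // Our reading of the check below: operationGetWasmTableElement signals an
    // out-of-bounds index by returning 0, which is never a valid encoded reference
    // here, hence the Equal-to-zero test. The operation's contract lives in
    // WasmOperations.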
679 | |
680 | { |
681 | CheckValue* check = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), |
682 | m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), result, m_currentBlock->appendNew<Const64Value>(m_proc, origin(), 0))); |
683 | |
684 | check->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { |
685 | this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTableAccess); |
686 | }); |
687 | } |
688 | |
689 | return { }; |
690 | } |
691 | |
692 | auto B3IRGenerator::addTableSet(unsigned tableIndex, ExpressionType& index, ExpressionType& value) -> PartialResult |
693 | { |
694 | // FIXME: Emit this inline <https://bugs.webkit.org/show_bug.cgi?id=198506>. |
    auto didSucceed = m_currentBlock->appendNew<CCallValue>(m_proc, B3::Int32, origin(),
696 | m_currentBlock->appendNew<ConstPtrValue>(m_proc, origin(), tagCFunctionPtr<void*>(&operationSetWasmTableElement, B3CCallPtrTag)), |
697 | instanceValue(), m_currentBlock->appendNew<Const32Value>(m_proc, origin(), tableIndex), index, value); |
698 | |
699 | { |
700 | CheckValue* check = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), |
            m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), didSucceed, m_currentBlock->appendNew<Const32Value>(m_proc, origin(), 0)));
702 | |
703 | check->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { |
704 | this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTableAccess); |
705 | }); |
706 | } |
707 | |
708 | return { }; |
709 | } |
710 | |
711 | auto B3IRGenerator::addRefFunc(uint32_t index, ExpressionType& result) -> PartialResult |
712 | { |
713 | // FIXME: Emit this inline <https://bugs.webkit.org/show_bug.cgi?id=198506>. |
714 | |
715 | result = m_currentBlock->appendNew<CCallValue>(m_proc, B3::Int64, origin(), |
716 | m_currentBlock->appendNew<ConstPtrValue>(m_proc, origin(), tagCFunctionPtr<void*>(&operationWasmRefFunc, B3CCallPtrTag)), |
717 | instanceValue(), addConstant(Type::I32, index)); |
718 | |
719 | return { }; |
720 | } |
721 | |
722 | auto B3IRGenerator::addTableSize(unsigned tableIndex, ExpressionType& result) -> PartialResult |
723 | { |
724 | // FIXME: Emit this inline <https://bugs.webkit.org/show_bug.cgi?id=198506>. |
725 | result = m_currentBlock->appendNew<CCallValue>(m_proc, toB3Type(I32), origin(), |
726 | m_currentBlock->appendNew<ConstPtrValue>(m_proc, origin(), tagCFunctionPtr<void*>(&operationGetWasmTableSize, B3CCallPtrTag)), |
727 | instanceValue(), m_currentBlock->appendNew<Const32Value>(m_proc, origin(), tableIndex)); |
728 | |
729 | return { }; |
730 | } |
731 | |
732 | auto B3IRGenerator::addTableGrow(unsigned tableIndex, ExpressionType& fill, ExpressionType& delta, ExpressionType& result) -> PartialResult |
733 | { |
734 | result = m_currentBlock->appendNew<CCallValue>(m_proc, toB3Type(I32), origin(), |
735 | m_currentBlock->appendNew<ConstPtrValue>(m_proc, origin(), tagCFunctionPtr<void*>(&operationWasmTableGrow, B3CCallPtrTag)), |
736 | instanceValue(), m_currentBlock->appendNew<Const32Value>(m_proc, origin(), tableIndex), fill, delta); |
737 | |
738 | return { }; |
739 | } |
740 | |
741 | auto B3IRGenerator::addTableFill(unsigned tableIndex, ExpressionType& offset, ExpressionType& fill, ExpressionType& count) -> PartialResult |
742 | { |
743 | auto result = m_currentBlock->appendNew<CCallValue>(m_proc, toB3Type(I32), origin(), |
744 | m_currentBlock->appendNew<ConstPtrValue>(m_proc, origin(), tagCFunctionPtr<void*>(&operationWasmTableFill, B3CCallPtrTag)), |
745 | instanceValue(), m_currentBlock->appendNew<Const32Value>(m_proc, origin(), tableIndex), offset, fill, count); |
746 | |
747 | { |
748 | CheckValue* check = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), |
749 | m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), result, m_currentBlock->appendNew<Const32Value>(m_proc, origin(), 0))); |
750 | |
751 | check->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { |
752 | this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTableAccess); |
753 | }); |
754 | } |
755 | |
756 | return { }; |
757 | } |
758 | |
759 | auto B3IRGenerator::getLocal(uint32_t index, ExpressionType& result) -> PartialResult |
760 | { |
761 | ASSERT(m_locals[index]); |
762 | result = m_currentBlock->appendNew<VariableValue>(m_proc, B3::Get, origin(), m_locals[index]); |
763 | return { }; |
764 | } |
765 | |
766 | auto B3IRGenerator::addUnreachable() -> PartialResult |
767 | { |
768 | B3::PatchpointValue* unreachable = m_currentBlock->appendNew<B3::PatchpointValue>(m_proc, B3::Void, origin()); |
769 | unreachable->setGenerator([this] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { |
770 | this->emitExceptionCheck(jit, ExceptionType::Unreachable); |
771 | }); |
772 | unreachable->effects.terminal = true; |
773 | return { }; |
774 | } |
775 | |
776 | auto B3IRGenerator::addGrowMemory(ExpressionType delta, ExpressionType& result) -> PartialResult |
777 | { |
778 | result = m_currentBlock->appendNew<CCallValue>(m_proc, Int32, origin(), |
779 | m_currentBlock->appendNew<ConstPtrValue>(m_proc, origin(), tagCFunctionPtr<void*>(&operationGrowMemory, B3CCallPtrTag)), |
780 | framePointer(), instanceValue(), delta); |
781 | |
782 | restoreWebAssemblyGlobalState(RestoreCachedStackLimit::No, m_info.memory, instanceValue(), m_proc, m_currentBlock); |
783 | |
784 | return { }; |
785 | } |
786 | |
787 | auto B3IRGenerator::addCurrentMemory(ExpressionType& result) -> PartialResult |
788 | { |
    static_assert(sizeof(decltype(static_cast<Memory*>(nullptr)->size())) == sizeof(uint64_t), "codegen relies on this size");
790 | Value* size = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, Int64, origin(), instanceValue(), safeCast<int32_t>(Instance::offsetOfCachedMemorySize())); |
791 | |
792 | constexpr uint32_t shiftValue = 16; |
    static_assert(PageCount::pageSize == 1ull << shiftValue, "This must hold for the code below to be correct.");
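    // Illustrative: with two live 64KiB pages the cached size is 131072 bytes, and
    // 131072 >> 16 = 2 is what current_memory should produce (as an i32, hence the
    // Trunc below).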
794 | Value* numPages = m_currentBlock->appendNew<Value>(m_proc, ZShr, origin(), |
795 | size, m_currentBlock->appendNew<Const32Value>(m_proc, origin(), shiftValue)); |
796 | |
797 | result = m_currentBlock->appendNew<Value>(m_proc, Trunc, origin(), numPages); |
798 | |
799 | return { }; |
800 | } |
801 | |
802 | auto B3IRGenerator::setLocal(uint32_t index, ExpressionType value) -> PartialResult |
803 | { |
804 | ASSERT(m_locals[index]); |
805 | m_currentBlock->appendNew<VariableValue>(m_proc, B3::Set, origin(), m_locals[index], value); |
806 | return { }; |
807 | } |
808 | |
809 | auto B3IRGenerator::getGlobal(uint32_t index, ExpressionType& result) -> PartialResult |
810 | { |
811 | Value* globalsArray = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), instanceValue(), safeCast<int32_t>(Instance::offsetOfGlobals())); |
812 | result = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, toB3Type(m_info.globals[index].type), origin(), globalsArray, safeCast<int32_t>(index * sizeof(Register))); |
813 | return { }; |
814 | } |
815 | |
816 | auto B3IRGenerator::setGlobal(uint32_t index, ExpressionType value) -> PartialResult |
817 | { |
818 | ASSERT(toB3Type(m_info.globals[index].type) == value->type()); |
819 | Value* globalsArray = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), instanceValue(), safeCast<int32_t>(Instance::offsetOfGlobals())); |
820 | m_currentBlock->appendNew<MemoryValue>(m_proc, Store, origin(), value, globalsArray, safeCast<int32_t>(index * sizeof(Register))); |
821 | |
822 | if (isSubtype(m_info.globals[index].type, Anyref)) |
823 | emitWriteBarrierForJSWrapper(); |
824 | |
825 | return { }; |
826 | } |
827 | |
828 | inline void B3IRGenerator::emitWriteBarrierForJSWrapper() |
829 | { |
830 | Value* cell = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), instanceValue(), safeCast<int32_t>(Instance::offsetOfOwner())); |
831 | Value* cellState = m_currentBlock->appendNew<MemoryValue>(m_proc, Load8Z, Int32, origin(), cell, safeCast<int32_t>(JSCell::cellStateOffset())); |
832 | Value* vm = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), cell, safeCast<int32_t>(JSWebAssemblyInstance::offsetOfVM())); |
833 | Value* threshold = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, Int32, origin(), vm, safeCast<int32_t>(VM::offsetOfHeapBarrierThreshold())); |
834 | |
835 | BasicBlock* fenceCheckPath = m_proc.addBlock(); |
836 | BasicBlock* fencePath = m_proc.addBlock(); |
837 | BasicBlock* doSlowPath = m_proc.addBlock(); |
838 | BasicBlock* continuation = m_proc.addBlock(); |
839 | |
840 | m_currentBlock->appendNewControlValue(m_proc, B3::Branch, origin(), |
841 | m_currentBlock->appendNew<Value>(m_proc, Above, origin(), cellState, threshold), |
842 | FrequentedBlock(continuation), FrequentedBlock(fenceCheckPath, FrequencyClass::Rare)); |
843 | fenceCheckPath->addPredecessor(m_currentBlock); |
844 | continuation->addPredecessor(m_currentBlock); |
845 | m_currentBlock = fenceCheckPath; |
846 | |
847 | Value* shouldFence = m_currentBlock->appendNew<MemoryValue>(m_proc, Load8Z, Int32, origin(), vm, safeCast<int32_t>(VM::offsetOfHeapMutatorShouldBeFenced())); |
848 | m_currentBlock->appendNewControlValue(m_proc, B3::Branch, origin(), |
849 | shouldFence, |
850 | FrequentedBlock(fencePath), FrequentedBlock(doSlowPath)); |
851 | fencePath->addPredecessor(m_currentBlock); |
852 | doSlowPath->addPredecessor(m_currentBlock); |
853 | m_currentBlock = fencePath; |
854 | |
855 | B3::PatchpointValue* doFence = m_currentBlock->appendNew<B3::PatchpointValue>(m_proc, B3::Void, origin()); |
856 | doFence->setGenerator([] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { |
857 | jit.memoryFence(); |
858 | }); |
859 | |
860 | Value* cellStateLoadAfterFence = m_currentBlock->appendNew<MemoryValue>(m_proc, Load8Z, Int32, origin(), cell, safeCast<int32_t>(JSCell::cellStateOffset())); |
861 | m_currentBlock->appendNewControlValue(m_proc, B3::Branch, origin(), |
862 | m_currentBlock->appendNew<Value>(m_proc, Above, origin(), cellStateLoadAfterFence, m_currentBlock->appendNew<Const32Value>(m_proc, origin(), blackThreshold)), |
863 | FrequentedBlock(continuation), FrequentedBlock(doSlowPath, FrequencyClass::Rare)); |
864 | doSlowPath->addPredecessor(m_currentBlock); |
865 | continuation->addPredecessor(m_currentBlock); |
866 | m_currentBlock = doSlowPath; |
867 | |
868 | Value* writeBarrierAddress = m_currentBlock->appendNew<ConstPtrValue>(m_proc, origin(), tagCFunctionPtr<void*>(&operationWasmWriteBarrierSlowPath, B3CCallPtrTag)); |
869 | m_currentBlock->appendNew<CCallValue>(m_proc, B3::Void, origin(), writeBarrierAddress, cell, vm); |
870 | m_currentBlock->appendNewControlValue(m_proc, Jump, origin(), continuation); |
871 | |
872 | continuation->addPredecessor(m_currentBlock); |
873 | m_currentBlock = continuation; |
874 | } |
875 | |
876 | inline Value* B3IRGenerator::emitCheckAndPreparePointer(ExpressionType pointer, uint32_t offset, uint32_t sizeOfOperation) |
877 | { |
878 | ASSERT(m_memoryBaseGPR); |
879 | |
880 | switch (m_mode) { |
881 | case MemoryMode::BoundsChecking: { |
        // We're not using signal handling at all, so we must check that no memory access exceeds the current memory size.
883 | ASSERT(m_memorySizeGPR); |
884 | ASSERT(sizeOfOperation + offset > offset); |
885 | m_currentBlock->appendNew<WasmBoundsCheckValue>(m_proc, origin(), m_memorySizeGPR, pointer, sizeOfOperation + offset - 1); |
886 | break; |
887 | } |
888 | |
889 | case MemoryMode::Signaling: { |
890 | // We've virtually mapped 4GiB+redzone for this memory. Only the user-allocated pages are addressable, contiguously in range [0, current], |
891 | // and everything above is mapped PROT_NONE. We don't need to perform any explicit bounds check in the 4GiB range because WebAssembly register |
892 | // memory accesses are 32-bit. However WebAssembly register + offset accesses perform the addition in 64-bit which can push an access above |
893 | // the 32-bit limit (the offset is unsigned 32-bit). The redzone will catch most small offsets, and we'll explicitly bounds check any |
894 | // register + large offset access. We don't think this will be generated frequently. |
895 | // |
896 | // We could check that register + large offset doesn't exceed 4GiB+redzone since that's technically the limit we need to avoid overflowing the |
        // PROT_NONE region, but it's better if we use a smaller immediate because it codegens better. We know that anything equal to or greater
898 | // than the declared 'maximum' will trap, so we can compare against that number. If there was no declared 'maximum' then we still know that |
899 | // any access equal to or greater than 4GiB will trap, no need to add the redzone. |
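        // Illustrative: if fastMappedRedzoneBytes() were 4MiB, an access at offset
        // 16 would need no explicit check (a wild access faults in the redzone or
        // the PROT_NONE region), while an access at offset 64MiB falls past the
        // redzone and gets the explicit WasmBoundsCheckValue below.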
900 | if (offset >= Memory::fastMappedRedzoneBytes()) { |
901 | size_t maximum = m_info.memory.maximum() ? m_info.memory.maximum().bytes() : std::numeric_limits<uint32_t>::max(); |
902 | m_currentBlock->appendNew<WasmBoundsCheckValue>(m_proc, origin(), pointer, sizeOfOperation + offset - 1, maximum); |
903 | } |
904 | break; |
905 | } |
906 | } |
907 | |
908 | pointer = m_currentBlock->appendNew<Value>(m_proc, ZExt32, origin(), pointer); |
909 | return m_currentBlock->appendNew<WasmAddressValue>(m_proc, origin(), pointer, m_memoryBaseGPR); |
910 | } |
911 | |
912 | inline uint32_t sizeOfLoadOp(LoadOpType op) |
913 | { |
914 | switch (op) { |
915 | case LoadOpType::I32Load8S: |
916 | case LoadOpType::I32Load8U: |
917 | case LoadOpType::I64Load8S: |
918 | case LoadOpType::I64Load8U: |
919 | return 1; |
920 | case LoadOpType::I32Load16S: |
921 | case LoadOpType::I64Load16S: |
922 | case LoadOpType::I32Load16U: |
923 | case LoadOpType::I64Load16U: |
924 | return 2; |
925 | case LoadOpType::I32Load: |
926 | case LoadOpType::I64Load32S: |
927 | case LoadOpType::I64Load32U: |
928 | case LoadOpType::F32Load: |
929 | return 4; |
930 | case LoadOpType::I64Load: |
931 | case LoadOpType::F64Load: |
932 | return 8; |
933 | } |
934 | RELEASE_ASSERT_NOT_REACHED(); |
935 | } |
936 | |
937 | inline B3::Kind B3IRGenerator::memoryKind(B3::Opcode memoryOp) |
938 | { |
939 | if (m_mode == MemoryMode::Signaling) |
940 | return trapping(memoryOp); |
941 | return memoryOp; |
942 | } |
943 | |
944 | inline Value* B3IRGenerator::emitLoadOp(LoadOpType op, ExpressionType pointer, uint32_t uoffset) |
945 | { |
946 | int32_t offset = fixupPointerPlusOffset(pointer, uoffset); |
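    // B3's sub-word loads (Load8S/Load8Z/Load16S/Load16Z) produce an Int32, so the
    // I64 variants below first load as Int32 and then widen with SExt32/ZExt32 to
    // get the 64-bit result wasm expects.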
947 | |
948 | switch (op) { |
949 | case LoadOpType::I32Load8S: { |
950 | return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load8S), origin(), pointer, offset); |
951 | } |
952 | |
953 | case LoadOpType::I64Load8S: { |
954 | Value* value = m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load8S), origin(), pointer, offset); |
955 | return m_currentBlock->appendNew<Value>(m_proc, SExt32, origin(), value); |
956 | } |
957 | |
958 | case LoadOpType::I32Load8U: { |
959 | return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load8Z), origin(), pointer, offset); |
960 | } |
961 | |
962 | case LoadOpType::I64Load8U: { |
963 | Value* value = m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load8Z), origin(), pointer, offset); |
964 | return m_currentBlock->appendNew<Value>(m_proc, ZExt32, origin(), value); |
965 | } |
966 | |
967 | case LoadOpType::I32Load16S: { |
968 | return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load16S), origin(), pointer, offset); |
969 | } |
970 | |
971 | case LoadOpType::I64Load16S: { |
972 | Value* value = m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load16S), origin(), pointer, offset); |
973 | return m_currentBlock->appendNew<Value>(m_proc, SExt32, origin(), value); |
974 | } |
975 | |
976 | case LoadOpType::I32Load16U: { |
977 | return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load16Z), origin(), pointer, offset); |
978 | } |
979 | |
980 | case LoadOpType::I64Load16U: { |
981 | Value* value = m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load16Z), origin(), pointer, offset); |
982 | return m_currentBlock->appendNew<Value>(m_proc, ZExt32, origin(), value); |
983 | } |
984 | |
985 | case LoadOpType::I32Load: { |
986 | return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load), Int32, origin(), pointer, offset); |
987 | } |
988 | |
989 | case LoadOpType::I64Load32U: { |
990 | Value* value = m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load), Int32, origin(), pointer, offset); |
991 | return m_currentBlock->appendNew<Value>(m_proc, ZExt32, origin(), value); |
992 | } |
993 | |
994 | case LoadOpType::I64Load32S: { |
995 | Value* value = m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load), Int32, origin(), pointer, offset); |
996 | return m_currentBlock->appendNew<Value>(m_proc, SExt32, origin(), value); |
997 | } |
998 | |
999 | case LoadOpType::I64Load: { |
1000 | return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load), Int64, origin(), pointer, offset); |
1001 | } |
1002 | |
1003 | case LoadOpType::F32Load: { |
1004 | return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load), Float, origin(), pointer, offset); |
1005 | } |
1006 | |
1007 | case LoadOpType::F64Load: { |
1008 | return m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Load), Double, origin(), pointer, offset); |
1009 | } |
1010 | } |
1011 | RELEASE_ASSERT_NOT_REACHED(); |
1012 | } |
1013 | |
1014 | auto B3IRGenerator::load(LoadOpType op, ExpressionType pointer, ExpressionType& result, uint32_t offset) -> PartialResult |
1015 | { |
1016 | ASSERT(pointer->type() == Int32); |
1017 | |
1018 | if (UNLIKELY(sumOverflows<uint32_t>(offset, sizeOfLoadOp(op)))) { |
1019 | // FIXME: Even though this is provably out of bounds, it's not a validation error, so we have to handle it |
1020 | // as a runtime exception. However, this may change: https://bugs.webkit.org/show_bug.cgi?id=166435 |
1021 | B3::PatchpointValue* throwException = m_currentBlock->appendNew<B3::PatchpointValue>(m_proc, B3::Void, origin()); |
1022 | throwException->setGenerator([this] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { |
1023 | this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsMemoryAccess); |
1024 | }); |
1025 | |
1026 | switch (op) { |
1027 | case LoadOpType::I32Load8S: |
1028 | case LoadOpType::I32Load16S: |
1029 | case LoadOpType::I32Load: |
1030 | case LoadOpType::I32Load16U: |
1031 | case LoadOpType::I32Load8U: |
1032 | result = constant(Int32, 0); |
1033 | break; |
1034 | case LoadOpType::I64Load8S: |
1035 | case LoadOpType::I64Load8U: |
1036 | case LoadOpType::I64Load16S: |
1037 | case LoadOpType::I64Load32U: |
1038 | case LoadOpType::I64Load32S: |
1039 | case LoadOpType::I64Load: |
1040 | case LoadOpType::I64Load16U: |
1041 | result = constant(Int64, 0); |
1042 | break; |
1043 | case LoadOpType::F32Load: |
1044 | result = constant(Float, 0); |
1045 | break; |
1046 | case LoadOpType::F64Load: |
1047 | result = constant(Double, 0); |
1048 | break; |
1049 | } |
1050 | |
1051 | } else |
1052 | result = emitLoadOp(op, emitCheckAndPreparePointer(pointer, offset, sizeOfLoadOp(op)), offset); |
1053 | |
1054 | return { }; |
1055 | } |
1056 | |
1057 | inline uint32_t sizeOfStoreOp(StoreOpType op) |
1058 | { |
1059 | switch (op) { |
1060 | case StoreOpType::I32Store8: |
1061 | case StoreOpType::I64Store8: |
1062 | return 1; |
1063 | case StoreOpType::I32Store16: |
1064 | case StoreOpType::I64Store16: |
1065 | return 2; |
1066 | case StoreOpType::I32Store: |
1067 | case StoreOpType::I64Store32: |
1068 | case StoreOpType::F32Store: |
1069 | return 4; |
1070 | case StoreOpType::I64Store: |
1071 | case StoreOpType::F64Store: |
1072 | return 8; |
1073 | } |
1074 | RELEASE_ASSERT_NOT_REACHED(); |
1075 | } |
1076 | |
1077 | |
1078 | inline void B3IRGenerator::emitStoreOp(StoreOpType op, ExpressionType pointer, ExpressionType value, uint32_t uoffset) |
1079 | { |
1080 | int32_t offset = fixupPointerPlusOffset(pointer, uoffset); |
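    // Mirror image of emitLoadOp: B3's Store8/Store16 consume an Int32 operand, so
    // the I64 sub-word stores first Trunc the value and then fall through to the
    // shared store; e.g. i64.store8 becomes Trunc followed by Store8.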
1081 | |
1082 | switch (op) { |
1083 | case StoreOpType::I64Store8: |
1084 | value = m_currentBlock->appendNew<Value>(m_proc, Trunc, origin(), value); |
1085 | FALLTHROUGH; |
1086 | |
1087 | case StoreOpType::I32Store8: |
1088 | m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Store8), origin(), value, pointer, offset); |
1089 | return; |
1090 | |
1091 | case StoreOpType::I64Store16: |
1092 | value = m_currentBlock->appendNew<Value>(m_proc, Trunc, origin(), value); |
1093 | FALLTHROUGH; |
1094 | |
1095 | case StoreOpType::I32Store16: |
1096 | m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Store16), origin(), value, pointer, offset); |
1097 | return; |
1098 | |
1099 | case StoreOpType::I64Store32: |
1100 | value = m_currentBlock->appendNew<Value>(m_proc, Trunc, origin(), value); |
1101 | FALLTHROUGH; |
1102 | |
1103 | case StoreOpType::I64Store: |
1104 | case StoreOpType::I32Store: |
1105 | case StoreOpType::F32Store: |
1106 | case StoreOpType::F64Store: |
1107 | m_currentBlock->appendNew<MemoryValue>(m_proc, memoryKind(Store), origin(), value, pointer, offset); |
1108 | return; |
1109 | } |
1110 | RELEASE_ASSERT_NOT_REACHED(); |
1111 | } |
1112 | |
1113 | auto B3IRGenerator::store(StoreOpType op, ExpressionType pointer, ExpressionType value, uint32_t offset) -> PartialResult |
1114 | { |
1115 | ASSERT(pointer->type() == Int32); |
1116 | |
1117 | if (UNLIKELY(sumOverflows<uint32_t>(offset, sizeOfStoreOp(op)))) { |
1118 | // FIXME: Even though this is provably out of bounds, it's not a validation error, so we have to handle it |
1119 | // as a runtime exception. However, this may change: https://bugs.webkit.org/show_bug.cgi?id=166435 |
1120 | B3::PatchpointValue* throwException = m_currentBlock->appendNew<B3::PatchpointValue>(m_proc, B3::Void, origin()); |
1121 | throwException->setGenerator([this] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { |
1122 | this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsMemoryAccess); |
1123 | }); |
1124 | } else |
1125 | emitStoreOp(op, emitCheckAndPreparePointer(pointer, offset, sizeOfStoreOp(op)), value, offset); |
1126 | |
1127 | return { }; |
1128 | } |
1129 | |
1130 | auto B3IRGenerator::addSelect(ExpressionType condition, ExpressionType nonZero, ExpressionType zero, ExpressionType& result) -> PartialResult |
1131 | { |
1132 | result = m_currentBlock->appendNew<Value>(m_proc, B3::Select, origin(), condition, nonZero, zero); |
1133 | return { }; |
1134 | } |
1135 | |
1136 | B3IRGenerator::ExpressionType B3IRGenerator::addConstant(Type type, uint64_t value) |
1137 | { |
1138 | |
1139 | return constant(toB3Type(type), value); |
1140 | } |
1141 | |
1142 | void B3IRGenerator::emitEntryTierUpCheck() |
1143 | { |
1144 | if (!m_tierUp) |
1145 | return; |
1146 | |
1147 | ASSERT(m_tierUp); |
1148 | Value* countDownLocation = constant(pointerType(), reinterpret_cast<uint64_t>(&m_tierUp->m_counter), Origin()); |
1149 | |
1150 | PatchpointValue* patch = m_currentBlock->appendNew<PatchpointValue>(m_proc, B3::Void, Origin()); |
1151 | Effects effects = Effects::none(); |
1152 | // FIXME: we should have a more precise heap range for the tier up count. |
1153 | effects.reads = B3::HeapRange::top(); |
1154 | effects.writes = B3::HeapRange::top(); |
1155 | patch->effects = effects; |
1156 | patch->clobber(RegisterSet::macroScratchRegisters()); |
1157 | |
1158 | patch->append(countDownLocation, ValueRep::SomeRegister); |
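// The generator bumps the counter by functionEntryIncrement and takes the
// slow path once the sum is non-negative; the late path calls the OMG entry
// tier-up thunk and then resumes at tierUpResume.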
1159 | patch->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
1160 | AllowMacroScratchRegisterUsage allowScratch(jit); |
1161 | CCallHelpers::Jump tierUp = jit.branchAdd32(CCallHelpers::PositiveOrZero, CCallHelpers::TrustedImm32(TierUpCount::functionEntryIncrement()), CCallHelpers::Address(params[0].gpr())); |
1162 | CCallHelpers::Label tierUpResume = jit.label(); |
1163 | |
1164 | params.addLatePath([=] (CCallHelpers& jit) { |
1165 | tierUp.link(&jit); |
1166 | |
const unsigned extraPaddingBytes = 0;
1168 | RegisterSet registersToSpill = { }; |
1169 | registersToSpill.add(GPRInfo::argumentGPR1); |
1170 | unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(jit, registersToSpill, extraPaddingBytes); |
1171 | |
1172 | jit.move(MacroAssembler::TrustedImm32(m_functionIndex), GPRInfo::argumentGPR1); |
1173 | MacroAssembler::Call call = jit.nearCall(); |
1174 | |
1175 | ScratchRegisterAllocator::restoreRegistersFromStackForCall(jit, registersToSpill, RegisterSet(), numberOfStackBytesUsedForRegisterPreservation, extraPaddingBytes); |
1176 | jit.jump(tierUpResume); |
1177 | |
1178 | jit.addLinkTask([=] (LinkBuffer& linkBuffer) { |
1179 | MacroAssembler::repatchNearCall(linkBuffer.locationOfNearCall<NoPtrTag>(call), CodeLocationLabel<JITThunkPtrTag>(Thunks::singleton().stub(triggerOMGEntryTierUpThunkGenerator).code())); |
1180 | }); |
1181 | }); |
1182 | }); |
1183 | } |
1184 | |
1185 | void B3IRGenerator::emitLoopTierUpCheck(uint32_t loopIndex) |
1186 | { |
1187 | uint32_t outerLoopIndex = this->outerLoopIndex(); |
1188 | m_outerLoops.append(loopIndex); |
1189 | |
1190 | if (!m_tierUp) |
1191 | return; |
1192 | |
1193 | Origin origin = this->origin(); |
1194 | ASSERT(m_tierUp->osrEntryTriggers().size() == loopIndex); |
1195 | m_tierUp->osrEntryTriggers().append(TierUpCount::TriggerReason::DontTrigger); |
1196 | m_tierUp->outerLoops().append(outerLoopIndex); |
1197 | |
1198 | Value* countDownLocation = constant(pointerType(), reinterpret_cast<uint64_t>(&m_tierUp->m_counter), origin); |
1199 | |
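// Collect every live value (all locals plus each enclosing control frame's
// expression stack) so OSR entry can reconstruct the full frame state.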
1200 | Vector<ExpressionType> stackmap; |
1201 | for (auto& local : m_locals) { |
1202 | ExpressionType result = m_currentBlock->appendNew<VariableValue>(m_proc, B3::Get, origin, local); |
1203 | stackmap.append(result); |
1204 | } |
1205 | for (unsigned controlIndex = 0; controlIndex < m_parser->controlStack().size(); ++controlIndex) { |
1206 | auto& expressionStack = m_parser->controlStack()[controlIndex].enclosedExpressionStack; |
1207 | for (Value* value : expressionStack) |
1208 | stackmap.append(value); |
1209 | } |
1210 | |
1211 | PatchpointValue* patch = m_currentBlock->appendNew<PatchpointValue>(m_proc, B3::Void, origin); |
1212 | Effects effects = Effects::none(); |
1213 | // FIXME: we should have a more precise heap range for the tier up count. |
1214 | effects.reads = B3::HeapRange::top(); |
1215 | effects.writes = B3::HeapRange::top(); |
1216 | effects.exitsSideways = true; |
1217 | patch->effects = effects; |
1218 | |
1219 | patch->clobber(RegisterSet::macroScratchRegisters()); |
1220 | RegisterSet clobberLate; |
1221 | clobberLate.add(GPRInfo::argumentGPR0); |
1222 | patch->clobberLate(clobberLate); |
1223 | |
1224 | patch->append(countDownLocation, ValueRep::SomeRegister); |
1225 | patch->appendVectorWithRep(stackmap, ValueRep::ColdAny); |
1226 | |
1227 | TierUpCount::TriggerReason* forceEntryTrigger = &(m_tierUp->osrEntryTriggers().last()); |
static_assert(!static_cast<uint8_t>(TierUpCount::TriggerReason::DontTrigger), "the JIT code assumes non-zero means 'enter'");
static_assert(sizeof(TierUpCount::TriggerReason) == 1, "branchTest8 assumes this size");
1230 | patch->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
1231 | AllowMacroScratchRegisterUsage allowScratch(jit); |
1232 | CCallHelpers::Jump forceOSREntry = jit.branchTest8(CCallHelpers::NonZero, CCallHelpers::AbsoluteAddress(forceEntryTrigger)); |
1233 | CCallHelpers::Jump tierUp = jit.branchAdd32(CCallHelpers::PositiveOrZero, CCallHelpers::TrustedImm32(TierUpCount::loopIncrement()), CCallHelpers::Address(params[0].gpr())); |
1234 | MacroAssembler::Label tierUpResume = jit.label(); |
1235 | |
1236 | OSREntryData& osrEntryData = m_tierUp->addOSREntryData(m_functionIndex, loopIndex); |
1237 | // First argument is the countdown location. |
1238 | for (unsigned i = 1; i < params.value()->numChildren(); ++i) |
1239 | osrEntryData.values().constructAndAppend(params[i], params.value()->child(i)->type()); |
1240 | OSREntryData* osrEntryDataPtr = &osrEntryData; |
1241 | |
1242 | params.addLatePath([=] (CCallHelpers& jit) { |
1243 | AllowMacroScratchRegisterUsage allowScratch(jit); |
1244 | forceOSREntry.link(&jit); |
1245 | tierUp.link(&jit); |
1246 | |
1247 | jit.probe(operationWasmTriggerOSREntryNow, osrEntryDataPtr); |
1248 | jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::argumentGPR0).linkTo(tierUpResume, &jit); |
1249 | jit.farJump(GPRInfo::argumentGPR1, WasmEntryPtrTag); |
1250 | }); |
1251 | }); |
1252 | } |
1253 | |
1254 | auto B3IRGenerator::addLoop(BlockSignature signature, Stack& enclosingStack, ControlType& block, Stack& newStack, uint32_t loopIndex) -> PartialResult |
1255 | { |
1256 | BasicBlock* body = m_proc.addBlock(); |
1257 | BasicBlock* continuation = m_proc.addBlock(); |
1258 | |
1259 | block = ControlData(m_proc, origin(), signature, BlockType::Loop, continuation, body); |
1260 | |
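// Loop arguments flow through phis in the loop header: each argument on the
// enclosing stack feeds an upsilon here, and the corresponding phi becomes
// the value visible inside the loop and on the new stack.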
1261 | ExpressionList args; |
1262 | { |
1263 | unsigned offset = enclosingStack.size() - signature->argumentCount(); |
1264 | for (unsigned i = 0; i < signature->argumentCount(); ++i) { |
1265 | Value* value = enclosingStack.at(offset + i); |
1266 | auto* upsilon = m_currentBlock->appendNew<UpsilonValue>(m_proc, origin(), value); |
1267 | Value* phi = block.phis[i]; |
1268 | body->append(phi); |
1269 | upsilon->setPhi(phi); |
1270 | newStack.append(phi); |
1271 | } |
1272 | enclosingStack.shrink(offset); |
1273 | } |
1274 | |
1275 | m_currentBlock->appendNewControlValue(m_proc, Jump, origin(), body); |
1276 | if (loopIndex == m_loopIndexForOSREntry) { |
dataLogLnIf(WasmB3IRGeneratorInternal::verbose, "Setting up for OSR entry");
1278 | |
1279 | m_currentBlock = m_rootBlock; |
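// On OSR entry the runtime hands us a scratch buffer in argumentGPR0 with one
// uint64_t slot per live value; reload them in the same order the loop
// tier-up check appended them to its stackmap.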
1280 | Value* pointer = m_rootBlock->appendNew<ArgumentRegValue>(m_proc, Origin(), GPRInfo::argumentGPR0); |
1281 | |
1282 | unsigned indexInBuffer = 0; |
1283 | auto loadFromScratchBuffer = [&] (B3::Type type) { |
1284 | size_t offset = sizeof(uint64_t) * indexInBuffer++; |
1285 | RELEASE_ASSERT(type.isNumeric()); |
1286 | return m_currentBlock->appendNew<MemoryValue>(m_proc, Load, type, origin(), pointer, offset); |
1287 | }; |
1288 | |
1289 | for (auto& local : m_locals) |
1290 | m_currentBlock->appendNew<VariableValue>(m_proc, Set, Origin(), local, loadFromScratchBuffer(local->type())); |
1291 | |
1292 | for (unsigned controlIndex = 0; controlIndex < m_parser->controlStack().size(); ++controlIndex) { |
1293 | const auto& data = m_parser->controlStack()[controlIndex].controlData; |
1294 | auto& expressionStack = m_parser->controlStack()[controlIndex].enclosedExpressionStack; |
1295 | |
1296 | // For each stack entry enclosed by this loop we need to replace the value with a phi so we can fill it on OSR entry. |
1297 | BasicBlock* sourceBlock = nullptr; |
1298 | unsigned blockIndex = 0; |
1299 | B3::InsertionSet insertionSet(m_proc); |
1300 | for (unsigned i = 0; i < expressionStack.size(); i++) { |
1301 | auto* value = expressionStack[i]; |
1302 | if (value->isConstant()) { |
1303 | ++indexInBuffer; |
1304 | continue; |
1305 | } |
1306 | |
1307 | if (value->owner != sourceBlock) { |
1308 | insertionSet.execute(sourceBlock); |
1309 | ASSERT(insertionSet.isEmpty()); |
dataLogLnIf(WasmB3IRGeneratorInternal::verbose && sourceBlock, "Executed insertion set into: ", *sourceBlock);
1311 | blockIndex = 0; |
1312 | sourceBlock = value->owner; |
1313 | } |
1314 | |
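// Advance blockIndex to just past the value's definition so the upsilon that
// feeds the new phi can be inserted immediately after it in its owner block.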
1315 | while (sourceBlock->at(blockIndex++) != value) |
1316 | ASSERT(blockIndex < sourceBlock->size()); |
1317 | ASSERT(sourceBlock->at(blockIndex - 1) == value); |
1318 | |
1319 | auto* phi = data.continuation->appendNew<Value>(m_proc, Phi, value->type(), value->origin()); |
1320 | expressionStack[i] = phi; |
1321 | m_currentBlock->appendNew<UpsilonValue>(m_proc, value->origin(), loadFromScratchBuffer(value->type()), phi); |
1322 | |
1323 | auto* sourceUpsilon = m_proc.add<UpsilonValue>(value->origin(), value, phi); |
1324 | insertionSet.insertValue(blockIndex, sourceUpsilon); |
1325 | } |
1326 | insertionSet.execute(sourceBlock); |
1327 | } |
1328 | |
1329 | m_osrEntryScratchBufferSize = indexInBuffer; |
1330 | m_currentBlock->appendNewControlValue(m_proc, Jump, origin(), body); |
1331 | body->addPredecessor(m_currentBlock); |
1332 | } |
1333 | |
1334 | m_currentBlock = body; |
1335 | emitLoopTierUpCheck(loopIndex); |
1336 | return { }; |
1337 | } |
1338 | |
1339 | B3IRGenerator::ControlData B3IRGenerator::addTopLevel(BlockSignature signature) |
1340 | { |
1341 | return ControlData(m_proc, Origin(), signature, BlockType::TopLevel, m_proc.addBlock()); |
1342 | } |
1343 | |
1344 | auto B3IRGenerator::addBlock(BlockSignature signature, Stack& enclosingStack, ControlType& newBlock, Stack& newStack) -> PartialResult |
1345 | { |
1346 | BasicBlock* continuation = m_proc.addBlock(); |
1347 | |
1348 | newStack = splitStack(signature, enclosingStack); |
1349 | newBlock = ControlData(m_proc, origin(), signature, BlockType::Block, continuation); |
1350 | return { }; |
1351 | } |
1352 | |
1353 | auto B3IRGenerator::addIf(ExpressionType condition, BlockSignature signature, Stack& enclosingStack, ControlType& result, Stack& newStack) -> PartialResult |
1354 | { |
1355 | // FIXME: This needs to do some kind of stack passing. |
1356 | |
1357 | BasicBlock* taken = m_proc.addBlock(); |
1358 | BasicBlock* notTaken = m_proc.addBlock(); |
1359 | BasicBlock* continuation = m_proc.addBlock(); |
1360 | |
1361 | m_currentBlock->appendNew<Value>(m_proc, B3::Branch, origin(), condition); |
1362 | m_currentBlock->setSuccessors(FrequentedBlock(taken), FrequentedBlock(notTaken)); |
1363 | taken->addPredecessor(m_currentBlock); |
1364 | notTaken->addPredecessor(m_currentBlock); |
1365 | |
1366 | m_currentBlock = taken; |
1367 | newStack = splitStack(signature, enclosingStack); |
1368 | result = ControlData(m_proc, origin(), signature, BlockType::If, continuation, notTaken); |
1369 | return { }; |
1370 | } |
1371 | |
1372 | auto B3IRGenerator::addElse(ControlData& data, const Stack& currentStack) -> PartialResult |
1373 | { |
1374 | unifyValuesWithBlock(currentStack, data.phis); |
1375 | m_currentBlock->appendNewControlValue(m_proc, Jump, origin(), data.continuation); |
1376 | return addElseToUnreachable(data); |
1377 | } |
1378 | |
1379 | auto B3IRGenerator::addElseToUnreachable(ControlData& data) -> PartialResult |
1380 | { |
1381 | ASSERT(data.blockType() == BlockType::If); |
1382 | m_currentBlock = data.special; |
1383 | data.convertIfToBlock(); |
1384 | return { }; |
1385 | } |
1386 | |
1387 | auto B3IRGenerator::addReturn(const ControlData&, const ExpressionList& returnValues) -> PartialResult |
1388 | { |
1389 | CallInformation wasmCallInfo = wasmCallingConvention().callInformationFor(m_parser->signature(), CallRole::Callee); |
1390 | |
1391 | PatchpointValue* patch = m_proc.add<PatchpointValue>(B3::Void, origin()); |
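// The epilogue is emitted by hand: reload the callee-saved registers from the
// frame, then pop the frame and return. Results are attached below, either as
// register-constrained patchpoint children or as explicit stores off FP.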
1392 | patch->setGenerator([] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) { |
1393 | auto calleeSaves = params.code().calleeSaveRegisterAtOffsetList(); |
1394 | |
1395 | for (RegisterAtOffset calleeSave : calleeSaves) |
1396 | jit.load64ToReg(CCallHelpers::Address(GPRInfo::callFrameRegister, calleeSave.offset()), calleeSave.reg()); |
1397 | |
1398 | jit.emitFunctionEpilogue(); |
1399 | jit.ret(); |
1400 | }); |
1401 | patch->effects.terminal = true; |
1402 | |
1403 | RELEASE_ASSERT(returnValues.size() >= wasmCallInfo.results.size()); |
1404 | unsigned offset = returnValues.size() - wasmCallInfo.results.size(); |
1405 | for (unsigned i = 0; i < wasmCallInfo.results.size(); ++i) { |
1406 | B3::ValueRep rep = wasmCallInfo.results[i]; |
1407 | if (rep.isStack()) { |
1408 | B3::Value* address = m_currentBlock->appendNew<B3::Value>(m_proc, B3::Add, Origin(), framePointer(), constant(pointerType(), rep.offsetFromFP())); |
1409 | m_currentBlock->appendNew<B3::MemoryValue>(m_proc, B3::Store, Origin(), returnValues[offset + i], address); |
1410 | } else { |
1411 | ASSERT(rep.isReg()); |
1412 | patch->append(returnValues[offset + i], rep); |
1413 | } |
1414 | } |
1415 | |
1416 | m_currentBlock->append(patch); |
1417 | return { }; |
1418 | } |
1419 | |
1420 | auto B3IRGenerator::addBranch(ControlData& data, ExpressionType condition, const Stack& returnValues) -> PartialResult |
1421 | { |
1422 | unifyValuesWithBlock(returnValues, data.phis); |
1423 | |
1424 | BasicBlock* target = data.targetBlockForBranch(); |
1425 | if (condition) { |
1426 | BasicBlock* continuation = m_proc.addBlock(); |
1427 | m_currentBlock->appendNew<Value>(m_proc, B3::Branch, origin(), condition); |
1428 | m_currentBlock->setSuccessors(FrequentedBlock(target), FrequentedBlock(continuation)); |
1429 | target->addPredecessor(m_currentBlock); |
1430 | continuation->addPredecessor(m_currentBlock); |
1431 | m_currentBlock = continuation; |
1432 | } else { |
1433 | m_currentBlock->appendNewControlValue(m_proc, Jump, origin(), FrequentedBlock(target)); |
1434 | target->addPredecessor(m_currentBlock); |
1435 | } |
1436 | |
1437 | return { }; |
1438 | } |
1439 | |
1440 | auto B3IRGenerator::addSwitch(ExpressionType condition, const Vector<ControlData*>& targets, ControlData& defaultTarget, const Stack& expressionStack) -> PartialResult |
1441 | { |
1442 | for (size_t i = 0; i < targets.size(); ++i) |
1443 | unifyValuesWithBlock(expressionStack, targets[i]->phis); |
1444 | unifyValuesWithBlock(expressionStack, defaultTarget.phis); |
1445 | |
1446 | SwitchValue* switchValue = m_currentBlock->appendNew<SwitchValue>(m_proc, origin(), condition); |
1447 | switchValue->setFallThrough(FrequentedBlock(defaultTarget.targetBlockForBranch())); |
1448 | for (size_t i = 0; i < targets.size(); ++i) |
1449 | switchValue->appendCase(SwitchCase(i, FrequentedBlock(targets[i]->targetBlockForBranch()))); |
1450 | |
1451 | return { }; |
1452 | } |
1453 | |
1454 | auto B3IRGenerator::endBlock(ControlEntry& entry, Stack& expressionStack) -> PartialResult |
1455 | { |
1456 | ControlData& data = entry.controlData; |
1457 | |
1458 | ASSERT(expressionStack.size() == data.signature->returnCount()); |
1459 | if (data.blockType() != BlockType::Loop) |
1460 | unifyValuesWithBlock(expressionStack, data.phis); |
1461 | |
1462 | m_currentBlock->appendNewControlValue(m_proc, Jump, origin(), data.continuation); |
1463 | data.continuation->addPredecessor(m_currentBlock); |
1464 | |
1465 | return addEndToUnreachable(entry, expressionStack); |
1466 | } |
1467 | |
1468 | auto B3IRGenerator::addEndToUnreachable(ControlEntry& entry, const Stack& expressionStack) -> PartialResult |
1469 | { |
1470 | ControlData& data = entry.controlData; |
1471 | m_currentBlock = data.continuation; |
1472 | |
1473 | if (data.blockType() == BlockType::If) { |
1474 | data.special->appendNewControlValue(m_proc, Jump, origin(), m_currentBlock); |
1475 | m_currentBlock->addPredecessor(data.special); |
1476 | } |
1477 | |
1478 | if (data.blockType() != BlockType::Loop) { |
1479 | for (Value* result : data.phis) { |
1480 | m_currentBlock->append(result); |
1481 | entry.enclosedExpressionStack.append(result); |
1482 | } |
1483 | } else { |
1484 | m_outerLoops.removeLast(); |
1485 | for (unsigned i = 0; i < data.signature->returnCount(); ++i) { |
1486 | if (i < expressionStack.size()) |
1487 | entry.enclosedExpressionStack.append(expressionStack[i]); |
1488 | else |
1489 | entry.enclosedExpressionStack.append(constant(toB3Type(data.signature->returnType(i)), 0xbbadbeef)); |
1490 | } |
1491 | } |
1492 | |
// TopLevel does not have any code after this, so we need to make sure we emit a return here.
1494 | if (data.blockType() == BlockType::TopLevel) |
1495 | return addReturn(entry.controlData, entry.enclosedExpressionStack); |
1496 | |
1497 | return { }; |
1498 | } |
1499 | |
1500 | |
1501 | B3::Value* B3IRGenerator::createCallPatchpoint(BasicBlock* block, Origin origin, const Signature& signature, Vector<ExpressionType>& args, const ScopedLambda<void(PatchpointValue*)>& patchpointFunctor) |
1502 | { |
1503 | Vector<B3::ConstrainedValue> constrainedArguments; |
1504 | CallInformation wasmCallInfo = wasmCallingConvention().callInformationFor(signature); |
1505 | for (unsigned i = 0; i < args.size(); ++i) |
1506 | constrainedArguments.append(B3::ConstrainedValue(args[i], wasmCallInfo.params[i])); |
1507 | |
1508 | m_proc.requestCallArgAreaSizeInBytes(WTF::roundUpToMultipleOf(stackAlignmentBytes(), wasmCallInfo.headerAndArgumentStackSizeInBytes)); |
1509 | |
1510 | B3::Type returnType = toB3ResultType(&signature); |
1511 | B3::PatchpointValue* patchpoint = block->appendNew<B3::PatchpointValue>(m_proc, returnType, origin); |
1512 | patchpoint->clobberEarly(RegisterSet::macroScratchRegisters()); |
1513 | patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall()); |
1514 | patchpointFunctor(patchpoint); |
1515 | patchpoint->appendVector(constrainedArguments); |
1516 | |
1517 | if (returnType != B3::Void) |
1518 | patchpoint->resultConstraints = WTFMove(wasmCallInfo.results); |
1519 | return patchpoint; |
1520 | } |
1521 | |
1522 | auto B3IRGenerator::addCall(uint32_t functionIndex, const Signature& signature, Vector<ExpressionType>& args, ResultList& results) -> PartialResult |
1523 | { |
1524 | ASSERT(signature.argumentCount() == args.size()); |
1525 | |
1526 | m_makesCalls = true; |
1527 | B3::Type returnType = toB3ResultType(&signature); |
1528 | |
1529 | auto fillResults = [&] (Value* callResult) { |
1530 | ASSERT(returnType == callResult->type()); |
1531 | |
1532 | switch (returnType.kind()) { |
1533 | case B3::Void: { |
1534 | break; |
1535 | } |
1536 | case B3::Tuple: { |
1537 | const Vector<B3::Type>& tuple = m_proc.tupleForType(returnType); |
1538 | ASSERT(signature.returnCount() == tuple.size()); |
1539 | for (unsigned i = 0; i < signature.returnCount(); ++i) |
1540 | results.append(m_currentBlock->appendNew<ExtractValue>(m_proc, origin(), tuple[i], callResult, i)); |
1541 | break; |
1542 | } |
1543 | default: { |
1544 | results.append(callResult); |
1545 | break; |
1546 | } |
1547 | } |
1548 | }; |
1549 | |
1550 | Vector<UnlinkedWasmToWasmCall>* unlinkedWasmToWasmCalls = &m_unlinkedWasmToWasmCalls; |
1551 | |
1552 | if (m_info.isImportedFunctionFromFunctionIndexSpace(functionIndex)) { |
1553 | m_maxNumJSCallArguments = std::max(m_maxNumJSCallArguments, static_cast<uint32_t>(args.size())); |
1554 | |
1555 | // FIXME: imports can be linked here, instead of generating a patchpoint, because all import stubs are generated before B3 compilation starts. https://bugs.webkit.org/show_bug.cgi?id=166462 |
1556 | Value* targetInstance = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), instanceValue(), safeCast<int32_t>(Instance::offsetOfTargetInstance(functionIndex))); |
1557 | // The target instance is 0 unless the call is wasm->wasm. |
1558 | Value* isWasmCall = m_currentBlock->appendNew<Value>(m_proc, NotEqual, origin(), targetInstance, m_currentBlock->appendNew<Const64Value>(m_proc, origin(), 0)); |
1559 | |
1560 | BasicBlock* isWasmBlock = m_proc.addBlock(); |
1561 | BasicBlock* isEmbedderBlock = m_proc.addBlock(); |
1562 | BasicBlock* continuation = m_proc.addBlock(); |
1563 | m_currentBlock->appendNewControlValue(m_proc, B3::Branch, origin(), isWasmCall, FrequentedBlock(isWasmBlock), FrequentedBlock(isEmbedderBlock)); |
1564 | |
1565 | Value* wasmCallResult = createCallPatchpoint(isWasmBlock, origin(), signature, args, |
1566 | scopedLambdaRef<void(PatchpointValue*)>([=] (PatchpointValue* patchpoint) -> void { |
1567 | patchpoint->effects.writesPinned = true; |
1568 | patchpoint->effects.readsPinned = true; |
1569 | // We need to clobber all potential pinned registers since we might be leaving the instance. |
1570 | // We pessimistically assume we could be calling to something that is bounds checking. |
1571 | // FIXME: We shouldn't have to do this: https://bugs.webkit.org/show_bug.cgi?id=172181 |
1572 | patchpoint->clobberLate(PinnedRegisterInfo::get().toSave(MemoryMode::BoundsChecking)); |
1573 | patchpoint->setGenerator([unlinkedWasmToWasmCalls, functionIndex] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { |
1574 | AllowMacroScratchRegisterUsage allowScratch(jit); |
1575 | CCallHelpers::Call call = jit.threadSafePatchableNearCall(); |
1576 | jit.addLinkTask([unlinkedWasmToWasmCalls, call, functionIndex] (LinkBuffer& linkBuffer) { |
1577 | unlinkedWasmToWasmCalls->append({ linkBuffer.locationOfNearCall<WasmEntryPtrTag>(call), functionIndex }); |
1578 | }); |
1579 | }); |
1580 | })); |
1581 | UpsilonValue* wasmCallResultUpsilon = returnType == B3::Void ? nullptr : isWasmBlock->appendNew<UpsilonValue>(m_proc, origin(), wasmCallResult); |
1582 | isWasmBlock->appendNewControlValue(m_proc, Jump, origin(), continuation); |
1583 | |
1584 | // FIXME: Let's remove this indirection by creating a PIC friendly IC |
1585 | // for calls out to the embedder. This shouldn't be that hard to do. We could probably |
1586 | // implement the IC to be over Context*. |
1587 | // https://bugs.webkit.org/show_bug.cgi?id=170375 |
1588 | Value* jumpDestination = isEmbedderBlock->appendNew<MemoryValue>(m_proc, |
1589 | Load, pointerType(), origin(), instanceValue(), safeCast<int32_t>(Instance::offsetOfWasmToEmbedderStub(functionIndex))); |
1590 | |
1591 | Value* embedderCallResult = createCallPatchpoint(isEmbedderBlock, origin(), signature, args, |
1592 | scopedLambdaRef<void(PatchpointValue*)>([=] (PatchpointValue* patchpoint) -> void { |
1593 | patchpoint->effects.writesPinned = true; |
1594 | patchpoint->effects.readsPinned = true; |
1595 | patchpoint->append(jumpDestination, ValueRep::SomeRegister); |
1596 | // We need to clobber all potential pinned registers since we might be leaving the instance. |
1597 | // We pessimistically assume we could be calling to something that is bounds checking. |
1598 | // FIXME: We shouldn't have to do this: https://bugs.webkit.org/show_bug.cgi?id=172181 |
1599 | patchpoint->clobberLate(PinnedRegisterInfo::get().toSave(MemoryMode::BoundsChecking)); |
1600 | patchpoint->setGenerator([returnType] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) { |
1601 | AllowMacroScratchRegisterUsage allowScratch(jit); |
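// params[0..resultCount-1] hold the call's results; jumpDestination was the
// first value appended after them, so it lives at params[resultCount].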
1602 | jit.call(params[params.proc().resultCount(returnType)].gpr(), WasmEntryPtrTag); |
1603 | }); |
1604 | })); |
1605 | UpsilonValue* embedderCallResultUpsilon = returnType == B3::Void ? nullptr : isEmbedderBlock->appendNew<UpsilonValue>(m_proc, origin(), embedderCallResult); |
1606 | isEmbedderBlock->appendNewControlValue(m_proc, Jump, origin(), continuation); |
1607 | |
1608 | m_currentBlock = continuation; |
1609 | |
1610 | if (returnType != B3::Void) { |
1611 | Value* phi = continuation->appendNew<Value>(m_proc, Phi, returnType, origin()); |
1612 | wasmCallResultUpsilon->setPhi(phi); |
1613 | embedderCallResultUpsilon->setPhi(phi); |
1614 | fillResults(phi); |
1615 | } |
1616 | |
1617 | // The call could have been to another WebAssembly instance, and / or could have modified our Memory. |
1618 | restoreWebAssemblyGlobalState(RestoreCachedStackLimit::Yes, m_info.memory, instanceValue(), m_proc, continuation); |
1619 | } else { |
1620 | |
1621 | Value* patch = createCallPatchpoint(m_currentBlock, origin(), signature, args, |
1622 | scopedLambdaRef<void(PatchpointValue*)>([=] (PatchpointValue* patchpoint) -> void { |
1623 | patchpoint->effects.writesPinned = true; |
1624 | patchpoint->effects.readsPinned = true; |
1625 | |
1626 | patchpoint->setGenerator([unlinkedWasmToWasmCalls, functionIndex] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { |
1627 | AllowMacroScratchRegisterUsage allowScratch(jit); |
1628 | CCallHelpers::Call call = jit.threadSafePatchableNearCall(); |
1629 | jit.addLinkTask([unlinkedWasmToWasmCalls, call, functionIndex] (LinkBuffer& linkBuffer) { |
1630 | unlinkedWasmToWasmCalls->append({ linkBuffer.locationOfNearCall<WasmEntryPtrTag>(call), functionIndex }); |
1631 | }); |
1632 | }); |
1633 | })); |
1634 | fillResults(patch); |
1635 | } |
1636 | |
1637 | return { }; |
1638 | } |
1639 | |
1640 | auto B3IRGenerator::addCallIndirect(unsigned tableIndex, const Signature& signature, Vector<ExpressionType>& args, ResultList& results) -> PartialResult |
1641 | { |
1642 | ExpressionType calleeIndex = args.takeLast(); |
1643 | ASSERT(signature.argumentCount() == args.size()); |
1644 | |
1645 | m_makesCalls = true; |
1646 | // Note: call indirect can call either WebAssemblyFunction or WebAssemblyWrapperFunction. Because |
1647 | // WebAssemblyWrapperFunction is like calling into the embedder, we conservatively assume all call indirects |
1648 | // can be to the embedder for our stack check calculation. |
1649 | m_maxNumJSCallArguments = std::max(m_maxNumJSCallArguments, static_cast<uint32_t>(args.size())); |
1650 | |
1651 | ExpressionType callableFunctionBuffer; |
1652 | ExpressionType instancesBuffer; |
1653 | ExpressionType callableFunctionBufferLength; |
1654 | ExpressionType mask; |
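// Load the table's function buffer, instance buffer, length, and index mask
// up front; they all hang off the per-instance table pointer.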
1655 | { |
1656 | ExpressionType table = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), |
1657 | instanceValue(), safeCast<int32_t>(Instance::offsetOfTablePtr(m_numImportFunctions, tableIndex))); |
1658 | callableFunctionBuffer = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), |
1659 | table, safeCast<int32_t>(FuncRefTable::offsetOfFunctions())); |
1660 | instancesBuffer = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), |
1661 | table, safeCast<int32_t>(FuncRefTable::offsetOfInstances())); |
1662 | callableFunctionBufferLength = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, Int32, origin(), |
1663 | table, safeCast<int32_t>(Table::offsetOfLength())); |
1664 | mask = m_currentBlock->appendNew<Value>(m_proc, ZExt32, origin(), |
1665 | m_currentBlock->appendNew<MemoryValue>(m_proc, Load, Int32, origin(), |
1666 | table, safeCast<int32_t>(Table::offsetOfMask()))); |
1667 | } |
1668 | |
1669 | // Check the index we are looking for is valid. |
1670 | { |
1671 | CheckValue* check = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), |
1672 | m_currentBlock->appendNew<Value>(m_proc, AboveEqual, origin(), calleeIndex, callableFunctionBufferLength)); |
1673 | |
1674 | check->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { |
1675 | this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsCallIndirect); |
1676 | }); |
1677 | } |
1678 | |
1679 | calleeIndex = m_currentBlock->appendNew<Value>(m_proc, ZExt32, origin(), calleeIndex); |
1680 | |
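// Masking the (already bounds-checked) index keeps a misspeculated
// out-of-bounds load from reading beyond the table buffer.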
1681 | if (Options::enableSpectreMitigations()) |
1682 | calleeIndex = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(), mask, calleeIndex); |
1683 | |
1684 | ExpressionType callableFunction; |
1685 | { |
1686 | // Compute the offset in the table index space we are looking for. |
1687 | ExpressionType offset = m_currentBlock->appendNew<Value>(m_proc, Mul, origin(), |
1688 | calleeIndex, constant(pointerType(), sizeof(WasmToWasmImportableFunction))); |
1689 | callableFunction = m_currentBlock->appendNew<Value>(m_proc, Add, origin(), callableFunctionBuffer, offset); |
1690 | |
1691 | // Check that the WasmToWasmImportableFunction is initialized. We trap if it isn't. An "invalid" SignatureIndex indicates it's not initialized. |
1692 | // FIXME: when we have trap handlers, we can just let the call fail because Signature::invalidIndex is 0. https://bugs.webkit.org/show_bug.cgi?id=177210 |
static_assert(sizeof(WasmToWasmImportableFunction::signatureIndex) == sizeof(uint64_t), "Load codegen assumes i64");
1694 | ExpressionType calleeSignatureIndex = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, Int64, origin(), callableFunction, safeCast<int32_t>(WasmToWasmImportableFunction::offsetOfSignatureIndex())); |
1695 | { |
1696 | CheckValue* check = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), |
1697 | m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), |
1698 | calleeSignatureIndex, |
1699 | m_currentBlock->appendNew<Const64Value>(m_proc, origin(), Signature::invalidIndex))); |
1700 | |
1701 | check->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { |
1702 | this->emitExceptionCheck(jit, ExceptionType::NullTableEntry); |
1703 | }); |
1704 | } |
1705 | |
1706 | // Check the signature matches the value we expect. |
1707 | { |
1708 | ExpressionType expectedSignatureIndex = m_currentBlock->appendNew<Const64Value>(m_proc, origin(), SignatureInformation::get(signature)); |
1709 | CheckValue* check = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), |
1710 | m_currentBlock->appendNew<Value>(m_proc, NotEqual, origin(), calleeSignatureIndex, expectedSignatureIndex)); |
1711 | |
1712 | check->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { |
1713 | this->emitExceptionCheck(jit, ExceptionType::BadSignature); |
1714 | }); |
1715 | } |
1716 | } |
1717 | |
1718 | // Do a context switch if needed. |
1719 | { |
1720 | Value* offset = m_currentBlock->appendNew<Value>(m_proc, Mul, origin(), |
1721 | calleeIndex, constant(pointerType(), sizeof(Instance*))); |
1722 | Value* newContextInstance = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), |
1723 | m_currentBlock->appendNew<Value>(m_proc, Add, origin(), instancesBuffer, offset)); |
1724 | |
1725 | BasicBlock* continuation = m_proc.addBlock(); |
1726 | BasicBlock* doContextSwitch = m_proc.addBlock(); |
1727 | |
1728 | Value* isSameContextInstance = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), |
1729 | newContextInstance, instanceValue()); |
1730 | m_currentBlock->appendNewControlValue(m_proc, B3::Branch, origin(), |
1731 | isSameContextInstance, FrequentedBlock(continuation), FrequentedBlock(doContextSwitch)); |
1732 | |
1733 | PatchpointValue* patchpoint = doContextSwitch->appendNew<PatchpointValue>(m_proc, B3::Void, origin()); |
1734 | patchpoint->effects.writesPinned = true; |
1735 | // We pessimistically assume we're calling something with BoundsChecking memory. |
1736 | // FIXME: We shouldn't have to do this: https://bugs.webkit.org/show_bug.cgi?id=172181 |
1737 | patchpoint->clobber(PinnedRegisterInfo::get().toSave(MemoryMode::BoundsChecking)); |
1738 | patchpoint->clobber(RegisterSet::macroScratchRegisters()); |
1739 | patchpoint->append(newContextInstance, ValueRep::SomeRegister); |
1740 | patchpoint->append(instanceValue(), ValueRep::SomeRegister); |
1741 | patchpoint->numGPScratchRegisters = Gigacage::isEnabled(Gigacage::Primitive) ? 1 : 0; |
1742 | |
1743 | patchpoint->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) { |
1744 | AllowMacroScratchRegisterUsage allowScratch(jit); |
1745 | GPRReg newContextInstance = params[0].gpr(); |
1746 | GPRReg oldContextInstance = params[1].gpr(); |
1747 | const PinnedRegisterInfo& pinnedRegs = PinnedRegisterInfo::get(); |
1748 | GPRReg baseMemory = pinnedRegs.baseMemoryPointer; |
1749 | ASSERT(newContextInstance != baseMemory); |
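// Propagate the cached stack limit from the old instance to the one we are
// switching to, using baseMemory as a scratch register before it is reloaded
// with the new instance's memory below.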
1750 | jit.loadPtr(CCallHelpers::Address(oldContextInstance, Instance::offsetOfCachedStackLimit()), baseMemory); |
1751 | jit.storePtr(baseMemory, CCallHelpers::Address(newContextInstance, Instance::offsetOfCachedStackLimit())); |
1752 | jit.storeWasmContextInstance(newContextInstance); |
1753 | ASSERT(pinnedRegs.sizeRegister != baseMemory); |
1754 | // FIXME: We should support more than one memory size register |
1755 | // see: https://bugs.webkit.org/show_bug.cgi?id=162952 |
1756 | ASSERT(pinnedRegs.sizeRegister != newContextInstance); |
1757 | GPRReg scratchOrSize = Gigacage::isEnabled(Gigacage::Primitive) ? params.gpScratch(0) : pinnedRegs.sizeRegister; |
1758 | |
1759 | jit.loadPtr(CCallHelpers::Address(newContextInstance, Instance::offsetOfCachedMemorySize()), pinnedRegs.sizeRegister); // Memory size. |
1760 | jit.loadPtr(CCallHelpers::Address(newContextInstance, Instance::offsetOfCachedMemory()), baseMemory); // Memory::void*. |
1761 | |
1762 | jit.cageConditionally(Gigacage::Primitive, baseMemory, pinnedRegs.sizeRegister, scratchOrSize); |
1763 | }); |
1764 | doContextSwitch->appendNewControlValue(m_proc, Jump, origin(), continuation); |
1765 | |
1766 | m_currentBlock = continuation; |
1767 | } |
1768 | |
1769 | ExpressionType calleeCode = m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), |
1770 | m_currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), origin(), callableFunction, |
1771 | safeCast<int32_t>(WasmToWasmImportableFunction::offsetOfEntrypointLoadLocation()))); |
1772 | |
1773 | B3::Type returnType = toB3ResultType(&signature); |
1774 | ExpressionType callResult = createCallPatchpoint(m_currentBlock, origin(), signature, args, |
1775 | scopedLambdaRef<void(PatchpointValue*)>([=] (PatchpointValue* patchpoint) -> void { |
1776 | patchpoint->effects.writesPinned = true; |
1777 | patchpoint->effects.readsPinned = true; |
1778 | // We need to clobber all potential pinned registers since we might be leaving the instance. |
// We pessimistically assume we're always calling something that is bounds checking,
// because the wasm->wasm thunk unconditionally overrides the size registers.
1781 | // FIXME: We should not have to do this, but the wasm->wasm stub assumes it can |
1782 | // use all the pinned registers as scratch: https://bugs.webkit.org/show_bug.cgi?id=172181 |
1783 | patchpoint->clobberLate(PinnedRegisterInfo::get().toSave(MemoryMode::BoundsChecking)); |
1784 | |
1785 | patchpoint->append(calleeCode, ValueRep::SomeRegister); |
1786 | patchpoint->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) { |
1787 | AllowMacroScratchRegisterUsage allowScratch(jit); |
1788 | jit.call(params[params.proc().resultCount(returnType)].gpr(), WasmEntryPtrTag); |
1789 | }); |
1790 | })); |
1791 | |
1792 | switch (returnType.kind()) { |
1793 | case B3::Void: { |
1794 | break; |
1795 | } |
1796 | case B3::Tuple: { |
1797 | const Vector<B3::Type>& tuple = m_proc.tupleForType(returnType); |
1798 | for (unsigned i = 0; i < signature.returnCount(); ++i) |
1799 | results.append(m_currentBlock->appendNew<ExtractValue>(m_proc, origin(), tuple[i], callResult, i)); |
1800 | break; |
1801 | } |
1802 | default: { |
1803 | results.append(callResult); |
1804 | break; |
1805 | } |
1806 | } |
1807 | |
1808 | // The call could have been to another WebAssembly instance, and / or could have modified our Memory. |
1809 | restoreWebAssemblyGlobalState(RestoreCachedStackLimit::Yes, m_info.memory, instanceValue(), m_proc, m_currentBlock); |
1810 | |
1811 | return { }; |
1812 | } |
1813 | |
1814 | void B3IRGenerator::unify(const ExpressionType phi, const ExpressionType source) |
1815 | { |
1816 | m_currentBlock->appendNew<UpsilonValue>(m_proc, origin(), source, phi); |
1817 | } |
1818 | |
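// Wires the top result.size() entries of the expression stack into the target
// block's phis via upsilons, matching values back to front.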
1819 | void B3IRGenerator::unifyValuesWithBlock(const Stack& resultStack, const ResultList& result) |
1820 | { |
1821 | ASSERT(result.size() <= resultStack.size()); |
1822 | |
1823 | for (size_t i = 0; i < result.size(); ++i) |
1824 | unify(result[result.size() - 1 - i], resultStack.at(resultStack.size() - 1 - i)); |
1825 | } |
1826 | |
1827 | static void dumpExpressionStack(const CommaPrinter& comma, const B3IRGenerator::ExpressionList& expressionStack) |
1828 | { |
1829 | dataLog(comma, "ExpressionStack:" ); |
1830 | for (const auto& expression : expressionStack) |
1831 | dataLog(comma, *expression); |
1832 | } |
1833 | |
1834 | void B3IRGenerator::dump(const Vector<ControlEntry>& controlStack, const Stack* expressionStack) |
1835 | { |
1836 | dataLogLn("Constants:" ); |
1837 | for (const auto& constant : m_constantPool) |
1838 | dataLogLn(deepDump(m_proc, constant.value)); |
1839 | |
1840 | dataLogLn("Processing Graph:" ); |
1841 | dataLog(m_proc); |
1842 | dataLogLn("With current block:" , *m_currentBlock); |
1843 | dataLogLn("Control stack:" ); |
1844 | ASSERT(controlStack.size()); |
1845 | for (size_t i = controlStack.size(); i--;) { |
1846 | dataLog(" " , controlStack[i].controlData, ": " ); |
1847 | CommaPrinter comma(", " , "" ); |
1848 | dumpExpressionStack(comma, *expressionStack); |
1849 | expressionStack = &controlStack[i].enclosedExpressionStack; |
1850 | dataLogLn(); |
1851 | } |
1852 | dataLogLn(); |
1853 | } |
1854 | |
1855 | auto B3IRGenerator::origin() -> Origin |
1856 | { |
1857 | OpcodeOrigin origin(m_parser->currentOpcode(), m_parser->currentOpcodeStartingOffset()); |
1858 | ASSERT(isValidOpType(static_cast<uint8_t>(origin.opcode()))); |
1859 | return bitwise_cast<Origin>(origin); |
1860 | } |
1861 | |
1862 | Expected<std::unique_ptr<InternalFunction>, String> parseAndCompile(CompilationContext& compilationContext, const FunctionData& function, const Signature& signature, Vector<UnlinkedWasmToWasmCall>& unlinkedWasmToWasmCalls, unsigned& osrEntryScratchBufferSize, const ModuleInformation& info, MemoryMode mode, CompilationMode compilationMode, uint32_t functionIndex, uint32_t loopIndexForOSREntry, TierUpCount* tierUp, ThrowWasmException throwWasmException) |
1863 | { |
1864 | auto result = makeUnique<InternalFunction>(); |
1865 | |
1866 | compilationContext.embedderEntrypointJIT = makeUnique<CCallHelpers>(); |
1867 | compilationContext.wasmEntrypointJIT = makeUnique<CCallHelpers>(); |
1868 | |
1869 | Procedure procedure; |
1870 | |
1871 | procedure.setOriginPrinter([] (PrintStream& out, Origin origin) { |
1872 | if (origin.data()) |
1873 | out.print("Wasm: " , bitwise_cast<OpcodeOrigin>(origin)); |
1874 | }); |
1875 | |
// Disabling needsUsedRegisters (below) means we cannot use either StackmapGenerationParams::usedRegisters() or
1877 | // StackmapGenerationParams::unavailableRegisters(). In exchange for this concession, we |
1878 | // don't strictly need to run Air::reportUsedRegisters(), which saves a bit of CPU time at |
1879 | // optLevel=1. |
1880 | procedure.setNeedsUsedRegisters(false); |
1881 | |
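// BBQ is the fast first tier, so it compiles at a lower B3 optimization level
// than OMG, the optimizing tier reached through the tier-up checks above.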
1882 | procedure.setOptLevel(compilationMode == CompilationMode::BBQMode |
1883 | ? Options::webAssemblyBBQB3OptimizationLevel() |
1884 | : Options::webAssemblyOMGOptimizationLevel()); |
1885 | |
1886 | B3IRGenerator irGenerator(info, procedure, result.get(), unlinkedWasmToWasmCalls, osrEntryScratchBufferSize, mode, compilationMode, functionIndex, loopIndexForOSREntry, tierUp, throwWasmException); |
1887 | FunctionParser<B3IRGenerator> parser(irGenerator, function.data.data(), function.data.size(), signature, info); |
1888 | WASM_FAIL_IF_HELPER_FAILS(parser.parse()); |
1889 | |
1890 | irGenerator.insertConstants(); |
1891 | |
1892 | procedure.resetReachability(); |
1893 | if (!ASSERT_DISABLED) |
validate(procedure, "After parsing:\n");

dataLogIf(WasmB3IRGeneratorInternal::verbose, "Pre SSA: ", procedure);
fixSSA(procedure);
dataLogIf(WasmB3IRGeneratorInternal::verbose, "Post SSA: ", procedure);
1899 | |
1900 | { |
1901 | B3::prepareForGeneration(procedure); |
1902 | B3::generate(procedure, *compilationContext.wasmEntrypointJIT); |
1903 | compilationContext.wasmEntrypointByproducts = procedure.releaseByproducts(); |
1904 | result->entrypoint.calleeSaveRegisters = procedure.calleeSaveRegisterAtOffsetList(); |
1905 | } |
1906 | |
1907 | return result; |
1908 | } |
1909 | |
1910 | // Custom wasm ops. These are the ones too messy to do in wasm.json. |
1911 | |
1912 | void B3IRGenerator::emitChecksForModOrDiv(B3::Opcode operation, ExpressionType left, ExpressionType right) |
1913 | { |
1914 | ASSERT(operation == Div || operation == Mod || operation == UDiv || operation == UMod); |
1915 | const B3::Type type = left->type(); |
1916 | |
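// Wasm requires a trap on a zero divisor and, for signed Div, on the
// INT_MIN / -1 overflow case, so emit explicit checks ahead of the operation.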
1917 | { |
1918 | CheckValue* check = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), |
1919 | m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), right, constant(type, 0))); |
1920 | |
1921 | check->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { |
1922 | this->emitExceptionCheck(jit, ExceptionType::DivisionByZero); |
1923 | }); |
1924 | } |
1925 | |
1926 | if (operation == Div) { |
1927 | int64_t min = type == Int32 ? std::numeric_limits<int32_t>::min() : std::numeric_limits<int64_t>::min(); |
1928 | |
1929 | CheckValue* check = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), |
1930 | m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(), |
1931 | m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), left, constant(type, min)), |
1932 | m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), right, constant(type, -1)))); |
1933 | |
1934 | check->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) { |
1935 | this->emitExceptionCheck(jit, ExceptionType::IntegerOverflow); |
1936 | }); |
1937 | } |
1938 | } |
1939 | |
1940 | template<> |
1941 | auto B3IRGenerator::addOp<OpType::I32DivS>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult |
1942 | { |
1943 | const B3::Opcode op = Div; |
1944 | emitChecksForModOrDiv(op, left, right); |
1945 | result = m_currentBlock->appendNew<Value>(m_proc, op, origin(), left, right); |
1946 | return { }; |
1947 | } |
1948 | |
1949 | template<> |
1950 | auto B3IRGenerator::addOp<OpType::I32RemS>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult |
1951 | { |
1952 | const B3::Opcode op = Mod; |
1953 | emitChecksForModOrDiv(op, left, right); |
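// chill(Mod) yields 0 for the INT_MIN % -1 case instead of trapping, which is
// exactly what wasm's signed remainder requires (zero divisors trapped above).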
1954 | result = m_currentBlock->appendNew<Value>(m_proc, chill(op), origin(), left, right); |
1955 | return { }; |
1956 | } |
1957 | |
1958 | template<> |
1959 | auto B3IRGenerator::addOp<OpType::I32DivU>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult |
1960 | { |
1961 | const B3::Opcode op = UDiv; |
1962 | emitChecksForModOrDiv(op, left, right); |
1963 | result = m_currentBlock->appendNew<Value>(m_proc, op, origin(), left, right); |
1964 | return { }; |
1965 | } |
1966 | |
1967 | template<> |
1968 | auto B3IRGenerator::addOp<OpType::I32RemU>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult |
1969 | { |
1970 | const B3::Opcode op = UMod; |
1971 | emitChecksForModOrDiv(op, left, right); |
1972 | result = m_currentBlock->appendNew<Value>(m_proc, op, origin(), left, right); |
1973 | return { }; |
1974 | } |
1975 | |
1976 | template<> |
1977 | auto B3IRGenerator::addOp<OpType::I64DivS>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult |
1978 | { |
1979 | const B3::Opcode op = Div; |
1980 | emitChecksForModOrDiv(op, left, right); |
1981 | result = m_currentBlock->appendNew<Value>(m_proc, op, origin(), left, right); |
1982 | return { }; |
1983 | } |
1984 | |
1985 | template<> |
1986 | auto B3IRGenerator::addOp<OpType::I64RemS>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult |
1987 | { |
1988 | const B3::Opcode op = Mod; |
1989 | emitChecksForModOrDiv(op, left, right); |
1990 | result = m_currentBlock->appendNew<Value>(m_proc, chill(op), origin(), left, right); |
1991 | return { }; |
1992 | } |
1993 | |
1994 | template<> |
1995 | auto B3IRGenerator::addOp<OpType::I64DivU>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult |
1996 | { |
1997 | const B3::Opcode op = UDiv; |
1998 | emitChecksForModOrDiv(op, left, right); |
1999 | result = m_currentBlock->appendNew<Value>(m_proc, op, origin(), left, right); |
2000 | return { }; |
2001 | } |
2002 | |
2003 | template<> |
2004 | auto B3IRGenerator::addOp<OpType::I64RemU>(ExpressionType left, ExpressionType right, ExpressionType& result) -> PartialResult |
2005 | { |
2006 | const B3::Opcode op = UMod; |
2007 | emitChecksForModOrDiv(op, left, right); |
2008 | result = m_currentBlock->appendNew<Value>(m_proc, op, origin(), left, right); |
2009 | return { }; |
2010 | } |
2011 | |
2012 | template<> |
2013 | auto B3IRGenerator::addOp<OpType::I32Ctz>(ExpressionType arg, ExpressionType& result) -> PartialResult |
2014 | { |
2015 | PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int32, origin()); |
2016 | patchpoint->append(arg, ValueRep::SomeRegister); |
2017 | patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
2018 | jit.countTrailingZeros32(params[1].gpr(), params[0].gpr()); |
2019 | }); |
2020 | patchpoint->effects = Effects::none(); |
2021 | result = patchpoint; |
2022 | return { }; |
2023 | } |
2024 | |
2025 | template<> |
2026 | auto B3IRGenerator::addOp<OpType::I64Ctz>(ExpressionType arg, ExpressionType& result) -> PartialResult |
2027 | { |
2028 | PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int64, origin()); |
2029 | patchpoint->append(arg, ValueRep::SomeRegister); |
2030 | patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
2031 | jit.countTrailingZeros64(params[1].gpr(), params[0].gpr()); |
2032 | }); |
2033 | patchpoint->effects = Effects::none(); |
2034 | result = patchpoint; |
2035 | return { }; |
2036 | } |
2037 | |
2038 | template<> |
2039 | auto B3IRGenerator::addOp<OpType::I32Popcnt>(ExpressionType arg, ExpressionType& result) -> PartialResult |
2040 | { |
2041 | #if CPU(X86_64) |
2042 | if (MacroAssembler::supportsCountPopulation()) { |
2043 | PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int32, origin()); |
2044 | patchpoint->append(arg, ValueRep::SomeRegister); |
2045 | patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
2046 | jit.countPopulation32(params[1].gpr(), params[0].gpr()); |
2047 | }); |
2048 | patchpoint->effects = Effects::none(); |
2049 | result = patchpoint; |
2050 | return { }; |
2051 | } |
2052 | #endif |
2053 | |
2054 | Value* funcAddress = m_currentBlock->appendNew<ConstPtrValue>(m_proc, origin(), tagCFunctionPtr<void*>(&operationPopcount32, B3CCallPtrTag)); |
2055 | result = m_currentBlock->appendNew<CCallValue>(m_proc, Int32, origin(), Effects::none(), funcAddress, arg); |
2056 | return { }; |
2057 | } |
2058 | |
2059 | template<> |
2060 | auto B3IRGenerator::addOp<OpType::I64Popcnt>(ExpressionType arg, ExpressionType& result) -> PartialResult |
2061 | { |
2062 | #if CPU(X86_64) |
2063 | if (MacroAssembler::supportsCountPopulation()) { |
2064 | PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int64, origin()); |
2065 | patchpoint->append(arg, ValueRep::SomeRegister); |
2066 | patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
2067 | jit.countPopulation64(params[1].gpr(), params[0].gpr()); |
2068 | }); |
2069 | patchpoint->effects = Effects::none(); |
2070 | result = patchpoint; |
2071 | return { }; |
2072 | } |
2073 | #endif |
2074 | |
Value* funcAddress = m_currentBlock->appendNew<ConstPtrValue>(m_proc, origin(), tagCFunctionPtr<void*>(&operationPopcount64, B3CCallPtrTag));
2076 | result = m_currentBlock->appendNew<CCallValue>(m_proc, Int64, origin(), Effects::none(), funcAddress, arg); |
2077 | return { }; |
2078 | } |
2079 | |
2080 | template<> |
auto B3IRGenerator::addOp<OpType::F64ConvertUI64>(ExpressionType arg, ExpressionType& result) -> PartialResult
2082 | { |
2083 | PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Double, origin()); |
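// x86 lacks an unsigned 64-bit to floating-point conversion instruction, so
// the macro assembler needs an extra GP scratch register there.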
2084 | if (isX86()) |
2085 | patchpoint->numGPScratchRegisters = 1; |
2086 | patchpoint->clobber(RegisterSet::macroScratchRegisters()); |
2087 | patchpoint->append(ConstrainedValue(arg, ValueRep::SomeRegister)); |
2088 | patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
2089 | AllowMacroScratchRegisterUsage allowScratch(jit); |
2090 | #if CPU(X86_64) |
2091 | jit.convertUInt64ToDouble(params[1].gpr(), params[0].fpr(), params.gpScratch(0)); |
2092 | #else |
2093 | jit.convertUInt64ToDouble(params[1].gpr(), params[0].fpr()); |
2094 | #endif |
2095 | }); |
2096 | patchpoint->effects = Effects::none(); |
2097 | result = patchpoint; |
2098 | return { }; |
2099 | } |
2100 | |
2101 | template<> |
2102 | auto B3IRGenerator::addOp<OpType::F32ConvertUI64>(ExpressionType arg, ExpressionType& result) -> PartialResult |
2103 | { |
2104 | PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Float, origin()); |
2105 | if (isX86()) |
2106 | patchpoint->numGPScratchRegisters = 1; |
2107 | patchpoint->clobber(RegisterSet::macroScratchRegisters()); |
2108 | patchpoint->append(ConstrainedValue(arg, ValueRep::SomeRegister)); |
2109 | patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
2110 | AllowMacroScratchRegisterUsage allowScratch(jit); |
2111 | #if CPU(X86_64) |
2112 | jit.convertUInt64ToFloat(params[1].gpr(), params[0].fpr(), params.gpScratch(0)); |
2113 | #else |
2114 | jit.convertUInt64ToFloat(params[1].gpr(), params[0].fpr()); |
2115 | #endif |
2116 | }); |
2117 | patchpoint->effects = Effects::none(); |
2118 | result = patchpoint; |
2119 | return { }; |
2120 | } |
2121 | |
2122 | template<> |
2123 | auto B3IRGenerator::addOp<OpType::F64Nearest>(ExpressionType arg, ExpressionType& result) -> PartialResult |
2124 | { |
2125 | PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Double, origin()); |
2126 | patchpoint->append(arg, ValueRep::SomeRegister); |
2127 | patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
2128 | jit.roundTowardNearestIntDouble(params[1].fpr(), params[0].fpr()); |
2129 | }); |
2130 | patchpoint->effects = Effects::none(); |
2131 | result = patchpoint; |
2132 | return { }; |
2133 | } |
2134 | |
2135 | template<> |
2136 | auto B3IRGenerator::addOp<OpType::F32Nearest>(ExpressionType arg, ExpressionType& result) -> PartialResult |
2137 | { |
2138 | PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Float, origin()); |
2139 | patchpoint->append(arg, ValueRep::SomeRegister); |
2140 | patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
2141 | jit.roundTowardNearestIntFloat(params[1].fpr(), params[0].fpr()); |
2142 | }); |
2143 | patchpoint->effects = Effects::none(); |
2144 | result = patchpoint; |
2145 | return { }; |
2146 | } |
2147 | |
2148 | template<> |
2149 | auto B3IRGenerator::addOp<OpType::F64Trunc>(ExpressionType arg, ExpressionType& result) -> PartialResult |
2150 | { |
2151 | PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Double, origin()); |
2152 | patchpoint->append(arg, ValueRep::SomeRegister); |
2153 | patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
2154 | jit.roundTowardZeroDouble(params[1].fpr(), params[0].fpr()); |
2155 | }); |
2156 | patchpoint->effects = Effects::none(); |
2157 | result = patchpoint; |
2158 | return { }; |
2159 | } |
2160 | |
2161 | template<> |
2162 | auto B3IRGenerator::addOp<OpType::F32Trunc>(ExpressionType arg, ExpressionType& result) -> PartialResult |
2163 | { |
2164 | PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Float, origin()); |
2165 | patchpoint->append(arg, ValueRep::SomeRegister); |
2166 | patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
2167 | jit.roundTowardZeroFloat(params[1].fpr(), params[0].fpr()); |
2168 | }); |
2169 | patchpoint->effects = Effects::none(); |
2170 | result = patchpoint; |
2171 | return { }; |
2172 | } |
2173 | |
2174 | template<> |
2175 | auto B3IRGenerator::addOp<OpType::I32TruncSF64>(ExpressionType arg, ExpressionType& result) -> PartialResult |
2176 | { |
2177 | Value* max = constant(Double, bitwise_cast<uint64_t>(-static_cast<double>(std::numeric_limits<int32_t>::min()))); |
2178 | Value* min = constant(Double, bitwise_cast<uint64_t>(static_cast<double>(std::numeric_limits<int32_t>::min()))); |
2179 | Value* outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(), |
2180 | m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max), |
2181 | m_currentBlock->appendNew<Value>(m_proc, GreaterEqual, origin(), arg, min)); |
2182 | outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0)); |
2183 | CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds); |
2184 | trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) { |
2185 | this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc); |
2186 | }); |
2187 | PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int32, origin()); |
2188 | patchpoint->append(arg, ValueRep::SomeRegister); |
2189 | patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
2190 | jit.truncateDoubleToInt32(params[1].fpr(), params[0].gpr()); |
2191 | }); |
2192 | patchpoint->effects = Effects::none(); |
2193 | result = patchpoint; |
2194 | return { }; |
2195 | } |
2196 | |
2197 | template<> |
2198 | auto B3IRGenerator::addOp<OpType::I32TruncSF32>(ExpressionType arg, ExpressionType& result) -> PartialResult |
2199 | { |
2200 | Value* max = constant(Float, bitwise_cast<uint32_t>(-static_cast<float>(std::numeric_limits<int32_t>::min()))); |
2201 | Value* min = constant(Float, bitwise_cast<uint32_t>(static_cast<float>(std::numeric_limits<int32_t>::min()))); |
2202 | Value* outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(), |
2203 | m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max), |
2204 | m_currentBlock->appendNew<Value>(m_proc, GreaterEqual, origin(), arg, min)); |
2205 | outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0)); |
2206 | CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds); |
2207 | trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) { |
2208 | this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc); |
2209 | }); |
2210 | PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int32, origin()); |
2211 | patchpoint->append(arg, ValueRep::SomeRegister); |
2212 | patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
2213 | jit.truncateFloatToInt32(params[1].fpr(), params[0].gpr()); |
2214 | }); |
2215 | patchpoint->effects = Effects::none(); |
2216 | result = patchpoint; |
2217 | return { }; |
2218 | } |
2219 | |
2220 | |
template<>
auto B3IRGenerator::addOp<OpType::I32TruncUF64>(ExpressionType arg, ExpressionType& result) -> PartialResult
{
    Value* max = constant(Double, bitwise_cast<uint64_t>(static_cast<double>(std::numeric_limits<int32_t>::min()) * -2.0));
    Value* min = constant(Double, bitwise_cast<uint64_t>(-1.0));
    Value* outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(),
        m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max),
        m_currentBlock->appendNew<Value>(m_proc, GreaterThan, origin(), arg, min));
    outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0));
    CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds);
    trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) {
        this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc);
    });
    PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int32, origin());
    patchpoint->append(arg, ValueRep::SomeRegister);
    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
        jit.truncateDoubleToUint32(params[1].fpr(), params[0].gpr());
    });
    patchpoint->effects = Effects::none();
    result = patchpoint;
    return { };
}

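// i32.trunc_f32_u is the same check against float bounds, (-1, 2^32). Both
// endpoints are exact as floats: -1 trivially, and 2^32 because it is a
// power of two.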
template<>
auto B3IRGenerator::addOp<OpType::I32TruncUF32>(ExpressionType arg, ExpressionType& result) -> PartialResult
{
    Value* max = constant(Float, bitwise_cast<uint32_t>(static_cast<float>(std::numeric_limits<int32_t>::min()) * static_cast<float>(-2.0)));
    Value* min = constant(Float, bitwise_cast<uint32_t>(static_cast<float>(-1.0)));
    Value* outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(),
        m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max),
        m_currentBlock->appendNew<Value>(m_proc, GreaterThan, origin(), arg, min));
    outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0));
    CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds);
    trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) {
        this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc);
    });
    PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int32, origin());
    patchpoint->append(arg, ValueRep::SomeRegister);
    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
        jit.truncateFloatToUint32(params[1].fpr(), params[0].gpr());
    });
    patchpoint->effects = Effects::none();
    result = patchpoint;
    return { };
}

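// i64.trunc_f64_s accepts doubles in [-2^63, 2^63). As in the 32-bit case,
// -2^63 is exactly representable (hence GreaterEqual), and 2^63 is the first
// value past INT64_MAX (hence the strict LessThan).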
template<>
auto B3IRGenerator::addOp<OpType::I64TruncSF64>(ExpressionType arg, ExpressionType& result) -> PartialResult
{
    Value* max = constant(Double, bitwise_cast<uint64_t>(-static_cast<double>(std::numeric_limits<int64_t>::min())));
    Value* min = constant(Double, bitwise_cast<uint64_t>(static_cast<double>(std::numeric_limits<int64_t>::min())));
    Value* outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(),
        m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max),
        m_currentBlock->appendNew<Value>(m_proc, GreaterEqual, origin(), arg, min));
    outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0));
    CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds);
    trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) {
        this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc);
    });
    PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int64, origin());
    patchpoint->append(arg, ValueRep::SomeRegister);
    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
        jit.truncateDoubleToInt64(params[1].fpr(), params[0].gpr());
    });
    patchpoint->effects = Effects::none();
    result = patchpoint;
    return { };
}

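// i64.trunc_f64_u accepts doubles in (-1, 2^64), with max computed as
// INT64_MIN * -2.0 == 2^64. The conversion itself needs platform help on
// x86, which has no double-to-unsigned instruction; see the signBitConstant
// handling below.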
template<>
auto B3IRGenerator::addOp<OpType::I64TruncUF64>(ExpressionType arg, ExpressionType& result) -> PartialResult
{
    Value* max = constant(Double, bitwise_cast<uint64_t>(static_cast<double>(std::numeric_limits<int64_t>::min()) * -2.0));
    Value* min = constant(Double, bitwise_cast<uint64_t>(-1.0));
    Value* outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(),
        m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max),
        m_currentBlock->appendNew<Value>(m_proc, GreaterThan, origin(), arg, min));
    outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0));
    CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds);
    trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) {
        this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc);
    });

    Value* signBitConstant = nullptr;
    if (isX86()) {
        // Since x86 doesn't have an instruction to convert floating point numbers to unsigned integers, we at least try to do the
        // smart thing if the number would be positive anyway as a signed integer. Since we cannot materialize constants into FPRs,
        // we have B3 do it so we can pool them if needed.
        signBitConstant = constant(Double, bitwise_cast<uint64_t>(static_cast<double>(std::numeric_limits<uint64_t>::max() - std::numeric_limits<int64_t>::max())));
    }
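    // signBitConstant is 2^63, i.e. (2^64 - 1) - (2^63 - 1). The usual x86
    // idiom (which we assume truncateDoubleToUint64 implements) converts
    // values below 2^63 with a plain signed conversion, and otherwise
    // subtracts 2^63 before converting and restores the high bit afterwards.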
    PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int64, origin());
    patchpoint->append(arg, ValueRep::SomeRegister);
    if (isX86()) {
        patchpoint->append(signBitConstant, ValueRep::SomeRegister);
        patchpoint->numFPScratchRegisters = 1;
    }
    patchpoint->clobber(RegisterSet::macroScratchRegisters());
    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
        AllowMacroScratchRegisterUsage allowScratch(jit);
        FPRReg scratch = InvalidFPRReg;
        FPRReg constant = InvalidFPRReg;
        if (isX86()) {
            scratch = params.fpScratch(0);
            constant = params[2].fpr();
        }
        jit.truncateDoubleToUint64(params[1].fpr(), params[0].gpr(), scratch, constant);
    });
    patchpoint->effects = Effects::none();
    result = patchpoint;
    return { };
}

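// i64.trunc_f32_s accepts floats in [-2^63, 2^63); both endpoints are powers
// of two and hence exact as floats, so the bounds checks mirror the double
// case.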
template<>
auto B3IRGenerator::addOp<OpType::I64TruncSF32>(ExpressionType arg, ExpressionType& result) -> PartialResult
{
    Value* max = constant(Float, bitwise_cast<uint32_t>(-static_cast<float>(std::numeric_limits<int64_t>::min())));
    Value* min = constant(Float, bitwise_cast<uint32_t>(static_cast<float>(std::numeric_limits<int64_t>::min())));
    Value* outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(),
        m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max),
        m_currentBlock->appendNew<Value>(m_proc, GreaterEqual, origin(), arg, min));
    outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0));
    CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds);
    trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) {
        this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc);
    });
    PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int64, origin());
    patchpoint->append(arg, ValueRep::SomeRegister);
    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
        jit.truncateFloatToInt64(params[1].fpr(), params[0].gpr());
    });
    patchpoint->effects = Effects::none();
    result = patchpoint;
    return { };
}

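// i64.trunc_f32_u accepts floats in (-1, 2^64) and needs the same x86
// workaround as the double-to-uint64 case above.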
template<>
auto B3IRGenerator::addOp<OpType::I64TruncUF32>(ExpressionType arg, ExpressionType& result) -> PartialResult
{
    Value* max = constant(Float, bitwise_cast<uint32_t>(static_cast<float>(std::numeric_limits<int64_t>::min()) * static_cast<float>(-2.0)));
    Value* min = constant(Float, bitwise_cast<uint32_t>(static_cast<float>(-1.0)));
    Value* outOfBounds = m_currentBlock->appendNew<Value>(m_proc, BitAnd, origin(),
        m_currentBlock->appendNew<Value>(m_proc, LessThan, origin(), arg, max),
        m_currentBlock->appendNew<Value>(m_proc, GreaterThan, origin(), arg, min));
    outOfBounds = m_currentBlock->appendNew<Value>(m_proc, Equal, origin(), outOfBounds, constant(Int32, 0));
    CheckValue* trap = m_currentBlock->appendNew<CheckValue>(m_proc, Check, origin(), outOfBounds);
    trap->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) {
        this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsTrunc);
    });

    Value* signBitConstant = nullptr;
    if (isX86()) {
        // Since x86 doesn't have an instruction to convert floating point numbers to unsigned integers, we at least try to do the
        // smart thing if the number would be positive anyway as a signed integer. Since we cannot materialize constants into FPRs,
        // we have B3 do it so we can pool them if needed.
        signBitConstant = constant(Float, bitwise_cast<uint32_t>(static_cast<float>(std::numeric_limits<uint64_t>::max() - std::numeric_limits<int64_t>::max())));
    }
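    // Here signBitConstant is 2^63 as a float, which is exact since it is a
    // power of two. We assume truncateFloatToUint64 uses it the same way as
    // the double variant: direct signed conversion below 2^63, bias and
    // restore the high bit at or above it.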
    PatchpointValue* patchpoint = m_currentBlock->appendNew<PatchpointValue>(m_proc, Int64, origin());
    patchpoint->append(arg, ValueRep::SomeRegister);
    if (isX86()) {
        patchpoint->append(signBitConstant, ValueRep::SomeRegister);
        patchpoint->numFPScratchRegisters = 1;
    }
    patchpoint->clobber(RegisterSet::macroScratchRegisters());
    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
        AllowMacroScratchRegisterUsage allowScratch(jit);
        FPRReg scratch = InvalidFPRReg;
        FPRReg constant = InvalidFPRReg;
        if (isX86()) {
            scratch = params.fpScratch(0);
            constant = params[2].fpr();
        }
        jit.truncateFloatToUint64(params[1].fpr(), params[0].gpr(), scratch, constant);
    });
    patchpoint->effects = Effects::none();
    result = patchpoint;
    return { };
}

} } // namespace JSC::Wasm

#include "WasmB3IRGeneratorInlines.h"

#endif // ENABLE(WEBASSEMBLY)