1 | /* |
2 | * Copyright (C) 2017-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "WasmOMGPlan.h" |
28 | |
29 | #if ENABLE(WEBASSEMBLY) |
30 | |
31 | #include "B3Compilation.h" |
32 | #include "B3OpaqueByproducts.h" |
33 | #include "JSCInlines.h" |
34 | #include "LinkBuffer.h" |
35 | #include "WasmB3IRGenerator.h" |
36 | #include "WasmCallee.h" |
37 | #include "WasmContext.h" |
38 | #include "WasmInstance.h" |
39 | #include "WasmMachineThreads.h" |
40 | #include "WasmMemory.h" |
41 | #include "WasmNameSection.h" |
42 | #include "WasmSignatureInlines.h" |
43 | #include "WasmValidate.h" |
44 | #include "WasmWorklist.h" |
45 | #include <wtf/DataLog.h> |
46 | #include <wtf/Locker.h> |
47 | #include <wtf/MonotonicTime.h> |
48 | #include <wtf/StdLibExtras.h> |
49 | #include <wtf/ThreadMessage.h> |
50 | |
51 | namespace JSC { namespace Wasm { |
52 | |
namespace WasmOMGPlanInternal {
// Gates the dataLogLnIf() debug traces in this file; flip to true for verbose
// OMG tier-up logging.
static constexpr bool verbose = false;
}
56 | |
// Constructs a plan to recompile one already-runnable function (identified by
// its module-local index, i.e. excluding imports) with the optimizing OMG tier.
// The plan pins the module and the code block for the requested memory mode;
// `task` is the completion callback handed to the base Plan.
OMGPlan::OMGPlan(Context* context, Ref<Module>&& module, uint32_t functionIndex, MemoryMode mode, CompletionTask&& task)
    : Base(context, makeRef(const_cast<ModuleInformation&>(module->moduleInformation())), WTFMove(task))
    // NOTE: `module` is moved into m_module here, and the next initializer
    // reads m_module — this relies on m_module being declared before
    // m_codeBlock in the class (member-init order), so keep this ordering.
    , m_module(WTFMove(module))
    , m_codeBlock(*m_module->codeBlockFor(mode))
    , m_functionIndex(functionIndex)
{
    setMode(mode);
    // Tier-up only makes sense for code that already runs at a lower tier.
    ASSERT(m_codeBlock->runnable());
    ASSERT(m_codeBlock.ptr() == m_module->codeBlockFor(m_mode));
    dataLogLnIf(WasmOMGPlanInternal::verbose, "Starting OMG plan for " , functionIndex, " of module: " , RawPointer(&m_module.get()));
}
68 | |
// Compiles m_functionIndex through the optimizing (OMG) B3 pipeline, publishes
// the resulting callee, and repatches every known wasm-to-wasm call site so
// subsequent calls target the optimized code. The ordering of publication,
// instruction-cache reset, and repatching below is deliberate — see the inline
// comments before changing anything.
void OMGPlan::work(CompilationEffort)
{
    ASSERT(m_codeBlock->runnable());
    ASSERT(m_codeBlock.ptr() == m_module->codeBlockFor(mode()));
    const FunctionData& function = m_moduleInformation->functions[m_functionIndex];

    // m_functionIndex counts only module-defined functions; the "index space"
    // also includes imported functions, which occupy the lower indices.
    const uint32_t functionIndexSpace = m_functionIndex + m_module->moduleInformation().importFunctionCount();
    ASSERT(functionIndexSpace < m_module->moduleInformation().functionIndexSpaceSize());

    SignatureIndex signatureIndex = m_moduleInformation->internalFunctionSignatureIndices[m_functionIndex];
    const Signature& signature = SignatureInformation::get(signatureIndex);
    // The function was validated when first compiled; re-check in debug builds only.
    ASSERT(validateFunction(function, signature, m_moduleInformation.get()));

    Vector<UnlinkedWasmToWasmCall> unlinkedCalls;
    unsigned osrEntryScratchBufferSize;
    CompilationContext context;
    // Last argument UINT32_MAX presumably means "no loop index for OSR entry"
    // since this is a whole-function tier-up — confirm against
    // parseAndCompile's declaration in WasmB3IRGenerator.h.
    auto parseAndCompileResult = parseAndCompile(context, function, signature, unlinkedCalls, osrEntryScratchBufferSize, m_moduleInformation.get(), m_mode, CompilationMode::OMGMode, m_functionIndex, UINT32_MAX);

    if (UNLIKELY(!parseAndCompileResult)) {
        // NOTE(review): there is no separator between error() and
        // "when trying to tier up " — presumably the error string ends with
        // whitespace; verify, otherwise the message reads "...errorwhen...".
        fail(holdLock(m_lock), makeString(parseAndCompileResult.error(), "when trying to tier up " , String::number(m_functionIndex)));
        return;
    }

    Entrypoint omgEntrypoint;
    // JITCompilationCanFail: executable-memory exhaustion is reported as a
    // plan failure below rather than crashing.
    LinkBuffer linkBuffer(*context.wasmEntrypointJIT, nullptr, JITCompilationCanFail);
    if (UNLIKELY(linkBuffer.didFailToAllocate())) {
        Base::fail(holdLock(m_lock), makeString("Out of executable memory while tiering up function at index " , String::number(m_functionIndex)));
        return;
    }

    // Finalize the generated code; the byproducts keep B3-owned data (e.g.
    // data sections) alive for the lifetime of the compilation.
    omgEntrypoint.compilation = makeUnique<B3::Compilation>(
        FINALIZE_WASM_CODE_FOR_MODE(CompilationMode::OMGMode, linkBuffer, B3CompilationPtrTag, "WebAssembly OMG function[%i] %s name %s" , m_functionIndex, signature.toString().ascii().data(), makeString(IndexOrName(functionIndexSpace, m_moduleInformation->nameSection->get(functionIndexSpace))).ascii().data()),
        WTFMove(context.wasmEntrypointByproducts));

    omgEntrypoint.calleeSaveRegisters = WTFMove(parseAndCompileResult.value()->entrypoint.calleeSaveRegisters);

    MacroAssemblerCodePtr<WasmEntryPtrTag> entrypoint;
    {
        ASSERT(m_codeBlock.ptr() == m_module->codeBlockFor(mode()));
        Ref<OMGCallee> callee = OMGCallee::create(WTFMove(omgEntrypoint), functionIndexSpace, m_moduleInformation->nameSection->get(functionIndexSpace), WTFMove(unlinkedCalls));
        // Patch the compiled code's self-reference so it reports the new
        // callee (e.g. for stack traces / unwinding).
        MacroAssembler::repatchPointer(parseAndCompileResult.value()->calleeMoveLocation, CalleeBits::boxWasm(callee.ptr()));
        // A function should only ever be OMG-compiled once.
        ASSERT(!m_codeBlock->m_omgCallees[m_functionIndex]);
        entrypoint = callee->entrypoint();

        // We want to make sure we publish our callee at the same time as we link our callsites. This enables us to ensure we
        // always call the fastest code. Any function linked after us will see our new code and the new callsites, which they
        // will update. It's also ok if they publish their code before we reset the instruction caches because after we release
        // the lock our code is ready to be published too.
        LockHolder holder(m_codeBlock->m_lock);
        m_codeBlock->m_omgCallees[m_functionIndex] = callee.copyRef();
        {
            // Point the lower-tier callees (BBQ and/or LLInt) at their OMG
            // replacement and mark their tier-up state as finished, under each
            // callee's own tier-up lock.
            if (BBQCallee* bbqCallee = m_codeBlock->m_bbqCallees[m_functionIndex].get()) {
                auto locker = holdLock(bbqCallee->tierUpCount()->getLock());
                bbqCallee->setReplacement(callee.copyRef());
                bbqCallee->tierUpCount()->m_compilationStatusForOMG = TierUpCount::CompilationStatus::Compiled;
            }
            if (LLIntCallee* llintCallee = m_codeBlock->m_llintCallees[m_functionIndex].get()) {
                auto locker = holdLock(llintCallee->tierUpCounter().m_lock);
                llintCallee->setReplacement(callee.copyRef());
                llintCallee->tierUpCounter().m_compilationStatus = LLIntTierUpCounter::CompilationStatus::Compiled;
            }
        }
        // Link the new code's own outgoing wasm-to-wasm calls: imports go
        // through exit stubs, internal functions go straight to the current
        // best entrypoint for that index.
        for (auto& call : callee->wasmToWasmCallsites()) {
            MacroAssemblerCodePtr<WasmEntryPtrTag> entrypoint;
            if (call.functionIndexSpace < m_module->moduleInformation().importFunctionCount())
                entrypoint = m_codeBlock->m_wasmToWasmExitStubs[call.functionIndexSpace].code();
            else
                entrypoint = m_codeBlock->wasmEntrypointCalleeFromFunctionIndexSpace(call.functionIndexSpace).entrypoint().retagged<WasmEntryPtrTag>();

            MacroAssembler::repatchNearCall(call.callLocation, CodeLocationLabel<WasmEntryPtrTag>(entrypoint));
        }
    }

    // It's important to make sure we do this before we make any of the code we just compiled visible. If we didn't, we could end up
    // where we are tiering up some function A to A' and we repatch some function B to call A' instead of A. Another CPU could see
    // the updates to B but still not have reset its cache of A', which would lead to all kinds of badness.
    resetInstructionCacheOnAllThreads();
    WTF::storeStoreFence(); // This probably isn't necessary but it's good to be paranoid.

    // From here on, indirect calls through the table reach the OMG code.
    m_codeBlock->m_wasmIndirectCallEntryPoints[m_functionIndex] = entrypoint;
    {
        LockHolder holder(m_codeBlock->m_lock);

        // Redirect every recorded direct call that targets this function's
        // index-space slot to the new entrypoint.
        auto repatchCalls = [&] (const Vector<UnlinkedWasmToWasmCall>& callsites) {
            for (auto& call : callsites) {
                dataLogLnIf(WasmOMGPlanInternal::verbose, "Considering repatching call at: " , RawPointer(call.callLocation.dataLocation()), " that targets " , call.functionIndexSpace);
                if (call.functionIndexSpace == functionIndexSpace) {
                    dataLogLnIf(WasmOMGPlanInternal::verbose, "Repatching call at: " , RawPointer(call.callLocation.dataLocation()), " to " , RawPointer(entrypoint.executableAddress()));
                    MacroAssembler::repatchNearCall(call.callLocation, CodeLocationLabel<WasmEntryPtrTag>(entrypoint));
                }
            }
        };

        // Walk every function's callsite list, including those of any
        // replacement and OSR-entry callees that lower tiers have produced,
        // so no caller is left targeting the old code.
        for (unsigned i = 0; i < m_codeBlock->m_wasmToWasmCallsites.size(); ++i) {
            repatchCalls(m_codeBlock->m_wasmToWasmCallsites[i]);
            if (LLIntCallee* llintCallee = m_codeBlock->m_llintCallees[i].get()) {
                if (JITCallee* replacementCallee = llintCallee->replacement())
                    repatchCalls(replacementCallee->wasmToWasmCallsites());
                if (OMGForOSREntryCallee* osrEntryCallee = llintCallee->osrEntryCallee())
                    repatchCalls(osrEntryCallee->wasmToWasmCallsites());
            }
            if (BBQCallee* bbqCallee = m_codeBlock->m_bbqCallees[i].get()) {
                if (OMGCallee* replacementCallee = bbqCallee->replacement())
                    repatchCalls(replacementCallee->wasmToWasmCallsites());
                if (OMGForOSREntryCallee* osrEntryCallee = bbqCallee->osrEntryCallee())
                    repatchCalls(osrEntryCallee->wasmToWasmCallsites());
            }
        }
    }

    dataLogLnIf(WasmOMGPlanInternal::verbose, "Finished OMG " , m_functionIndex);
    complete(holdLock(m_lock));
}
182 | |
183 | } } // namespace JSC::Wasm |
184 | |
185 | #endif // ENABLE(WEBASSEMBLY) |
186 | |