/*
 * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCode.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "FTLForOSREntryJITCode.h"
#include "JSCInlines.h"
#include "TrackedReferences.h"

namespace JSC { namespace DFG {

JITCode::JITCode()
    : DirectJITCode(JITType::DFGJIT)
#if ENABLE(FTL_JIT)
    , osrEntryRetry(0)
    , abandonOSREntry(false)
#endif // ENABLE(FTL_JIT)
{
}

JITCode::~JITCode()
{
}

CommonData* JITCode::dfgCommon()
{
    return &common;
}

JITCode* JITCode::dfg()
{
    return this;
}

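// Trim excess capacity from the per-compilation metadata once the compiler has
// finished populating it.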
void JITCode::shrinkToFit()
{
    common.shrinkToFit();
    osrEntry.shrinkToFit();
    osrExit.shrinkToFit();
    speculationRecovery.shrinkToFit();
    minifiedDFG.prepareAndShrink();
    variableEventStream.shrinkToFit();
}

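// Rebuild the ValueRecovery for each operand at the given code origin from the
// minified graph and the variable event stream saved at compile time.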
void JITCode::reconstruct(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex,
    Operands<ValueRecovery>& result)
{
    variableEventStream.reconstruct(
        codeBlock, codeOrigin, minifiedDFG, streamIndex, result);
}

void JITCode::reconstruct(CallFrame* callFrame, CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex, Operands<Optional<JSValue>>& result)
{
    Operands<ValueRecovery> recoveries;
    reconstruct(codeBlock, codeOrigin, streamIndex, recoveries);

    result = Operands<Optional<JSValue>>(OperandsLike, recoveries);
    for (size_t i = result.size(); i--;)
        result[i] = recoveries[i].recover(callFrame);
}

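// Locate the OSR exit that serves as the exception handler for this call site
// and report which registers its value recoveries still read from.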
RegisterSet JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock* codeBlock, CallSiteIndex callSiteIndex)
{
    for (OSRExit& exit : osrExit) {
        if (exit.isExceptionHandler() && exit.m_exceptionHandlerCallSiteIndex.bits() == callSiteIndex.bits()) {
            Operands<ValueRecovery> valueRecoveries;
            reconstruct(codeBlock, exit.m_codeOrigin, exit.m_streamIndex, valueRecoveries);
            RegisterSet liveAtOSRExit;
            for (size_t index = 0; index < valueRecoveries.size(); ++index) {
                const ValueRecovery& recovery = valueRecoveries[index];
                if (recovery.isInRegisters()) {
                    if (recovery.isInGPR())
                        liveAtOSRExit.set(recovery.gpr());
                    else if (recovery.isInFPR())
                        liveAtOSRExit.set(recovery.fpr());
#if USE(JSVALUE32_64)
                    else if (recovery.isInJSValueRegs()) {
                        liveAtOSRExit.set(recovery.payloadGPR());
                        liveAtOSRExit.set(recovery.tagGPR());
                    }
#endif
                    else
                        RELEASE_ASSERT_NOT_REACHED();
                }
            }

            return liveAtOSRExit;
        }
    }

    return { };
}

#if ENABLE(FTL_JIT)
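// Returns whether the FTL tier-up counter has crossed its threshold for this
// DFG code block.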
bool JITCode::checkIfOptimizationThresholdReached(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    return tierUpCounter.checkIfThresholdCrossedAndSet(codeBlock);
}

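// The helpers below adjust the FTL tier-up counter, either scheduling the next
// FTL compilation attempt (immediately, soon, or after warm-up) or deferring it.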
void JITCode::optimizeNextInvocation(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": FTL-optimizing next invocation.\n");
    tierUpCounter.setNewThreshold(0, codeBlock);
}

void JITCode::dontOptimizeAnytimeSoon(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": Not FTL-optimizing anytime soon.\n");
    tierUpCounter.deferIndefinitely();
}

void JITCode::optimizeAfterWarmUp(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": FTL-optimizing after warm-up.\n");
    CodeBlock* baseline = codeBlock->baselineVersion();
    tierUpCounter.setNewThreshold(
        baseline->adjustedCounterValue(Options::thresholdForFTLOptimizeAfterWarmUp()),
        baseline);
}

void JITCode::optimizeSoon(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": FTL-optimizing soon.\n");
    CodeBlock* baseline = codeBlock->baselineVersion();
    tierUpCounter.setNewThreshold(
        baseline->adjustedCounterValue(Options::thresholdForFTLOptimizeSoon()),
        baseline);
}

void JITCode::forceOptimizationSlowPathConcurrently(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": Forcing slow path concurrently for FTL entry.\n");
    tierUpCounter.forceSlowPathConcurrently();
}

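// Reschedule FTL tier-up according to how the last FTL compilation attempt
// ended.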
void JITCode::setOptimizationThresholdBasedOnCompilationResult(
    CodeBlock* codeBlock, CompilationResult result)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    switch (result) {
    case CompilationSuccessful:
        optimizeNextInvocation(codeBlock);
        codeBlock->baselineVersion()->m_hasBeenCompiledWithFTL = true;
        return;
    case CompilationFailed:
        dontOptimizeAnytimeSoon(codeBlock);
        codeBlock->baselineVersion()->m_didFailFTLCompilation = true;
        return;
    case CompilationDeferred:
        optimizeAfterWarmUp(codeBlock);
        return;
    case CompilationInvalidated:
        // This is weird: it will only happen when the DFG code block (i.e. the
        // code block that this JITCode belongs to) is also invalidated, so it
        // doesn't really matter what we do. But we do the right thing anyway.
        // Note that counting the reoptimization here means we might count it
        // twice; that's generally OK, since it's better to overcount
        // reoptimizations than to undercount them.
        codeBlock->baselineVersion()->countReoptimization();
        optimizeAfterWarmUp(codeBlock);
        return;
    }
    RELEASE_ASSERT_NOT_REACHED();
}

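// Install the FTL code block that OSR entry from this DFG code block will
// jump into.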
void JITCode::setOSREntryBlock(VM& vm, const JSCell* owner, CodeBlock* osrEntryBlock)
{
    if (Options::verboseOSR()) {
        dataLog(RawPointer(this), ": Setting OSR entry block to ", RawPointer(osrEntryBlock), "\n");
        dataLog("OSR entries will go to ", osrEntryBlock->jitCode()->ftlForOSREntry()->addressForCall(ArityCheckNotRequired), "\n");
    }
    m_osrEntryBlock.set(vm, owner, osrEntryBlock);
}

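// Drop the current FTL OSR entry block, disarm its entry trigger, and reset
// the tier-up thresholds as if the compilation had been deferred.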
void JITCode::clearOSREntryBlockAndResetThresholds(CodeBlock* dfgCodeBlock)
{
    ASSERT(m_osrEntryBlock);

    BytecodeIndex osrEntryBytecode = m_osrEntryBlock->jitCode()->ftlForOSREntry()->bytecodeIndex();
    m_osrEntryBlock.clear();
    osrEntryRetry = 0;
    tierUpEntryTriggers.set(osrEntryBytecode, JITCode::TriggerReason::DontTrigger);
    setOptimizationThresholdBasedOnCompilationResult(dfgCodeBlock, CompilationDeferred);
}
#endif // ENABLE(FTL_JIT)

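// Check that the cells referenced by the common data, the OSR entry expected
// values, and the minified graph are all present in the tracked set.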
void JITCode::validateReferences(const TrackedReferences& trackedReferences)
{
    common.validateReferences(trackedReferences);

    for (OSREntryData& entry : osrEntry) {
        for (unsigned i = entry.m_expectedValues.size(); i--;)
            entry.m_expectedValues[i].validateReferences(trackedReferences);
    }

    minifiedDFG.validateReferences(trackedReferences);
}

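// Map a PC to a CodeOrigin by checking whether it falls inside the compiled
// code of one of this block's OSR exits.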
Optional<CodeOrigin> JITCode::findPC(CodeBlock*, void* pc)
{
    for (OSRExit& exit : osrExit) {
        if (ExecutableMemoryHandle* handle = exit.m_code.executableMemory()) {
            if (handle->start().untaggedPtr() <= pc && pc < handle->end().untaggedPtr())
                return Optional<CodeOrigin>(exit.m_codeOriginForExitProfile);
        }
    }

    return WTF::nullopt;
}

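// Sort the OSR entry records by bytecode index; debug builds verify the
// resulting order.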
void JITCode::finalizeOSREntrypoints()
{
    auto comparator = [] (const auto& a, const auto& b) {
        return a.m_bytecodeIndex < b.m_bytecodeIndex;
    };
    std::sort(osrEntry.begin(), osrEntry.end(), comparator);

#if !ASSERT_DISABLED
    auto verifyIsSorted = [&] (auto& osrVector) {
        for (unsigned i = 0; i + 1 < osrVector.size(); ++i)
            ASSERT(osrVector[i].m_bytecodeIndex <= osrVector[i + 1].m_bytecodeIndex);
    };
    verifyIsSorted(osrEntry);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)