/*
 * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCode.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "FTLForOSREntryJITCode.h"
#include "JSCInlines.h"
#include "TrackedReferences.h"

namespace JSC { namespace DFG {

JITCode::JITCode()
    : DirectJITCode(JITType::DFGJIT)
#if ENABLE(FTL_JIT)
    , osrEntryRetry(0)
    , abandonOSREntry(false)
#endif // ENABLE(FTL_JIT)
{
}

JITCode::~JITCode()
{
}

CommonData* JITCode::dfgCommon()
{
    return &common;
}

JITCode* JITCode::dfg()
{
    return this;
}

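// Once compilation has finished populating the metadata, trim the excess
// capacity of the backing vectors to reduce this JITCode's memory footprint.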
void JITCode::shrinkToFit()
{
    common.shrinkToFit();
    osrEntry.shrinkToFit();
    osrExit.shrinkToFit();
    speculationRecovery.shrinkToFit();
    minifiedDFG.prepareAndShrink();
    variableEventStream.shrinkToFit();
}

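// Replays the variable event stream up to streamIndex to rebuild, for each
// operand at codeOrigin, a ValueRecovery describing where that operand's
// value can be found (register, stack slot, or constant).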
void JITCode::reconstruct(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex,
    Operands<ValueRecovery>& result)
{
    variableEventStream.reconstruct(
        codeBlock, codeOrigin, minifiedDFG, streamIndex, result);
}

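// Convenience overload: reconstructs the recoveries as above, then executes
// each recovery against the given ExecState to produce concrete JSValues.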
void JITCode::reconstruct(
    ExecState* exec, CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex,
    Operands<Optional<JSValue>>& result)
{
    Operands<ValueRecovery> recoveries;
    reconstruct(codeBlock, codeOrigin, streamIndex, recoveries);

    result = Operands<Optional<JSValue>>(OperandsLike, recoveries);
    for (size_t i = result.size(); i--;)
        result[i] = recoveries[i].recover(exec);
}

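// Finds the exception-handler OSR exit for the given call site and returns
// the set of registers holding live values at that exit, which must be
// preserved when the handler is invoked. Returns the empty set if no such
// exit exists.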
RegisterSet JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock* codeBlock, CallSiteIndex callSiteIndex)
{
    for (OSRExit& exit : osrExit) {
        if (exit.isExceptionHandler() && exit.m_exceptionHandlerCallSiteIndex.bits() == callSiteIndex.bits()) {
            Operands<ValueRecovery> valueRecoveries;
            reconstruct(codeBlock, exit.m_codeOrigin, exit.m_streamIndex, valueRecoveries);
            RegisterSet liveAtOSRExit;
            for (size_t index = 0; index < valueRecoveries.size(); ++index) {
                const ValueRecovery& recovery = valueRecoveries[index];
                if (recovery.isInRegisters()) {
                    if (recovery.isInGPR())
                        liveAtOSRExit.set(recovery.gpr());
                    else if (recovery.isInFPR())
                        liveAtOSRExit.set(recovery.fpr());
#if USE(JSVALUE32_64)
                    else if (recovery.isInJSValueRegs()) {
                        liveAtOSRExit.set(recovery.payloadGPR());
                        liveAtOSRExit.set(recovery.tagGPR());
                    }
#endif
                    else
                        RELEASE_ASSERT_NOT_REACHED();
                }
            }

            return liveAtOSRExit;
        }
    }

    return { };
}

#if ENABLE(FTL_JIT)
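// Returns true if the tier-up counter has crossed its threshold, i.e. this
// DFG code block has run hot enough to consider an FTL compile.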
bool JITCode::checkIfOptimizationThresholdReached(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    return tierUpCounter.checkIfThresholdCrossedAndSet(codeBlock);
}

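// Zeroes the tier-up threshold so that the very next invocation takes the
// tier-up slow path.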
void JITCode::optimizeNextInvocation(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": FTL-optimizing next invocation.\n");
    tierUpCounter.setNewThreshold(0, codeBlock);
}

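// Backs off from FTL tier-up indefinitely, e.g. after an FTL compile has
// failed.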
void JITCode::dontOptimizeAnytimeSoon(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": Not FTL-optimizing anytime soon.\n");
    tierUpCounter.deferIndefinitely();
}

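// Schedules tier-up after a warm-up period, with the raw threshold adjusted
// via the baseline code block's heuristics.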
void JITCode::optimizeAfterWarmUp(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": FTL-optimizing after warm-up.\n");
    CodeBlock* baseline = codeBlock->baselineVersion();
    tierUpCounter.setNewThreshold(
        baseline->adjustedCounterValue(Options::thresholdForFTLOptimizeAfterWarmUp()),
        baseline);
}

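// Like optimizeAfterWarmUp(), but using the shorter "optimize soon"
// threshold.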
void JITCode::optimizeSoon(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": FTL-optimizing soon.\n");
    CodeBlock* baseline = codeBlock->baselineVersion();
    tierUpCounter.setNewThreshold(
        baseline->adjustedCounterValue(Options::thresholdForFTLOptimizeSoon()),
        baseline);
}

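// Called concurrently (e.g. by a compiler thread) to force the next tier-up
// counter check onto the slow path.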
void JITCode::forceOptimizationSlowPathConcurrently(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": Forcing slow path concurrently for FTL entry.\n");
    tierUpCounter.forceSlowPathConcurrently();
}

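// Reschedules tier-up based on how the FTL compile attempt ended: success
// triggers entry on the next invocation, failure backs off indefinitely, and
// deferral or invalidation retries after warm-up.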
void JITCode::setOptimizationThresholdBasedOnCompilationResult(
    CodeBlock* codeBlock, CompilationResult result)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    switch (result) {
    case CompilationSuccessful:
        optimizeNextInvocation(codeBlock);
        codeBlock->baselineVersion()->m_hasBeenCompiledWithFTL = true;
        return;
    case CompilationFailed:
        dontOptimizeAnytimeSoon(codeBlock);
        codeBlock->baselineVersion()->m_didFailFTLCompilation = true;
        return;
    case CompilationDeferred:
        optimizeAfterWarmUp(codeBlock);
        return;
    case CompilationInvalidated:
        // This is weird - it will only happen when the DFG code block (i.e.
        // the code block that this JITCode belongs to) is also invalidated,
        // so it doesn't really matter what we do. But we do the right thing
        // anyway. Note that counting the reoptimization here means we might
        // count it twice, but that's generally OK: it's better to overcount
        // reoptimizations than to undercount them.
        codeBlock->baselineVersion()->countReoptimization();
        optimizeAfterWarmUp(codeBlock);
        return;
    }
    RELEASE_ASSERT_NOT_REACHED();
}

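// Installs the FTL code block that future OSR entries from this DFG code
// will jump into.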
void JITCode::setOSREntryBlock(VM& vm, const JSCell* owner, CodeBlock* osrEntryBlock)
{
    if (Options::verboseOSR()) {
        dataLog(RawPointer(this), ": Setting OSR entry block to ", RawPointer(osrEntryBlock), "\n");
        dataLog("OSR entries will go to ", osrEntryBlock->jitCode()->ftlForOSREntry()->addressForCall(ArityCheckNotRequired), "\n");
    }
    m_osrEntryBlock.set(vm, owner, osrEntryBlock);
}

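// Throws away the current OSR entry block and resets the tier-up state so
// that another entry attempt can be made later.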
void JITCode::clearOSREntryBlockAndResetThresholds(CodeBlock* dfgCodeBlock)
{
    ASSERT(m_osrEntryBlock);

    unsigned osrEntryBytecode = m_osrEntryBlock->jitCode()->ftlForOSREntry()->bytecodeIndex();
    m_osrEntryBlock.clear();
    osrEntryRetry = 0;
    tierUpEntryTriggers.set(osrEntryBytecode, JITCode::TriggerReason::DontTrigger);
    setOptimizationThresholdBasedOnCompilationResult(dfgCodeBlock, CompilationDeferred);
}
#endif // ENABLE(FTL_JIT)

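// Debug validation: checks that every heap reference recorded in this
// JITCode's metadata is present in the given TrackedReferences set.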
void JITCode::validateReferences(const TrackedReferences& trackedReferences)
{
    common.validateReferences(trackedReferences);

    for (OSREntryData& entry : osrEntry) {
        for (unsigned i = entry.m_expectedValues.size(); i--;)
            entry.m_expectedValues[i].validateReferences(trackedReferences);
    }

    minifiedDFG.validateReferences(trackedReferences);
}

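// Maps a machine PC to a CodeOrigin by searching the OSR exit thunks; returns
// nullopt if the PC does not fall within any exit's executable memory.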
Optional<CodeOrigin> JITCode::findPC(CodeBlock*, void* pc)
{
    for (OSRExit& exit : osrExit) {
        if (ExecutableMemoryHandle* handle = exit.m_code.executableMemory()) {
            if (handle->start().untaggedPtr() <= pc && pc < handle->end().untaggedPtr())
                return Optional<CodeOrigin>(exit.m_codeOriginForExitProfile);
        }
    }

    return WTF::nullopt;
}

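// Sorts the OSR entry data by bytecode index, so that entries can later be
// found by binary search, and verifies the ordering in debug builds.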
void JITCode::finalizeOSREntrypoints()
{
    auto comparator = [] (const auto& a, const auto& b) {
        return a.m_bytecodeIndex < b.m_bytecodeIndex;
    };
    std::sort(osrEntry.begin(), osrEntry.end(), comparator);

#if !ASSERT_DISABLED
    auto verifyIsSorted = [&] (auto& osrVector) {
        for (unsigned i = 0; i + 1 < osrVector.size(); ++i)
            ASSERT(osrVector[i].m_bytecodeIndex <= osrVector[i + 1].m_bytecodeIndex);
    };
    verifyIsSorted(osrEntry);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)