/*
 * Copyright (C) 2012-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "CallLinkStatus.h"

#include "BytecodeStructs.h"
#include "CallLinkInfo.h"
#include "CodeBlock.h"
#include "DFGJITCode.h"
#include "InlineCallFrame.h"
#include "InterpreterInlines.h"
#include "LLIntCallLinkInfo.h"
#include "JSCInlines.h"
#include <wtf/CommaPrinter.h>
#include <wtf/ListDump.h>

namespace JSC {

namespace CallLinkStatusInternal {
static const bool verbose = false;
}

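// Builds a status for a constant callee. Anything that is not a cell cannot be a useful
// call target, so for non-cell values we only record that the call could take the slow path.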
CallLinkStatus::CallLinkStatus(JSValue value)
    : m_couldTakeSlowPath(false)
    , m_isProved(false)
{
    if (!value || !value.isCell()) {
        m_couldTakeSlowPath = true;
        return;
    }

    m_variants.append(CallVariant(value.asCell()));
}

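// In the LLInt we only know the last callee observed at this call site. If this site has
// already exited frequently because the callee check failed (BadCell), we don't trust that
// single observation and just report a slow-path call instead.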
CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJSLocker&, CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
#if ENABLE(DFG_JIT)
    if (profiledBlock->unlinkedCodeBlock()->hasExitSite(DFG::FrequentExitSite(bytecodeIndex, BadCell))) {
        // We could force this to be a closure call, but instead we'll just assume that it
        // takes slow path.
        return takesSlowPath();
    }
#endif

    auto instruction = profiledBlock->instructions().at(bytecodeIndex);
    OpcodeID op = instruction->opcodeID();

    LLIntCallLinkInfo* callLinkInfo;
    switch (op) {
    case op_call:
        callLinkInfo = &instruction->as<OpCall>().metadata(profiledBlock).m_callLinkInfo;
        break;
    case op_construct:
        callLinkInfo = &instruction->as<OpConstruct>().metadata(profiledBlock).m_callLinkInfo;
        break;
    case op_tail_call:
        callLinkInfo = &instruction->as<OpTailCall>().metadata(profiledBlock).m_callLinkInfo;
        break;
    default:
        return CallLinkStatus();
    }

    return CallLinkStatus(callLinkInfo->lastSeenCallee());
}

CallLinkStatus CallLinkStatus::computeFor(
    CodeBlock* profiledBlock, unsigned bytecodeIndex, const ICStatusMap& map,
    ExitSiteData exitSiteData)
{
    ConcurrentJSLocker locker(profiledBlock->m_lock);

    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(map);
    UNUSED_PARAM(exitSiteData);
#if ENABLE(DFG_JIT)
    CallLinkInfo* callLinkInfo = map.get(CodeOrigin(bytecodeIndex)).callLinkInfo;
    if (!callLinkInfo) {
        if (exitSiteData.takesSlowPath)
            return takesSlowPath();
        return computeFromLLInt(locker, profiledBlock, bytecodeIndex);
    }

    return computeFor(locker, profiledBlock, *callLinkInfo, exitSiteData);
#else
    return CallLinkStatus();
#endif
}

CallLinkStatus CallLinkStatus::computeFor(
    CodeBlock* profiledBlock, unsigned bytecodeIndex, const ICStatusMap& map)
{
    return computeFor(profiledBlock, bytecodeIndex, map, computeExitSiteData(profiledBlock, bytecodeIndex));
}

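// ExitSiteData summarizes the OSR exit history at this bytecode index: takesSlowPath is set
// when we have frequently exited with BadType or BadExecutable, and badFunction when we have
// exited because the callee check itself failed (BadCell). Each flag is tracked separately
// for exits taken from inlined and from non-inlined code.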
CallLinkStatus::ExitSiteData CallLinkStatus::computeExitSiteData(CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
    ExitSiteData exitSiteData;
#if ENABLE(DFG_JIT)
    UnlinkedCodeBlock* codeBlock = profiledBlock->unlinkedCodeBlock();
    ConcurrentJSLocker locker(codeBlock->m_lock);

    auto takesSlowPath = [&] (ExitingInlineKind inlineKind) -> ExitFlag {
        return ExitFlag(
            codeBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadType, ExitFromAnything, inlineKind))
            || codeBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadExecutable, ExitFromAnything, inlineKind)),
            inlineKind);
    };

    auto badFunction = [&] (ExitingInlineKind inlineKind) -> ExitFlag {
        return ExitFlag(codeBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell, ExitFromAnything, inlineKind)), inlineKind);
    };

    exitSiteData.takesSlowPath |= takesSlowPath(ExitFromNotInlined);
    exitSiteData.takesSlowPath |= takesSlowPath(ExitFromInlined);
    exitSiteData.badFunction |= badFunction(ExitFromNotInlined);
    exitSiteData.badFunction |= badFunction(ExitFromInlined);
#else
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
#endif

    return exitSiteData;
}

#if ENABLE(JIT)
CallLinkStatus CallLinkStatus::computeFor(
    const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo)
{
    // We don't really need this, but anytime we have to debug this code, it becomes indispensable.
    UNUSED_PARAM(profiledBlock);

    CallLinkStatus result = computeFromCallLinkInfo(locker, callLinkInfo);
    result.m_maxNumArguments = callLinkInfo.maxNumArguments();
    return result;
}

CallLinkStatus CallLinkStatus::computeFromCallLinkInfo(
    const ConcurrentJSLocker&, CallLinkInfo& callLinkInfo)
{
    // We cannot tell you anything about direct calls.
    if (callLinkInfo.isDirect())
        return CallLinkStatus();

    if (callLinkInfo.clearedByGC() || callLinkInfo.clearedByVirtual())
        return takesSlowPath();

    // Note that despite requiring that the locker is held, this code is racy with respect
    // to the CallLinkInfo: it may get cleared while this code runs! This is because
    // CallLinkInfo::unlink() may be called from a different CodeBlock than the one that owns
    // the CallLinkInfo and currently we save space by not having CallLinkInfos know who owns
    // them. So, there is no way for either the caller of CallLinkInfo::unlink() or unlink()
    // itself to figure out which lock to lock.
    //
    // Fortunately, that doesn't matter. The only things we ask of CallLinkInfo - the slow
    // path count, the stub, and the target - can all be asked racily. Stubs and targets can
    // only be deleted at the next GC, so if we load a non-null one, then it must contain data
    // that is still marginally valid (i.e. the pointers ain't stale). This kind of raciness
    // is probably OK for now.

    // PolymorphicCallStubRoutine is a GCAwareJITStubRoutine, so if non-null, it will stay alive
    // until the next GC even if the CallLinkInfo is concurrently cleared. Also, the variants list
    // is never mutated after the PolymorphicCallStubRoutine is instantiated. We have some
    // conservative fencing in place to make sure that we see the variants list after construction.
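    // The ordering below is, roughly, the following (a sketch of the idea rather than extra code):
    //
    //     stub = callLinkInfo.stub();  // single racy load of the published pointer
    //     loadLoadFence();             // pairs with the fencing done when the stub was installed
    //     edges = stub->edges();       // immutable once the stub has been constructed
    //
    // so even if the CallLinkInfo is unlinked underneath us, we only ever read data that was
    // fully constructed before the stub pointer became visible to us.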
    if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub()) {
        WTF::loadLoadFence();

        if (!stub->hasEdges()) {
            // This means we have an FTL profile, which has incomplete information.
            //
            // It's not clear if takesSlowPath() or CallLinkStatus() are appropriate here, but I
            // think that takesSlowPath() is a narrow winner.
            //
            // Either way, this is telling us that the FTL had been led to believe that it's
            // profitable not to inline anything.
            return takesSlowPath();
        }

        CallEdgeList edges = stub->edges();

        // Now that we've loaded the edges list, there are no further concurrency concerns. We will
        // just manipulate and prune this list to our liking - mostly removing entries that are too
        // infrequent and ensuring that it's sorted in descending order of frequency.

        RELEASE_ASSERT(edges.size());

        std::sort(
            edges.begin(), edges.end(),
            [] (CallEdge a, CallEdge b) {
                return a.count() > b.count();
            });
        RELEASE_ASSERT(edges.first().count() >= edges.last().count());

        double totalCallsToKnown = 0;
        double totalCallsToUnknown = callLinkInfo.slowPathCount();
        CallVariantList variants;
        for (size_t i = 0; i < edges.size(); ++i) {
            CallEdge edge = edges[i];
            // If the call is at the tail of the distribution, then we don't optimize it and we
            // treat it as if it was a call to something unknown. We define the tail as being either
            // a call that doesn't belong to the N most frequent callees (N =
            // maxPolymorphicCallVariantsForInlining) or that has a total call count that is too
            // small.
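            //
            // For illustration only (made-up numbers): with edge counts of { A: 1000, B: 800, C: 3 },
            // frequentCallThreshold = 10 and a slow path count of 5, A and B become variants with
            // totalCallsToKnown = 1800, while C's 3 calls join the slow path count to give
            // totalCallsToUnknown = 8. The ratio check below then asks whether 1800 / 8 clears
            // Options::minimumCallToKnownRate().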
            if (i >= Options::maxPolymorphicCallVariantsForInlining()
                || edge.count() < Options::frequentCallThreshold())
                totalCallsToUnknown += edge.count();
            else {
                totalCallsToKnown += edge.count();
                variants.append(edge.callee());
            }
        }

        // Bail if we didn't find any calls that qualified.
        RELEASE_ASSERT(!!totalCallsToKnown == !!variants.size());
        if (variants.isEmpty())
            return takesSlowPath();

        // We require that the distribution of callees is skewed towards a handful of common ones.
        if (totalCallsToKnown / totalCallsToUnknown < Options::minimumCallToKnownRate())
            return takesSlowPath();

        RELEASE_ASSERT(totalCallsToKnown);
        RELEASE_ASSERT(variants.size());

        CallLinkStatus result;
        result.m_variants = variants;
        result.m_couldTakeSlowPath = !!totalCallsToUnknown;
        result.m_isBasedOnStub = true;
        return result;
    }

    CallLinkStatus result;

    if (JSObject* target = callLinkInfo.lastSeenCallee()) {
        CallVariant variant(target);
        if (callLinkInfo.hasSeenClosure())
            variant = variant.despecifiedClosure();
        result.m_variants.append(variant);
    }

    result.m_couldTakeSlowPath = !!callLinkInfo.slowPathCount();

    return result;
}

CallLinkStatus CallLinkStatus::computeFor(
    const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo,
    ExitSiteData exitSiteData, ExitingInlineKind inlineKind)
{
    CallLinkStatus result = computeFor(locker, profiledBlock, callLinkInfo);
    result.accountForExits(exitSiteData, inlineKind);
    return result;
}

void CallLinkStatus::accountForExits(ExitSiteData exitSiteData, ExitingInlineKind inlineKind)
{
    if (exitSiteData.badFunction.isSet(inlineKind)) {
        if (isBasedOnStub()) {
            // If we have a polymorphic stub, then having an exit site is not quite so useful. In
            // most cases, the information in the stub has higher fidelity.
            makeClosureCall();
        } else {
            // We might not have a polymorphic stub for any number of reasons. When this happens, we
            // are in less certain territory, so exit sites mean a lot.
            m_couldTakeSlowPath = true;
        }
    }

    if (exitSiteData.takesSlowPath.isSet(inlineKind))
        m_couldTakeSlowPath = true;
}

CallLinkStatus CallLinkStatus::computeFor(
    CodeBlock* profiledBlock, CodeOrigin codeOrigin,
    const ICStatusMap& baselineMap, const ICStatusContextStack& optimizedStack)
{
    if (CallLinkStatusInternal::verbose)
        dataLog("Figuring out call profiling for ", codeOrigin, "\n");
    ExitSiteData exitSiteData = computeExitSiteData(profiledBlock, codeOrigin.bytecodeIndex());
    if (CallLinkStatusInternal::verbose) {
        dataLog("takesSlowPath = ", exitSiteData.takesSlowPath, "\n");
        dataLog("badFunction = ", exitSiteData.badFunction, "\n");
    }

    for (const ICStatusContext* context : optimizedStack) {
        if (CallLinkStatusInternal::verbose)
            dataLog("Examining status in ", pointerDump(context->optimizedCodeBlock), "\n");
        ICStatus status = context->get(codeOrigin);

        // If we have both a CallLinkStatus and a callLinkInfo for the same origin, then it means
        // one of two things:
        //
        // 1) We initially thought that we'd be able to inline this call so we recorded a status
        //    but then we realized that it was pointless and gave up and emitted a normal call
        //    anyway.
        //
        // 2) We did a polymorphic call inline that had a slow path case.
        //
        // In the first case, it's essential that we use the callLinkInfo, since the status may
        // be polymorphic but the link info benefits from polyvariant profiling.
        //
        // In the second case, it's essential that we use the status, since the callLinkInfo
        // corresponds to the slow case.
        //
        // It would be annoying to distinguish those two cases. However, we know that:
        //
        // If the first case happens in the FTL, then it means that even with polyvariant
        // profiling, we failed to do anything useful. That suggests that in the FTL, it's OK to
        // prioritize the status. That status will again tell us to not do anything useful.
        //
        // The second case only happens in the FTL.
        //
        // Therefore, we prefer the status in the FTL and the info in the DFG.
        //
        // Luckily, this case doesn't matter for the other ICStatuses, since they never do the
        // fast-path-slow-path control-flow-diamond style of IC inlining. It's either all fast
        // path or it's a full IC. So, for them, if there is an IC status then it means case (1).

        bool checkStatusFirst = context->optimizedCodeBlock->jitType() == JITType::FTLJIT;

        auto bless = [&] (CallLinkStatus& result) {
            if (!context->isInlined(codeOrigin))
                result.merge(computeFor(profiledBlock, codeOrigin.bytecodeIndex(), baselineMap, exitSiteData));
        };

        auto checkInfo = [&] () -> CallLinkStatus {
            if (!status.callLinkInfo)
                return CallLinkStatus();

            if (CallLinkStatusInternal::verbose)
                dataLog("Have CallLinkInfo with CodeOrigin = ", status.callLinkInfo->codeOrigin(), "\n");
            CallLinkStatus result;
            {
                ConcurrentJSLocker locker(context->optimizedCodeBlock->m_lock);
                result = computeFor(
                    locker, context->optimizedCodeBlock, *status.callLinkInfo, exitSiteData,
                    context->inlineKind(codeOrigin));
                if (CallLinkStatusInternal::verbose)
                    dataLog("Got result: ", result, "\n");
            }
            bless(result);
            return result;
        };

        auto checkStatus = [&] () -> CallLinkStatus {
            if (!status.callStatus)
                return CallLinkStatus();
            CallLinkStatus result = *status.callStatus;
            if (CallLinkStatusInternal::verbose)
                dataLog("Have callStatus: ", result, "\n");
            result.accountForExits(exitSiteData, context->inlineKind(codeOrigin));
            bless(result);
            return result;
        };

        if (checkStatusFirst) {
            if (CallLinkStatus result = checkStatus())
                return result;
            if (CallLinkStatus result = checkInfo())
                return result;
            continue;
        }

        if (CallLinkStatus result = checkInfo())
            return result;
        if (CallLinkStatus result = checkStatus())
            return result;
    }

    return computeFor(profiledBlock, codeOrigin.bytecodeIndex(), baselineMap, exitSiteData);
}
#endif

void CallLinkStatus::setProvenConstantCallee(CallVariant variant)
{
    m_variants = CallVariantList{ variant };
    m_couldTakeSlowPath = false;
    m_isProved = true;
}

bool CallLinkStatus::isClosureCall() const
{
    for (unsigned i = m_variants.size(); i--;) {
        if (m_variants[i].isClosureCall())
            return true;
    }
    return false;
}

void CallLinkStatus::makeClosureCall()
{
    m_variants = despecifiedVariantList(m_variants);
}

bool CallLinkStatus::finalize(VM& vm)
{
    for (CallVariant& variant : m_variants) {
        if (!variant.finalize(vm))
            return false;
    }
    return true;
}

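// Merging takes the union of the two variant lists, folding together variants that
// CallVariant::merge() considers compatible, and keeps the slow-path bit sticky: if either
// status could take the slow path, so can the merged one.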
void CallLinkStatus::merge(const CallLinkStatus& other)
{
    m_couldTakeSlowPath |= other.m_couldTakeSlowPath;

    for (const CallVariant& otherVariant : other.m_variants) {
        bool found = false;
        for (CallVariant& thisVariant : m_variants) {
            if (thisVariant.merge(otherVariant)) {
                found = true;
                break;
            }
        }
        if (!found)
            m_variants.append(otherVariant);
    }
}

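// Narrows the status against a value that has since been proven: each variant is filtered
// against that value and dropped entirely if nothing of it survives.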
void CallLinkStatus::filter(VM& vm, JSValue value)
{
    m_variants.removeAllMatching(
        [&] (CallVariant& variant) -> bool {
            variant.filter(vm, value);
            return !variant;
        });
}

void CallLinkStatus::dump(PrintStream& out) const
{
    if (!isSet()) {
        out.print("Not Set");
        return;
    }

    CommaPrinter comma;

    if (m_isProved)
        out.print(comma, "Statically Proved");

    if (m_couldTakeSlowPath)
        out.print(comma, "Could Take Slow Path");

    if (m_isBasedOnStub)
        out.print(comma, "Based On Stub");

    if (!m_variants.isEmpty())
        out.print(comma, listDump(m_variants));

    if (m_maxNumArguments)
        out.print(comma, "maxNumArguments = ", m_maxNumArguments);
}

} // namespace JSC