1 | /* |
2 | * Copyright (C) 2011-2018 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "DFGGraph.h" |
28 | |
29 | #if ENABLE(DFG_JIT) |
30 | |
31 | #include "BytecodeKills.h" |
32 | #include "BytecodeLivenessAnalysisInlines.h" |
33 | #include "CodeBlock.h" |
34 | #include "CodeBlockWithJITType.h" |
35 | #include "DFGBackwardsCFG.h" |
36 | #include "DFGBackwardsDominators.h" |
37 | #include "DFGBlockWorklist.h" |
38 | #include "DFGCFG.h" |
39 | #include "DFGClobberSet.h" |
40 | #include "DFGClobbersExitState.h" |
41 | #include "DFGControlEquivalenceAnalysis.h" |
42 | #include "DFGDominators.h" |
43 | #include "DFGFlowIndexing.h" |
44 | #include "DFGFlowMap.h" |
45 | #include "DFGJITCode.h" |
46 | #include "DFGMayExit.h" |
47 | #include "DFGNaturalLoops.h" |
48 | #include "DFGVariableAccessDataDump.h" |
49 | #include "FullBytecodeLiveness.h" |
50 | #include "FunctionExecutableDump.h" |
51 | #include "GetterSetter.h" |
52 | #include "JIT.h" |
53 | #include "JSLexicalEnvironment.h" |
54 | #include "MaxFrameExtentForSlowPathCall.h" |
55 | #include "OperandsInlines.h" |
56 | #include "JSCInlines.h" |
57 | #include "StackAlignment.h" |
58 | #include <wtf/CommaPrinter.h> |
59 | #include <wtf/ListDump.h> |
60 | |
61 | namespace JSC { namespace DFG { |
62 | |
63 | static constexpr bool dumpOSRAvailabilityData = false; |
64 | |
65 | // Creates an array of stringized names. |
66 | static const char* dfgOpNames[] = { |
67 | #define STRINGIZE_DFG_OP_ENUM(opcode, flags) #opcode , |
68 | FOR_EACH_DFG_OP(STRINGIZE_DFG_OP_ENUM) |
69 | #undef STRINGIZE_DFG_OP_ENUM |
70 | }; |
71 | |
72 | Graph::Graph(VM& vm, Plan& plan) |
73 | : m_vm(vm) |
74 | , m_plan(plan) |
75 | , m_codeBlock(m_plan.codeBlock()) |
76 | , m_profiledBlock(m_codeBlock->alternative()) |
77 | , m_ssaCFG(std::make_unique<SSACFG>(*this)) |
78 | , m_nextMachineLocal(0) |
79 | , m_fixpointState(BeforeFixpoint) |
80 | , m_structureRegistrationState(HaveNotStartedRegistering) |
81 | , m_form(LoadStore) |
82 | , m_unificationState(LocallyUnified) |
83 | , m_refCountState(EverythingIsLive) |
84 | { |
85 | ASSERT(m_profiledBlock); |
86 | |
87 | m_hasDebuggerEnabled = m_profiledBlock->wasCompiledWithDebuggingOpcodes() || Options::forceDebuggerBytecodeGeneration(); |
88 | |
89 | m_indexingCache = std::make_unique<FlowIndexing>(*this); |
90 | m_abstractValuesCache = std::make_unique<FlowMap<AbstractValue>>(*this); |
91 | |
92 | registerStructure(vm.structureStructure.get()); |
93 | this->stringStructure = registerStructure(vm.stringStructure.get()); |
94 | this->symbolStructure = registerStructure(vm.symbolStructure.get()); |
95 | } |
96 | |
97 | Graph::~Graph() |
98 | { |
99 | } |
100 | |
101 | const char *Graph::opName(NodeType op) |
102 | { |
103 | return dfgOpNames[op]; |
104 | } |
105 | |
106 | static void printWhiteSpace(PrintStream& out, unsigned amount) |
107 | { |
108 | while (amount-- > 0) |
109 | out.print(" " ); |
110 | } |
111 | |
112 | bool Graph::dumpCodeOrigin(PrintStream& out, const char* prefix, Node*& previousNodeRef, Node* currentNode, DumpContext* context) |
113 | { |
114 | if (!currentNode->origin.semantic) |
115 | return false; |
116 | |
117 | Node* previousNode = previousNodeRef; |
118 | previousNodeRef = currentNode; |
119 | |
120 | if (!previousNode) |
121 | return false; |
122 | |
123 | if (previousNode->origin.semantic.inlineCallFrame() == currentNode->origin.semantic.inlineCallFrame()) |
124 | return false; |
125 | |
126 | Vector<CodeOrigin> previousInlineStack = previousNode->origin.semantic.inlineStack(); |
127 | Vector<CodeOrigin> currentInlineStack = currentNode->origin.semantic.inlineStack(); |
128 | unsigned commonSize = std::min(previousInlineStack.size(), currentInlineStack.size()); |
129 | unsigned indexOfDivergence = commonSize; |
130 | for (unsigned i = 0; i < commonSize; ++i) { |
131 | if (previousInlineStack[i].inlineCallFrame() != currentInlineStack[i].inlineCallFrame()) { |
132 | indexOfDivergence = i; |
133 | break; |
134 | } |
135 | } |
136 | |
137 | bool hasPrinted = false; |
138 | |
139 | // Print the pops. |
140 | for (unsigned i = previousInlineStack.size(); i-- > indexOfDivergence;) { |
141 | out.print(prefix); |
142 | printWhiteSpace(out, i * 2); |
143 | out.print("<-- " , inContext(*previousInlineStack[i].inlineCallFrame(), context), "\n" ); |
144 | hasPrinted = true; |
145 | } |
146 | |
147 | // Print the pushes. |
148 | for (unsigned i = indexOfDivergence; i < currentInlineStack.size(); ++i) { |
149 | out.print(prefix); |
150 | printWhiteSpace(out, i * 2); |
151 | out.print("--> " , inContext(*currentInlineStack[i].inlineCallFrame(), context), "\n" ); |
152 | hasPrinted = true; |
153 | } |
154 | |
155 | return hasPrinted; |
156 | } |
157 | |
158 | int Graph::amountOfNodeWhiteSpace(Node* node) |
159 | { |
160 | return (node->origin.semantic.inlineDepth() - 1) * 2; |
161 | } |
162 | |
163 | void Graph::printNodeWhiteSpace(PrintStream& out, Node* node) |
164 | { |
165 | printWhiteSpace(out, amountOfNodeWhiteSpace(node)); |
166 | } |
167 | |
168 | void Graph::dump(PrintStream& out, const char* prefix, Node* node, DumpContext* context) |
169 | { |
170 | NodeType op = node->op(); |
171 | |
172 | unsigned refCount = node->refCount(); |
173 | bool mustGenerate = node->mustGenerate(); |
174 | if (mustGenerate) |
175 | --refCount; |
176 | |
177 | out.print(prefix); |
178 | printNodeWhiteSpace(out, node); |
179 | |
180 | // Example/explanation of dataflow dump output |
181 | // |
182 | // 14: <!2:7> GetByVal(@3, @13) |
183 | // ^1 ^2 ^3 ^4 ^5 |
184 | // |
185 | // (1) The nodeIndex of this operation. |
186 | // (2) The reference count. The number printed is the 'real' count, |
187 | // not including the 'mustGenerate' ref. If the node is |
188 | // 'mustGenerate' then the count it prefixed with '!'. |
189 | // (3) The virtual register slot assigned to this node. |
190 | // (4) The name of the operation. |
191 | // (5) The arguments to the operation. The may be of the form: |
192 | // @# - a NodeIndex referencing a prior node in the graph. |
193 | // arg# - an argument number. |
194 | // id# - the index in the CodeBlock of an identifier { if codeBlock is passed to dump(), the string representation is displayed }. |
195 | // var# - the index of a var on the global object, used by GetGlobalVar/GetGlobalLexicalVariable/PutGlobalVariable operations. |
196 | out.printf("% 4d:<%c%u:" , (int)node->index(), mustGenerate ? '!' : ' ', refCount); |
197 | if (node->hasResult() && node->hasVirtualRegister() && node->virtualRegister().isValid()) |
198 | out.print(node->virtualRegister()); |
199 | else |
200 | out.print("-" ); |
201 | out.print(">\t" , opName(op), "(" ); |
202 | CommaPrinter comma; |
203 | if (node->flags() & NodeHasVarArgs) { |
204 | for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) { |
205 | if (!m_varArgChildren[childIdx]) |
206 | continue; |
207 | out.print(comma, m_varArgChildren[childIdx]); |
208 | } |
209 | } else { |
210 | if (!!node->child1() || !!node->child2() || !!node->child3()) |
211 | out.print(comma, node->child1()); |
212 | if (!!node->child2() || !!node->child3()) |
213 | out.print(comma, node->child2()); |
214 | if (!!node->child3()) |
215 | out.print(comma, node->child3()); |
216 | } |
217 | |
218 | if (toCString(NodeFlagsDump(node->flags())) != "<empty>" ) |
219 | out.print(comma, NodeFlagsDump(node->flags())); |
220 | if (node->prediction()) |
221 | out.print(comma, SpeculationDump(node->prediction())); |
222 | if (node->hasNumberOfArgumentsToSkip()) |
223 | out.print(comma, "numberOfArgumentsToSkip = " , node->numberOfArgumentsToSkip()); |
224 | if (node->hasArrayMode()) |
225 | out.print(comma, node->arrayMode()); |
226 | if (node->hasArithUnaryType()) |
227 | out.print(comma, "Type:" , node->arithUnaryType()); |
228 | if (node->hasArithMode()) |
229 | out.print(comma, node->arithMode()); |
230 | if (node->hasArithRoundingMode()) |
231 | out.print(comma, "Rounding:" , node->arithRoundingMode()); |
232 | if (node->hasScopeOffset()) |
233 | out.print(comma, node->scopeOffset()); |
234 | if (node->hasDirectArgumentsOffset()) |
235 | out.print(comma, node->capturedArgumentsOffset()); |
236 | if (node->hasArgumentIndex()) |
237 | out.print(comma, node->argumentIndex()); |
238 | if (node->hasRegisterPointer()) |
239 | out.print(comma, "global" , "(" , RawPointer(node->variablePointer()), ")" ); |
240 | if (node->hasIdentifier()) |
241 | out.print(comma, "id" , node->identifierNumber(), "{" , identifiers()[node->identifierNumber()], "}" ); |
242 | if (node->hasPromotedLocationDescriptor()) |
243 | out.print(comma, node->promotedLocationDescriptor()); |
244 | if (node->hasClassInfo()) |
245 | out.print(comma, *node->classInfo()); |
246 | if (node->hasStructureSet()) |
247 | out.print(comma, inContext(node->structureSet().toStructureSet(), context)); |
248 | if (node->hasStructure()) |
249 | out.print(comma, inContext(*node->structure().get(), context)); |
250 | if (node->op() == CPUIntrinsic) |
251 | out.print(comma, intrinsicName(node->intrinsic())); |
252 | if (node->hasTransition()) { |
253 | out.print(comma, pointerDumpInContext(node->transition(), context)); |
254 | #if USE(JSVALUE64) |
255 | out.print(", ID:" , node->transition()->next->id()); |
256 | #else |
257 | out.print(", ID:" , RawPointer(node->transition()->next.get())); |
258 | #endif |
259 | } |
260 | if (node->hasCellOperand()) { |
261 | if (!node->cellOperand()->value() || !node->cellOperand()->value().isCell()) |
262 | out.print(comma, "invalid cell operand: " , node->cellOperand()->value()); |
263 | else { |
264 | out.print(comma, pointerDump(node->cellOperand()->value().asCell())); |
265 | if (node->cellOperand()->value().isCell()) { |
266 | CallVariant variant(node->cellOperand()->value().asCell()); |
267 | if (ExecutableBase* executable = variant.executable()) { |
268 | if (executable->isHostFunction()) |
269 | out.print(comma, "<host function>" ); |
270 | else if (FunctionExecutable* functionExecutable = jsDynamicCast<FunctionExecutable*>(m_vm, executable)) |
271 | out.print(comma, FunctionExecutableDump(functionExecutable)); |
272 | else |
273 | out.print(comma, "<non-function executable>" ); |
274 | } |
275 | } |
276 | } |
277 | } |
278 | if (node->hasSpeculatedTypeForQuery()) |
279 | out.print(comma, SpeculationDump(node->speculatedTypeForQuery())); |
280 | if (node->hasStorageAccessData()) { |
281 | StorageAccessData& storageAccessData = node->storageAccessData(); |
282 | out.print(comma, "id" , storageAccessData.identifierNumber, "{" , identifiers()[storageAccessData.identifierNumber], "}" ); |
283 | out.print(", " , static_cast<ptrdiff_t>(storageAccessData.offset)); |
284 | } |
285 | if (node->hasMultiGetByOffsetData()) { |
286 | MultiGetByOffsetData& data = node->multiGetByOffsetData(); |
287 | out.print(comma, "id" , data.identifierNumber, "{" , identifiers()[data.identifierNumber], "}" ); |
288 | for (unsigned i = 0; i < data.cases.size(); ++i) |
289 | out.print(comma, inContext(data.cases[i], context)); |
290 | } |
291 | if (node->hasMultiPutByOffsetData()) { |
292 | MultiPutByOffsetData& data = node->multiPutByOffsetData(); |
293 | out.print(comma, "id" , data.identifierNumber, "{" , identifiers()[data.identifierNumber], "}" ); |
294 | for (unsigned i = 0; i < data.variants.size(); ++i) |
295 | out.print(comma, inContext(data.variants[i], context)); |
296 | } |
297 | if (node->hasMatchStructureData()) { |
298 | for (MatchStructureVariant& variant : node->matchStructureData().variants) |
299 | out.print(comma, inContext(*variant.structure.get(), context), "=>" , variant.result); |
300 | } |
301 | ASSERT(node->hasVariableAccessData(*this) == node->accessesStack(*this)); |
302 | if (node->hasVariableAccessData(*this)) { |
303 | VariableAccessData* variableAccessData = node->tryGetVariableAccessData(); |
304 | if (variableAccessData) { |
305 | VirtualRegister operand = variableAccessData->local(); |
306 | out.print(comma, variableAccessData->local(), "(" , VariableAccessDataDump(*this, variableAccessData), ")" ); |
307 | operand = variableAccessData->machineLocal(); |
308 | if (operand.isValid()) |
309 | out.print(comma, "machine:" , operand); |
310 | } |
311 | } |
312 | if (node->hasStackAccessData()) { |
313 | StackAccessData* data = node->stackAccessData(); |
314 | out.print(comma, data->local); |
315 | if (data->machineLocal.isValid()) |
316 | out.print(comma, "machine:" , data->machineLocal); |
317 | out.print(comma, data->format); |
318 | } |
319 | if (node->hasUnlinkedLocal()) |
320 | out.print(comma, node->unlinkedLocal()); |
321 | if (node->hasVectorLengthHint()) |
322 | out.print(comma, "vectorLengthHint = " , node->vectorLengthHint()); |
323 | if (node->hasLazyJSValue()) |
324 | out.print(comma, node->lazyJSValue()); |
325 | if (node->hasIndexingType()) |
326 | out.print(comma, IndexingTypeDump(node->indexingMode())); |
327 | if (node->hasTypedArrayType()) |
328 | out.print(comma, node->typedArrayType()); |
329 | if (node->hasPhi()) |
330 | out.print(comma, "^" , node->phi()->index()); |
331 | if (node->hasExecutionCounter()) |
332 | out.print(comma, RawPointer(node->executionCounter())); |
333 | if (node->hasWatchpointSet()) |
334 | out.print(comma, RawPointer(node->watchpointSet())); |
335 | if (node->hasStoragePointer()) |
336 | out.print(comma, RawPointer(node->storagePointer())); |
337 | if (node->hasObjectMaterializationData()) |
338 | out.print(comma, node->objectMaterializationData()); |
339 | if (node->hasCallVarargsData()) |
340 | out.print(comma, "firstVarArgOffset = " , node->callVarargsData()->firstVarArgOffset); |
341 | if (node->hasLoadVarargsData()) { |
342 | LoadVarargsData* data = node->loadVarargsData(); |
343 | out.print(comma, "start = " , data->start, ", count = " , data->count); |
344 | if (data->machineStart.isValid()) |
345 | out.print(", machineStart = " , data->machineStart); |
346 | if (data->machineCount.isValid()) |
347 | out.print(", machineCount = " , data->machineCount); |
348 | out.print(", offset = " , data->offset, ", mandatoryMinimum = " , data->mandatoryMinimum); |
349 | out.print(", limit = " , data->limit); |
350 | } |
351 | if (node->hasCallDOMGetterData()) { |
352 | CallDOMGetterData* data = node->callDOMGetterData(); |
353 | out.print(comma, "id" , data->identifierNumber, "{" , identifiers()[data->identifierNumber], "}" ); |
354 | out.print(", domJIT = " , RawPointer(data->domJIT)); |
355 | } |
356 | if (node->hasIgnoreLastIndexIsWritable()) |
357 | out.print(comma, "ignoreLastIndexIsWritable = " , node->ignoreLastIndexIsWritable()); |
358 | if (node->isConstant()) |
359 | out.print(comma, pointerDumpInContext(node->constant(), context)); |
360 | if (node->hasCallLinkStatus()) |
361 | out.print(comma, *node->callLinkStatus()); |
362 | if (node->hasGetByIdStatus()) |
363 | out.print(comma, *node->getByIdStatus()); |
364 | if (node->hasInByIdStatus()) |
365 | out.print(comma, *node->inByIdStatus()); |
366 | if (node->hasPutByIdStatus()) |
367 | out.print(comma, *node->putByIdStatus()); |
368 | if (node->isJump()) |
369 | out.print(comma, "T:" , *node->targetBlock()); |
370 | if (node->isBranch()) |
371 | out.print(comma, "T:" , node->branchData()->taken, ", F:" , node->branchData()->notTaken); |
372 | if (node->isSwitch()) { |
373 | SwitchData* data = node->switchData(); |
374 | out.print(comma, data->kind); |
375 | for (unsigned i = 0; i < data->cases.size(); ++i) |
376 | out.print(comma, inContext(data->cases[i].value, context), ":" , data->cases[i].target); |
377 | out.print(comma, "default:" , data->fallThrough); |
378 | } |
379 | if (node->isEntrySwitch()) { |
380 | EntrySwitchData* data = node->entrySwitchData(); |
381 | for (unsigned i = 0; i < data->cases.size(); ++i) |
382 | out.print(comma, BranchTarget(data->cases[i])); |
383 | } |
384 | ClobberSet reads; |
385 | ClobberSet writes; |
386 | addReadsAndWrites(*this, node, reads, writes); |
387 | if (!reads.isEmpty()) |
388 | out.print(comma, "R:" , sortedListDump(reads.direct(), "," )); |
389 | if (!writes.isEmpty()) |
390 | out.print(comma, "W:" , sortedListDump(writes.direct(), "," )); |
391 | ExitMode exitMode = mayExit(*this, node); |
392 | if (exitMode != DoesNotExit) |
393 | out.print(comma, exitMode); |
394 | if (clobbersExitState(*this, node)) |
395 | out.print(comma, "ClobbersExit" ); |
396 | if (node->origin.isSet()) { |
397 | out.print(comma, "bc#" , node->origin.semantic.bytecodeIndex()); |
398 | if (node->origin.semantic != node->origin.forExit && node->origin.forExit.isSet()) |
399 | out.print(comma, "exit: " , node->origin.forExit); |
400 | } |
401 | out.print(comma, node->origin.exitOK ? "ExitValid" : "ExitInvalid" ); |
402 | if (node->origin.wasHoisted) |
403 | out.print(comma, "WasHoisted" ); |
404 | out.print(")" ); |
405 | |
406 | if (node->accessesStack(*this) && node->tryGetVariableAccessData()) |
407 | out.print(" predicting " , SpeculationDump(node->tryGetVariableAccessData()->prediction())); |
408 | else if (node->hasHeapPrediction()) |
409 | out.print(" predicting " , SpeculationDump(node->getHeapPrediction())); |
410 | |
411 | out.print("\n" ); |
412 | } |
413 | |
414 | bool Graph::terminalsAreValid() |
415 | { |
416 | for (BasicBlock* block : blocksInNaturalOrder()) { |
417 | if (!block->terminal()) |
418 | return false; |
419 | } |
420 | return true; |
421 | } |
422 | |
423 | static BasicBlock* unboxLoopNode(const CPSCFG::Node& node) { return node.node(); } |
424 | static BasicBlock* unboxLoopNode(BasicBlock* block) { return block; } |
425 | |
426 | void Graph::(PrintStream& out, const char* prefix, BasicBlock* block, PhiNodeDumpMode phiNodeDumpMode, DumpContext* context) |
427 | { |
428 | out.print(prefix, "Block " , *block, " (" , inContext(block->at(0)->origin.semantic, context), "):" , |
429 | block->isReachable ? "" : " (skipped)" , block->isOSRTarget ? " (OSR target)" : "" , block->isCatchEntrypoint ? " (Catch Entrypoint)" : "" , "\n" ); |
430 | if (block->executionCount == block->executionCount) |
431 | out.print(prefix, " Execution count: " , block->executionCount, "\n" ); |
432 | out.print(prefix, " Predecessors:" ); |
433 | for (size_t i = 0; i < block->predecessors.size(); ++i) |
434 | out.print(" " , *block->predecessors[i]); |
435 | out.print("\n" ); |
436 | out.print(prefix, " Successors:" ); |
437 | if (block->terminal()) { |
438 | for (BasicBlock* successor : block->successors()) { |
439 | out.print(" " , *successor); |
440 | } |
441 | } else |
442 | out.print(" <invalid>" ); |
443 | out.print("\n" ); |
444 | |
445 | auto printDominators = [&] (auto& dominators) { |
446 | out.print(prefix, " Dominated by: " , dominators.dominatorsOf(block), "\n" ); |
447 | out.print(prefix, " Dominates: " , dominators.blocksDominatedBy(block), "\n" ); |
448 | out.print(prefix, " Dominance Frontier: " , dominators.dominanceFrontierOf(block), "\n" ); |
449 | out.print(prefix, " Iterated Dominance Frontier: " , |
450 | dominators.iteratedDominanceFrontierOf(typename std::remove_reference<decltype(dominators)>::type::List { block }), "\n" ); |
451 | }; |
452 | |
453 | if (terminalsAreValid()) { |
454 | if (m_ssaDominators) |
455 | printDominators(*m_ssaDominators); |
456 | else if (m_cpsDominators) |
457 | printDominators(*m_cpsDominators); |
458 | } |
459 | |
460 | if (m_backwardsDominators && terminalsAreValid()) { |
461 | out.print(prefix, " Backwards dominates by: " , m_backwardsDominators->dominatorsOf(block), "\n" ); |
462 | out.print(prefix, " Backwards dominates: " , m_backwardsDominators->blocksDominatedBy(block), "\n" ); |
463 | } |
464 | if (m_controlEquivalenceAnalysis && terminalsAreValid()) { |
465 | out.print(prefix, " Control equivalent to:" ); |
466 | for (BasicBlock* otherBlock : blocksInNaturalOrder()) { |
467 | if (m_controlEquivalenceAnalysis->areEquivalent(block, otherBlock)) |
468 | out.print(" " , *otherBlock); |
469 | } |
470 | out.print("\n" ); |
471 | } |
472 | |
473 | auto printNaturalLoops = [&] (auto& naturalLoops) { |
474 | if (const auto* loop = naturalLoops->headerOf(block)) { |
475 | out.print(prefix, " Loop header, contains:" ); |
476 | Vector<BlockIndex> sortedBlockList; |
477 | for (unsigned i = 0; i < loop->size(); ++i) |
478 | sortedBlockList.append(unboxLoopNode(loop->at(i))->index); |
479 | std::sort(sortedBlockList.begin(), sortedBlockList.end()); |
480 | for (unsigned i = 0; i < sortedBlockList.size(); ++i) |
481 | out.print(" #" , sortedBlockList[i]); |
482 | out.print("\n" ); |
483 | } |
484 | |
485 | auto containingLoops = naturalLoops->loopsOf(block); |
486 | if (!containingLoops.isEmpty()) { |
487 | out.print(prefix, " Containing loop headers:" ); |
488 | for (unsigned i = 0; i < containingLoops.size(); ++i) |
489 | out.print(" " , *unboxLoopNode(containingLoops[i]->header())); |
490 | out.print("\n" ); |
491 | } |
492 | }; |
493 | |
494 | if (m_ssaNaturalLoops) |
495 | printNaturalLoops(m_ssaNaturalLoops); |
496 | else if (m_cpsNaturalLoops) |
497 | printNaturalLoops(m_cpsNaturalLoops); |
498 | |
499 | if (!block->phis.isEmpty()) { |
500 | out.print(prefix, " Phi Nodes:" ); |
501 | for (size_t i = 0; i < block->phis.size(); ++i) { |
502 | Node* phiNode = block->phis[i]; |
503 | if (!phiNode->shouldGenerate() && phiNodeDumpMode == DumpLivePhisOnly) |
504 | continue; |
505 | out.print(" @" , phiNode->index(), "<" , phiNode->local(), "," , phiNode->refCount(), ">->(" ); |
506 | if (phiNode->child1()) { |
507 | out.print("@" , phiNode->child1()->index()); |
508 | if (phiNode->child2()) { |
509 | out.print(", @" , phiNode->child2()->index()); |
510 | if (phiNode->child3()) |
511 | out.print(", @" , phiNode->child3()->index()); |
512 | } |
513 | } |
514 | out.print(")" , i + 1 < block->phis.size() ? "," : "" ); |
515 | } |
516 | out.print("\n" ); |
517 | } |
518 | } |
519 | |
520 | void Graph::dump(PrintStream& out, DumpContext* context) |
521 | { |
522 | DumpContext myContext; |
523 | myContext.graph = this; |
524 | if (!context) |
525 | context = &myContext; |
526 | |
527 | out.print("\n" ); |
528 | out.print("DFG for " , CodeBlockWithJITType(m_codeBlock, JITType::DFGJIT), ":\n" ); |
529 | out.print(" Fixpoint state: " , m_fixpointState, "; Form: " , m_form, "; Unification state: " , m_unificationState, "; Ref count state: " , m_refCountState, "\n" ); |
530 | if (m_form == SSA) { |
531 | for (unsigned entrypointIndex = 0; entrypointIndex < m_argumentFormats.size(); ++entrypointIndex) |
532 | out.print(" Argument formats for entrypoint index: " , entrypointIndex, " : " , listDump(m_argumentFormats[entrypointIndex]), "\n" ); |
533 | } |
534 | else { |
535 | for (auto pair : m_rootToArguments) |
536 | out.print(" Arguments for block#" , pair.key->index, ": " , listDump(pair.value), "\n" ); |
537 | } |
538 | out.print("\n" ); |
539 | |
540 | Node* lastNode = nullptr; |
541 | for (size_t b = 0; b < m_blocks.size(); ++b) { |
542 | BasicBlock* block = m_blocks[b].get(); |
543 | if (!block) |
544 | continue; |
545 | dumpBlockHeader(out, "" , block, DumpAllPhis, context); |
546 | out.print(" States: " , block->cfaStructureClobberStateAtHead); |
547 | if (!block->cfaHasVisited) |
548 | out.print(", CurrentlyCFAUnreachable" ); |
549 | if (!block->intersectionOfCFAHasVisited) |
550 | out.print(", CFAUnreachable" ); |
551 | out.print("\n" ); |
552 | switch (m_form) { |
553 | case LoadStore: |
554 | case ThreadedCPS: { |
555 | out.print(" Vars Before: " ); |
556 | if (block->cfaHasVisited) |
557 | out.print(inContext(block->valuesAtHead, context)); |
558 | else |
559 | out.print("<empty>" ); |
560 | out.print("\n" ); |
561 | out.print(" Intersected Vars Before: " ); |
562 | if (block->intersectionOfCFAHasVisited) |
563 | out.print(inContext(block->intersectionOfPastValuesAtHead, context)); |
564 | else |
565 | out.print("<empty>" ); |
566 | out.print("\n" ); |
567 | out.print(" Var Links: " , block->variablesAtHead, "\n" ); |
568 | break; |
569 | } |
570 | |
571 | case SSA: { |
572 | RELEASE_ASSERT(block->ssa); |
573 | if (dumpOSRAvailabilityData) |
574 | out.print(" Availability: " , block->ssa->availabilityAtHead, "\n" ); |
575 | out.print(" Live: " , nodeListDump(block->ssa->liveAtHead), "\n" ); |
576 | out.print(" Values: " , nodeValuePairListDump(block->ssa->valuesAtHead, context), "\n" ); |
577 | break; |
578 | } } |
579 | for (size_t i = 0; i < block->size(); ++i) { |
580 | dumpCodeOrigin(out, "" , lastNode, block->at(i), context); |
581 | dump(out, "" , block->at(i), context); |
582 | } |
583 | out.print(" States: " , block->cfaBranchDirection, ", " , block->cfaStructureClobberStateAtTail); |
584 | if (!block->cfaDidFinish) |
585 | out.print(", CFAInvalidated" ); |
586 | out.print("\n" ); |
587 | switch (m_form) { |
588 | case LoadStore: |
589 | case ThreadedCPS: { |
590 | out.print(" Vars After: " ); |
591 | if (block->cfaHasVisited) |
592 | out.print(inContext(block->valuesAtTail, context)); |
593 | else |
594 | out.print("<empty>" ); |
595 | out.print("\n" ); |
596 | out.print(" Var Links: " , block->variablesAtTail, "\n" ); |
597 | break; |
598 | } |
599 | |
600 | case SSA: { |
601 | RELEASE_ASSERT(block->ssa); |
602 | if (dumpOSRAvailabilityData) |
603 | out.print(" Availability: " , block->ssa->availabilityAtTail, "\n" ); |
604 | out.print(" Live: " , nodeListDump(block->ssa->liveAtTail), "\n" ); |
605 | out.print(" Values: " , nodeValuePairListDump(block->ssa->valuesAtTail, context), "\n" ); |
606 | break; |
607 | } } |
608 | out.print("\n" ); |
609 | } |
610 | |
611 | out.print("GC Values:\n" ); |
612 | for (FrozenValue* value : m_frozenValues) { |
613 | if (value->pointsToHeap()) |
614 | out.print(" " , inContext(*value, &myContext), "\n" ); |
615 | } |
616 | |
617 | out.print(inContext(watchpoints(), &myContext)); |
618 | |
619 | if (!myContext.isEmpty()) { |
620 | myContext.dump(out); |
621 | out.print("\n" ); |
622 | } |
623 | } |
624 | |
625 | void Graph::deleteNode(Node* node) |
626 | { |
627 | if (validationEnabled() && m_form == SSA) { |
628 | for (BasicBlock* block : blocksInNaturalOrder()) { |
629 | DFG_ASSERT(*this, node, !block->ssa->liveAtHead.contains(node)); |
630 | DFG_ASSERT(*this, node, !block->ssa->liveAtTail.contains(node)); |
631 | } |
632 | } |
633 | |
634 | m_nodes.remove(node); |
635 | } |
636 | |
637 | void Graph::packNodeIndices() |
638 | { |
639 | m_nodes.packIndices(); |
640 | } |
641 | |
642 | void Graph::dethread() |
643 | { |
644 | if (m_form == LoadStore || m_form == SSA) |
645 | return; |
646 | |
647 | if (logCompilationChanges()) |
648 | dataLog("Dethreading DFG graph.\n" ); |
649 | |
650 | for (BlockIndex blockIndex = m_blocks.size(); blockIndex--;) { |
651 | BasicBlock* block = m_blocks[blockIndex].get(); |
652 | if (!block) |
653 | continue; |
654 | for (unsigned phiIndex = block->phis.size(); phiIndex--;) { |
655 | Node* phi = block->phis[phiIndex]; |
656 | phi->children.reset(); |
657 | } |
658 | } |
659 | |
660 | m_form = LoadStore; |
661 | } |
662 | |
663 | void Graph::handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock* block, BasicBlock* successor) |
664 | { |
665 | if (!successor->isReachable) { |
666 | successor->isReachable = true; |
667 | worklist.append(successor); |
668 | } |
669 | |
670 | if (!successor->predecessors.contains(block)) |
671 | successor->predecessors.append(block); |
672 | } |
673 | |
674 | void Graph::determineReachability() |
675 | { |
676 | Vector<BasicBlock*, 16> worklist; |
677 | for (BasicBlock* entrypoint : m_roots) { |
678 | entrypoint->isReachable = true; |
679 | worklist.append(entrypoint); |
680 | } |
681 | while (!worklist.isEmpty()) { |
682 | BasicBlock* block = worklist.takeLast(); |
683 | for (unsigned i = block->numSuccessors(); i--;) |
684 | handleSuccessor(worklist, block, block->successor(i)); |
685 | } |
686 | } |
687 | |
688 | void Graph::resetReachability() |
689 | { |
690 | for (BlockIndex blockIndex = m_blocks.size(); blockIndex--;) { |
691 | BasicBlock* block = m_blocks[blockIndex].get(); |
692 | if (!block) |
693 | continue; |
694 | block->isReachable = false; |
695 | block->predecessors.clear(); |
696 | } |
697 | |
698 | determineReachability(); |
699 | } |
700 | |
701 | namespace { |
702 | |
703 | class RefCountCalculator { |
704 | public: |
705 | RefCountCalculator(Graph& graph) |
706 | : m_graph(graph) |
707 | { |
708 | } |
709 | |
710 | void calculate() |
711 | { |
712 | // First reset the counts to 0 for all nodes. |
713 | for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { |
714 | BasicBlock* block = m_graph.block(blockIndex); |
715 | if (!block) |
716 | continue; |
717 | for (unsigned indexInBlock = block->size(); indexInBlock--;) |
718 | block->at(indexInBlock)->setRefCount(0); |
719 | for (unsigned phiIndex = block->phis.size(); phiIndex--;) |
720 | block->phis[phiIndex]->setRefCount(0); |
721 | } |
722 | |
723 | // Now find the roots: |
724 | // - Nodes that are must-generate. |
725 | // - Nodes that are reachable from type checks. |
726 | // Set their ref counts to 1 and put them on the worklist. |
727 | for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) { |
728 | BasicBlock* block = m_graph.block(blockIndex); |
729 | if (!block) |
730 | continue; |
731 | for (unsigned indexInBlock = block->size(); indexInBlock--;) { |
732 | Node* node = block->at(indexInBlock); |
733 | DFG_NODE_DO_TO_CHILDREN(m_graph, node, findTypeCheckRoot); |
734 | if (!(node->flags() & NodeMustGenerate)) |
735 | continue; |
736 | if (!node->postfixRef()) |
737 | m_worklist.append(node); |
738 | } |
739 | } |
740 | |
741 | while (!m_worklist.isEmpty()) { |
742 | while (!m_worklist.isEmpty()) { |
743 | Node* node = m_worklist.last(); |
744 | m_worklist.removeLast(); |
745 | ASSERT(node->shouldGenerate()); // It should not be on the worklist unless it's ref'ed. |
746 | DFG_NODE_DO_TO_CHILDREN(m_graph, node, countEdge); |
747 | } |
748 | |
749 | if (m_graph.m_form == SSA) { |
750 | // Find Phi->Upsilon edges, which are represented as meta-data in the |
751 | // Upsilon. |
752 | for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { |
753 | BasicBlock* block = m_graph.block(blockIndex); |
754 | if (!block) |
755 | continue; |
756 | for (unsigned nodeIndex = block->size(); nodeIndex--;) { |
757 | Node* node = block->at(nodeIndex); |
758 | if (node->op() != Upsilon) |
759 | continue; |
760 | if (node->shouldGenerate()) |
761 | continue; |
762 | if (node->phi()->shouldGenerate()) |
763 | countNode(node); |
764 | } |
765 | } |
766 | } |
767 | } |
768 | } |
769 | |
770 | private: |
771 | void findTypeCheckRoot(Node*, Edge edge) |
772 | { |
773 | // We may have an "unproved" untyped use for code that is unreachable. The CFA |
774 | // will just not have gotten around to it. |
775 | if (edge.isProved() || edge.willNotHaveCheck()) |
776 | return; |
777 | if (!edge->postfixRef()) |
778 | m_worklist.append(edge.node()); |
779 | } |
780 | |
781 | void countNode(Node* node) |
782 | { |
783 | if (node->postfixRef()) |
784 | return; |
785 | m_worklist.append(node); |
786 | } |
787 | |
788 | void countEdge(Node*, Edge edge) |
789 | { |
790 | // Don't count edges that are already counted for their type checks. |
791 | if (!(edge.isProved() || edge.willNotHaveCheck())) |
792 | return; |
793 | countNode(edge.node()); |
794 | } |
795 | |
796 | Graph& m_graph; |
797 | Vector<Node*, 128> m_worklist; |
798 | }; |
799 | |
800 | } // anonymous namespace |
801 | |
802 | void Graph::computeRefCounts() |
803 | { |
804 | RefCountCalculator calculator(*this); |
805 | calculator.calculate(); |
806 | } |
807 | |
808 | void Graph::killBlockAndItsContents(BasicBlock* block) |
809 | { |
810 | if (auto& ssaData = block->ssa) |
811 | ssaData->invalidate(); |
812 | for (unsigned phiIndex = block->phis.size(); phiIndex--;) |
813 | deleteNode(block->phis[phiIndex]); |
814 | for (Node* node : *block) |
815 | deleteNode(node); |
816 | |
817 | killBlock(block); |
818 | } |
819 | |
820 | void Graph::killUnreachableBlocks() |
821 | { |
822 | invalidateNodeLiveness(); |
823 | |
824 | for (BlockIndex blockIndex = 0; blockIndex < numBlocks(); ++blockIndex) { |
825 | BasicBlock* block = this->block(blockIndex); |
826 | if (!block) |
827 | continue; |
828 | if (block->isReachable) |
829 | continue; |
830 | |
831 | dataLogIf(Options::verboseDFGBytecodeParsing(), "Basic block #" , blockIndex, " was killed because it was unreachable\n" ); |
832 | killBlockAndItsContents(block); |
833 | } |
834 | } |
835 | |
836 | void Graph::invalidateCFG() |
837 | { |
838 | m_cpsDominators = nullptr; |
839 | m_ssaDominators = nullptr; |
840 | m_cpsNaturalLoops = nullptr; |
841 | m_ssaNaturalLoops = nullptr; |
842 | m_controlEquivalenceAnalysis = nullptr; |
843 | m_backwardsDominators = nullptr; |
844 | m_backwardsCFG = nullptr; |
845 | m_cpsCFG = nullptr; |
846 | } |
847 | |
848 | void Graph::invalidateNodeLiveness() |
849 | { |
850 | if (m_form != SSA) |
851 | return; |
852 | |
853 | for (BasicBlock* block : blocksInNaturalOrder()) |
854 | block->ssa->invalidate(); |
855 | } |
856 | |
857 | void Graph::substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal) |
858 | { |
859 | for (unsigned indexInBlock = startIndexInBlock; indexInBlock < block.size(); ++indexInBlock) { |
860 | Node* node = block[indexInBlock]; |
861 | bool shouldContinue = true; |
862 | switch (node->op()) { |
863 | case SetLocal: { |
864 | if (node->local() == variableAccessData->local()) |
865 | shouldContinue = false; |
866 | break; |
867 | } |
868 | |
869 | case GetLocal: { |
870 | if (node->variableAccessData() != variableAccessData) |
871 | continue; |
872 | substitute(block, indexInBlock, node, newGetLocal); |
873 | Node* oldTailNode = block.variablesAtTail.operand(variableAccessData->local()); |
874 | if (oldTailNode == node) |
875 | block.variablesAtTail.operand(variableAccessData->local()) = newGetLocal; |
876 | shouldContinue = false; |
877 | break; |
878 | } |
879 | |
880 | default: |
881 | break; |
882 | } |
883 | if (!shouldContinue) |
884 | break; |
885 | } |
886 | } |
887 | |
888 | BlockList Graph::blocksInPreOrder() |
889 | { |
890 | BlockList result; |
891 | result.reserveInitialCapacity(m_blocks.size()); |
892 | BlockWorklist worklist; |
893 | for (BasicBlock* entrypoint : m_roots) |
894 | worklist.push(entrypoint); |
895 | while (BasicBlock* block = worklist.pop()) { |
896 | result.append(block); |
897 | for (unsigned i = block->numSuccessors(); i--;) |
898 | worklist.push(block->successor(i)); |
899 | } |
900 | |
901 | if (validationEnabled()) { |
902 | // When iterating over pre order, we should see dominators |
903 | // before things they dominate. |
904 | auto validateResults = [&] (auto& dominators) { |
905 | for (unsigned i = 0; i < result.size(); ++i) { |
906 | BasicBlock* a = result[i]; |
907 | if (!a) |
908 | continue; |
909 | for (unsigned j = 0; j < result.size(); ++j) { |
910 | BasicBlock* b = result[j]; |
911 | if (!b || a == b) |
912 | continue; |
913 | if (dominators.dominates(a, b)) |
914 | RELEASE_ASSERT(i < j); |
915 | } |
916 | } |
917 | }; |
918 | |
919 | if (m_form == SSA || m_isInSSAConversion) |
920 | validateResults(ensureSSADominators()); |
921 | else |
922 | validateResults(ensureCPSDominators()); |
923 | } |
924 | return result; |
925 | } |
926 | |
927 | BlockList Graph::blocksInPostOrder(bool isSafeToValidate) |
928 | { |
929 | BlockList result; |
930 | result.reserveInitialCapacity(m_blocks.size()); |
931 | PostOrderBlockWorklist worklist; |
932 | for (BasicBlock* entrypoint : m_roots) |
933 | worklist.push(entrypoint); |
934 | while (BlockWithOrder item = worklist.pop()) { |
935 | switch (item.order) { |
936 | case VisitOrder::Pre: |
937 | worklist.pushPost(item.node); |
938 | for (unsigned i = item.node->numSuccessors(); i--;) |
939 | worklist.push(item.node->successor(i)); |
940 | break; |
941 | case VisitOrder::Post: |
942 | result.append(item.node); |
943 | break; |
944 | } |
945 | } |
946 | |
947 | if (isSafeToValidate && validationEnabled()) { // There are users of this where we haven't yet built of the CFG enough to be able to run dominators. |
948 | auto validateResults = [&] (auto& dominators) { |
949 | // When iterating over reverse post order, we should see dominators |
950 | // before things they dominate. |
951 | for (unsigned i = 0; i < result.size(); ++i) { |
952 | BasicBlock* a = result[i]; |
953 | if (!a) |
954 | continue; |
955 | for (unsigned j = 0; j < result.size(); ++j) { |
956 | BasicBlock* b = result[j]; |
957 | if (!b || a == b) |
958 | continue; |
959 | if (dominators.dominates(a, b)) |
960 | RELEASE_ASSERT(i > j); |
961 | } |
962 | } |
963 | }; |
964 | |
965 | if (m_form == SSA || m_isInSSAConversion) |
966 | validateResults(ensureSSADominators()); |
967 | else |
968 | validateResults(ensureCPSDominators()); |
969 | } |
970 | |
971 | return result; |
972 | } |
973 | |
974 | void Graph::clearReplacements() |
975 | { |
976 | for (BlockIndex blockIndex = numBlocks(); blockIndex--;) { |
977 | BasicBlock* block = m_blocks[blockIndex].get(); |
978 | if (!block) |
979 | continue; |
980 | for (unsigned phiIndex = block->phis.size(); phiIndex--;) |
981 | block->phis[phiIndex]->setReplacement(nullptr); |
982 | for (unsigned nodeIndex = block->size(); nodeIndex--;) |
983 | block->at(nodeIndex)->setReplacement(nullptr); |
984 | } |
985 | } |
986 | |
987 | void Graph::clearEpochs() |
988 | { |
989 | for (BlockIndex blockIndex = numBlocks(); blockIndex--;) { |
990 | BasicBlock* block = m_blocks[blockIndex].get(); |
991 | if (!block) |
992 | continue; |
993 | for (unsigned phiIndex = block->phis.size(); phiIndex--;) |
994 | block->phis[phiIndex]->setEpoch(Epoch()); |
995 | for (unsigned nodeIndex = block->size(); nodeIndex--;) |
996 | block->at(nodeIndex)->setEpoch(Epoch()); |
997 | } |
998 | } |
999 | |
1000 | void Graph::initializeNodeOwners() |
1001 | { |
1002 | for (BlockIndex blockIndex = numBlocks(); blockIndex--;) { |
1003 | BasicBlock* block = m_blocks[blockIndex].get(); |
1004 | if (!block) |
1005 | continue; |
1006 | for (unsigned phiIndex = block->phis.size(); phiIndex--;) |
1007 | block->phis[phiIndex]->owner = block; |
1008 | for (unsigned nodeIndex = block->size(); nodeIndex--;) |
1009 | block->at(nodeIndex)->owner = block; |
1010 | } |
1011 | } |
1012 | |
1013 | void Graph::clearFlagsOnAllNodes(NodeFlags flags) |
1014 | { |
1015 | for (BlockIndex blockIndex = numBlocks(); blockIndex--;) { |
1016 | BasicBlock* block = m_blocks[blockIndex].get(); |
1017 | if (!block) |
1018 | continue; |
1019 | for (unsigned phiIndex = block->phis.size(); phiIndex--;) |
1020 | block->phis[phiIndex]->clearFlags(flags); |
1021 | for (unsigned nodeIndex = block->size(); nodeIndex--;) |
1022 | block->at(nodeIndex)->clearFlags(flags); |
1023 | } |
1024 | } |
1025 | |
1026 | bool Graph::watchCondition(const ObjectPropertyCondition& key) |
1027 | { |
1028 | if (!key.isWatchable()) |
1029 | return false; |
1030 | |
1031 | DesiredWeakReferences& weakReferences = m_plan.weakReferences(); |
1032 | weakReferences.addLazily(key.object()); |
1033 | if (key.hasPrototype()) |
1034 | weakReferences.addLazily(key.prototype()); |
1035 | if (key.hasRequiredValue()) |
1036 | weakReferences.addLazily(key.requiredValue()); |
1037 | |
1038 | m_plan.watchpoints().addLazily(key); |
1039 | |
1040 | if (key.kind() == PropertyCondition::Presence) |
1041 | m_safeToLoad.add(std::make_pair(key.object(), key.offset())); |
1042 | |
1043 | return true; |
1044 | } |
1045 | |
1046 | bool Graph::watchConditions(const ObjectPropertyConditionSet& keys) |
1047 | { |
1048 | if (!keys.isValid()) |
1049 | return false; |
1050 | |
1051 | for (const ObjectPropertyCondition& key : keys) { |
1052 | if (!watchCondition(key)) |
1053 | return false; |
1054 | } |
1055 | return true; |
1056 | } |
1057 | |
1058 | bool Graph::isSafeToLoad(JSObject* base, PropertyOffset offset) |
1059 | { |
1060 | return m_safeToLoad.contains(std::make_pair(base, offset)); |
1061 | } |
1062 | |
1063 | bool Graph::watchGlobalProperty(JSGlobalObject* globalObject, unsigned identifierNumber) |
1064 | { |
1065 | UniquedStringImpl* uid = identifiers()[identifierNumber]; |
1066 | // If we already have a WatchpointSet, and it is already invalidated, it means that this scope operation must be changed from GlobalProperty to GlobalLexicalVar, |
1067 | // but we still have stale metadata here since we have not yet executed this bytecode operation since the invalidation. Just emitting ForceOSRExit to update the |
1068 | // metadata when it reaches to this code. |
1069 | if (auto* watchpoint = globalObject->getReferencedPropertyWatchpointSet(uid)) { |
1070 | if (!watchpoint->isStillValid()) |
1071 | return false; |
1072 | } |
1073 | globalProperties().addLazily(DesiredGlobalProperty(globalObject, identifierNumber)); |
1074 | return true; |
1075 | } |
1076 | |
1077 | FullBytecodeLiveness& Graph::livenessFor(CodeBlock* codeBlock) |
1078 | { |
1079 | HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>>::iterator iter = m_bytecodeLiveness.find(codeBlock); |
1080 | if (iter != m_bytecodeLiveness.end()) |
1081 | return *iter->value; |
1082 | |
1083 | std::unique_ptr<FullBytecodeLiveness> liveness = std::make_unique<FullBytecodeLiveness>(); |
1084 | codeBlock->livenessAnalysis().computeFullLiveness(codeBlock, *liveness); |
1085 | FullBytecodeLiveness& result = *liveness; |
1086 | m_bytecodeLiveness.add(codeBlock, WTFMove(liveness)); |
1087 | return result; |
1088 | } |
1089 | |
1090 | FullBytecodeLiveness& Graph::livenessFor(InlineCallFrame* inlineCallFrame) |
1091 | { |
1092 | return livenessFor(baselineCodeBlockFor(inlineCallFrame)); |
1093 | } |
1094 | |
1095 | BytecodeKills& Graph::killsFor(CodeBlock* codeBlock) |
1096 | { |
1097 | HashMap<CodeBlock*, std::unique_ptr<BytecodeKills>>::iterator iter = m_bytecodeKills.find(codeBlock); |
1098 | if (iter != m_bytecodeKills.end()) |
1099 | return *iter->value; |
1100 | |
1101 | std::unique_ptr<BytecodeKills> kills = std::make_unique<BytecodeKills>(); |
1102 | codeBlock->livenessAnalysis().computeKills(codeBlock, *kills); |
1103 | BytecodeKills& result = *kills; |
1104 | m_bytecodeKills.add(codeBlock, WTFMove(kills)); |
1105 | return result; |
1106 | } |
1107 | |
1108 | BytecodeKills& Graph::killsFor(InlineCallFrame* inlineCallFrame) |
1109 | { |
1110 | return killsFor(baselineCodeBlockFor(inlineCallFrame)); |
1111 | } |
1112 | |
1113 | bool Graph::isLiveInBytecode(VirtualRegister operand, CodeOrigin codeOrigin) |
1114 | { |
1115 | static const bool verbose = false; |
1116 | |
1117 | if (verbose) |
1118 | dataLog("Checking of operand is live: " , operand, "\n" ); |
1119 | CodeOrigin* codeOriginPtr = &codeOrigin; |
1120 | for (;;) { |
1121 | VirtualRegister reg = VirtualRegister( |
1122 | operand.offset() - codeOriginPtr->stackOffset()); |
1123 | |
1124 | if (verbose) |
1125 | dataLog("reg = " , reg, "\n" ); |
1126 | |
1127 | auto* inlineCallFrame = codeOriginPtr->inlineCallFrame(); |
1128 | if (operand.offset() < codeOriginPtr->stackOffset() + CallFrame::headerSizeInRegisters) { |
1129 | if (reg.isArgument()) { |
1130 | RELEASE_ASSERT(reg.offset() < CallFrame::headerSizeInRegisters); |
1131 | |
1132 | |
1133 | if (inlineCallFrame->isClosureCall |
1134 | && reg.offset() == CallFrameSlot::callee) { |
1135 | if (verbose) |
1136 | dataLog("Looks like a callee.\n" ); |
1137 | return true; |
1138 | } |
1139 | |
1140 | if (inlineCallFrame->isVarargs() |
1141 | && reg.offset() == CallFrameSlot::argumentCount) { |
1142 | if (verbose) |
1143 | dataLog("Looks like the argument count.\n" ); |
1144 | return true; |
1145 | } |
1146 | |
1147 | return false; |
1148 | } |
1149 | |
1150 | if (verbose) |
1151 | dataLog("Asking the bytecode liveness.\n" ); |
1152 | return livenessFor(inlineCallFrame).operandIsLive(reg.offset(), codeOriginPtr->bytecodeIndex()); |
1153 | } |
1154 | |
1155 | if (!inlineCallFrame) { |
1156 | if (verbose) |
1157 | dataLog("Ran out of stack, returning true.\n" ); |
1158 | return true; |
1159 | } |
1160 | |
1161 | // Arguments are always live. This would be redundant if it wasn't for our |
1162 | // op_call_varargs inlining. |
1163 | if (reg.isArgument() |
1164 | && static_cast<size_t>(reg.toArgument()) < inlineCallFrame->argumentsWithFixup.size()) { |
1165 | if (verbose) |
1166 | dataLog("Argument is live.\n" ); |
1167 | return true; |
1168 | } |
1169 | |
1170 | // We need to handle tail callers because we may decide to exit to the |
1171 | // the return bytecode following the tail call. |
1172 | codeOriginPtr = &inlineCallFrame->directCaller; |
1173 | } |
1174 | |
1175 | RELEASE_ASSERT_NOT_REACHED(); |
1176 | } |
1177 | |
1178 | BitVector Graph::localsLiveInBytecode(CodeOrigin codeOrigin) |
1179 | { |
1180 | BitVector result; |
1181 | result.ensureSize(block(0)->variablesAtHead.numberOfLocals()); |
1182 | forAllLocalsLiveInBytecode( |
1183 | codeOrigin, |
1184 | [&] (VirtualRegister reg) { |
1185 | ASSERT(reg.isLocal()); |
1186 | result.quickSet(reg.toLocal()); |
1187 | }); |
1188 | return result; |
1189 | } |
1190 | |
1191 | unsigned Graph::parameterSlotsForArgCount(unsigned argCount) |
1192 | { |
1193 | size_t frameSize = CallFrame::headerSizeInRegisters + argCount; |
1194 | size_t alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), frameSize); |
1195 | return alignedFrameSize - CallerFrameAndPC::sizeInRegisters; |
1196 | } |
1197 | |
1198 | unsigned Graph::frameRegisterCount() |
1199 | { |
1200 | unsigned result = m_nextMachineLocal + std::max(m_parameterSlots, static_cast<unsigned>(maxFrameExtentForSlowPathCallInRegisters)); |
1201 | return roundLocalRegisterCountForFramePointerOffset(result); |
1202 | } |
1203 | |
1204 | unsigned Graph::stackPointerOffset() |
1205 | { |
1206 | return virtualRegisterForLocal(frameRegisterCount() - 1).offset(); |
1207 | } |
1208 | |
1209 | unsigned Graph::requiredRegisterCountForExit() |
1210 | { |
1211 | unsigned count = JIT::frameRegisterCountFor(m_profiledBlock); |
1212 | for (InlineCallFrameSet::iterator iter = m_plan.inlineCallFrames()->begin(); !!iter; ++iter) { |
1213 | InlineCallFrame* inlineCallFrame = *iter; |
1214 | CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame); |
1215 | unsigned requiredCount = VirtualRegister(inlineCallFrame->stackOffset).toLocal() + 1 + JIT::frameRegisterCountFor(codeBlock); |
1216 | count = std::max(count, requiredCount); |
1217 | } |
1218 | return count; |
1219 | } |
1220 | |
1221 | unsigned Graph::requiredRegisterCountForExecutionAndExit() |
1222 | { |
1223 | // FIXME: We should make sure that frameRegisterCount() and requiredRegisterCountForExit() |
1224 | // never overflows. https://bugs.webkit.org/show_bug.cgi?id=173852 |
1225 | return std::max(frameRegisterCount(), requiredRegisterCountForExit()); |
1226 | } |
1227 | |
1228 | JSValue Graph::tryGetConstantProperty( |
1229 | JSValue base, const RegisteredStructureSet& structureSet, PropertyOffset offset) |
1230 | { |
1231 | if (!base || !base.isObject()) |
1232 | return JSValue(); |
1233 | |
1234 | JSObject* object = asObject(base); |
1235 | |
1236 | for (unsigned i = structureSet.size(); i--;) { |
1237 | RegisteredStructure structure = structureSet[i]; |
1238 | |
1239 | WatchpointSet* set = structure->propertyReplacementWatchpointSet(offset); |
1240 | if (!set || !set->isStillValid()) |
1241 | return JSValue(); |
1242 | |
1243 | ASSERT(structure->isValidOffset(offset)); |
1244 | ASSERT(!structure->isUncacheableDictionary()); |
1245 | |
1246 | watchpoints().addLazily(set); |
1247 | } |
1248 | |
1249 | // What follows may require some extra thought. We need this load to load a valid JSValue. If |
1250 | // our profiling makes sense and we're still on track to generate code that won't be |
1251 | // invalidated, then we have nothing to worry about. We do, however, have to worry about |
1252 | // loading - and then using - an invalid JSValue in the case that unbeknownst to us our code |
1253 | // is doomed. |
1254 | // |
1255 | // One argument in favor of this code is that it should definitely work because the butterfly |
1256 | // is always set before the structure. However, we don't currently have a fence between those |
1257 | // stores. It's not clear if this matters, however. We only shrink the propertyStorage while |
1258 | // holding the Structure's lock. So, for this to fail, you'd need an access on a constant |
1259 | // object pointer such that the inline caches told us that the object had a structure that it |
1260 | // did not *yet* have, and then later,the object transitioned to that structure that the inline |
1261 | // caches had already seen. And then the processor reordered the stores. Seems unlikely and |
1262 | // difficult to test. I believe that this is worth revisiting but it isn't worth losing sleep |
1263 | // over. Filed: |
1264 | // https://bugs.webkit.org/show_bug.cgi?id=134641 |
1265 | // |
1266 | // For now, we just do the minimal thing: defend against the structure right now being |
1267 | // incompatible with the getDirect we're trying to do. The easiest way to do that is to |
1268 | // determine if the structure belongs to the proven set. |
1269 | |
1270 | Structure* structure = object->structure(m_vm); |
1271 | if (!structureSet.toStructureSet().contains(structure)) |
1272 | return JSValue(); |
1273 | |
1274 | return object->getDirectConcurrently(structure, offset); |
1275 | } |
1276 | |
1277 | JSValue Graph::tryGetConstantProperty(JSValue base, Structure* structure, PropertyOffset offset) |
1278 | { |
1279 | return tryGetConstantProperty(base, RegisteredStructureSet(registerStructure(structure)), offset); |
1280 | } |
1281 | |
1282 | JSValue Graph::tryGetConstantProperty( |
1283 | JSValue base, const StructureAbstractValue& structure, PropertyOffset offset) |
1284 | { |
1285 | if (structure.isInfinite()) { |
1286 | // FIXME: If we just converted the offset to a uid, we could do ObjectPropertyCondition |
1287 | // watching to constant-fold the property. |
1288 | // https://bugs.webkit.org/show_bug.cgi?id=147271 |
1289 | return JSValue(); |
1290 | } |
1291 | |
1292 | return tryGetConstantProperty(base, structure.set(), offset); |
1293 | } |
1294 | |
1295 | JSValue Graph::tryGetConstantProperty(const AbstractValue& base, PropertyOffset offset) |
1296 | { |
1297 | return tryGetConstantProperty(base.m_value, base.m_structure, offset); |
1298 | } |
1299 | |
1300 | AbstractValue Graph::inferredValueForProperty( |
1301 | const AbstractValue& base, PropertyOffset offset, |
1302 | StructureClobberState clobberState) |
1303 | { |
1304 | if (JSValue value = tryGetConstantProperty(base, offset)) { |
1305 | AbstractValue result; |
1306 | result.set(*this, *freeze(value), clobberState); |
1307 | return result; |
1308 | } |
1309 | |
1310 | return AbstractValue::heapTop(); |
1311 | } |
1312 | |
1313 | JSValue Graph::tryGetConstantClosureVar(JSValue base, ScopeOffset offset) |
1314 | { |
1315 | // This has an awesome concurrency story. See comment for GetGlobalVar in ByteCodeParser. |
1316 | |
1317 | if (!base) |
1318 | return JSValue(); |
1319 | |
1320 | JSLexicalEnvironment* activation = jsDynamicCast<JSLexicalEnvironment*>(m_vm, base); |
1321 | if (!activation) |
1322 | return JSValue(); |
1323 | |
1324 | SymbolTable* symbolTable = activation->symbolTable(); |
1325 | JSValue value; |
1326 | WatchpointSet* set; |
1327 | { |
1328 | ConcurrentJSLocker locker(symbolTable->m_lock); |
1329 | |
1330 | SymbolTableEntry* entry = symbolTable->entryFor(locker, offset); |
1331 | if (!entry) |
1332 | return JSValue(); |
1333 | |
1334 | set = entry->watchpointSet(); |
1335 | if (!set) |
1336 | return JSValue(); |
1337 | |
1338 | if (set->state() != IsWatched) |
1339 | return JSValue(); |
1340 | |
1341 | ASSERT(entry->scopeOffset() == offset); |
1342 | value = activation->variableAt(offset).get(); |
1343 | if (!value) |
1344 | return JSValue(); |
1345 | } |
1346 | |
1347 | watchpoints().addLazily(set); |
1348 | |
1349 | return value; |
1350 | } |
1351 | |
1352 | JSValue Graph::tryGetConstantClosureVar(const AbstractValue& value, ScopeOffset offset) |
1353 | { |
1354 | return tryGetConstantClosureVar(value.m_value, offset); |
1355 | } |
1356 | |
1357 | JSValue Graph::tryGetConstantClosureVar(Node* node, ScopeOffset offset) |
1358 | { |
1359 | if (!node->hasConstant()) |
1360 | return JSValue(); |
1361 | return tryGetConstantClosureVar(node->asJSValue(), offset); |
1362 | } |
1363 | |
1364 | JSArrayBufferView* Graph::tryGetFoldableView(JSValue value) |
1365 | { |
1366 | if (!value) |
1367 | return nullptr; |
1368 | JSArrayBufferView* view = jsDynamicCast<JSArrayBufferView*>(m_vm, value); |
1369 | if (!view) |
1370 | return nullptr; |
1371 | if (!view->length()) |
1372 | return nullptr; |
1373 | WTF::loadLoadFence(); |
1374 | watchpoints().addLazily(view); |
1375 | return view; |
1376 | } |
1377 | |
1378 | JSArrayBufferView* Graph::tryGetFoldableView(JSValue value, ArrayMode arrayMode) |
1379 | { |
1380 | if (arrayMode.type() != Array::AnyTypedArray && arrayMode.typedArrayType() == NotTypedArray) |
1381 | return nullptr; |
1382 | return tryGetFoldableView(value); |
1383 | } |
1384 | |
1385 | void Graph::registerFrozenValues() |
1386 | { |
1387 | m_codeBlock->constants().shrink(0); |
1388 | m_codeBlock->constantsSourceCodeRepresentation().resize(0); |
1389 | for (FrozenValue* value : m_frozenValues) { |
1390 | if (!value->pointsToHeap()) |
1391 | continue; |
1392 | |
1393 | ASSERT(value->structure()); |
1394 | ASSERT(m_plan.weakReferences().contains(value->structure())); |
1395 | |
1396 | switch (value->strength()) { |
1397 | case WeakValue: { |
1398 | m_plan.weakReferences().addLazily(value->value().asCell()); |
1399 | break; |
1400 | } |
1401 | case StrongValue: { |
1402 | unsigned constantIndex = m_codeBlock->addConstantLazily(); |
1403 | // We already have a barrier on the code block. |
1404 | m_codeBlock->constants()[constantIndex].setWithoutWriteBarrier(value->value()); |
1405 | break; |
1406 | } } |
1407 | } |
1408 | m_codeBlock->constants().shrinkToFit(); |
1409 | m_codeBlock->constantsSourceCodeRepresentation().shrinkToFit(); |
1410 | } |
1411 | |
1412 | void Graph::visitChildren(SlotVisitor& visitor) |
1413 | { |
1414 | for (FrozenValue* value : m_frozenValues) { |
1415 | visitor.appendUnbarriered(value->value()); |
1416 | visitor.appendUnbarriered(value->structure()); |
1417 | } |
1418 | } |
1419 | |
1420 | FrozenValue* Graph::freeze(JSValue value) |
1421 | { |
1422 | if (UNLIKELY(!value)) |
1423 | return FrozenValue::emptySingleton(); |
1424 | |
1425 | // There are weird relationships in how optimized CodeBlocks |
1426 | // point to other CodeBlocks. We don't want to have them be |
1427 | // part of the weak pointer set. For example, an optimized CodeBlock |
1428 | // having a weak pointer to itself will cause it to get collected. |
1429 | RELEASE_ASSERT(!jsDynamicCast<CodeBlock*>(m_vm, value)); |
1430 | |
1431 | auto result = m_frozenValueMap.add(JSValue::encode(value), nullptr); |
1432 | if (LIKELY(!result.isNewEntry)) |
1433 | return result.iterator->value; |
1434 | |
1435 | if (value.isUInt32()) |
1436 | m_uint32ValuesInUse.append(value.asUInt32()); |
1437 | |
1438 | FrozenValue frozenValue = FrozenValue::freeze(value); |
1439 | if (Structure* structure = frozenValue.structure()) |
1440 | registerStructure(structure); |
1441 | |
1442 | return result.iterator->value = m_frozenValues.add(frozenValue); |
1443 | } |
1444 | |
1445 | FrozenValue* Graph::freezeStrong(JSValue value) |
1446 | { |
1447 | FrozenValue* result = freeze(value); |
1448 | result->strengthenTo(StrongValue); |
1449 | return result; |
1450 | } |
1451 | |
1452 | void Graph::convertToConstant(Node* node, FrozenValue* value) |
1453 | { |
1454 | if (value->structure()) |
1455 | assertIsRegistered(value->structure()); |
1456 | node->convertToConstant(value); |
1457 | } |
1458 | |
1459 | void Graph::convertToConstant(Node* node, JSValue value) |
1460 | { |
1461 | convertToConstant(node, freeze(value)); |
1462 | } |
1463 | |
1464 | void Graph::convertToStrongConstant(Node* node, JSValue value) |
1465 | { |
1466 | convertToConstant(node, freezeStrong(value)); |
1467 | } |
1468 | |
1469 | RegisteredStructure Graph::registerStructure(Structure* structure, StructureRegistrationResult& result) |
1470 | { |
1471 | m_plan.weakReferences().addLazily(structure); |
1472 | if (m_plan.watchpoints().consider(structure)) |
1473 | result = StructureRegisteredAndWatched; |
1474 | else |
1475 | result = StructureRegisteredNormally; |
1476 | return RegisteredStructure::createPrivate(structure); |
1477 | } |
1478 | |
1479 | void Graph::registerAndWatchStructureTransition(Structure* structure) |
1480 | { |
1481 | m_plan.weakReferences().addLazily(structure); |
1482 | m_plan.watchpoints().addLazily(structure->transitionWatchpointSet()); |
1483 | } |
1484 | |
1485 | void Graph::assertIsRegistered(Structure* structure) |
1486 | { |
1487 | // It's convenient to be able to call this with a maybe-null structure. |
1488 | if (!structure) |
1489 | return; |
1490 | |
1491 | DFG_ASSERT(*this, nullptr, m_plan.weakReferences().contains(structure)); |
1492 | |
1493 | if (!structure->dfgShouldWatch()) |
1494 | return; |
1495 | if (watchpoints().isWatched(structure->transitionWatchpointSet())) |
1496 | return; |
1497 | |
1498 | DFG_CRASH(*this, nullptr, toCString("Structure " , pointerDump(structure), " is watchable but isn't being watched." ).data()); |
1499 | } |
1500 | |
1501 | static void logDFGAssertionFailure( |
1502 | Graph& graph, const CString& whileText, const char* file, int line, const char* function, |
1503 | const char* assertion) |
1504 | { |
1505 | startCrashing(); |
1506 | dataLog("DFG ASSERTION FAILED: " , assertion, "\n" ); |
1507 | dataLog(file, "(" , line, ") : " , function, "\n" ); |
1508 | dataLog("\n" ); |
1509 | dataLog(whileText); |
1510 | dataLog("Graph at time of failure:\n" ); |
1511 | graph.dump(); |
1512 | dataLog("\n" ); |
1513 | dataLog("DFG ASSERTION FAILED: " , assertion, "\n" ); |
1514 | dataLog(file, "(" , line, ") : " , function, "\n" ); |
1515 | } |
1516 | |
1517 | void Graph::logAssertionFailure( |
1518 | std::nullptr_t, const char* file, int line, const char* function, const char* assertion) |
1519 | { |
1520 | logDFGAssertionFailure(*this, "" , file, line, function, assertion); |
1521 | } |
1522 | |
1523 | void Graph::logAssertionFailure( |
1524 | Node* node, const char* file, int line, const char* function, const char* assertion) |
1525 | { |
1526 | logDFGAssertionFailure(*this, toCString("While handling node " , node, "\n\n" ), file, line, function, assertion); |
1527 | } |
1528 | |
1529 | void Graph::logAssertionFailure( |
1530 | BasicBlock* block, const char* file, int line, const char* function, const char* assertion) |
1531 | { |
1532 | logDFGAssertionFailure(*this, toCString("While handling block " , pointerDump(block), "\n\n" ), file, line, function, assertion); |
1533 | } |
1534 | |
1535 | CPSCFG& Graph::ensureCPSCFG() |
1536 | { |
1537 | RELEASE_ASSERT(m_form != SSA && !m_isInSSAConversion); |
1538 | if (!m_cpsCFG) |
1539 | m_cpsCFG = std::make_unique<CPSCFG>(*this); |
1540 | return *m_cpsCFG; |
1541 | } |
1542 | |
1543 | CPSDominators& Graph::ensureCPSDominators() |
1544 | { |
1545 | RELEASE_ASSERT(m_form != SSA && !m_isInSSAConversion); |
1546 | if (!m_cpsDominators) |
1547 | m_cpsDominators = std::make_unique<CPSDominators>(*this); |
1548 | return *m_cpsDominators; |
1549 | } |
1550 | |
1551 | SSADominators& Graph::ensureSSADominators() |
1552 | { |
1553 | RELEASE_ASSERT(m_form == SSA || m_isInSSAConversion); |
1554 | if (!m_ssaDominators) |
1555 | m_ssaDominators = std::make_unique<SSADominators>(*this); |
1556 | return *m_ssaDominators; |
1557 | } |
1558 | |
1559 | CPSNaturalLoops& Graph::ensureCPSNaturalLoops() |
1560 | { |
1561 | RELEASE_ASSERT(m_form != SSA && !m_isInSSAConversion); |
1562 | ensureCPSDominators(); |
1563 | if (!m_cpsNaturalLoops) |
1564 | m_cpsNaturalLoops = std::make_unique<CPSNaturalLoops>(*this); |
1565 | return *m_cpsNaturalLoops; |
1566 | } |
1567 | |
1568 | SSANaturalLoops& Graph::ensureSSANaturalLoops() |
1569 | { |
1570 | RELEASE_ASSERT(m_form == SSA); |
1571 | ensureSSADominators(); |
1572 | if (!m_ssaNaturalLoops) |
1573 | m_ssaNaturalLoops = std::make_unique<SSANaturalLoops>(*this); |
1574 | return *m_ssaNaturalLoops; |
1575 | } |
1576 | |
1577 | BackwardsCFG& Graph::ensureBackwardsCFG() |
1578 | { |
1579 | // We could easily relax this in the future to work over CPS, but today, it's only used in SSA. |
1580 | RELEASE_ASSERT(m_form == SSA); |
1581 | if (!m_backwardsCFG) |
1582 | m_backwardsCFG = std::make_unique<BackwardsCFG>(*this); |
1583 | return *m_backwardsCFG; |
1584 | } |
1585 | |
1586 | BackwardsDominators& Graph::ensureBackwardsDominators() |
1587 | { |
1588 | RELEASE_ASSERT(m_form == SSA); |
1589 | if (!m_backwardsDominators) |
1590 | m_backwardsDominators = std::make_unique<BackwardsDominators>(*this); |
1591 | return *m_backwardsDominators; |
1592 | } |
1593 | |
1594 | ControlEquivalenceAnalysis& Graph::ensureControlEquivalenceAnalysis() |
1595 | { |
1596 | RELEASE_ASSERT(m_form == SSA); |
1597 | if (!m_controlEquivalenceAnalysis) |
1598 | m_controlEquivalenceAnalysis = std::make_unique<ControlEquivalenceAnalysis>(*this); |
1599 | return *m_controlEquivalenceAnalysis; |
1600 | } |
1601 | |
1602 | MethodOfGettingAValueProfile Graph::methodOfGettingAValueProfileFor(Node* currentNode, Node* operandNode) |
1603 | { |
1604 | // This represents IR like `CurrentNode(@operandNode)`. For example: `GetByVal(..., Int32:@GetLocal)`. |
1605 | |
1606 | for (Node* node = operandNode; node;) { |
1607 | // currentNode is null when we're doing speculation checks for checkArgumentTypes(). |
1608 | if (!currentNode || node->origin.semantic != currentNode->origin.semantic || !currentNode->hasResult()) { |
1609 | CodeBlock* profiledBlock = baselineCodeBlockFor(node->origin.semantic); |
1610 | |
1611 | if (node->accessesStack(*this)) { |
1612 | if (m_form != SSA && node->local().isArgument()) { |
1613 | int argument = node->local().toArgument(); |
1614 | Node* argumentNode = m_rootToArguments.find(block(0))->value[argument]; |
1615 | // FIXME: We should match SetArgumentDefinitely nodes at other entrypoints as well: |
1616 | // https://bugs.webkit.org/show_bug.cgi?id=175841 |
1617 | if (argumentNode && node->variableAccessData() == argumentNode->variableAccessData()) |
1618 | return &profiledBlock->valueProfileForArgument(argument); |
1619 | } |
1620 | |
1621 | if (node->op() == GetLocal) { |
1622 | return MethodOfGettingAValueProfile::fromLazyOperand( |
1623 | profiledBlock, |
1624 | LazyOperandValueProfileKey( |
1625 | node->origin.semantic.bytecodeIndex(), node->local())); |
1626 | } |
1627 | } |
1628 | |
1629 | if (node->hasHeapPrediction()) |
1630 | return &profiledBlock->valueProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex()); |
1631 | |
1632 | if (profiledBlock->hasBaselineJITProfiling()) { |
1633 | if (ArithProfile* result = profiledBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex())) |
1634 | return result; |
1635 | } |
1636 | } |
1637 | |
1638 | switch (node->op()) { |
1639 | case BooleanToNumber: |
1640 | case Identity: |
1641 | case ValueRep: |
1642 | case DoubleRep: |
1643 | case Int52Rep: |
1644 | node = node->child1().node(); |
1645 | break; |
1646 | default: |
1647 | node = nullptr; |
1648 | } |
1649 | } |
1650 | |
1651 | return MethodOfGettingAValueProfile(); |
1652 | } |
1653 | |
1654 | bool Graph::getRegExpPrototypeProperty(JSObject* regExpPrototype, Structure* regExpPrototypeStructure, UniquedStringImpl* uid, JSValue& returnJSValue) |
1655 | { |
1656 | unsigned attributesUnused; |
1657 | PropertyOffset offset = regExpPrototypeStructure->getConcurrently(uid, attributesUnused); |
1658 | if (!isValidOffset(offset)) |
1659 | return false; |
1660 | |
1661 | JSValue value = tryGetConstantProperty(regExpPrototype, regExpPrototypeStructure, offset); |
1662 | if (!value) |
1663 | return false; |
1664 | |
1665 | // We only care about functions and getters at this point. If you want to access other properties |
1666 | // you'll have to add code for those types. |
1667 | JSFunction* function = jsDynamicCast<JSFunction*>(m_vm, value); |
1668 | if (!function) { |
1669 | GetterSetter* getterSetter = jsDynamicCast<GetterSetter*>(m_vm, value); |
1670 | |
1671 | if (!getterSetter) |
1672 | return false; |
1673 | |
1674 | returnJSValue = JSValue(getterSetter); |
1675 | return true; |
1676 | } |
1677 | |
1678 | returnJSValue = value; |
1679 | return true; |
1680 | } |
1681 | |
1682 | bool Graph::isStringPrototypeMethodSane(JSGlobalObject* globalObject, UniquedStringImpl* uid) |
1683 | { |
1684 | ObjectPropertyConditionSet conditions = generateConditionsForPrototypeEquivalenceConcurrently(m_vm, globalObject, globalObject->stringObjectStructure(), globalObject->stringPrototype(), uid); |
1685 | |
1686 | if (!conditions.isValid()) |
1687 | return false; |
1688 | |
1689 | ObjectPropertyCondition equivalenceCondition = conditions.slotBaseCondition(); |
1690 | RELEASE_ASSERT(equivalenceCondition.hasRequiredValue()); |
1691 | JSFunction* function = jsDynamicCast<JSFunction*>(m_vm, equivalenceCondition.condition().requiredValue()); |
1692 | if (!function) |
1693 | return false; |
1694 | |
1695 | if (function->executable()->intrinsicFor(CodeForCall) != StringPrototypeValueOfIntrinsic) |
1696 | return false; |
1697 | |
1698 | return watchConditions(conditions); |
1699 | } |
1700 | |
1701 | |
1702 | bool Graph::canOptimizeStringObjectAccess(const CodeOrigin& codeOrigin) |
1703 | { |
1704 | if (hasExitSite(codeOrigin, BadCache) || hasExitSite(codeOrigin, BadConstantCache)) |
1705 | return false; |
1706 | |
1707 | JSGlobalObject* globalObject = globalObjectFor(codeOrigin); |
1708 | Structure* stringObjectStructure = globalObjectFor(codeOrigin)->stringObjectStructure(); |
1709 | registerStructure(stringObjectStructure); |
1710 | ASSERT(stringObjectStructure->storedPrototype().isObject()); |
1711 | ASSERT(stringObjectStructure->storedPrototype().asCell()->classInfo(*stringObjectStructure->storedPrototype().asCell()->vm()) == StringPrototype::info()); |
1712 | |
1713 | if (!watchConditions(generateConditionsForPropertyMissConcurrently(m_vm, globalObject, stringObjectStructure, m_vm.propertyNames->toPrimitiveSymbol.impl()))) |
1714 | return false; |
1715 | |
1716 | // We're being conservative here. We want DFG's ToString on StringObject to be |
1717 | // used in both numeric contexts (that would call valueOf()) and string contexts |
1718 | // (that would call toString()). We don't want the DFG to have to distinguish |
1719 | // between the two, just because that seems like it would get confusing. So we |
1720 | // just require both methods to be sane. |
1721 | if (!isStringPrototypeMethodSane(globalObject, m_vm.propertyNames->valueOf.impl())) |
1722 | return false; |
1723 | return isStringPrototypeMethodSane(globalObject, m_vm.propertyNames->toString.impl()); |
1724 | } |
1725 | |
1726 | bool Graph::willCatchExceptionInMachineFrame(CodeOrigin codeOrigin, CodeOrigin& opCatchOriginOut, HandlerInfo*& catchHandlerOut) |
1727 | { |
1728 | if (!m_hasExceptionHandlers) |
1729 | return false; |
1730 | |
1731 | unsigned bytecodeIndexToCheck = codeOrigin.bytecodeIndex(); |
1732 | while (1) { |
1733 | InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame(); |
1734 | CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame); |
1735 | if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeIndexToCheck)) { |
1736 | opCatchOriginOut = CodeOrigin(handler->target, inlineCallFrame); |
1737 | catchHandlerOut = handler; |
1738 | return true; |
1739 | } |
1740 | |
1741 | if (!inlineCallFrame) |
1742 | return false; |
1743 | |
1744 | bytecodeIndexToCheck = inlineCallFrame->directCaller.bytecodeIndex(); |
1745 | codeOrigin = inlineCallFrame->directCaller; |
1746 | } |
1747 | |
1748 | RELEASE_ASSERT_NOT_REACHED(); |
1749 | } |
1750 | |
1751 | bool Graph::canDoFastSpread(Node* node, const AbstractValue& value) |
1752 | { |
1753 | // The parameter 'value' is the AbstractValue for child1 (the thing being spread). |
1754 | ASSERT(node->op() == Spread); |
1755 | |
1756 | if (node->child1().useKind() != ArrayUse) { |
1757 | // Note: we only speculate on ArrayUse when we've set up the necessary watchpoints |
1758 | // to prove that the iteration protocol is non-observable starting from ArrayPrototype. |
1759 | return false; |
1760 | } |
1761 | |
1762 | // FIXME: We should add profiling of the incoming operand to Spread |
1763 | // so we can speculate in such a way that we guarantee that this |
1764 | // function would return true: |
1765 | // https://bugs.webkit.org/show_bug.cgi?id=171198 |
1766 | |
1767 | if (!value.m_structure.isFinite()) |
1768 | return false; |
1769 | |
1770 | ArrayPrototype* arrayPrototype = globalObjectFor(node->child1()->origin.semantic)->arrayPrototype(); |
1771 | bool allGood = true; |
1772 | value.m_structure.forEach([&] (RegisteredStructure structure) { |
1773 | allGood &= structure->hasMonoProto() |
1774 | && structure->storedPrototype() == arrayPrototype |
1775 | && !structure->isDictionary() |
1776 | && structure->getConcurrently(m_vm.propertyNames->iteratorSymbol.impl()) == invalidOffset |
1777 | && !structure->mayInterceptIndexedAccesses(); |
1778 | }); |
1779 | |
1780 | return allGood; |
1781 | } |
1782 | |
1783 | void Graph::clearCPSCFGData() |
1784 | { |
1785 | m_cpsNaturalLoops = nullptr; |
1786 | m_cpsDominators = nullptr; |
1787 | m_cpsCFG = nullptr; |
1788 | } |
1789 | |
1790 | } } // namespace JSC::DFG |
1791 | |
1792 | #endif // ENABLE(DFG_JIT) |
1793 | |