/*
 * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

26 | #include "config.h" |
27 | #include "DFGStoreBarrierInsertionPhase.h" |
28 | |
29 | #if ENABLE(DFG_JIT) |
30 | |
31 | #include "DFGAbstractInterpreterInlines.h" |
32 | #include "DFGBlockMapInlines.h" |
33 | #include "DFGDoesGC.h" |
34 | #include "DFGGraph.h" |
35 | #include "DFGInPlaceAbstractState.h" |
36 | #include "DFGInsertionSet.h" |
37 | #include "DFGPhase.h" |
38 | #include "JSCInlines.h" |
39 | #include <wtf/CommaPrinter.h> |
40 | #include <wtf/HashSet.h> |
41 | |
namespace JSC { namespace DFG {

namespace {

namespace DFGStoreBarrierInsertionPhaseInternal {
static const bool verbose = false;
}

enum class PhaseMode {
    // Does only a local analysis for store barrier insertion and assumes that pointers that are
    // live from predecessor blocks may need barriers. Assumes CPS conventions. Does not use AI
    // for eliminating store barriers, but makes a best effort to eliminate barriers when you're
    // storing a non-cell value, by using Node::result() and by looking at constants. The local
    // analysis is based on GC epochs, so it will eliminate a lot of locally redundant barriers.
    Fast,

    // Does a global analysis for store barrier insertion. Reuses the GC-epoch-based analysis
    // used by Fast, but adds a conservative merge rule for propagating information from one
    // block to the next. This will ensure, for example, that if a value V coming into a block B
    // from multiple predecessors didn't need any more barriers at the end of each predecessor
    // (either because it was the last allocated object in that predecessor or because it just
    // had a barrier executed), then until we hit another GC point in B, we won't need another
    // barrier on V. Uses AI for eliminating barriers when we know that the value being stored
    // is not a cell. Assumes SSA conventions.
    Global
};
68 | |
69 | template<PhaseMode mode> |
70 | class StoreBarrierInsertionPhase : public Phase { |
71 | public: |
72 | StoreBarrierInsertionPhase(Graph& graph) |
73 | : Phase(graph, mode == PhaseMode::Fast ? "fast store barrier insertion" : "global store barrier insertion" ) |
74 | , m_insertionSet(graph) |
75 | { |
76 | } |
77 | |
    bool run()
    {
        if (DFGStoreBarrierInsertionPhaseInternal::verbose) {
            dataLog("Starting store barrier insertion:\n");
            m_graph.dump();
        }

        switch (mode) {
        case PhaseMode::Fast: {
            DFG_ASSERT(m_graph, nullptr, m_graph.m_form != SSA);

            m_graph.clearEpochs();
            for (BasicBlock* block : m_graph.blocksInNaturalOrder())
                handleBlock(block);
            return true;
        }

        case PhaseMode::Global: {
            DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA);

            m_state = std::make_unique<InPlaceAbstractState>(m_graph);
            m_interpreter = std::make_unique<AbstractInterpreter<InPlaceAbstractState>>(m_graph, *m_state);

            m_isConverged = false;

            // First run the analysis. Inside basic blocks we use an epoch-based analysis that
            // is very precise. At block boundaries, we just propagate which nodes may need a
            // barrier. This gives us a very nice bottom->top fixpoint: we start out assuming
            // that no node needs any barriers at block boundaries, and then we converge
            // towards believing that all nodes need barriers. "Needing a barrier" is like
            // saying that the node is in a past epoch. "Not needing a barrier" is like saying
            // that the node is in the current epoch.
            m_stateAtHead = std::make_unique<BlockMap<HashSet<Node*>>>(m_graph);
            m_stateAtTail = std::make_unique<BlockMap<HashSet<Node*>>>(m_graph);

            BlockList postOrder = m_graph.blocksInPostOrder();

            bool changed = true;
            while (changed) {
                changed = false;

                // Intentional backwards loop because we are using RPO.
                for (unsigned blockIndex = postOrder.size(); blockIndex--;) {
                    BasicBlock* block = postOrder[blockIndex];

                    if (!handleBlock(block)) {
                        // If the block didn't finish, then it cannot affect the fixpoint.
                        continue;
                    }

                    // Construct the state-at-tail based on the epochs of live nodes and the
                    // current epoch. We grow state-at-tail monotonically to ensure convergence.
                    bool thisBlockChanged = false;
                    for (NodeFlowProjection node : block->ssa->liveAtTail) {
                        if (node.kind() == NodeFlowProjection::Shadow)
                            continue;
                        if (node->epoch() != m_currentEpoch) {
                            // If the node is older than the current epoch, then we may need to
                            // run a barrier on it in the future. So, add it to the state.
                            thisBlockChanged |= m_stateAtTail->at(block).add(node.node()).isNewEntry;
                        }
                    }

                    if (!thisBlockChanged) {
                        // This iteration didn't learn anything new about this block.
                        continue;
                    }

                    // Changed things. Make sure that we loop one more time.
                    changed = true;

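                    // Merge this block's tail state into each successor's head state, so that
                    // the next iteration of the fixpoint sees it.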
                    for (BasicBlock* successor : block->successors()) {
                        for (Node* node : m_stateAtTail->at(block))
                            m_stateAtHead->at(successor).add(node);
                    }
                }
            }

            // Tell handleBlock() that it's time to actually insert barriers for real.
            m_isConverged = true;

            for (BasicBlock* block : m_graph.blocksInNaturalOrder())
                handleBlock(block);

            return true;
        } }

        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }

private:
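    // Analyzes one block and, when reallyInsertBarriers() is true, also inserts the barriers it
    // decided on. Returns false only in Global mode, when CFA never reached the block or the
    // abstract interpreter could not finish it; the fixpoint loop above skips such blocks.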
    bool handleBlock(BasicBlock* block)
    {
        if (DFGStoreBarrierInsertionPhaseInternal::verbose) {
            dataLog("Dealing with block ", pointerDump(block), "\n");
            if (reallyInsertBarriers())
                dataLog("    Really inserting barriers.\n");
        }

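        // Each block starts over at the first epoch. Epochs are only meaningful within a block:
        // Global mode carries cross-block information in m_stateAtHead, while Fast mode
        // conservatively assumes that values from other blocks may need barriers.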
        m_currentEpoch = Epoch::first();

        if (mode == PhaseMode::Global) {
            if (!block->cfaHasVisited)
                return false;
            m_state->beginBasicBlock(block);

            for (NodeFlowProjection node : block->ssa->liveAtHead) {
                if (node.kind() == NodeFlowProjection::Shadow)
                    continue;
                if (m_stateAtHead->at(block).contains(node.node())) {
                    // If previous blocks tell us that this node may need a barrier in the
                    // future, then put it in the ancient primordial epoch. This forces us to
                    // emit a barrier on any possibly-cell store, regardless of the epoch of the
                    // stored value.
                    node->setEpoch(Epoch());
                } else {
                    // If previous blocks aren't requiring us to run a barrier on this node,
                    // then put it in the current epoch. This means that we will skip barriers
                    // on this node so long as we don't allocate. It also means that we won't
                    // run barriers on stores of one such node into another such node. That's
                    // fine, because nodes would be excluded from the state set if at the tails
                    // of all predecessors they always had the current epoch.
                    node->setEpoch(m_currentEpoch);
                }
            }
        }

        bool result = true;

        for (m_nodeIndex = 0; m_nodeIndex < block->size(); ++m_nodeIndex) {
            m_node = block->at(m_nodeIndex);

            if (DFGStoreBarrierInsertionPhaseInternal::verbose) {
                dataLog(
                    "    ", m_currentEpoch, ": Looking at node ", m_node, " with children: ");
                CommaPrinter comma;
                m_graph.doToChildren(
                    m_node,
                    [&] (Edge edge) {
                        dataLog(comma, edge, " (", edge->epoch(), ")");
                    });
                dataLog("\n");
            }

            if (mode == PhaseMode::Global) {
                // Execute edges separately because we don't want to insert barriers if the
                // operation doing the store does a check that ensures that the child is not
                // a cell.
                m_interpreter->startExecuting();
                m_interpreter->executeEdges(m_node);
            }

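            // First, consider the stores (if any) that this node performs, and decide whether
            // each one needs a barrier based on the base's epoch and on whether the stored
            // value can be a cell.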
            switch (m_node->op()) {
            case PutByValDirect:
            case PutByVal:
            case PutByValAlias: {
                switch (m_node->arrayMode().modeForPut().type()) {
                case Array::Contiguous:
                case Array::ArrayStorage:
                case Array::SlowPutArrayStorage: {
                    Edge child1 = m_graph.varArgChild(m_node, 0);
                    Edge child3 = m_graph.varArgChild(m_node, 2);
                    considerBarrier(child1, child3);
                    break;
                }
                default:
                    break;
                }
                break;
            }

            case ArrayPush: {
                switch (m_node->arrayMode().type()) {
                case Array::Contiguous:
                case Array::ArrayStorage: {
                    unsigned elementOffset = 2;
                    unsigned elementCount = m_node->numChildren() - elementOffset;
                    Edge& arrayEdge = m_graph.varArgChild(m_node, 1);
                    for (unsigned i = 0; i < elementCount; ++i) {
                        Edge& element = m_graph.varArgChild(m_node, i + elementOffset);
                        considerBarrier(arrayEdge, element);
                    }
                    break;
                }
                default:
                    break;
                }
                break;
            }

            case PutById:
            case PutByIdFlush:
            case PutByIdDirect:
            case PutStructure: {
                considerBarrier(m_node->child1());
                break;
            }

            case RecordRegExpCachedResult: {
                considerBarrier(m_graph.varArgChild(m_node, 0));
                break;
            }

            case PutClosureVar:
            case PutToArguments:
            case SetRegExpObjectLastIndex: {
                considerBarrier(m_node->child1(), m_node->child2());
                break;
            }

            case MultiPutByOffset: {
                considerBarrier(m_node->child1());
                break;
            }

            case PutByOffset: {
                considerBarrier(m_node->child2(), m_node->child3());
                break;
            }

            case PutGlobalVariable: {
                considerBarrier(m_node->child1(), m_node->child2());
                break;
            }

            case SetFunctionName: {
                considerBarrier(m_node->child1(), m_node->child2());
                break;
            }

            case NukeStructureAndSetButterfly: {
                considerBarrier(m_node->child1());
                break;
            }

            default:
                break;
            }

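            // If this node can trigger a GC, then any object allocated before it may no longer
            // be new. Bumping the epoch means that later stores into those objects will get
            // barriers again.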
            if (doesGC(m_graph, m_node))
                m_currentEpoch.bump();

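            // Finally, classify this node's own result: its epoch determines whether later
            // stores into it will need a barrier.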
            switch (m_node->op()) {
            case NewObject:
            case NewArray:
            case NewArrayWithSize:
            case NewArrayBuffer:
            case NewTypedArray:
            case NewRegexp:
            case NewStringObject:
            case NewSymbol:
            case MaterializeNewObject:
            case MaterializeCreateActivation:
            case MakeRope:
            case CreateActivation:
            case CreateDirectArguments:
            case CreateScopedArguments:
            case CreateClonedArguments:
            case NewFunction:
            case NewGeneratorFunction:
            case NewAsyncGeneratorFunction:
            case NewAsyncFunction:
            case AllocatePropertyStorage:
            case ReallocatePropertyStorage:
                // Nodes that allocate get to set their epoch because for those nodes we know
                // that they will be the newest object in the heap.
                m_node->setEpoch(m_currentEpoch);
                break;

            case Upsilon:
                // Assume the worst for Phis so that we don't have to worry about Phi shadows.
                m_node->phi()->setEpoch(Epoch());
                m_node->setEpoch(Epoch());
                break;

            default:
                // For nodes that aren't guaranteed to allocate, we say that their return value
                // (if there is one) could be arbitrarily old.
                m_node->setEpoch(Epoch());
                break;
            }

            if (DFGStoreBarrierInsertionPhaseInternal::verbose) {
                dataLog(
                    "    ", m_currentEpoch, ": Done with node ", m_node, " (", m_node->epoch(),
                    ") with children: ");
                CommaPrinter comma;
                m_graph.doToChildren(
                    m_node,
                    [&] (Edge edge) {
                        dataLog(comma, edge, " (", edge->epoch(), ")");
                    });
                dataLog("\n");
            }

            if (mode == PhaseMode::Global) {
                if (!m_interpreter->executeEffects(m_nodeIndex, m_node)) {
                    result = false;
                    break;
                }
            }
        }

        if (mode == PhaseMode::Global)
            m_state->reset();

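        // Splice the queued barrier nodes into the block. In Global mode this only happens on
        // the final pass, after the fixpoint has converged.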
        if (reallyInsertBarriers())
            m_insertionSet.execute(block);

        return result;
    }

    void considerBarrier(Edge base, Edge child)
    {
        if (DFGStoreBarrierInsertionPhaseInternal::verbose)
            dataLog("        Considering adding barrier ", base, " => ", child, "\n");

        // We don't need a store barrier if the child is guaranteed to not be a cell.
        switch (mode) {
        case PhaseMode::Fast: {
            // Don't try too hard because it's too expensive to run AI.
            if (child->hasConstant()) {
                if (!child->asJSValue().isCell()) {
                    if (DFGStoreBarrierInsertionPhaseInternal::verbose)
                        dataLog("            Rejecting because of constant type.\n");
                    return;
                }
            } else {
                switch (child->result()) {
                case NodeResultNumber:
                case NodeResultDouble:
                case NodeResultInt32:
                case NodeResultInt52:
                case NodeResultBoolean:
                    if (DFGStoreBarrierInsertionPhaseInternal::verbose)
                        dataLog("            Rejecting because of result type.\n");
                    return;
                default:
                    break;
                }
            }
            break;
        }

        case PhaseMode::Global: {
            // Go into rage mode to eliminate any chance of a barrier with a non-cell child. We
            // can afford to keep around AI in Global mode.
            if (!m_interpreter->needsTypeCheck(child, ~SpecCell)) {
                if (DFGStoreBarrierInsertionPhaseInternal::verbose)
                    dataLog("            Rejecting because of AI type.\n");
                return;
            }
            break;
        } }

        considerBarrier(base);
    }

    void considerBarrier(Edge base)
    {
        if (DFGStoreBarrierInsertionPhaseInternal::verbose)
            dataLog("        Considering adding barrier on ", base, "\n");

        // We don't need a store barrier if the epoch of the base is identical to the current
        // epoch. That means that we either just allocated the object and so it's guaranteed to
        // be in newgen, or we just ran a barrier on it so it's guaranteed to be remembered
        // already.
        if (base->epoch() == m_currentEpoch) {
            if (DFGStoreBarrierInsertionPhaseInternal::verbose)
                dataLog("            Rejecting because it's in the current epoch.\n");
            return;
        }

        if (DFGStoreBarrierInsertionPhaseInternal::verbose)
            dataLog("            Inserting barrier.\n");
        insertBarrier(m_nodeIndex + 1, base);
    }

    void insertBarrier(unsigned nodeIndex, Edge base)
    {
        // This is just our way of saying that barriers are not redundant with each other in
        // this forward analysis: if we proved once that a barrier was necessary on a node, then
        // it will be necessary the next time as well.
        base->setEpoch(Epoch());

        // If we're in global mode, we should only insert the barriers once we have converged.
        if (!reallyInsertBarriers())
            return;

        // FIXME: We could support StoreBarrier(UntypedUse:). That would be sort of cool.
        // But right now we don't need it.

        DFG_ASSERT(m_graph, m_node, isCell(base.useKind()), m_node->op(), base.useKind());

        // Barriers are always inserted after the node that they service. Therefore, we always
        // know that the thing is a cell now.
        base.setUseKind(KnownCellUse);

        NodeOrigin origin = m_node->origin;
        if (clobbersExitState(m_graph, m_node))
            origin = origin.withInvalidExit();

        m_insertionSet.insertNode(nodeIndex, SpecNone, FencedStoreBarrier, origin, base);
    }

    bool reallyInsertBarriers()
    {
        return mode == PhaseMode::Fast || m_isConverged;
    }

    InsertionSet m_insertionSet;
    Epoch m_currentEpoch;
    unsigned m_nodeIndex;
    Node* m_node;

    // Things we only use in Global mode.
    std::unique_ptr<InPlaceAbstractState> m_state;
    std::unique_ptr<AbstractInterpreter<InPlaceAbstractState>> m_interpreter;
    std::unique_ptr<BlockMap<HashSet<Node*>>> m_stateAtHead;
    std::unique_ptr<BlockMap<HashSet<Node*>>> m_stateAtTail;
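    // False while the Global fixpoint is still running; set to true for the final pass, which
    // is when handleBlock() actually inserts barriers. Never read in Fast mode.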
    bool m_isConverged;
};

} // anonymous namespace

bool performFastStoreBarrierInsertion(Graph& graph)
{
    return runPhase<StoreBarrierInsertionPhase<PhaseMode::Fast>>(graph);
}

bool performGlobalStoreBarrierInsertion(Graph& graph)
{
    return runPhase<StoreBarrierInsertionPhase<PhaseMode::Global>>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)