/*
 * Copyright (C) 2015-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGStoreBarrierInsertionPhase.h"

#if ENABLE(DFG_JIT)

#include "DFGAbstractInterpreterInlines.h"
#include "DFGBlockMapInlines.h"
#include "DFGDoesGC.h"
#include "DFGGraph.h"
#include "DFGInPlaceAbstractState.h"
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
#include "JSCInlines.h"
#include <wtf/CommaPrinter.h>
#include <wtf/HashSet.h>

namespace JSC { namespace DFG {

namespace {

namespace DFGStoreBarrierInsertionPhaseInternal {
static constexpr bool verbose = false;
}

enum class PhaseMode {
    // Does only a local analysis for store barrier insertion and assumes that pointers that are
    // live coming in from predecessor blocks may need barriers. Assumes CPS conventions. Does
    // not use AI for eliminating store barriers, but makes a best effort to eliminate barriers
    // when storing a non-cell value, by using Node::result() and by looking at constants. The
    // local analysis is based on GC epochs, so it will eliminate a lot of locally redundant
    // barriers.
    Fast,
    
    // Does a global analysis for store barrier insertion. Reuses the GC-epoch-based analysis
    // used by Fast, but adds a conservative merge rule for propagating information from one
    // block to the next. This ensures, for example, that if a value V flowing into block B from
    // multiple predecessors didn't need any more barriers at the end of each of those
    // predecessors (either because it was the last allocated object in that predecessor or
    // because it just had a barrier executed), then until we hit another GC point in B, we
    // won't need another barrier on V. Uses AI for eliminating barriers when we know that the
    // value being stored is not a cell. Assumes SSA conventions.
    Global
};
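
// To illustrate the epoch scheme that both modes share: an allocation puts its result in the
// current epoch, a barrier puts its base back into the current epoch, and any node that can GC
// starts a new epoch. So, in a block like this (the node names and the "_" storage child are
// just for illustration):
//
//     a: NewObject                // a enters the current epoch
//     b: PutByOffset(_, a, v)     // a is still current => no barrier on a
//     c: Call(...)                // can GC => the epoch gets bumped
//     d: PutByOffset(_, a, v)     // a is now from an older epoch => barrier on a
//
// the only barrier we insert is the one after d.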

template<PhaseMode mode>
class StoreBarrierInsertionPhase : public Phase {
public:
    StoreBarrierInsertionPhase(Graph& graph)
        : Phase(graph, mode == PhaseMode::Fast ? "fast store barrier insertion" : "global store barrier insertion")
        , m_insertionSet(graph)
    {
    }
    
    bool run()
    {
        if (DFGStoreBarrierInsertionPhaseInternal::verbose) {
            dataLog("Starting store barrier insertion:\n");
            m_graph.dump();
        }
        
        switch (mode) {
        case PhaseMode::Fast: {
            DFG_ASSERT(m_graph, nullptr, m_graph.m_form != SSA);
            
            m_graph.clearEpochs();
            for (BasicBlock* block : m_graph.blocksInNaturalOrder())
                handleBlock(block);
            return true;
        }
        
        case PhaseMode::Global: {
            DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA);
            
            m_state = makeUnique<InPlaceAbstractState>(m_graph);
            m_interpreter = makeUnique<AbstractInterpreter<InPlaceAbstractState>>(m_graph, *m_state);
            
            m_isConverged = false;
            
            // First run the analysis. Inside basic blocks we use an epoch-based analysis that
            // is very precise. At block boundaries, we just propagate which nodes may need a
            // barrier. This gives us a very nice bottom->top fixpoint: we start out assuming
            // that no node needs any barriers at block boundaries, and then we converge
            // towards believing that all nodes need barriers. "Needing a barrier" is like
            // saying that the node is in a past epoch. "Not needing a barrier" is like saying
            // that the node is in the current epoch.
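            //
            // For example, if a value V has the current epoch at the tail of every predecessor
            // of a block B (it was just allocated or just barriered in each one), then V never
            // gets added to B's head state, so it starts out in the current epoch in B as well,
            // and stores into it need no barrier until B hits a GC point.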
            m_stateAtHead = makeUnique<BlockMap<HashSet<Node*>>>(m_graph);
            m_stateAtTail = makeUnique<BlockMap<HashSet<Node*>>>(m_graph);
            
            BlockList postOrder = m_graph.blocksInPostOrder();
            
            bool changed = true;
            while (changed) {
                changed = false;
                
                // Intentional backwards loop: walking the post-order list in reverse visits
                // the blocks in reverse post order (RPO), which is what a forward analysis wants.
                for (unsigned blockIndex = postOrder.size(); blockIndex--;) {
                    BasicBlock* block = postOrder[blockIndex];
                    
                    if (!handleBlock(block)) {
                        // If the block didn't finish, then it cannot affect the fixpoint.
                        continue;
                    }
                    
                    // Construct the state-at-tail based on the epochs of live nodes and the
                    // current epoch. We grow state-at-tail monotonically to ensure convergence.
                    bool thisBlockChanged = false;
                    for (NodeFlowProjection node : block->ssa->liveAtTail) {
                        if (node.kind() == NodeFlowProjection::Shadow)
                            continue;
                        if (node->epoch() != m_currentEpoch) {
                            // If the node isn't in the current epoch, then we may need to run
                            // a barrier on it in the future. So, add it to the state.
                            thisBlockChanged |= m_stateAtTail->at(block).add(node.node()).isNewEntry;
                        }
                    }
                    
                    if (!thisBlockChanged) {
                        // This iteration didn't learn anything new about this block.
                        continue;
                    }
                    
                    // Changed things. Make sure that we loop one more time.
                    changed = true;
                    
                    for (BasicBlock* successor : block->successors()) {
                        for (Node* node : m_stateAtTail->at(block))
                            m_stateAtHead->at(successor).add(node);
                    }
                }
            }
            
            // Tell handleBlock() that it's time to actually insert barriers for real.
            m_isConverged = true;
            
            for (BasicBlock* block : m_graph.blocksInNaturalOrder())
                handleBlock(block);
            
            return true;
        } }
        
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }
    
private:
    bool handleBlock(BasicBlock* block)
    {
        if (DFGStoreBarrierInsertionPhaseInternal::verbose) {
            dataLog("Dealing with block ", pointerDump(block), "\n");
            if (reallyInsertBarriers())
                dataLog("    Really inserting barriers.\n");
        }
        
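        // Each block's analysis starts over in the first epoch. In Fast mode, clearEpochs()
        // has already reset every node to the null epoch, so anything live from a predecessor
        // conservatively looks like it may need a barrier. In Global mode, the loop below
        // assigns incoming epochs from the merged head state instead.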
        m_currentEpoch = Epoch::first();
        
        if (mode == PhaseMode::Global) {
            if (!block->cfaHasVisited)
                return false;
            m_state->beginBasicBlock(block);
            
            for (NodeFlowProjection node : block->ssa->liveAtHead) {
                if (node.kind() == NodeFlowProjection::Shadow)
                    continue;
                if (m_stateAtHead->at(block).contains(node.node())) {
                    // If previous blocks tell us that this node may need a barrier in the
                    // future, then put it in the ancient primordial epoch. This forces us to
                    // emit a barrier on any possibly-cell store, regardless of the epoch of the
                    // stored value.
                    node->setEpoch(Epoch());
                } else {
                    // If previous blocks aren't requiring us to run a barrier on this node,
                    // then put it in the current epoch. This means that we will skip barriers
                    // on this node so long as we don't hit a GC point. It also means that we
                    // won't run barriers on stores of one such node into another such node.
                    // That's fine, because a node is only excluded from the state set if it
                    // had the current epoch at the tails of all of its predecessors.
                    node->setEpoch(m_currentEpoch);
                }
            }
        }
        
        bool result = true;
        
        for (m_nodeIndex = 0; m_nodeIndex < block->size(); ++m_nodeIndex) {
            m_node = block->at(m_nodeIndex);
            
            if (DFGStoreBarrierInsertionPhaseInternal::verbose) {
                dataLog(
                    "    ", m_currentEpoch, ": Looking at node ", m_node, " with children: ");
                CommaPrinter comma;
                m_graph.doToChildren(
                    m_node,
                    [&] (Edge edge) {
                        dataLog(comma, edge, " (", edge->epoch(), ")");
                    });
                dataLog("\n");
            }
            
            if (mode == PhaseMode::Global) {
                // Execute edges separately because we don't want to insert barriers if the
                // operation doing the store does a check that ensures that the child is not
                // a cell.
                m_interpreter->startExecuting();
                m_interpreter->executeEdges(m_node);
            }
            
            switch (m_node->op()) {
            case PutByValDirect:
            case PutByVal:
            case PutByValAlias: {
                switch (m_node->arrayMode().modeForPut().type()) {
                case Array::Contiguous:
                case Array::ArrayStorage:
                case Array::SlowPutArrayStorage: {
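                    // For the PutByVal family, vararg child 0 is the base, vararg child 1 is
                    // the index, and vararg child 2 is the stored value. The index never needs
                    // a barrier; only the base/value pair below matters.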
                    Edge child1 = m_graph.varArgChild(m_node, 0);
                    Edge child3 = m_graph.varArgChild(m_node, 2);
                    considerBarrier(child1, child3);
                    break;
                }
                default:
                    break;
                }
                break;
            }
            
            case ArrayPush: {
                switch (m_node->arrayMode().type()) {
                case Array::Contiguous:
                case Array::ArrayStorage: {
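                    // ArrayPush stores each pushed element into the array: the array is vararg
                    // child 1 and the elements start at vararg offset 2, so we consider a
                    // barrier for each (array, element) pair.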
                    unsigned elementOffset = 2;
                    unsigned elementCount = m_node->numChildren() - elementOffset;
                    Edge& arrayEdge = m_graph.varArgChild(m_node, 1);
                    for (unsigned i = 0; i < elementCount; ++i) {
                        Edge& element = m_graph.varArgChild(m_node, i + elementOffset);
                        considerBarrier(arrayEdge, element);
                    }
                    break;
                }
                default:
                    break;
                }
                break;
            }
            
            case PutById:
            case PutByIdFlush:
            case PutByIdDirect:
            case PutStructure: {
                considerBarrier(m_node->child1());
                break;
            }
            
            case RecordRegExpCachedResult: {
                considerBarrier(m_graph.varArgChild(m_node, 0));
                break;
            }
            
            case PutClosureVar:
            case PutToArguments:
            case SetRegExpObjectLastIndex:
            case PutInternalField: {
                considerBarrier(m_node->child1(), m_node->child2());
                break;
            }
            
            case MultiPutByOffset: {
                considerBarrier(m_node->child1());
                break;
            }
            
            case PutByOffset: {
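                // PutByOffset's child1 is the property storage, child2 is the base object, and
                // child3 is the stored value; the base/value pair is what the barrier cares about.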
                considerBarrier(m_node->child2(), m_node->child3());
                break;
            }
            
            case PutGlobalVariable: {
                considerBarrier(m_node->child1(), m_node->child2());
                break;
            }
            
            case SetFunctionName: {
                considerBarrier(m_node->child1(), m_node->child2());
                break;
            }
            
            case NukeStructureAndSetButterfly: {
                considerBarrier(m_node->child1());
                break;
            }
            
            default:
                break;
            }
            
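            // A node that can GC ends the current epoch: anything allocated before it may now
            // be old (and not in the remembered set), so stores into it need barriers again.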
            if (doesGC(m_graph, m_node))
                m_currentEpoch.bump();
            
            switch (m_node->op()) {
            case NewObject:
            case NewPromise:
            case NewGenerator:
            case NewAsyncGenerator:
            case NewArray:
            case NewArrayWithSize:
            case NewArrayBuffer:
            case NewTypedArray:
            case NewRegexp:
            case NewStringObject:
            case NewSymbol:
            case MaterializeNewObject:
            case MaterializeCreateActivation:
            case MakeRope:
            case CreateActivation:
            case CreateDirectArguments:
            case CreateScopedArguments:
            case CreateClonedArguments:
            case NewFunction:
            case NewGeneratorFunction:
            case NewAsyncGeneratorFunction:
            case NewAsyncFunction:
            case AllocatePropertyStorage:
            case ReallocatePropertyStorage:
                // Nodes that allocate get to be in the current epoch: we know that their
                // result is the newest object in the heap, so it's guaranteed to be in newgen.
                m_node->setEpoch(m_currentEpoch);
                break;
                
            case Upsilon:
                // Assume the worst for Phis so that we don't have to worry about Phi shadows.
                m_node->phi()->setEpoch(Epoch());
                m_node->setEpoch(Epoch());
                break;
                
            default:
                // For nodes that aren't guaranteed to allocate, we say that their return value
                // (if there is one) could be arbitrarily old.
                m_node->setEpoch(Epoch());
                break;
            }
            
            if (DFGStoreBarrierInsertionPhaseInternal::verbose) {
                dataLog(
                    "    ", m_currentEpoch, ": Done with node ", m_node, " (", m_node->epoch(),
                    ") with children: ");
                CommaPrinter comma;
                m_graph.doToChildren(
                    m_node,
                    [&] (Edge edge) {
                        dataLog(comma, edge, " (", edge->epoch(), ")");
                    });
                dataLog("\n");
            }
            
            if (mode == PhaseMode::Global) {
                if (!m_interpreter->executeEffects(m_nodeIndex, m_node)) {
                    result = false;
                    break;
                }
            }
        }
        
        if (mode == PhaseMode::Global)
            m_state->reset();
        
        if (reallyInsertBarriers())
            m_insertionSet.execute(block);
        
        return result;
    }
    
    void considerBarrier(Edge base, Edge child)
    {
        if (DFGStoreBarrierInsertionPhaseInternal::verbose)
            dataLog("        Considering adding barrier ", base, " => ", child, "\n");
        
        // We don't need a store barrier if the child is guaranteed to not be a cell.
        switch (mode) {
        case PhaseMode::Fast: {
            // Don't try too hard because it's too expensive to run AI.
            if (child->hasConstant()) {
                if (!child->asJSValue().isCell()) {
                    if (DFGStoreBarrierInsertionPhaseInternal::verbose)
                        dataLog("            Rejecting because of constant type.\n");
                    return;
                }
            } else {
                switch (child->result()) {
                case NodeResultNumber:
                case NodeResultDouble:
                case NodeResultInt32:
                case NodeResultInt52:
                case NodeResultBoolean:
                    if (DFGStoreBarrierInsertionPhaseInternal::verbose)
                        dataLog("            Rejecting because of result type.\n");
                    return;
                default:
                    break;
                }
            }
            break;
        }
        
        case PhaseMode::Global: {
            // Go into rage mode to eliminate any chance of a barrier with a non-cell child. We
            // can afford to keep around AI in Global mode.
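            // needsTypeCheck(child, ~SpecCell) is false exactly when AI has proven that the
            // child's type fits in ~SpecCell, i.e. the stored value cannot be a cell.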
            if (!m_interpreter->needsTypeCheck(child, ~SpecCell)) {
                if (DFGStoreBarrierInsertionPhaseInternal::verbose)
                    dataLog("            Rejecting because of AI type.\n");
                return;
            }
            break;
        } }
        
        considerBarrier(base);
    }
    
    void considerBarrier(Edge base)
    {
        if (DFGStoreBarrierInsertionPhaseInternal::verbose)
            dataLog("        Considering adding barrier on ", base, "\n");
        
        // We don't need a store barrier if the epoch of the base is identical to the current
        // epoch. That means that we either just allocated the object and so it's guaranteed to
        // be in newgen, or we just ran a barrier on it so it's guaranteed to be remembered
        // already.
        if (base->epoch() == m_currentEpoch) {
            if (DFGStoreBarrierInsertionPhaseInternal::verbose)
                dataLog("            Rejecting because it's in the current epoch.\n");
            return;
        }
        
        if (DFGStoreBarrierInsertionPhaseInternal::verbose)
            dataLog("            Inserting barrier.\n");
        insertBarrier(m_nodeIndex + 1, base);
    }
    
    void insertBarrier(unsigned nodeIndex, Edge base)
    {
        // Inserting a barrier means that the object is now remembered. So, until we hit
        // another GC point, any further barrier on this base would be redundant with this one.
        // Putting the base in the current epoch is our way of saying that.
        base->setEpoch(m_currentEpoch);
        
        // If we're in global mode, we should only insert the barriers once we have converged.
        if (!reallyInsertBarriers())
            return;
        
        // FIXME: We could support StoreBarrier(UntypedUse:). That would be sort of cool.
        // But right now we don't need it.
        
        DFG_ASSERT(m_graph, m_node, isCell(base.useKind()), m_node->op(), base.useKind());
        
        // Barriers are always inserted after the node that they service. Therefore, we always
        // know that the thing is a cell now.
        base.setUseKind(KnownCellUse);
        
        NodeOrigin origin = m_node->origin;
        if (clobbersExitState(m_graph, m_node))
            origin = origin.withInvalidExit();
        
        m_insertionSet.insertNode(nodeIndex, SpecNone, FencedStoreBarrier, origin, base);
    }
    
    bool reallyInsertBarriers()
    {
        return mode == PhaseMode::Fast || m_isConverged;
    }
    
    InsertionSet m_insertionSet;
    Epoch m_currentEpoch;
    unsigned m_nodeIndex;
    Node* m_node;
    
    // Things we only use in Global mode.
    std::unique_ptr<InPlaceAbstractState> m_state;
    std::unique_ptr<AbstractInterpreter<InPlaceAbstractState>> m_interpreter;
    std::unique_ptr<BlockMap<HashSet<Node*>>> m_stateAtHead;
    std::unique_ptr<BlockMap<HashSet<Node*>>> m_stateAtTail;
    bool m_isConverged;
};

} // anonymous namespace

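// Fast mode requires CPS form (note the m_form != SSA assertion) and analyzes each block in
// isolation.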
bool performFastStoreBarrierInsertion(Graph& graph)
{
    return runPhase<StoreBarrierInsertionPhase<PhaseMode::Fast>>(graph);
}

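// Global mode requires SSA form and CFA results, since it reuses the abstract interpreter to
// prove that stored values are not cells.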
bool performGlobalStoreBarrierInsertion(Graph& graph)
{
    return runPhase<StoreBarrierInsertionPhase<PhaseMode::Global>>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)