/*
 * Copyright (C) 2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGStoreBarrierClusteringPhase.h"

#if ENABLE(DFG_JIT)

#include "DFGDoesGC.h"
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGMayExit.h"
#include "DFGPhase.h"
#include "JSCInlines.h"
#include <wtf/FastBitVector.h>

namespace JSC { namespace DFG {

namespace {

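// In brief: this phase finds runs of store barriers that have no GC and no OSR
// exit between them, and re-emits the whole run at the last barrier of the run,
// so that only the first barrier of each cluster has to pay for a fence. (A
// summary of what the code below does.)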
class StoreBarrierClusteringPhase : public Phase {
public:
    StoreBarrierClusteringPhase(Graph& graph)
        : Phase(graph, "store barrier clustering")
        , m_insertionSet(graph)
    {
    }

    bool run()
    {
        size_t maxSize = 0;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder())
            maxSize = std::max(maxSize, block->size());
        m_barrierPoints.resize(maxSize);

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            size_t blockSize = block->size();
            doBlock(block);
            m_barrierPoints.clearRange(0, blockSize);
        }

        return true;
    }

private:
    // This summarizes everything we need to remember about a barrier.
    struct ChildAndOrigin {
        ChildAndOrigin() { }

        ChildAndOrigin(Node* child, CodeOrigin semanticOrigin)
            : child(child)
            , semanticOrigin(semanticOrigin)
        {
        }

        Node* child { nullptr };
        CodeOrigin semanticOrigin;
    };

    void doBlock(BasicBlock* block)
    {
        ASSERT(m_barrierPoints.isEmpty());

        // First identify the places where we would want to place all of the barriers, based
        // on a backwards analysis. We use the futureGC flag to track whether we have seen a
        // GC. Since this is a backwards analysis, when we get to a node, futureGC tells us
        // whether a GC will happen in the future, after that node.
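        //
        // For example, given a hypothetical block (illustrative node names, not real IR):
        //
        //     a: FencedStoreBarrier(@x)
        //     b: FencedStoreBarrier(@y)
        //     c: FencedStoreBarrier(@z)
        //     d: Call(...)                      // doesGC, so futureGC becomes true
        //
        // Walking backwards, d sets futureGC. Barrier c is the first barrier seen with
        // futureGC set, so its index is recorded as a barrier point and futureGC is
        // cleared. Barriers b and a then see futureGC == false, so they are not barrier
        // points and will be folded into the cluster at c by the forward pass below.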
        bool futureGC = true;
        for (unsigned nodeIndex = block->size(); nodeIndex--;) {
            Node* node = block->at(nodeIndex);

            // This is a backwards analysis, so exits require conservatism. If we exit, then
            // there probably will be a GC in the future! If we needed to then we could lift
            // that requirement by either (1) having a StoreBarrierHint that tells OSR exit
            // to barrier that value or (2) automatically barriering any DFG-live Node on OSR
            // exit. Either way, it would be weird because it would create a new root for OSR
            // availability analysis. I don't have evidence that it would be worth it.
            if (doesGC(m_graph, node) || mayExit(m_graph, node) != DoesNotExit) {
                futureGC = true;
                continue;
            }

            if (node->isStoreBarrier() && futureGC) {
                m_barrierPoints[nodeIndex] = true;
                futureGC = false;
            }
        }

        // Now we run forward and collect the barriers. When we reach a barrier point, we
        // insert the entire accumulated cluster there, with only the first barrier fenced.
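        //
        // Continuing the hypothetical example above, the cluster emitted at c would look
        // something like this (after sorting by child, so the order may differ):
        //
        //     FencedStoreBarrier(@x)            // the one fence for the whole cluster
        //     StoreBarrier(@y)                  // runs under the fence above
        //     StoreBarrier(@z)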
        for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
            Node* node = block->at(nodeIndex);
            if (!node->isStoreBarrier())
                continue;

            DFG_ASSERT(m_graph, node, !node->origin.wasHoisted);
            DFG_ASSERT(m_graph, node, node->child1().useKind() == KnownCellUse, node->op(), node->child1().useKind());

            NodeOrigin origin = node->origin;
            m_neededBarriers.append(ChildAndOrigin(node->child1().node(), origin.semantic));
            node->remove(m_graph);

            if (!m_barrierPoints[nodeIndex])
                continue;

            std::sort(
                m_neededBarriers.begin(), m_neededBarriers.end(),
                [&] (const ChildAndOrigin& a, const ChildAndOrigin& b) -> bool {
                    return a.child < b.child;
                });
            removeRepeatedElements(
                m_neededBarriers,
                [&] (const ChildAndOrigin& a, const ChildAndOrigin& b) -> bool {
                    return a.child == b.child;
                });
            for (auto iter = m_neededBarriers.begin(); iter != m_neededBarriers.end(); ++iter) {
                Node* child = iter->child;
                CodeOrigin semanticOrigin = iter->semanticOrigin;

                NodeType type;
                if (iter == m_neededBarriers.begin())
                    type = FencedStoreBarrier;
                else
                    type = StoreBarrier;

                m_insertionSet.insertNode(
                    nodeIndex, SpecNone, type, origin.withSemantic(semanticOrigin),
                    Edge(child, KnownCellUse));
            }
            m_neededBarriers.shrink(0);
        }

        m_insertionSet.execute(block);
    }

    InsertionSet m_insertionSet;
    FastBitVector m_barrierPoints;
    Vector<ChildAndOrigin> m_neededBarriers;
};

} // anonymous namespace

bool performStoreBarrierClustering(Graph& graph)
{
    return runPhase<StoreBarrierClusteringPhase>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)