/*
 * Copyright (C) 2011, 2013-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(DFG_JIT)

#include "DFGAbstractValue.h"
#include "DFGAvailabilityMap.h"
#include "DFGBranchDirection.h"
#include "DFGNode.h"
#include "DFGNodeAbstractValuePair.h"
#include "DFGStructureClobberState.h"
#include "Operands.h"
#include <wtf/Vector.h>

namespace JSC { namespace DFG {

class Graph;
class InsertionSet;

typedef Vector<BasicBlock*, 2> PredecessorList;
typedef Vector<Node*, 8> BlockNodeList;

struct BasicBlock : RefCounted<BasicBlock> {
    BasicBlock(
        BytecodeIndex bytecodeBegin, unsigned numArguments, unsigned numLocals,
        float executionCount);
    ~BasicBlock();

    void ensureLocals(unsigned newNumLocals);

    size_t size() const { return m_nodes.size(); }
    bool isEmpty() const { return !size(); }
    Node*& at(size_t i) { return m_nodes[i]; }
    Node* at(size_t i) const { return m_nodes[i]; }
    Node* tryAt(size_t i) const
    {
        if (i >= size())
            return nullptr;
        return at(i);
    }
    Node*& operator[](size_t i) { return at(i); }
    Node* operator[](size_t i) const { return at(i); }
    Node* last() const
    {
        RELEASE_ASSERT(!!size());
        return at(size() - 1);
    }

    // Use this to find both the index of the terminal and the terminal itself in one go. May
    // return a clear NodeAndIndex if the basic block currently lacks a terminal. That may happen
    // in the middle of IR transformations within a phase but should never be the case in between
    // phases.
    //
    // The reason why this is more than just "at(size() - 1)" is that we may place non-terminal
    // liveness marking instructions after the terminal. This is supposed to happen infrequently
    // but some basic blocks - most notably return blocks - will have liveness markers for all of
    // the flushed variables right after the return.
    //
    // It turns out that doing this linear search is basically perf-neutral, so long as we force
    // the method to be inlined. Hence the ALWAYS_INLINE.
    ALWAYS_INLINE NodeAndIndex findTerminal() const
    {
        size_t i = size();
        while (i--) {
            Node* node = at(i);
            if (node->isTerminal())
                return NodeAndIndex(node, i);
            switch (node->op()) {
            // The bitter end can contain Phantoms and the like. There will probably only be one
            // or two nodes after the terminal. They are all no-ops and will not have any checked
            // children.
            case Check: // This is here because it's our universal no-op.
            case CheckVarargs:
            case Phantom:
            case PhantomLocal:
            case Flush:
                break;
            default:
                return NodeAndIndex();
            }
        }
        return NodeAndIndex();
    }
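
    // Illustrative sketch (not part of this interface): a pass that wants to visit only the nodes
    // up to and including the terminal, skipping any trailing liveness markers, could be written
    // roughly like this, where process() stands in for whatever the pass does per node:
    //
    //     NodeAndIndex terminal = block->findTerminal();
    //     size_t limit = !terminal ? block->size() : terminal.index + 1;
    //     for (size_t i = 0; i < limit; ++i)
    //         process(block->at(i));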

    ALWAYS_INLINE Node* terminal() const
    {
        return findTerminal().node;
    }

    void resize(size_t size) { m_nodes.resize(size); }
    void grow(size_t size) { m_nodes.grow(size); }

    void append(Node* node) { m_nodes.append(node); }
    void insertBeforeTerminal(Node* node)
    {
        NodeAndIndex result = findTerminal();
        if (!result)
            append(node);
        else
            m_nodes.insert(result.index, node);
    }
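
    // Hedged usage sketch: insertBeforeTerminal() places the new node at the terminal's index, so
    // the terminal (and any liveness markers after it) stay at the end of the block. A phase that
    // has already built some node, here called newNode, would just do:
    //
    //     block->insertBeforeTerminal(newNode);
    //
    // How newNode gets constructed depends on the Graph API at the call site and is not shown here.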

    void replaceTerminal(Graph&, Node*);

    size_t numNodes() const { return phis.size() + size(); }
    Node* node(size_t i) const
    {
        if (i < phis.size())
            return phis[i];
        return at(i - phis.size());
    }
    bool isPhiIndex(size_t i) const { return i < phis.size(); }
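
    // Illustrative sketch (hypothetical pass, not part of this interface): numNodes() and node()
    // expose the Phi nodes followed by the body nodes as a single index space, so a pass that must
    // touch every node owned by the block can iterate uniformly:
    //
    //     for (size_t i = 0; i < block->numNodes(); ++i) {
    //         Node* node = block->node(i); // Phis first, then body nodes.
    //         if (block->isPhiIndex(i)) {
    //             // Optionally special-case Phi nodes here.
    //         }
    //         visit(node); // visit() is a placeholder for the pass's per-node work.
    //     }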

    bool isInPhis(Node* node) const;
    bool isInBlock(Node* myNode) const;

    BlockNodeList::iterator begin() { return m_nodes.begin(); }
    BlockNodeList::iterator end() { return m_nodes.end(); }

    unsigned numSuccessors() { return terminal()->numSuccessors(); }

    BasicBlock*& successor(unsigned index)
    {
        return terminal()->successor(index);
    }
    BasicBlock*& successorForCondition(bool condition)
    {
        return terminal()->successorForCondition(condition);
    }

    Node::SuccessorsIterable successors()
    {
        return terminal()->successors();
    }
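
    // Sketch of typical use: successors are read off the terminal, so walking the out-edges of a
    // block is just a range-for over successors(). visitEdge() below is a hypothetical callback.
    // Note that this requires the block to have a terminal, since successors() dereferences
    // terminal().
    //
    //     for (BasicBlock* successor : block->successors())
    //         visitEdge(block, successor);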

    void removePredecessor(BasicBlock* block);
    void replacePredecessor(BasicBlock* from, BasicBlock* to);

    template<typename... Params>
    Node* appendNode(Graph&, SpeculatedType, Params...);

    template<typename... Params>
    Node* appendNonTerminal(Graph&, SpeculatedType, Params...);

    template<typename... Params>
    Node* replaceTerminal(Graph&, SpeculatedType, Params...);

    void dump(PrintStream& out) const;

    void didLink()
    {
#if !ASSERT_DISABLED
        isLinked = true;
#endif
    }

    // This value is used internally for block linking and OSR entry. It is mostly meaningless
    // for other purposes due to inlining.
    BytecodeIndex bytecodeBegin;

    BlockIndex index;

    StructureClobberState cfaStructureClobberStateAtHead;
    StructureClobberState cfaStructureClobberStateAtTail;
    BranchDirection cfaBranchDirection;
    bool cfaHasVisited;
    bool cfaShouldRevisit;
    bool cfaThinksShouldTryConstantFolding { false };
    bool cfaDidFinish;
    bool intersectionOfCFAHasVisited;
    bool isOSRTarget;
    bool isCatchEntrypoint;

#if !ASSERT_DISABLED
    bool isLinked;
#endif
    bool isReachable;

    Vector<Node*> phis;
    PredecessorList predecessors;

    Operands<Node*> variablesAtHead;
    Operands<Node*> variablesAtTail;

    Operands<AbstractValue> valuesAtHead;
    Operands<AbstractValue> valuesAtTail;

    // The intersection of assumptions we have made previously at the head of this block. Note
    // that under normal circumstances, each time we run the CFA, we will get strictly more precise
    // results. But we don't actually require this to be the case. It's fine for the CFA to loosen
    // up for any odd reason, because anything that the CFA proves must be true from that point
    // forward, except if some registered watchpoint fires, in which case the code won't ever run.
    // So, the CFA proving something less precise later on is just an outcome of the CFA being
    // imperfect; the more precise thing that it had proved earlier is no less true.
    //
    // But for the purpose of OSR entry, we need to make sure that we remember what assumptions we
    // had used for optimizing any given basic block. That's what this is for.
    //
    // It's interesting that we could use this to make the CFA more precise: all future CFA runs
    // could filter their results with this thing to maintain maximal precision. But because we
    // expect the CFA to usually become monotonically more precise each time we run it to fixpoint,
    // this would not be a productive optimization: it would make setting up a basic block more
    // expensive and would only benefit bizarre pathological cases.
    Operands<AbstractValue> intersectionOfPastValuesAtHead;
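
    // Illustrative sketch of how this field is used (hedged; see the abstract interpreter and the
    // OSR entry code for the authoritative versions). Each time the CFA begins this block, it
    // narrows the remembered intersection by the freshly computed head values, roughly:
    //
    //     for (size_t i = 0; i < valuesAtHead.size(); ++i)
    //         intersectionOfPastValuesAtHead[i].filter(valuesAtHead[i]);
    //
    // OSR entry then validates the incoming machine state against intersectionOfPastValuesAtHead
    // rather than against the latest (possibly looser) valuesAtHead.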

    float executionCount;

    struct SSAData {
        WTF_MAKE_FAST_ALLOCATED;
    public:
        void invalidate()
        {
            liveAtTail.clear();
            liveAtHead.clear();
            valuesAtHead.clear();
            valuesAtTail.clear();
        }

        AvailabilityMap availabilityAtHead;
        AvailabilityMap availabilityAtTail;

        Vector<NodeFlowProjection> liveAtHead;
        Vector<NodeFlowProjection> liveAtTail;
        Vector<NodeAbstractValuePair> valuesAtHead;
        Vector<NodeAbstractValuePair> valuesAtTail;

        SSAData(BasicBlock*);
        ~SSAData();
    };
    std::unique_ptr<SSAData> ssa;

private:
    friend class InsertionSet;
    BlockNodeList m_nodes;
};

typedef Vector<BasicBlock*> BlockList;

static inline BytecodeIndex getBytecodeBeginForBlock(BasicBlock** basicBlock)
{
    return (*basicBlock)->bytecodeBegin;
}

static inline BasicBlock* blockForBytecodeIndex(Vector<BasicBlock*>& linkingTargets, BytecodeIndex bytecodeBegin)
{
    return *binarySearch<BasicBlock*, BytecodeIndex>(linkingTargets, linkingTargets.size(), bytecodeBegin, getBytecodeBeginForBlock);
}
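
// Hedged usage sketch: during block linking, callers keep linkingTargets sorted by bytecodeBegin
// and resolve a branch target with a binary search. targetBytecodeOffset below is a hypothetical
// value taken from whatever branch is being linked:
//
//     BasicBlock* target = blockForBytecodeIndex(linkingTargets, BytecodeIndex(targetBytecodeOffset));
//
// Note that binarySearch() expects the key to be present, so this should only be used for offsets
// that are known to begin a block.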

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)