/*
 * Copyright (C) 2015-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "B3LowerMacrosAfterOptimizations.h"

#if ENABLE(B3_JIT)

#include "AirArg.h"
#include "B3BasicBlockInlines.h"
#include "B3BlockInsertionSet.h"
#include "B3CCallValue.h"
#include "B3ConstDoubleValue.h"
#include "B3ConstFloatValue.h"
#include "B3ConstPtrValue.h"
#include "B3InsertionSetInlines.h"
#include "B3PhaseScope.h"
#include "B3ValueInlines.h"

#include <cmath> // ceil, ceilf, floor, floorf for the rounding fallbacks below.

namespace JSC { namespace B3 {

using Arg = Air::Arg;
using Code = Air::Code;
using Tmp = Air::Tmp;

namespace {

class LowerMacrosAfterOptimizations {
public:
    LowerMacrosAfterOptimizations(Procedure& proc)
        : m_proc(proc)
        , m_blockInsertionSet(proc)
        , m_insertionSet(proc)
    {
    }

    bool run()
    {
        for (BasicBlock* block : m_proc) {
            m_block = block;
            processCurrentBlock();
        }
        m_changed |= m_blockInsertionSet.execute();
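        // If anything changed, conservatively recompute reachability and invalidate any cached
        // CFG, since blocks added by the block insertion set would not be reflected there.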
        if (m_changed) {
            m_proc.resetReachability();
            m_proc.invalidateCFG();
        }
        return m_changed;
    }

private:
    void processCurrentBlock()
    {
        for (m_index = 0; m_index < m_block->size(); ++m_index) {
            m_value = m_block->at(m_index);
            m_origin = m_value->origin();
            switch (m_value->opcode()) {
            case Abs: {
                // ARM64 has a native floating-point absolute value instruction, so keep Abs as-is there.
                if (isARM64())
                    break;

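                // On other targets, lower Abs to a BitAnd that clears the IEEE 754 sign bit:
                // ~(1ll << 63) is 0x7fffffffffffffff (the Double mask) and ~(1 << 31) is
                // 0x7fffffff (the Float mask), so x & mask yields |x| for every input,
                // including NaN and signed zero.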
                Value* mask = nullptr;
                if (m_value->type() == Double)
                    mask = m_insertionSet.insert<ConstDoubleValue>(m_index, m_origin, bitwise_cast<double>(~(1ll << 63)));
                else if (m_value->type() == Float)
                    mask = m_insertionSet.insert<ConstFloatValue>(m_index, m_origin, bitwise_cast<float>(~(1 << 31)));
                else
                    RELEASE_ASSERT_NOT_REACHED();
                Value* result = m_insertionSet.insert<Value>(m_index, BitAnd, m_origin, m_value->child(0), mask);
                m_value->replaceWithIdentity(result);
                break;
            }
            case Ceil: {
                if (MacroAssembler::supportsFloatingPointRounding())
                    break;

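                // No hardware rounding on this target, so lower Ceil to a CCall to the C library's
                // ceil/ceilf. The explicit function-pointer type below picks the double(double)
                // overload of ceil; Effects::none() is sound because these functions are pure.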
                Value* functionAddress = nullptr;
                if (m_value->type() == Double) {
                    double (*ceilDouble)(double) = ceil;
                    functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, tagCFunctionPtr(ceilDouble, B3CCallPtrTag));
                } else if (m_value->type() == Float)
                    functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, tagCFunctionPtr(ceilf, B3CCallPtrTag));
                else
                    RELEASE_ASSERT_NOT_REACHED();

                Value* result = m_insertionSet.insert<CCallValue>(m_index,
                    m_value->type(),
                    m_origin,
                    Effects::none(),
                    functionAddress,
                    m_value->child(0));
                m_value->replaceWithIdentity(result);
                break;
            }
            case Floor: {
                if (MacroAssembler::supportsFloatingPointRounding())
                    break;

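                // Same fallback as Ceil above: without hardware rounding, lower Floor to a pure
                // CCall to the C library's floor/floorf.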
                Value* functionAddress = nullptr;
                if (m_value->type() == Double) {
                    double (*floorDouble)(double) = floor;
                    functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, tagCFunctionPtr(floorDouble, B3CCallPtrTag));
                } else if (m_value->type() == Float)
                    functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, tagCFunctionPtr(floorf, B3CCallPtrTag));
                else
                    RELEASE_ASSERT_NOT_REACHED();

                Value* result = m_insertionSet.insert<CCallValue>(m_index,
                    m_value->type(),
                    m_origin,
                    Effects::none(),
                    functionAddress,
                    m_value->child(0));
                m_value->replaceWithIdentity(result);
                break;
            }
            case Neg: {
                if (!isFloat(m_value->type()))
                    break;

                // x86 SSE has no floating-point negate instruction, so lower Neg to a sign-bit
                // flip (BitXor with -0.0); other targets negate directly.
                if (!isX86())
                    break;

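                // XORing with -0.0 (only the sign bit set) flips the sign bit and nothing else,
                // which is exactly IEEE 754 negation, including for NaN and +/-0.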
                Value* mask = nullptr;
                if (m_value->type() == Double)
                    mask = m_insertionSet.insert<ConstDoubleValue>(m_index, m_origin, -0.0);
                else {
                    RELEASE_ASSERT(m_value->type() == Float);
                    mask = m_insertionSet.insert<ConstFloatValue>(m_index, m_origin, -0.0f);
                }

                Value* result = m_insertionSet.insert<Value>(
                    m_index, BitXor, m_origin, m_value->child(0), mask);
                m_value->replaceWithIdentity(result);
                break;
            }

            case RotL: {
                // ARM64 doesn't have a rotate left.
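                // Lower RotL(x, n) to RotR(x, -n): rotating left by n equals rotating right by
                // (width - n), and the rotate instruction only uses the shift amount modulo the
                // operand width, so the negated amount produces the same result.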
                if (isARM64()) {
                    Value* newShift = m_insertionSet.insert<Value>(m_index, Neg, m_value->origin(), m_value->child(1));
                    Value* rotate = m_insertionSet.insert<Value>(m_index, RotR, m_value->origin(), m_value->child(0), newShift);
                    m_value->replaceWithIdentity(rotate);
                    break;
                }
                break;
            }

            default:
                break;
            }
        }
        m_insertionSet.execute(m_block);
    }

    Procedure& m_proc;
    BlockInsertionSet m_blockInsertionSet;
    InsertionSet m_insertionSet;
    BasicBlock* m_block;
    unsigned m_index;
    Value* m_value;
    Origin m_origin;
    bool m_changed { false };
};

bool lowerMacrosImpl(Procedure& proc)
{
    LowerMacrosAfterOptimizations lowerMacros(proc);
    return lowerMacros.run();
}

} // anonymous namespace

bool lowerMacrosAfterOptimizations(Procedure& proc)
{
    PhaseScope phaseScope(proc, "lowerMacrosAfterOptimizations");
    bool result = lowerMacrosImpl(proc);
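    // When IR validation is enabled, run the phase a second time and assert that nothing is
    // left to lower: a correct lowering must reach a fixpoint after one pass.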
    if (shouldValidateIR())
        RELEASE_ASSERT(!lowerMacrosImpl(proc));
    return result;
}

} } // namespace JSC::B3

#endif // ENABLE(B3_JIT)