1 | /* |
2 | * Copyright (C) 2015-2017 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "B3LowerToAir.h" |
28 | |
29 | #if ENABLE(B3_JIT) |
30 | |
31 | #include "AirBlockInsertionSet.h" |
32 | #include "AirCCallSpecial.h" |
33 | #include "AirCode.h" |
34 | #include "AirInsertionSet.h" |
35 | #include "AirInstInlines.h" |
36 | #include "AirPrintSpecial.h" |
37 | #include "AirStackSlot.h" |
38 | #include "B3ArgumentRegValue.h" |
39 | #include "B3AtomicValue.h" |
40 | #include "B3BasicBlockInlines.h" |
41 | #include "B3BlockWorklist.h" |
42 | #include "B3CCallValue.h" |
43 | #include "B3CheckSpecial.h" |
44 | #include "B3Commutativity.h" |
45 | #include "B3Dominators.h" |
46 | #include "B3FenceValue.h" |
47 | #include "B3MemoryValueInlines.h" |
48 | #include "B3PatchpointSpecial.h" |
49 | #include "B3PatchpointValue.h" |
50 | #include "B3PhaseScope.h" |
51 | #include "B3PhiChildren.h" |
52 | #include "B3Procedure.h" |
53 | #include "B3SlotBaseValue.h" |
54 | #include "B3StackSlot.h" |
55 | #include "B3UpsilonValue.h" |
56 | #include "B3UseCounts.h" |
57 | #include "B3ValueInlines.h" |
58 | #include "B3Variable.h" |
59 | #include "B3VariableValue.h" |
60 | #include "B3WasmAddressValue.h" |
61 | #include <wtf/IndexMap.h> |
62 | #include <wtf/IndexSet.h> |
63 | #include <wtf/ListDump.h> |
64 | |
65 | #if ASSERT_DISABLED |
66 | IGNORE_RETURN_TYPE_WARNINGS_BEGIN |
67 | #endif |
68 | |
69 | namespace JSC { namespace B3 { |
70 | |
71 | namespace { |
72 | |
73 | namespace B3LowerToAirInternal { |
74 | static const bool verbose = false; |
75 | } |
76 | |
77 | using Arg = Air::Arg; |
78 | using Inst = Air::Inst; |
79 | using Code = Air::Code; |
80 | using Tmp = Air::Tmp; |
81 | |
82 | // FIXME: We wouldn't need this if Air supported Width modifiers in Air::Kind. |
83 | // https://bugs.webkit.org/show_bug.cgi?id=169247 |
84 | #define OPCODE_FOR_WIDTH(opcode, width) ( \ |
85 | (width) == Width8 ? Air::opcode ## 8 : \ |
86 | (width) == Width16 ? Air::opcode ## 16 : \ |
87 | (width) == Width32 ? Air::opcode ## 32 : \ |
88 | Air::opcode ## 64) |
89 | #define OPCODE_FOR_CANONICAL_WIDTH(opcode, width) ( \ |
90 | (width) == Width64 ? Air::opcode ## 64 : Air::opcode ## 32) |
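// For example, OPCODE_FOR_WIDTH(StoreRel, Width16) evaluates to Air::StoreRel16, while
// OPCODE_FOR_CANONICAL_WIDTH collapses every width below Width64 to the 32-bit opcode, so it only
// ever produces the ...32 or ...64 form.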
91 | |
92 | class LowerToAir { |
93 | public: |
94 | LowerToAir(Procedure& procedure) |
95 | : m_valueToTmp(procedure.values().size()) |
96 | , m_phiToTmp(procedure.values().size()) |
97 | , m_blockToBlock(procedure.size()) |
98 | , m_useCounts(procedure) |
99 | , m_phiChildren(procedure) |
100 | , m_dominators(procedure.dominators()) |
101 | , m_procedure(procedure) |
102 | , m_code(procedure.code()) |
103 | , m_blockInsertionSet(m_code) |
104 | #if CPU(X86) || CPU(X86_64) |
105 | , m_eax(X86Registers::eax) |
106 | , m_ecx(X86Registers::ecx) |
107 | , m_edx(X86Registers::edx) |
108 | #endif |
109 | { |
110 | } |
111 | |
112 | void run() |
113 | { |
114 | using namespace Air; |
115 | for (B3::BasicBlock* block : m_procedure) |
116 | m_blockToBlock[block] = m_code.addBlock(block->frequency()); |
117 | |
118 | for (Value* value : m_procedure.values()) { |
119 | switch (value->opcode()) { |
120 | case Phi: { |
121 | m_phiToTmp[value] = m_code.newTmp(value->resultBank()); |
122 | if (B3LowerToAirInternal::verbose) |
                    dataLog("Phi tmp for ", *value, ": ", m_phiToTmp[value], "\n");
124 | break; |
125 | } |
126 | default: |
127 | break; |
128 | } |
129 | } |
130 | |
131 | for (B3::StackSlot* stack : m_procedure.stackSlots()) |
132 | m_stackToStack.add(stack, m_code.addStackSlot(stack)); |
133 | for (Variable* variable : m_procedure.variables()) |
134 | m_variableToTmp.add(variable, m_code.newTmp(variable->bank())); |
135 | |
136 | // Figure out which blocks are not rare. |
137 | m_fastWorklist.push(m_procedure[0]); |
138 | while (B3::BasicBlock* block = m_fastWorklist.pop()) { |
139 | for (B3::FrequentedBlock& successor : block->successors()) { |
140 | if (!successor.isRare()) |
141 | m_fastWorklist.push(successor.block()); |
142 | } |
143 | } |
144 | |
145 | m_procedure.resetValueOwners(); // Used by crossesInterference(). |
146 | |
147 | // Lower defs before uses on a global level. This is a good heuristic to lock down a |
148 | // hoisted address expression before we duplicate it back into the loop. |
149 | for (B3::BasicBlock* block : m_procedure.blocksInPreOrder()) { |
150 | m_block = block; |
151 | |
152 | m_isRare = !m_fastWorklist.saw(block); |
153 | |
154 | if (B3LowerToAirInternal::verbose) |
                dataLog("Lowering Block ", *block, ":\n");
156 | |
157 | // Make sure that the successors are set up correctly. |
158 | for (B3::FrequentedBlock successor : block->successors()) { |
159 | m_blockToBlock[block]->successors().append( |
160 | Air::FrequentedBlock(m_blockToBlock[successor.block()], successor.frequency())); |
161 | } |
162 | |
            // Process the values in the block in reverse order so we see uses before defs. That's
            // what allows us to match patterns effectively.
165 | for (unsigned i = block->size(); i--;) { |
166 | m_index = i; |
167 | m_value = block->at(i); |
168 | if (m_locked.contains(m_value)) |
169 | continue; |
170 | m_insts.append(Vector<Inst>()); |
171 | if (B3LowerToAirInternal::verbose) |
                    dataLog("Lowering ", deepDump(m_procedure, m_value), ":\n");
173 | lower(); |
174 | if (B3LowerToAirInternal::verbose) { |
175 | for (Inst& inst : m_insts.last()) |
                        dataLog(" ", inst, "\n");
177 | } |
178 | } |
179 | |
180 | finishAppendingInstructions(m_blockToBlock[block]); |
181 | } |
182 | |
183 | m_blockInsertionSet.execute(); |
184 | |
185 | Air::InsertionSet insertionSet(m_code); |
186 | for (Inst& inst : m_prologue) |
187 | insertionSet.insertInst(0, WTFMove(inst)); |
188 | insertionSet.execute(m_code[0]); |
189 | } |
190 | |
191 | private: |
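    // Note that values which merely forward their child (the opcodes below) don't get a Tmp of
    // their own: tmp() walks through them and reuses the child's Tmp.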
192 | bool shouldCopyPropagate(Value* value) |
193 | { |
194 | switch (value->opcode()) { |
195 | case Trunc: |
196 | case Identity: |
197 | case Opaque: |
198 | return true; |
199 | default: |
200 | return false; |
201 | } |
202 | } |
203 | |
204 | class ArgPromise { |
205 | WTF_MAKE_NONCOPYABLE(ArgPromise); |
206 | public: |
207 | ArgPromise() { } |
208 | |
209 | ArgPromise(const Arg& arg, Value* valueToLock = nullptr) |
210 | : m_arg(arg) |
211 | , m_value(valueToLock) |
212 | { |
213 | } |
214 | |
215 | void swap(ArgPromise& other) |
216 | { |
217 | std::swap(m_arg, other.m_arg); |
218 | std::swap(m_value, other.m_value); |
219 | std::swap(m_wasConsumed, other.m_wasConsumed); |
220 | std::swap(m_wasWrapped, other.m_wasWrapped); |
221 | std::swap(m_traps, other.m_traps); |
222 | } |
223 | |
224 | ArgPromise(ArgPromise&& other) |
225 | { |
226 | swap(other); |
227 | } |
228 | |
229 | ArgPromise& operator=(ArgPromise&& other) |
230 | { |
231 | swap(other); |
232 | return *this; |
233 | } |
234 | |
235 | ~ArgPromise() |
236 | { |
237 | if (m_wasConsumed) |
238 | RELEASE_ASSERT(m_wasWrapped); |
239 | } |
240 | |
241 | void setTraps(bool value) |
242 | { |
243 | m_traps = value; |
244 | } |
245 | |
246 | static ArgPromise tmp(Value* value) |
247 | { |
248 | ArgPromise result; |
249 | result.m_value = value; |
250 | return result; |
251 | } |
252 | |
253 | explicit operator bool() const { return m_arg || m_value; } |
254 | |
255 | Arg::Kind kind() const |
256 | { |
257 | if (!m_arg && m_value) |
258 | return Arg::Tmp; |
259 | return m_arg.kind(); |
260 | } |
261 | |
262 | const Arg& peek() const |
263 | { |
264 | return m_arg; |
265 | } |
266 | |
267 | Arg consume(LowerToAir& lower) |
268 | { |
269 | m_wasConsumed = true; |
270 | if (!m_arg && m_value) |
271 | return lower.tmp(m_value); |
272 | if (m_value) |
273 | lower.commitInternal(m_value); |
274 | return m_arg; |
275 | } |
276 | |
277 | template<typename... Args> |
278 | Inst inst(Args&&... args) |
279 | { |
280 | Inst result(std::forward<Args>(args)...); |
281 | result.kind.effects |= m_traps; |
282 | m_wasWrapped = true; |
283 | return result; |
284 | } |
285 | |
286 | private: |
        // Four forms:
288 | // Everything null: invalid. |
289 | // Arg non-null, value null: just use the arg, nothing special. |
290 | // Arg null, value non-null: it's a tmp, pin it when necessary. |
291 | // Arg non-null, value non-null: use the arg, lock the value. |
292 | Arg m_arg; |
293 | Value* m_value { nullptr }; |
294 | bool m_wasConsumed { false }; |
295 | bool m_wasWrapped { false }; |
296 | bool m_traps { false }; |
297 | }; |
298 | |
299 | // Consider using tmpPromise() in cases where you aren't sure that you want to pin the value yet. |
300 | // Here are three canonical ways of using tmp() and tmpPromise(): |
301 | // |
302 | // Idiom #1: You know that you want a tmp() and you know that it will be valid for the |
303 | // instruction you're emitting. |
304 | // |
305 | // append(Foo, tmp(bar)); |
306 | // |
307 | // Idiom #2: You don't know if you want to use a tmp() because you haven't determined if the |
308 | // instruction will accept it, so you query first. Note that the call to tmp() happens only after |
309 | // you are sure that you will use it. |
310 | // |
311 | // if (isValidForm(Foo, Arg::Tmp)) |
312 | // append(Foo, tmp(bar)) |
313 | // |
314 | // Idiom #3: Same as Idiom #2, but using tmpPromise. Notice that this calls consume() only after |
315 | // it's sure it will use the tmp. That's deliberate. Also note that you're required to pass any |
316 | // Inst you create with consumed promises through that promise's inst() function. |
317 | // |
318 | // ArgPromise promise = tmpPromise(bar); |
319 | // if (isValidForm(Foo, promise.kind())) |
320 | // append(promise.inst(Foo, promise.consume(*this))) |
321 | // |
322 | // In both idiom #2 and idiom #3, we don't pin the value to a temporary except when we actually |
323 | // emit the instruction. Both tmp() and tmpPromise().consume(*this) will pin it. Pinning means |
324 | // that we will henceforth require that the value of 'bar' is generated as a separate |
325 | // instruction. We don't want to pin the value to a temporary if we might change our minds, and |
326 | // pass an address operand representing 'bar' to Foo instead. |
327 | // |
328 | // Because tmp() pins, the following is not an idiom you should use: |
329 | // |
330 | // Tmp tmp = this->tmp(bar); |
331 | // if (isValidForm(Foo, tmp.kind())) |
332 | // append(Foo, tmp); |
333 | // |
334 | // That's because if isValidForm() returns false, you will have already pinned the 'bar' to a |
335 | // temporary. You might later want to try to do something like loadPromise(), and that will fail. |
    // This arises in operations that have both Addr,Tmp and Tmp,Addr forms. The following code
337 | // seems right, but will actually fail to ever match the Tmp,Addr form because by then, the right |
338 | // value is already pinned. |
339 | // |
340 | // auto tryThings = [this] (const Arg& left, const Arg& right) { |
341 | // if (isValidForm(Foo, left.kind(), right.kind())) |
342 | // return Inst(Foo, m_value, left, right); |
343 | // return Inst(); |
344 | // }; |
345 | // if (Inst result = tryThings(loadAddr(left), tmp(right))) |
346 | // return result; |
347 | // if (Inst result = tryThings(tmp(left), loadAddr(right))) // this never succeeds. |
348 | // return result; |
349 | // return Inst(Foo, m_value, tmp(left), tmp(right)); |
350 | // |
351 | // If you imagine that loadAddr(value) is just loadPromise(value).consume(*this), then this code |
352 | // will run correctly - it will generate OK code - but the second form is never matched. |
353 | // loadAddr(right) will never succeed because it will observe that 'right' is already pinned. |
354 | // Of course, it's exactly because of the risky nature of such code that we don't have a |
    // loadAddr() helper and require you to balance ArgPromises in code like this. Such code will
356 | // work fine if written as: |
357 | // |
358 | // auto tryThings = [this] (ArgPromise& left, ArgPromise& right) { |
359 | // if (isValidForm(Foo, left.kind(), right.kind())) |
360 | // return left.inst(right.inst(Foo, m_value, left.consume(*this), right.consume(*this))); |
361 | // return Inst(); |
362 | // }; |
363 | // if (Inst result = tryThings(loadPromise(left), tmpPromise(right))) |
364 | // return result; |
365 | // if (Inst result = tryThings(tmpPromise(left), loadPromise(right))) |
366 | // return result; |
367 | // return Inst(Foo, m_value, tmp(left), tmp(right)); |
368 | // |
369 | // Notice that we did use tmp in the fall-back case at the end, because by then, we know for sure |
370 | // that we want a tmp. But using tmpPromise in the tryThings() calls ensures that doing so |
371 | // doesn't prevent us from trying loadPromise on the same value. |
372 | Tmp tmp(Value* value) |
373 | { |
374 | Tmp& tmp = m_valueToTmp[value]; |
375 | if (!tmp) { |
376 | while (shouldCopyPropagate(value)) |
377 | value = value->child(0); |
378 | |
379 | if (value->opcode() == FramePointer) |
380 | return Tmp(GPRInfo::callFrameRegister); |
381 | |
382 | Tmp& realTmp = m_valueToTmp[value]; |
383 | if (!realTmp) { |
384 | realTmp = m_code.newTmp(value->resultBank()); |
385 | if (m_procedure.isFastConstant(value->key())) |
386 | m_code.addFastTmp(realTmp); |
387 | if (B3LowerToAirInternal::verbose) |
                    dataLog("Tmp for ", *value, ": ", realTmp, "\n");
389 | } |
390 | tmp = realTmp; |
391 | } |
392 | return tmp; |
393 | } |
394 | |
395 | ArgPromise tmpPromise(Value* value) |
396 | { |
397 | return ArgPromise::tmp(value); |
398 | } |
399 | |
400 | bool canBeInternal(Value* value) |
401 | { |
402 | // If one of the internal things has already been computed, then we don't want to cause |
403 | // it to be recomputed again. |
404 | if (m_valueToTmp[value]) |
405 | return false; |
406 | |
407 | // We require internals to have only one use - us. It's not clear if this should be numUses() or |
408 | // numUsingInstructions(). Ideally, it would be numUsingInstructions(), except that it's not clear |
409 | // if we'd actually do the right thing when matching over such a DAG pattern. For now, it simply |
410 | // doesn't matter because we don't implement patterns that would trigger this. |
411 | if (m_useCounts.numUses(value) != 1) |
412 | return false; |
413 | |
414 | return true; |
415 | } |
416 | |
417 | // If you ask canBeInternal() and then construct something from that, and you commit to emitting |
418 | // that code, then you must commitInternal() on that value. This is tricky, and you only need to |
419 | // do it if you're pattern matching by hand rather than using the patterns language. Long story |
420 | // short, you should avoid this by using the pattern matcher to match patterns. |
421 | void commitInternal(Value* value) |
422 | { |
423 | if (value) |
424 | m_locked.add(value); |
425 | } |
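    // A minimal sketch of the canBeInternal()/commitInternal() protocol, modeled on how constant
    // doubles are folded in fillStackmap() below (illustrative only):
    //
    //     if (value->hasDouble() && canBeInternal(value)) {
    //         commitInternal(value);
    //         arg = Arg::bigImm(bitwise_cast<int64_t>(value->asDouble()));
    //     } else
    //         arg = tmp(value);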
426 | |
427 | bool crossesInterference(Value* value) |
428 | { |
429 | // If it's in a foreign block, then be conservative. We could handle this if we were |
430 | // willing to do heavier analysis. For example, if we had liveness, then we could label |
431 | // values as "crossing interference" if they interfere with anything that they are live |
432 | // across. But, it's not clear how useful this would be. |
433 | if (value->owner != m_value->owner) |
434 | return true; |
435 | |
436 | Effects effects = value->effects(); |
437 | |
438 | for (unsigned i = m_index; i--;) { |
439 | Value* otherValue = m_block->at(i); |
440 | if (otherValue == value) |
441 | return false; |
442 | if (effects.interferes(otherValue->effects())) |
443 | return true; |
444 | } |
445 | |
446 | ASSERT_NOT_REACHED(); |
447 | return true; |
448 | } |
449 | |
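    // Extracts the index scale implied by a Shl that feeds an address computation. For example,
    // Shl(@index, 2) yields a scale of 4, provided Arg::isValidIndexForm(4, offset, width) holds
    // for the target. Returns nullopt when the shift amount isn't a usable constant.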
450 | template<typename Int, typename = Value::IsLegalOffset<Int>> |
451 | Optional<unsigned> scaleForShl(Value* shl, Int offset, Optional<Width> width = WTF::nullopt) |
452 | { |
453 | if (shl->opcode() != Shl) |
454 | return WTF::nullopt; |
455 | if (!shl->child(1)->hasInt32()) |
456 | return WTF::nullopt; |
457 | unsigned logScale = shl->child(1)->asInt32(); |
458 | if (shl->type() == Int32) |
459 | logScale &= 31; |
460 | else |
461 | logScale &= 63; |
462 | // Use 64-bit math to perform the shift so that <<32 does the right thing, but then switch |
463 | // to signed since that's what all of our APIs want. |
464 | int64_t bigScale = static_cast<uint64_t>(1) << static_cast<uint64_t>(logScale); |
465 | if (!isRepresentableAs<int32_t>(bigScale)) |
466 | return WTF::nullopt; |
467 | unsigned scale = static_cast<int32_t>(bigScale); |
468 | if (!Arg::isValidIndexForm(scale, offset, width)) |
469 | return WTF::nullopt; |
470 | return scale; |
471 | } |
472 | |
473 | // This turns the given operand into an address. |
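    // For example, Add(@base, Shl(@index, 2)) with offset 8 can become
    // Arg::index(tmp(@base), tmp(@index), 4, 8) when the target supports that index form, fusing
    // the shift and the add into the addressing mode.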
474 | template<typename Int, typename = Value::IsLegalOffset<Int>> |
475 | Arg effectiveAddr(Value* address, Int offset, Width width) |
476 | { |
477 | ASSERT(Arg::isValidAddrForm(offset, width)); |
478 | |
479 | auto fallback = [&] () -> Arg { |
480 | return Arg::addr(tmp(address), offset); |
481 | }; |
482 | |
483 | static const unsigned lotsOfUses = 10; // This is arbitrary and we should tune it eventually. |
484 | |
485 | // Only match if the address value isn't used in some large number of places. |
486 | if (m_useCounts.numUses(address) > lotsOfUses) |
487 | return fallback(); |
488 | |
489 | switch (address->opcode()) { |
490 | case Add: { |
491 | Value* left = address->child(0); |
492 | Value* right = address->child(1); |
493 | |
494 | auto tryIndex = [&] (Value* index, Value* base) -> Arg { |
495 | Optional<unsigned> scale = scaleForShl(index, offset, width); |
496 | if (!scale) |
497 | return Arg(); |
498 | if (m_locked.contains(index->child(0)) || m_locked.contains(base)) |
499 | return Arg(); |
500 | return Arg::index(tmp(base), tmp(index->child(0)), *scale, offset); |
501 | }; |
502 | |
503 | if (Arg result = tryIndex(left, right)) |
504 | return result; |
505 | if (Arg result = tryIndex(right, left)) |
506 | return result; |
507 | |
508 | if (m_locked.contains(left) || m_locked.contains(right) |
509 | || !Arg::isValidIndexForm(1, offset, width)) |
510 | return fallback(); |
511 | |
512 | return Arg::index(tmp(left), tmp(right), 1, offset); |
513 | } |
514 | |
515 | case Shl: { |
516 | Value* left = address->child(0); |
517 | |
518 | // We'll never see child(1)->isInt32(0), since that would have been reduced. If the shift |
519 | // amount is greater than 1, then there isn't really anything smart that we could do here. |
520 | // We avoid using baseless indexes because their encoding isn't particularly efficient. |
521 | if (m_locked.contains(left) || !address->child(1)->isInt32(1) |
522 | || !Arg::isValidIndexForm(1, offset, width)) |
523 | return fallback(); |
524 | |
525 | return Arg::index(tmp(left), tmp(left), 1, offset); |
526 | } |
527 | |
528 | case FramePointer: |
529 | return Arg::addr(Tmp(GPRInfo::callFrameRegister), offset); |
530 | |
531 | case SlotBase: |
532 | return Arg::stack(m_stackToStack.get(address->as<SlotBaseValue>()->slot()), offset); |
533 | |
534 | case WasmAddress: { |
535 | WasmAddressValue* wasmAddress = address->as<WasmAddressValue>(); |
536 | Value* pointer = wasmAddress->child(0); |
537 | if (!Arg::isValidIndexForm(1, offset, width) || m_locked.contains(pointer)) |
538 | return fallback(); |
539 | |
540 | // FIXME: We should support ARM64 LDR 32-bit addressing, which will |
541 | // allow us to fuse a Shl ptr, 2 into the address. Additionally, and |
542 | // perhaps more importantly, it would allow us to avoid a truncating |
543 | // move. See: https://bugs.webkit.org/show_bug.cgi?id=163465 |
544 | |
545 | return Arg::index(Tmp(wasmAddress->pinnedGPR()), tmp(pointer), 1, offset); |
546 | } |
547 | |
548 | default: |
549 | return fallback(); |
550 | } |
551 | } |
552 | |
553 | // This gives you the address of the given Load or Store. If it's not a Load or Store, then |
554 | // it returns Arg(). |
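    // For example, addr() on a Load(@p) with offset 4 typically yields Arg::addr(tmp(@p), 4), or
    // an index/stack form when effectiveAddr() can fold more of the address computation.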
555 | Arg addr(Value* memoryValue) |
556 | { |
557 | MemoryValue* value = memoryValue->as<MemoryValue>(); |
558 | if (!value) |
559 | return Arg(); |
560 | |
561 | if (value->requiresSimpleAddr()) |
562 | return Arg::simpleAddr(tmp(value->lastChild())); |
563 | |
564 | Value::OffsetType offset = value->offset(); |
565 | Width width = value->accessWidth(); |
566 | |
567 | Arg result = effectiveAddr(value->lastChild(), offset, width); |
568 | RELEASE_ASSERT(result.isValidForm(width)); |
569 | |
570 | return result; |
571 | } |
572 | |
573 | template<typename... Args> |
574 | Inst trappingInst(bool traps, Args&&... args) |
575 | { |
576 | Inst result(std::forward<Args>(args)...); |
577 | result.kind.effects |= traps; |
578 | return result; |
579 | } |
580 | |
581 | template<typename... Args> |
582 | Inst trappingInst(Value* value, Args&&... args) |
583 | { |
584 | return trappingInst(value->traps(), std::forward<Args>(args)...); |
585 | } |
586 | |
587 | ArgPromise loadPromiseAnyOpcode(Value* loadValue) |
588 | { |
589 | RELEASE_ASSERT(loadValue->as<MemoryValue>()); |
590 | if (!canBeInternal(loadValue)) |
591 | return Arg(); |
592 | if (crossesInterference(loadValue)) |
593 | return Arg(); |
594 | // On x86, all loads have fences. Doing this kind of instruction selection will move the load, |
595 | // but that's fine because our interference analysis stops the motion of fences around other |
596 | // fences. So, any load motion we introduce here would not be observable. |
597 | if (!isX86() && loadValue->as<MemoryValue>()->hasFence()) |
598 | return Arg(); |
599 | Arg loadAddr = addr(loadValue); |
600 | RELEASE_ASSERT(loadAddr); |
601 | ArgPromise result(loadAddr, loadValue); |
602 | if (loadValue->traps()) |
603 | result.setTraps(true); |
604 | return result; |
605 | } |
606 | |
607 | ArgPromise loadPromise(Value* loadValue, B3::Opcode loadOpcode) |
608 | { |
609 | if (loadValue->opcode() != loadOpcode) |
610 | return Arg(); |
611 | return loadPromiseAnyOpcode(loadValue); |
612 | } |
613 | |
614 | ArgPromise loadPromise(Value* loadValue) |
615 | { |
616 | return loadPromise(loadValue, Load); |
617 | } |
618 | |
619 | Arg imm(int64_t intValue) |
620 | { |
621 | if (Arg::isValidImmForm(intValue)) |
622 | return Arg::imm(intValue); |
623 | return Arg(); |
624 | } |
625 | |
626 | Arg imm(Value* value) |
627 | { |
628 | if (value->hasInt()) |
629 | return imm(value->asInt()); |
630 | return Arg(); |
631 | } |
632 | |
633 | Arg bitImm(Value* value) |
634 | { |
635 | if (value->hasInt()) { |
636 | int64_t intValue = value->asInt(); |
637 | if (Arg::isValidBitImmForm(intValue)) |
638 | return Arg::bitImm(intValue); |
639 | } |
640 | return Arg(); |
641 | } |
642 | |
643 | Arg bitImm64(Value* value) |
644 | { |
645 | if (value->hasInt()) { |
646 | int64_t intValue = value->asInt(); |
647 | if (Arg::isValidBitImm64Form(intValue)) |
648 | return Arg::bitImm64(intValue); |
649 | } |
650 | return Arg(); |
651 | } |
652 | |
653 | Arg immOrTmp(Value* value) |
654 | { |
655 | if (Arg result = imm(value)) |
656 | return result; |
657 | return tmp(value); |
658 | } |
659 | |
660 | // By convention, we use Oops to mean "I don't know". |
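    // For example, tryOpcodeForType(Air::Add32, Air::Add64, Int64) is Air::Add64, while the same
    // call with a Float or Double type is Air::Oops since no floating-point opcodes were supplied.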
661 | Air::Opcode tryOpcodeForType( |
662 | Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Type type) |
663 | { |
664 | Air::Opcode opcode; |
665 | switch (type) { |
666 | case Int32: |
667 | opcode = opcode32; |
668 | break; |
669 | case Int64: |
670 | opcode = opcode64; |
671 | break; |
672 | case Float: |
673 | opcode = opcodeFloat; |
674 | break; |
675 | case Double: |
676 | opcode = opcodeDouble; |
677 | break; |
678 | default: |
679 | opcode = Air::Oops; |
680 | break; |
681 | } |
682 | |
683 | return opcode; |
684 | } |
685 | |
686 | Air::Opcode tryOpcodeForType(Air::Opcode opcode32, Air::Opcode opcode64, Type type) |
687 | { |
688 | return tryOpcodeForType(opcode32, opcode64, Air::Oops, Air::Oops, type); |
689 | } |
690 | |
691 | Air::Opcode opcodeForType( |
692 | Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Type type) |
693 | { |
694 | Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, type); |
695 | RELEASE_ASSERT(opcode != Air::Oops); |
696 | return opcode; |
697 | } |
698 | |
    Air::Opcode opcodeForType(Air::Opcode opcode32, Air::Opcode opcode64, Type type)
    {
        return opcodeForType(opcode32, opcode64, Air::Oops, Air::Oops, type);
    }
703 | |
704 | template<Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble = Air::Oops, Air::Opcode opcodeFloat = Air::Oops> |
705 | void appendUnOp(Value* value) |
706 | { |
707 | Air::Opcode opcode = opcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, value->type()); |
708 | |
709 | Tmp result = tmp(m_value); |
710 | |
711 | // Two operand forms like: |
712 | // Op a, b |
713 | // mean something like: |
714 | // b = Op a |
715 | |
716 | ArgPromise addr = loadPromise(value); |
717 | if (isValidForm(opcode, addr.kind(), Arg::Tmp)) { |
718 | append(addr.inst(opcode, m_value, addr.consume(*this), result)); |
719 | return; |
720 | } |
721 | |
722 | if (isValidForm(opcode, Arg::Tmp, Arg::Tmp)) { |
723 | append(opcode, tmp(value), result); |
724 | return; |
725 | } |
726 | |
727 | ASSERT(value->type() == m_value->type()); |
728 | append(relaxedMoveForType(m_value->type()), tmp(value), result); |
729 | append(opcode, result); |
730 | } |
731 | |
732 | // Call this method when doing two-operand lowering of a commutative operation. You have a choice of |
733 | // which incoming Value is moved into the result. This will select which one is likely to be most |
734 | // profitable to use as the result. Doing the right thing can have big performance consequences in tight |
735 | // kernels. |
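    // For example, when lowering @x = Add(@a, @b) to the two-operand form "Add32 a, b" (meaning
    // b += a), one operand must first be moved into the result tmp. If @b dies here while @a stays
    // live, moving @b lets the register allocator coalesce that move away; moving @a would force a
    // real copy to be emitted.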
736 | bool preferRightForResult(Value* left, Value* right) |
737 | { |
738 | // The default is to move left into result, because that's required for non-commutative instructions. |
739 | // The value that we want to move into result position is the one that dies here. So, if we're |
740 | // compiling a commutative operation and we know that actually right is the one that dies right here, |
741 | // then we can flip things around to help coalescing, which then kills the move instruction. |
742 | // |
743 | // But it's more complicated: |
744 | // - Used-once is a bad estimate of whether the variable dies here. |
745 | // - A child might be a candidate for coalescing with this value. |
746 | // |
747 | // Currently, we have machinery in place to recognize super obvious forms of the latter issue. |
748 | |
        // We recognize when a child is a Phi that uses this value; the check is transitive through
        // chains of Phis. Beyond that, we're very conservative about this heuristic.
751 | bool leftIsPhiWithThis = m_phiChildren[left].transitivelyUses(m_value); |
752 | bool rightIsPhiWithThis = m_phiChildren[right].transitivelyUses(m_value); |
753 | |
754 | if (leftIsPhiWithThis != rightIsPhiWithThis) |
755 | return rightIsPhiWithThis; |
756 | |
757 | if (m_useCounts.numUsingInstructions(right) != 1) |
758 | return false; |
759 | |
760 | if (m_useCounts.numUsingInstructions(left) != 1) |
761 | return true; |
762 | |
763 | // The use count might be 1 if the variable is live around a loop. We can guarantee that we |
764 | // pick the variable that is least likely to suffer this problem if we pick the one that |
765 | // is closest to us in an idom walk. By convention, we slightly bias this in favor of |
766 | // returning true. |
767 | |
768 | // We cannot prefer right if right is further away in an idom walk. |
769 | if (m_dominators.strictlyDominates(right->owner, left->owner)) |
770 | return false; |
771 | |
772 | return true; |
773 | } |
774 | |
775 | template<Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Commutativity commutativity = NotCommutative> |
776 | void appendBinOp(Value* left, Value* right) |
777 | { |
778 | Air::Opcode opcode = opcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, left->type()); |
779 | |
780 | Tmp result = tmp(m_value); |
781 | |
782 | // Three-operand forms like: |
783 | // Op a, b, c |
784 | // mean something like: |
785 | // c = a Op b |
786 | |
787 | if (isValidForm(opcode, Arg::Imm, Arg::Tmp, Arg::Tmp)) { |
788 | if (commutativity == Commutative) { |
789 | if (imm(right)) { |
790 | append(opcode, imm(right), tmp(left), result); |
791 | return; |
792 | } |
793 | } else { |
794 | // A non-commutative operation could have an immediate in left. |
795 | if (imm(left)) { |
796 | append(opcode, imm(left), tmp(right), result); |
797 | return; |
798 | } |
799 | } |
800 | } |
801 | |
802 | if (isValidForm(opcode, Arg::BitImm, Arg::Tmp, Arg::Tmp)) { |
803 | if (commutativity == Commutative) { |
804 | if (Arg rightArg = bitImm(right)) { |
805 | append(opcode, rightArg, tmp(left), result); |
806 | return; |
807 | } |
808 | } else { |
809 | // A non-commutative operation could have an immediate in left. |
810 | if (Arg leftArg = bitImm(left)) { |
811 | append(opcode, leftArg, tmp(right), result); |
812 | return; |
813 | } |
814 | } |
815 | } |
816 | |
817 | if (isValidForm(opcode, Arg::BitImm64, Arg::Tmp, Arg::Tmp)) { |
818 | if (commutativity == Commutative) { |
819 | if (Arg rightArg = bitImm64(right)) { |
820 | append(opcode, rightArg, tmp(left), result); |
821 | return; |
822 | } |
823 | } else { |
824 | // A non-commutative operation could have an immediate in left. |
825 | if (Arg leftArg = bitImm64(left)) { |
826 | append(opcode, leftArg, tmp(right), result); |
827 | return; |
828 | } |
829 | } |
830 | } |
831 | |
832 | if (imm(right) && isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) { |
833 | append(opcode, tmp(left), imm(right), result); |
834 | return; |
835 | } |
836 | |
837 | // Note that no extant architecture has a three-operand form of binary operations that also |
838 | // load from memory. If such an abomination did exist, we would handle it somewhere around |
839 | // here. |
840 | |
841 | // Two-operand forms like: |
842 | // Op a, b |
843 | // mean something like: |
844 | // b = b Op a |
845 | |
846 | // At this point, we prefer versions of the operation that have a fused load or an immediate |
847 | // over three operand forms. |
848 | |
849 | if (left != right) { |
850 | ArgPromise leftAddr = loadPromise(left); |
851 | if (isValidForm(opcode, leftAddr.kind(), Arg::Tmp, Arg::Tmp)) { |
852 | append(leftAddr.inst(opcode, m_value, leftAddr.consume(*this), tmp(right), result)); |
853 | return; |
854 | } |
855 | |
856 | if (commutativity == Commutative) { |
857 | if (isValidForm(opcode, leftAddr.kind(), Arg::Tmp)) { |
858 | append(relaxedMoveForType(m_value->type()), tmp(right), result); |
859 | append(leftAddr.inst(opcode, m_value, leftAddr.consume(*this), result)); |
860 | return; |
861 | } |
862 | } |
863 | |
864 | ArgPromise rightAddr = loadPromise(right); |
865 | if (isValidForm(opcode, Arg::Tmp, rightAddr.kind(), Arg::Tmp)) { |
866 | append(rightAddr.inst(opcode, m_value, tmp(left), rightAddr.consume(*this), result)); |
867 | return; |
868 | } |
869 | |
870 | if (commutativity == Commutative) { |
871 | if (isValidForm(opcode, rightAddr.kind(), Arg::Tmp, Arg::Tmp)) { |
872 | append(rightAddr.inst(opcode, m_value, rightAddr.consume(*this), tmp(left), result)); |
873 | return; |
874 | } |
875 | } |
876 | |
877 | if (isValidForm(opcode, rightAddr.kind(), Arg::Tmp)) { |
878 | append(relaxedMoveForType(m_value->type()), tmp(left), result); |
879 | append(rightAddr.inst(opcode, m_value, rightAddr.consume(*this), result)); |
880 | return; |
881 | } |
882 | } |
883 | |
884 | if (imm(right) && isValidForm(opcode, Arg::Imm, Arg::Tmp)) { |
885 | append(relaxedMoveForType(m_value->type()), tmp(left), result); |
886 | append(opcode, imm(right), result); |
887 | return; |
888 | } |
889 | |
890 | if (isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)) { |
891 | append(opcode, tmp(left), tmp(right), result); |
892 | return; |
893 | } |
894 | |
895 | if (commutativity == Commutative && preferRightForResult(left, right)) { |
896 | append(relaxedMoveForType(m_value->type()), tmp(right), result); |
897 | append(opcode, tmp(left), result); |
898 | return; |
899 | } |
900 | |
901 | append(relaxedMoveForType(m_value->type()), tmp(left), result); |
902 | append(opcode, tmp(right), result); |
903 | } |
904 | |
905 | template<Air::Opcode opcode32, Air::Opcode opcode64, Commutativity commutativity = NotCommutative> |
906 | void appendBinOp(Value* left, Value* right) |
907 | { |
908 | appendBinOp<opcode32, opcode64, Air::Oops, Air::Oops, commutativity>(left, right); |
909 | } |
910 | |
911 | template<Air::Opcode opcode32, Air::Opcode opcode64> |
912 | void appendShift(Value* value, Value* amount) |
913 | { |
914 | using namespace Air; |
915 | Air::Opcode opcode = opcodeForType(opcode32, opcode64, value->type()); |
916 | |
917 | if (imm(amount)) { |
918 | if (isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) { |
919 | append(opcode, tmp(value), imm(amount), tmp(m_value)); |
920 | return; |
921 | } |
922 | if (isValidForm(opcode, Arg::Imm, Arg::Tmp)) { |
923 | append(Move, tmp(value), tmp(m_value)); |
924 | append(opcode, imm(amount), tmp(m_value)); |
925 | return; |
926 | } |
927 | } |
928 | |
929 | if (isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)) { |
930 | append(opcode, tmp(value), tmp(amount), tmp(m_value)); |
931 | return; |
932 | } |
933 | |
934 | append(Move, tmp(value), tmp(m_value)); |
935 | append(Move, tmp(amount), m_ecx); |
936 | append(opcode, m_ecx, tmp(m_value)); |
937 | } |
938 | |
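    // Tries to turn Store(UnOp(Load(addr)), addr) into a single Air instruction that operates
    // directly on memory (for example, a not or negate with a memory operand on x86). Returns
    // false if the load doesn't read from the store's address or no such form exists on this
    // target.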
939 | template<Air::Opcode opcode32, Air::Opcode opcode64> |
940 | bool tryAppendStoreUnOp(Value* value) |
941 | { |
942 | Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, value->type()); |
943 | if (opcode == Air::Oops) |
944 | return false; |
945 | |
946 | Arg storeAddr = addr(m_value); |
947 | ASSERT(storeAddr); |
948 | |
949 | ArgPromise loadPromise = this->loadPromise(value); |
950 | if (loadPromise.peek() != storeAddr) |
951 | return false; |
952 | |
953 | if (!isValidForm(opcode, storeAddr.kind())) |
954 | return false; |
955 | |
956 | loadPromise.consume(*this); |
957 | append(trappingInst(m_value, loadPromise.inst(opcode, m_value, storeAddr))); |
958 | return true; |
959 | } |
960 | |
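    // Tries to turn Store(BinOp(Load(addr), @x), addr), or the mirrored form when the opcode is
    // commutative, into a single read-modify-write Air instruction, e.g. an "Add32 %tmp, (addr)"
    // or immediate-to-memory form on x86. Returns false when no such form exists.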
961 | template< |
962 | Air::Opcode opcode32, Air::Opcode opcode64, Commutativity commutativity = NotCommutative> |
963 | bool tryAppendStoreBinOp(Value* left, Value* right) |
964 | { |
965 | RELEASE_ASSERT(m_value->as<MemoryValue>()); |
966 | |
967 | Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, left->type()); |
968 | if (opcode == Air::Oops) |
969 | return false; |
970 | |
971 | if (m_value->as<MemoryValue>()->hasFence()) |
972 | return false; |
973 | |
974 | Arg storeAddr = addr(m_value); |
975 | ASSERT(storeAddr); |
976 | |
977 | auto getLoadPromise = [&] (Value* load) -> ArgPromise { |
978 | switch (m_value->opcode()) { |
979 | case B3::Store: |
980 | if (load->opcode() != B3::Load) |
981 | return ArgPromise(); |
982 | break; |
983 | case B3::Store8: |
984 | if (load->opcode() != B3::Load8Z && load->opcode() != B3::Load8S) |
985 | return ArgPromise(); |
986 | break; |
987 | case B3::Store16: |
988 | if (load->opcode() != B3::Load16Z && load->opcode() != B3::Load16S) |
989 | return ArgPromise(); |
990 | break; |
991 | default: |
992 | return ArgPromise(); |
993 | } |
994 | return loadPromiseAnyOpcode(load); |
995 | }; |
996 | |
997 | ArgPromise loadPromise; |
998 | Value* otherValue = nullptr; |
999 | |
1000 | loadPromise = getLoadPromise(left); |
1001 | if (loadPromise.peek() == storeAddr) |
1002 | otherValue = right; |
1003 | else if (commutativity == Commutative) { |
1004 | loadPromise = getLoadPromise(right); |
1005 | if (loadPromise.peek() == storeAddr) |
1006 | otherValue = left; |
1007 | } |
1008 | |
1009 | if (!otherValue) |
1010 | return false; |
1011 | |
1012 | if (isValidForm(opcode, Arg::Imm, storeAddr.kind()) && imm(otherValue)) { |
1013 | loadPromise.consume(*this); |
1014 | append(trappingInst(m_value, loadPromise.inst(opcode, m_value, imm(otherValue), storeAddr))); |
1015 | return true; |
1016 | } |
1017 | |
1018 | if (!isValidForm(opcode, Arg::Tmp, storeAddr.kind())) |
1019 | return false; |
1020 | |
1021 | loadPromise.consume(*this); |
1022 | append(trappingInst(m_value, loadPromise.inst(opcode, m_value, tmp(otherValue), storeAddr))); |
1023 | return true; |
1024 | } |
1025 | |
1026 | Inst createStore(Air::Kind move, Value* value, const Arg& dest) |
1027 | { |
1028 | using namespace Air; |
1029 | if (auto imm_value = imm(value)) { |
1030 | if (isARM64() && imm_value.value() == 0) { |
1031 | switch (move.opcode) { |
1032 | default: |
1033 | break; |
1034 | case Air::Move32: |
1035 | if (isValidForm(StoreZero32, dest.kind()) && dest.isValidForm(Width32)) |
1036 | return Inst(StoreZero32, m_value, dest); |
1037 | break; |
1038 | case Air::Move: |
1039 | if (isValidForm(StoreZero64, dest.kind()) && dest.isValidForm(Width64)) |
1040 | return Inst(StoreZero64, m_value, dest); |
1041 | break; |
1042 | } |
1043 | } |
1044 | if (isValidForm(move.opcode, Arg::Imm, dest.kind())) |
1045 | return Inst(move, m_value, imm_value, dest); |
1046 | } |
1047 | |
1048 | return Inst(move, m_value, tmp(value), dest); |
1049 | } |
1050 | |
1051 | Air::Opcode storeOpcode(Width width, Bank bank) |
1052 | { |
1053 | using namespace Air; |
1054 | switch (width) { |
1055 | case Width8: |
1056 | RELEASE_ASSERT(bank == GP); |
1057 | return Air::Store8; |
1058 | case Width16: |
1059 | RELEASE_ASSERT(bank == GP); |
1060 | return Air::Store16; |
1061 | case Width32: |
1062 | switch (bank) { |
1063 | case GP: |
1064 | return Move32; |
1065 | case FP: |
1066 | return MoveFloat; |
1067 | } |
1068 | break; |
1069 | case Width64: |
1070 | RELEASE_ASSERT(is64Bit()); |
1071 | switch (bank) { |
1072 | case GP: |
1073 | return Move; |
1074 | case FP: |
1075 | return MoveDouble; |
1076 | } |
1077 | break; |
1078 | } |
1079 | RELEASE_ASSERT_NOT_REACHED(); |
1080 | } |
1081 | |
1082 | void appendStore(Value* value, const Arg& dest) |
1083 | { |
1084 | using namespace Air; |
1085 | MemoryValue* memory = value->as<MemoryValue>(); |
1086 | RELEASE_ASSERT(memory->isStore()); |
1087 | |
1088 | Air::Kind kind; |
1089 | if (memory->hasFence()) { |
1090 | RELEASE_ASSERT(memory->accessBank() == GP); |
1091 | |
1092 | if (isX86()) { |
1093 | kind = OPCODE_FOR_WIDTH(Xchg, memory->accessWidth()); |
1094 | kind.effects = true; |
1095 | Tmp swapTmp = m_code.newTmp(GP); |
1096 | append(relaxedMoveForType(memory->accessType()), tmp(memory->child(0)), swapTmp); |
1097 | append(kind, swapTmp, dest); |
1098 | return; |
1099 | } |
1100 | |
1101 | kind = OPCODE_FOR_WIDTH(StoreRel, memory->accessWidth()); |
1102 | } else |
1103 | kind = storeOpcode(memory->accessWidth(), memory->accessBank()); |
1104 | |
1105 | kind.effects |= memory->traps(); |
1106 | |
1107 | append(createStore(kind, memory->child(0), dest)); |
1108 | } |
1109 | |
1110 | Air::Opcode moveForType(Type type) |
1111 | { |
1112 | using namespace Air; |
1113 | switch (type) { |
1114 | case Int32: |
1115 | return Move32; |
1116 | case Int64: |
1117 | RELEASE_ASSERT(is64Bit()); |
1118 | return Move; |
1119 | case Float: |
1120 | return MoveFloat; |
1121 | case Double: |
1122 | return MoveDouble; |
1123 | case Void: |
1124 | break; |
1125 | } |
1126 | RELEASE_ASSERT_NOT_REACHED(); |
1127 | return Air::Oops; |
1128 | } |
1129 | |
1130 | Air::Opcode relaxedMoveForType(Type type) |
1131 | { |
1132 | using namespace Air; |
1133 | switch (type) { |
1134 | case Int32: |
1135 | case Int64: |
1136 | // For Int32, we could return Move or Move32. It's a trade-off. |
1137 | // |
1138 | // Move32: Using Move32 guarantees that we use the narrower move, but in cases where the |
1139 | // register allocator can't prove that the variables involved are 32-bit, this will |
1140 | // disable coalescing. |
1141 | // |
1142 | // Move: Using Move guarantees that the register allocator can coalesce normally, but in |
1143 | // cases where it can't prove that the variables are 32-bit and it doesn't coalesce, |
1144 | // this will force us to use a full 64-bit Move instead of the slightly cheaper |
1145 | // 32-bit Move32. |
1146 | // |
1147 | // Coalescing is a lot more profitable than turning Move into Move32. So, it's better to |
1148 | // use Move here because in cases where the register allocator cannot prove that |
1149 | // everything is 32-bit, we still get coalescing. |
1150 | return Move; |
1151 | case Float: |
1152 | // MoveFloat is always coalescable and we never convert MoveDouble to MoveFloat, so we |
1153 | // should use MoveFloat when we know that the temporaries involved are 32-bit. |
1154 | return MoveFloat; |
1155 | case Double: |
1156 | return MoveDouble; |
1157 | case Void: |
1158 | break; |
1159 | } |
1160 | RELEASE_ASSERT_NOT_REACHED(); |
1161 | return Air::Oops; |
1162 | } |
1163 | |
1164 | #if ENABLE(MASM_PROBE) |
1165 | template<typename... Arguments> |
1166 | void print(Arguments&&... arguments) |
1167 | { |
1168 | Value* origin = m_value; |
1169 | print(origin, std::forward<Arguments>(arguments)...); |
1170 | } |
1171 | |
1172 | template<typename... Arguments> |
1173 | void print(Value* origin, Arguments&&... arguments) |
1174 | { |
1175 | auto printList = Printer::makePrintRecordList(arguments...); |
1176 | auto printSpecial = static_cast<Air::PrintSpecial*>(m_code.addSpecial(std::make_unique<Air::PrintSpecial>(printList))); |
1177 | Inst inst(Air::Patch, origin, Arg::special(printSpecial)); |
1178 | Printer::appendAirArgs(inst, std::forward<Arguments>(arguments)...); |
1179 | append(WTFMove(inst)); |
1180 | } |
1181 | #endif // ENABLE(MASM_PROBE) |
1182 | |
1183 | template<typename... Arguments> |
1184 | void append(Air::Kind kind, Arguments&&... arguments) |
1185 | { |
1186 | m_insts.last().append(Inst(kind, m_value, std::forward<Arguments>(arguments)...)); |
1187 | } |
1188 | |
1189 | template<typename... Arguments> |
1190 | void appendTrapping(Air::Kind kind, Arguments&&... arguments) |
1191 | { |
1192 | m_insts.last().append(trappingInst(m_value, kind, m_value, std::forward<Arguments>(arguments)...)); |
1193 | } |
1194 | |
1195 | void append(Inst&& inst) |
1196 | { |
1197 | m_insts.last().append(WTFMove(inst)); |
1198 | } |
1199 | void append(const Inst& inst) |
1200 | { |
1201 | m_insts.last().append(inst); |
1202 | } |
1203 | |
1204 | void finishAppendingInstructions(Air::BasicBlock* target) |
1205 | { |
1206 | // Now append the instructions. m_insts contains them in reverse order, so we process |
1207 | // it in reverse. |
1208 | for (unsigned i = m_insts.size(); i--;) { |
1209 | for (Inst& inst : m_insts[i]) |
1210 | target->appendInst(WTFMove(inst)); |
1211 | } |
1212 | m_insts.shrink(0); |
1213 | } |
1214 | |
1215 | Air::BasicBlock* newBlock() |
1216 | { |
1217 | return m_blockInsertionSet.insertAfter(m_blockToBlock[m_block]); |
1218 | } |
1219 | |
1220 | // NOTE: This will create a continuation block (`nextBlock`) *after* any blocks you've created using |
1221 | // newBlock(). So, it's preferable to create all of your blocks upfront using newBlock(). Also note |
1222 | // that any code you emit before this will be prepended to the continuation, and any code you emit |
1223 | // after this will be appended to the previous block. |
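    // Illustrative sketch of a typical use (the successor wiring is this example's assumption,
    // not something splitBlock() does for you):
    //
    //     Air::BasicBlock* previousBlock;
    //     Air::BasicBlock* nextBlock;
    //     splitBlock(previousBlock, nextBlock);
    //     ... emit the terminal for previousBlock and point previousBlock->successors() at
    //     nextBlock (and at any blocks created earlier with newBlock()) ...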
1224 | void splitBlock(Air::BasicBlock*& previousBlock, Air::BasicBlock*& nextBlock) |
1225 | { |
1226 | Air::BasicBlock* block = m_blockToBlock[m_block]; |
1227 | |
1228 | previousBlock = block; |
1229 | nextBlock = m_blockInsertionSet.insertAfter(block); |
1230 | |
1231 | finishAppendingInstructions(nextBlock); |
1232 | nextBlock->successors() = block->successors(); |
1233 | block->successors().clear(); |
1234 | |
1235 | m_insts.append(Vector<Inst>()); |
1236 | } |
1237 | |
1238 | template<typename T, typename... Arguments> |
1239 | T* ensureSpecial(T*& field, Arguments&&... arguments) |
1240 | { |
1241 | if (!field) { |
1242 | field = static_cast<T*>( |
1243 | m_code.addSpecial(std::make_unique<T>(std::forward<Arguments>(arguments)...))); |
1244 | } |
1245 | return field; |
1246 | } |
1247 | |
1248 | template<typename... Arguments> |
1249 | CheckSpecial* ensureCheckSpecial(Arguments&&... arguments) |
1250 | { |
1251 | CheckSpecial::Key key(std::forward<Arguments>(arguments)...); |
1252 | auto result = m_checkSpecials.add(key, nullptr); |
1253 | return ensureSpecial(result.iterator->value, key); |
1254 | } |
1255 | |
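    // Appends one Arg per constrained child of the stackmap (skipping the first numSkipped
    // children), materializing each child as an immediate, a tmp, a fixed register, or a stack
    // argument, as dictated by its ValueRep.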
1256 | void fillStackmap(Inst& inst, StackmapValue* stackmap, unsigned numSkipped) |
1257 | { |
1258 | for (unsigned i = numSkipped; i < stackmap->numChildren(); ++i) { |
1259 | ConstrainedValue value = stackmap->constrainedChild(i); |
1260 | |
1261 | Arg arg; |
1262 | switch (value.rep().kind()) { |
1263 | case ValueRep::WarmAny: |
1264 | case ValueRep::ColdAny: |
1265 | case ValueRep::LateColdAny: |
1266 | if (imm(value.value())) |
1267 | arg = imm(value.value()); |
1268 | else if (value.value()->hasInt64()) |
1269 | arg = Arg::bigImm(value.value()->asInt64()); |
1270 | else if (value.value()->hasDouble() && canBeInternal(value.value())) { |
1271 | commitInternal(value.value()); |
1272 | arg = Arg::bigImm(bitwise_cast<int64_t>(value.value()->asDouble())); |
1273 | } else |
1274 | arg = tmp(value.value()); |
1275 | break; |
1276 | case ValueRep::SomeRegister: |
1277 | case ValueRep::SomeLateRegister: |
1278 | arg = tmp(value.value()); |
1279 | break; |
1280 | case ValueRep::SomeRegisterWithClobber: { |
1281 | Tmp dstTmp = m_code.newTmp(value.value()->resultBank()); |
1282 | append(relaxedMoveForType(value.value()->type()), immOrTmp(value.value()), dstTmp); |
1283 | arg = dstTmp; |
1284 | break; |
1285 | } |
1286 | case ValueRep::LateRegister: |
1287 | case ValueRep::Register: |
1288 | stackmap->earlyClobbered().clear(value.rep().reg()); |
1289 | arg = Tmp(value.rep().reg()); |
1290 | append(relaxedMoveForType(value.value()->type()), immOrTmp(value.value()), arg); |
1291 | break; |
1292 | case ValueRep::StackArgument: |
1293 | arg = Arg::callArg(value.rep().offsetFromSP()); |
1294 | append(trappingInst(m_value, createStore(moveForType(value.value()->type()), value.value(), arg))); |
1295 | break; |
1296 | default: |
1297 | RELEASE_ASSERT_NOT_REACHED(); |
1298 | break; |
1299 | } |
1300 | inst.args.append(arg); |
1301 | } |
1302 | } |
1303 | |
1304 | // Create an Inst to do the comparison specified by the given value. |
1305 | template<typename CompareFunctor, typename TestFunctor, typename CompareDoubleFunctor, typename CompareFloatFunctor> |
1306 | Inst createGenericCompare( |
1307 | Value* value, |
1308 | const CompareFunctor& compare, // Signature: (Width, Arg relCond, Arg, Arg) -> Inst |
1309 | const TestFunctor& test, // Signature: (Width, Arg resCond, Arg, Arg) -> Inst |
1310 | const CompareDoubleFunctor& compareDouble, // Signature: (Arg doubleCond, Arg, Arg) -> Inst |
1311 | const CompareFloatFunctor& compareFloat, // Signature: (Arg doubleCond, Arg, Arg) -> Inst |
1312 | bool inverted = false) |
1313 | { |
1314 | // NOTE: This is totally happy to match comparisons that have already been computed elsewhere |
1315 | // since on most architectures, the cost of branching on a previously computed comparison |
1316 | // result is almost always higher than just doing another fused compare/branch. The only time |
1317 | // it could be worse is if we have a binary comparison and both operands are variables (not |
1318 | // constants), and we encounter register pressure. Even in this case, duplicating the compare |
1319 | // so that we can fuse it to the branch will be more efficient most of the time, since |
1320 | // register pressure is not *that* common. For this reason, this algorithm will always |
1321 | // duplicate the comparison. |
1322 | // |
1323 | // However, we cannot duplicate loads. The canBeInternal() on a load will assume that we |
1324 | // already validated canBeInternal() on all of the values that got us to the load. So, even |
1325 | // if we are sharing a value, we still need to call canBeInternal() for the purpose of |
1326 | // tracking whether we are still in good shape to fuse loads. |
1327 | // |
1328 | // We could even have a chain of compare values that we fuse, and any member of the chain |
1329 | // could be shared. Once any of them are shared, then the shared one's transitive children |
1330 | // cannot be locked (i.e. commitInternal()). But if none of them are shared, then we want to |
1331 | // lock all of them because that's a prerequisite to fusing the loads so that the loads don't |
1332 | // get duplicated. For example, we might have: |
1333 | // |
1334 | // @tmp1 = LessThan(@a, @b) |
1335 | // @tmp2 = Equal(@tmp1, 0) |
1336 | // Branch(@tmp2) |
1337 | // |
1338 | // If either @a or @b are loads, then we want to have locked @tmp1 and @tmp2 so that they |
1339 | // don't emit the loads a second time. But if we had another use of @tmp2, then we cannot |
1340 | // lock @tmp1 (or @a or @b) because then we'll get into trouble when the other values that |
1341 | // try to share @tmp1 with us try to do their lowering. |
1342 | // |
1343 | // There's one more wrinkle. If we don't lock an internal value, then this internal value may |
1344 | // have already separately locked its children. So, if we're not locking a value then we need |
1345 | // to make sure that its children aren't locked. We encapsulate this in two ways: |
1346 | // |
1347 | // canCommitInternal: This variable tells us if the values that we've fused so far are |
1348 | // locked. This means that we're not sharing any of them with anyone. This permits us to fuse |
1349 | // loads. If it's false, then we cannot fuse loads and we also need to ensure that the |
1350 | // children of any values we try to fuse-by-sharing are not already locked. You don't have to |
1351 | // worry about the children locking thing if you use prepareToFuse() before trying to fuse a |
1352 | // sharable value. But, you do need to guard any load fusion by checking if canCommitInternal |
1353 | // is true. |
1354 | // |
1355 | // FusionResult prepareToFuse(value): Call this when you think that you would like to fuse |
1356 | // some value and that value is not a load. It will automatically handle the shared-or-locked |
1357 | // issues and it will clear canCommitInternal if necessary. This will return CannotFuse |
1358 | // (which acts like false) if the value cannot be locked and its children are locked. That's |
1359 | // rare, but you just need to make sure that you do smart things when this happens (i.e. just |
1360 | // use the value rather than trying to fuse it). After you call prepareToFuse(), you can |
1361 | // still change your mind about whether you will actually fuse the value. If you do fuse it, |
1362 | // you need to call commitFusion(value, fusionResult). |
1363 | // |
1364 | // commitFusion(value, fusionResult): Handles calling commitInternal(value) if fusionResult |
1365 | // is FuseAndCommit. |
1366 | |
1367 | bool canCommitInternal = true; |
1368 | |
1369 | enum FusionResult { |
1370 | CannotFuse, |
1371 | FuseAndCommit, |
1372 | Fuse |
1373 | }; |
1374 | auto prepareToFuse = [&] (Value* value) -> FusionResult { |
1375 | if (value == m_value) { |
1376 | // It's not actually internal. It's the root value. We're good to go. |
1377 | return Fuse; |
1378 | } |
1379 | |
1380 | if (canCommitInternal && canBeInternal(value)) { |
1381 | // We are the only users of this value. This also means that the value's children |
1382 | // could not have been locked, since we have now proved that m_value dominates value |
                // in the data flow graph. The only other way to reach value is from a user of m_value. If
1384 | // value's children are shared with others, then they could not have been locked |
1385 | // because their use count is greater than 1. If they are only used from value, then |
1386 | // in order for value's children to be locked, value would also have to be locked, |
1387 | // and we just proved that it wasn't. |
1388 | return FuseAndCommit; |
1389 | } |
1390 | |
1391 | // We're going to try to share value with others. It's possible that some other basic |
1392 | // block had already emitted code for value and then matched over its children and then |
1393 | // locked them, in which case we just want to use value instead of duplicating it. So, we |
1394 | // validate the children. Note that this only arises in linear chains like: |
1395 | // |
1396 | // BB#1: |
1397 | // @1 = Foo(...) |
1398 | // @2 = Bar(@1) |
1399 | // Jump(#2) |
1400 | // BB#2: |
1401 | // @3 = Baz(@2) |
1402 | // |
1403 | // Notice how we could start by generating code for BB#1 and then decide to lock @1 when |
1404 | // generating code for @2, if we have some way of fusing Bar and Foo into a single |
            // instruction. This is legal, since indeed @1 only has one user. Because @2 now has a
            // tmp (i.e. @2 is pinned), canBeInternal(@2) will return false, which brings us
1407 | // here. In that case, we cannot match over @2 because then we'd hit a hazard if we end |
1408 | // up deciding not to fuse Foo into the fused Baz/Bar. |
1409 | // |
            // Happily, there are only two places where this kind of child validation happens: in
            // rules that admit sharing, namely this one and effectiveAddr().
1412 | // |
1413 | // N.B. We could probably avoid the need to do value locking if we committed to a well |
1414 | // chosen code generation order. For example, if we guaranteed that all of the users of |
1415 | // a value get generated before that value, then there's no way for the lowering of @3 to |
1416 | // see @1 locked. But we don't want to do that, since this is a greedy instruction |
1417 | // selector and so we want to be able to play with order. |
1418 | for (Value* child : value->children()) { |
1419 | if (m_locked.contains(child)) |
1420 | return CannotFuse; |
1421 | } |
1422 | |
1423 | // It's safe to share value, but since we're sharing, it means that we aren't locking it. |
1424 | // If we don't lock it, then fusing loads is off limits and all of value's children will |
1425 | // have to go through the sharing path as well. Fusing loads is off limits because the load |
            // could already have been emitted elsewhere - so fusing it here would duplicate the load.
1427 | // We don't consider that to be a legal optimization. |
1428 | canCommitInternal = false; |
1429 | |
1430 | return Fuse; |
1431 | }; |
1432 | |
1433 | auto commitFusion = [&] (Value* value, FusionResult result) { |
1434 | if (result == FuseAndCommit) |
1435 | commitInternal(value); |
1436 | }; |
1437 | |
1438 | // Chew through any inversions. This loop isn't necessary for comparisons and branches, but |
1439 | // we do need at least one iteration of it for Check. |
1440 | for (;;) { |
1441 | bool shouldInvert = |
1442 | (value->opcode() == BitXor && value->child(1)->hasInt() && (value->child(1)->asInt() == 1) && value->child(0)->returnsBool()) |
1443 | || (value->opcode() == Equal && value->child(1)->isInt(0)); |
1444 | if (!shouldInvert) |
1445 | break; |
1446 | |
1447 | FusionResult fusionResult = prepareToFuse(value); |
1448 | if (fusionResult == CannotFuse) |
1449 | break; |
1450 | commitFusion(value, fusionResult); |
1451 | |
1452 | value = value->child(0); |
1453 | inverted = !inverted; |
1454 | } |
1455 | |
1456 | auto createRelCond = [&] ( |
1457 | MacroAssembler::RelationalCondition relationalCondition, |
1458 | MacroAssembler::DoubleCondition doubleCondition) { |
1459 | Arg relCond = Arg::relCond(relationalCondition).inverted(inverted); |
1460 | Arg doubleCond = Arg::doubleCond(doubleCondition).inverted(inverted); |
1461 | Value* left = value->child(0); |
1462 | Value* right = value->child(1); |
1463 | |
1464 | if (isInt(value->child(0)->type())) { |
1465 | Arg rightImm = imm(right); |
1466 | |
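| // tryCompare attempts both operand orders; swapping the operands of a relational compare |
| // requires flipping the condition. |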
1467 | auto tryCompare = [&] ( |
1468 | Width width, ArgPromise&& left, ArgPromise&& right) -> Inst { |
1469 | if (Inst result = compare(width, relCond, left, right)) |
1470 | return result; |
1471 | if (Inst result = compare(width, relCond.flipped(), right, left)) |
1472 | return result; |
1473 | return Inst(); |
1474 | }; |
1475 | |
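| // tryCompareLoadImm fuses a load of the left child against the immediate form of the right |
| // child, but only when the immediate is representable at the load's width and signedness. |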
1476 | auto tryCompareLoadImm = [&] ( |
1477 | Width width, B3::Opcode loadOpcode, Arg::Signedness signedness) -> Inst { |
1478 | if (rightImm && rightImm.isRepresentableAs(width, signedness)) { |
1479 | if (Inst result = tryCompare(width, loadPromise(left, loadOpcode), rightImm)) { |
1480 | commitInternal(left); |
1481 | return result; |
1482 | } |
1483 | } |
1484 | return Inst(); |
1485 | }; |
1486 | |
1487 | Width width = value->child(0)->resultWidth(); |
1488 | |
1489 | if (canCommitInternal) { |
1490 | // First handle compares that involve fewer bits than B3's type system supports. |
1491 | // This is pretty important. For example, we want this to be a single |
1492 | // instruction: |
1493 | // |
1494 | // @1 = Load8S(...) |
1495 | // @2 = Const32(...) |
1496 | // @3 = LessThan(@1, @2) |
1497 | // Branch(@3) |
1498 | |
1499 | if (relCond.isSignedCond()) { |
1500 | if (Inst result = tryCompareLoadImm(Width8, Load8S, Arg::Signed)) |
1501 | return result; |
1502 | } |
1503 | |
1504 | if (relCond.isUnsignedCond()) { |
1505 | if (Inst result = tryCompareLoadImm(Width8, Load8Z, Arg::Unsigned)) |
1506 | return result; |
1507 | } |
1508 | |
1509 | if (relCond.isSignedCond()) { |
1510 | if (Inst result = tryCompareLoadImm(Width16, Load16S, Arg::Signed)) |
1511 | return result; |
1512 | } |
1513 | |
1514 | if (relCond.isUnsignedCond()) { |
1515 | if (Inst result = tryCompareLoadImm(Width16, Load16Z, Arg::Unsigned)) |
1516 | return result; |
1517 | } |
1518 | |
1519 | // Now handle compares that involve a load and an immediate. |
1520 | |
1521 | if (Inst result = tryCompareLoadImm(width, Load, Arg::Signed)) |
1522 | return result; |
1523 | |
1524 | // Now handle compares that involve a load. It's not obvious whether it's better to |
1525 | // handle this before or after the immediate cases. Probably doesn't matter. |
1526 | |
1527 | if (Inst result = tryCompare(width, loadPromise(left), tmpPromise(right))) { |
1528 | commitInternal(left); |
1529 | return result; |
1530 | } |
1531 | |
1532 | if (Inst result = tryCompare(width, tmpPromise(left), loadPromise(right))) { |
1533 | commitInternal(right); |
1534 | return result; |
1535 | } |
1536 | } |
1537 | |
1538 | // Now handle compares that involve an immediate and a tmp. |
1539 | |
1540 | if (rightImm && rightImm.isRepresentableAs<int32_t>()) { |
1541 | if (Inst result = tryCompare(width, tmpPromise(left), rightImm)) |
1542 | return result; |
1543 | } |
1544 | |
1545 | // Finally, handle comparison between tmps. |
1546 | ArgPromise leftPromise = tmpPromise(left); |
1547 | ArgPromise rightPromise = tmpPromise(right); |
1548 | return compare(width, relCond, leftPromise, rightPromise); |
1549 | } |
1550 | |
1551 | // Floating point comparisons can't really do anything smart. |
1552 | ArgPromise leftPromise = tmpPromise(left); |
1553 | ArgPromise rightPromise = tmpPromise(right); |
1554 | if (value->child(0)->type() == Float) |
1555 | return compareFloat(doubleCond, leftPromise, rightPromise); |
1556 | return compareDouble(doubleCond, leftPromise, rightPromise); |
1557 | }; |
1558 | |
1559 | Width width = value->resultWidth(); |
1560 | Arg resCond = Arg::resCond(MacroAssembler::NonZero).inverted(inverted); |
1561 | |
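| // Unlike relational conditions, the test (result) condition is symmetric in its operands, so |
| // tryTest can try both operand orders without flipping anything. |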
1562 | auto tryTest = [&] ( |
1563 | Width width, ArgPromise&& left, ArgPromise&& right) -> Inst { |
1564 | if (Inst result = test(width, resCond, left, right)) |
1565 | return result; |
1566 | if (Inst result = test(width, resCond, right, left)) |
1567 | return result; |
1568 | return Inst(); |
1569 | }; |
1570 | |
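| // attemptFused maps the B3 comparison opcode to the matching MacroAssembler conditions and |
| // hands them to the helpers above; BitAnd is handled as a test rather than a compare. |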
1571 | auto attemptFused = [&] () -> Inst { |
1572 | switch (value->opcode()) { |
1573 | case NotEqual: |
1574 | return createRelCond(MacroAssembler::NotEqual, MacroAssembler::DoubleNotEqualOrUnordered); |
1575 | case Equal: |
1576 | return createRelCond(MacroAssembler::Equal, MacroAssembler::DoubleEqual); |
1577 | case LessThan: |
1578 | return createRelCond(MacroAssembler::LessThan, MacroAssembler::DoubleLessThan); |
1579 | case GreaterThan: |
1580 | return createRelCond(MacroAssembler::GreaterThan, MacroAssembler::DoubleGreaterThan); |
1581 | case LessEqual: |
1582 | return createRelCond(MacroAssembler::LessThanOrEqual, MacroAssembler::DoubleLessThanOrEqual); |
1583 | case GreaterEqual: |
1584 | return createRelCond(MacroAssembler::GreaterThanOrEqual, MacroAssembler::DoubleGreaterThanOrEqual); |
1585 | case EqualOrUnordered: |
1586 | // The integer condition is never used in this case. |
1587 | return createRelCond(MacroAssembler::Equal, MacroAssembler::DoubleEqualOrUnordered); |
1588 | case Above: |
1589 | // We use a bogus double condition because these integer comparisons won't go down that |
1590 | // path anyway. |
1591 | return createRelCond(MacroAssembler::Above, MacroAssembler::DoubleEqual); |
1592 | case Below: |
1593 | return createRelCond(MacroAssembler::Below, MacroAssembler::DoubleEqual); |
1594 | case AboveEqual: |
1595 | return createRelCond(MacroAssembler::AboveOrEqual, MacroAssembler::DoubleEqual); |
1596 | case BelowEqual: |
1597 | return createRelCond(MacroAssembler::BelowOrEqual, MacroAssembler::DoubleEqual); |
1598 | case BitAnd: { |
1599 | Value* left = value->child(0); |
1600 | Value* right = value->child(1); |
1601 | |
1602 | bool hasRightConst; |
1603 | int64_t rightConst; |
1604 | Arg rightImm; |
1605 | Arg rightImm64; |
1606 | |
1607 | hasRightConst = right->hasInt(); |
1608 | if (hasRightConst) { |
1609 | rightConst = right->asInt(); |
1610 | rightImm = bitImm(right); |
1611 | rightImm64 = bitImm64(right); |
1612 | } |
1613 | |
1614 | auto tryTestLoadImm = [&] (Width width, Arg::Signedness signedness, B3::Opcode loadOpcode) -> Inst { |
1615 | if (!hasRightConst) |
1616 | return Inst(); |
1617 | // Signed loads will create high bits, so if the immediate has high bits |
1618 | // then we cannot proceed. Consider BitAnd(Load8S(ptr), 0x101). This cannot |
1619 | // be turned into testb (ptr), $1, since if the high bit within that byte |
1620 | // was set then it would be extended to include 0x100. The handling below |
1621 | // won't anticipate this, so we need to catch it here. |
1622 | if (signedness == Arg::Signed |
1623 | && !Arg::isRepresentableAs(width, Arg::Unsigned, rightConst)) |
1624 | return Inst(); |
1625 | |
1626 | // FIXME: If this is unsigned then we can chop things off of the immediate. |
1627 | // This might make the immediate more legal. Perhaps that's a job for |
1628 | // strength reduction? |
1629 | // https://bugs.webkit.org/show_bug.cgi?id=169248 |
1630 | |
1631 | if (rightImm) { |
1632 | if (Inst result = tryTest(width, loadPromise(left, loadOpcode), rightImm)) { |
1633 | commitInternal(left); |
1634 | return result; |
1635 | } |
1636 | } |
1637 | if (rightImm64) { |
1638 | if (Inst result = tryTest(width, loadPromise(left, loadOpcode), rightImm64)) { |
1639 | commitInternal(left); |
1640 | return result; |
1641 | } |
1642 | } |
1643 | return Inst(); |
1644 | }; |
1645 | |
1646 | if (canCommitInternal) { |
1647 | // First handle tests that involve fewer bits than B3's type system supports. |
1648 | |
1649 | if (Inst result = tryTestLoadImm(Width8, Arg::Unsigned, Load8Z)) |
1650 | return result; |
1651 | |
1652 | if (Inst result = tryTestLoadImm(Width8, Arg::Signed, Load8S)) |
1653 | return result; |
1654 | |
1655 | if (Inst result = tryTestLoadImm(Width16, Arg::Unsigned, Load16Z)) |
1656 | return result; |
1657 | |
1658 | if (Inst result = tryTestLoadImm(Width16, Arg::Signed, Load16S)) |
1659 | return result; |
1660 | |
1661 | // This allows us to use a 32-bit test for 64-bit BitAnd if the immediate is |
1662 | // representable as an unsigned 32-bit value. The logic involved is the same |
1663 | // as if we were pondering using a 32-bit test for |
1664 | // BitAnd(SExt(Load(ptr)), const), in the sense that in both cases we have |
1665 | // to worry about high bits. So, we use the "Signed" version of this helper. |
1666 | if (Inst result = tryTestLoadImm(Width32, Arg::Signed, Load)) |
1667 | return result; |
1668 | |
1669 | // This is needed to handle 32-bit test for arbitrary 32-bit immediates. |
1670 | if (Inst result = tryTestLoadImm(width, Arg::Unsigned, Load)) |
1671 | return result; |
1672 | |
1673 | // Now handle tests that involve a load. |
1674 | |
1675 | Width width = value->child(0)->resultWidth(); |
1676 | if (Inst result = tryTest(width, loadPromise(left), tmpPromise(right))) { |
1677 | commitInternal(left); |
1678 | return result; |
1679 | } |
1680 | |
1681 | if (Inst result = tryTest(width, tmpPromise(left), loadPromise(right))) { |
1682 | commitInternal(right); |
1683 | return result; |
1684 | } |
1685 | } |
1686 | |
1687 | // Now handle tests that involve an immediate and a tmp. |
1688 | |
1689 | if (hasRightConst) { |
1690 | if ((width == Width32 && rightConst == 0xffffffff) |
1691 | || (width == Width64 && rightConst == -1)) { |
1692 | if (Inst result = tryTest(width, tmpPromise(left), tmpPromise(left))) |
1693 | return result; |
1694 | } |
1695 | if (isRepresentableAs<uint32_t>(rightConst)) { |
1696 | if (Inst result = tryTest(Width32, tmpPromise(left), rightImm)) |
1697 | return result; |
1698 | if (Inst result = tryTest(Width32, tmpPromise(left), rightImm64)) |
1699 | return result; |
1700 | } |
1701 | if (Inst result = tryTest(width, tmpPromise(left), rightImm)) |
1702 | return result; |
1703 | if (Inst result = tryTest(width, tmpPromise(left), rightImm64)) |
1704 | return result; |
1705 | } |
1706 | |
1707 | // Finally, just do tmps. |
1708 | return tryTest(width, tmpPromise(left), tmpPromise(right)); |
1709 | } |
1710 | default: |
1711 | return Inst(); |
1712 | } |
1713 | }; |
1714 | |
1715 | if (FusionResult fusionResult = prepareToFuse(value)) { |
1716 | if (Inst result = attemptFused()) { |
1717 | commitFusion(value, fusionResult); |
1718 | return result; |
1719 | } |
1720 | } |
1721 | |
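| // If we get here, value is not a comparison we can fuse, so we fall back to testing it |
| // against all-ones, i.e. branching or setting on whether value is non-zero. |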
1722 | if (Arg::isValidBitImmForm(-1)) { |
1723 | if (canCommitInternal && value->as<MemoryValue>()) { |
1724 | // Handle things like Branch(Load8Z(value)) |
1725 | |
1726 | if (Inst result = tryTest(Width8, loadPromise(value, Load8Z), Arg::bitImm(-1))) { |
1727 | commitInternal(value); |
1728 | return result; |
1729 | } |
1730 | |
1731 | if (Inst result = tryTest(Width8, loadPromise(value, Load8S), Arg::bitImm(-1))) { |
1732 | commitInternal(value); |
1733 | return result; |
1734 | } |
1735 | |
1736 | if (Inst result = tryTest(Width16, loadPromise(value, Load16Z), Arg::bitImm(-1))) { |
1737 | commitInternal(value); |
1738 | return result; |
1739 | } |
1740 | |
1741 | if (Inst result = tryTest(Width16, loadPromise(value, Load16S), Arg::bitImm(-1))) { |
1742 | commitInternal(value); |
1743 | return result; |
1744 | } |
1745 | |
1746 | if (Inst result = tryTest(width, loadPromise(value), Arg::bitImm(-1))) { |
1747 | commitInternal(value); |
1748 | return result; |
1749 | } |
1750 | } |
1751 | |
1752 | ArgPromise leftPromise = tmpPromise(value); |
1753 | ArgPromise rightPromise = Arg::bitImm(-1); |
1754 | if (Inst result = test(width, resCond, leftPromise, rightPromise)) |
1755 | return result; |
1756 | } |
1757 | |
1758 | // Sometimes this is the only form of test available. We prefer not to use this because |
1759 | // it's less canonical. |
1760 | ArgPromise leftPromise = tmpPromise(value); |
1761 | ArgPromise rightPromise = tmpPromise(value); |
1762 | return test(width, resCond, leftPromise, rightPromise); |
1763 | } |
1764 | |
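| // createBranch lowers value (plus anything it manages to fuse) using the Branch/BranchTest/ |
| // BranchDouble/BranchFloat forms; inverted inverts the condition being branched on. |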
1765 | Inst createBranch(Value* value, bool inverted = false) |
1766 | { |
1767 | using namespace Air; |
1768 | return createGenericCompare( |
1769 | value, |
1770 | [this] ( |
1771 | Width width, const Arg& relCond, |
1772 | ArgPromise& left, ArgPromise& right) -> Inst { |
1773 | switch (width) { |
1774 | case Width8: |
1775 | if (isValidForm(Branch8, Arg::RelCond, left.kind(), right.kind())) { |
1776 | return left.inst(right.inst( |
1777 | Branch8, m_value, relCond, |
1778 | left.consume(*this), right.consume(*this))); |
1779 | } |
1780 | return Inst(); |
1781 | case Width16: |
1782 | return Inst(); |
1783 | case Width32: |
1784 | if (isValidForm(Branch32, Arg::RelCond, left.kind(), right.kind())) { |
1785 | return left.inst(right.inst( |
1786 | Branch32, m_value, relCond, |
1787 | left.consume(*this), right.consume(*this))); |
1788 | } |
1789 | return Inst(); |
1790 | case Width64: |
1791 | if (isValidForm(Branch64, Arg::RelCond, left.kind(), right.kind())) { |
1792 | return left.inst(right.inst( |
1793 | Branch64, m_value, relCond, |
1794 | left.consume(*this), right.consume(*this))); |
1795 | } |
1796 | return Inst(); |
1797 | } |
1798 | ASSERT_NOT_REACHED(); |
1799 | }, |
1800 | [this] ( |
1801 | Width width, const Arg& resCond, |
1802 | ArgPromise& left, ArgPromise& right) -> Inst { |
1803 | switch (width) { |
1804 | case Width8: |
1805 | if (isValidForm(BranchTest8, Arg::ResCond, left.kind(), right.kind())) { |
1806 | return left.inst(right.inst( |
1807 | BranchTest8, m_value, resCond, |
1808 | left.consume(*this), right.consume(*this))); |
1809 | } |
1810 | return Inst(); |
1811 | case Width16: |
1812 | return Inst(); |
1813 | case Width32: |
1814 | if (isValidForm(BranchTest32, Arg::ResCond, left.kind(), right.kind())) { |
1815 | return left.inst(right.inst( |
1816 | BranchTest32, m_value, resCond, |
1817 | left.consume(*this), right.consume(*this))); |
1818 | } |
1819 | return Inst(); |
1820 | case Width64: |
1821 | if (isValidForm(BranchTest64, Arg::ResCond, left.kind(), right.kind())) { |
1822 | return left.inst(right.inst( |
1823 | BranchTest64, m_value, resCond, |
1824 | left.consume(*this), right.consume(*this))); |
1825 | } |
1826 | return Inst(); |
1827 | } |
1828 | ASSERT_NOT_REACHED(); |
1829 | }, |
1830 | [this] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst { |
1831 | if (isValidForm(BranchDouble, Arg::DoubleCond, left.kind(), right.kind())) { |
1832 | return left.inst(right.inst( |
1833 | BranchDouble, m_value, doubleCond, |
1834 | left.consume(*this), right.consume(*this))); |
1835 | } |
1836 | return Inst(); |
1837 | }, |
1838 | [this] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst { |
1839 | if (isValidForm(BranchFloat, Arg::DoubleCond, left.kind(), right.kind())) { |
1840 | return left.inst(right.inst( |
1841 | BranchFloat, m_value, doubleCond, |
1842 | left.consume(*this), right.consume(*this))); |
1843 | } |
1844 | return Inst(); |
1845 | }, |
1846 | inverted); |
1847 | } |
1848 | |
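| // createCompare is like createBranch, except that it materializes the comparison result into |
| // tmp(m_value) using the Compare/Test forms. |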
1849 | Inst createCompare(Value* value, bool inverted = false) |
1850 | { |
1851 | using namespace Air; |
1852 | return createGenericCompare( |
1853 | value, |
1854 | [this] ( |
1855 | Width width, const Arg& relCond, |
1856 | ArgPromise& left, ArgPromise& right) -> Inst { |
1857 | switch (width) { |
1858 | case Width8: |
1859 | case Width16: |
1860 | return Inst(); |
1861 | case Width32: |
1862 | if (isValidForm(Compare32, Arg::RelCond, left.kind(), right.kind(), Arg::Tmp)) { |
1863 | return left.inst(right.inst( |
1864 | Compare32, m_value, relCond, |
1865 | left.consume(*this), right.consume(*this), tmp(m_value))); |
1866 | } |
1867 | return Inst(); |
1868 | case Width64: |
1869 | if (isValidForm(Compare64, Arg::RelCond, left.kind(), right.kind(), Arg::Tmp)) { |
1870 | return left.inst(right.inst( |
1871 | Compare64, m_value, relCond, |
1872 | left.consume(*this), right.consume(*this), tmp(m_value))); |
1873 | } |
1874 | return Inst(); |
1875 | } |
1876 | ASSERT_NOT_REACHED(); |
1877 | }, |
1878 | [this] ( |
1879 | Width width, const Arg& resCond, |
1880 | ArgPromise& left, ArgPromise& right) -> Inst { |
1881 | switch (width) { |
1882 | case Width8: |
1883 | case Width16: |
1884 | return Inst(); |
1885 | case Width32: |
1886 | if (isValidForm(Test32, Arg::ResCond, left.kind(), right.kind(), Arg::Tmp)) { |
1887 | return left.inst(right.inst( |
1888 | Test32, m_value, resCond, |
1889 | left.consume(*this), right.consume(*this), tmp(m_value))); |
1890 | } |
1891 | return Inst(); |
1892 | case Width64: |
1893 | if (isValidForm(Test64, Arg::ResCond, left.kind(), right.kind(), Arg::Tmp)) { |
1894 | return left.inst(right.inst( |
1895 | Test64, m_value, resCond, |
1896 | left.consume(*this), right.consume(*this), tmp(m_value))); |
1897 | } |
1898 | return Inst(); |
1899 | } |
1900 | ASSERT_NOT_REACHED(); |
1901 | }, |
1902 | [this] (const Arg& doubleCond, ArgPromise& left, ArgPromise& right) -> Inst { |
1903 | if (isValidForm(CompareDouble, Arg::DoubleCond, left.kind(), right.kind(), Arg::Tmp)) { |
1904 | return left.inst(right.inst( |
1905 | CompareDouble, m_value, doubleCond, |
1906 | left.consume(*this), right.consume(*this), tmp(m_value))); |
1907 | } |
1908 | return Inst(); |
1909 | }, |
1910 | [this] (const Arg& doubleCond, ArgPromise& left, ArgPromise& right) -> Inst { |
1911 | if (isValidForm(CompareFloat, Arg::DoubleCond, left.kind(), right.kind(), Arg::Tmp)) { |
1912 | return left.inst(right.inst( |
1913 | CompareFloat, m_value, doubleCond, |
1914 | left.consume(*this), right.consume(*this), tmp(m_value))); |
1915 | } |
1916 | return Inst(); |
1917 | }, |
1918 | inverted); |
1919 | } |
1920 | |
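| // Bundles the conditional-move opcodes that createSelect uses for each kind of comparison. |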
1921 | struct MoveConditionallyConfig { |
1922 | Air::Opcode moveConditionally32; |
1923 | Air::Opcode moveConditionally64; |
1924 | Air::Opcode moveConditionallyTest32; |
1925 | Air::Opcode moveConditionallyTest64; |
1926 | Air::Opcode moveConditionallyDouble; |
1927 | Air::Opcode moveConditionallyFloat; |
1928 | }; |
1929 | Inst createSelect(const MoveConditionallyConfig& config) |
1930 | { |
1931 | using namespace Air; |
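| // Prefer the form that takes separate then/else operands and a distinct result; otherwise |
| // fall back to the two-operand form, which first moves the else case into the result and |
| // then conditionally overwrites it with the then case. |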
1932 | auto createSelectInstruction = [&] (Air::Opcode opcode, const Arg& condition, ArgPromise& left, ArgPromise& right) -> Inst { |
1933 | if (isValidForm(opcode, condition.kind(), left.kind(), right.kind(), Arg::Tmp, Arg::Tmp, Arg::Tmp)) { |
1934 | Tmp result = tmp(m_value); |
1935 | Tmp thenCase = tmp(m_value->child(1)); |
1936 | Tmp elseCase = tmp(m_value->child(2)); |
1937 | return left.inst(right.inst( |
1938 | opcode, m_value, condition, |
1939 | left.consume(*this), right.consume(*this), thenCase, elseCase, result)); |
1940 | } |
1941 | if (isValidForm(opcode, condition.kind(), left.kind(), right.kind(), Arg::Tmp, Arg::Tmp)) { |
1942 | Tmp result = tmp(m_value); |
1943 | Tmp source = tmp(m_value->child(1)); |
1944 | append(relaxedMoveForType(m_value->type()), tmp(m_value->child(2)), result); |
1945 | return left.inst(right.inst( |
1946 | opcode, m_value, condition, |
1947 | left.consume(*this), right.consume(*this), source, result)); |
1948 | } |
1949 | return Inst(); |
1950 | }; |
1951 | |
1952 | return createGenericCompare( |
1953 | m_value->child(0), |
1954 | [&] (Width width, const Arg& relCond, ArgPromise& left, ArgPromise& right) -> Inst { |
1955 | switch (width) { |
1956 | case Width8: |
1957 | // FIXME: Support these things. |
1958 | // https://bugs.webkit.org/show_bug.cgi?id=151504 |
1959 | return Inst(); |
1960 | case Width16: |
1961 | return Inst(); |
1962 | case Width32: |
1963 | return createSelectInstruction(config.moveConditionally32, relCond, left, right); |
1964 | case Width64: |
1965 | return createSelectInstruction(config.moveConditionally64, relCond, left, right); |
1966 | } |
1967 | ASSERT_NOT_REACHED(); |
1968 | }, |
1969 | [&] (Width width, const Arg& resCond, ArgPromise& left, ArgPromise& right) -> Inst { |
1970 | switch (width) { |
1971 | case Width8: |
1972 | // FIXME: Support more things. |
1973 | // https://bugs.webkit.org/show_bug.cgi?id=151504 |
1974 | return Inst(); |
1975 | case Width16: |
1976 | return Inst(); |
1977 | case Width32: |
1978 | return createSelectInstruction(config.moveConditionallyTest32, resCond, left, right); |
1979 | case Width64: |
1980 | return createSelectInstruction(config.moveConditionallyTest64, resCond, left, right); |
1981 | } |
1982 | ASSERT_NOT_REACHED(); |
1983 | }, |
1984 | [&] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst { |
1985 | return createSelectInstruction(config.moveConditionallyDouble, doubleCond, left, right); |
1986 | }, |
1987 | [&] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst { |
1988 | return createSelectInstruction(config.moveConditionallyFloat, doubleCond, left, right); |
1989 | }, |
1990 | false); |
1991 | } |
1992 | |
1993 | bool tryAppendLea() |
1994 | { |
1995 | using namespace Air; |
1996 | Air::Opcode leaOpcode = tryOpcodeForType(Lea32, Lea64, m_value->type()); |
1997 | if (!isValidForm(leaOpcode, Arg::Index, Arg::Tmp)) |
1998 | return false; |
1999 | |
2000 | // This lets us turn things like this: |
2001 | // |
2002 | // Add(Add(@x, Shl(@y, $2)), $100) |
2003 | // |
2004 | // Into this: |
2005 | // |
2006 | // lea 100(%rdi,%rsi,4), %rax |
2007 | // |
2008 | // We have a choice here between committing the internal bits of an index or sharing |
2009 | // them. There are solid arguments for both. |
2010 | // |
2011 | // Sharing: The word on the street is that the cost of a lea is one cycle no matter |
2012 | // what it does. Every experiment I've ever seen seems to confirm this. So, sharing |
2013 | // helps us in situations where the Wasm input did this: |
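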
2014 | // |
2015 | // x = a[i].x; |
2016 | // y = a[i].y; |
2017 | // |
2018 | // With sharing we would do: |
2019 | // |
2020 | // leal (%a,%i,4), %tmp |
2021 | // cmp (%size, %tmp) |
2022 | // ja _fail |
2023 | // movl (%base, %tmp), %x |
2024 | // leal 4(%a,%i,4), %tmp |
2025 | // cmp (%size, %tmp) |
2026 | // ja _fail |
2027 | // movl (%base, %tmp), %y |
2028 | // |
2029 | // In the absence of sharing, we may find ourselves needing separate registers for |
2030 | // the innards of the index. That's relatively unlikely to be a thing due to other |
2031 | // optimizations that we already have, but it could happen. |
2032 | // |
2033 | // Committing: The worst case is that there is a complicated graph of additions and |
2034 | // shifts, where each value has multiple uses. In that case, it's better to compute |
2035 | // each one separately from the others since that way, each calculation will use a |
2036 | // relatively nearby tmp as its input. That seems uncommon, but in those cases, |
2037 | // committing is a clear winner: it would result in a simple interference graph |
2038 | // while sharing would result in a complex one. Interference sucks because it means |
2039 | // more time in IRC and it means worse code. |
2040 | // |
2041 | // It's not super clear if any of these corner cases would ever arise. Committing |
2042 | // has the benefit that it's easier to reason about, and protects against a much darker |
2043 | // corner case (more interference). |
2044 | |
2045 | // Here are the things we want to match: |
2046 | // Add(Add(@x, @y), $c) |
2047 | // Add(Shl(@x, $c), @y) |
2048 | // Add(@x, Shl(@y, $c)) |
2049 | // Add(Add(@x, Shl(@y, $c)), $d) |
2050 | // Add(Add(Shl(@x, $c), @y), $d) |
2051 | // |
2052 | // Note that if you do Add(Shl(@x, $c), $d) then we will treat $d as a non-constant and |
2053 | // force it to materialize. You'll get something like this: |
2054 | // |
2055 | // movl $d, %tmp |
2056 | // leal (%tmp,%x,1<<c), %result |
2057 | // |
2058 | // Which is pretty close to optimal and has the nice effect of being able to handle large |
2059 | // constants gracefully. |
2060 | |
2061 | Value* innerAdd = nullptr; |
2062 | |
2063 | Value* value = m_value; |
2064 | |
2065 | // We're going to consume Add(Add(_), $c). If we succeed at consuming it then we have these |
2066 | // patterns left (i.e. in the Add(_)): |
2067 | // |
2068 | // Add(Add(@x, @y), $c) |
2069 | // Add(Add(@x, Shl(@y, $c)), $d) |
2070 | // Add(Add(Shl(@x, $c), @y), $d) |
2071 | // |
2072 | // Otherwise we are looking at these patterns: |
2073 | // |
2074 | // Add(Shl(@x, $c), @y) |
2075 | // Add(@x, Shl(@y, $c)) |
2076 | // |
2077 | // This means that the subsequent code only has to worry about three patterns: |
2078 | // |
2079 | // Add(Shl(@x, $c), @y) |
2080 | // Add(@x, Shl(@y, $c)) |
2081 | // Add(@x, @y) (only if offset != 0) |
2082 | Value::OffsetType offset = 0; |
2083 | if (value->child(1)->isRepresentableAs<Value::OffsetType>() |
2084 | && canBeInternal(value->child(0)) |
2085 | && value->child(0)->opcode() == Add) { |
2086 | innerAdd = value->child(0); |
2087 | offset = static_cast<Value::OffsetType>(value->child(1)->asInt()); |
2088 | value = value->child(0); |
2089 | } |
2090 | |
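| // tryShl tries to use the Shl child as the scaled index of the lea; scaleForShl checks that |
| // the shift amount yields a scale/offset combination that the target can encode. |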
2091 | auto tryShl = [&] (Value* shl, Value* other) -> bool { |
2092 | Optional<unsigned> scale = scaleForShl(shl, offset); |
2093 | if (!scale) |
2094 | return false; |
2095 | if (!canBeInternal(shl)) |
2096 | return false; |
2097 | |
2098 | ASSERT(!m_locked.contains(shl->child(0))); |
2099 | ASSERT(!m_locked.contains(other)); |
2100 | |
2101 | append(leaOpcode, Arg::index(tmp(other), tmp(shl->child(0)), *scale, offset), tmp(m_value)); |
2102 | commitInternal(innerAdd); |
2103 | commitInternal(shl); |
2104 | return true; |
2105 | }; |
2106 | |
2107 | if (tryShl(value->child(0), value->child(1))) |
2108 | return true; |
2109 | if (tryShl(value->child(1), value->child(0))) |
2110 | return true; |
2111 | |
2112 | // The remaining pattern is just: |
2113 | // Add(@x, @y) (only if offset != 0) |
2114 | if (!offset) |
2115 | return false; |
2116 | ASSERT(!m_locked.contains(value->child(0))); |
2117 | ASSERT(!m_locked.contains(value->child(1))); |
2118 | append(leaOpcode, Arg::index(tmp(value->child(0)), tmp(value->child(1)), 1, offset), tmp(m_value)); |
2119 | commitInternal(innerAdd); |
2120 | return true; |
2121 | } |
2122 | |
2123 | void appendX86Div(B3::Opcode op) |
2124 | { |
2125 | using namespace Air; |
2126 | Air::Opcode convertToDoubleWord; |
2127 | Air::Opcode div; |
2128 | switch (m_value->type()) { |
2129 | case Int32: |
2130 | convertToDoubleWord = X86ConvertToDoubleWord32; |
2131 | div = X86Div32; |
2132 | break; |
2133 | case Int64: |
2134 | convertToDoubleWord = X86ConvertToQuadWord64; |
2135 | div = X86Div64; |
2136 | break; |
2137 | default: |
2138 | RELEASE_ASSERT_NOT_REACHED(); |
2139 | return; |
2140 | } |
2141 | |
2142 | ASSERT(op == Div || op == Mod); |
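| // x86 division leaves the quotient in eax and the remainder in edx. |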
2143 | Tmp result = op == Div ? m_eax : m_edx; |
2144 | |
2145 | append(Move, tmp(m_value->child(0)), m_eax); |
2146 | append(convertToDoubleWord, m_eax, m_edx); |
2147 | append(div, m_eax, m_edx, tmp(m_value->child(1))); |
2148 | append(Move, result, tmp(m_value)); |
2149 | } |
2150 | |
2151 | void appendX86UDiv(B3::Opcode op) |
2152 | { |
2153 | using namespace Air; |
2154 | Air::Opcode div = m_value->type() == Int32 ? X86UDiv32 : X86UDiv64; |
2155 | |
2156 | ASSERT(op == UDiv || op == UMod); |
2157 | Tmp result = op == UDiv ? m_eax : m_edx; |
2158 | |
2159 | append(Move, tmp(m_value->child(0)), m_eax); |
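| // Unsigned division wants a zero-extended dividend, so clear edx instead of sign-extending |
| // into it. |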
2160 | append(Xor64, m_edx, m_edx); |
2161 | append(div, m_eax, m_edx, tmp(m_value->child(1))); |
2162 | append(Move, result, tmp(m_value)); |
2163 | } |
2164 | |
2165 | Air::Opcode loadLinkOpcode(Width width, bool fence) |
2166 | { |
2167 | return fence ? OPCODE_FOR_WIDTH(LoadLinkAcq, width) : OPCODE_FOR_WIDTH(LoadLink, width); |
2168 | } |
2169 | |
2170 | Air::Opcode storeCondOpcode(Width width, bool fence) |
2171 | { |
2172 | return fence ? OPCODE_FOR_WIDTH(StoreCondRel, width) : OPCODE_FOR_WIDTH(StoreCond, width); |
2173 | } |
2174 | |
2175 | // This can emit code for the following patterns: |
2176 | // AtomicWeakCAS |
2177 | // BitXor(AtomicWeakCAS, 1) |
2178 | // AtomicStrongCAS |
2179 | // Equal(AtomicStrongCAS, expected) |
2180 | // NotEqual(AtomicStrongCAS, expected) |
2181 | // Branch(AtomicWeakCAS) |
2182 | // Branch(Equal(AtomicStrongCAS, expected)) |
2183 | // Branch(NotEqual(AtomicStrongCAS, expected)) |
2184 | // |
2185 | // It assumes that atomicValue points to the CAS, and m_value points to the instruction being |
2186 | // generated. It assumes that you've consumed everything that needs to be consumed. |
2187 | void appendCAS(Value* atomicValue, bool invert) |
2188 | { |
2189 | using namespace Air; |
2190 | AtomicValue* atomic = atomicValue->as<AtomicValue>(); |
2191 | RELEASE_ASSERT(atomic); |
2192 | |
2193 | bool isBranch = m_value->opcode() == Branch; |
2194 | bool isStrong = atomic->opcode() == AtomicStrongCAS; |
2195 | bool returnsOldValue = m_value->opcode() == AtomicStrongCAS; |
2196 | bool hasFence = atomic->hasFence(); |
2197 | |
2198 | Width width = atomic->accessWidth(); |
2199 | Arg address = addr(atomic); |
2200 | |
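| // Pick the temps that carry the results: a strong CAS used for its value writes the old value |
| // into tmp(m_value), a branch consumes the comparison internally, and everything else |
| // produces a boolean into tmp(m_value). |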
2201 | Tmp valueResultTmp; |
2202 | Tmp boolResultTmp; |
2203 | if (returnsOldValue) { |
2204 | RELEASE_ASSERT(!invert); |
2205 | valueResultTmp = tmp(m_value); |
2206 | boolResultTmp = m_code.newTmp(GP); |
2207 | } else if (isBranch) { |
2208 | valueResultTmp = m_code.newTmp(GP); |
2209 | boolResultTmp = m_code.newTmp(GP); |
2210 | } else { |
2211 | valueResultTmp = m_code.newTmp(GP); |
2212 | boolResultTmp = tmp(m_value); |
2213 | } |
2214 | |
2215 | Tmp successBoolResultTmp; |
2216 | if (isStrong && !isBranch) |
2217 | successBoolResultTmp = m_code.newTmp(GP); |
2218 | else |
2219 | successBoolResultTmp = boolResultTmp; |
2220 | |
2221 | Tmp expectedValueTmp = tmp(atomic->child(0)); |
2222 | Tmp newValueTmp = tmp(atomic->child(1)); |
2223 | |
2224 | Air::FrequentedBlock success; |
2225 | Air::FrequentedBlock failure; |
2226 | if (isBranch) { |
2227 | success = m_blockToBlock[m_block]->successor(invert); |
2228 | failure = m_blockToBlock[m_block]->successor(!invert); |
2229 | } |
2230 | |
2231 | if (isX86()) { |
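| // The x86 compare-and-swap keeps the expected value in eax and leaves the loaded old value |
| // there, so we stage the expected value in m_eax and read the result back out of it. |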
2232 | append(relaxedMoveForType(atomic->accessType()), immOrTmp(atomic->child(0)), m_eax); |
2233 | if (returnsOldValue) { |
2234 | appendTrapping(OPCODE_FOR_WIDTH(AtomicStrongCAS, width), m_eax, newValueTmp, address); |
2235 | append(relaxedMoveForType(atomic->accessType()), m_eax, valueResultTmp); |
2236 | } else if (isBranch) { |
2237 | appendTrapping(OPCODE_FOR_WIDTH(BranchAtomicStrongCAS, width), Arg::statusCond(MacroAssembler::Success), m_eax, newValueTmp, address); |
2238 | m_blockToBlock[m_block]->setSuccessors(success, failure); |
2239 | } else |
2240 | appendTrapping(OPCODE_FOR_WIDTH(AtomicStrongCAS, width), Arg::statusCond(invert ? MacroAssembler::Failure : MacroAssembler::Success), m_eax, tmp(atomic->child(1)), address, boolResultTmp); |
2241 | return; |
2242 | } |
2243 | |
2244 | RELEASE_ASSERT(isARM64()); |
2245 | // We wish to emit: |
2246 | // |
2247 | // Block #reloop: |
2248 | // LoadLink |
2249 | // Branch NotEqual |
2250 | // Successors: Then:#fail, Else: #store |
2251 | // Block #store: |
2252 | // StoreCond |
2253 | // Xor $1, %result <--- only if !invert |
2254 | // Jump |
2255 | // Successors: #done |
2256 | // Block #fail: |
2257 | // Move $invert, %result |
2258 | // Jump |
2259 | // Successors: #done |
2260 | // Block #done: |
2261 | |
2262 | Air::BasicBlock* reloopBlock = newBlock(); |
2263 | Air::BasicBlock* storeBlock = newBlock(); |
2264 | Air::BasicBlock* successBlock = nullptr; |
2265 | if (!isBranch && isStrong) |
2266 | successBlock = newBlock(); |
2267 | Air::BasicBlock* failBlock = nullptr; |
2268 | if (!isBranch) { |
2269 | failBlock = newBlock(); |
2270 | failure = failBlock; |
2271 | } |
2272 | Air::BasicBlock* strongFailBlock; |
2273 | if (isStrong && hasFence) |
2274 | strongFailBlock = newBlock(); |
2275 | Air::FrequentedBlock comparisonFail = failure; |
2276 | Air::FrequentedBlock weakFail; |
2277 | if (isStrong) { |
2278 | if (hasFence) |
2279 | comparisonFail = strongFailBlock; |
2280 | weakFail = reloopBlock; |
2281 | } else |
2282 | weakFail = failure; |
2283 | Air::BasicBlock* beginBlock; |
2284 | Air::BasicBlock* doneBlock; |
2285 | splitBlock(beginBlock, doneBlock); |
2286 | |
2287 | append(Air::Jump); |
2288 | beginBlock->setSuccessors(reloopBlock); |
2289 | |
2290 | reloopBlock->append(trappingInst(m_value, loadLinkOpcode(width, atomic->hasFence()), m_value, address, valueResultTmp)); |
2291 | reloopBlock->append(OPCODE_FOR_CANONICAL_WIDTH(Branch, width), m_value, Arg::relCond(MacroAssembler::NotEqual), valueResultTmp, expectedValueTmp); |
2292 | reloopBlock->setSuccessors(comparisonFail, storeBlock); |
2293 | |
2294 | storeBlock->append(trappingInst(m_value, storeCondOpcode(width, atomic->hasFence()), m_value, newValueTmp, address, successBoolResultTmp)); |
2295 | if (isBranch) { |
2296 | storeBlock->append(BranchTest32, m_value, Arg::resCond(MacroAssembler::Zero), boolResultTmp, boolResultTmp); |
2297 | storeBlock->setSuccessors(success, weakFail); |
2298 | doneBlock->successors().clear(); |
2299 | RELEASE_ASSERT(!doneBlock->size()); |
2300 | doneBlock->append(Air::Oops, m_value); |
2301 | } else { |
2302 | if (isStrong) { |
2303 | storeBlock->append(BranchTest32, m_value, Arg::resCond(MacroAssembler::Zero), successBoolResultTmp, successBoolResultTmp); |
2304 | storeBlock->setSuccessors(successBlock, reloopBlock); |
2305 | |
2306 | successBlock->append(Move, m_value, Arg::imm(!invert), boolResultTmp); |
2307 | successBlock->append(Air::Jump, m_value); |
2308 | successBlock->setSuccessors(doneBlock); |
2309 | } else { |
2310 | if (!invert) |
2311 | storeBlock->append(Xor32, m_value, Arg::bitImm(1), boolResultTmp, boolResultTmp); |
2312 | |
2313 | storeBlock->append(Air::Jump, m_value); |
2314 | storeBlock->setSuccessors(doneBlock); |
2315 | } |
2316 | |
2317 | failBlock->append(Move, m_value, Arg::imm(invert), boolResultTmp); |
2318 | failBlock->append(Air::Jump, m_value); |
2319 | failBlock->setSuccessors(doneBlock); |
2320 | } |
2321 | |
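| // For a fenced strong CAS, the comparison-failure path still does a store-conditional of the |
| // value we just loaded: if that store succeeds we report failure, and if it fails we retry |
| // the loop. Presumably this is what preserves the fence's ordering on the failure path. |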
2322 | if (isStrong && hasFence) { |
2323 | Tmp tmp = m_code.newTmp(GP); |
2324 | strongFailBlock->append(trappingInst(m_value, storeCondOpcode(width, atomic->hasFence()), m_value, valueResultTmp, address, tmp)); |
2325 | strongFailBlock->append(BranchTest32, m_value, Arg::resCond(MacroAssembler::Zero), tmp, tmp); |
2326 | strongFailBlock->setSuccessors(failure, reloopBlock); |
2327 | } |
2328 | } |
2329 | |
2330 | bool appendVoidAtomic(Air::Opcode atomicOpcode) |
2331 | { |
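| // If the atomic's result is never used, we can emit the operation directly against memory, |
| // using an immediate operand when the target has such a form. |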
2332 | if (m_useCounts.numUses(m_value)) |
2333 | return false; |
2334 | |
2335 | Arg address = addr(m_value); |
2336 | |
2337 | if (isValidForm(atomicOpcode, Arg::Imm, address.kind()) && imm(m_value->child(0))) { |
2338 | append(atomicOpcode, imm(m_value->child(0)), address); |
2339 | return true; |
2340 | } |
2341 | |
2342 | if (isValidForm(atomicOpcode, Arg::Tmp, address.kind())) { |
2343 | append(atomicOpcode, tmp(m_value->child(0)), address); |
2344 | return true; |
2345 | } |
2346 | |
2347 | return false; |
2348 | } |
2349 | |
2350 | void appendGeneralAtomic(Air::Opcode opcode, Commutativity commutativity = NotCommutative) |
2351 | { |
2352 | using namespace Air; |
2353 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
2354 | |
2355 | Arg address = addr(m_value); |
2356 | Tmp oldValue = m_code.newTmp(GP); |
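| // An opcode of Air::Nop means there is no operation to apply, so the value we store is just |
| // the operand itself, which is what a plain atomic exchange needs. |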
2357 | Tmp newValue = opcode == Air::Nop ? tmp(atomic->child(0)) : m_code.newTmp(GP); |
2358 | |
2359 | // We need a CAS loop or an LL/SC loop. Using prepare/attempt jargon, we want: |
2360 | // |
2361 | // Block #reloop: |
2362 | // Prepare |
2363 | // opcode |
2364 | // Attempt |
2365 | // Successors: Then:#done, Else:#reloop |
2366 | // Block #done: |
2367 | // Move oldValue, result |
2368 | |
2369 | append(relaxedMoveForType(atomic->type()), oldValue, tmp(atomic)); |
2370 | |
2371 | Air::BasicBlock* reloopBlock = newBlock(); |
2372 | Air::BasicBlock* beginBlock; |
2373 | Air::BasicBlock* doneBlock; |
2374 | splitBlock(beginBlock, doneBlock); |
2375 | |
2376 | append(Air::Jump); |
2377 | beginBlock->setSuccessors(reloopBlock); |
2378 | |
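| // x86 has no load-link, so the "prepare" step is just a plain load of the access width and |
| // the "attempt" step below is a strong CAS against the loaded value; ARM64 uses a real |
| // LL/SC pair. |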
2379 | Air::Opcode prepareOpcode; |
2380 | if (isX86()) { |
2381 | switch (atomic->accessWidth()) { |
2382 | case Width8: |
2383 | prepareOpcode = Load8SignedExtendTo32; |
2384 | break; |
2385 | case Width16: |
2386 | prepareOpcode = Load16SignedExtendTo32; |
2387 | break; |
2388 | case Width32: |
2389 | prepareOpcode = Move32; |
2390 | break; |
2391 | case Width64: |
2392 | prepareOpcode = Move; |
2393 | break; |
2394 | } |
2395 | } else { |
2396 | RELEASE_ASSERT(isARM64()); |
2397 | prepareOpcode = loadLinkOpcode(atomic->accessWidth(), atomic->hasFence()); |
2398 | } |
2399 | reloopBlock->append(trappingInst(m_value, prepareOpcode, m_value, address, oldValue)); |
2400 | |
2401 | if (opcode != Air::Nop) { |
2402 | // FIXME: If we ever have to write this again, we need to find a way to share the code with |
2403 | // appendBinOp. |
2404 | // https://bugs.webkit.org/show_bug.cgi?id=169249 |
2405 | if (commutativity == Commutative && imm(atomic->child(0)) && isValidForm(opcode, Arg::Imm, Arg::Tmp, Arg::Tmp)) |
2406 | reloopBlock->append(opcode, m_value, imm(atomic->child(0)), oldValue, newValue); |
2407 | else if (imm(atomic->child(0)) && isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) |
2408 | reloopBlock->append(opcode, m_value, oldValue, imm(atomic->child(0)), newValue); |
2409 | else if (commutativity == Commutative && bitImm(atomic->child(0)) && isValidForm(opcode, Arg::BitImm, Arg::Tmp, Arg::Tmp)) |
2410 | reloopBlock->append(opcode, m_value, bitImm(atomic->child(0)), oldValue, newValue); |
2411 | else if (isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)) |
2412 | reloopBlock->append(opcode, m_value, oldValue, tmp(atomic->child(0)), newValue); |
2413 | else { |
2414 | reloopBlock->append(relaxedMoveForType(atomic->type()), m_value, oldValue, newValue); |
2415 | if (imm(atomic->child(0)) && isValidForm(opcode, Arg::Imm, Arg::Tmp)) |
2416 | reloopBlock->append(opcode, m_value, imm(atomic->child(0)), newValue); |
2417 | else |
2418 | reloopBlock->append(opcode, m_value, tmp(atomic->child(0)), newValue); |
2419 | } |
2420 | } |
2421 | |
2422 | if (isX86()) { |
2423 | Air::Opcode casOpcode = OPCODE_FOR_WIDTH(BranchAtomicStrongCAS, atomic->accessWidth()); |
2424 | reloopBlock->append(relaxedMoveForType(atomic->type()), m_value, oldValue, m_eax); |
2425 | reloopBlock->append(trappingInst(m_value, casOpcode, m_value, Arg::statusCond(MacroAssembler::Success), m_eax, newValue, address)); |
2426 | } else { |
2427 | RELEASE_ASSERT(isARM64()); |
2428 | Tmp boolResult = m_code.newTmp(GP); |
2429 | reloopBlock->append(trappingInst(m_value, storeCondOpcode(atomic->accessWidth(), atomic->hasFence()), m_value, newValue, address, boolResult)); |
2430 | reloopBlock->append(BranchTest32, m_value, Arg::resCond(MacroAssembler::Zero), boolResult, boolResult); |
2431 | } |
2432 | reloopBlock->setSuccessors(doneBlock, reloopBlock); |
2433 | } |
2434 | |
2435 | void lower() |
2436 | { |
2437 | using namespace Air; |
2438 | switch (m_value->opcode()) { |
2439 | case B3::Nop: { |
2440 | // Yes, we will totally see Nops because some phases will replaceWithNop() instead of |
2441 | // properly removing things. |
2442 | return; |
2443 | } |
2444 | |
2445 | case Load: { |
2446 | MemoryValue* memory = m_value->as<MemoryValue>(); |
2447 | Air::Kind kind = moveForType(memory->type()); |
2448 | if (memory->hasFence()) { |
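| // On x86, an ordinary load already has acquire ordering, so it's enough to mark the load as |
| // having effects so that Air won't move or eliminate it; other targets need the acquire form. |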
2449 | if (isX86()) |
2450 | kind.effects = true; |
2451 | else { |
2452 | switch (memory->type()) { |
2453 | case Int32: |
2454 | kind = LoadAcq32; |
2455 | break; |
2456 | case Int64: |
2457 | kind = LoadAcq64; |
2458 | break; |
2459 | default: |
2460 | RELEASE_ASSERT_NOT_REACHED(); |
2461 | break; |
2462 | } |
2463 | } |
2464 | } |
2465 | append(trappingInst(m_value, kind, m_value, addr(m_value), tmp(m_value))); |
2466 | return; |
2467 | } |
2468 | |
2469 | case Load8S: { |
2470 | Air::Kind kind = Load8SignedExtendTo32; |
2471 | if (m_value->as<MemoryValue>()->hasFence()) { |
2472 | if (isX86()) |
2473 | kind.effects = true; |
2474 | else |
2475 | kind = LoadAcq8SignedExtendTo32; |
2476 | } |
2477 | append(trappingInst(m_value, kind, m_value, addr(m_value), tmp(m_value))); |
2478 | return; |
2479 | } |
2480 | |
2481 | case Load8Z: { |
2482 | Air::Kind kind = Load8; |
2483 | if (m_value->as<MemoryValue>()->hasFence()) { |
2484 | if (isX86()) |
2485 | kind.effects = true; |
2486 | else |
2487 | kind = LoadAcq8; |
2488 | } |
2489 | append(trappingInst(m_value, kind, m_value, addr(m_value), tmp(m_value))); |
2490 | return; |
2491 | } |
2492 | |
2493 | case Load16S: { |
2494 | Air::Kind kind = Load16SignedExtendTo32; |
2495 | if (m_value->as<MemoryValue>()->hasFence()) { |
2496 | if (isX86()) |
2497 | kind.effects = true; |
2498 | else |
2499 | kind = LoadAcq16SignedExtendTo32; |
2500 | } |
2501 | append(trappingInst(m_value, kind, m_value, addr(m_value), tmp(m_value))); |
2502 | return; |
2503 | } |
2504 | |
2505 | case Load16Z: { |
2506 | Air::Kind kind = Load16; |
2507 | if (m_value->as<MemoryValue>()->hasFence()) { |
2508 | if (isX86()) |
2509 | kind.effects = true; |
2510 | else |
2511 | kind = LoadAcq16; |
2512 | } |
2513 | append(trappingInst(m_value, kind, m_value, addr(m_value), tmp(m_value))); |
2514 | return; |
2515 | } |
2516 | |
2517 | case Add: { |
2518 | if (tryAppendLea()) |
2519 | return; |
2520 | |
2521 | Air::Opcode multiplyAddOpcode = tryOpcodeForType(MultiplyAdd32, MultiplyAdd64, m_value->type()); |
2522 | if (isValidForm(multiplyAddOpcode, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) { |
2523 | Value* left = m_value->child(0); |
2524 | Value* right = m_value->child(1); |
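| // If the addend is an immediate that has not been materialized into a tmp, skip the fusion; |
| // the plain Add below can presumably consume the immediate form more cheaply. |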
2525 | if (!imm(right) || m_valueToTmp[right]) { |
2526 | auto tryAppendMultiplyAdd = [&] (Value* left, Value* right) -> bool { |
2527 | if (left->opcode() != Mul || !canBeInternal(left)) |
2528 | return false; |
2529 | |
2530 | Value* multiplyLeft = left->child(0); |
2531 | Value* multiplyRight = left->child(1); |
2532 | if (canBeInternal(multiplyLeft) || canBeInternal(multiplyRight)) |
2533 | return false; |
2534 | |
2535 | append(multiplyAddOpcode, tmp(multiplyLeft), tmp(multiplyRight), tmp(right), tmp(m_value)); |
2536 | commitInternal(left); |
2537 | |
2538 | return true; |
2539 | }; |
2540 | |
2541 | if (tryAppendMultiplyAdd(left, right)) |
2542 | return; |
2543 | if (tryAppendMultiplyAdd(right, left)) |
2544 | return; |
2545 | } |
2546 | } |
2547 | |
2548 | appendBinOp<Add32, Add64, AddDouble, AddFloat, Commutative>( |
2549 | m_value->child(0), m_value->child(1)); |
2550 | return; |
2551 | } |
2552 | |
2553 | case Sub: { |
2554 | Air::Opcode multiplySubOpcode = tryOpcodeForType(MultiplySub32, MultiplySub64, m_value->type()); |
2555 | if (multiplySubOpcode != Air::Oops |
2556 | && isValidForm(multiplySubOpcode, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) { |
2557 | Value* left = m_value->child(0); |
2558 | Value* right = m_value->child(1); |
2559 | if (!imm(right) || m_valueToTmp[right]) { |
2560 | auto tryAppendMultiplySub = [&] () -> bool { |
2561 | if (right->opcode() != Mul || !canBeInternal(right)) |
2562 | return false; |
2563 | |
2564 | Value* multiplyLeft = right->child(0); |
2565 | Value* multiplyRight = right->child(1); |
2566 | if (m_locked.contains(multiplyLeft) || m_locked.contains(multiplyRight)) |
2567 | return false; |
2568 | |
2569 | append(multiplySubOpcode, tmp(multiplyLeft), tmp(multiplyRight), tmp(left), tmp(m_value)); |
2570 | commitInternal(right); |
2571 | |
2572 | return true; |
2573 | }; |
2574 | |
2575 | if (tryAppendMultiplySub()) |
2576 | return; |
2577 | } |
2578 | } |
2579 | |
2580 | appendBinOp<Sub32, Sub64, SubDouble, SubFloat>(m_value->child(0), m_value->child(1)); |
2581 | return; |
2582 | } |
2583 | |
2584 | case Neg: { |
2585 | Air::Opcode multiplyNegOpcode = tryOpcodeForType(MultiplyNeg32, MultiplyNeg64, m_value->type()); |
2586 | if (multiplyNegOpcode != Air::Oops |
2587 | && isValidForm(multiplyNegOpcode, Arg::Tmp, Arg::Tmp, Arg::Tmp) |
2588 | && m_value->child(0)->opcode() == Mul |
2589 | && canBeInternal(m_value->child(0))) { |
2590 | Value* multiplyOperation = m_value->child(0); |
2591 | Value* multiplyLeft = multiplyOperation->child(0); |
2592 | Value* multiplyRight = multiplyOperation->child(1); |
2593 | if (!m_locked.contains(multiplyLeft) && !m_locked.contains(multiplyRight)) { |
2594 | append(multiplyNegOpcode, tmp(multiplyLeft), tmp(multiplyRight), tmp(m_value)); |
2595 | commitInternal(multiplyOperation); |
2596 | return; |
2597 | } |
2598 | } |
2599 | |
2600 | appendUnOp<Neg32, Neg64, NegateDouble, NegateFloat>(m_value->child(0)); |
2601 | return; |
2602 | } |
2603 | |
2604 | case Mul: { |
2605 | appendBinOp<Mul32, Mul64, MulDouble, MulFloat, Commutative>( |
2606 | m_value->child(0), m_value->child(1)); |
2607 | return; |
2608 | } |
2609 | |
2610 | case Div: { |
2611 | if (m_value->isChill()) |
2612 | RELEASE_ASSERT(isARM64()); |
2613 | if (isInt(m_value->type()) && isX86()) { |
2614 | appendX86Div(Div); |
2615 | return; |
2616 | } |
2617 | ASSERT(!isX86() || isFloat(m_value->type())); |
2618 | |
2619 | appendBinOp<Div32, Div64, DivDouble, DivFloat>(m_value->child(0), m_value->child(1)); |
2620 | return; |
2621 | } |
2622 | |
2623 | case UDiv: { |
2624 | if (isInt(m_value->type()) && isX86()) { |
2625 | appendX86UDiv(UDiv); |
2626 | return; |
2627 | } |
2628 | |
2629 | ASSERT(!isX86() && !isFloat(m_value->type())); |
2630 | |
2631 | appendBinOp<UDiv32, UDiv64, Air::Oops, Air::Oops>(m_value->child(0), m_value->child(1)); |
2632 | return; |
2633 | |
2634 | } |
2635 | |
2636 | case Mod: { |
2637 | RELEASE_ASSERT(isX86()); |
2638 | RELEASE_ASSERT(!m_value->isChill()); |
2639 | appendX86Div(Mod); |
2640 | return; |
2641 | } |
2642 | |
2643 | case UMod: { |
2644 | RELEASE_ASSERT(isX86()); |
2645 | appendX86UDiv(UMod); |
2646 | return; |
2647 | } |
2648 | |
2649 | case BitAnd: { |
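| // Masking with 0xff, 0xffff, or 0xffffffff is just a zero-extension, so use the dedicated |
| // extension/move forms instead of a real And. |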
2650 | if (m_value->child(1)->isInt(0xff)) { |
2651 | appendUnOp<ZeroExtend8To32, ZeroExtend8To32>(m_value->child(0)); |
2652 | return; |
2653 | } |
2654 | |
2655 | if (m_value->child(1)->isInt(0xffff)) { |
2656 | appendUnOp<ZeroExtend16To32, ZeroExtend16To32>(m_value->child(0)); |
2657 | return; |
2658 | } |
2659 | |
2660 | if (m_value->child(1)->isInt(0xffffffff)) { |
2661 | appendUnOp<Move32, Move32>(m_value->child(0)); |
2662 | return; |
2663 | } |
2664 | |
2665 | appendBinOp<And32, And64, AndDouble, AndFloat, Commutative>( |
2666 | m_value->child(0), m_value->child(1)); |
2667 | return; |
2668 | } |
2669 | |
2670 | case BitOr: { |
2671 | appendBinOp<Or32, Or64, OrDouble, OrFloat, Commutative>( |
2672 | m_value->child(0), m_value->child(1)); |
2673 | return; |
2674 | } |
2675 | |
2676 | case BitXor: { |
2677 | // FIXME: If canBeInternal(child), we should generate this using the comparison path. |
2678 | // https://bugs.webkit.org/show_bug.cgi?id=152367 |
2679 | |
2680 | if (m_value->child(1)->isInt(-1)) { |
2681 | appendUnOp<Not32, Not64>(m_value->child(0)); |
2682 | return; |
2683 | } |
2684 | |
2685 | // This pattern is super useful on both x86 and ARM64, since the inversion of the CAS result |
2686 | // can be done with zero cost on x86 (just flip the set from E to NE) and it's a progression |
2687 | // on ARM64 (since STX returns 0 on success, so ordinarily we have to flip it). |
2688 | if (m_value->child(1)->isInt(1) |
2689 | && m_value->child(0)->opcode() == AtomicWeakCAS |
2690 | && canBeInternal(m_value->child(0))) { |
2691 | commitInternal(m_value->child(0)); |
2692 | appendCAS(m_value->child(0), true); |
2693 | return; |
2694 | } |
2695 | |
2696 | appendBinOp<Xor32, Xor64, XorDouble, XorFloat, Commutative>( |
2697 | m_value->child(0), m_value->child(1)); |
2698 | return; |
2699 | } |
2700 | |
2701 | case Depend: { |
2702 | RELEASE_ASSERT(isARM64()); |
2703 | appendUnOp<Depend32, Depend64>(m_value->child(0)); |
2704 | return; |
2705 | } |
2706 | |
2707 | case Shl: { |
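| // Strength-reduce a shift by 1 to adding the value to itself. |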
2708 | if (m_value->child(1)->isInt32(1)) { |
2709 | appendBinOp<Add32, Add64, AddDouble, AddFloat, Commutative>(m_value->child(0), m_value->child(0)); |
2710 | return; |
2711 | } |
2712 | |
2713 | appendShift<Lshift32, Lshift64>(m_value->child(0), m_value->child(1)); |
2714 | return; |
2715 | } |
2716 | |
2717 | case SShr: { |
2718 | appendShift<Rshift32, Rshift64>(m_value->child(0), m_value->child(1)); |
2719 | return; |
2720 | } |
2721 | |
2722 | case ZShr: { |
2723 | appendShift<Urshift32, Urshift64>(m_value->child(0), m_value->child(1)); |
2724 | return; |
2725 | } |
2726 | |
2727 | case RotR: { |
2728 | appendShift<RotateRight32, RotateRight64>(m_value->child(0), m_value->child(1)); |
2729 | return; |
2730 | } |
2731 | |
2732 | case RotL: { |
2733 | appendShift<RotateLeft32, RotateLeft64>(m_value->child(0), m_value->child(1)); |
2734 | return; |
2735 | } |
2736 | |
2737 | case Clz: { |
2738 | appendUnOp<CountLeadingZeros32, CountLeadingZeros64>(m_value->child(0)); |
2739 | return; |
2740 | } |
2741 | |
2742 | case Abs: { |
2743 | RELEASE_ASSERT_WITH_MESSAGE(!isX86(), "Abs is not supported natively on x86. It must be replaced before generation."); |
2744 | appendUnOp<Air::Oops, Air::Oops, AbsDouble, AbsFloat>(m_value->child(0)); |
2745 | return; |
2746 | } |
2747 | |
2748 | case Ceil: { |
2749 | appendUnOp<Air::Oops, Air::Oops, CeilDouble, CeilFloat>(m_value->child(0)); |
2750 | return; |
2751 | } |
2752 | |
2753 | case Floor: { |
2754 | appendUnOp<Air::Oops, Air::Oops, FloorDouble, FloorFloat>(m_value->child(0)); |
2755 | return; |
2756 | } |
2757 | |
2758 | case Sqrt: { |
2759 | appendUnOp<Air::Oops, Air::Oops, SqrtDouble, SqrtFloat>(m_value->child(0)); |
2760 | return; |
2761 | } |
2762 | |
2763 | case BitwiseCast: { |
2764 | appendUnOp<Move32ToFloat, Move64ToDouble, MoveDoubleTo64, MoveFloatTo32>(m_value->child(0)); |
2765 | return; |
2766 | } |
2767 | |
2768 | case Store: { |
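| // If the value being stored can be fused into this store, try to emit a read-modify-write |
| // form (for example, a memory-destination add) rather than computing into a tmp and then |
| // storing that. |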
2769 | Value* valueToStore = m_value->child(0); |
2770 | if (canBeInternal(valueToStore)) { |
2771 | bool matched = false; |
2772 | switch (valueToStore->opcode()) { |
2773 | case Add: |
2774 | matched = tryAppendStoreBinOp<Add32, Add64, Commutative>( |
2775 | valueToStore->child(0), valueToStore->child(1)); |
2776 | break; |
2777 | case Sub: |
2778 | if (valueToStore->child(0)->isInt(0)) { |
2779 | matched = tryAppendStoreUnOp<Neg32, Neg64>(valueToStore->child(1)); |
2780 | break; |
2781 | } |
2782 | matched = tryAppendStoreBinOp<Sub32, Sub64>( |
2783 | valueToStore->child(0), valueToStore->child(1)); |
2784 | break; |
2785 | case BitAnd: |
2786 | matched = tryAppendStoreBinOp<And32, And64, Commutative>( |
2787 | valueToStore->child(0), valueToStore->child(1)); |
2788 | break; |
2789 | case BitXor: |
2790 | if (valueToStore->child(1)->isInt(-1)) { |
2791 | matched = tryAppendStoreUnOp<Not32, Not64>(valueToStore->child(0)); |
2792 | break; |
2793 | } |
2794 | matched = tryAppendStoreBinOp<Xor32, Xor64, Commutative>( |
2795 | valueToStore->child(0), valueToStore->child(1)); |
2796 | break; |
2797 | default: |
2798 | break; |
2799 | } |
2800 | if (matched) { |
2801 | commitInternal(valueToStore); |
2802 | return; |
2803 | } |
2804 | } |
2805 | |
2806 | appendStore(m_value, addr(m_value)); |
2807 | return; |
2808 | } |
2809 | |
2810 | case B3::Store8: { |
2811 | Value* valueToStore = m_value->child(0); |
2812 | if (canBeInternal(valueToStore)) { |
2813 | bool matched = false; |
2814 | switch (valueToStore->opcode()) { |
2815 | case Add: |
2816 | matched = tryAppendStoreBinOp<Add8, Air::Oops, Commutative>( |
2817 | valueToStore->child(0), valueToStore->child(1)); |
2818 | break; |
2819 | default: |
2820 | break; |
2821 | } |
2822 | if (matched) { |
2823 | commitInternal(valueToStore); |
2824 | return; |
2825 | } |
2826 | } |
2827 | appendStore(m_value, addr(m_value)); |
2828 | return; |
2829 | } |
2830 | |
2831 | case B3::Store16: { |
2832 | Value* valueToStore = m_value->child(0); |
2833 | if (canBeInternal(valueToStore)) { |
2834 | bool matched = false; |
2835 | switch (valueToStore->opcode()) { |
2836 | case Add: |
2837 | matched = tryAppendStoreBinOp<Add16, Air::Oops, Commutative>( |
2838 | valueToStore->child(0), valueToStore->child(1)); |
2839 | break; |
2840 | default: |
2841 | break; |
2842 | } |
2843 | if (matched) { |
2844 | commitInternal(valueToStore); |
2845 | return; |
2846 | } |
2847 | } |
2848 | appendStore(m_value, addr(m_value)); |
2849 | return; |
2850 | } |
2851 | |
2852 | case WasmAddress: { |
2853 | WasmAddressValue* address = m_value->as<WasmAddressValue>(); |
2854 | |
2855 | append(Add64, Arg(address->pinnedGPR()), tmp(m_value->child(0)), tmp(address)); |
2856 | return; |
2857 | } |
2858 | |
2859 | case Fence: { |
2860 | FenceValue* fence = m_value->as<FenceValue>(); |
2861 | if (!fence->write && !fence->read) |
2862 | return; |
2863 | if (!fence->write) { |
2864 | // A fence that reads but does not write is for protecting motion of stores. |
2865 | append(StoreFence); |
2866 | return; |
2867 | } |
2868 | if (!fence->read) { |
2869 | // A fence that writes but does not read is for protecting motion of loads. |
2870 | append(LoadFence); |
2871 | return; |
2872 | } |
2873 | append(MemoryFence); |
2874 | return; |
2875 | } |
2876 | |
2877 | case Trunc: { |
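| // Trunc generates no code: the input and the truncated result share a tmp. |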
2878 | ASSERT(tmp(m_value->child(0)) == tmp(m_value)); |
2879 | return; |
2880 | } |
2881 | |
2882 | case SExt8: { |
2883 | appendUnOp<SignExtend8To32, Air::Oops>(m_value->child(0)); |
2884 | return; |
2885 | } |
2886 | |
2887 | case SExt16: { |
2888 | appendUnOp<SignExtend16To32, Air::Oops>(m_value->child(0)); |
2889 | return; |
2890 | } |
2891 | |
2892 | case ZExt32: { |
2893 | appendUnOp<Move32, Air::Oops>(m_value->child(0)); |
2894 | return; |
2895 | } |
2896 | |
2897 | case SExt32: { |
2898 | // FIXME: We should have support for movsbq/movswq |
2899 | // https://bugs.webkit.org/show_bug.cgi?id=152232 |
2900 | |
2901 | appendUnOp<SignExtend32ToPtr, Air::Oops>(m_value->child(0)); |
2902 | return; |
2903 | } |
2904 | |
2905 | case FloatToDouble: { |
2906 | appendUnOp<Air::Oops, Air::Oops, Air::Oops, ConvertFloatToDouble>(m_value->child(0)); |
2907 | return; |
2908 | } |
2909 | |
2910 | case DoubleToFloat: { |
2911 | appendUnOp<Air::Oops, Air::Oops, ConvertDoubleToFloat>(m_value->child(0)); |
2912 | return; |
2913 | } |
2914 | |
2915 | case ArgumentReg: { |
2916 | m_prologue.append(Inst( |
2917 | moveForType(m_value->type()), m_value, |
2918 | Tmp(m_value->as<ArgumentRegValue>()->argumentReg()), |
2919 | tmp(m_value))); |
2920 | return; |
2921 | } |
2922 | |
2923 | case Const32: |
2924 | case Const64: { |
2925 | if (imm(m_value)) |
2926 | append(Move, imm(m_value), tmp(m_value)); |
2927 | else |
2928 | append(Move, Arg::bigImm(m_value->asInt()), tmp(m_value)); |
2929 | return; |
2930 | } |
2931 | |
2932 | case ConstDouble: |
2933 | case ConstFloat: { |
2934 | // We expect that the moveConstants() phase has run, and any doubles referenced from |
2935 | // stackmaps get fused. |
2936 | RELEASE_ASSERT(m_value->opcode() == ConstFloat || isIdentical(m_value->asDouble(), 0.0)); |
2937 | RELEASE_ASSERT(m_value->opcode() == ConstDouble || isIdentical(m_value->asFloat(), 0.0f)); |
2938 | append(MoveZeroToDouble, tmp(m_value)); |
2939 | return; |
2940 | } |
2941 | |
2942 | case FramePointer: { |
2943 | ASSERT(tmp(m_value) == Tmp(GPRInfo::callFrameRegister)); |
2944 | return; |
2945 | } |
2946 | |
2947 | case SlotBase: { |
2948 | append( |
2949 | pointerType() == Int64 ? Lea64 : Lea32, |
2950 | Arg::stack(m_stackToStack.get(m_value->as<SlotBaseValue>()->slot())), |
2951 | tmp(m_value)); |
2952 | return; |
2953 | } |
2954 | |
2955 | case Equal: |
2956 | case NotEqual: { |
2957 | // FIXME: Teach this to match patterns that arise from subwidth CAS. The CAS's result has to |
2958 | // be either zero- or sign-extended, and the value it's compared to should also be zero- or |
2959 | // sign-extended in a matching way. It's not super clear that this is very profitable. |
2960 | // https://bugs.webkit.org/show_bug.cgi?id=169250 |
2961 | if (m_value->child(0)->opcode() == AtomicStrongCAS |
2962 | && m_value->child(0)->as<AtomicValue>()->isCanonicalWidth() |
2963 | && m_value->child(0)->child(0) == m_value->child(1) |
2964 | && canBeInternal(m_value->child(0))) { |
2965 | ASSERT(!m_locked.contains(m_value->child(0)->child(1))); |
2966 | ASSERT(!m_locked.contains(m_value->child(1))); |
2967 | |
2968 | commitInternal(m_value->child(0)); |
2969 | appendCAS(m_value->child(0), m_value->opcode() == NotEqual); |
2970 | return; |
2971 | } |
2972 | |
2973 | m_insts.last().append(createCompare(m_value)); |
2974 | return; |
2975 | } |
2976 | |
2977 | case LessThan: |
2978 | case GreaterThan: |
2979 | case LessEqual: |
2980 | case GreaterEqual: |
2981 | case Above: |
2982 | case Below: |
2983 | case AboveEqual: |
2984 | case BelowEqual: |
2985 | case EqualOrUnordered: { |
2986 | m_insts.last().append(createCompare(m_value)); |
2987 | return; |
2988 | } |
2989 | |
2990 | case Select: { |
2991 | MoveConditionallyConfig config; |
2992 | if (isInt(m_value->type())) { |
2993 | config.moveConditionally32 = MoveConditionally32; |
2994 | config.moveConditionally64 = MoveConditionally64; |
2995 | config.moveConditionallyTest32 = MoveConditionallyTest32; |
2996 | config.moveConditionallyTest64 = MoveConditionallyTest64; |
2997 | config.moveConditionallyDouble = MoveConditionallyDouble; |
2998 | config.moveConditionallyFloat = MoveConditionallyFloat; |
2999 | } else { |
// FIXME: It's not obvious that these are particularly efficient.
3001 | // https://bugs.webkit.org/show_bug.cgi?id=169251 |
3002 | config.moveConditionally32 = MoveDoubleConditionally32; |
3003 | config.moveConditionally64 = MoveDoubleConditionally64; |
3004 | config.moveConditionallyTest32 = MoveDoubleConditionallyTest32; |
3005 | config.moveConditionallyTest64 = MoveDoubleConditionallyTest64; |
3006 | config.moveConditionallyDouble = MoveDoubleConditionallyDouble; |
3007 | config.moveConditionallyFloat = MoveDoubleConditionallyFloat; |
3008 | } |
3009 | |
3010 | m_insts.last().append(createSelect(config)); |
3011 | return; |
3012 | } |
3013 | |
3014 | case IToD: { |
3015 | appendUnOp<ConvertInt32ToDouble, ConvertInt64ToDouble>(m_value->child(0)); |
3016 | return; |
3017 | } |
3018 | |
3019 | case IToF: { |
3020 | appendUnOp<ConvertInt32ToFloat, ConvertInt64ToFloat>(m_value->child(0)); |
3021 | return; |
3022 | } |
3023 | |
3024 | case B3::CCall: { |
3025 | CCallValue* cCall = m_value->as<CCallValue>(); |
3026 | |
3027 | Inst inst(m_isRare ? Air::ColdCCall : Air::CCall, cCall); |
3028 | |
// We have a ton of flexibility regarding the callee argument, but we don't take advantage
// of it yet. Doing so gets weird, for a few reasons:
3031 | // 1) We probably will never take advantage of this. We don't have C calls to locations |
3032 | // loaded from addresses. We have JS calls like that, but those use Patchpoints. |
// 2) On X86_64 we still don't support calls through a BaseIndex address.
// 3) On non-X86, we don't natively support calling through an address loaded from memory.
3035 | // 4) We don't have an isValidForm() for the CCallSpecial so we have no smart way to |
3036 | // decide. |
3037 | // FIXME: https://bugs.webkit.org/show_bug.cgi?id=151052 |
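// Argument layout: the callee tmp first, then the result tmp (if the call returns a
// value), then each argument as an immediate or tmp.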
3038 | inst.args.append(tmp(cCall->child(0))); |
3039 | |
3040 | if (cCall->type() != Void) |
3041 | inst.args.append(tmp(cCall)); |
3042 | |
3043 | for (unsigned i = 1; i < cCall->numChildren(); ++i) |
3044 | inst.args.append(immOrTmp(cCall->child(i))); |
3045 | |
3046 | m_insts.last().append(WTFMove(inst)); |
3047 | return; |
3048 | } |
3049 | |
3050 | case Patchpoint: { |
3051 | PatchpointValue* patchpointValue = m_value->as<PatchpointValue>(); |
3052 | ensureSpecial(m_patchpointSpecial); |
3053 | |
3054 | Inst inst(Patch, patchpointValue, Arg::special(m_patchpointSpecial)); |
3055 | |
3056 | Vector<Inst> after; |
3057 | if (patchpointValue->type() != Void) { |
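// Tell the patchpoint's special how the result is delivered: in the value's own tmp (for
// the Any/SomeRegister constraints), in a specific register that we then move into the
// tmp, or in a stack-argument slot that we move from afterwards.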
3058 | switch (patchpointValue->resultConstraint.kind()) { |
3059 | case ValueRep::WarmAny: |
3060 | case ValueRep::ColdAny: |
3061 | case ValueRep::LateColdAny: |
3062 | case ValueRep::SomeRegister: |
3063 | case ValueRep::SomeEarlyRegister: |
3064 | inst.args.append(tmp(patchpointValue)); |
3065 | break; |
3066 | case ValueRep::Register: { |
3067 | Tmp reg = Tmp(patchpointValue->resultConstraint.reg()); |
3068 | inst.args.append(reg); |
3069 | after.append(Inst( |
3070 | relaxedMoveForType(patchpointValue->type()), m_value, reg, tmp(patchpointValue))); |
3071 | break; |
3072 | } |
3073 | case ValueRep::StackArgument: { |
3074 | Arg arg = Arg::callArg(patchpointValue->resultConstraint.offsetFromSP()); |
3075 | inst.args.append(arg); |
3076 | after.append(Inst( |
3077 | moveForType(patchpointValue->type()), m_value, arg, tmp(patchpointValue))); |
3078 | break; |
3079 | } |
3080 | default: |
3081 | RELEASE_ASSERT_NOT_REACHED(); |
3082 | break; |
3083 | } |
3084 | } |
3085 | |
3086 | fillStackmap(inst, patchpointValue, 0); |
3087 | |
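// If the result is pinned to a register, that register is defined by the patchpoint, so
// it must not also be listed as late-clobbered.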
3088 | if (patchpointValue->resultConstraint.isReg()) |
3089 | patchpointValue->lateClobbered().clear(patchpointValue->resultConstraint.reg()); |
3090 | |
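// Hand the patchpoint its requested scratch registers as fresh tmps; the register
// allocator will bind them to real registers that the patchpoint may clobber freely.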
3091 | for (unsigned i = patchpointValue->numGPScratchRegisters; i--;) |
3092 | inst.args.append(m_code.newTmp(GP)); |
3093 | for (unsigned i = patchpointValue->numFPScratchRegisters; i--;) |
3094 | inst.args.append(m_code.newTmp(FP)); |
3095 | |
3096 | m_insts.last().append(WTFMove(inst)); |
3097 | m_insts.last().appendVector(after); |
3098 | return; |
3099 | } |
3100 | |
3101 | case CheckAdd: |
3102 | case CheckSub: |
3103 | case CheckMul: { |
3104 | CheckValue* checkValue = m_value->as<CheckValue>(); |
3105 | |
3106 | Value* left = checkValue->child(0); |
3107 | Value* right = checkValue->child(1); |
3108 | |
3109 | Tmp result = tmp(m_value); |
3110 | |
3111 | // Handle checked negation. |
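// CheckSub(0, x) is a checked negate: move x into the result and negate it in place with
// a branching negate (BranchNeg32/64) whose overflow case takes the check's slow path.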
3112 | if (checkValue->opcode() == CheckSub && left->isInt(0)) { |
3113 | append(Move, tmp(right), result); |
3114 | |
3115 | Air::Opcode opcode = |
3116 | opcodeForType(BranchNeg32, BranchNeg64, checkValue->type()); |
3117 | CheckSpecial* special = ensureCheckSpecial(opcode, 2); |
3118 | |
3119 | Inst inst(Patch, checkValue, Arg::special(special)); |
3120 | inst.args.append(Arg::resCond(MacroAssembler::Overflow)); |
3121 | inst.args.append(result); |
3122 | |
3123 | fillStackmap(inst, checkValue, 2); |
3124 | |
3125 | m_insts.last().append(WTFMove(inst)); |
3126 | return; |
3127 | } |
3128 | |
3129 | Air::Opcode opcode = Air::Oops; |
3130 | Commutativity commutativity = NotCommutative; |
3131 | StackmapSpecial::RoleMode stackmapRole = StackmapSpecial::SameAsRep; |
3132 | switch (m_value->opcode()) { |
3133 | case CheckAdd: |
3134 | opcode = opcodeForType(BranchAdd32, BranchAdd64, m_value->type()); |
3135 | stackmapRole = StackmapSpecial::ForceLateUseUnlessRecoverable; |
3136 | commutativity = Commutative; |
3137 | break; |
3138 | case CheckSub: |
3139 | opcode = opcodeForType(BranchSub32, BranchSub64, m_value->type()); |
3140 | break; |
3141 | case CheckMul: |
3142 | opcode = opcodeForType(BranchMul32, BranchMul64, checkValue->type()); |
3143 | stackmapRole = StackmapSpecial::ForceLateUse; |
3144 | break; |
3145 | default: |
3146 | RELEASE_ASSERT_NOT_REACHED(); |
3147 | break; |
3148 | } |
3149 | |
3150 | // FIXME: It would be great to fuse Loads into these. We currently don't do it because the |
3151 | // rule for stackmaps is that all addresses are just stack addresses. Maybe we could relax |
3152 | // this rule here. |
3153 | // https://bugs.webkit.org/show_bug.cgi?id=151228 |
3154 | |
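// Pick the best form the target supports: a three-operand form with an immediate, a
// two-operand immediate form (which clobbers the result, so move the left operand into it
// first), a three-operand tmp form, a two-operand tmp form (choosing which operand to
// clobber), or a form that needs two extra scratch tmps (e.g. BranchMul on ARM64, which
// uses them to detect overflow).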
3155 | Vector<Arg, 2> sources; |
3156 | if (imm(right) && isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Imm, Arg::Tmp)) { |
3157 | sources.append(tmp(left)); |
3158 | sources.append(imm(right)); |
3159 | } else if (imm(right) && isValidForm(opcode, Arg::ResCond, Arg::Imm, Arg::Tmp)) { |
3160 | sources.append(imm(right)); |
3161 | append(Move, tmp(left), result); |
3162 | } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp, Arg::Tmp)) { |
3163 | sources.append(tmp(left)); |
3164 | sources.append(tmp(right)); |
3165 | } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp)) { |
3166 | if (commutativity == Commutative && preferRightForResult(left, right)) { |
3167 | sources.append(tmp(left)); |
3168 | append(Move, tmp(right), result); |
3169 | } else { |
3170 | sources.append(tmp(right)); |
3171 | append(Move, tmp(left), result); |
3172 | } |
3173 | } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) { |
3174 | sources.append(tmp(left)); |
3175 | sources.append(tmp(right)); |
3176 | sources.append(m_code.newTmp(m_value->resultBank())); |
3177 | sources.append(m_code.newTmp(m_value->resultBank())); |
3178 | } |
3179 | |
3180 | // There is a really hilarious case that arises when we do BranchAdd32(%x, %x). We won't emit |
3181 | // such code, but the coalescing in our register allocator also does copy propagation, so |
3182 | // although we emit: |
3183 | // |
3184 | // Move %tmp1, %tmp2 |
3185 | // BranchAdd32 %tmp1, %tmp2 |
3186 | // |
3187 | // The register allocator may turn this into: |
3188 | // |
3189 | // BranchAdd32 %rax, %rax |
3190 | // |
3191 | // Currently we handle this by ensuring that even this kind of addition can be undone. We can |
3192 | // undo it by using the carry flag. It's tempting to get rid of that code and just "fix" this |
3193 | // here by forcing LateUse on the stackmap. If we did that unconditionally, we'd lose a lot of |
3194 | // performance. So it's tempting to do it only if left == right. But that creates an awkward |
3195 | // constraint on Air: it means that Air would not be allowed to do any copy propagation. |
3196 | // Notice that the %rax,%rax situation happened after Air copy-propagated the Move we are |
3197 | // emitting. We know that copy-propagating over that Move causes add-to-self. But what if we |
// emit something like a Move (or do other kinds of copy propagation on tmps) somewhere else
// in this code? The add-to-self situation may only emerge after some other Air optimization
// removes other Moves or identity-like operations. That's why we don't use
3201 | // LateUse here to take care of add-to-self. |
3202 | |
3203 | CheckSpecial* special = ensureCheckSpecial(opcode, 2 + sources.size(), stackmapRole); |
3204 | |
3205 | Inst inst(Patch, checkValue, Arg::special(special)); |
3206 | |
3207 | inst.args.append(Arg::resCond(MacroAssembler::Overflow)); |
3208 | |
3209 | inst.args.appendVector(sources); |
3210 | inst.args.append(result); |
3211 | |
3212 | fillStackmap(inst, checkValue, 2); |
3213 | |
3214 | m_insts.last().append(WTFMove(inst)); |
3215 | return; |
3216 | } |
3217 | |
3218 | case Check: { |
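// Lower the check's predicate exactly as we would a Branch, then wrap that branch in a
// Patch governed by a CheckSpecial; the stackmap arguments are appended after the
// branch's own arguments.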
3219 | Inst branch = createBranch(m_value->child(0)); |
3220 | |
3221 | CheckSpecial* special = ensureCheckSpecial(branch); |
3222 | |
3223 | CheckValue* checkValue = m_value->as<CheckValue>(); |
3224 | |
3225 | Inst inst(Patch, checkValue, Arg::special(special)); |
3226 | inst.args.appendVector(branch.args); |
3227 | |
3228 | fillStackmap(inst, checkValue, 1); |
3229 | |
3230 | m_insts.last().append(WTFMove(inst)); |
3231 | return; |
3232 | } |
3233 | |
3234 | case B3::WasmBoundsCheck: { |
3235 | WasmBoundsCheckValue* value = m_value->as<WasmBoundsCheckValue>(); |
3236 | |
3237 | Value* ptr = value->child(0); |
3238 | Tmp pointer = tmp(ptr); |
3239 | |
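// Zero-extend the 32-bit pointer into a fresh tmp (Move32 clears the upper bits), then
// add the constant offset, materializing it in a tmp when it does not fit in an immediate.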
3240 | Arg ptrPlusImm = m_code.newTmp(GP); |
3241 | append(Inst(Move32, value, pointer, ptrPlusImm)); |
3242 | if (value->offset()) { |
3243 | if (imm(value->offset())) |
3244 | append(Add64, imm(value->offset()), ptrPlusImm); |
3245 | else { |
3246 | Arg bigImm = m_code.newTmp(GP); |
3247 | append(Move, Arg::bigImm(value->offset()), bigImm); |
3248 | append(Add64, bigImm, ptrPlusImm); |
3249 | } |
3250 | } |
3251 | |
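// The limit is either the pinned size register or the memory's maximum size, materialized
// as a constant.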
3252 | Arg limit; |
3253 | switch (value->boundsType()) { |
3254 | case WasmBoundsCheckValue::Type::Pinned: |
3255 | limit = Arg(value->bounds().pinnedSize); |
3256 | break; |
3257 | |
3258 | case WasmBoundsCheckValue::Type::Maximum: |
3259 | limit = m_code.newTmp(GP); |
3260 | if (imm(value->bounds().maximum)) |
3261 | append(Move, imm(value->bounds().maximum), limit); |
3262 | else |
3263 | append(Move, Arg::bigImm(value->bounds().maximum), limit); |
3264 | break; |
3265 | } |
3266 | |
3267 | append(Inst(Air::WasmBoundsCheck, value, ptrPlusImm, limit)); |
3268 | return; |
3269 | } |
3270 | |
3271 | case Upsilon: { |
3272 | Value* value = m_value->child(0); |
3273 | append( |
3274 | relaxedMoveForType(value->type()), immOrTmp(value), |
3275 | m_phiToTmp[m_value->as<UpsilonValue>()->phi()]); |
3276 | return; |
3277 | } |
3278 | |
3279 | case Phi: { |
3280 | // Snapshot the value of the Phi. It may change under us because you could do: |
3281 | // a = Phi() |
3282 | // Upsilon(@x, ^a) |
3283 | // @a => this should get the value of the Phi before the Upsilon, i.e. not @x. |
3284 | |
3285 | append(relaxedMoveForType(m_value->type()), m_phiToTmp[m_value], tmp(m_value)); |
3286 | return; |
3287 | } |
3288 | |
3289 | case Set: { |
3290 | Value* value = m_value->child(0); |
3291 | append( |
3292 | relaxedMoveForType(value->type()), immOrTmp(value), |
3293 | m_variableToTmp.get(m_value->as<VariableValue>()->variable())); |
3294 | return; |
3295 | } |
3296 | |
3297 | case Get: { |
3298 | append( |
3299 | relaxedMoveForType(m_value->type()), |
3300 | m_variableToTmp.get(m_value->as<VariableValue>()->variable()), tmp(m_value)); |
3301 | return; |
3302 | } |
3303 | |
3304 | case Branch: { |
3305 | if (canBeInternal(m_value->child(0))) { |
3306 | Value* branchChild = m_value->child(0); |
3307 | switch (branchChild->opcode()) { |
3308 | case AtomicWeakCAS: |
3309 | commitInternal(branchChild); |
3310 | appendCAS(branchChild, false); |
3311 | return; |
3312 | |
3313 | case AtomicStrongCAS: |
// A branch on a value is a comparison against zero, so when the CAS's expected value is
// zero, the branch can be fused with the CAS itself.
3315 | // FIXME: Teach this to match patterns that arise from subwidth CAS. |
3316 | // https://bugs.webkit.org/show_bug.cgi?id=169250 |
3317 | if (branchChild->child(0)->isInt(0) |
3318 | && branchChild->as<AtomicValue>()->isCanonicalWidth()) { |
3319 | commitInternal(branchChild); |
3320 | appendCAS(branchChild, true); |
3321 | return; |
3322 | } |
3323 | break; |
3324 | |
3325 | case Equal: |
3326 | case NotEqual: |
3327 | // FIXME: Teach this to match patterns that arise from subwidth CAS. |
3328 | // https://bugs.webkit.org/show_bug.cgi?id=169250 |
3329 | if (branchChild->child(0)->opcode() == AtomicStrongCAS |
3330 | && branchChild->child(0)->as<AtomicValue>()->isCanonicalWidth() |
3331 | && canBeInternal(branchChild->child(0)) |
3332 | && branchChild->child(0)->child(0) == branchChild->child(1)) { |
3333 | commitInternal(branchChild); |
3334 | commitInternal(branchChild->child(0)); |
3335 | appendCAS(branchChild->child(0), branchChild->opcode() == NotEqual); |
3336 | return; |
3337 | } |
3338 | break; |
3339 | |
3340 | default: |
3341 | break; |
3342 | } |
3343 | } |
3344 | |
3345 | m_insts.last().append(createBranch(m_value->child(0))); |
3346 | return; |
3347 | } |
3348 | |
3349 | case B3::Jump: { |
3350 | append(Air::Jump); |
3351 | return; |
3352 | } |
3353 | |
3354 | case Identity: |
3355 | case Opaque: { |
3356 | ASSERT(tmp(m_value->child(0)) == tmp(m_value)); |
3357 | return; |
3358 | } |
3359 | |
3360 | case Return: { |
3361 | if (!m_value->numChildren()) { |
3362 | append(RetVoid); |
3363 | return; |
3364 | } |
3365 | Value* value = m_value->child(0); |
3366 | Tmp returnValueGPR = Tmp(GPRInfo::returnValueGPR); |
3367 | Tmp returnValueFPR = Tmp(FPRInfo::returnValueFPR); |
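// Move the result into the ABI return register and emit a typed Ret so that register is
// treated as live at the return.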
3368 | switch (value->type()) { |
3369 | case Void: |
3370 | // It's impossible for a void value to be used as a child. We use RetVoid |
3371 | // for void returns. |
3372 | RELEASE_ASSERT_NOT_REACHED(); |
3373 | break; |
3374 | case Int32: |
3375 | append(Move, immOrTmp(value), returnValueGPR); |
3376 | append(Ret32, returnValueGPR); |
3377 | break; |
3378 | case Int64: |
3379 | append(Move, immOrTmp(value), returnValueGPR); |
3380 | append(Ret64, returnValueGPR); |
3381 | break; |
3382 | case Float: |
3383 | append(MoveFloat, tmp(value), returnValueFPR); |
3384 | append(RetFloat, returnValueFPR); |
3385 | break; |
3386 | case Double: |
3387 | append(MoveDouble, tmp(value), returnValueFPR); |
3388 | append(RetDouble, returnValueFPR); |
3389 | break; |
3390 | } |
3391 | return; |
3392 | } |
3393 | |
3394 | case B3::Oops: { |
3395 | append(Air::Oops); |
3396 | return; |
3397 | } |
3398 | |
3399 | case B3::EntrySwitch: { |
3400 | append(Air::EntrySwitch); |
3401 | return; |
3402 | } |
3403 | |
3404 | case AtomicWeakCAS: |
3405 | case AtomicStrongCAS: { |
3406 | appendCAS(m_value, false); |
3407 | return; |
3408 | } |
3409 | |
3410 | case AtomicXchgAdd: { |
3411 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
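// If the old value is unused and the target has a plain atomic add, appendVoidAtomic()
// handles it. Otherwise prefer a fused exchange-add (e.g. lock xadd on x86) when the form
// exists, and fall back to the generic atomic read-modify-write loop.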
3412 | if (appendVoidAtomic(OPCODE_FOR_WIDTH(AtomicAdd, atomic->accessWidth()))) |
3413 | return; |
3414 | |
3415 | Arg address = addr(atomic); |
3416 | Air::Opcode opcode = OPCODE_FOR_WIDTH(AtomicXchgAdd, atomic->accessWidth()); |
3417 | if (isValidForm(opcode, Arg::Tmp, address.kind())) { |
3418 | append(relaxedMoveForType(atomic->type()), tmp(atomic->child(0)), tmp(atomic)); |
3419 | append(opcode, tmp(atomic), address); |
3420 | return; |
3421 | } |
3422 | |
3423 | appendGeneralAtomic(OPCODE_FOR_CANONICAL_WIDTH(Add, atomic->accessWidth()), Commutative); |
3424 | return; |
3425 | } |
3426 | |
3427 | case AtomicXchgSub: { |
3428 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
3429 | if (appendVoidAtomic(OPCODE_FOR_WIDTH(AtomicSub, atomic->accessWidth()))) |
3430 | return; |
3431 | |
3432 | appendGeneralAtomic(OPCODE_FOR_CANONICAL_WIDTH(Sub, atomic->accessWidth())); |
3433 | return; |
3434 | } |
3435 | |
3436 | case AtomicXchgAnd: { |
3437 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
3438 | if (appendVoidAtomic(OPCODE_FOR_WIDTH(AtomicAnd, atomic->accessWidth()))) |
3439 | return; |
3440 | |
3441 | appendGeneralAtomic(OPCODE_FOR_CANONICAL_WIDTH(And, atomic->accessWidth()), Commutative); |
3442 | return; |
3443 | } |
3444 | |
3445 | case AtomicXchgOr: { |
3446 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
3447 | if (appendVoidAtomic(OPCODE_FOR_WIDTH(AtomicOr, atomic->accessWidth()))) |
3448 | return; |
3449 | |
3450 | appendGeneralAtomic(OPCODE_FOR_CANONICAL_WIDTH(Or, atomic->accessWidth()), Commutative); |
3451 | return; |
3452 | } |
3453 | |
3454 | case AtomicXchgXor: { |
3455 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
3456 | if (appendVoidAtomic(OPCODE_FOR_WIDTH(AtomicXor, atomic->accessWidth()))) |
3457 | return; |
3458 | |
3459 | appendGeneralAtomic(OPCODE_FOR_CANONICAL_WIDTH(Xor, atomic->accessWidth()), Commutative); |
3460 | return; |
3461 | } |
3462 | |
3463 | case AtomicXchg: { |
3464 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
3465 | |
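// A plain exchange: use the native swap form (e.g. xchg on x86) when the target has one;
// otherwise fall back to the generic atomic loop with Air::Nop as the operation, which
// simply stores the new value.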
3466 | Arg address = addr(atomic); |
3467 | Air::Opcode opcode = OPCODE_FOR_WIDTH(AtomicXchg, atomic->accessWidth()); |
3468 | if (isValidForm(opcode, Arg::Tmp, address.kind())) { |
3469 | append(relaxedMoveForType(atomic->type()), tmp(atomic->child(0)), tmp(atomic)); |
3470 | append(opcode, tmp(atomic), address); |
3471 | return; |
3472 | } |
3473 | |
3474 | appendGeneralAtomic(Air::Nop); |
3475 | return; |
3476 | } |
3477 | |
3478 | default: |
3479 | break; |
3480 | } |
3481 | |
3482 | dataLog("FATAL: could not lower " , deepDump(m_procedure, m_value), "\n" ); |
3483 | RELEASE_ASSERT_NOT_REACHED(); |
3484 | } |
3485 | |
3486 | IndexSet<Value*> m_locked; // These are values that will have no Tmp in Air. |
3487 | IndexMap<Value*, Tmp> m_valueToTmp; // These are values that must have a Tmp in Air. We say that a Value* with a non-null Tmp is "pinned". |
3488 | IndexMap<Value*, Tmp> m_phiToTmp; // Each Phi gets its own Tmp. |
3489 | IndexMap<B3::BasicBlock*, Air::BasicBlock*> m_blockToBlock; |
3490 | HashMap<B3::StackSlot*, Air::StackSlot*> m_stackToStack; |
3491 | HashMap<Variable*, Tmp> m_variableToTmp; |
3492 | |
3493 | UseCounts m_useCounts; |
3494 | PhiChildren m_phiChildren; |
3495 | BlockWorklist m_fastWorklist; |
3496 | Dominators& m_dominators; |
3497 | |
3498 | Vector<Vector<Inst, 4>> m_insts; |
3499 | Vector<Inst> m_prologue; |
3500 | |
3501 | B3::BasicBlock* m_block; |
3502 | bool m_isRare; |
3503 | unsigned m_index; |
3504 | Value* m_value; |
3505 | |
3506 | PatchpointSpecial* m_patchpointSpecial { nullptr }; |
3507 | HashMap<CheckSpecial::Key, CheckSpecial*> m_checkSpecials; |
3508 | |
3509 | Procedure& m_procedure; |
3510 | Code& m_code; |
3511 | |
3512 | Air::BlockInsertionSet m_blockInsertionSet; |
3513 | |
3514 | Tmp m_eax; |
3515 | Tmp m_ecx; |
3516 | Tmp m_edx; |
3517 | }; |
3518 | |
3519 | } // anonymous namespace |
3520 | |
3521 | void lowerToAir(Procedure& procedure) |
3522 | { |
PhaseScope phaseScope(procedure, "lowerToAir");
3524 | LowerToAir lowerToAir(procedure); |
3525 | lowerToAir.run(); |
3526 | } |
3527 | |
3528 | } } // namespace JSC::B3 |
3529 | |
3530 | #if ASSERT_DISABLED |
3531 | IGNORE_RETURN_TYPE_WARNINGS_END |
3532 | #endif |
3533 | |
3534 | #endif // ENABLE(B3_JIT) |
3535 | |