1 | /* |
2 | * Copyright (C) 2015-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "B3LowerToAir.h" |
28 | |
29 | #if ENABLE(B3_JIT) |
30 | |
31 | #include "AirBlockInsertionSet.h" |
32 | #include "AirCCallSpecial.h" |
33 | #include "AirCode.h" |
34 | #include "AirHelpers.h" |
35 | #include "AirInsertionSet.h" |
36 | #include "AirInstInlines.h" |
37 | #include "AirPrintSpecial.h" |
38 | #include "AirStackSlot.h" |
39 | #include "B3ArgumentRegValue.h" |
40 | #include "B3AtomicValue.h" |
41 | #include "B3BasicBlockInlines.h" |
42 | #include "B3BlockWorklist.h" |
43 | #include "B3CCallValue.h" |
44 | #include "B3CheckSpecial.h" |
45 | #include "B3Commutativity.h" |
46 | #include "B3Dominators.h" |
47 | #include "B3ExtractValue.h" |
48 | #include "B3FenceValue.h" |
49 | #include "B3MemoryValueInlines.h" |
50 | #include "B3PatchpointSpecial.h" |
51 | #include "B3PatchpointValue.h" |
52 | #include "B3PhaseScope.h" |
53 | #include "B3PhiChildren.h" |
54 | #include "B3Procedure.h" |
55 | #include "B3SlotBaseValue.h" |
56 | #include "B3StackSlot.h" |
57 | #include "B3UpsilonValue.h" |
58 | #include "B3UseCounts.h" |
59 | #include "B3ValueInlines.h" |
60 | #include "B3Variable.h" |
61 | #include "B3VariableValue.h" |
62 | #include "B3WasmAddressValue.h" |
63 | #include <wtf/IndexMap.h> |
64 | #include <wtf/IndexSet.h> |
65 | #include <wtf/ListDump.h> |
66 | |
67 | #if ASSERT_DISABLED |
68 | IGNORE_RETURN_TYPE_WARNINGS_BEGIN |
69 | #endif |
70 | |
71 | namespace JSC { namespace B3 { |
72 | |
73 | namespace { |
74 | |
75 | namespace B3LowerToAirInternal { |
76 | static constexpr bool verbose = false; |
77 | } |
78 | |
79 | using Arg = Air::Arg; |
80 | using Inst = Air::Inst; |
81 | using Code = Air::Code; |
82 | using Tmp = Air::Tmp; |
83 | |
84 | using Air::moveForType; |
85 | using Air::relaxedMoveForType; |
86 | |
87 | // FIXME: We wouldn't need this if Air supported Width modifiers in Air::Kind. |
88 | // https://bugs.webkit.org/show_bug.cgi?id=169247 |
89 | #define OPCODE_FOR_WIDTH(opcode, width) ( \ |
90 | (width) == Width8 ? Air::opcode ## 8 : \ |
91 | (width) == Width16 ? Air::opcode ## 16 : \ |
92 | (width) == Width32 ? Air::opcode ## 32 : \ |
93 | Air::opcode ## 64) |
94 | #define OPCODE_FOR_CANONICAL_WIDTH(opcode, width) ( \ |
95 | (width) == Width64 ? Air::opcode ## 64 : Air::opcode ## 32) |
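// For example, OPCODE_FOR_WIDTH(Xchg, Width16) selects Air::Xchg16, while
// OPCODE_FOR_CANONICAL_WIDTH maps every width below Width64 to the 32-bit variant of the
// opcode and Width64 to the 64-bit one.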
96 | |
97 | class LowerToAir { |
98 | public: |
99 | LowerToAir(Procedure& procedure) |
100 | : m_valueToTmp(procedure.values().size()) |
101 | , m_phiToTmp(procedure.values().size()) |
102 | , m_blockToBlock(procedure.size()) |
103 | , m_useCounts(procedure) |
104 | , m_phiChildren(procedure) |
105 | , m_dominators(procedure.dominators()) |
106 | , m_procedure(procedure) |
107 | , m_code(procedure.code()) |
108 | , m_blockInsertionSet(m_code) |
109 | #if CPU(X86) || CPU(X86_64) |
110 | , m_eax(X86Registers::eax) |
111 | , m_ecx(X86Registers::ecx) |
112 | , m_edx(X86Registers::edx) |
113 | #endif |
114 | { |
115 | } |
116 | |
117 | void run() |
118 | { |
119 | using namespace Air; |
120 | for (B3::BasicBlock* block : m_procedure) |
121 | m_blockToBlock[block] = m_code.addBlock(block->frequency()); |
122 | |
123 | auto ensureTupleTmps = [&] (Value* tupleValue, auto& hashTable) { |
124 | hashTable.ensure(tupleValue, [&] { |
125 | const auto tuple = m_procedure.tupleForType(tupleValue->type()); |
126 | Vector<Tmp> tmps(tuple.size()); |
127 | |
128 | for (unsigned i = 0; i < tuple.size(); ++i) |
129 | tmps[i] = tmpForType(tuple[i]); |
130 | return tmps; |
131 | }); |
132 | }; |
133 | |
134 | for (Value* value : m_procedure.values()) { |
135 | switch (value->opcode()) { |
136 | case Phi: { |
137 | if (value->type().isTuple()) { |
138 | ensureTupleTmps(value, m_tuplePhiToTmps); |
139 | ensureTupleTmps(value, m_tupleValueToTmps); |
140 | break; |
141 | } |
142 | |
143 | m_phiToTmp[value] = m_code.newTmp(value->resultBank()); |
144 | if (B3LowerToAirInternal::verbose) |
                    dataLog("Phi tmp for ", *value, ": ", m_phiToTmp[value], "\n");
146 | break; |
147 | } |
148 | case Get: |
149 | case Patchpoint: { |
150 | if (value->type().isTuple()) |
151 | ensureTupleTmps(value, m_tupleValueToTmps); |
152 | break; |
153 | } |
154 | default: |
155 | break; |
156 | } |
157 | } |
158 | |
159 | for (B3::StackSlot* stack : m_procedure.stackSlots()) |
160 | m_stackToStack.add(stack, m_code.addStackSlot(stack)); |
161 | for (Variable* variable : m_procedure.variables()) { |
162 | auto addResult = m_variableToTmps.add(variable, Vector<Tmp, 1>(m_procedure.resultCount(variable->type()))); |
163 | ASSERT(addResult.isNewEntry); |
164 | for (unsigned i = 0; i < m_procedure.resultCount(variable->type()); ++i) |
165 | addResult.iterator->value[i] = tmpForType(m_procedure.typeAtOffset(variable->type(), i)); |
166 | } |
167 | |
168 | // Figure out which blocks are not rare. |
169 | m_fastWorklist.push(m_procedure[0]); |
170 | while (B3::BasicBlock* block = m_fastWorklist.pop()) { |
171 | for (B3::FrequentedBlock& successor : block->successors()) { |
172 | if (!successor.isRare()) |
173 | m_fastWorklist.push(successor.block()); |
174 | } |
175 | } |
176 | |
177 | m_procedure.resetValueOwners(); // Used by crossesInterference(). |
178 | |
179 | // Lower defs before uses on a global level. This is a good heuristic to lock down a |
180 | // hoisted address expression before we duplicate it back into the loop. |
181 | for (B3::BasicBlock* block : m_procedure.blocksInPreOrder()) { |
182 | m_block = block; |
183 | |
184 | m_isRare = !m_fastWorklist.saw(block); |
185 | |
186 | if (B3LowerToAirInternal::verbose) |
                dataLog("Lowering Block ", *block, ":\n");
188 | |
189 | // Make sure that the successors are set up correctly. |
190 | for (B3::FrequentedBlock successor : block->successors()) { |
191 | m_blockToBlock[block]->successors().append( |
192 | Air::FrequentedBlock(m_blockToBlock[successor.block()], successor.frequency())); |
193 | } |
194 | |
195 | // Process blocks in reverse order so we see uses before defs. That's what allows us |
196 | // to match patterns effectively. |
197 | for (unsigned i = block->size(); i--;) { |
198 | m_index = i; |
199 | m_value = block->at(i); |
200 | if (m_locked.contains(m_value)) |
201 | continue; |
202 | m_insts.append(Vector<Inst>()); |
203 | if (B3LowerToAirInternal::verbose) |
                    dataLog("Lowering ", deepDump(m_procedure, m_value), ":\n");
205 | lower(); |
206 | if (B3LowerToAirInternal::verbose) { |
207 | for (Inst& inst : m_insts.last()) |
                        dataLog("    ", inst, "\n");
209 | } |
210 | } |
211 | |
212 | finishAppendingInstructions(m_blockToBlock[block]); |
213 | } |
214 | |
215 | m_blockInsertionSet.execute(); |
216 | |
217 | Air::InsertionSet insertionSet(m_code); |
218 | for (Inst& inst : m_prologue) |
219 | insertionSet.insertInst(0, WTFMove(inst)); |
220 | insertionSet.execute(m_code[0]); |
221 | } |
222 | |
223 | private: |
224 | bool shouldCopyPropagate(Value* value) |
225 | { |
226 | switch (value->opcode()) { |
227 | case Trunc: |
228 | case Identity: |
229 | case Opaque: |
230 | return true; |
231 | default: |
232 | return false; |
233 | } |
234 | } |
235 | |
236 | class ArgPromise { |
237 | WTF_MAKE_NONCOPYABLE(ArgPromise); |
238 | public: |
239 | ArgPromise() { } |
240 | |
241 | ArgPromise(const Arg& arg, Value* valueToLock = nullptr) |
242 | : m_arg(arg) |
243 | , m_value(valueToLock) |
244 | { |
245 | } |
246 | |
247 | void swap(ArgPromise& other) |
248 | { |
249 | std::swap(m_arg, other.m_arg); |
250 | std::swap(m_value, other.m_value); |
251 | std::swap(m_wasConsumed, other.m_wasConsumed); |
252 | std::swap(m_wasWrapped, other.m_wasWrapped); |
253 | std::swap(m_traps, other.m_traps); |
254 | } |
255 | |
256 | ArgPromise(ArgPromise&& other) |
257 | { |
258 | swap(other); |
259 | } |
260 | |
261 | ArgPromise& operator=(ArgPromise&& other) |
262 | { |
263 | swap(other); |
264 | return *this; |
265 | } |
266 | |
267 | ~ArgPromise() |
268 | { |
269 | if (m_wasConsumed) |
270 | RELEASE_ASSERT(m_wasWrapped); |
271 | } |
272 | |
273 | void setTraps(bool value) |
274 | { |
275 | m_traps = value; |
276 | } |
277 | |
278 | static ArgPromise tmp(Value* value) |
279 | { |
280 | ArgPromise result; |
281 | result.m_value = value; |
282 | return result; |
283 | } |
284 | |
285 | explicit operator bool() const { return m_arg || m_value; } |
286 | |
287 | Arg::Kind kind() const |
288 | { |
289 | if (!m_arg && m_value) |
290 | return Arg::Tmp; |
291 | return m_arg.kind(); |
292 | } |
293 | |
294 | const Arg& peek() const |
295 | { |
296 | return m_arg; |
297 | } |
298 | |
299 | Arg consume(LowerToAir& lower) |
300 | { |
301 | m_wasConsumed = true; |
302 | if (!m_arg && m_value) |
303 | return lower.tmp(m_value); |
304 | if (m_value) |
305 | lower.commitInternal(m_value); |
306 | return m_arg; |
307 | } |
308 | |
309 | template<typename... Args> |
310 | Inst inst(Args&&... args) |
311 | { |
312 | Inst result(std::forward<Args>(args)...); |
313 | result.kind.effects |= m_traps; |
314 | m_wasWrapped = true; |
315 | return result; |
316 | } |
317 | |
318 | private: |
        // Four possible states:
320 | // Everything null: invalid. |
321 | // Arg non-null, value null: just use the arg, nothing special. |
322 | // Arg null, value non-null: it's a tmp, pin it when necessary. |
323 | // Arg non-null, value non-null: use the arg, lock the value. |
324 | Arg m_arg; |
325 | Value* m_value { nullptr }; |
326 | bool m_wasConsumed { false }; |
327 | bool m_wasWrapped { false }; |
328 | bool m_traps { false }; |
329 | }; |
330 | |
331 | // Consider using tmpPromise() in cases where you aren't sure that you want to pin the value yet. |
332 | // Here are three canonical ways of using tmp() and tmpPromise(): |
333 | // |
334 | // Idiom #1: You know that you want a tmp() and you know that it will be valid for the |
335 | // instruction you're emitting. |
336 | // |
337 | // append(Foo, tmp(bar)); |
338 | // |
339 | // Idiom #2: You don't know if you want to use a tmp() because you haven't determined if the |
340 | // instruction will accept it, so you query first. Note that the call to tmp() happens only after |
341 | // you are sure that you will use it. |
342 | // |
343 | // if (isValidForm(Foo, Arg::Tmp)) |
344 | // append(Foo, tmp(bar)) |
345 | // |
346 | // Idiom #3: Same as Idiom #2, but using tmpPromise. Notice that this calls consume() only after |
347 | // it's sure it will use the tmp. That's deliberate. Also note that you're required to pass any |
348 | // Inst you create with consumed promises through that promise's inst() function. |
349 | // |
350 | // ArgPromise promise = tmpPromise(bar); |
351 | // if (isValidForm(Foo, promise.kind())) |
352 | // append(promise.inst(Foo, promise.consume(*this))) |
353 | // |
354 | // In both idiom #2 and idiom #3, we don't pin the value to a temporary except when we actually |
355 | // emit the instruction. Both tmp() and tmpPromise().consume(*this) will pin it. Pinning means |
356 | // that we will henceforth require that the value of 'bar' is generated as a separate |
357 | // instruction. We don't want to pin the value to a temporary if we might change our minds, and |
358 | // pass an address operand representing 'bar' to Foo instead. |
359 | // |
360 | // Because tmp() pins, the following is not an idiom you should use: |
361 | // |
362 | // Tmp tmp = this->tmp(bar); |
363 | // if (isValidForm(Foo, tmp.kind())) |
364 | // append(Foo, tmp); |
365 | // |
    // That's because if isValidForm() returns false, you will have already pinned 'bar' to a
    // temporary. You might later want to try something like loadPromise(), and that will fail.
    // This arises in operations that have both Addr,Tmp and Tmp,Addr forms. The following code
369 | // seems right, but will actually fail to ever match the Tmp,Addr form because by then, the right |
370 | // value is already pinned. |
371 | // |
372 | // auto tryThings = [this] (const Arg& left, const Arg& right) { |
373 | // if (isValidForm(Foo, left.kind(), right.kind())) |
374 | // return Inst(Foo, m_value, left, right); |
375 | // return Inst(); |
376 | // }; |
377 | // if (Inst result = tryThings(loadAddr(left), tmp(right))) |
378 | // return result; |
379 | // if (Inst result = tryThings(tmp(left), loadAddr(right))) // this never succeeds. |
380 | // return result; |
381 | // return Inst(Foo, m_value, tmp(left), tmp(right)); |
382 | // |
383 | // If you imagine that loadAddr(value) is just loadPromise(value).consume(*this), then this code |
384 | // will run correctly - it will generate OK code - but the second form is never matched. |
385 | // loadAddr(right) will never succeed because it will observe that 'right' is already pinned. |
386 | // Of course, it's exactly because of the risky nature of such code that we don't have a |
    // loadAddr() helper and require you to balance ArgPromises in code like this. Such code will
388 | // work fine if written as: |
389 | // |
390 | // auto tryThings = [this] (ArgPromise& left, ArgPromise& right) { |
391 | // if (isValidForm(Foo, left.kind(), right.kind())) |
392 | // return left.inst(right.inst(Foo, m_value, left.consume(*this), right.consume(*this))); |
393 | // return Inst(); |
394 | // }; |
395 | // if (Inst result = tryThings(loadPromise(left), tmpPromise(right))) |
396 | // return result; |
397 | // if (Inst result = tryThings(tmpPromise(left), loadPromise(right))) |
398 | // return result; |
399 | // return Inst(Foo, m_value, tmp(left), tmp(right)); |
400 | // |
401 | // Notice that we did use tmp in the fall-back case at the end, because by then, we know for sure |
402 | // that we want a tmp. But using tmpPromise in the tryThings() calls ensures that doing so |
403 | // doesn't prevent us from trying loadPromise on the same value. |
404 | Tmp tmp(Value* value) |
405 | { |
406 | Tmp& tmp = m_valueToTmp[value]; |
407 | if (!tmp) { |
408 | while (shouldCopyPropagate(value)) |
409 | value = value->child(0); |
410 | |
411 | if (value->opcode() == FramePointer) |
412 | return Tmp(GPRInfo::callFrameRegister); |
413 | |
414 | Tmp& realTmp = m_valueToTmp[value]; |
415 | if (!realTmp) { |
416 | realTmp = m_code.newTmp(value->resultBank()); |
417 | if (m_procedure.isFastConstant(value->key())) |
418 | m_code.addFastTmp(realTmp); |
419 | if (B3LowerToAirInternal::verbose) |
                    dataLog("Tmp for ", *value, ": ", realTmp, "\n");
421 | } |
422 | tmp = realTmp; |
423 | } |
424 | return tmp; |
425 | } |
426 | |
427 | ArgPromise tmpPromise(Value* value) |
428 | { |
429 | return ArgPromise::tmp(value); |
430 | } |
431 | |
432 | Tmp tmpForType(Type type) |
433 | { |
434 | return m_code.newTmp(bankForType(type)); |
435 | } |
436 | |
437 | const Vector<Tmp>& tmpsForTuple(Value* tupleValue) |
438 | { |
439 | ASSERT(tupleValue->type().isTuple()); |
440 | |
441 | switch (tupleValue->opcode()) { |
442 | case Phi: |
443 | case Patchpoint: { |
444 | return m_tupleValueToTmps.find(tupleValue)->value; |
445 | } |
446 | case Get: |
447 | case Set: |
448 | return m_variableToTmps.find(tupleValue->as<VariableValue>()->variable())->value; |
449 | default: |
450 | break; |
451 | } |
452 | RELEASE_ASSERT_NOT_REACHED(); |
453 | } |
454 | |
455 | bool canBeInternal(Value* value) |
456 | { |
457 | // If one of the internal things has already been computed, then we don't want to cause |
458 | // it to be recomputed again. |
459 | if (m_valueToTmp[value]) |
460 | return false; |
461 | |
462 | // We require internals to have only one use - us. It's not clear if this should be numUses() or |
463 | // numUsingInstructions(). Ideally, it would be numUsingInstructions(), except that it's not clear |
464 | // if we'd actually do the right thing when matching over such a DAG pattern. For now, it simply |
465 | // doesn't matter because we don't implement patterns that would trigger this. |
466 | if (m_useCounts.numUses(value) != 1) |
467 | return false; |
468 | |
469 | return true; |
470 | } |
471 | |
472 | // If you ask canBeInternal() and then construct something from that, and you commit to emitting |
473 | // that code, then you must commitInternal() on that value. This is tricky, and you only need to |
474 | // do it if you're pattern matching by hand rather than using the patterns language. Long story |
475 | // short, you should avoid this by using the pattern matcher to match patterns. |
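    //
    // A rough sketch of the contract, with a made-up opcode Foo purely for illustration (real
    // matches usually go through loadPromise(), whose consume() handles this for you):
    //
    //     Value* load = m_value->child(0);
    //     if (canBeInternal(load) && isValidForm(Foo, Arg::Addr, Arg::Tmp)) {
    //         append(Foo, addr(load), tmp(m_value));
    //         commitInternal(load); // we committed to code that folds 'load', so lock it.
    //         return;
    //     }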
476 | void commitInternal(Value* value) |
477 | { |
478 | if (value) |
479 | m_locked.add(value); |
480 | } |
481 | |
482 | bool crossesInterference(Value* value) |
483 | { |
484 | // If it's in a foreign block, then be conservative. We could handle this if we were |
485 | // willing to do heavier analysis. For example, if we had liveness, then we could label |
486 | // values as "crossing interference" if they interfere with anything that they are live |
487 | // across. But, it's not clear how useful this would be. |
488 | if (value->owner != m_value->owner) |
489 | return true; |
490 | |
491 | Effects effects = value->effects(); |
492 | |
493 | for (unsigned i = m_index; i--;) { |
494 | Value* otherValue = m_block->at(i); |
495 | if (otherValue == value) |
496 | return false; |
497 | if (effects.interferes(otherValue->effects())) |
498 | return true; |
499 | } |
500 | |
501 | ASSERT_NOT_REACHED(); |
502 | return true; |
503 | } |
504 | |
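    // Matches a Shl that computes index * 2^logScale and returns the byte scale (1 << logScale)
    // when the target's index addressing accepts that scale for this offset (and width, if
    // given); for example, Shl(@i, 2) yields a scale of 4. Otherwise returns WTF::nullopt.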
505 | template<typename Int, typename = Value::IsLegalOffset<Int>> |
506 | Optional<unsigned> scaleForShl(Value* shl, Int offset, Optional<Width> width = WTF::nullopt) |
507 | { |
508 | if (shl->opcode() != Shl) |
509 | return WTF::nullopt; |
510 | if (!shl->child(1)->hasInt32()) |
511 | return WTF::nullopt; |
512 | unsigned logScale = shl->child(1)->asInt32(); |
513 | if (shl->type() == Int32) |
514 | logScale &= 31; |
515 | else |
516 | logScale &= 63; |
517 | // Use 64-bit math to perform the shift so that <<32 does the right thing, but then switch |
518 | // to signed since that's what all of our APIs want. |
519 | int64_t bigScale = static_cast<uint64_t>(1) << static_cast<uint64_t>(logScale); |
520 | if (!isRepresentableAs<int32_t>(bigScale)) |
521 | return WTF::nullopt; |
522 | unsigned scale = static_cast<int32_t>(bigScale); |
523 | if (!Arg::isValidIndexForm(scale, offset, width)) |
524 | return WTF::nullopt; |
525 | return scale; |
526 | } |
527 | |
528 | // This turns the given operand into an address. |
529 | template<typename Int, typename = Value::IsLegalOffset<Int>> |
530 | Arg effectiveAddr(Value* address, Int offset, Width width) |
531 | { |
532 | ASSERT(Arg::isValidAddrForm(offset, width)); |
533 | |
534 | auto fallback = [&] () -> Arg { |
535 | return Arg::addr(tmp(address), offset); |
536 | }; |
537 | |
538 | static constexpr unsigned lotsOfUses = 10; // This is arbitrary and we should tune it eventually. |
539 | |
540 | // Only match if the address value isn't used in some large number of places. |
541 | if (m_useCounts.numUses(address) > lotsOfUses) |
542 | return fallback(); |
543 | |
544 | switch (address->opcode()) { |
545 | case Add: { |
546 | Value* left = address->child(0); |
547 | Value* right = address->child(1); |
548 | |
549 | auto tryIndex = [&] (Value* index, Value* base) -> Arg { |
550 | Optional<unsigned> scale = scaleForShl(index, offset, width); |
551 | if (!scale) |
552 | return Arg(); |
553 | if (m_locked.contains(index->child(0)) || m_locked.contains(base)) |
554 | return Arg(); |
555 | return Arg::index(tmp(base), tmp(index->child(0)), *scale, offset); |
556 | }; |
557 | |
558 | if (Arg result = tryIndex(left, right)) |
559 | return result; |
560 | if (Arg result = tryIndex(right, left)) |
561 | return result; |
562 | |
563 | if (m_locked.contains(left) || m_locked.contains(right) |
564 | || !Arg::isValidIndexForm(1, offset, width)) |
565 | return fallback(); |
566 | |
567 | return Arg::index(tmp(left), tmp(right), 1, offset); |
568 | } |
569 | |
570 | case Shl: { |
571 | Value* left = address->child(0); |
572 | |
573 | // We'll never see child(1)->isInt32(0), since that would have been reduced. If the shift |
574 | // amount is greater than 1, then there isn't really anything smart that we could do here. |
575 | // We avoid using baseless indexes because their encoding isn't particularly efficient. |
576 | if (m_locked.contains(left) || !address->child(1)->isInt32(1) |
577 | || !Arg::isValidIndexForm(1, offset, width)) |
578 | return fallback(); |
579 | |
580 | return Arg::index(tmp(left), tmp(left), 1, offset); |
581 | } |
582 | |
583 | case FramePointer: |
584 | return Arg::addr(Tmp(GPRInfo::callFrameRegister), offset); |
585 | |
586 | case SlotBase: |
587 | return Arg::stack(m_stackToStack.get(address->as<SlotBaseValue>()->slot()), offset); |
588 | |
589 | case WasmAddress: { |
590 | WasmAddressValue* wasmAddress = address->as<WasmAddressValue>(); |
591 | Value* pointer = wasmAddress->child(0); |
592 | if (!Arg::isValidIndexForm(1, offset, width) || m_locked.contains(pointer)) |
593 | return fallback(); |
594 | |
595 | // FIXME: We should support ARM64 LDR 32-bit addressing, which will |
596 | // allow us to fuse a Shl ptr, 2 into the address. Additionally, and |
597 | // perhaps more importantly, it would allow us to avoid a truncating |
598 | // move. See: https://bugs.webkit.org/show_bug.cgi?id=163465 |
599 | |
600 | return Arg::index(Tmp(wasmAddress->pinnedGPR()), tmp(pointer), 1, offset); |
601 | } |
602 | |
603 | default: |
604 | return fallback(); |
605 | } |
606 | } |
607 | |
608 | // This gives you the address of the given Load or Store. If it's not a Load or Store, then |
609 | // it returns Arg(). |
610 | Arg addr(Value* memoryValue) |
611 | { |
612 | MemoryValue* value = memoryValue->as<MemoryValue>(); |
613 | if (!value) |
614 | return Arg(); |
615 | |
616 | if (value->requiresSimpleAddr()) |
617 | return Arg::simpleAddr(tmp(value->lastChild())); |
618 | |
619 | Value::OffsetType offset = value->offset(); |
620 | Width width = value->accessWidth(); |
621 | |
622 | Arg result = effectiveAddr(value->lastChild(), offset, width); |
623 | RELEASE_ASSERT(result.isValidForm(width)); |
624 | |
625 | return result; |
626 | } |
627 | |
628 | template<typename... Args> |
629 | Inst trappingInst(bool traps, Args&&... args) |
630 | { |
631 | Inst result(std::forward<Args>(args)...); |
632 | result.kind.effects |= traps; |
633 | return result; |
634 | } |
635 | |
636 | template<typename... Args> |
637 | Inst trappingInst(Value* value, Args&&... args) |
638 | { |
639 | return trappingInst(value->traps(), std::forward<Args>(args)...); |
640 | } |
641 | |
642 | ArgPromise loadPromiseAnyOpcode(Value* loadValue) |
643 | { |
644 | RELEASE_ASSERT(loadValue->as<MemoryValue>()); |
645 | if (!canBeInternal(loadValue)) |
646 | return Arg(); |
647 | if (crossesInterference(loadValue)) |
648 | return Arg(); |
649 | // On x86, all loads have fences. Doing this kind of instruction selection will move the load, |
650 | // but that's fine because our interference analysis stops the motion of fences around other |
651 | // fences. So, any load motion we introduce here would not be observable. |
652 | if (!isX86() && loadValue->as<MemoryValue>()->hasFence()) |
653 | return Arg(); |
654 | Arg loadAddr = addr(loadValue); |
655 | RELEASE_ASSERT(loadAddr); |
656 | ArgPromise result(loadAddr, loadValue); |
657 | if (loadValue->traps()) |
658 | result.setTraps(true); |
659 | return result; |
660 | } |
661 | |
662 | ArgPromise loadPromise(Value* loadValue, B3::Opcode loadOpcode) |
663 | { |
664 | if (loadValue->opcode() != loadOpcode) |
665 | return Arg(); |
666 | return loadPromiseAnyOpcode(loadValue); |
667 | } |
668 | |
669 | ArgPromise loadPromise(Value* loadValue) |
670 | { |
671 | return loadPromise(loadValue, Load); |
672 | } |
673 | |
674 | Arg imm(int64_t intValue) |
675 | { |
676 | if (Arg::isValidImmForm(intValue)) |
677 | return Arg::imm(intValue); |
678 | return Arg(); |
679 | } |
680 | |
681 | Arg imm(Value* value) |
682 | { |
683 | if (value->hasInt()) |
684 | return imm(value->asInt()); |
685 | return Arg(); |
686 | } |
687 | |
688 | Arg bitImm(Value* value) |
689 | { |
690 | if (value->hasInt()) { |
691 | int64_t intValue = value->asInt(); |
692 | if (Arg::isValidBitImmForm(intValue)) |
693 | return Arg::bitImm(intValue); |
694 | } |
695 | return Arg(); |
696 | } |
697 | |
698 | Arg bitImm64(Value* value) |
699 | { |
700 | if (value->hasInt()) { |
701 | int64_t intValue = value->asInt(); |
702 | if (Arg::isValidBitImm64Form(intValue)) |
703 | return Arg::bitImm64(intValue); |
704 | } |
705 | return Arg(); |
706 | } |
707 | |
708 | Arg immOrTmp(Value* value) |
709 | { |
710 | if (Arg result = imm(value)) |
711 | return result; |
712 | return tmp(value); |
713 | } |
714 | |
715 | template<typename Functor> |
716 | void forEachImmOrTmp(Value* value, const Functor& func) |
717 | { |
718 | ASSERT(value->type() != Void); |
719 | if (!value->type().isTuple()) { |
720 | func(immOrTmp(value), value->type(), 0); |
721 | return; |
722 | } |
723 | |
724 | const Vector<Type>& tuple = m_procedure.tupleForType(value->type()); |
725 | const auto& tmps = tmpsForTuple(value); |
726 | for (unsigned i = 0; i < tuple.size(); ++i) |
727 | func(tmps[i], tuple[i], i); |
728 | } |
729 | |
730 | // By convention, we use Oops to mean "I don't know". |
731 | Air::Opcode tryOpcodeForType( |
732 | Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Type type) |
733 | { |
734 | Air::Opcode opcode; |
735 | switch (type.kind()) { |
736 | case Int32: |
737 | opcode = opcode32; |
738 | break; |
739 | case Int64: |
740 | opcode = opcode64; |
741 | break; |
742 | case Float: |
743 | opcode = opcodeFloat; |
744 | break; |
745 | case Double: |
746 | opcode = opcodeDouble; |
747 | break; |
748 | default: |
749 | opcode = Air::Oops; |
750 | break; |
751 | } |
752 | |
753 | return opcode; |
754 | } |
755 | |
756 | Air::Opcode tryOpcodeForType(Air::Opcode opcode32, Air::Opcode opcode64, Type type) |
757 | { |
758 | return tryOpcodeForType(opcode32, opcode64, Air::Oops, Air::Oops, type); |
759 | } |
760 | |
761 | Air::Opcode opcodeForType( |
762 | Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Type type) |
763 | { |
764 | Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, type); |
765 | RELEASE_ASSERT(opcode != Air::Oops); |
766 | return opcode; |
767 | } |
768 | |
769 | Air::Opcode opcodeForType(Air::Opcode opcode32, Air::Opcode opcode64, Type type) |
770 | { |
        return opcodeForType(opcode32, opcode64, Air::Oops, Air::Oops, type);
772 | } |
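    // For example, opcodeForType(Add32, Add64, Int64) is Air::Add64, whereas
    // tryOpcodeForType(Add32, Add64, Double) is Air::Oops because no floating-point opcode was
    // supplied.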
773 | |
774 | template<Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble = Air::Oops, Air::Opcode opcodeFloat = Air::Oops> |
775 | void appendUnOp(Value* value) |
776 | { |
777 | Air::Opcode opcode = opcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, value->type()); |
778 | |
779 | Tmp result = tmp(m_value); |
780 | |
781 | // Two operand forms like: |
782 | // Op a, b |
783 | // mean something like: |
784 | // b = Op a |
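        // For example, an in-place (Tmp) form (e.g. x86's Neg32) is handled by the fallback at
        // the bottom: move the operand into the result tmp, then apply the opcode to it.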
785 | |
786 | ArgPromise addr = loadPromise(value); |
787 | if (isValidForm(opcode, addr.kind(), Arg::Tmp)) { |
788 | append(addr.inst(opcode, m_value, addr.consume(*this), result)); |
789 | return; |
790 | } |
791 | |
792 | if (isValidForm(opcode, Arg::Tmp, Arg::Tmp)) { |
793 | append(opcode, tmp(value), result); |
794 | return; |
795 | } |
796 | |
797 | ASSERT(value->type() == m_value->type()); |
798 | append(relaxedMoveForType(m_value->type()), tmp(value), result); |
799 | append(opcode, result); |
800 | } |
801 | |
802 | // Call this method when doing two-operand lowering of a commutative operation. You have a choice of |
803 | // which incoming Value is moved into the result. This will select which one is likely to be most |
804 | // profitable to use as the result. Doing the right thing can have big performance consequences in tight |
805 | // kernels. |
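    // For example, when lowering a commutative Add(@x, @y) where @y has no other uses, preferring
    // right lets us emit "Move %y, %result; Add %x, %result", and coalescing can then kill the
    // Move because @y dies here.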
806 | bool preferRightForResult(Value* left, Value* right) |
807 | { |
808 | // The default is to move left into result, because that's required for non-commutative instructions. |
809 | // The value that we want to move into result position is the one that dies here. So, if we're |
810 | // compiling a commutative operation and we know that actually right is the one that dies right here, |
811 | // then we can flip things around to help coalescing, which then kills the move instruction. |
812 | // |
813 | // But it's more complicated: |
814 | // - Used-once is a bad estimate of whether the variable dies here. |
815 | // - A child might be a candidate for coalescing with this value. |
816 | // |
817 | // Currently, we have machinery in place to recognize super obvious forms of the latter issue. |
818 | |
819 | // We recognize when a child is a Phi that has this value as one of its children. We're very |
820 | // conservative about this; for example we don't even consider transitive Phi children. |
821 | bool leftIsPhiWithThis = m_phiChildren[left].transitivelyUses(m_value); |
822 | bool rightIsPhiWithThis = m_phiChildren[right].transitivelyUses(m_value); |
823 | |
824 | if (leftIsPhiWithThis != rightIsPhiWithThis) |
825 | return rightIsPhiWithThis; |
826 | |
827 | if (m_useCounts.numUsingInstructions(right) != 1) |
828 | return false; |
829 | |
830 | if (m_useCounts.numUsingInstructions(left) != 1) |
831 | return true; |
832 | |
833 | // The use count might be 1 if the variable is live around a loop. We can guarantee that we |
834 | // pick the variable that is least likely to suffer this problem if we pick the one that |
835 | // is closest to us in an idom walk. By convention, we slightly bias this in favor of |
836 | // returning true. |
837 | |
838 | // We cannot prefer right if right is further away in an idom walk. |
839 | if (m_dominators.strictlyDominates(right->owner, left->owner)) |
840 | return false; |
841 | |
842 | return true; |
843 | } |
844 | |
845 | template<Air::Opcode opcode32, Air::Opcode opcode64, Air::Opcode opcodeDouble, Air::Opcode opcodeFloat, Commutativity commutativity = NotCommutative> |
846 | void appendBinOp(Value* left, Value* right) |
847 | { |
848 | Air::Opcode opcode = opcodeForType(opcode32, opcode64, opcodeDouble, opcodeFloat, left->type()); |
849 | |
850 | Tmp result = tmp(m_value); |
851 | |
852 | // Three-operand forms like: |
853 | // Op a, b, c |
854 | // mean something like: |
855 | // c = a Op b |
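        // For instance, ARM64 has a (Tmp, Tmp, Tmp) form of Add64, so an Add can lower to a
        // single instruction with a destination distinct from both sources.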
856 | |
857 | if (isValidForm(opcode, Arg::Imm, Arg::Tmp, Arg::Tmp)) { |
858 | if (commutativity == Commutative) { |
859 | if (imm(right)) { |
860 | append(opcode, imm(right), tmp(left), result); |
861 | return; |
862 | } |
863 | } else { |
864 | // A non-commutative operation could have an immediate in left. |
865 | if (imm(left)) { |
866 | append(opcode, imm(left), tmp(right), result); |
867 | return; |
868 | } |
869 | } |
870 | } |
871 | |
872 | if (isValidForm(opcode, Arg::BitImm, Arg::Tmp, Arg::Tmp)) { |
873 | if (commutativity == Commutative) { |
874 | if (Arg rightArg = bitImm(right)) { |
875 | append(opcode, rightArg, tmp(left), result); |
876 | return; |
877 | } |
878 | } else { |
879 | // A non-commutative operation could have an immediate in left. |
880 | if (Arg leftArg = bitImm(left)) { |
881 | append(opcode, leftArg, tmp(right), result); |
882 | return; |
883 | } |
884 | } |
885 | } |
886 | |
887 | if (isValidForm(opcode, Arg::BitImm64, Arg::Tmp, Arg::Tmp)) { |
888 | if (commutativity == Commutative) { |
889 | if (Arg rightArg = bitImm64(right)) { |
890 | append(opcode, rightArg, tmp(left), result); |
891 | return; |
892 | } |
893 | } else { |
894 | // A non-commutative operation could have an immediate in left. |
895 | if (Arg leftArg = bitImm64(left)) { |
896 | append(opcode, leftArg, tmp(right), result); |
897 | return; |
898 | } |
899 | } |
900 | } |
901 | |
902 | if (imm(right) && isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) { |
903 | append(opcode, tmp(left), imm(right), result); |
904 | return; |
905 | } |
906 | |
907 | // Note that no extant architecture has a three-operand form of binary operations that also |
908 | // load from memory. If such an abomination did exist, we would handle it somewhere around |
909 | // here. |
910 | |
911 | // Two-operand forms like: |
912 | // Op a, b |
913 | // mean something like: |
914 | // b = b Op a |
915 | |
916 | // At this point, we prefer versions of the operation that have a fused load or an immediate |
917 | // over three operand forms. |
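        // For example, on x86-64 an Add whose right child is a fusible Load can use the
        // (Addr, Tmp) form below, folding the load into the add instead of emitting it separately.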
918 | |
919 | if (left != right) { |
920 | ArgPromise leftAddr = loadPromise(left); |
921 | if (isValidForm(opcode, leftAddr.kind(), Arg::Tmp, Arg::Tmp)) { |
922 | append(leftAddr.inst(opcode, m_value, leftAddr.consume(*this), tmp(right), result)); |
923 | return; |
924 | } |
925 | |
926 | if (commutativity == Commutative) { |
927 | if (isValidForm(opcode, leftAddr.kind(), Arg::Tmp)) { |
928 | append(relaxedMoveForType(m_value->type()), tmp(right), result); |
929 | append(leftAddr.inst(opcode, m_value, leftAddr.consume(*this), result)); |
930 | return; |
931 | } |
932 | } |
933 | |
934 | ArgPromise rightAddr = loadPromise(right); |
935 | if (isValidForm(opcode, Arg::Tmp, rightAddr.kind(), Arg::Tmp)) { |
936 | append(rightAddr.inst(opcode, m_value, tmp(left), rightAddr.consume(*this), result)); |
937 | return; |
938 | } |
939 | |
940 | if (commutativity == Commutative) { |
941 | if (isValidForm(opcode, rightAddr.kind(), Arg::Tmp, Arg::Tmp)) { |
942 | append(rightAddr.inst(opcode, m_value, rightAddr.consume(*this), tmp(left), result)); |
943 | return; |
944 | } |
945 | } |
946 | |
947 | if (isValidForm(opcode, rightAddr.kind(), Arg::Tmp)) { |
948 | append(relaxedMoveForType(m_value->type()), tmp(left), result); |
949 | append(rightAddr.inst(opcode, m_value, rightAddr.consume(*this), result)); |
950 | return; |
951 | } |
952 | } |
953 | |
954 | if (imm(right) && isValidForm(opcode, Arg::Imm, Arg::Tmp)) { |
955 | append(relaxedMoveForType(m_value->type()), tmp(left), result); |
956 | append(opcode, imm(right), result); |
957 | return; |
958 | } |
959 | |
960 | if (isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)) { |
961 | append(opcode, tmp(left), tmp(right), result); |
962 | return; |
963 | } |
964 | |
965 | if (commutativity == Commutative && preferRightForResult(left, right)) { |
966 | append(relaxedMoveForType(m_value->type()), tmp(right), result); |
967 | append(opcode, tmp(left), result); |
968 | return; |
969 | } |
970 | |
971 | append(relaxedMoveForType(m_value->type()), tmp(left), result); |
972 | append(opcode, tmp(right), result); |
973 | } |
974 | |
975 | template<Air::Opcode opcode32, Air::Opcode opcode64, Commutativity commutativity = NotCommutative> |
976 | void appendBinOp(Value* left, Value* right) |
977 | { |
978 | appendBinOp<opcode32, opcode64, Air::Oops, Air::Oops, commutativity>(left, right); |
979 | } |
980 | |
981 | template<Air::Opcode opcode32, Air::Opcode opcode64> |
982 | void appendShift(Value* value, Value* amount) |
983 | { |
984 | using namespace Air; |
985 | Air::Opcode opcode = opcodeForType(opcode32, opcode64, value->type()); |
986 | |
987 | if (imm(amount)) { |
988 | if (isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) { |
989 | append(opcode, tmp(value), imm(amount), tmp(m_value)); |
990 | return; |
991 | } |
992 | if (isValidForm(opcode, Arg::Imm, Arg::Tmp)) { |
993 | append(Move, tmp(value), tmp(m_value)); |
994 | append(opcode, imm(amount), tmp(m_value)); |
995 | return; |
996 | } |
997 | } |
998 | |
999 | if (isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)) { |
1000 | append(opcode, tmp(value), tmp(amount), tmp(m_value)); |
1001 | return; |
1002 | } |
1003 | |
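        // In practice this fallback is only reached on x86/x86_64, where a variable shift amount
        // must live in CL, so stage the amount in ecx.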
1004 | append(Move, tmp(value), tmp(m_value)); |
1005 | append(Move, tmp(amount), m_ecx); |
1006 | append(opcode, m_ecx, tmp(m_value)); |
1007 | } |
1008 | |
1009 | template<Air::Opcode opcode32, Air::Opcode opcode64> |
1010 | bool tryAppendStoreUnOp(Value* value) |
1011 | { |
1012 | Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, value->type()); |
1013 | if (opcode == Air::Oops) |
1014 | return false; |
1015 | |
1016 | Arg storeAddr = addr(m_value); |
1017 | ASSERT(storeAddr); |
1018 | |
1019 | ArgPromise loadPromise = this->loadPromise(value); |
1020 | if (loadPromise.peek() != storeAddr) |
1021 | return false; |
1022 | |
1023 | if (!isValidForm(opcode, storeAddr.kind())) |
1024 | return false; |
1025 | |
1026 | loadPromise.consume(*this); |
1027 | append(trappingInst(m_value, loadPromise.inst(opcode, m_value, storeAddr))); |
1028 | return true; |
1029 | } |
1030 | |
1031 | template< |
1032 | Air::Opcode opcode32, Air::Opcode opcode64, Commutativity commutativity = NotCommutative> |
1033 | bool tryAppendStoreBinOp(Value* left, Value* right) |
1034 | { |
1035 | RELEASE_ASSERT(m_value->as<MemoryValue>()); |
1036 | |
1037 | Air::Opcode opcode = tryOpcodeForType(opcode32, opcode64, left->type()); |
1038 | if (opcode == Air::Oops) |
1039 | return false; |
1040 | |
1041 | if (m_value->as<MemoryValue>()->hasFence()) |
1042 | return false; |
1043 | |
1044 | Arg storeAddr = addr(m_value); |
1045 | ASSERT(storeAddr); |
1046 | |
1047 | auto getLoadPromise = [&] (Value* load) -> ArgPromise { |
1048 | switch (m_value->opcode()) { |
1049 | case B3::Store: |
1050 | if (load->opcode() != B3::Load) |
1051 | return ArgPromise(); |
1052 | break; |
1053 | case B3::Store8: |
1054 | if (load->opcode() != B3::Load8Z && load->opcode() != B3::Load8S) |
1055 | return ArgPromise(); |
1056 | break; |
1057 | case B3::Store16: |
1058 | if (load->opcode() != B3::Load16Z && load->opcode() != B3::Load16S) |
1059 | return ArgPromise(); |
1060 | break; |
1061 | default: |
1062 | return ArgPromise(); |
1063 | } |
1064 | return loadPromiseAnyOpcode(load); |
1065 | }; |
1066 | |
1067 | ArgPromise loadPromise; |
1068 | Value* otherValue = nullptr; |
1069 | |
1070 | loadPromise = getLoadPromise(left); |
1071 | if (loadPromise.peek() == storeAddr) |
1072 | otherValue = right; |
1073 | else if (commutativity == Commutative) { |
1074 | loadPromise = getLoadPromise(right); |
1075 | if (loadPromise.peek() == storeAddr) |
1076 | otherValue = left; |
1077 | } |
1078 | |
1079 | if (!otherValue) |
1080 | return false; |
1081 | |
1082 | if (isValidForm(opcode, Arg::Imm, storeAddr.kind()) && imm(otherValue)) { |
1083 | loadPromise.consume(*this); |
1084 | append(trappingInst(m_value, loadPromise.inst(opcode, m_value, imm(otherValue), storeAddr))); |
1085 | return true; |
1086 | } |
1087 | |
1088 | if (!isValidForm(opcode, Arg::Tmp, storeAddr.kind())) |
1089 | return false; |
1090 | |
1091 | loadPromise.consume(*this); |
1092 | append(trappingInst(m_value, loadPromise.inst(opcode, m_value, tmp(otherValue), storeAddr))); |
1093 | return true; |
1094 | } |
1095 | |
1096 | Inst createStore(Air::Kind move, Value* value, const Arg& dest) |
1097 | { |
1098 | using namespace Air; |
1099 | if (auto imm_value = imm(value)) { |
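            // On ARM64, a zero immediate can be stored directly (it comes from the zero
            // register), so prefer the dedicated StoreZero forms when the destination allows it.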
1100 | if (isARM64() && imm_value.value() == 0) { |
1101 | switch (move.opcode) { |
1102 | default: |
1103 | break; |
1104 | case Air::Move32: |
1105 | if (isValidForm(StoreZero32, dest.kind()) && dest.isValidForm(Width32)) |
1106 | return Inst(StoreZero32, m_value, dest); |
1107 | break; |
1108 | case Air::Move: |
1109 | if (isValidForm(StoreZero64, dest.kind()) && dest.isValidForm(Width64)) |
1110 | return Inst(StoreZero64, m_value, dest); |
1111 | break; |
1112 | } |
1113 | } |
1114 | if (isValidForm(move.opcode, Arg::Imm, dest.kind())) |
1115 | return Inst(move, m_value, imm_value, dest); |
1116 | } |
1117 | |
1118 | return Inst(move, m_value, tmp(value), dest); |
1119 | } |
1120 | |
1121 | Air::Opcode storeOpcode(Width width, Bank bank) |
1122 | { |
1123 | using namespace Air; |
1124 | switch (width) { |
1125 | case Width8: |
1126 | RELEASE_ASSERT(bank == GP); |
1127 | return Air::Store8; |
1128 | case Width16: |
1129 | RELEASE_ASSERT(bank == GP); |
1130 | return Air::Store16; |
1131 | case Width32: |
1132 | switch (bank) { |
1133 | case GP: |
1134 | return Move32; |
1135 | case FP: |
1136 | return MoveFloat; |
1137 | } |
1138 | break; |
1139 | case Width64: |
1140 | RELEASE_ASSERT(is64Bit()); |
1141 | switch (bank) { |
1142 | case GP: |
1143 | return Move; |
1144 | case FP: |
1145 | return MoveDouble; |
1146 | } |
1147 | break; |
1148 | } |
1149 | RELEASE_ASSERT_NOT_REACHED(); |
1150 | } |
1151 | |
1152 | void appendStore(Value* value, const Arg& dest) |
1153 | { |
1154 | using namespace Air; |
1155 | MemoryValue* memory = value->as<MemoryValue>(); |
1156 | RELEASE_ASSERT(memory->isStore()); |
1157 | |
1158 | Air::Kind kind; |
1159 | if (memory->hasFence()) { |
1160 | RELEASE_ASSERT(memory->accessBank() == GP); |
1161 | |
1162 | if (isX86()) { |
1163 | kind = OPCODE_FOR_WIDTH(Xchg, memory->accessWidth()); |
1164 | kind.effects = true; |
1165 | Tmp swapTmp = m_code.newTmp(GP); |
1166 | append(relaxedMoveForType(memory->accessType()), tmp(memory->child(0)), swapTmp); |
1167 | append(kind, swapTmp, dest); |
1168 | return; |
1169 | } |
1170 | |
1171 | kind = OPCODE_FOR_WIDTH(StoreRel, memory->accessWidth()); |
1172 | } else |
1173 | kind = storeOpcode(memory->accessWidth(), memory->accessBank()); |
1174 | |
1175 | kind.effects |= memory->traps(); |
1176 | |
1177 | append(createStore(kind, memory->child(0), dest)); |
1178 | } |
1179 | |
1180 | #if ENABLE(MASM_PROBE) |
1181 | template<typename... Arguments> |
1182 | void print(Arguments&&... arguments) |
1183 | { |
1184 | Value* origin = m_value; |
1185 | print(origin, std::forward<Arguments>(arguments)...); |
1186 | } |
1187 | |
1188 | template<typename... Arguments> |
1189 | void print(Value* origin, Arguments&&... arguments) |
1190 | { |
1191 | auto printList = Printer::makePrintRecordList(arguments...); |
1192 | auto printSpecial = static_cast<Air::PrintSpecial*>(m_code.addSpecial(makeUnique<Air::PrintSpecial>(printList))); |
1193 | Inst inst(Air::Patch, origin, Arg::special(printSpecial)); |
1194 | Printer::appendAirArgs(inst, std::forward<Arguments>(arguments)...); |
1195 | append(WTFMove(inst)); |
1196 | } |
1197 | #endif // ENABLE(MASM_PROBE) |
1198 | |
1199 | template<typename... Arguments> |
1200 | void append(Air::Kind kind, Arguments&&... arguments) |
1201 | { |
1202 | m_insts.last().append(Inst(kind, m_value, std::forward<Arguments>(arguments)...)); |
1203 | } |
1204 | |
1205 | template<typename... Arguments> |
1206 | void appendTrapping(Air::Kind kind, Arguments&&... arguments) |
1207 | { |
1208 | m_insts.last().append(trappingInst(m_value, kind, m_value, std::forward<Arguments>(arguments)...)); |
1209 | } |
1210 | |
1211 | void append(Inst&& inst) |
1212 | { |
1213 | m_insts.last().append(WTFMove(inst)); |
1214 | } |
1215 | void append(const Inst& inst) |
1216 | { |
1217 | m_insts.last().append(inst); |
1218 | } |
1219 | |
1220 | void finishAppendingInstructions(Air::BasicBlock* target) |
1221 | { |
1222 | // Now append the instructions. m_insts contains them in reverse order, so we process |
1223 | // it in reverse. |
1224 | for (unsigned i = m_insts.size(); i--;) { |
1225 | for (Inst& inst : m_insts[i]) |
1226 | target->appendInst(WTFMove(inst)); |
1227 | } |
1228 | m_insts.shrink(0); |
1229 | } |
1230 | |
1231 | Air::BasicBlock* newBlock() |
1232 | { |
1233 | return m_blockInsertionSet.insertAfter(m_blockToBlock[m_block]); |
1234 | } |
1235 | |
1236 | // NOTE: This will create a continuation block (`nextBlock`) *after* any blocks you've created using |
1237 | // newBlock(). So, it's preferable to create all of your blocks upfront using newBlock(). Also note |
1238 | // that any code you emit before this will be prepended to the continuation, and any code you emit |
1239 | // after this will be appended to the previous block. |
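    //
    // A rough usage sketch (block names are illustrative):
    //
    //     Air::BasicBlock* before;
    //     Air::BasicBlock* continuation;
    //     Air::BasicBlock* slowPath = newBlock(); // create any extra blocks first
    //     splitBlock(before, continuation);
    //     // Instructions appended from here on end up in 'before'; everything appended
    //     // earlier for this value ends up in 'continuation'.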
1240 | void splitBlock(Air::BasicBlock*& previousBlock, Air::BasicBlock*& nextBlock) |
1241 | { |
1242 | Air::BasicBlock* block = m_blockToBlock[m_block]; |
1243 | |
1244 | previousBlock = block; |
1245 | nextBlock = m_blockInsertionSet.insertAfter(block); |
1246 | |
1247 | finishAppendingInstructions(nextBlock); |
1248 | nextBlock->successors() = block->successors(); |
1249 | block->successors().clear(); |
1250 | |
1251 | m_insts.append(Vector<Inst>()); |
1252 | } |
1253 | |
1254 | template<typename T, typename... Arguments> |
1255 | T* ensureSpecial(T*& field, Arguments&&... arguments) |
1256 | { |
1257 | if (!field) { |
1258 | field = static_cast<T*>( |
1259 | m_code.addSpecial(makeUnique<T>(std::forward<Arguments>(arguments)...))); |
1260 | } |
1261 | return field; |
1262 | } |
1263 | |
1264 | template<typename... Arguments> |
1265 | CheckSpecial* ensureCheckSpecial(Arguments&&... arguments) |
1266 | { |
1267 | CheckSpecial::Key key(std::forward<Arguments>(arguments)...); |
1268 | auto result = m_checkSpecials.add(key, nullptr); |
1269 | return ensureSpecial(result.iterator->value, key); |
1270 | } |
1271 | |
1272 | void fillStackmap(Inst& inst, StackmapValue* stackmap, unsigned numSkipped) |
1273 | { |
1274 | for (unsigned i = numSkipped; i < stackmap->numChildren(); ++i) { |
1275 | ConstrainedValue value = stackmap->constrainedChild(i); |
1276 | |
1277 | Arg arg; |
1278 | switch (value.rep().kind()) { |
1279 | case ValueRep::WarmAny: |
1280 | case ValueRep::ColdAny: |
1281 | case ValueRep::LateColdAny: |
1282 | if (imm(value.value())) |
1283 | arg = imm(value.value()); |
1284 | else if (value.value()->hasInt64()) |
1285 | arg = Arg::bigImm(value.value()->asInt64()); |
1286 | else if (value.value()->hasDouble() && canBeInternal(value.value())) { |
1287 | commitInternal(value.value()); |
1288 | arg = Arg::bigImm(bitwise_cast<int64_t>(value.value()->asDouble())); |
1289 | } else if (value.value()->hasFloat() && canBeInternal(value.value())) { |
1290 | commitInternal(value.value()); |
1291 | arg = Arg::bigImm(static_cast<uint64_t>(bitwise_cast<uint32_t>(value.value()->asFloat()))); |
1292 | } else |
1293 | arg = tmp(value.value()); |
1294 | break; |
1295 | case ValueRep::SomeRegister: |
1296 | case ValueRep::SomeLateRegister: |
1297 | arg = tmp(value.value()); |
1298 | break; |
1299 | case ValueRep::SomeRegisterWithClobber: { |
1300 | Tmp dstTmp = m_code.newTmp(value.value()->resultBank()); |
1301 | append(relaxedMoveForType(value.value()->type()), immOrTmp(value.value()), dstTmp); |
1302 | arg = dstTmp; |
1303 | break; |
1304 | } |
1305 | case ValueRep::LateRegister: |
1306 | case ValueRep::Register: |
1307 | stackmap->earlyClobbered().clear(value.rep().reg()); |
1308 | arg = Tmp(value.rep().reg()); |
1309 | append(relaxedMoveForType(value.value()->type()), immOrTmp(value.value()), arg); |
1310 | break; |
1311 | case ValueRep::StackArgument: |
1312 | arg = Arg::callArg(value.rep().offsetFromSP()); |
1313 | append(trappingInst(m_value, createStore(moveForType(value.value()->type()), value.value(), arg))); |
1314 | break; |
1315 | default: |
1316 | RELEASE_ASSERT_NOT_REACHED(); |
1317 | break; |
1318 | } |
1319 | inst.args.append(arg); |
1320 | } |
1321 | } |
1322 | |
1323 | // Create an Inst to do the comparison specified by the given value. |
1324 | template<typename CompareFunctor, typename TestFunctor, typename CompareDoubleFunctor, typename CompareFloatFunctor> |
1325 | Inst createGenericCompare( |
1326 | Value* value, |
1327 | const CompareFunctor& compare, // Signature: (Width, Arg relCond, Arg, Arg) -> Inst |
1328 | const TestFunctor& test, // Signature: (Width, Arg resCond, Arg, Arg) -> Inst |
1329 | const CompareDoubleFunctor& compareDouble, // Signature: (Arg doubleCond, Arg, Arg) -> Inst |
1330 | const CompareFloatFunctor& compareFloat, // Signature: (Arg doubleCond, Arg, Arg) -> Inst |
1331 | bool inverted = false) |
1332 | { |
1333 | // NOTE: This is totally happy to match comparisons that have already been computed elsewhere |
1334 | // since on most architectures, the cost of branching on a previously computed comparison |
1335 | // result is almost always higher than just doing another fused compare/branch. The only time |
1336 | // it could be worse is if we have a binary comparison and both operands are variables (not |
1337 | // constants), and we encounter register pressure. Even in this case, duplicating the compare |
1338 | // so that we can fuse it to the branch will be more efficient most of the time, since |
1339 | // register pressure is not *that* common. For this reason, this algorithm will always |
1340 | // duplicate the comparison. |
1341 | // |
1342 | // However, we cannot duplicate loads. The canBeInternal() on a load will assume that we |
1343 | // already validated canBeInternal() on all of the values that got us to the load. So, even |
1344 | // if we are sharing a value, we still need to call canBeInternal() for the purpose of |
1345 | // tracking whether we are still in good shape to fuse loads. |
1346 | // |
1347 | // We could even have a chain of compare values that we fuse, and any member of the chain |
1348 | // could be shared. Once any of them are shared, then the shared one's transitive children |
1349 | // cannot be locked (i.e. commitInternal()). But if none of them are shared, then we want to |
1350 | // lock all of them because that's a prerequisite to fusing the loads so that the loads don't |
1351 | // get duplicated. For example, we might have: |
1352 | // |
1353 | // @tmp1 = LessThan(@a, @b) |
1354 | // @tmp2 = Equal(@tmp1, 0) |
1355 | // Branch(@tmp2) |
1356 | // |
1357 | // If either @a or @b are loads, then we want to have locked @tmp1 and @tmp2 so that they |
1358 | // don't emit the loads a second time. But if we had another use of @tmp2, then we cannot |
1359 | // lock @tmp1 (or @a or @b) because then we'll get into trouble when the other values that |
1360 | // try to share @tmp1 with us try to do their lowering. |
1361 | // |
1362 | // There's one more wrinkle. If we don't lock an internal value, then this internal value may |
1363 | // have already separately locked its children. So, if we're not locking a value then we need |
1364 | // to make sure that its children aren't locked. We encapsulate this in two ways: |
1365 | // |
1366 | // canCommitInternal: This variable tells us if the values that we've fused so far are |
1367 | // locked. This means that we're not sharing any of them with anyone. This permits us to fuse |
1368 | // loads. If it's false, then we cannot fuse loads and we also need to ensure that the |
1369 | // children of any values we try to fuse-by-sharing are not already locked. You don't have to |
1370 | // worry about the children locking thing if you use prepareToFuse() before trying to fuse a |
1371 | // sharable value. But, you do need to guard any load fusion by checking if canCommitInternal |
1372 | // is true. |
1373 | // |
1374 | // FusionResult prepareToFuse(value): Call this when you think that you would like to fuse |
1375 | // some value and that value is not a load. It will automatically handle the shared-or-locked |
1376 | // issues and it will clear canCommitInternal if necessary. This will return CannotFuse |
1377 | // (which acts like false) if the value cannot be locked and its children are locked. That's |
1378 | // rare, but you just need to make sure that you do smart things when this happens (i.e. just |
1379 | // use the value rather than trying to fuse it). After you call prepareToFuse(), you can |
1380 | // still change your mind about whether you will actually fuse the value. If you do fuse it, |
1381 | // you need to call commitFusion(value, fusionResult). |
1382 | // |
1383 | // commitFusion(value, fusionResult): Handles calling commitInternal(value) if fusionResult |
1384 | // is FuseAndCommit. |
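        //
        // Roughly, the protocol below is used like this (sketch):
        //
        //     FusionResult fusionResult = prepareToFuse(sharableValue);
        //     if (fusionResult != CannotFuse) {
        //         commitFusion(sharableValue, fusionResult);
        //         ... fuse sharableValue into the compare or branch ...
        //     }
        //     if (canCommitInternal) {
        //         ... only now is it also safe to fuse loads ...
        //     }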
1385 | |
1386 | bool canCommitInternal = true; |
1387 | |
1388 | enum FusionResult { |
1389 | CannotFuse, |
1390 | FuseAndCommit, |
1391 | Fuse |
1392 | }; |
1393 | auto prepareToFuse = [&] (Value* value) -> FusionResult { |
1394 | if (value == m_value) { |
1395 | // It's not actually internal. It's the root value. We're good to go. |
1396 | return Fuse; |
1397 | } |
1398 | |
1399 | if (canCommitInternal && canBeInternal(value)) { |
1400 | // We are the only users of this value. This also means that the value's children |
1401 | // could not have been locked, since we have now proved that m_value dominates value |
                // in the data flow graph. The only other way to reach value is from a user of m_value. If
1403 | // value's children are shared with others, then they could not have been locked |
1404 | // because their use count is greater than 1. If they are only used from value, then |
1405 | // in order for value's children to be locked, value would also have to be locked, |
1406 | // and we just proved that it wasn't. |
1407 | return FuseAndCommit; |
1408 | } |
1409 | |
1410 | // We're going to try to share value with others. It's possible that some other basic |
1411 | // block had already emitted code for value and then matched over its children and then |
1412 | // locked them, in which case we just want to use value instead of duplicating it. So, we |
1413 | // validate the children. Note that this only arises in linear chains like: |
1414 | // |
1415 | // BB#1: |
1416 | // @1 = Foo(...) |
1417 | // @2 = Bar(@1) |
1418 | // Jump(#2) |
1419 | // BB#2: |
1420 | // @3 = Baz(@2) |
1421 | // |
1422 | // Notice how we could start by generating code for BB#1 and then decide to lock @1 when |
1423 | // generating code for @2, if we have some way of fusing Bar and Foo into a single |
            // instruction. This is legal, since indeed @1 only has one user. Because @2 now
            // has a tmp (i.e. @2 is pinned), canBeInternal(@2) will return false, which brings us
1426 | // here. In that case, we cannot match over @2 because then we'd hit a hazard if we end |
1427 | // up deciding not to fuse Foo into the fused Baz/Bar. |
1428 | // |
            // Happily, the only two places where this kind of child validation happens are rules
            // that admit sharing, like this one and effectiveAddr().
1431 | // |
1432 | // N.B. We could probably avoid the need to do value locking if we committed to a well |
1433 | // chosen code generation order. For example, if we guaranteed that all of the users of |
1434 | // a value get generated before that value, then there's no way for the lowering of @3 to |
1435 | // see @1 locked. But we don't want to do that, since this is a greedy instruction |
1436 | // selector and so we want to be able to play with order. |
1437 | for (Value* child : value->children()) { |
1438 | if (m_locked.contains(child)) |
1439 | return CannotFuse; |
1440 | } |
1441 | |
1442 | // It's safe to share value, but since we're sharing, it means that we aren't locking it. |
1443 | // If we don't lock it, then fusing loads is off limits and all of value's children will |
1444 | // have to go through the sharing path as well. Fusing loads is off limits because the load |
            // could already have been emitted elsewhere - so fusing it here would duplicate the load.
1446 | // We don't consider that to be a legal optimization. |
1447 | canCommitInternal = false; |
1448 | |
1449 | return Fuse; |
1450 | }; |
1451 | |
1452 | auto commitFusion = [&] (Value* value, FusionResult result) { |
1453 | if (result == FuseAndCommit) |
1454 | commitInternal(value); |
1455 | }; |
1456 | |
1457 | // Chew through any inversions. This loop isn't necessary for comparisons and branches, but |
1458 | // we do need at least one iteration of it for Check. |
1459 | for (;;) { |
1460 | bool shouldInvert = |
1461 | (value->opcode() == BitXor && value->child(1)->hasInt() && (value->child(1)->asInt() == 1) && value->child(0)->returnsBool()) |
1462 | || (value->opcode() == Equal && value->child(1)->isInt(0)); |
1463 | if (!shouldInvert) |
1464 | break; |
1465 | |
1466 | FusionResult fusionResult = prepareToFuse(value); |
1467 | if (fusionResult == CannotFuse) |
1468 | break; |
1469 | commitFusion(value, fusionResult); |
1470 | |
1471 | value = value->child(0); |
1472 | inverted = !inverted; |
1473 | } |
1474 | |
1475 | auto createRelCond = [&] ( |
1476 | MacroAssembler::RelationalCondition relationalCondition, |
1477 | MacroAssembler::DoubleCondition doubleCondition) { |
1478 | Arg relCond = Arg::relCond(relationalCondition).inverted(inverted); |
1479 | Arg doubleCond = Arg::doubleCond(doubleCondition).inverted(inverted); |
1480 | Value* left = value->child(0); |
1481 | Value* right = value->child(1); |
1482 | |
1483 | if (value->child(0)->type().isInt()) { |
1484 | Arg rightImm = imm(right); |
1485 | |
1486 | auto tryCompare = [&] ( |
1487 | Width width, ArgPromise&& left, ArgPromise&& right) -> Inst { |
1488 | if (Inst result = compare(width, relCond, left, right)) |
1489 | return result; |
1490 | if (Inst result = compare(width, relCond.flipped(), right, left)) |
1491 | return result; |
1492 | return Inst(); |
1493 | }; |
1494 | |
1495 | auto tryCompareLoadImm = [&] ( |
1496 | Width width, B3::Opcode loadOpcode, Arg::Signedness signedness) -> Inst { |
1497 | if (rightImm && rightImm.isRepresentableAs(width, signedness)) { |
1498 | if (Inst result = tryCompare(width, loadPromise(left, loadOpcode), rightImm)) { |
1499 | commitInternal(left); |
1500 | return result; |
1501 | } |
1502 | } |
1503 | return Inst(); |
1504 | }; |
1505 | |
1506 | Width width = value->child(0)->resultWidth(); |
1507 | |
1508 | if (canCommitInternal) { |
1509 | // First handle compares that involve fewer bits than B3's type system supports. |
1510 | // This is pretty important. For example, we want this to be a single |
1511 | // instruction: |
1512 | // |
1513 | // @1 = Load8S(...) |
1514 | // @2 = Const32(...) |
1515 | // @3 = LessThan(@1, @2) |
1516 | // Branch(@3) |
1517 | |
1518 | if (relCond.isSignedCond()) { |
1519 | if (Inst result = tryCompareLoadImm(Width8, Load8S, Arg::Signed)) |
1520 | return result; |
1521 | } |
1522 | |
1523 | if (relCond.isUnsignedCond()) { |
1524 | if (Inst result = tryCompareLoadImm(Width8, Load8Z, Arg::Unsigned)) |
1525 | return result; |
1526 | } |
1527 | |
1528 | if (relCond.isSignedCond()) { |
1529 | if (Inst result = tryCompareLoadImm(Width16, Load16S, Arg::Signed)) |
1530 | return result; |
1531 | } |
1532 | |
1533 | if (relCond.isUnsignedCond()) { |
1534 | if (Inst result = tryCompareLoadImm(Width16, Load16Z, Arg::Unsigned)) |
1535 | return result; |
1536 | } |
1537 | |
1538 | // Now handle compares that involve a load and an immediate. |
1539 | |
1540 | if (Inst result = tryCompareLoadImm(width, Load, Arg::Signed)) |
1541 | return result; |
1542 | |
// Now handle compares that involve a load. It's not obvious whether it's better to
// handle this before or after the immediate cases. Probably doesn't matter.
1545 | |
1546 | if (Inst result = tryCompare(width, loadPromise(left), tmpPromise(right))) { |
1547 | commitInternal(left); |
1548 | return result; |
1549 | } |
1550 | |
1551 | if (Inst result = tryCompare(width, tmpPromise(left), loadPromise(right))) { |
1552 | commitInternal(right); |
1553 | return result; |
1554 | } |
1555 | } |
1556 | |
1557 | // Now handle compares that involve an immediate and a tmp. |
1558 | |
1559 | if (rightImm && rightImm.isRepresentableAs<int32_t>()) { |
1560 | if (Inst result = tryCompare(width, tmpPromise(left), rightImm)) |
1561 | return result; |
1562 | } |
1563 | |
1564 | // Finally, handle comparison between tmps. |
1565 | ArgPromise leftPromise = tmpPromise(left); |
1566 | ArgPromise rightPromise = tmpPromise(right); |
1567 | return compare(width, relCond, leftPromise, rightPromise); |
1568 | } |
1569 | |
1570 | // Floating point comparisons can't really do anything smart. |
1571 | ArgPromise leftPromise = tmpPromise(left); |
1572 | ArgPromise rightPromise = tmpPromise(right); |
1573 | if (value->child(0)->type() == Float) |
1574 | return compareFloat(doubleCond, leftPromise, rightPromise); |
1575 | return compareDouble(doubleCond, leftPromise, rightPromise); |
1576 | }; |
1577 | |
1578 | Width width = value->resultWidth(); |
1579 | Arg resCond = Arg::resCond(MacroAssembler::NonZero).inverted(inverted); |
1580 | |
1581 | auto tryTest = [&] ( |
1582 | Width width, ArgPromise&& left, ArgPromise&& right) -> Inst { |
1583 | if (Inst result = test(width, resCond, left, right)) |
1584 | return result; |
1585 | if (Inst result = test(width, resCond, right, left)) |
1586 | return result; |
1587 | return Inst(); |
1588 | }; |
1589 | |
1590 | auto attemptFused = [&] () -> Inst { |
1591 | switch (value->opcode()) { |
1592 | case NotEqual: |
1593 | return createRelCond(MacroAssembler::NotEqual, MacroAssembler::DoubleNotEqualOrUnordered); |
1594 | case Equal: |
1595 | return createRelCond(MacroAssembler::Equal, MacroAssembler::DoubleEqual); |
1596 | case LessThan: |
1597 | return createRelCond(MacroAssembler::LessThan, MacroAssembler::DoubleLessThan); |
1598 | case GreaterThan: |
1599 | return createRelCond(MacroAssembler::GreaterThan, MacroAssembler::DoubleGreaterThan); |
1600 | case LessEqual: |
1601 | return createRelCond(MacroAssembler::LessThanOrEqual, MacroAssembler::DoubleLessThanOrEqual); |
1602 | case GreaterEqual: |
1603 | return createRelCond(MacroAssembler::GreaterThanOrEqual, MacroAssembler::DoubleGreaterThanOrEqual); |
1604 | case EqualOrUnordered: |
1605 | // The integer condition is never used in this case. |
1606 | return createRelCond(MacroAssembler::Equal, MacroAssembler::DoubleEqualOrUnordered); |
1607 | case Above: |
// We use a bogus double condition because these integer comparisons won't go down that
1609 | // path anyway. |
1610 | return createRelCond(MacroAssembler::Above, MacroAssembler::DoubleEqual); |
1611 | case Below: |
1612 | return createRelCond(MacroAssembler::Below, MacroAssembler::DoubleEqual); |
1613 | case AboveEqual: |
1614 | return createRelCond(MacroAssembler::AboveOrEqual, MacroAssembler::DoubleEqual); |
1615 | case BelowEqual: |
1616 | return createRelCond(MacroAssembler::BelowOrEqual, MacroAssembler::DoubleEqual); |
1617 | case BitAnd: { |
1618 | Value* left = value->child(0); |
1619 | Value* right = value->child(1); |
1620 | |
1621 | bool hasRightConst; |
1622 | int64_t rightConst; |
1623 | Arg rightImm; |
1624 | Arg rightImm64; |
1625 | |
1626 | hasRightConst = right->hasInt(); |
1627 | if (hasRightConst) { |
1628 | rightConst = right->asInt(); |
1629 | rightImm = bitImm(right); |
1630 | rightImm64 = bitImm64(right); |
1631 | } |
1632 | |
1633 | auto tryTestLoadImm = [&] (Width width, Arg::Signedness signedness, B3::Opcode loadOpcode) -> Inst { |
1634 | if (!hasRightConst) |
1635 | return Inst(); |
1636 | // Signed loads will create high bits, so if the immediate has high bits |
1637 | // then we cannot proceed. Consider BitAnd(Load8S(ptr), 0x101). This cannot |
1638 | // be turned into testb (ptr), $1, since if the high bit within that byte |
1639 | // was set then it would be extended to include 0x100. The handling below |
1640 | // won't anticipate this, so we need to catch it here. |
1641 | if (signedness == Arg::Signed |
1642 | && !Arg::isRepresentableAs(width, Arg::Unsigned, rightConst)) |
1643 | return Inst(); |
1644 | |
1645 | // FIXME: If this is unsigned then we can chop things off of the immediate. |
1646 | // This might make the immediate more legal. Perhaps that's a job for |
1647 | // strength reduction? |
1648 | // https://bugs.webkit.org/show_bug.cgi?id=169248 |
1649 | |
1650 | if (rightImm) { |
1651 | if (Inst result = tryTest(width, loadPromise(left, loadOpcode), rightImm)) { |
1652 | commitInternal(left); |
1653 | return result; |
1654 | } |
1655 | } |
1656 | if (rightImm64) { |
1657 | if (Inst result = tryTest(width, loadPromise(left, loadOpcode), rightImm64)) { |
1658 | commitInternal(left); |
1659 | return result; |
1660 | } |
1661 | } |
1662 | return Inst(); |
1663 | }; |
1664 | |
1665 | if (canCommitInternal) { |
// First handle tests that involve fewer bits than B3's type system supports.
1667 | |
1668 | if (Inst result = tryTestLoadImm(Width8, Arg::Unsigned, Load8Z)) |
1669 | return result; |
1670 | |
1671 | if (Inst result = tryTestLoadImm(Width8, Arg::Signed, Load8S)) |
1672 | return result; |
1673 | |
1674 | if (Inst result = tryTestLoadImm(Width16, Arg::Unsigned, Load16Z)) |
1675 | return result; |
1676 | |
1677 | if (Inst result = tryTestLoadImm(Width16, Arg::Signed, Load16S)) |
1678 | return result; |
1679 | |
1680 | // This allows us to use a 32-bit test for 64-bit BitAnd if the immediate is |
1681 | // representable as an unsigned 32-bit value. The logic involved is the same |
1682 | // as if we were pondering using a 32-bit test for |
1683 | // BitAnd(SExt(Load(ptr)), const), in the sense that in both cases we have |
1684 | // to worry about high bits. So, we use the "Signed" version of this helper. |
1685 | if (Inst result = tryTestLoadImm(Width32, Arg::Signed, Load)) |
1686 | return result; |
1687 | |
// This is needed to handle 32-bit tests with arbitrary 32-bit immediates.
1689 | if (Inst result = tryTestLoadImm(width, Arg::Unsigned, Load)) |
1690 | return result; |
1691 | |
// Now handle tests that involve a load.
1693 | |
1694 | Width width = value->child(0)->resultWidth(); |
1695 | if (Inst result = tryTest(width, loadPromise(left), tmpPromise(right))) { |
1696 | commitInternal(left); |
1697 | return result; |
1698 | } |
1699 | |
1700 | if (Inst result = tryTest(width, tmpPromise(left), loadPromise(right))) { |
1701 | commitInternal(right); |
1702 | return result; |
1703 | } |
1704 | } |
1705 | |
// Now handle tests that involve an immediate and a tmp.
1707 | |
1708 | if (hasRightConst) { |
1709 | if ((width == Width32 && rightConst == 0xffffffff) |
1710 | || (width == Width64 && rightConst == -1)) { |
1711 | if (Inst result = tryTest(width, tmpPromise(left), tmpPromise(left))) |
1712 | return result; |
1713 | } |
1714 | if (isRepresentableAs<uint32_t>(rightConst)) { |
1715 | if (Inst result = tryTest(Width32, tmpPromise(left), rightImm)) |
1716 | return result; |
1717 | if (Inst result = tryTest(Width32, tmpPromise(left), rightImm64)) |
1718 | return result; |
1719 | } |
1720 | if (Inst result = tryTest(width, tmpPromise(left), rightImm)) |
1721 | return result; |
1722 | if (Inst result = tryTest(width, tmpPromise(left), rightImm64)) |
1723 | return result; |
1724 | } |
1725 | |
1726 | // Finally, just do tmp's. |
1727 | return tryTest(width, tmpPromise(left), tmpPromise(right)); |
1728 | } |
1729 | default: |
1730 | return Inst(); |
1731 | } |
1732 | }; |
1733 | |
1734 | if (FusionResult fusionResult = prepareToFuse(value)) { |
1735 | if (Inst result = attemptFused()) { |
1736 | commitFusion(value, fusionResult); |
1737 | return result; |
1738 | } |
1739 | } |
1740 | |
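// If we couldn't fuse a comparison, fall back to testing the value against an all-ones
// mask, which checks whether the value is non-zero.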
1741 | if (Arg::isValidBitImmForm(-1)) { |
1742 | if (canCommitInternal && value->as<MemoryValue>()) { |
1743 | // Handle things like Branch(Load8Z(value)) |
1744 | |
1745 | if (Inst result = tryTest(Width8, loadPromise(value, Load8Z), Arg::bitImm(-1))) { |
1746 | commitInternal(value); |
1747 | return result; |
1748 | } |
1749 | |
1750 | if (Inst result = tryTest(Width8, loadPromise(value, Load8S), Arg::bitImm(-1))) { |
1751 | commitInternal(value); |
1752 | return result; |
1753 | } |
1754 | |
1755 | if (Inst result = tryTest(Width16, loadPromise(value, Load16Z), Arg::bitImm(-1))) { |
1756 | commitInternal(value); |
1757 | return result; |
1758 | } |
1759 | |
1760 | if (Inst result = tryTest(Width16, loadPromise(value, Load16S), Arg::bitImm(-1))) { |
1761 | commitInternal(value); |
1762 | return result; |
1763 | } |
1764 | |
1765 | if (Inst result = tryTest(width, loadPromise(value), Arg::bitImm(-1))) { |
1766 | commitInternal(value); |
1767 | return result; |
1768 | } |
1769 | } |
1770 | |
1771 | ArgPromise leftPromise = tmpPromise(value); |
1772 | ArgPromise rightPromise = Arg::bitImm(-1); |
1773 | if (Inst result = test(width, resCond, leftPromise, rightPromise)) |
1774 | return result; |
1775 | } |
1776 | |
1777 | // Sometimes this is the only form of test available. We prefer not to use this because |
1778 | // it's less canonical. |
1779 | ArgPromise leftPromise = tmpPromise(value); |
1780 | ArgPromise rightPromise = tmpPromise(value); |
1781 | return test(width, resCond, leftPromise, rightPromise); |
1782 | } |
1783 | |
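// Lowers a boolean-producing value to a branch by instantiating createGenericCompare with
// Branch/BranchTest/BranchDouble/BranchFloat emitters.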
1784 | Inst createBranch(Value* value, bool inverted = false) |
1785 | { |
1786 | using namespace Air; |
1787 | return createGenericCompare( |
1788 | value, |
1789 | [this] ( |
1790 | Width width, const Arg& relCond, |
1791 | ArgPromise& left, ArgPromise& right) -> Inst { |
1792 | switch (width) { |
1793 | case Width8: |
1794 | if (isValidForm(Branch8, Arg::RelCond, left.kind(), right.kind())) { |
1795 | return left.inst(right.inst( |
1796 | Branch8, m_value, relCond, |
1797 | left.consume(*this), right.consume(*this))); |
1798 | } |
1799 | return Inst(); |
1800 | case Width16: |
1801 | return Inst(); |
1802 | case Width32: |
1803 | if (isValidForm(Branch32, Arg::RelCond, left.kind(), right.kind())) { |
1804 | return left.inst(right.inst( |
1805 | Branch32, m_value, relCond, |
1806 | left.consume(*this), right.consume(*this))); |
1807 | } |
1808 | return Inst(); |
1809 | case Width64: |
1810 | if (isValidForm(Branch64, Arg::RelCond, left.kind(), right.kind())) { |
1811 | return left.inst(right.inst( |
1812 | Branch64, m_value, relCond, |
1813 | left.consume(*this), right.consume(*this))); |
1814 | } |
1815 | return Inst(); |
1816 | } |
1817 | ASSERT_NOT_REACHED(); |
1818 | }, |
1819 | [this] ( |
1820 | Width width, const Arg& resCond, |
1821 | ArgPromise& left, ArgPromise& right) -> Inst { |
1822 | switch (width) { |
1823 | case Width8: |
1824 | if (isValidForm(BranchTest8, Arg::ResCond, left.kind(), right.kind())) { |
1825 | return left.inst(right.inst( |
1826 | BranchTest8, m_value, resCond, |
1827 | left.consume(*this), right.consume(*this))); |
1828 | } |
1829 | return Inst(); |
1830 | case Width16: |
1831 | return Inst(); |
1832 | case Width32: |
1833 | if (isValidForm(BranchTest32, Arg::ResCond, left.kind(), right.kind())) { |
1834 | return left.inst(right.inst( |
1835 | BranchTest32, m_value, resCond, |
1836 | left.consume(*this), right.consume(*this))); |
1837 | } |
1838 | return Inst(); |
1839 | case Width64: |
1840 | if (isValidForm(BranchTest64, Arg::ResCond, left.kind(), right.kind())) { |
1841 | return left.inst(right.inst( |
1842 | BranchTest64, m_value, resCond, |
1843 | left.consume(*this), right.consume(*this))); |
1844 | } |
1845 | return Inst(); |
1846 | } |
1847 | ASSERT_NOT_REACHED(); |
1848 | }, |
1849 | [this] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst { |
1850 | if (isValidForm(BranchDouble, Arg::DoubleCond, left.kind(), right.kind())) { |
1851 | return left.inst(right.inst( |
1852 | BranchDouble, m_value, doubleCond, |
1853 | left.consume(*this), right.consume(*this))); |
1854 | } |
1855 | return Inst(); |
1856 | }, |
1857 | [this] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst { |
1858 | if (isValidForm(BranchFloat, Arg::DoubleCond, left.kind(), right.kind())) { |
1859 | return left.inst(right.inst( |
1860 | BranchFloat, m_value, doubleCond, |
1861 | left.consume(*this), right.consume(*this))); |
1862 | } |
1863 | return Inst(); |
1864 | }, |
1865 | inverted); |
1866 | } |
1867 | |
1868 | Inst createCompare(Value* value, bool inverted = false) |
1869 | { |
1870 | using namespace Air; |
1871 | return createGenericCompare( |
1872 | value, |
1873 | [this] ( |
1874 | Width width, const Arg& relCond, |
1875 | ArgPromise& left, ArgPromise& right) -> Inst { |
1876 | switch (width) { |
1877 | case Width8: |
1878 | case Width16: |
1879 | return Inst(); |
1880 | case Width32: |
1881 | if (isValidForm(Compare32, Arg::RelCond, left.kind(), right.kind(), Arg::Tmp)) { |
1882 | return left.inst(right.inst( |
1883 | Compare32, m_value, relCond, |
1884 | left.consume(*this), right.consume(*this), tmp(m_value))); |
1885 | } |
1886 | return Inst(); |
1887 | case Width64: |
1888 | if (isValidForm(Compare64, Arg::RelCond, left.kind(), right.kind(), Arg::Tmp)) { |
1889 | return left.inst(right.inst( |
1890 | Compare64, m_value, relCond, |
1891 | left.consume(*this), right.consume(*this), tmp(m_value))); |
1892 | } |
1893 | return Inst(); |
1894 | } |
1895 | ASSERT_NOT_REACHED(); |
1896 | }, |
1897 | [this] ( |
1898 | Width width, const Arg& resCond, |
1899 | ArgPromise& left, ArgPromise& right) -> Inst { |
1900 | switch (width) { |
1901 | case Width8: |
1902 | case Width16: |
1903 | return Inst(); |
1904 | case Width32: |
1905 | if (isValidForm(Test32, Arg::ResCond, left.kind(), right.kind(), Arg::Tmp)) { |
1906 | return left.inst(right.inst( |
1907 | Test32, m_value, resCond, |
1908 | left.consume(*this), right.consume(*this), tmp(m_value))); |
1909 | } |
1910 | return Inst(); |
1911 | case Width64: |
1912 | if (isValidForm(Test64, Arg::ResCond, left.kind(), right.kind(), Arg::Tmp)) { |
1913 | return left.inst(right.inst( |
1914 | Test64, m_value, resCond, |
1915 | left.consume(*this), right.consume(*this), tmp(m_value))); |
1916 | } |
1917 | return Inst(); |
1918 | } |
1919 | ASSERT_NOT_REACHED(); |
1920 | }, |
1921 | [this] (const Arg& doubleCond, ArgPromise& left, ArgPromise& right) -> Inst { |
1922 | if (isValidForm(CompareDouble, Arg::DoubleCond, left.kind(), right.kind(), Arg::Tmp)) { |
1923 | return left.inst(right.inst( |
1924 | CompareDouble, m_value, doubleCond, |
1925 | left.consume(*this), right.consume(*this), tmp(m_value))); |
1926 | } |
1927 | return Inst(); |
1928 | }, |
1929 | [this] (const Arg& doubleCond, ArgPromise& left, ArgPromise& right) -> Inst { |
1930 | if (isValidForm(CompareFloat, Arg::DoubleCond, left.kind(), right.kind(), Arg::Tmp)) { |
1931 | return left.inst(right.inst( |
1932 | CompareFloat, m_value, doubleCond, |
1933 | left.consume(*this), right.consume(*this), tmp(m_value))); |
1934 | } |
1935 | return Inst(); |
1936 | }, |
1937 | inverted); |
1938 | } |
1939 | |
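// The Air opcodes to use for each way a Select condition can be matched: 32- and 64-bit
// relational compares, 32- and 64-bit tests, and double/float compares.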
1940 | struct MoveConditionallyConfig { |
1941 | Air::Opcode moveConditionally32; |
1942 | Air::Opcode moveConditionally64; |
1943 | Air::Opcode moveConditionallyTest32; |
1944 | Air::Opcode moveConditionallyTest64; |
1945 | Air::Opcode moveConditionallyDouble; |
1946 | Air::Opcode moveConditionallyFloat; |
1947 | }; |
1948 | Inst createSelect(const MoveConditionallyConfig& config) |
1949 | { |
1950 | using namespace Air; |
1951 | auto createSelectInstruction = [&] (Air::Opcode opcode, const Arg& condition, ArgPromise& left, ArgPromise& right) -> Inst { |
1952 | if (isValidForm(opcode, condition.kind(), left.kind(), right.kind(), Arg::Tmp, Arg::Tmp, Arg::Tmp)) { |
1953 | Tmp result = tmp(m_value); |
1954 | Tmp thenCase = tmp(m_value->child(1)); |
1955 | Tmp elseCase = tmp(m_value->child(2)); |
1956 | return left.inst(right.inst( |
1957 | opcode, m_value, condition, |
1958 | left.consume(*this), right.consume(*this), thenCase, elseCase, result)); |
1959 | } |
1960 | if (isValidForm(opcode, condition.kind(), left.kind(), right.kind(), Arg::Tmp, Arg::Tmp)) { |
1961 | Tmp result = tmp(m_value); |
1962 | Tmp source = tmp(m_value->child(1)); |
1963 | append(relaxedMoveForType(m_value->type()), tmp(m_value->child(2)), result); |
1964 | return left.inst(right.inst( |
1965 | opcode, m_value, condition, |
1966 | left.consume(*this), right.consume(*this), source, result)); |
1967 | } |
1968 | return Inst(); |
1969 | }; |
1970 | |
1971 | return createGenericCompare( |
1972 | m_value->child(0), |
1973 | [&] (Width width, const Arg& relCond, ArgPromise& left, ArgPromise& right) -> Inst { |
1974 | switch (width) { |
1975 | case Width8: |
1976 | // FIXME: Support these things. |
1977 | // https://bugs.webkit.org/show_bug.cgi?id=151504 |
1978 | return Inst(); |
1979 | case Width16: |
1980 | return Inst(); |
1981 | case Width32: |
1982 | return createSelectInstruction(config.moveConditionally32, relCond, left, right); |
1983 | case Width64: |
1984 | return createSelectInstruction(config.moveConditionally64, relCond, left, right); |
1985 | } |
1986 | ASSERT_NOT_REACHED(); |
1987 | }, |
1988 | [&] (Width width, const Arg& resCond, ArgPromise& left, ArgPromise& right) -> Inst { |
1989 | switch (width) { |
1990 | case Width8: |
1991 | // FIXME: Support more things. |
1992 | // https://bugs.webkit.org/show_bug.cgi?id=151504 |
1993 | return Inst(); |
1994 | case Width16: |
1995 | return Inst(); |
1996 | case Width32: |
1997 | return createSelectInstruction(config.moveConditionallyTest32, resCond, left, right); |
1998 | case Width64: |
1999 | return createSelectInstruction(config.moveConditionallyTest64, resCond, left, right); |
2000 | } |
2001 | ASSERT_NOT_REACHED(); |
2002 | }, |
2003 | [&] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst { |
2004 | return createSelectInstruction(config.moveConditionallyDouble, doubleCond, left, right); |
2005 | }, |
2006 | [&] (Arg doubleCond, ArgPromise& left, ArgPromise& right) -> Inst { |
2007 | return createSelectInstruction(config.moveConditionallyFloat, doubleCond, left, right); |
2008 | }, |
2009 | false); |
2010 | } |
2011 | |
2012 | bool tryAppendLea() |
2013 | { |
2014 | using namespace Air; |
2015 | Air::Opcode leaOpcode = tryOpcodeForType(Lea32, Lea64, m_value->type()); |
2016 | if (!isValidForm(leaOpcode, Arg::Index, Arg::Tmp)) |
2017 | return false; |
2018 | |
2019 | // This lets us turn things like this: |
2020 | // |
2021 | // Add(Add(@x, Shl(@y, $2)), $100) |
2022 | // |
2023 | // Into this: |
2024 | // |
2025 | // lea 100(%rdi,%rsi,4), %rax |
2026 | // |
2027 | // We have a choice here between committing the internal bits of an index or sharing |
2028 | // them. There are solid arguments for both. |
2029 | // |
2030 | // Sharing: The word on the street is that the cost of a lea is one cycle no matter |
2031 | // what it does. Every experiment I've ever seen seems to confirm this. So, sharing |
2032 | // helps us in situations where Wasm input did this: |
2033 | // |
2034 | // x = a[i].x; |
2035 | // y = a[i].y; |
2036 | // |
2037 | // With sharing we would do: |
2038 | // |
2039 | // leal (%a,%i,4), %tmp |
2040 | // cmp (%size, %tmp) |
2041 | // ja _fail |
2042 | // movl (%base, %tmp), %x |
2043 | // leal 4(%a,%i,4), %tmp |
2044 | // cmp (%size, %tmp) |
2045 | // ja _fail |
2046 | // movl (%base, %tmp), %y |
2047 | // |
2048 | // In the absence of sharing, we may find ourselves needing separate registers for |
2049 | // the innards of the index. That's relatively unlikely to be a thing due to other |
// optimizations that we already have, but it could happen.
2051 | // |
2052 | // Committing: The worst case is that there is a complicated graph of additions and |
2053 | // shifts, where each value has multiple uses. In that case, it's better to compute |
2054 | // each one separately from the others since that way, each calculation will use a |
2055 | // relatively nearby tmp as its input. That seems uncommon, but in those cases, |
2056 | // committing is a clear winner: it would result in a simple interference graph |
2057 | // while sharing would result in a complex one. Interference sucks because it means |
2058 | // more time in IRC and it means worse code. |
2059 | // |
2060 | // It's not super clear if any of these corner cases would ever arise. Committing |
2061 | // has the benefit that it's easier to reason about, and protects a much darker |
2062 | // corner case (more interference). |
2063 | |
2064 | // Here are the things we want to match: |
2065 | // Add(Add(@x, @y), $c) |
2066 | // Add(Shl(@x, $c), @y) |
2067 | // Add(@x, Shl(@y, $c)) |
2068 | // Add(Add(@x, Shl(@y, $c)), $d) |
2069 | // Add(Add(Shl(@x, $c), @y), $d) |
2070 | // |
2071 | // Note that if you do Add(Shl(@x, $c), $d) then we will treat $d as a non-constant and |
2072 | // force it to materialize. You'll get something like this: |
2073 | // |
2074 | // movl $d, %tmp |
2075 | // leal (%tmp,%x,1<<c), %result |
2076 | // |
2077 | // Which is pretty close to optimal and has the nice effect of being able to handle large |
2078 | // constants gracefully. |
2079 | |
2080 | Value* innerAdd = nullptr; |
2081 | |
2082 | Value* value = m_value; |
2083 | |
2084 | // We're going to consume Add(Add(_), $c). If we succeed at consuming it then we have these |
2085 | // patterns left (i.e. in the Add(_)): |
2086 | // |
2087 | // Add(Add(@x, @y), $c) |
2088 | // Add(Add(@x, Shl(@y, $c)), $d) |
2089 | // Add(Add(Shl(@x, $c), @y), $d) |
2090 | // |
2091 | // Otherwise we are looking at these patterns: |
2092 | // |
2093 | // Add(Shl(@x, $c), @y) |
2094 | // Add(@x, Shl(@y, $c)) |
2095 | // |
2096 | // This means that the subsequent code only has to worry about three patterns: |
2097 | // |
2098 | // Add(Shl(@x, $c), @y) |
2099 | // Add(@x, Shl(@y, $c)) |
2100 | // Add(@x, @y) (only if offset != 0) |
2101 | Value::OffsetType offset = 0; |
2102 | if (value->child(1)->isRepresentableAs<Value::OffsetType>() |
2103 | && canBeInternal(value->child(0)) |
2104 | && value->child(0)->opcode() == Add) { |
2105 | innerAdd = value->child(0); |
2106 | offset = static_cast<Value::OffsetType>(value->child(1)->asInt()); |
2107 | value = value->child(0); |
2108 | } |
2109 | |
2110 | auto tryShl = [&] (Value* shl, Value* other) -> bool { |
2111 | Optional<unsigned> scale = scaleForShl(shl, offset); |
2112 | if (!scale) |
2113 | return false; |
2114 | if (!canBeInternal(shl)) |
2115 | return false; |
2116 | |
2117 | ASSERT(!m_locked.contains(shl->child(0))); |
2118 | ASSERT(!m_locked.contains(other)); |
2119 | |
2120 | append(leaOpcode, Arg::index(tmp(other), tmp(shl->child(0)), *scale, offset), tmp(m_value)); |
2121 | commitInternal(innerAdd); |
2122 | commitInternal(shl); |
2123 | return true; |
2124 | }; |
2125 | |
2126 | if (tryShl(value->child(0), value->child(1))) |
2127 | return true; |
2128 | if (tryShl(value->child(1), value->child(0))) |
2129 | return true; |
2130 | |
2131 | // The remaining pattern is just: |
2132 | // Add(@x, @y) (only if offset != 0) |
2133 | if (!offset) |
2134 | return false; |
2135 | ASSERT(!m_locked.contains(value->child(0))); |
2136 | ASSERT(!m_locked.contains(value->child(1))); |
2137 | append(leaOpcode, Arg::index(tmp(value->child(0)), tmp(value->child(1)), 1, offset), tmp(m_value)); |
2138 | commitInternal(innerAdd); |
2139 | return true; |
2140 | } |
2141 | |
2142 | void appendX86Div(B3::Opcode op) |
2143 | { |
2144 | using namespace Air; |
2145 | Air::Opcode convertToDoubleWord; |
2146 | Air::Opcode div; |
2147 | switch (m_value->type().kind()) { |
2148 | case Int32: |
2149 | convertToDoubleWord = X86ConvertToDoubleWord32; |
2150 | div = X86Div32; |
2151 | break; |
2152 | case Int64: |
2153 | convertToDoubleWord = X86ConvertToQuadWord64; |
2154 | div = X86Div64; |
2155 | break; |
2156 | default: |
2157 | RELEASE_ASSERT_NOT_REACHED(); |
2158 | return; |
2159 | } |
2160 | |
2161 | ASSERT(op == Div || op == Mod); |
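// x86 division leaves the quotient in eax and the remainder in edx.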
2162 | Tmp result = op == Div ? m_eax : m_edx; |
2163 | |
2164 | append(Move, tmp(m_value->child(0)), m_eax); |
2165 | append(convertToDoubleWord, m_eax, m_edx); |
2166 | append(div, m_eax, m_edx, tmp(m_value->child(1))); |
2167 | append(Move, result, tmp(m_value)); |
2168 | } |
2169 | |
2170 | void appendX86UDiv(B3::Opcode op) |
2171 | { |
2172 | using namespace Air; |
2173 | Air::Opcode div = m_value->type() == Int32 ? X86UDiv32 : X86UDiv64; |
2174 | |
2175 | ASSERT(op == UDiv || op == UMod); |
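// As with the signed case, the quotient ends up in eax and the remainder in edx. The Xor
// below zeroes edx, which holds the high half of the dividend for unsigned division.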
2176 | Tmp result = op == UDiv ? m_eax : m_edx; |
2177 | |
2178 | append(Move, tmp(m_value->child(0)), m_eax); |
2179 | append(Xor64, m_edx, m_edx); |
2180 | append(div, m_eax, m_edx, tmp(m_value->child(1))); |
2181 | append(Move, result, tmp(m_value)); |
2182 | } |
2183 | |
2184 | Air::Opcode loadLinkOpcode(Width width, bool fence) |
2185 | { |
2186 | return fence ? OPCODE_FOR_WIDTH(LoadLinkAcq, width) : OPCODE_FOR_WIDTH(LoadLink, width); |
2187 | } |
2188 | |
2189 | Air::Opcode storeCondOpcode(Width width, bool fence) |
2190 | { |
2191 | return fence ? OPCODE_FOR_WIDTH(StoreCondRel, width) : OPCODE_FOR_WIDTH(StoreCond, width); |
2192 | } |
2193 | |
2194 | // This can emit code for the following patterns: |
2195 | // AtomicWeakCAS |
2196 | // BitXor(AtomicWeakCAS, 1) |
2197 | // AtomicStrongCAS |
2198 | // Equal(AtomicStrongCAS, expected) |
2199 | // NotEqual(AtomicStrongCAS, expected) |
2200 | // Branch(AtomicWeakCAS) |
2201 | // Branch(Equal(AtomicStrongCAS, expected)) |
2202 | // Branch(NotEqual(AtomicStrongCAS, expected)) |
2203 | // |
2204 | // It assumes that atomicValue points to the CAS, and m_value points to the instruction being |
2205 | // generated. It assumes that you've consumed everything that needs to be consumed. |
2206 | void appendCAS(Value* atomicValue, bool invert) |
2207 | { |
2208 | using namespace Air; |
2209 | AtomicValue* atomic = atomicValue->as<AtomicValue>(); |
2210 | RELEASE_ASSERT(atomic); |
2211 | |
2212 | bool isBranch = m_value->opcode() == Branch; |
2213 | bool isStrong = atomic->opcode() == AtomicStrongCAS; |
2214 | bool returnsOldValue = m_value->opcode() == AtomicStrongCAS; |
2215 | bool hasFence = atomic->hasFence(); |
2216 | |
2217 | Width width = atomic->accessWidth(); |
2218 | Arg address = addr(atomic); |
2219 | |
2220 | Tmp valueResultTmp; |
2221 | Tmp boolResultTmp; |
2222 | if (returnsOldValue) { |
2223 | RELEASE_ASSERT(!invert); |
2224 | valueResultTmp = tmp(m_value); |
2225 | boolResultTmp = m_code.newTmp(GP); |
2226 | } else if (isBranch) { |
2227 | valueResultTmp = m_code.newTmp(GP); |
2228 | boolResultTmp = m_code.newTmp(GP); |
2229 | } else { |
2230 | valueResultTmp = m_code.newTmp(GP); |
2231 | boolResultTmp = tmp(m_value); |
2232 | } |
2233 | |
2234 | Tmp successBoolResultTmp; |
2235 | if (isStrong && !isBranch) |
2236 | successBoolResultTmp = m_code.newTmp(GP); |
2237 | else |
2238 | successBoolResultTmp = boolResultTmp; |
2239 | |
2240 | Tmp expectedValueTmp = tmp(atomic->child(0)); |
2241 | Tmp newValueTmp = tmp(atomic->child(1)); |
2242 | |
2243 | Air::FrequentedBlock success; |
2244 | Air::FrequentedBlock failure; |
2245 | if (isBranch) { |
2246 | success = m_blockToBlock[m_block]->successor(invert); |
2247 | failure = m_blockToBlock[m_block]->successor(!invert); |
2248 | } |
2249 | |
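// On x86, cmpxchg does all of the work: the expected value must be in eax, and the status
// flags tell us whether the CAS succeeded.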
2250 | if (isX86()) { |
2251 | append(relaxedMoveForType(atomic->accessType()), immOrTmp(atomic->child(0)), m_eax); |
2252 | if (returnsOldValue) { |
2253 | appendTrapping(OPCODE_FOR_WIDTH(AtomicStrongCAS, width), m_eax, newValueTmp, address); |
2254 | append(relaxedMoveForType(atomic->accessType()), m_eax, valueResultTmp); |
2255 | } else if (isBranch) { |
2256 | appendTrapping(OPCODE_FOR_WIDTH(BranchAtomicStrongCAS, width), Arg::statusCond(MacroAssembler::Success), m_eax, newValueTmp, address); |
2257 | m_blockToBlock[m_block]->setSuccessors(success, failure); |
2258 | } else |
2259 | appendTrapping(OPCODE_FOR_WIDTH(AtomicStrongCAS, width), Arg::statusCond(invert ? MacroAssembler::Failure : MacroAssembler::Success), m_eax, tmp(atomic->child(1)), address, boolResultTmp); |
2260 | return; |
2261 | } |
2262 | |
2263 | RELEASE_ASSERT(isARM64()); |
2264 | // We wish to emit: |
2265 | // |
2266 | // Block #reloop: |
2267 | // LoadLink |
2268 | // Branch NotEqual |
2269 | // Successors: Then:#fail, Else: #store |
2270 | // Block #store: |
2271 | // StoreCond |
2272 | // Xor $1, %result <--- only if !invert |
2273 | // Jump |
2274 | // Successors: #done |
2275 | // Block #fail: |
2276 | // Move $invert, %result |
2277 | // Jump |
2278 | // Successors: #done |
2279 | // Block #done: |
2280 | |
2281 | Air::BasicBlock* reloopBlock = newBlock(); |
2282 | Air::BasicBlock* storeBlock = newBlock(); |
2283 | Air::BasicBlock* successBlock = nullptr; |
2284 | if (!isBranch && isStrong) |
2285 | successBlock = newBlock(); |
2286 | Air::BasicBlock* failBlock = nullptr; |
2287 | if (!isBranch) { |
2288 | failBlock = newBlock(); |
2289 | failure = failBlock; |
2290 | } |
2291 | Air::BasicBlock* strongFailBlock; |
2292 | if (isStrong && hasFence) |
2293 | strongFailBlock = newBlock(); |
2294 | Air::FrequentedBlock comparisonFail = failure; |
2295 | Air::FrequentedBlock weakFail; |
2296 | if (isStrong) { |
2297 | if (hasFence) |
2298 | comparisonFail = strongFailBlock; |
2299 | weakFail = reloopBlock; |
2300 | } else |
2301 | weakFail = failure; |
2302 | Air::BasicBlock* beginBlock; |
2303 | Air::BasicBlock* doneBlock; |
2304 | splitBlock(beginBlock, doneBlock); |
2305 | |
2306 | append(Air::Jump); |
2307 | beginBlock->setSuccessors(reloopBlock); |
2308 | |
2309 | reloopBlock->append(trappingInst(m_value, loadLinkOpcode(width, atomic->hasFence()), m_value, address, valueResultTmp)); |
2310 | reloopBlock->append(OPCODE_FOR_CANONICAL_WIDTH(Branch, width), m_value, Arg::relCond(MacroAssembler::NotEqual), valueResultTmp, expectedValueTmp); |
2311 | reloopBlock->setSuccessors(comparisonFail, storeBlock); |
2312 | |
2313 | storeBlock->append(trappingInst(m_value, storeCondOpcode(width, atomic->hasFence()), m_value, newValueTmp, address, successBoolResultTmp)); |
2314 | if (isBranch) { |
2315 | storeBlock->append(BranchTest32, m_value, Arg::resCond(MacroAssembler::Zero), boolResultTmp, boolResultTmp); |
2316 | storeBlock->setSuccessors(success, weakFail); |
2317 | doneBlock->successors().clear(); |
2318 | RELEASE_ASSERT(!doneBlock->size()); |
2319 | doneBlock->append(Air::Oops, m_value); |
2320 | } else { |
2321 | if (isStrong) { |
2322 | storeBlock->append(BranchTest32, m_value, Arg::resCond(MacroAssembler::Zero), successBoolResultTmp, successBoolResultTmp); |
2323 | storeBlock->setSuccessors(successBlock, reloopBlock); |
2324 | |
2325 | successBlock->append(Move, m_value, Arg::imm(!invert), boolResultTmp); |
2326 | successBlock->append(Air::Jump, m_value); |
2327 | successBlock->setSuccessors(doneBlock); |
2328 | } else { |
2329 | if (!invert) |
2330 | storeBlock->append(Xor32, m_value, Arg::bitImm(1), boolResultTmp, boolResultTmp); |
2331 | |
2332 | storeBlock->append(Air::Jump, m_value); |
2333 | storeBlock->setSuccessors(doneBlock); |
2334 | } |
2335 | |
2336 | failBlock->append(Move, m_value, Arg::imm(invert), boolResultTmp); |
2337 | failBlock->append(Air::Jump, m_value); |
2338 | failBlock->setSuccessors(doneBlock); |
2339 | } |
2340 | |
2341 | if (isStrong && hasFence) { |
2342 | Tmp tmp = m_code.newTmp(GP); |
2343 | strongFailBlock->append(trappingInst(m_value, storeCondOpcode(width, atomic->hasFence()), m_value, valueResultTmp, address, tmp)); |
2344 | strongFailBlock->append(BranchTest32, m_value, Arg::resCond(MacroAssembler::Zero), tmp, tmp); |
2345 | strongFailBlock->setSuccessors(failure, reloopBlock); |
2346 | } |
2347 | } |
2348 | |
2349 | bool appendVoidAtomic(Air::Opcode atomicOpcode) |
2350 | { |
2351 | if (m_useCounts.numUses(m_value)) |
2352 | return false; |
2353 | |
2354 | Arg address = addr(m_value); |
2355 | |
2356 | if (isValidForm(atomicOpcode, Arg::Imm, address.kind()) && imm(m_value->child(0))) { |
2357 | append(atomicOpcode, imm(m_value->child(0)), address); |
2358 | return true; |
2359 | } |
2360 | |
2361 | if (isValidForm(atomicOpcode, Arg::Tmp, address.kind())) { |
2362 | append(atomicOpcode, tmp(m_value->child(0)), address); |
2363 | return true; |
2364 | } |
2365 | |
2366 | return false; |
2367 | } |
2368 | |
2369 | void appendGeneralAtomic(Air::Opcode opcode, Commutativity commutativity = NotCommutative) |
2370 | { |
2371 | using namespace Air; |
2372 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
2373 | |
2374 | Arg address = addr(m_value); |
2375 | Tmp oldValue = m_code.newTmp(GP); |
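// If opcode is Air::Nop there is no operation to apply (this is effectively an atomic
// exchange), so the value to store is just the operand itself.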
2376 | Tmp newValue = opcode == Air::Nop ? tmp(atomic->child(0)) : m_code.newTmp(GP); |
2377 | |
2378 | // We need a CAS loop or a LL/SC loop. Using prepare/attempt jargon, we want: |
2379 | // |
2380 | // Block #reloop: |
2381 | // Prepare |
2382 | // opcode |
2383 | // Attempt |
2384 | // Successors: Then:#done, Else:#reloop |
2385 | // Block #done: |
2386 | // Move oldValue, result |
2387 | |
2388 | append(relaxedMoveForType(atomic->type()), oldValue, tmp(atomic)); |
2389 | |
2390 | Air::BasicBlock* reloopBlock = newBlock(); |
2391 | Air::BasicBlock* beginBlock; |
2392 | Air::BasicBlock* doneBlock; |
2393 | splitBlock(beginBlock, doneBlock); |
2394 | |
2395 | append(Air::Jump); |
2396 | beginBlock->setSuccessors(reloopBlock); |
2397 | |
2398 | Air::Opcode prepareOpcode; |
2399 | if (isX86()) { |
2400 | switch (atomic->accessWidth()) { |
2401 | case Width8: |
2402 | prepareOpcode = Load8SignedExtendTo32; |
2403 | break; |
2404 | case Width16: |
2405 | prepareOpcode = Load16SignedExtendTo32; |
2406 | break; |
2407 | case Width32: |
2408 | prepareOpcode = Move32; |
2409 | break; |
2410 | case Width64: |
2411 | prepareOpcode = Move; |
2412 | break; |
2413 | } |
2414 | } else { |
2415 | RELEASE_ASSERT(isARM64()); |
2416 | prepareOpcode = loadLinkOpcode(atomic->accessWidth(), atomic->hasFence()); |
2417 | } |
2418 | reloopBlock->append(trappingInst(m_value, prepareOpcode, m_value, address, oldValue)); |
2419 | |
2420 | if (opcode != Air::Nop) { |
2421 | // FIXME: If we ever have to write this again, we need to find a way to share the code with |
2422 | // appendBinOp. |
2423 | // https://bugs.webkit.org/show_bug.cgi?id=169249 |
2424 | if (commutativity == Commutative && imm(atomic->child(0)) && isValidForm(opcode, Arg::Imm, Arg::Tmp, Arg::Tmp)) |
2425 | reloopBlock->append(opcode, m_value, imm(atomic->child(0)), oldValue, newValue); |
2426 | else if (imm(atomic->child(0)) && isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) |
2427 | reloopBlock->append(opcode, m_value, oldValue, imm(atomic->child(0)), newValue); |
2428 | else if (commutativity == Commutative && bitImm(atomic->child(0)) && isValidForm(opcode, Arg::BitImm, Arg::Tmp, Arg::Tmp)) |
2429 | reloopBlock->append(opcode, m_value, bitImm(atomic->child(0)), oldValue, newValue); |
2430 | else if (isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Tmp)) |
2431 | reloopBlock->append(opcode, m_value, oldValue, tmp(atomic->child(0)), newValue); |
2432 | else { |
2433 | reloopBlock->append(relaxedMoveForType(atomic->type()), m_value, oldValue, newValue); |
2434 | if (imm(atomic->child(0)) && isValidForm(opcode, Arg::Imm, Arg::Tmp)) |
2435 | reloopBlock->append(opcode, m_value, imm(atomic->child(0)), newValue); |
2436 | else |
2437 | reloopBlock->append(opcode, m_value, tmp(atomic->child(0)), newValue); |
2438 | } |
2439 | } |
2440 | |
2441 | if (isX86()) { |
2442 | Air::Opcode casOpcode = OPCODE_FOR_WIDTH(BranchAtomicStrongCAS, atomic->accessWidth()); |
2443 | reloopBlock->append(relaxedMoveForType(atomic->type()), m_value, oldValue, m_eax); |
2444 | reloopBlock->append(trappingInst(m_value, casOpcode, m_value, Arg::statusCond(MacroAssembler::Success), m_eax, newValue, address)); |
2445 | } else { |
2446 | RELEASE_ASSERT(isARM64()); |
2447 | Tmp boolResult = m_code.newTmp(GP); |
2448 | reloopBlock->append(trappingInst(m_value, storeCondOpcode(atomic->accessWidth(), atomic->hasFence()), m_value, newValue, address, boolResult)); |
2449 | reloopBlock->append(BranchTest32, m_value, Arg::resCond(MacroAssembler::Zero), boolResult, boolResult); |
2450 | } |
2451 | reloopBlock->setSuccessors(doneBlock, reloopBlock); |
2452 | } |
2453 | |
2454 | void lower() |
2455 | { |
2456 | using namespace Air; |
2457 | switch (m_value->opcode()) { |
2458 | case B3::Nop: { |
// Yes, we will totally see Nops because some phases will replaceWithNop() instead of
2460 | // properly removing things. |
2461 | return; |
2462 | } |
2463 | |
2464 | case Load: { |
2465 | MemoryValue* memory = m_value->as<MemoryValue>(); |
2466 | Air::Kind kind = moveForType(memory->type()); |
2467 | if (memory->hasFence()) { |
2468 | if (isX86()) |
2469 | kind.effects = true; |
2470 | else { |
2471 | switch (memory->type().kind()) { |
2472 | case Int32: |
2473 | kind = LoadAcq32; |
2474 | break; |
2475 | case Int64: |
2476 | kind = LoadAcq64; |
2477 | break; |
2478 | default: |
2479 | RELEASE_ASSERT_NOT_REACHED(); |
2480 | break; |
2481 | } |
2482 | } |
2483 | } |
2484 | append(trappingInst(m_value, kind, m_value, addr(m_value), tmp(m_value))); |
2485 | return; |
2486 | } |
2487 | |
2488 | case Load8S: { |
2489 | Air::Kind kind = Load8SignedExtendTo32; |
2490 | if (m_value->as<MemoryValue>()->hasFence()) { |
2491 | if (isX86()) |
2492 | kind.effects = true; |
2493 | else |
2494 | kind = LoadAcq8SignedExtendTo32; |
2495 | } |
2496 | append(trappingInst(m_value, kind, m_value, addr(m_value), tmp(m_value))); |
2497 | return; |
2498 | } |
2499 | |
2500 | case Load8Z: { |
2501 | Air::Kind kind = Load8; |
2502 | if (m_value->as<MemoryValue>()->hasFence()) { |
2503 | if (isX86()) |
2504 | kind.effects = true; |
2505 | else |
2506 | kind = LoadAcq8; |
2507 | } |
2508 | append(trappingInst(m_value, kind, m_value, addr(m_value), tmp(m_value))); |
2509 | return; |
2510 | } |
2511 | |
2512 | case Load16S: { |
2513 | Air::Kind kind = Load16SignedExtendTo32; |
2514 | if (m_value->as<MemoryValue>()->hasFence()) { |
2515 | if (isX86()) |
2516 | kind.effects = true; |
2517 | else |
2518 | kind = LoadAcq16SignedExtendTo32; |
2519 | } |
2520 | append(trappingInst(m_value, kind, m_value, addr(m_value), tmp(m_value))); |
2521 | return; |
2522 | } |
2523 | |
2524 | case Load16Z: { |
2525 | Air::Kind kind = Load16; |
2526 | if (m_value->as<MemoryValue>()->hasFence()) { |
2527 | if (isX86()) |
2528 | kind.effects = true; |
2529 | else |
2530 | kind = LoadAcq16; |
2531 | } |
2532 | append(trappingInst(m_value, kind, m_value, addr(m_value), tmp(m_value))); |
2533 | return; |
2534 | } |
2535 | |
2536 | case Add: { |
2537 | if (tryAppendLea()) |
2538 | return; |
2539 | |
2540 | Air::Opcode multiplyAddOpcode = tryOpcodeForType(MultiplyAdd32, MultiplyAdd64, m_value->type()); |
2541 | if (isValidForm(multiplyAddOpcode, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) { |
2542 | Value* left = m_value->child(0); |
2543 | Value* right = m_value->child(1); |
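// Only try multiply-add when right is not a still-unmaterialized immediate; in that case
// the plain add-with-immediate forms below are likely cheaper than forcing the immediate
// into a tmp.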
2544 | if (!imm(right) || m_valueToTmp[right]) { |
2545 | auto tryAppendMultiplyAdd = [&] (Value* left, Value* right) -> bool { |
2546 | if (left->opcode() != Mul || !canBeInternal(left)) |
2547 | return false; |
2548 | |
2549 | Value* multiplyLeft = left->child(0); |
2550 | Value* multiplyRight = left->child(1); |
2551 | if (canBeInternal(multiplyLeft) || canBeInternal(multiplyRight)) |
2552 | return false; |
2553 | |
2554 | append(multiplyAddOpcode, tmp(multiplyLeft), tmp(multiplyRight), tmp(right), tmp(m_value)); |
2555 | commitInternal(left); |
2556 | |
2557 | return true; |
2558 | }; |
2559 | |
2560 | if (tryAppendMultiplyAdd(left, right)) |
2561 | return; |
2562 | if (tryAppendMultiplyAdd(right, left)) |
2563 | return; |
2564 | } |
2565 | } |
2566 | |
2567 | appendBinOp<Add32, Add64, AddDouble, AddFloat, Commutative>( |
2568 | m_value->child(0), m_value->child(1)); |
2569 | return; |
2570 | } |
2571 | |
2572 | case Sub: { |
2573 | Air::Opcode multiplySubOpcode = tryOpcodeForType(MultiplySub32, MultiplySub64, m_value->type()); |
2574 | if (multiplySubOpcode != Air::Oops |
2575 | && isValidForm(multiplySubOpcode, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) { |
2576 | Value* left = m_value->child(0); |
2577 | Value* right = m_value->child(1); |
2578 | if (!imm(right) || m_valueToTmp[right]) { |
2579 | auto tryAppendMultiplySub = [&] () -> bool { |
2580 | if (right->opcode() != Mul || !canBeInternal(right)) |
2581 | return false; |
2582 | |
2583 | Value* multiplyLeft = right->child(0); |
2584 | Value* multiplyRight = right->child(1); |
2585 | if (m_locked.contains(multiplyLeft) || m_locked.contains(multiplyRight)) |
2586 | return false; |
2587 | |
2588 | append(multiplySubOpcode, tmp(multiplyLeft), tmp(multiplyRight), tmp(left), tmp(m_value)); |
2589 | commitInternal(right); |
2590 | |
2591 | return true; |
2592 | }; |
2593 | |
2594 | if (tryAppendMultiplySub()) |
2595 | return; |
2596 | } |
2597 | } |
2598 | |
2599 | appendBinOp<Sub32, Sub64, SubDouble, SubFloat>(m_value->child(0), m_value->child(1)); |
2600 | return; |
2601 | } |
2602 | |
2603 | case Neg: { |
2604 | Air::Opcode multiplyNegOpcode = tryOpcodeForType(MultiplyNeg32, MultiplyNeg64, m_value->type()); |
2605 | if (multiplyNegOpcode != Air::Oops |
2606 | && isValidForm(multiplyNegOpcode, Arg::Tmp, Arg::Tmp, Arg::Tmp) |
2607 | && m_value->child(0)->opcode() == Mul |
2608 | && canBeInternal(m_value->child(0))) { |
2609 | Value* multiplyOperation = m_value->child(0); |
2610 | Value* multiplyLeft = multiplyOperation->child(0); |
2611 | Value* multiplyRight = multiplyOperation->child(1); |
2612 | if (!m_locked.contains(multiplyLeft) && !m_locked.contains(multiplyRight)) { |
2613 | append(multiplyNegOpcode, tmp(multiplyLeft), tmp(multiplyRight), tmp(m_value)); |
2614 | commitInternal(multiplyOperation); |
2615 | return; |
2616 | } |
2617 | } |
2618 | |
2619 | appendUnOp<Neg32, Neg64, NegateDouble, NegateFloat>(m_value->child(0)); |
2620 | return; |
2621 | } |
2622 | |
2623 | case Mul: { |
2624 | if (m_value->type() == Int64 |
2625 | && isValidForm(MultiplySignExtend32, Arg::Tmp, Arg::Tmp, Arg::Tmp) |
2626 | && m_value->child(0)->opcode() == SExt32 |
2627 | && !m_locked.contains(m_value->child(0))) { |
2628 | Value* opLeft = m_value->child(0); |
2629 | Value* left = opLeft->child(0); |
2630 | Value* opRight = m_value->child(1); |
2631 | Value* right = nullptr; |
2632 | |
2633 | if (opRight->opcode() == SExt32 && !m_locked.contains(opRight->child(0))) { |
2634 | right = opRight->child(0); |
2635 | } else if (m_value->child(1)->isRepresentableAs<int32_t>() && !m_locked.contains(m_value->child(1))) { |
// We just use the 64-bit const int as a 32-bit const int directly.
2637 | right = opRight; |
2638 | } |
2639 | |
2640 | if (right) { |
2641 | append(MultiplySignExtend32, tmp(left), tmp(right), tmp(m_value)); |
2642 | return; |
2643 | } |
2644 | } |
2645 | appendBinOp<Mul32, Mul64, MulDouble, MulFloat, Commutative>( |
2646 | m_value->child(0), m_value->child(1)); |
2647 | return; |
2648 | } |
2649 | |
2650 | case Div: { |
2651 | if (m_value->isChill()) |
2652 | RELEASE_ASSERT(isARM64()); |
2653 | if (m_value->type().isInt() && isX86()) { |
2654 | appendX86Div(Div); |
2655 | return; |
2656 | } |
2657 | ASSERT(!isX86() || m_value->type().isFloat()); |
2658 | |
2659 | appendBinOp<Div32, Div64, DivDouble, DivFloat>(m_value->child(0), m_value->child(1)); |
2660 | return; |
2661 | } |
2662 | |
2663 | case UDiv: { |
2664 | if (m_value->type().isInt() && isX86()) { |
2665 | appendX86UDiv(UDiv); |
2666 | return; |
2667 | } |
2668 | |
2669 | ASSERT(!isX86() && !m_value->type().isFloat()); |
2670 | |
2671 | appendBinOp<UDiv32, UDiv64, Air::Oops, Air::Oops>(m_value->child(0), m_value->child(1)); |
2672 | return; |
2673 | |
2674 | } |
2675 | |
2676 | case Mod: { |
2677 | RELEASE_ASSERT(isX86()); |
2678 | RELEASE_ASSERT(!m_value->isChill()); |
2679 | appendX86Div(Mod); |
2680 | return; |
2681 | } |
2682 | |
2683 | case UMod: { |
2684 | RELEASE_ASSERT(isX86()); |
2685 | appendX86UDiv(UMod); |
2686 | return; |
2687 | } |
2688 | |
2689 | case BitAnd: { |
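// BitAnd with a mask of all ones in the low 8, 16, or 32 bits is just a zero extension.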
2690 | if (m_value->child(1)->isInt(0xff)) { |
2691 | appendUnOp<ZeroExtend8To32, ZeroExtend8To32>(m_value->child(0)); |
2692 | return; |
2693 | } |
2694 | |
2695 | if (m_value->child(1)->isInt(0xffff)) { |
2696 | appendUnOp<ZeroExtend16To32, ZeroExtend16To32>(m_value->child(0)); |
2697 | return; |
2698 | } |
2699 | |
2700 | if (m_value->child(1)->isInt64(0xffffffff) || m_value->child(1)->isInt32(0xffffffff)) { |
2701 | appendUnOp<Move32, Move32>(m_value->child(0)); |
2702 | return; |
2703 | } |
2704 | |
2705 | appendBinOp<And32, And64, AndDouble, AndFloat, Commutative>( |
2706 | m_value->child(0), m_value->child(1)); |
2707 | return; |
2708 | } |
2709 | |
2710 | case BitOr: { |
2711 | appendBinOp<Or32, Or64, OrDouble, OrFloat, Commutative>( |
2712 | m_value->child(0), m_value->child(1)); |
2713 | return; |
2714 | } |
2715 | |
2716 | case BitXor: { |
2717 | // FIXME: If canBeInternal(child), we should generate this using the comparison path. |
2718 | // https://bugs.webkit.org/show_bug.cgi?id=152367 |
2719 | |
2720 | if (m_value->child(1)->isInt(-1)) { |
2721 | appendUnOp<Not32, Not64>(m_value->child(0)); |
2722 | return; |
2723 | } |
2724 | |
2725 | // This pattern is super useful on both x86 and ARM64, since the inversion of the CAS result |
2726 | // can be done with zero cost on x86 (just flip the set from E to NE) and it's a progression |
2727 | // on ARM64 (since STX returns 0 on success, so ordinarily we have to flip it). |
2728 | if (m_value->child(1)->isInt(1) |
2729 | && m_value->child(0)->opcode() == AtomicWeakCAS |
2730 | && canBeInternal(m_value->child(0))) { |
2731 | commitInternal(m_value->child(0)); |
2732 | appendCAS(m_value->child(0), true); |
2733 | return; |
2734 | } |
2735 | |
2736 | appendBinOp<Xor32, Xor64, XorDouble, XorFloat, Commutative>( |
2737 | m_value->child(0), m_value->child(1)); |
2738 | return; |
2739 | } |
2740 | |
2741 | case Depend: { |
2742 | RELEASE_ASSERT(isARM64()); |
2743 | appendUnOp<Depend32, Depend64>(m_value->child(0)); |
2744 | return; |
2745 | } |
2746 | |
2747 | case Shl: { |
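// Strength-reduce Shl(@x, $1) to Add(@x, @x).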
2748 | if (m_value->child(1)->isInt32(1)) { |
2749 | appendBinOp<Add32, Add64, AddDouble, AddFloat, Commutative>(m_value->child(0), m_value->child(0)); |
2750 | return; |
2751 | } |
2752 | |
2753 | appendShift<Lshift32, Lshift64>(m_value->child(0), m_value->child(1)); |
2754 | return; |
2755 | } |
2756 | |
2757 | case SShr: { |
2758 | appendShift<Rshift32, Rshift64>(m_value->child(0), m_value->child(1)); |
2759 | return; |
2760 | } |
2761 | |
2762 | case ZShr: { |
2763 | appendShift<Urshift32, Urshift64>(m_value->child(0), m_value->child(1)); |
2764 | return; |
2765 | } |
2766 | |
2767 | case RotR: { |
2768 | appendShift<RotateRight32, RotateRight64>(m_value->child(0), m_value->child(1)); |
2769 | return; |
2770 | } |
2771 | |
2772 | case RotL: { |
2773 | appendShift<RotateLeft32, RotateLeft64>(m_value->child(0), m_value->child(1)); |
2774 | return; |
2775 | } |
2776 | |
2777 | case Clz: { |
2778 | appendUnOp<CountLeadingZeros32, CountLeadingZeros64>(m_value->child(0)); |
2779 | return; |
2780 | } |
2781 | |
2782 | case Abs: { |
RELEASE_ASSERT_WITH_MESSAGE(!isX86(), "Abs is not supported natively on x86. It must be replaced before generation.");
2784 | appendUnOp<Air::Oops, Air::Oops, AbsDouble, AbsFloat>(m_value->child(0)); |
2785 | return; |
2786 | } |
2787 | |
2788 | case Ceil: { |
2789 | appendUnOp<Air::Oops, Air::Oops, CeilDouble, CeilFloat>(m_value->child(0)); |
2790 | return; |
2791 | } |
2792 | |
2793 | case Floor: { |
2794 | appendUnOp<Air::Oops, Air::Oops, FloorDouble, FloorFloat>(m_value->child(0)); |
2795 | return; |
2796 | } |
2797 | |
2798 | case Sqrt: { |
2799 | appendUnOp<Air::Oops, Air::Oops, SqrtDouble, SqrtFloat>(m_value->child(0)); |
2800 | return; |
2801 | } |
2802 | |
2803 | case BitwiseCast: { |
2804 | appendUnOp<Move32ToFloat, Move64ToDouble, MoveDoubleTo64, MoveFloatTo32>(m_value->child(0)); |
2805 | return; |
2806 | } |
2807 | |
2808 | case Store: { |
2809 | Value* valueToStore = m_value->child(0); |
2810 | if (canBeInternal(valueToStore)) { |
2811 | bool matched = false; |
2812 | switch (valueToStore->opcode()) { |
2813 | case Add: |
2814 | matched = tryAppendStoreBinOp<Add32, Add64, Commutative>( |
2815 | valueToStore->child(0), valueToStore->child(1)); |
2816 | break; |
2817 | case Sub: |
2818 | if (valueToStore->child(0)->isInt(0)) { |
2819 | matched = tryAppendStoreUnOp<Neg32, Neg64>(valueToStore->child(1)); |
2820 | break; |
2821 | } |
2822 | matched = tryAppendStoreBinOp<Sub32, Sub64>( |
2823 | valueToStore->child(0), valueToStore->child(1)); |
2824 | break; |
2825 | case BitAnd: |
2826 | matched = tryAppendStoreBinOp<And32, And64, Commutative>( |
2827 | valueToStore->child(0), valueToStore->child(1)); |
2828 | break; |
2829 | case BitXor: |
2830 | if (valueToStore->child(1)->isInt(-1)) { |
2831 | matched = tryAppendStoreUnOp<Not32, Not64>(valueToStore->child(0)); |
2832 | break; |
2833 | } |
2834 | matched = tryAppendStoreBinOp<Xor32, Xor64, Commutative>( |
2835 | valueToStore->child(0), valueToStore->child(1)); |
2836 | break; |
2837 | default: |
2838 | break; |
2839 | } |
2840 | if (matched) { |
2841 | commitInternal(valueToStore); |
2842 | return; |
2843 | } |
2844 | } |
2845 | |
2846 | appendStore(m_value, addr(m_value)); |
2847 | return; |
2848 | } |
2849 | |
2850 | case B3::Store8: { |
2851 | Value* valueToStore = m_value->child(0); |
2852 | if (canBeInternal(valueToStore)) { |
2853 | bool matched = false; |
2854 | switch (valueToStore->opcode()) { |
2855 | case Add: |
2856 | matched = tryAppendStoreBinOp<Add8, Air::Oops, Commutative>( |
2857 | valueToStore->child(0), valueToStore->child(1)); |
2858 | break; |
2859 | default: |
2860 | break; |
2861 | } |
2862 | if (matched) { |
2863 | commitInternal(valueToStore); |
2864 | return; |
2865 | } |
2866 | } |
2867 | appendStore(m_value, addr(m_value)); |
2868 | return; |
2869 | } |
2870 | |
2871 | case B3::Store16: { |
2872 | Value* valueToStore = m_value->child(0); |
2873 | if (canBeInternal(valueToStore)) { |
2874 | bool matched = false; |
2875 | switch (valueToStore->opcode()) { |
2876 | case Add: |
2877 | matched = tryAppendStoreBinOp<Add16, Air::Oops, Commutative>( |
2878 | valueToStore->child(0), valueToStore->child(1)); |
2879 | break; |
2880 | default: |
2881 | break; |
2882 | } |
2883 | if (matched) { |
2884 | commitInternal(valueToStore); |
2885 | return; |
2886 | } |
2887 | } |
2888 | appendStore(m_value, addr(m_value)); |
2889 | return; |
2890 | } |
2891 | |
2892 | case WasmAddress: { |
2893 | WasmAddressValue* address = m_value->as<WasmAddressValue>(); |
2894 | |
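// A WasmAddress is computed as the pinned memory base register plus the child value.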
2895 | append(Add64, Arg(address->pinnedGPR()), tmp(m_value->child(0)), tmp(address)); |
2896 | return; |
2897 | } |
2898 | |
2899 | case Fence: { |
2900 | FenceValue* fence = m_value->as<FenceValue>(); |
2901 | if (!fence->write && !fence->read) |
2902 | return; |
2903 | if (!fence->write) { |
2904 | // A fence that reads but does not write is for protecting motion of stores. |
2905 | append(StoreFence); |
2906 | return; |
2907 | } |
2908 | if (!fence->read) { |
2909 | // A fence that writes but does not read is for protecting motion of loads. |
2910 | append(LoadFence); |
2911 | return; |
2912 | } |
2913 | append(MemoryFence); |
2914 | return; |
2915 | } |
2916 | |
2917 | case Trunc: { |
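// Trunc is a no-op at this level; the value and its truncation share a tmp.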
2918 | ASSERT(tmp(m_value->child(0)) == tmp(m_value)); |
2919 | return; |
2920 | } |
2921 | |
2922 | case SExt8: { |
2923 | appendUnOp<SignExtend8To32, Air::Oops>(m_value->child(0)); |
2924 | return; |
2925 | } |
2926 | |
2927 | case SExt16: { |
2928 | appendUnOp<SignExtend16To32, Air::Oops>(m_value->child(0)); |
2929 | return; |
2930 | } |
2931 | |
2932 | case ZExt32: { |
2933 | appendUnOp<Move32, Air::Oops>(m_value->child(0)); |
2934 | return; |
2935 | } |
2936 | |
2937 | case SExt32: { |
2938 | // FIXME: We should have support for movsbq/movswq |
2939 | // https://bugs.webkit.org/show_bug.cgi?id=152232 |
2940 | |
2941 | appendUnOp<SignExtend32ToPtr, Air::Oops>(m_value->child(0)); |
2942 | return; |
2943 | } |
2944 | |
2945 | case FloatToDouble: { |
2946 | appendUnOp<Air::Oops, Air::Oops, Air::Oops, ConvertFloatToDouble>(m_value->child(0)); |
2947 | return; |
2948 | } |
2949 | |
2950 | case DoubleToFloat: { |
2951 | appendUnOp<Air::Oops, Air::Oops, ConvertDoubleToFloat>(m_value->child(0)); |
2952 | return; |
2953 | } |
2954 | |
2955 | case ArgumentReg: { |
2956 | m_prologue.append(Inst( |
2957 | moveForType(m_value->type()), m_value, |
2958 | Tmp(m_value->as<ArgumentRegValue>()->argumentReg()), |
2959 | tmp(m_value))); |
2960 | return; |
2961 | } |
2962 | |
2963 | case Const32: |
2964 | case Const64: { |
2965 | if (imm(m_value)) |
2966 | append(Move, imm(m_value), tmp(m_value)); |
2967 | else |
2968 | append(Move, Arg::bigImm(m_value->asInt()), tmp(m_value)); |
2969 | return; |
2970 | } |
2971 | |
2972 | case ConstDouble: |
2973 | case ConstFloat: { |
2974 | // We expect that the moveConstants() phase has run, and any doubles referenced from |
2975 | // stackmaps get fused. |
2976 | RELEASE_ASSERT(m_value->opcode() == ConstFloat || isIdentical(m_value->asDouble(), 0.0)); |
2977 | RELEASE_ASSERT(m_value->opcode() == ConstDouble || isIdentical(m_value->asFloat(), 0.0f)); |
2978 | append(MoveZeroToDouble, tmp(m_value)); |
2979 | return; |
2980 | } |
2981 | |
2982 | case FramePointer: { |
2983 | ASSERT(tmp(m_value) == Tmp(GPRInfo::callFrameRegister)); |
2984 | return; |
2985 | } |
2986 | |
2987 | case SlotBase: { |
2988 | append( |
2989 | pointerType() == Int64 ? Lea64 : Lea32, |
2990 | Arg::stack(m_stackToStack.get(m_value->as<SlotBaseValue>()->slot())), |
2991 | tmp(m_value)); |
2992 | return; |
2993 | } |
2994 | |
2995 | case Equal: |
2996 | case NotEqual: { |
2997 | // FIXME: Teach this to match patterns that arise from subwidth CAS. The CAS's result has to |
2998 | // be either zero- or sign-extended, and the value it's compared to should also be zero- or |
2999 | // sign-extended in a matching way. It's not super clear that this is very profitable. |
3000 | // https://bugs.webkit.org/show_bug.cgi?id=169250 |
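            // If a strong CAS is being compared against its own expected value, fuse the comparison
            // into the CAS: appendCAS() produces the boolean result directly, inverted for NotEqual.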
3001 | if (m_value->child(0)->opcode() == AtomicStrongCAS |
3002 | && m_value->child(0)->as<AtomicValue>()->isCanonicalWidth() |
3003 | && m_value->child(0)->child(0) == m_value->child(1) |
3004 | && canBeInternal(m_value->child(0))) { |
3005 | ASSERT(!m_locked.contains(m_value->child(0)->child(1))); |
3006 | ASSERT(!m_locked.contains(m_value->child(1))); |
3007 | |
3008 | commitInternal(m_value->child(0)); |
3009 | appendCAS(m_value->child(0), m_value->opcode() == NotEqual); |
3010 | return; |
3011 | } |
3012 | |
3013 | m_insts.last().append(createCompare(m_value)); |
3014 | return; |
3015 | } |
3016 | |
3017 | case LessThan: |
3018 | case GreaterThan: |
3019 | case LessEqual: |
3020 | case GreaterEqual: |
3021 | case Above: |
3022 | case Below: |
3023 | case AboveEqual: |
3024 | case BelowEqual: |
3025 | case EqualOrUnordered: { |
3026 | m_insts.last().append(createCompare(m_value)); |
3027 | return; |
3028 | } |
3029 | |
3030 | case Select: { |
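            // Pick the conditional-move opcodes based on the result bank: the MoveConditionally*
            // family for integer results, the MoveDoubleConditionally* family for floating point.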
3031 | MoveConditionallyConfig config; |
3032 | if (m_value->type().isInt()) { |
3033 | config.moveConditionally32 = MoveConditionally32; |
3034 | config.moveConditionally64 = MoveConditionally64; |
3035 | config.moveConditionallyTest32 = MoveConditionallyTest32; |
3036 | config.moveConditionallyTest64 = MoveConditionallyTest64; |
3037 | config.moveConditionallyDouble = MoveConditionallyDouble; |
3038 | config.moveConditionallyFloat = MoveConditionallyFloat; |
3039 | } else { |
3040 | // FIXME: it's not obvious that these are particularly efficient. |
3041 | // https://bugs.webkit.org/show_bug.cgi?id=169251 |
3042 | config.moveConditionally32 = MoveDoubleConditionally32; |
3043 | config.moveConditionally64 = MoveDoubleConditionally64; |
3044 | config.moveConditionallyTest32 = MoveDoubleConditionallyTest32; |
3045 | config.moveConditionallyTest64 = MoveDoubleConditionallyTest64; |
3046 | config.moveConditionallyDouble = MoveDoubleConditionallyDouble; |
3047 | config.moveConditionallyFloat = MoveDoubleConditionallyFloat; |
3048 | } |
3049 | |
3050 | m_insts.last().append(createSelect(config)); |
3051 | return; |
3052 | } |
3053 | |
3054 | case IToD: { |
3055 | appendUnOp<ConvertInt32ToDouble, ConvertInt64ToDouble>(m_value->child(0)); |
3056 | return; |
3057 | } |
3058 | |
3059 | case IToF: { |
3060 | appendUnOp<ConvertInt32ToFloat, ConvertInt64ToFloat>(m_value->child(0)); |
3061 | return; |
3062 | } |
3063 | |
3064 | case B3::CCall: { |
3065 | CCallValue* cCall = m_value->as<CCallValue>(); |
3066 | |
3067 | Inst inst(m_isRare ? Air::ColdCCall : Air::CCall, cCall); |
3068 | |
            // We have a ton of flexibility regarding the callee argument, but we don't take
            // advantage of it yet. It gets weird for a few reasons:
3071 | // 1) We probably will never take advantage of this. We don't have C calls to locations |
3072 | // loaded from addresses. We have JS calls like that, but those use Patchpoints. |
3073 | // 2) On X86_64 we still don't support call with BaseIndex. |
3074 | // 3) On non-X86, we don't natively support any kind of loading from address. |
3075 | // 4) We don't have an isValidForm() for the CCallSpecial so we have no smart way to |
3076 | // decide. |
3077 | // FIXME: https://bugs.webkit.org/show_bug.cgi?id=151052 |
3078 | inst.args.append(tmp(cCall->child(0))); |
3079 | |
3080 | if (cCall->type() != Void) |
3081 | inst.args.append(tmp(cCall)); |
3082 | |
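            // The arguments proper start at child(1); child(0) was the callee.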
3083 | for (unsigned i = 1; i < cCall->numChildren(); ++i) |
3084 | inst.args.append(immOrTmp(cCall->child(i))); |
3085 | |
3086 | m_insts.last().append(WTFMove(inst)); |
3087 | return; |
3088 | } |
3089 | |
3090 | case Patchpoint: { |
3091 | PatchpointValue* patchpointValue = m_value->as<PatchpointValue>(); |
3092 | ensureSpecial(m_patchpointSpecial); |
3093 | |
3094 | Inst inst(Patch, patchpointValue, Arg::special(m_patchpointSpecial)); |
3095 | |
3096 | Vector<Inst> after; |
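            // Result constraints that name a specific register or a stack argument need a fixup move
            // after the patch; flexible constraints just pass the result tmp directly.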
3097 | auto generateResultOperand = [&] (Type type, ValueRep rep, Tmp tmp) { |
3098 | switch (rep.kind()) { |
3099 | case ValueRep::WarmAny: |
3100 | case ValueRep::ColdAny: |
3101 | case ValueRep::LateColdAny: |
3102 | case ValueRep::SomeRegister: |
3103 | case ValueRep::SomeEarlyRegister: |
3104 | case ValueRep::SomeLateRegister: |
3105 | inst.args.append(tmp); |
3106 | return; |
3107 | case ValueRep::Register: { |
3108 | Tmp reg = Tmp(rep.reg()); |
3109 | inst.args.append(reg); |
3110 | after.append(Inst(relaxedMoveForType(type), m_value, reg, tmp)); |
3111 | return; |
3112 | } |
3113 | case ValueRep::StackArgument: { |
3114 | Arg arg = Arg::callArg(rep.offsetFromSP()); |
3115 | inst.args.append(arg); |
3116 | after.append(Inst(moveForType(type), m_value, arg, tmp)); |
3117 | return; |
3118 | } |
3119 | default: |
3120 | RELEASE_ASSERT_NOT_REACHED(); |
3121 | return; |
3122 | } |
3123 | }; |
3124 | |
3125 | if (patchpointValue->type() != Void) { |
3126 | forEachImmOrTmp(patchpointValue, [&] (Arg arg, Type type, unsigned index) { |
3127 | generateResultOperand(type, patchpointValue->resultConstraints[index], arg.tmp()); |
3128 | }); |
3129 | } |
3130 | |
3131 | fillStackmap(inst, patchpointValue, 0); |
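            // Registers that hold results are defined by the patchpoint, so they must not also
            // appear in the late-clobber set.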
3132 | for (auto& constraint : patchpointValue->resultConstraints) { |
3133 | if (constraint.isReg()) |
3134 | patchpointValue->lateClobbered().clear(constraint.reg()); |
3135 | } |
3136 | |
3137 | for (unsigned i = patchpointValue->numGPScratchRegisters; i--;) |
3138 | inst.args.append(m_code.newTmp(GP)); |
3139 | for (unsigned i = patchpointValue->numFPScratchRegisters; i--;) |
3140 | inst.args.append(m_code.newTmp(FP)); |
3141 | |
3142 | m_insts.last().append(WTFMove(inst)); |
3143 | m_insts.last().appendVector(after); |
3144 | return; |
3145 | } |
3146 | |
3147 | case Extract: { |
3148 | Value* tupleValue = m_value->child(0); |
3149 | unsigned index = m_value->as<ExtractValue>()->index(); |
3150 | |
3151 | const auto& tmps = tmpsForTuple(tupleValue); |
3152 | append(relaxedMoveForType(m_value->type()), tmps[index], tmp(m_value)); |
3153 | return; |
3154 | } |
3155 | |
3156 | case CheckAdd: |
3157 | case CheckSub: |
3158 | case CheckMul: { |
3159 | CheckValue* checkValue = m_value->as<CheckValue>(); |
3160 | |
3161 | Value* left = checkValue->child(0); |
3162 | Value* right = checkValue->child(1); |
3163 | |
3164 | Tmp result = tmp(m_value); |
3165 | |
3166 | // Handle checked negation. |
3167 | if (checkValue->opcode() == CheckSub && left->isInt(0)) { |
3168 | append(Move, tmp(right), result); |
3169 | |
3170 | Air::Opcode opcode = |
3171 | opcodeForType(BranchNeg32, BranchNeg64, checkValue->type()); |
3172 | CheckSpecial* special = ensureCheckSpecial(opcode, 2); |
3173 | |
3174 | Inst inst(Patch, checkValue, Arg::special(special)); |
3175 | inst.args.append(Arg::resCond(MacroAssembler::Overflow)); |
3176 | inst.args.append(result); |
3177 | |
3178 | fillStackmap(inst, checkValue, 2); |
3179 | |
3180 | m_insts.last().append(WTFMove(inst)); |
3181 | return; |
3182 | } |
3183 | |
3184 | Air::Opcode opcode = Air::Oops; |
3185 | Commutativity commutativity = NotCommutative; |
3186 | StackmapSpecial::RoleMode stackmapRole = StackmapSpecial::SameAsRep; |
3187 | switch (m_value->opcode()) { |
3188 | case CheckAdd: |
3189 | opcode = opcodeForType(BranchAdd32, BranchAdd64, m_value->type()); |
3190 | stackmapRole = StackmapSpecial::ForceLateUseUnlessRecoverable; |
3191 | commutativity = Commutative; |
3192 | break; |
3193 | case CheckSub: |
3194 | opcode = opcodeForType(BranchSub32, BranchSub64, m_value->type()); |
3195 | break; |
3196 | case CheckMul: |
3197 | opcode = opcodeForType(BranchMul32, BranchMul64, checkValue->type()); |
3198 | stackmapRole = StackmapSpecial::ForceLateUse; |
3199 | break; |
3200 | default: |
3201 | RELEASE_ASSERT_NOT_REACHED(); |
3202 | break; |
3203 | } |
3204 | |
3205 | // FIXME: It would be great to fuse Loads into these. We currently don't do it because the |
3206 | // rule for stackmaps is that all addresses are just stack addresses. Maybe we could relax |
3207 | // this rule here. |
3208 | // https://bugs.webkit.org/show_bug.cgi?id=151228 |
3209 | |
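            // Pick whichever Air form exists for this opcode: three-operand with an immediate,
            // two-operand with an immediate, plain three-operand, two-operand (destructive, so one
            // operand is first copied into the result), or the variant that needs two extra scratch
            // tmps.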
3210 | Vector<Arg, 2> sources; |
3211 | if (imm(right) && isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Imm, Arg::Tmp)) { |
3212 | sources.append(tmp(left)); |
3213 | sources.append(imm(right)); |
3214 | } else if (imm(right) && isValidForm(opcode, Arg::ResCond, Arg::Imm, Arg::Tmp)) { |
3215 | sources.append(imm(right)); |
3216 | append(Move, tmp(left), result); |
3217 | } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp, Arg::Tmp)) { |
3218 | sources.append(tmp(left)); |
3219 | sources.append(tmp(right)); |
3220 | } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp)) { |
3221 | if (commutativity == Commutative && preferRightForResult(left, right)) { |
3222 | sources.append(tmp(left)); |
3223 | append(Move, tmp(right), result); |
3224 | } else { |
3225 | sources.append(tmp(right)); |
3226 | append(Move, tmp(left), result); |
3227 | } |
3228 | } else if (isValidForm(opcode, Arg::ResCond, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp, Arg::Tmp)) { |
3229 | sources.append(tmp(left)); |
3230 | sources.append(tmp(right)); |
3231 | sources.append(m_code.newTmp(m_value->resultBank())); |
3232 | sources.append(m_code.newTmp(m_value->resultBank())); |
3233 | } |
3234 | |
3235 | // There is a really hilarious case that arises when we do BranchAdd32(%x, %x). We won't emit |
3236 | // such code, but the coalescing in our register allocator also does copy propagation, so |
3237 | // although we emit: |
3238 | // |
3239 | // Move %tmp1, %tmp2 |
3240 | // BranchAdd32 %tmp1, %tmp2 |
3241 | // |
3242 | // The register allocator may turn this into: |
3243 | // |
3244 | // BranchAdd32 %rax, %rax |
3245 | // |
3246 | // Currently we handle this by ensuring that even this kind of addition can be undone. We can |
3247 | // undo it by using the carry flag. It's tempting to get rid of that code and just "fix" this |
3248 | // here by forcing LateUse on the stackmap. If we did that unconditionally, we'd lose a lot of |
3249 | // performance. So it's tempting to do it only if left == right. But that creates an awkward |
3250 | // constraint on Air: it means that Air would not be allowed to do any copy propagation. |
3251 | // Notice that the %rax,%rax situation happened after Air copy-propagated the Move we are |
            // emitting. We know that copy-propagating over that Move causes add-to-self. But what if
            // we emit something like a Move, or do other kinds of copy-propagation on tmps, somewhere
            // else in this code? The add-to-self situation may only emerge after some other Air
            // optimizations remove other Moves or identity-like operations. That's why we don't use
3256 | // LateUse here to take care of add-to-self. |
3257 | |
3258 | CheckSpecial* special = ensureCheckSpecial(opcode, 2 + sources.size(), stackmapRole); |
3259 | |
3260 | Inst inst(Patch, checkValue, Arg::special(special)); |
3261 | |
3262 | inst.args.append(Arg::resCond(MacroAssembler::Overflow)); |
3263 | |
3264 | inst.args.appendVector(sources); |
3265 | inst.args.append(result); |
3266 | |
3267 | fillStackmap(inst, checkValue, 2); |
3268 | |
3269 | m_insts.last().append(WTFMove(inst)); |
3270 | return; |
3271 | } |
3272 | |
3273 | case Check: { |
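            // A Check lowers to the branch we would emit for its predicate, wrapped in a Patch over
            // a CheckSpecial: the branch's args are reused and the stackmap (children from index 1
            // onwards) is appended after them.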
3274 | Inst branch = createBranch(m_value->child(0)); |
3275 | |
3276 | CheckSpecial* special = ensureCheckSpecial(branch); |
3277 | |
3278 | CheckValue* checkValue = m_value->as<CheckValue>(); |
3279 | |
3280 | Inst inst(Patch, checkValue, Arg::special(special)); |
3281 | inst.args.appendVector(branch.args); |
3282 | |
3283 | fillStackmap(inst, checkValue, 1); |
3284 | |
3285 | m_insts.last().append(WTFMove(inst)); |
3286 | return; |
3287 | } |
3288 | |
3289 | case B3::WasmBoundsCheck: { |
3290 | WasmBoundsCheckValue* value = m_value->as<WasmBoundsCheckValue>(); |
3291 | |
3292 | Value* ptr = value->child(0); |
3293 | Tmp pointer = tmp(ptr); |
3294 | |
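            // Compute pointer + offset into a fresh tmp. Move32 zero-extends the 32-bit pointer, and
            // the offset is added either as a small immediate or via a materialized bigImm.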
3295 | Arg ptrPlusImm = m_code.newTmp(GP); |
3296 | append(Inst(Move32, value, pointer, ptrPlusImm)); |
3297 | if (value->offset()) { |
3298 | if (imm(value->offset())) |
3299 | append(Add64, imm(value->offset()), ptrPlusImm); |
3300 | else { |
3301 | Arg bigImm = m_code.newTmp(GP); |
3302 | append(Move, Arg::bigImm(value->offset()), bigImm); |
3303 | append(Add64, bigImm, ptrPlusImm); |
3304 | } |
3305 | } |
3306 | |
3307 | Arg limit; |
3308 | switch (value->boundsType()) { |
3309 | case WasmBoundsCheckValue::Type::Pinned: |
3310 | limit = Arg(value->bounds().pinnedSize); |
3311 | break; |
3312 | |
3313 | case WasmBoundsCheckValue::Type::Maximum: |
3314 | limit = m_code.newTmp(GP); |
3315 | if (imm(value->bounds().maximum)) |
3316 | append(Move, imm(value->bounds().maximum), limit); |
3317 | else |
3318 | append(Move, Arg::bigImm(value->bounds().maximum), limit); |
3319 | break; |
3320 | } |
3321 | |
3322 | append(Inst(Air::WasmBoundsCheck, value, ptrPlusImm, limit)); |
3323 | return; |
3324 | } |
3325 | |
3326 | case Upsilon: { |
3327 | Value* value = m_value->child(0); |
3328 | Value* phi = m_value->as<UpsilonValue>()->phi(); |
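            // A scalar Upsilon is a single move into the Phi's Tmp (m_phiToTmp); a tuple Upsilon
            // moves each element into the corresponding Tmp from m_tuplePhiToTmps.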
3329 | if (value->type().isNumeric()) { |
3330 | append(relaxedMoveForType(value->type()), immOrTmp(value), m_phiToTmp[phi]); |
3331 | return; |
3332 | } |
3333 | |
3334 | const Vector<Type>& tuple = m_procedure.tupleForType(value->type()); |
3335 | const auto& valueTmps = tmpsForTuple(value); |
3336 | const auto& phiTmps = m_tuplePhiToTmps.find(phi)->value; |
3337 | ASSERT(valueTmps.size() == phiTmps.size()); |
3338 | for (unsigned i = 0; i < valueTmps.size(); ++i) |
3339 | append(relaxedMoveForType(tuple[i]), valueTmps[i], phiTmps[i]); |
3340 | return; |
3341 | } |
3342 | |
3343 | case Phi: { |
3344 | // Snapshot the value of the Phi. It may change under us because you could do: |
3345 | // a = Phi() |
3346 | // Upsilon(@x, ^a) |
3347 | // @a => this should get the value of the Phi before the Upsilon, i.e. not @x. |
3348 | |
3349 | if (m_value->type().isNumeric()) { |
3350 | append(relaxedMoveForType(m_value->type()), m_phiToTmp[m_value], tmp(m_value)); |
3351 | return; |
3352 | } |
3353 | |
3354 | const Vector<Type>& tuple = m_procedure.tupleForType(m_value->type()); |
3355 | const auto& valueTmps = tmpsForTuple(m_value); |
3356 | const auto& phiTmps = m_tuplePhiToTmps.find(m_value)->value; |
3357 | ASSERT(valueTmps.size() == phiTmps.size()); |
3358 | for (unsigned i = 0; i < valueTmps.size(); ++i) |
3359 | append(relaxedMoveForType(tuple[i]), phiTmps[i], valueTmps[i]); |
3360 | return; |
3361 | } |
3362 | |
3363 | case Set: { |
3364 | Value* value = m_value->child(0); |
3365 | const Vector<Tmp>& variableTmps = m_variableToTmps.get(m_value->as<VariableValue>()->variable()); |
3366 | forEachImmOrTmp(value, [&] (Arg immOrTmp, Type type, unsigned index) { |
3367 | append(relaxedMoveForType(type), immOrTmp, variableTmps[index]); |
3368 | }); |
3369 | return; |
3370 | } |
3371 | |
3372 | case Get: { |
3373 | // Snapshot the value of the Get. It may change under us because you could do: |
3374 | // a = Get(var) |
3375 | // Set(@x, var) |
3376 | // @a => this should get the value of the Get before the Set, i.e. not @x. |
3377 | |
3378 | const Vector<Tmp>& variableTmps = m_variableToTmps.get(m_value->as<VariableValue>()->variable()); |
3379 | forEachImmOrTmp(m_value, [&] (Arg tmp, Type type, unsigned index) { |
3380 | append(relaxedMoveForType(type), variableTmps[index], tmp.tmp()); |
3381 | }); |
3382 | return; |
3383 | } |
3384 | |
3385 | case Branch: { |
3386 | if (canBeInternal(m_value->child(0))) { |
3387 | Value* branchChild = m_value->child(0); |
3388 | |
3389 | switch (branchChild->opcode()) { |
3390 | case BitAnd: { |
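                // Try to turn Branch(BitAnd(...)) into a single bit-test branch (BranchTestBit32/64).
                // We recognize (val >> x) & 1 and val & (1 << x), and a BitXor with -1 on the tested
                // value flips the sense of the test.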
3391 | Value* andValue = branchChild->child(0); |
3392 | Value* andMask = branchChild->child(1); |
3393 | Air::Opcode opcode = opcodeForType(BranchTestBit32, BranchTestBit64, andValue->type()); |
3394 | |
3395 | Value* testValue = nullptr; |
3396 | Value* bitOffset = nullptr; |
3397 | Value* internalNode = nullptr; |
3398 | Value* negationNode = nullptr; |
3399 | bool inverted = false; |
3400 | |
3401 | // if (~(val >> x)&1) |
3402 | if (andMask->isInt(1) |
3403 | && andValue->opcode() == BitXor && (andValue->child(1)->isInt32(-1) || andValue->child(1)->isInt64(-1l)) |
3404 | && (andValue->child(0)->opcode() == SShr || andValue->child(0)->opcode() == ZShr)) { |
3405 | |
3406 | negationNode = andValue; |
3407 | testValue = andValue->child(0)->child(0); |
3408 | bitOffset = andValue->child(0)->child(1); |
3409 | internalNode = andValue->child(0); |
3410 | inverted = !inverted; |
3411 | } |
3412 | |
3413 | // Turn if ((val >> x)&1) -> Bt val x |
3414 | if (andMask->isInt(1) && (andValue->opcode() == SShr || andValue->opcode() == ZShr)) { |
3415 | testValue = andValue->child(0); |
3416 | bitOffset = andValue->child(1); |
3417 | internalNode = andValue; |
3418 | } |
3419 | |
3420 | // Turn if (val & (1<<x)) -> Bt val x |
3421 | if ((andMask->opcode() == Shl) && andMask->child(0)->isInt(1)) { |
3422 | testValue = andValue; |
3423 | bitOffset = andMask->child(1); |
3424 | internalNode = andMask; |
3425 | } |
3426 | |
3427 | // if (~val & (1<<x)) or if ((~val >> x)&1) |
3428 | if (!negationNode && testValue && testValue->opcode() == BitXor && (testValue->child(1)->isInt32(-1) || testValue->child(1)->isInt64(-1l))) { |
3429 | negationNode = testValue; |
3430 | testValue = testValue->child(0); |
3431 | inverted = !inverted; |
3432 | } |
3433 | |
3434 | if (testValue && bitOffset) { |
3435 | for (auto& basePromise : Vector<ArgPromise>::from(loadPromise(testValue), tmpPromise(testValue))) { |
3436 | bool hasLoad = basePromise.kind() != Arg::Tmp; |
3437 | bool canMakeInternal = (hasLoad ? canBeInternal(testValue) : !m_locked.contains(testValue)) |
3438 | && (!negationNode || canBeInternal(negationNode)) |
3439 | && (!internalNode || canBeInternal(internalNode)); |
3440 | |
3441 | if (basePromise && canMakeInternal) { |
3442 | if (bitOffset->hasInt() && isValidForm(opcode, Arg::ResCond, basePromise.kind(), Arg::Imm)) { |
3443 | commitInternal(branchChild); |
3444 | commitInternal(internalNode); |
3445 | if (hasLoad) |
3446 | commitInternal(testValue); |
3447 | commitInternal(negationNode); |
3448 | append(basePromise.inst(opcode, m_value, Arg::resCond(MacroAssembler::NonZero).inverted(inverted), basePromise.consume(*this), Arg::imm(bitOffset->asInt()))); |
3449 | return; |
3450 | } |
3451 | |
3452 | if (!m_locked.contains(bitOffset) && isValidForm(opcode, Arg::ResCond, basePromise.kind(), Arg::Tmp)) { |
3453 | commitInternal(branchChild); |
3454 | commitInternal(internalNode); |
3455 | if (hasLoad) |
3456 | commitInternal(testValue); |
3457 | commitInternal(negationNode); |
3458 | append(basePromise.inst(opcode, m_value, Arg::resCond(MacroAssembler::NonZero).inverted(inverted), basePromise.consume(*this), tmp(bitOffset))); |
3459 | return; |
3460 | } |
3461 | } |
3462 | } |
3463 | } |
3464 | break; |
3465 | } |
3466 | case AtomicWeakCAS: |
3467 | commitInternal(branchChild); |
3468 | appendCAS(branchChild, false); |
3469 | return; |
3470 | |
3471 | case AtomicStrongCAS: |
3472 | // A branch is a comparison to zero. |
3473 | // FIXME: Teach this to match patterns that arise from subwidth CAS. |
3474 | // https://bugs.webkit.org/show_bug.cgi?id=169250 |
3475 | if (branchChild->child(0)->isInt(0) |
3476 | && branchChild->as<AtomicValue>()->isCanonicalWidth()) { |
3477 | commitInternal(branchChild); |
3478 | appendCAS(branchChild, true); |
3479 | return; |
3480 | } |
3481 | break; |
3482 | |
3483 | case Equal: |
3484 | case NotEqual: |
3485 | // FIXME: Teach this to match patterns that arise from subwidth CAS. |
3486 | // https://bugs.webkit.org/show_bug.cgi?id=169250 |
3487 | if (branchChild->child(0)->opcode() == AtomicStrongCAS |
3488 | && branchChild->child(0)->as<AtomicValue>()->isCanonicalWidth() |
3489 | && canBeInternal(branchChild->child(0)) |
3490 | && branchChild->child(0)->child(0) == branchChild->child(1)) { |
3491 | commitInternal(branchChild); |
3492 | commitInternal(branchChild->child(0)); |
3493 | appendCAS(branchChild->child(0), branchChild->opcode() == NotEqual); |
3494 | return; |
3495 | } |
3496 | break; |
3497 | |
3498 | default: |
3499 | break; |
3500 | } |
3501 | } |
3502 | |
3503 | m_insts.last().append(createBranch(m_value->child(0))); |
3504 | return; |
3505 | } |
3506 | |
3507 | case B3::Jump: { |
3508 | append(Air::Jump); |
3509 | return; |
3510 | } |
3511 | |
3512 | case Identity: |
3513 | case Opaque: { |
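            // Identity and Opaque emit no code: the value shares its child's Tmp.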
3514 | ASSERT(tmp(m_value->child(0)) == tmp(m_value)); |
3515 | return; |
3516 | } |
3517 | |
3518 | case Return: { |
3519 | if (!m_value->numChildren()) { |
3520 | append(RetVoid); |
3521 | return; |
3522 | } |
3523 | Value* value = m_value->child(0); |
3524 | Tmp returnValueGPR = Tmp(GPRInfo::returnValueGPR); |
3525 | Tmp returnValueFPR = Tmp(FPRInfo::returnValueFPR); |
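            // Move the result into the ABI return register for its type and emit the matching Ret*
            // opcode.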
3526 | switch (value->type().kind()) { |
3527 | case Void: |
3528 | case Tuple: |
                // It's impossible for a Void value to be used as a child; void returns use RetVoid.
                // Tuple returns are not handled here either.
3531 | RELEASE_ASSERT_NOT_REACHED(); |
3532 | break; |
3533 | case Int32: |
3534 | append(Move, immOrTmp(value), returnValueGPR); |
3535 | append(Ret32, returnValueGPR); |
3536 | break; |
3537 | case Int64: |
3538 | append(Move, immOrTmp(value), returnValueGPR); |
3539 | append(Ret64, returnValueGPR); |
3540 | break; |
3541 | case Float: |
3542 | append(MoveFloat, tmp(value), returnValueFPR); |
3543 | append(RetFloat, returnValueFPR); |
3544 | break; |
3545 | case Double: |
3546 | append(MoveDouble, tmp(value), returnValueFPR); |
3547 | append(RetDouble, returnValueFPR); |
3548 | break; |
3549 | } |
3550 | return; |
3551 | } |
3552 | |
3553 | case B3::Oops: { |
3554 | append(Air::Oops); |
3555 | return; |
3556 | } |
3557 | |
3558 | case B3::EntrySwitch: { |
3559 | append(Air::EntrySwitch); |
3560 | return; |
3561 | } |
3562 | |
3563 | case AtomicWeakCAS: |
3564 | case AtomicStrongCAS: { |
3565 | appendCAS(m_value, false); |
3566 | return; |
3567 | } |
3568 | |
3569 | case AtomicXchgAdd: { |
3570 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
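            // When the result is unused, appendVoidAtomic() can emit a plain AtomicAdd to memory.
            // Otherwise try the fused AtomicXchgAdd form for this address kind, and fall back to
            // appendGeneralAtomic()'s generic read-modify-write lowering.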
3571 | if (appendVoidAtomic(OPCODE_FOR_WIDTH(AtomicAdd, atomic->accessWidth()))) |
3572 | return; |
3573 | |
3574 | Arg address = addr(atomic); |
3575 | Air::Opcode opcode = OPCODE_FOR_WIDTH(AtomicXchgAdd, atomic->accessWidth()); |
3576 | if (isValidForm(opcode, Arg::Tmp, address.kind())) { |
3577 | append(relaxedMoveForType(atomic->type()), tmp(atomic->child(0)), tmp(atomic)); |
3578 | append(opcode, tmp(atomic), address); |
3579 | return; |
3580 | } |
3581 | |
3582 | appendGeneralAtomic(OPCODE_FOR_CANONICAL_WIDTH(Add, atomic->accessWidth()), Commutative); |
3583 | return; |
3584 | } |
3585 | |
3586 | case AtomicXchgSub: { |
3587 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
3588 | if (appendVoidAtomic(OPCODE_FOR_WIDTH(AtomicSub, atomic->accessWidth()))) |
3589 | return; |
3590 | |
3591 | appendGeneralAtomic(OPCODE_FOR_CANONICAL_WIDTH(Sub, atomic->accessWidth())); |
3592 | return; |
3593 | } |
3594 | |
3595 | case AtomicXchgAnd: { |
3596 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
3597 | if (appendVoidAtomic(OPCODE_FOR_WIDTH(AtomicAnd, atomic->accessWidth()))) |
3598 | return; |
3599 | |
3600 | appendGeneralAtomic(OPCODE_FOR_CANONICAL_WIDTH(And, atomic->accessWidth()), Commutative); |
3601 | return; |
3602 | } |
3603 | |
3604 | case AtomicXchgOr: { |
3605 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
3606 | if (appendVoidAtomic(OPCODE_FOR_WIDTH(AtomicOr, atomic->accessWidth()))) |
3607 | return; |
3608 | |
3609 | appendGeneralAtomic(OPCODE_FOR_CANONICAL_WIDTH(Or, atomic->accessWidth()), Commutative); |
3610 | return; |
3611 | } |
3612 | |
3613 | case AtomicXchgXor: { |
3614 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
3615 | if (appendVoidAtomic(OPCODE_FOR_WIDTH(AtomicXor, atomic->accessWidth()))) |
3616 | return; |
3617 | |
3618 | appendGeneralAtomic(OPCODE_FOR_CANONICAL_WIDTH(Xor, atomic->accessWidth()), Commutative); |
3619 | return; |
3620 | } |
3621 | |
3622 | case AtomicXchg: { |
3623 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
3624 | |
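            // Plain exchange: use the native AtomicXchg form when Air has one for this address kind;
            // otherwise fall back to appendGeneralAtomic() with Air::Nop as the operation, meaning
            // the new value is stored without being combined with the old one.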
3625 | Arg address = addr(atomic); |
3626 | Air::Opcode opcode = OPCODE_FOR_WIDTH(AtomicXchg, atomic->accessWidth()); |
3627 | if (isValidForm(opcode, Arg::Tmp, address.kind())) { |
3628 | append(relaxedMoveForType(atomic->type()), tmp(atomic->child(0)), tmp(atomic)); |
3629 | append(opcode, tmp(atomic), address); |
3630 | return; |
3631 | } |
3632 | |
3633 | appendGeneralAtomic(Air::Nop); |
3634 | return; |
3635 | } |
3636 | |
3637 | default: |
3638 | break; |
3639 | } |
3640 | |
        dataLog("FATAL: could not lower ", deepDump(m_procedure, m_value), "\n");
3642 | RELEASE_ASSERT_NOT_REACHED(); |
3643 | } |
3644 | |
3645 | IndexSet<Value*> m_locked; // These are values that will have no Tmp in Air. |
3646 | IndexMap<Value*, Tmp> m_valueToTmp; // These are values that must have a Tmp in Air. We say that a Value* with a non-null Tmp is "pinned". |
3647 | IndexMap<Value*, Tmp> m_phiToTmp; // Each Phi gets its own Tmp. |
3648 | HashMap<Value*, Vector<Tmp>> m_tupleValueToTmps; // This is the same as m_valueToTmp for Values that are Tuples. |
3649 | HashMap<Value*, Vector<Tmp>> m_tuplePhiToTmps; // This is the same as m_phiToTmp for Phis that are Tuples. |
3650 | IndexMap<B3::BasicBlock*, Air::BasicBlock*> m_blockToBlock; |
3651 | HashMap<B3::StackSlot*, Air::StackSlot*> m_stackToStack; |
3652 | HashMap<Variable*, Vector<Tmp>> m_variableToTmps; |
3653 | |
3654 | UseCounts m_useCounts; |
3655 | PhiChildren m_phiChildren; |
3656 | BlockWorklist m_fastWorklist; |
3657 | Dominators& m_dominators; |
3658 | |
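    // One inner list of Insts per B3 value in the block being lowered; m_insts.last() holds the
    // Insts for the value currently being lowered (m_value).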
3659 | Vector<Vector<Inst, 4>> m_insts; |
3660 | Vector<Inst> m_prologue; |
3661 | |
3662 | B3::BasicBlock* m_block; |
3663 | bool m_isRare; |
3664 | unsigned m_index; |
3665 | Value* m_value; |
3666 | |
3667 | PatchpointSpecial* m_patchpointSpecial { nullptr }; |
3668 | HashMap<CheckSpecial::Key, CheckSpecial*> m_checkSpecials; |
3669 | |
3670 | Procedure& m_procedure; |
3671 | Code& m_code; |
3672 | |
3673 | Air::BlockInsertionSet m_blockInsertionSet; |
3674 | |
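    // Tmps pinned to specific x86 registers, for instructions whose operands are hardwired there
    // (e.g. shift counts in ecx, division in eax/edx).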
3675 | Tmp m_eax; |
3676 | Tmp m_ecx; |
3677 | Tmp m_edx; |
3678 | }; |
3679 | |
3680 | } // anonymous namespace |
3681 | |
3682 | void lowerToAir(Procedure& procedure) |
3683 | { |
    PhaseScope phaseScope(procedure, "lowerToAir");
3685 | LowerToAir lowerToAir(procedure); |
3686 | lowerToAir.run(); |
3687 | } |
3688 | |
3689 | } } // namespace JSC::B3 |
3690 | |
3691 | #if ASSERT_DISABLED |
3692 | IGNORE_RETURN_TYPE_WARNINGS_END |
3693 | #endif |
3694 | |
3695 | #endif // ENABLE(B3_JIT) |
3696 | |