1 | /* |
2 | * Copyright (C) 2016-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "B3EliminateCommonSubexpressions.h" |
28 | |
29 | #if ENABLE(B3_JIT) |
30 | |
31 | #include "B3BasicBlockInlines.h" |
32 | #include "B3BlockWorklist.h" |
33 | #include "B3Dominators.h" |
34 | #include "B3HeapRange.h" |
35 | #include "B3InsertionSetInlines.h" |
36 | #include "B3MemoryValue.h" |
37 | #include "B3PhaseScope.h" |
38 | #include "B3ProcedureInlines.h" |
39 | #include "B3PureCSE.h" |
40 | #include "B3SlotBaseValue.h" |
41 | #include "B3StackSlot.h" |
42 | #include "B3ValueKey.h" |
43 | #include "B3ValueInlines.h" |
44 | #include "B3Variable.h" |
45 | #include "B3VariableValue.h" |
46 | #include "DFGGraph.h" |
47 | #include <wtf/CommaPrinter.h> |
48 | #include <wtf/HashMap.h> |
49 | #include <wtf/ListDump.h> |
50 | #include <wtf/RangeSet.h> |
51 | |
52 | namespace JSC { namespace B3 { |
53 | |
54 | namespace { |
55 | |
56 | namespace B3EliminateCommonSubexpressionsInternal { |
57 | static constexpr bool verbose = false; |
58 | } |
59 | |
60 | // FIXME: We could treat Patchpoints with a non-empty set of reads as a "memory value" and somehow |
// eliminate redundant ones. We would need some way of determining if two patchpoints are replaceable.
62 | // It doesn't seem right to use the reads set for this. We could use the generator, but that feels |
63 | // lame because the FTL will pretty much use a unique generator for each patchpoint even when two |
64 | // patchpoints have the same semantics as far as CSE would be concerned. We could invent something |
65 | // like a "value ID" for patchpoints. By default, each one gets a unique value ID, but FTL could force |
66 | // some patchpoints to share the same one as a signal that they will return the same value if executed |
67 | // in the same heap with the same inputs. |
68 | |
69 | typedef Vector<MemoryValue*, 1> MemoryMatches; |
70 | |
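// Tracks memory operations keyed by the pointer they access through (the MemoryValue's last
// child). Lookups start from that pointer and are then narrowed by a caller-supplied filter.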
71 | class MemoryValueMap { |
72 | public: |
73 | MemoryValueMap() { } |
74 | |
75 | void add(MemoryValue* memory) |
76 | { |
77 | Matches& matches = m_map.add(memory->lastChild(), Matches()).iterator->value; |
78 | if (matches.contains(memory)) |
79 | return; |
80 | matches.append(memory); |
81 | } |
82 | |
83 | template<typename Functor> |
84 | void removeIf(const Functor& functor) |
85 | { |
86 | m_map.removeIf( |
87 | [&] (HashMap<Value*, Matches>::KeyValuePairType& entry) -> bool { |
88 | entry.value.removeAllMatching( |
89 | [&] (Value* value) -> bool { |
90 | if (MemoryValue* memory = value->as<MemoryValue>()) |
91 | return functor(memory); |
92 | return true; |
93 | }); |
94 | return entry.value.isEmpty(); |
95 | }); |
96 | } |
97 | |
98 | Matches* find(Value* ptr) |
99 | { |
100 | auto iter = m_map.find(ptr); |
101 | if (iter == m_map.end()) |
102 | return nullptr; |
103 | return &iter->value; |
104 | } |
105 | |
106 | template<typename Functor> |
107 | MemoryValue* find(Value* ptr, const Functor& functor) |
108 | { |
109 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
110 | dataLog(" Looking for " , pointerDump(ptr), " in " , *this, "\n" ); |
111 | if (Matches* matches = find(ptr)) { |
112 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
113 | dataLog(" Matches: " , pointerListDump(*matches), "\n" ); |
114 | for (Value* candidateValue : *matches) { |
115 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
116 | dataLog(" Having candidate: " , pointerDump(candidateValue), "\n" ); |
117 | if (MemoryValue* candidateMemory = candidateValue->as<MemoryValue>()) { |
118 | if (functor(candidateMemory)) |
119 | return candidateMemory; |
120 | } |
121 | } |
122 | } |
123 | return nullptr; |
124 | } |
125 | |
126 | void dump(PrintStream& out) const |
127 | { |
128 | out.print("{" ); |
129 | CommaPrinter comma; |
130 | for (auto& entry : m_map) |
131 | out.print(comma, pointerDump(entry.key), "=>" , pointerListDump(entry.value)); |
132 | out.print("}" ); |
133 | } |
134 | |
135 | private: |
136 | // This uses Matches for two reasons: |
137 | // - It cannot be a MemoryValue* because the key is imprecise. Many MemoryValues could have the |
138 | // same key while being unaliased. |
139 | // - It can't be a MemoryMatches array because the MemoryValue*'s could be turned into Identity's. |
140 | HashMap<Value*, Matches> m_map; |
141 | }; |
142 | |
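// Per-block summary used by the CSE pass below: the heap ranges the block reads and writes,
// whether it contains a fence, the stores at the head of the block that nothing earlier in the
// block can observe or clobber, and the memory values still available at the end of the block.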
143 | struct ImpureBlockData { |
144 | void dump(PrintStream& out) const |
145 | { |
146 | out.print( |
147 | "{reads = " , reads, ", writes = " , writes, ", storesAtHead = " , storesAtHead, |
148 | ", memoryValuesAtTail = " , memoryValuesAtTail, "}" ); |
149 | } |
150 | |
151 | RangeSet<HeapRange> reads; // This only gets used for forward store elimination. |
152 | RangeSet<HeapRange> writes; // This gets used for both load and store elimination. |
bool fence { false };
154 | |
155 | MemoryValueMap storesAtHead; |
156 | MemoryValueMap memoryValuesAtTail; |
157 | }; |
158 | |
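// Roughly, the memory half of this phase removes redundancies like the following (assuming that
// nothing in between writes to the load's heap range):
//
// Store(@x, @ptr, offset = 8)
// ...
// a = Load(@ptr, offset = 8)
//
// becomes
//
// Store(@x, @ptr, offset = 8)
// ...
// a = Identity(@x)
//
// Redundant loads from the same location collapse the same way, and stores proven redundant
// (either backward or forward) are replaced with Nop.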
159 | class CSE { |
160 | public: |
161 | CSE(Procedure& proc) |
162 | : m_proc(proc) |
163 | , m_dominators(proc.dominators()) |
164 | , m_impureBlockData(proc.size()) |
165 | , m_insertionSet(proc) |
166 | { |
167 | } |
168 | |
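// Runs the phase: first summarize each block's impure effects, then CSE blocks in reverse post
// order, then insert any Sets that were requested for other blocks. Returns true if the IR
// changed.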
169 | bool run() |
170 | { |
171 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
172 | dataLog("B3 before CSE:\n" , m_proc); |
173 | |
174 | m_proc.resetValueOwners(); |
175 | |
176 | // Summarize the impure effects of each block, and the impure values available at the end of |
177 | // each block. This doesn't edit code yet. |
178 | for (BasicBlock* block : m_proc) { |
179 | ImpureBlockData& data = m_impureBlockData[block]; |
180 | for (Value* value : *block) { |
181 | Effects effects = value->effects(); |
182 | MemoryValue* memory = value->as<MemoryValue>(); |
183 | |
184 | if (memory && memory->isStore() |
185 | && !data.reads.overlaps(memory->range()) |
186 | && !data.writes.overlaps(memory->range()) |
187 | && (!data.fence || !memory->hasFence())) |
188 | data.storesAtHead.add(memory); |
189 | data.reads.add(effects.reads); |
190 | |
191 | if (HeapRange writes = effects.writes) |
192 | clobber(data, writes); |
193 | data.fence |= effects.fence; |
194 | |
195 | if (memory) |
196 | data.memoryValuesAtTail.add(memory); |
197 | } |
198 | |
199 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
200 | dataLog("Block " , *block, ": " , data, "\n" ); |
201 | } |
202 | |
203 | // Perform CSE. This edits code. |
204 | Vector<BasicBlock*> postOrder = m_proc.blocksInPostOrder(); |
205 | for (unsigned i = postOrder.size(); i--;) { |
206 | m_block = postOrder[i]; |
207 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
208 | dataLog("Looking at " , *m_block, ":\n" ); |
209 | |
210 | m_data = ImpureBlockData(); |
211 | for (m_index = 0; m_index < m_block->size(); ++m_index) { |
212 | m_value = m_block->at(m_index); |
213 | process(); |
214 | } |
215 | m_insertionSet.execute(m_block); |
216 | m_impureBlockData[m_block] = m_data; |
217 | } |
218 | |
219 | // The previous pass might have requested that we insert code in some basic block other than |
220 | // the one that it was looking at. This inserts them. |
221 | for (BasicBlock* block : m_proc) { |
222 | for (unsigned valueIndex = 0; valueIndex < block->size(); ++valueIndex) { |
223 | auto iter = m_sets.find(block->at(valueIndex)); |
224 | if (iter == m_sets.end()) |
225 | continue; |
226 | |
227 | for (Value* value : iter->value) |
228 | m_insertionSet.insertValue(valueIndex + 1, value); |
229 | } |
230 | m_insertionSet.execute(block); |
231 | } |
232 | |
233 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
234 | dataLog("B3 after CSE:\n" , m_proc); |
235 | |
236 | return m_changed; |
237 | } |
238 | |
239 | private: |
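// CSE for a single value: substitution, pure CSE, backward store elimination (before this
// value's own writes are applied), clobbering of the tracked state, then load CSE and forward
// store elimination.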
240 | void process() |
241 | { |
242 | m_value->performSubstitution(); |
243 | |
244 | if (m_pureCSE.process(m_value, m_dominators)) { |
245 | ASSERT(!m_value->effects().writes); |
246 | m_changed = true; |
247 | return; |
248 | } |
249 | |
250 | MemoryValue* memory = m_value->as<MemoryValue>(); |
251 | if (memory && processMemoryBeforeClobber(memory)) |
252 | return; |
253 | |
254 | if (HeapRange writes = m_value->effects().writes) |
255 | clobber(m_data, writes); |
256 | |
257 | if (memory) |
258 | processMemoryAfterClobber(memory); |
259 | } |
260 | |
261 | // Return true if we got rid of the operation. If you changed IR in this function, you have to |
262 | // set m_changed even if you also return true. |
263 | bool processMemoryBeforeClobber(MemoryValue* memory) |
264 | { |
265 | Value* value = memory->child(0); |
266 | Value* ptr = memory->lastChild(); |
267 | HeapRange range = memory->range(); |
268 | Value::OffsetType offset = memory->offset(); |
269 | |
270 | switch (memory->opcode()) { |
271 | case Store8: |
272 | return handleStoreBeforeClobber( |
273 | ptr, range, |
274 | [&] (MemoryValue* candidate) -> bool { |
275 | return candidate->offset() == offset |
276 | && ((candidate->opcode() == Store8 && candidate->child(0) == value) |
277 | || ((candidate->opcode() == Load8Z || candidate->opcode() == Load8S) |
278 | && candidate == value)); |
279 | }); |
280 | case Store16: |
281 | return handleStoreBeforeClobber( |
282 | ptr, range, |
283 | [&] (MemoryValue* candidate) -> bool { |
284 | return candidate->offset() == offset |
285 | && ((candidate->opcode() == Store16 && candidate->child(0) == value) |
286 | || ((candidate->opcode() == Load16Z || candidate->opcode() == Load16S) |
287 | && candidate == value)); |
288 | }); |
289 | case Store: |
290 | return handleStoreBeforeClobber( |
291 | ptr, range, |
292 | [&] (MemoryValue* candidate) -> bool { |
293 | return candidate->offset() == offset |
294 | && ((candidate->opcode() == Store && candidate->child(0) == value) |
295 | || (candidate->opcode() == Load && candidate == value)); |
296 | }); |
297 | default: |
298 | return false; |
299 | } |
300 | } |
301 | |
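// Records a write to the given range and forgets any tracked memory values that the write may
// alias.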
302 | void clobber(ImpureBlockData& data, HeapRange writes) |
303 | { |
304 | data.writes.add(writes); |
305 | |
306 | data.memoryValuesAtTail.removeIf( |
307 | [&] (MemoryValue* memory) { |
308 | return memory->range().overlaps(writes); |
309 | }); |
310 | } |
311 | |
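// Handles load CSE and forward store elimination for this memory value, after its own writes
// have been added to the block summary.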
312 | void processMemoryAfterClobber(MemoryValue* memory) |
313 | { |
314 | Value* ptr = memory->lastChild(); |
315 | HeapRange range = memory->range(); |
316 | Value::OffsetType offset = memory->offset(); |
317 | Type type = memory->type(); |
318 | |
319 | // FIXME: Empower this to insert more casts and shifts. For example, a Load8 could match a |
320 | // Store and mask the result. You could even have: |
321 | // |
322 | // Store(@value, @ptr, offset = 0) |
323 | // Load8Z(@ptr, offset = 2) |
324 | // |
325 | // Which could be turned into something like this: |
326 | // |
327 | // Store(@value, @ptr, offset = 0) |
328 | // ZShr(@value, 16) |
329 | |
330 | switch (memory->opcode()) { |
331 | case Load8Z: { |
332 | handleMemoryValue( |
333 | ptr, range, |
334 | [&] (MemoryValue* candidate) -> bool { |
335 | return candidate->offset() == offset |
336 | && (candidate->opcode() == Load8Z || candidate->opcode() == Store8); |
337 | }, |
338 | [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* { |
339 | if (match->opcode() == Store8) { |
340 | Value* mask = m_proc.add<Const32Value>(m_value->origin(), 0xff); |
341 | fixups.append(mask); |
342 | Value* zext = m_proc.add<Value>( |
343 | BitAnd, m_value->origin(), match->child(0), mask); |
344 | fixups.append(zext); |
345 | return zext; |
346 | } |
347 | return nullptr; |
348 | }); |
349 | break; |
350 | } |
351 | |
352 | case Load8S: { |
353 | handleMemoryValue( |
354 | ptr, range, |
355 | [&] (MemoryValue* candidate) -> bool { |
356 | return candidate->offset() == offset |
357 | && (candidate->opcode() == Load8S || candidate->opcode() == Store8); |
358 | }, |
359 | [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* { |
360 | if (match->opcode() == Store8) { |
361 | Value* sext = m_proc.add<Value>( |
362 | SExt8, m_value->origin(), match->child(0)); |
363 | fixups.append(sext); |
364 | return sext; |
365 | } |
366 | return nullptr; |
367 | }); |
368 | break; |
369 | } |
370 | |
371 | case Load16Z: { |
372 | handleMemoryValue( |
373 | ptr, range, |
374 | [&] (MemoryValue* candidate) -> bool { |
375 | return candidate->offset() == offset |
376 | && (candidate->opcode() == Load16Z || candidate->opcode() == Store16); |
377 | }, |
378 | [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* { |
379 | if (match->opcode() == Store16) { |
380 | Value* mask = m_proc.add<Const32Value>(m_value->origin(), 0xffff); |
381 | fixups.append(mask); |
382 | Value* zext = m_proc.add<Value>( |
383 | BitAnd, m_value->origin(), match->child(0), mask); |
384 | fixups.append(zext); |
385 | return zext; |
386 | } |
387 | return nullptr; |
388 | }); |
389 | break; |
390 | } |
391 | |
392 | case Load16S: { |
393 | handleMemoryValue( |
394 | ptr, range, [&] (MemoryValue* candidate) -> bool { |
395 | return candidate->offset() == offset |
396 | && (candidate->opcode() == Load16S || candidate->opcode() == Store16); |
397 | }, |
398 | [&] (MemoryValue* match, Vector<Value*>& fixups) -> Value* { |
399 | if (match->opcode() == Store16) { |
400 | Value* sext = m_proc.add<Value>( |
401 | SExt16, m_value->origin(), match->child(0)); |
402 | fixups.append(sext); |
403 | return sext; |
404 | } |
405 | return nullptr; |
406 | }); |
407 | break; |
408 | } |
409 | |
410 | case Load: { |
411 | handleMemoryValue( |
412 | ptr, range, |
413 | [&] (MemoryValue* candidate) -> bool { |
414 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
415 | dataLog(" Consdering " , pointerDump(candidate), "\n" ); |
416 | if (candidate->offset() != offset) |
417 | return false; |
418 | |
419 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
420 | dataLog(" offset ok.\n" ); |
421 | |
422 | if (candidate->opcode() == Load && candidate->type() == type) |
423 | return true; |
424 | |
425 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
426 | dataLog(" not a load with ok type.\n" ); |
427 | |
428 | if (candidate->opcode() == Store && candidate->child(0)->type() == type) |
429 | return true; |
430 | |
431 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
432 | dataLog(" not a store with ok type.\n" ); |
433 | |
434 | return false; |
435 | }); |
436 | break; |
437 | } |
438 | |
439 | case Store8: { |
440 | handleStoreAfterClobber( |
441 | ptr, range, |
442 | [&] (MemoryValue* candidate) -> bool { |
443 | return candidate->opcode() == Store8 |
444 | && candidate->offset() == offset; |
445 | }); |
446 | break; |
447 | } |
448 | |
449 | case Store16: { |
450 | handleStoreAfterClobber( |
451 | ptr, range, |
452 | [&] (MemoryValue* candidate) -> bool { |
453 | return candidate->opcode() == Store16 |
454 | && candidate->offset() == offset; |
455 | }); |
456 | break; |
457 | } |
458 | |
459 | case Store: { |
460 | handleStoreAfterClobber( |
461 | ptr, range, |
462 | [&] (MemoryValue* candidate) -> bool { |
463 | return candidate->opcode() == Store |
464 | && candidate->offset() == offset; |
465 | }); |
466 | break; |
467 | } |
468 | |
469 | default: |
470 | break; |
471 | } |
472 | } |
473 | |
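// Backward store elimination: if a dominating store or load found by the filter already
// guarantees that the value being stored is in memory, this store is redundant and becomes a Nop.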
474 | template<typename Filter> |
475 | bool handleStoreBeforeClobber(Value* ptr, HeapRange range, const Filter& filter) |
476 | { |
477 | MemoryMatches matches = findMemoryValue(ptr, range, filter); |
478 | if (matches.isEmpty()) |
479 | return false; |
480 | |
481 | m_value->replaceWithNop(); |
482 | m_changed = true; |
483 | return true; |
484 | } |
485 | |
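// Forward store elimination: if this store cannot trap and every forward path hits an
// overwriting store (per the filter) before anything can observe this one, the store is dead and
// becomes a Nop. Otherwise it is recorded as available at the tail of this block.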
486 | template<typename Filter> |
487 | void handleStoreAfterClobber(Value* ptr, HeapRange range, const Filter& filter) |
488 | { |
489 | if (!m_value->traps() && findStoreAfterClobber(ptr, range, filter)) { |
490 | m_value->replaceWithNop(); |
491 | m_changed = true; |
492 | return; |
493 | } |
494 | |
495 | m_data.memoryValuesAtTail.add(m_value->as<MemoryValue>()); |
496 | } |
497 | |
498 | template<typename Filter> |
499 | bool findStoreAfterClobber(Value* ptr, HeapRange range, const Filter& filter) |
500 | { |
501 | if (m_value->as<MemoryValue>()->hasFence()) |
502 | return false; |
503 | |
504 | // We can eliminate a store if every forward path hits a store to the same location before |
505 | // hitting any operation that observes the store. This search seems like it should be |
506 | // expensive, but in the overwhelming majority of cases it will almost immediately hit an |
507 | // operation that interferes. |
508 | |
509 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
510 | dataLog(*m_value, ": looking forward for stores to " , *ptr, "...\n" ); |
511 | |
512 | // First search forward in this basic block. |
513 | // FIXME: It would be cool to get rid of this linear search. It's not super critical since |
514 | // we will probably bail out very quickly, but it *is* annoying. |
515 | for (unsigned index = m_index + 1; index < m_block->size(); ++index) { |
516 | Value* value = m_block->at(index); |
517 | |
518 | if (MemoryValue* memoryValue = value->as<MemoryValue>()) { |
519 | if (memoryValue->lastChild() == ptr && filter(memoryValue)) |
520 | return true; |
521 | } |
522 | |
523 | Effects effects = value->effects(); |
524 | if (effects.reads.overlaps(range) || effects.writes.overlaps(range)) |
525 | return false; |
526 | } |
527 | |
528 | if (!m_block->numSuccessors()) |
529 | return false; |
530 | |
531 | BlockWorklist worklist; |
532 | worklist.pushAll(m_block->successorBlocks()); |
533 | |
534 | while (BasicBlock* block = worklist.pop()) { |
535 | ImpureBlockData& data = m_impureBlockData[block]; |
536 | |
537 | MemoryValue* match = data.storesAtHead.find(ptr, filter); |
538 | if (match && match != m_value) |
539 | continue; |
540 | |
541 | if (data.writes.overlaps(range) || data.reads.overlaps(range)) |
542 | return false; |
543 | |
544 | if (!block->numSuccessors()) |
545 | return false; |
546 | |
547 | worklist.pushAll(block->successorBlocks()); |
548 | } |
549 | |
550 | return true; |
551 | } |
552 | |
553 | template<typename Filter> |
554 | void handleMemoryValue(Value* ptr, HeapRange range, const Filter& filter) |
555 | { |
556 | handleMemoryValue( |
557 | ptr, range, filter, |
558 | [] (MemoryValue*, Vector<Value*>&) -> Value* { |
559 | return nullptr; |
560 | }); |
561 | } |
562 | |
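// Load CSE: looks backward for matching memory values. On a match the load is rewritten via
// replaceMemoryValue(); otherwise it is recorded as available at the tail of this block.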
563 | template<typename Filter, typename Replace> |
564 | void handleMemoryValue( |
565 | Value* ptr, HeapRange range, const Filter& filter, const Replace& replace) |
566 | { |
567 | MemoryMatches matches = findMemoryValue(ptr, range, filter); |
568 | if (replaceMemoryValue(matches, replace)) |
569 | return; |
570 | m_data.memoryValuesAtTail.add(m_value->as<MemoryValue>()); |
571 | } |
572 | |
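// Rewrites m_value in terms of the matches. A single dominating match is used directly (or its
// stored value, if the match is a store). Multiple matches are merged by giving each match a Set
// of a fresh Variable (inserted later by run()'s fix-up loop) and replacing m_value with a Get of
// that Variable, relying on the later SSA fixup phase to clean this up.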
573 | template<typename Replace> |
574 | bool replaceMemoryValue(const MemoryMatches& matches, const Replace& replace) |
575 | { |
576 | if (matches.isEmpty()) |
577 | return false; |
578 | |
579 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
580 | dataLog("Eliminating " , *m_value, " due to " , pointerListDump(matches), "\n" ); |
581 | |
582 | m_changed = true; |
583 | |
584 | if (matches.size() == 1) { |
585 | MemoryValue* dominatingMatch = matches[0]; |
586 | RELEASE_ASSERT(m_dominators.dominates(dominatingMatch->owner, m_block)); |
587 | |
588 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
589 | dataLog(" Eliminating using " , *dominatingMatch, "\n" ); |
590 | Vector<Value*> ; |
591 | if (Value* value = replace(dominatingMatch, extraValues)) { |
592 | for (Value* : extraValues) |
593 | m_insertionSet.insertValue(m_index, extraValue); |
594 | m_value->replaceWithIdentity(value); |
595 | } else { |
596 | if (dominatingMatch->isStore()) |
597 | m_value->replaceWithIdentity(dominatingMatch->child(0)); |
598 | else |
599 | m_value->replaceWithIdentity(dominatingMatch); |
600 | } |
601 | return true; |
602 | } |
603 | |
604 | // FIXME: It would be way better if this phase just did SSA calculation directly. |
605 | // Right now we're relying on the fact that CSE's position in the phase order is |
606 | // almost right before SSA fixup. |
607 | |
608 | Variable* variable = m_proc.addVariable(m_value->type()); |
609 | |
610 | VariableValue* get = m_insertionSet.insert<VariableValue>( |
611 | m_index, Get, m_value->origin(), variable); |
612 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
613 | dataLog(" Inserting get of value: " , *get, "\n" ); |
614 | m_value->replaceWithIdentity(get); |
615 | |
616 | for (MemoryValue* match : matches) { |
617 | Vector<Value*>& sets = m_sets.add(match, Vector<Value*>()).iterator->value; |
618 | |
619 | Value* value = replace(match, sets); |
620 | if (!value) { |
621 | if (match->isStore()) |
622 | value = match->child(0); |
623 | else |
624 | value = match; |
625 | } |
626 | |
627 | Value* set = m_proc.add<VariableValue>(Set, m_value->origin(), variable, value); |
628 | sets.append(set); |
629 | } |
630 | |
631 | return true; |
632 | } |
633 | |
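// Walks backward from m_value, first through this block's tracked state and then through
// predecessor blocks, collecting memory values that match the filter. Returns an empty list if
// m_value has a fence, or if a clobbering write or the procedure's entry is reached before a
// match.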
634 | template<typename Filter> |
635 | MemoryMatches findMemoryValue(Value* ptr, HeapRange range, const Filter& filter) |
636 | { |
637 | if (B3EliminateCommonSubexpressionsInternal::verbose) { |
638 | dataLog(*m_value, ": looking backward for " , *ptr, "...\n" ); |
639 | dataLog(" Full value: " , deepDump(m_value), "\n" ); |
640 | } |
641 | |
642 | if (m_value->as<MemoryValue>()->hasFence()) { |
643 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
644 | dataLog(" Giving up because fences.\n" ); |
645 | return { }; |
646 | } |
647 | |
648 | if (MemoryValue* match = m_data.memoryValuesAtTail.find(ptr, filter)) { |
649 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
650 | dataLog(" Found " , *match, " locally.\n" ); |
651 | return { match }; |
652 | } |
653 | |
654 | if (m_data.writes.overlaps(range)) { |
655 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
656 | dataLog(" Giving up because of writes.\n" ); |
657 | return { }; |
658 | } |
659 | |
660 | BlockWorklist worklist; |
661 | worklist.pushAll(m_block->predecessors()); |
662 | |
663 | MemoryMatches matches; |
664 | |
665 | while (BasicBlock* block = worklist.pop()) { |
666 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
667 | dataLog(" Looking at " , *block, "\n" ); |
668 | |
669 | ImpureBlockData& data = m_impureBlockData[block]; |
670 | |
671 | MemoryValue* match = data.memoryValuesAtTail.find(ptr, filter); |
672 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
673 | dataLog(" Consdering match: " , pointerDump(match), "\n" ); |
674 | if (match && match != m_value) { |
675 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
676 | dataLog(" Found match: " , *match, "\n" ); |
677 | matches.append(match); |
678 | continue; |
679 | } |
680 | |
681 | if (data.writes.overlaps(range)) { |
682 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
683 | dataLog(" Giving up because of writes.\n" ); |
684 | return { }; |
685 | } |
686 | |
687 | if (!block->numPredecessors()) { |
688 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
689 | dataLog(" Giving up because it's live at root.\n" ); |
690 | // This essentially proves that this is live at the prologue. That means that we |
691 | // cannot reliably optimize this case. |
692 | return { }; |
693 | } |
694 | |
695 | worklist.pushAll(block->predecessors()); |
696 | } |
697 | |
698 | if (B3EliminateCommonSubexpressionsInternal::verbose) |
699 | dataLog(" Got matches: " , pointerListDump(matches), "\n" ); |
700 | return matches; |
701 | } |
702 | |
703 | Procedure& m_proc; |
704 | |
705 | Dominators& m_dominators; |
706 | PureCSE m_pureCSE; |
707 | |
708 | IndexMap<BasicBlock*, ImpureBlockData> m_impureBlockData; |
709 | |
710 | ImpureBlockData m_data; |
711 | |
712 | BasicBlock* m_block; |
713 | unsigned m_index; |
714 | Value* m_value; |
715 | |
716 | HashMap<Value*, Vector<Value*>> m_sets; |
717 | |
718 | InsertionSet m_insertionSet; |
719 | |
720 | bool m_changed { false }; |
721 | }; |
722 | |
723 | } // anonymous namespace |
724 | |
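// Entry point for the phase, called from B3's optimization pipeline.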
725 | bool eliminateCommonSubexpressions(Procedure& proc) |
726 | { |
727 | PhaseScope phaseScope(proc, "eliminateCommonSubexpressions" ); |
728 | |
729 | CSE cse(proc); |
730 | return cse.run(); |
731 | } |
732 | |
733 | } } // namespace JSC::B3 |
734 | |
735 | #endif // ENABLE(B3_JIT) |
736 | |
737 | |