/*
 * Copyright (C) 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(JIT)

#include "CachedRecovery.h"
#include "CallFrameShuffleData.h"
#include "MacroAssembler.h"
#include "RegisterSet.h"
#include <wtf/Vector.h>

namespace JSC {

class CCallHelpers;

class CallFrameShuffler {
    WTF_MAKE_FAST_ALLOCATED;
public:
    CallFrameShuffler(CCallHelpers&, const CallFrameShuffleData&);

    void dump(PrintStream&) const;

    // Any register that has been locked or acquired must be released
    // before calling prepareForTailCall() or prepareForSlowPath().
    void lockGPR(GPRReg gpr)
    {
        ASSERT(!m_lockedRegisters.get(gpr));
        m_lockedRegisters.set(gpr);
        if (verbose)
            dataLog(" * Locking ", gpr, "\n");
    }

    GPRReg acquireGPR()
    {
        ensureGPR();
        GPRReg gpr { getFreeGPR() };
        ASSERT(!m_registers[gpr]);
        lockGPR(gpr);
        return gpr;
    }

    void releaseGPR(GPRReg gpr)
    {
        if (verbose) {
            if (m_lockedRegisters.get(gpr))
                dataLog(" * Releasing ", gpr, "\n");
            else
                dataLog(" * ", gpr, " was not locked\n");
        }
        m_lockedRegisters.clear(gpr);
    }
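
    // A minimal usage sketch (hypothetical: the surrounding code and the
    // decision to tail call are up to the client):
    //
    //     GPRReg scratchGPR = shuffler.acquireGPR();
    //     // ... emit code that uses scratchGPR ...
    //     shuffler.releaseGPR(scratchGPR);
    //     shuffler.prepareForTailCall();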

    void restoreGPR(GPRReg gpr)
    {
        if (!m_newRegisters[gpr])
            return;

        ensureGPR();
#if USE(JSVALUE32_64)
        GPRReg tempGPR { getFreeGPR() };
        lockGPR(tempGPR);
        ensureGPR();
        releaseGPR(tempGPR);
#endif
        emitDisplace(*m_newRegisters[gpr]);
    }

    // You can only take a snapshot if the recovery has not started
    // yet. The only operations that are valid before taking a
    // snapshot are lockGPR(), acquireGPR() and releaseGPR().
    //
    // Locking status is *NOT* preserved by the snapshot: it only
    // records where the arguments, callee and callee-save registers
    // are, taking into account any spilling that acquireGPR() could
    // have done.
    CallFrameShuffleData snapshot() const
    {
        ASSERT(isUndecided());

        CallFrameShuffleData data;
        data.numLocals = numLocals();
        data.numPassedArgs = m_numPassedArgs;
        data.callee = getNew(VirtualRegister { CallFrameSlot::callee })->recovery();
        data.args.resize(argCount());
        for (size_t i = 0; i < argCount(); ++i)
            data.args[i] = getNew(virtualRegisterForArgument(i))->recovery();
        for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
            CachedRecovery* cachedRecovery { m_newRegisters[reg] };
            if (!cachedRecovery)
                continue;

#if USE(JSVALUE64)
            data.registers[reg] = cachedRecovery->recovery();
#else
            RELEASE_ASSERT_NOT_REACHED();
#endif
        }
        return data;
    }
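
    // A hypothetical use of snapshot(): acquire a scratch register for a
    // dynamic callee check, then save the resulting layout so that an
    // identical shuffle can be rebuilt later from the data:
    //
    //     GPRReg scratchGPR = shuffler.acquireGPR();
    //     CallFrameShuffleData savedData = shuffler.snapshot();
    //     shuffler.releaseGPR(scratchGPR);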

    // Ask the shuffler to put the callee into some registers once the
    // shuffling is done. You should call this before any of the
    // prepare() methods, and must not take a snapshot afterwards, as
    // this would crash on 32-bit platforms.
    void setCalleeJSValueRegs(JSValueRegs jsValueRegs)
    {
        ASSERT(isUndecided());
        ASSERT(!getNew(jsValueRegs));
        CachedRecovery* cachedRecovery { getNew(VirtualRegister(CallFrameSlot::callee)) };
        ASSERT(cachedRecovery);
        addNew(jsValueRegs, cachedRecovery->recovery());
    }

    // Ask the shuffler to assume the callee has already been checked
    // to be a cell. This is a no-op on 64-bit platforms, but frees up
    // a GPR on 32-bit platforms.
    // You obviously must have ensured that this is the case before
    // running any of the prepare methods.
    void assumeCalleeIsCell()
    {
#if USE(JSVALUE32_64)
        CachedRecovery& calleeCachedRecovery = *getNew(VirtualRegister(CallFrameSlot::callee));
        switch (calleeCachedRecovery.recovery().technique()) {
        case InPair:
            updateRecovery(
                calleeCachedRecovery,
                ValueRecovery::inGPR(
                    calleeCachedRecovery.recovery().payloadGPR(),
                    DataFormatCell));
            break;
        case DisplacedInJSStack:
            updateRecovery(
                calleeCachedRecovery,
                ValueRecovery::displacedInJSStack(
                    calleeCachedRecovery.recovery().virtualRegister(),
                    DataFormatCell));
            break;
        case InFPR:
        case UnboxedCellInGPR:
        case CellDisplacedInJSStack:
            break;
        case Constant:
            ASSERT(calleeCachedRecovery.recovery().constant().isCell());
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#endif
    }

    // This will emit code to build the new frame over the old one.
    void prepareForTailCall();

    // This will emit code to build the new frame as if performing a
    // regular call. However, the callee-save registers will be
    // restored, and any locals (not the header or arguments) of the
    // current frame can be overwritten.
    //
    // A frame built using prepareForSlowPath() should be used either
    // to throw an exception in, or destroyed using
    // CCallHelpers::prepareForTailCallSlow() followed by a tail call.
    void prepareForSlowPath();
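
    // A minimal end-to-end sketch for a tail call (hypothetical names:
    // 'jit' is the CCallHelpers and 'shuffleData' a CallFrameShuffleData
    // built by the caller; the register choice assumes a 64-bit platform):
    //
    //     CallFrameShuffler shuffler(jit, shuffleData);
    //     shuffler.setCalleeJSValueRegs(JSValueRegs { GPRInfo::regT0 });
    //     shuffler.prepareForTailCall();
    //     // ... emit the actual jump to the callee ...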

private:
    static const bool verbose = false;

    CCallHelpers& m_jit;

    void prepareAny();

    void spill(CachedRecovery&);

    // "box" is arguably a bad name here. The meaning is that after
    // calling emitBox(), you ensure that subsequently calling
    // emitStore() will be able to store the value without additional
    // transformation. In particular, this is a no-op for constants,
    // and is a complete no-op on 32-bit platforms since any unboxed
    // value can still be stored by storing the payload and a
    // statically known tag.
    void emitBox(CachedRecovery&);

    bool canBox(CachedRecovery& cachedRecovery)
    {
        if (cachedRecovery.boxingRequiresGPR() && getFreeGPR() == InvalidGPRReg)
            return false;

        if (cachedRecovery.boxingRequiresFPR() && getFreeFPR() == InvalidFPRReg)
            return false;

        return true;
    }

    void ensureBox(CachedRecovery& cachedRecovery)
    {
        if (canBox(cachedRecovery))
            return;

        if (cachedRecovery.boxingRequiresGPR())
            ensureGPR();

        if (cachedRecovery.boxingRequiresFPR())
            ensureFPR();
    }

    void emitLoad(CachedRecovery&);

    bool canLoad(CachedRecovery&);

    void ensureLoad(CachedRecovery& cachedRecovery)
    {
        if (canLoad(cachedRecovery))
            return;

        ASSERT(cachedRecovery.loadsIntoGPR() || cachedRecovery.loadsIntoFPR());

        if (cachedRecovery.loadsIntoFPR()) {
            if (cachedRecovery.loadsIntoGPR())
                ensureRegister();
            else
                ensureFPR();
        } else
            ensureGPR();
    }

    bool canLoadAndBox(CachedRecovery& cachedRecovery)
    {
        // We don't have interfering loads & boxes
        ASSERT(!cachedRecovery.loadsIntoFPR() || !cachedRecovery.boxingRequiresFPR());
        ASSERT(!cachedRecovery.loadsIntoGPR() || !cachedRecovery.boxingRequiresGPR());

        return canLoad(cachedRecovery) && canBox(cachedRecovery);
    }

    DataFormat emitStore(CachedRecovery&, MacroAssembler::Address);

    void emitDisplace(CachedRecovery&);

    void emitDeltaCheck();

    Bag<CachedRecovery> m_cachedRecoveries;

    void updateRecovery(CachedRecovery& cachedRecovery, ValueRecovery recovery)
    {
        clearCachedRecovery(cachedRecovery.recovery());
        cachedRecovery.setRecovery(recovery);
        setCachedRecovery(recovery, &cachedRecovery);
    }

    CachedRecovery* getCachedRecovery(ValueRecovery);

    CachedRecovery* setCachedRecovery(ValueRecovery, CachedRecovery*);

    void clearCachedRecovery(ValueRecovery recovery)
    {
        if (!recovery.isConstant())
            setCachedRecovery(recovery, nullptr);
    }

    CachedRecovery* addCachedRecovery(ValueRecovery recovery)
    {
        if (recovery.isConstant())
            return m_cachedRecoveries.add(recovery);
        CachedRecovery* cachedRecovery = getCachedRecovery(recovery);
        if (!cachedRecovery)
            return setCachedRecovery(recovery, m_cachedRecoveries.add(recovery));
        return cachedRecovery;
    }

    // These are the current recoveries present in the old frame's
    // slots. A null CachedRecovery means we can trash the current
    // value as we don't care about it.
    Vector<CachedRecovery*> m_oldFrame;

    int numLocals() const
    {
        return m_oldFrame.size() - CallerFrameAndPC::sizeInRegisters;
    }

    CachedRecovery* getOld(VirtualRegister reg) const
    {
        return m_oldFrame[CallerFrameAndPC::sizeInRegisters - reg.offset() - 1];
    }

    void setOld(VirtualRegister reg, CachedRecovery* cachedRecovery)
    {
        m_oldFrame[CallerFrameAndPC::sizeInRegisters - reg.offset() - 1] = cachedRecovery;
    }

    VirtualRegister firstOld() const
    {
        return VirtualRegister { static_cast<int>(-numLocals()) };
    }

    VirtualRegister lastOld() const
    {
        return VirtualRegister { CallerFrameAndPC::sizeInRegisters - 1 };
    }

    bool isValidOld(VirtualRegister reg) const
    {
        return reg >= firstOld() && reg <= lastOld();
    }

    bool m_didExtendFrame { false };

    void extendFrameIfNeeded();

    // This stores, for each slot in the new frame, information about
    // the recovery for the value that should eventually go into that
    // slot.
    //
    // Once the slot has been written, the corresponding entry in
    // m_newFrame will be empty.
    Vector<CachedRecovery*> m_newFrame;

    size_t argCount() const
    {
        return m_newFrame.size() - CallFrame::headerSizeInRegisters;
    }

    CachedRecovery* getNew(VirtualRegister newRegister) const
    {
        return m_newFrame[newRegister.offset()];
    }

    void setNew(VirtualRegister newRegister, CachedRecovery* cachedRecovery)
    {
        m_newFrame[newRegister.offset()] = cachedRecovery;
    }

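    // Records that the value described by 'recovery' must eventually be
    // written into 'newRegister', a slot of the new frame.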
    void addNew(VirtualRegister newRegister, ValueRecovery recovery)
    {
        CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
        cachedRecovery->addTarget(newRegister);
        setNew(newRegister, cachedRecovery);
    }

    VirtualRegister firstNew() const
    {
        return VirtualRegister { 0 };
    }

    VirtualRegister lastNew() const
    {
        return VirtualRegister { static_cast<int>(m_newFrame.size()) - 1 };
    }

    bool isValidNew(VirtualRegister reg) const
    {
        return reg >= firstNew() && reg <= lastNew();
    }

    int m_alignedOldFrameSize;
    int m_alignedNewFrameSize;

    // This is the distance, in slots, between the base of the new
    // frame and the base of the old frame. It could be negative when
    // preparing for a tail call to a function with smaller argument
    // count.
    //
    // We will overwrite this appropriately for slow path calls, but
    // we initialize it as if doing a fast path for the spills we
    // could do while undecided (typically while calling acquireGPR()
    // for a polymorphic call).
    int m_frameDelta;

    VirtualRegister newAsOld(VirtualRegister reg) const
    {
        return reg - m_frameDelta;
    }

    // This stores the set of locked registers, i.e. registers for
    // which we have an implicit requirement that they are not changed.
    //
    // This will usually contain the link register on architectures
    // that have one, any scratch register used by the macro assembler
    // (e.g. r11 on X86_64), as well as any register that we use for
    // addressing (see m_oldFrameBase and m_newFrameBase).
    //
    // We also use this to lock registers temporarily, for instance to
    // ensure that we have at least 2 available registers for loading
    // a pair on 32-bit platforms.
    mutable RegisterSet m_lockedRegisters;

    // This stores the current recoveries present in registers. A null
    // CachedRecovery means we can trash the current value as we don't
    // care about it.
    RegisterMap<CachedRecovery*> m_registers;

#if USE(JSVALUE64)
    mutable GPRReg m_tagTypeNumber;

    bool tryAcquireTagTypeNumber();
#endif

    // This stores, for each register, information about the recovery
    // for the value that should eventually go into that register. The
    // only registers that have a target recovery will be callee-save
    // registers, as well as possibly one JSValueRegs for holding the
    // callee.
    //
    // Once the correct value has been put into the registers, and
    // contrary to what we do with m_newFrame, we keep the entry in
    // m_newRegisters to simplify spilling.
    RegisterMap<CachedRecovery*> m_newRegisters;

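    // Finds a free register satisfying the given check functor. It prefers a
    // register that is neither holding a live recovery nor the eventual
    // target of one; failing that, it settles for one whose current value we
    // don't care about, and as a last resort (on 64-bit platforms) it gives
    // up the GPR pinned to hold the numeric tag. Returns an invalid Reg if
    // nothing is available.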
    template<typename CheckFunctor>
    Reg getFreeRegister(const CheckFunctor& check) const
    {
        Reg nonTemp { };
        for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
            if (m_lockedRegisters.get(reg))
                continue;

            if (!check(reg))
                continue;

            if (!m_registers[reg]) {
                if (!m_newRegisters[reg])
                    return reg;
                if (!nonTemp)
                    nonTemp = reg;
            }
        }

#if USE(JSVALUE64)
        if (!nonTemp && m_tagTypeNumber != InvalidGPRReg && check(Reg { m_tagTypeNumber })) {
            ASSERT(m_lockedRegisters.get(m_tagTypeNumber));
            m_lockedRegisters.clear(m_tagTypeNumber);
            nonTemp = Reg { m_tagTypeNumber };
            m_tagTypeNumber = InvalidGPRReg;
        }
#endif
        return nonTemp;
    }

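    // Returns a free GPR that is not the eventual target of any recovery
    // (hence usable as a pure temporary), or InvalidGPRReg if there is none.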
    GPRReg getFreeTempGPR() const
    {
        Reg freeTempGPR { getFreeRegister([this] (Reg reg) { return reg.isGPR() && !m_newRegisters[reg]; }) };
        if (!freeTempGPR)
            return InvalidGPRReg;
        return freeTempGPR.gpr();
    }

    GPRReg getFreeGPR() const
    {
        Reg freeGPR { getFreeRegister([] (Reg reg) { return reg.isGPR(); }) };
        if (!freeGPR)
            return InvalidGPRReg;
        return freeGPR.gpr();
    }

    FPRReg getFreeFPR() const
    {
        Reg freeFPR { getFreeRegister([] (Reg reg) { return reg.isFPR(); }) };
        if (!freeFPR)
            return InvalidFPRReg;
        return freeFPR.fpr();
    }

    bool hasFreeRegister() const
    {
        return static_cast<bool>(getFreeRegister([] (Reg) { return true; }));
    }

    // This frees up a register satisfying the check functor (this
    // functor could theoretically have any kind of logic, but it must
    // only return true for recoveries that are currently stored in a
    // register - spill() assumes and asserts that it is passed a
    // cachedRecovery stored in a register).
    template<typename CheckFunctor>
    void ensureRegister(const CheckFunctor& check)
    {
        // If we can spill a callee-save, that's best, because it will
        // free up a register that would otherwise have been taken for
        // the longest amount of time.
        //
        // We could try to bias towards those that are not in their
        // target registers yet, but the gain is probably super
        // small. Unless you have a huge number of arguments (at least
        // around twice the number of available registers on your
        // architecture), no spilling is going to take place anyway.
        for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
            if (m_lockedRegisters.get(reg))
                continue;

            CachedRecovery* cachedRecovery { m_newRegisters[reg] };
            if (!cachedRecovery)
                continue;

            if (check(*cachedRecovery)) {
                if (verbose)
                    dataLog(" ", cachedRecovery->recovery(), " looks like a good spill candidate\n");
                spill(*cachedRecovery);
                return;
            }
        }

        // We use the cachedRecovery associated with the first new slot
        // we can, because that is the one whose write will become
        // possible the latest, i.e. the one that we would have had to
        // retain in registers for the longest.
        for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) {
            CachedRecovery* cachedRecovery { getNew(reg) };
            if (!cachedRecovery)
                continue;

            if (check(*cachedRecovery)) {
                spill(*cachedRecovery);
                return;
            }
        }

        RELEASE_ASSERT_NOT_REACHED();
    }

    void ensureRegister()
    {
        if (hasFreeRegister())
            return;

        if (verbose)
            dataLog(" Finding a register to spill\n");
        ensureRegister(
            [this] (const CachedRecovery& cachedRecovery) {
                if (cachedRecovery.recovery().isInGPR())
                    return !m_lockedRegisters.get(cachedRecovery.recovery().gpr());
                if (cachedRecovery.recovery().isInFPR())
                    return !m_lockedRegisters.get(cachedRecovery.recovery().fpr());
#if USE(JSVALUE32_64)
                if (cachedRecovery.recovery().technique() == InPair) {
                    return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
                        && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR());
                }
#endif
                return false;
            });
    }

    void ensureTempGPR()
    {
        if (getFreeTempGPR() != InvalidGPRReg)
            return;

        if (verbose)
            dataLog(" Finding a temp GPR to spill\n");
        ensureRegister(
            [this] (const CachedRecovery& cachedRecovery) {
                if (cachedRecovery.recovery().isInGPR()) {
                    return !m_lockedRegisters.get(cachedRecovery.recovery().gpr())
                        && !m_newRegisters[cachedRecovery.recovery().gpr()];
                }
#if USE(JSVALUE32_64)
                if (cachedRecovery.recovery().technique() == InPair) {
                    return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
                        && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR())
                        && !m_newRegisters[cachedRecovery.recovery().tagGPR()]
                        && !m_newRegisters[cachedRecovery.recovery().payloadGPR()];
                }
#endif
                return false;
            });
    }

    void ensureGPR()
    {
        if (getFreeGPR() != InvalidGPRReg)
            return;

        if (verbose)
            dataLog(" Finding a GPR to spill\n");
        ensureRegister(
            [this] (const CachedRecovery& cachedRecovery) {
                if (cachedRecovery.recovery().isInGPR())
                    return !m_lockedRegisters.get(cachedRecovery.recovery().gpr());
#if USE(JSVALUE32_64)
                if (cachedRecovery.recovery().technique() == InPair) {
                    return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
                        && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR());
                }
#endif
                return false;
            });
    }

    void ensureFPR()
    {
        if (getFreeFPR() != InvalidFPRReg)
            return;

        if (verbose)
            dataLog(" Finding an FPR to spill\n");
        ensureRegister(
            [this] (const CachedRecovery& cachedRecovery) {
                if (cachedRecovery.recovery().isInFPR())
                    return !m_lockedRegisters.get(cachedRecovery.recovery().fpr());
                return false;
            });
    }

    CachedRecovery* getNew(JSValueRegs jsValueRegs) const
    {
#if USE(JSVALUE64)
        return m_newRegisters[jsValueRegs.gpr()];
#else
        ASSERT(
            jsValueRegs.tagGPR() == InvalidGPRReg || jsValueRegs.payloadGPR() == InvalidGPRReg
            || m_newRegisters[jsValueRegs.payloadGPR()] == m_newRegisters[jsValueRegs.tagGPR()]);
        if (jsValueRegs.payloadGPR() == InvalidGPRReg)
            return m_newRegisters[jsValueRegs.tagGPR()];
        return m_newRegisters[jsValueRegs.payloadGPR()];
#endif
    }

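    // Records that the value described by 'recovery' must eventually be put
    // into 'jsValueRegs', replacing any previously wanted register(s).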
    void addNew(JSValueRegs jsValueRegs, ValueRecovery recovery)
    {
        ASSERT(jsValueRegs && !getNew(jsValueRegs));
        CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
#if USE(JSVALUE64)
        if (cachedRecovery->wantedJSValueRegs())
            m_newRegisters[cachedRecovery->wantedJSValueRegs().gpr()] = nullptr;
        m_newRegisters[jsValueRegs.gpr()] = cachedRecovery;
#else
        if (JSValueRegs oldRegs { cachedRecovery->wantedJSValueRegs() }) {
            if (oldRegs.payloadGPR())
                m_newRegisters[oldRegs.payloadGPR()] = nullptr;
            if (oldRegs.tagGPR())
                m_newRegisters[oldRegs.tagGPR()] = nullptr;
        }
        if (jsValueRegs.payloadGPR() != InvalidGPRReg)
            m_newRegisters[jsValueRegs.payloadGPR()] = cachedRecovery;
        if (jsValueRegs.tagGPR() != InvalidGPRReg)
            m_newRegisters[jsValueRegs.tagGPR()] = cachedRecovery;
#endif
        ASSERT(!cachedRecovery->wantedJSValueRegs());
        cachedRecovery->setWantedJSValueRegs(jsValueRegs);
    }

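    // Same as above, for a recovery that must eventually live in the FPR 'fpr'.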
    void addNew(FPRReg fpr, ValueRecovery recovery)
    {
        ASSERT(fpr != InvalidFPRReg && !m_newRegisters[fpr]);
        CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
        m_newRegisters[fpr] = cachedRecovery;
        ASSERT(cachedRecovery->wantedFPR() == InvalidFPRReg);
        cachedRecovery->setWantedFPR(fpr);
    }

    // m_oldFrameBase is the register relative to which we access
    // slots in the old call frame, with an additional offset of
    // m_oldFrameOffset.
    //
    // - For an actual tail call, m_oldFrameBase is the stack
    //   pointer, and m_oldFrameOffset is the number of locals of the
    //   tail caller's frame. We use such stack pointer-based
    //   addressing because it allows us to load the tail caller's
    //   caller's frame pointer in the frame pointer register
    //   immediately instead of awkwardly keeping it around on the
    //   stack.
    //
    // - For a slow path call, m_oldFrameBase is just the frame
    //   pointer, and m_oldFrameOffset is 0.
    GPRReg m_oldFrameBase { MacroAssembler::framePointerRegister };
    int m_oldFrameOffset { 0 };

    MacroAssembler::Address addressForOld(VirtualRegister reg) const
    {
        return MacroAssembler::Address(m_oldFrameBase,
            (m_oldFrameOffset + reg.offset()) * sizeof(Register));
    }

    // m_newFrameBase is the register relative to which we access
    // slots in the new call frame, and we always make it point to
    // wherever the stack pointer will be right before making the
    // actual call/jump. The actual base of the new frame is at offset
    // m_newFrameOffset relative to m_newFrameBase.
    //
    // - For an actual tail call, m_newFrameBase is computed
    //   dynamically, and m_newFrameOffset varies between 0 and -2
    //   depending on the architecture's calling convention (see
    //   prepareForTailCall).
    //
    // - For a slow path call, m_newFrameBase is the actual stack
    //   pointer, and m_newFrameOffset is -CallerFrameAndPCSize,
    //   following the convention for a regular call.
    GPRReg m_newFrameBase { InvalidGPRReg };
    int m_newFrameOffset { 0 };

    bool isUndecided() const
    {
        return m_newFrameBase == InvalidGPRReg;
    }

    bool isSlowPath() const
    {
        return m_newFrameBase == MacroAssembler::stackPointerRegister;
    }

    MacroAssembler::Address addressForNew(VirtualRegister reg) const
    {
        return MacroAssembler::Address(m_newFrameBase,
            (m_newFrameOffset + reg.offset()) * sizeof(Register));
    }

    // We use a concept of "danger zone". The danger zone consists of
    // all the writes in the new frame that could overlap with reads
    // in the old frame.
    //
    // Because we could have a higher actual number of arguments than
    // parameters, when preparing a tail call, we need to assume that
    // writing to a slot on the new frame could overlap not only with
    // the corresponding slot in the old frame, but also with any slot
    // above it. Thus, the danger zone consists of all writes between
    // the first write and what I call the "danger frontier": the
    // highest slot in the old frame we still care about. Thus, the
    // danger zone contains all the slots between the first slot of
    // the new frame and the danger frontier. Because the danger
    // frontier is related to the new frame, it is stored as a virtual
    // register *in the new frame*.
    VirtualRegister m_dangerFrontier;

    VirtualRegister dangerFrontier() const
    {
        ASSERT(!isUndecided());

        return m_dangerFrontier;
    }

    bool isDangerNew(VirtualRegister reg) const
    {
        ASSERT(!isUndecided() && isValidNew(reg));
        return reg <= dangerFrontier();
    }

    void updateDangerFrontier()
    {
        ASSERT(!isUndecided());

        m_dangerFrontier = firstNew() - 1;
        for (VirtualRegister reg = lastNew(); reg >= firstNew(); reg -= 1) {
            if (!getNew(reg) || !isValidOld(newAsOld(reg)) || !getOld(newAsOld(reg)))
                continue;

            m_dangerFrontier = reg;
            if (verbose)
                dataLog(" Danger frontier now at NEW ", m_dangerFrontier, "\n");
            break;
        }
        if (verbose && m_dangerFrontier < firstNew())
            dataLog(" All clear! Danger zone is empty.\n");
    }

    // A safe write is a write that never writes into the danger zone.
    bool hasOnlySafeWrites(CachedRecovery& cachedRecovery) const
    {
        for (VirtualRegister target : cachedRecovery.targets()) {
            if (isDangerNew(target))
                return false;
        }
        return true;
    }

    // You must ensure that there are no dangerous writes before
    // calling this function.
    bool tryWrites(CachedRecovery&);

    // This function tries to ensure that there is no longer any
    // possible safe write, i.e. all remaining writes are either to
    // the danger zone or callee-save restorations.
    //
    // It returns false if it was unable to perform some safe writes
    // due to high register pressure.
    bool performSafeWrites();

    unsigned m_numPassedArgs { UINT_MAX };
};

} // namespace JSC

#endif // ENABLE(JIT)