/*
 * Copyright (C) 2015-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "CallFrameShuffler.h"

#if ENABLE(JIT)

#include "CachedRecovery.h"
#include "CCallHelpers.h"
#include "CodeBlock.h"

namespace JSC {

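// Overview (a descriptive note, summarizing what the code below does):
// the shuffler moves every live value of the outgoing call - callee,
// arguments, and values wanted in registers - from its current
// location into the slot or register the callee expects, either in
// place for a tail call or at the top of the stack for a slow path
// call. Throughout this file, the "danger zone" refers to new-frame
// slots that overlap old-frame slots whose values have not been read
// yet.
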
CallFrameShuffler::CallFrameShuffler(CCallHelpers& jit, const CallFrameShuffleData& data)
    : m_jit(jit)
    , m_oldFrame(data.numLocals + CallerFrameAndPC::sizeInRegisters, nullptr)
    , m_newFrame(data.args.size() + CallFrame::headerSizeInRegisters, nullptr)
    , m_alignedOldFrameSize(CallFrame::headerSizeInRegisters
        + roundArgumentCountToAlignFrame(jit.codeBlock()->numParameters()))
    , m_alignedNewFrameSize(CallFrame::headerSizeInRegisters
        + roundArgumentCountToAlignFrame(data.args.size()))
    , m_frameDelta(m_alignedNewFrameSize - m_alignedOldFrameSize)
    , m_lockedRegisters(RegisterSet::allRegisters())
    , m_numPassedArgs(data.numPassedArgs)
{
    // We are allowed all the usual registers...
    for (unsigned i = GPRInfo::numberOfRegisters; i--; )
        m_lockedRegisters.clear(GPRInfo::toRegister(i));
    for (unsigned i = FPRInfo::numberOfRegisters; i--; )
        m_lockedRegisters.clear(FPRInfo::toRegister(i));

#if USE(JSVALUE64)
    // ... as well as the runtime registers on 64-bit architectures.
    // However, do not use these registers on 32-bit architectures, since
    // CallFrameShuffler does not yet support saving and restoring
    // callee-saved registers there.
    m_lockedRegisters.exclude(RegisterSet::vmCalleeSaveRegisters());
#endif

    ASSERT(!data.callee.isInJSStack() || data.callee.virtualRegister().isLocal());
    addNew(VirtualRegister(CallFrameSlot::callee), data.callee);

    for (size_t i = 0; i < data.args.size(); ++i) {
        ASSERT(!data.args[i].isInJSStack() || data.args[i].virtualRegister().isLocal());
        addNew(virtualRegisterForArgument(i), data.args[i]);
    }

#if USE(JSVALUE64)
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        if (!data.registers[reg].isSet())
            continue;

        if (reg.isGPR())
            addNew(JSValueRegs(reg.gpr()), data.registers[reg]);
        else
            addNew(reg.fpr(), data.registers[reg]);
    }

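    // On 64-bit, data.numberTagRegister (if any) holds the tag constant
    // used for boxing doubles, so we keep it locked while the shuffle
    // may still need it; it is released in prepareAny() once no
    // remaining recovery depends on it.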
    m_numberTagRegister = data.numberTagRegister;
    if (m_numberTagRegister != InvalidGPRReg)
        lockGPR(m_numberTagRegister);
#endif
}

void CallFrameShuffler::dump(PrintStream& out) const
{
    static const char* delimiter = " +-------------------------------+ ";
    static const char* dangerDelimiter = " X-------------------------------X ";
    static const char* dangerBoundsDelimiter = " XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ";
    static const char* emptySpace = "                                   ";
    out.print("        ");
    out.print("             Old frame             ");
    out.print("             New frame             ");
    out.print("\n");
    int totalSize = m_alignedOldFrameSize + std::max(numLocals(), m_alignedNewFrameSize) + 3;
    for (int i = 0; i < totalSize; ++i) {
        VirtualRegister old { m_alignedOldFrameSize - i - 1 };
        VirtualRegister newReg { old + m_frameDelta };

        if (!isValidOld(old) && old != firstOld() - 1
            && !isValidNew(newReg) && newReg != firstNew() - 1)
            continue;

        out.print("        ");
        if (dangerFrontier() >= firstNew()
            && (newReg == dangerFrontier() || newReg == firstNew() - 1))
            out.print(dangerBoundsDelimiter);
        else if (isValidOld(old))
            out.print(isValidNew(newReg) && isDangerNew(newReg) ? dangerDelimiter : delimiter);
        else if (old == firstOld() - 1)
            out.print(delimiter);
        else
            out.print(emptySpace);
        if (dangerFrontier() >= firstNew()
            && (newReg == dangerFrontier() || newReg == firstNew() - 1))
            out.print(dangerBoundsDelimiter);
        else if (isValidNew(newReg) || newReg == firstNew() - 1)
            out.print(isDangerNew(newReg) ? dangerDelimiter : delimiter);
        else
            out.print(emptySpace);
        out.print("\n");
        if (old == firstOld())
            out.print(" sp --> ");
        else if (!old.offset())
            out.print(" fp --> ");
        else
            out.print("        ");
        if (isValidOld(old)) {
            if (getOld(old)) {
                auto str = toCString(old);
                if (isValidNew(newReg) && isDangerNew(newReg))
                    out.printf(" X %18s X ", str.data());
                else
                    out.printf(" | %18s | ", str.data());
            } else if (isValidNew(newReg) && isDangerNew(newReg))
                out.printf(" X%30s X ", "");
            else
                out.printf(" |%30s | ", "");
        } else
            out.print(emptySpace);
        if (isValidNew(newReg)) {
            const char d = isDangerNew(newReg) ? 'X' : '|';
            auto str = toCString(newReg);
            if (getNew(newReg)) {
                if (getNew(newReg)->recovery().isConstant())
                    out.printf(" %c%8s <- constant %c ", d, str.data(), d);
                else {
                    auto recoveryStr = toCString(getNew(newReg)->recovery());
                    out.printf(" %c%8s <- %18s %c ", d, str.data(),
                        recoveryStr.data(), d);
                }
            } else if (newReg == VirtualRegister { CallFrameSlot::argumentCount })
                out.printf(" %c%8s <- %18zu %c ", d, str.data(), argCount(), d);
            else
                out.printf(" %c%30s %c ", d, "", d);
        } else
            out.print(emptySpace);
        if (newReg == firstNew() - m_newFrameOffset && !isSlowPath())
            out.print(" <-- new sp before jump (current ", m_newFrameBase, ") ");
        if (newReg == firstNew())
            out.print(" <-- new fp after prologue");
        out.print("\n");
    }
    out.print("        ");
    out.print("          Live registers           ");
    out.print("         Wanted registers          ");
    out.print("\n");
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        CachedRecovery* oldCachedRecovery { m_registers[reg] };
        CachedRecovery* newCachedRecovery { m_newRegisters[reg] };
        if (!oldCachedRecovery && !newCachedRecovery)
            continue;
        out.print("        ");
        if (oldCachedRecovery) {
            auto str = toCString(reg);
            out.printf(" %8s ", str.data());
        } else
            out.print(emptySpace);
#if USE(JSVALUE32_64)
        if (newCachedRecovery) {
            JSValueRegs wantedJSValueRegs { newCachedRecovery->wantedJSValueRegs() };
            if (reg.isFPR())
                out.print(reg, " <- ", newCachedRecovery->recovery());
            else {
                if (reg.gpr() == wantedJSValueRegs.tagGPR())
                    out.print(reg.gpr(), " <- tag(", newCachedRecovery->recovery(), ")");
                else
                    out.print(reg.gpr(), " <- payload(", newCachedRecovery->recovery(), ")");
            }
        }
#else
        if (newCachedRecovery)
            out.print(" ", reg, " <- ", newCachedRecovery->recovery());
#endif
        out.print("\n");
    }
    out.print("  Locked registers: ");
    bool firstLocked { true };
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        if (m_lockedRegisters.get(reg)) {
            out.print(firstLocked ? "" : ", ", reg);
            firstLocked = false;
        }
    }
    out.print("\n");

    if (isSlowPath())
        out.print("  Using fp-relative addressing for slow path call\n");
    else
        out.print("  Using sp-relative addressing for jump (using ", m_newFrameBase, " as new sp)\n");
    if (m_oldFrameOffset)
        out.print("  Old frame offset is ", m_oldFrameOffset, "\n");
    if (m_newFrameOffset)
        out.print("  New frame offset is ", m_newFrameOffset, "\n");
#if USE(JSVALUE64)
    if (m_numberTagRegister != InvalidGPRReg)
        out.print("  NumberTag is currently in ", m_numberTagRegister, "\n");
#endif
}

CachedRecovery* CallFrameShuffler::getCachedRecovery(ValueRecovery recovery)
{
    ASSERT(!recovery.isConstant());
    if (recovery.isInGPR())
        return m_registers[recovery.gpr()];
    if (recovery.isInFPR())
        return m_registers[recovery.fpr()];
#if USE(JSVALUE32_64)
    if (recovery.technique() == InPair) {
        ASSERT(m_registers[recovery.tagGPR()] == m_registers[recovery.payloadGPR()]);
        return m_registers[recovery.payloadGPR()];
    }
#endif
    ASSERT(recovery.isInJSStack());
    return getOld(recovery.virtualRegister());
}

CachedRecovery* CallFrameShuffler::setCachedRecovery(ValueRecovery recovery, CachedRecovery* cachedRecovery)
{
    ASSERT(!recovery.isConstant());
    if (recovery.isInGPR())
        return m_registers[recovery.gpr()] = cachedRecovery;
    if (recovery.isInFPR())
        return m_registers[recovery.fpr()] = cachedRecovery;
#if USE(JSVALUE32_64)
    if (recovery.technique() == InPair) {
        m_registers[recovery.tagGPR()] = cachedRecovery;
        return m_registers[recovery.payloadGPR()] = cachedRecovery;
    }
#endif
    ASSERT(recovery.isInJSStack());
    setOld(recovery.virtualRegister(), cachedRecovery);
    return cachedRecovery;
}

void CallFrameShuffler::spill(CachedRecovery& cachedRecovery)
{
    ASSERT(!isSlowPath());
    ASSERT(cachedRecovery.recovery().isInRegisters());

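    // Look for a free slot in the old frame that lies below the start
    // of the new frame, so that the spilled value cannot be clobbered
    // by subsequent writes into the new frame.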
    VirtualRegister spillSlot { 0 };
    for (VirtualRegister slot = firstOld(); slot <= lastOld(); slot += 1) {
        if (slot >= newAsOld(firstNew()))
            break;

        if (getOld(slot))
            continue;

        spillSlot = slot;
        break;
    }
    // We must have enough slots to be able to fit the whole callee's
    // frame for the slow path - unless we are in the FTL. In that
    // case, we are allowed to extend the frame *once*, since we are
    // guaranteed to have enough available space for that.
    if (spillSlot >= newAsOld(firstNew()) || !spillSlot.isLocal()) {
        RELEASE_ASSERT(!m_didExtendFrame);
        extendFrameIfNeeded();
        spill(cachedRecovery);
        return;
    }

    if (verbose)
        dataLog("   * Spilling ", cachedRecovery.recovery(), " into ", spillSlot, "\n");
    auto format = emitStore(cachedRecovery, addressForOld(spillSlot));
    ASSERT(format != DataFormatNone);
    updateRecovery(cachedRecovery, ValueRecovery::displacedInJSStack(spillSlot, format));
}

void CallFrameShuffler::emitDeltaCheck()
{
    if (ASSERT_DISABLED)
        return;

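    // Debug-only sanity check: fp and sp should be exactly numLocals()
    // slots apart, i.e. fp - sp == numLocals() * sizeof(Register);
    // otherwise we abort with JITUnexpectedCallFrameSize.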
    GPRReg scratchGPR { getFreeGPR() };
    if (scratchGPR != InvalidGPRReg) {
        if (verbose)
            dataLog("  Using ", scratchGPR, " for the fp-sp delta check\n");
        m_jit.move(MacroAssembler::stackPointerRegister, scratchGPR);
        m_jit.subPtr(GPRInfo::callFrameRegister, scratchGPR);
        MacroAssembler::Jump ok = m_jit.branch32(
            MacroAssembler::Equal, scratchGPR,
            MacroAssembler::TrustedImm32(-numLocals() * sizeof(Register)));
        m_jit.abortWithReason(JITUnexpectedCallFrameSize);
        ok.link(&m_jit);
    } else if (verbose)
        dataLog("  Skipping the fp-sp delta check since there is too much pressure\n");
}

void CallFrameShuffler::extendFrameIfNeeded()
{
    ASSERT(!m_didExtendFrame);

    VirtualRegister firstRead { firstOld() };
    for (; firstRead <= virtualRegisterForLocal(0); firstRead += 1) {
        if (getOld(firstRead))
            break;
    }
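    // The slots in [firstOld(), firstRead) no longer need to be read,
    // so they can be reused; if they cannot hold the whole new frame,
    // grow the stack by the alignment-rounded shortfall below.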
    size_t availableSize = static_cast<size_t>(firstRead.offset() - firstOld().offset());
    size_t wantedSize = m_newFrame.size() + m_newFrameOffset;

    if (availableSize < wantedSize) {
        size_t delta = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), wantedSize - availableSize);
        m_oldFrame.grow(m_oldFrame.size() + delta);
        for (size_t i = 0; i < delta; ++i)
            m_oldFrame[m_oldFrame.size() - i - 1] = nullptr;
        m_jit.subPtr(MacroAssembler::TrustedImm32(delta * sizeof(Register)), MacroAssembler::stackPointerRegister);

        if (isSlowPath())
            m_frameDelta = numLocals() + CallerFrameAndPC::sizeInRegisters;
        else
            m_oldFrameOffset = numLocals();

        if (verbose)
            dataLogF("   Not enough space - extending the old frame %zu slots\n", delta);
    }

    m_didExtendFrame = true;
}

void CallFrameShuffler::prepareForSlowPath()
{
    ASSERT(isUndecided());
    emitDeltaCheck();

    m_frameDelta = numLocals() + CallerFrameAndPC::sizeInRegisters;
    m_newFrameBase = MacroAssembler::stackPointerRegister;
    m_newFrameOffset = -CallerFrameAndPC::sizeInRegisters;
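    // For the slow path, the new frame is assembled just below the
    // current stack pointer; the negative offset leaves room for the
    // CallerFrameAndPC slots that the call itself will fill in.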

    if (verbose)
        dataLog("\n\nPreparing frame for slow path call:\n");

    // When coming from the FTL, we need to extend the frame. In other
    // cases, we may end up extending the frame if we previously
    // spilled things (e.g. in polymorphic cache).
    extendFrameIfNeeded();

    if (verbose)
        dataLog(*this);

    prepareAny();

    if (verbose)
        dataLog("Ready for slow path call!\n");
}

void CallFrameShuffler::prepareForTailCall()
{
    ASSERT(isUndecided());
    emitDeltaCheck();

    // We'll use sp-based indexing so that we can load the caller's
    // frame pointer into the frame pointer register immediately.
    m_oldFrameBase = MacroAssembler::stackPointerRegister;
    m_oldFrameOffset = numLocals();
    m_newFrameBase = acquireGPR();
#if CPU(ARM_THUMB2) || CPU(MIPS)
    // We load the frame pointer and link register
    // manually. We could ask the algorithm to load them for us,
    // and it would allow us to use the link register as an extra
    // temporary - but it'd mean that the frame pointer can also
    // be used as an extra temporary, so we keep the link register
    // locked instead.

    // sp will point to head1 since the callee's prologue pushes
    // the call frame and link register.
    m_newFrameOffset = -1;
#elif CPU(ARM64)
    // We load the frame pointer and link register manually. We
    // could ask the algorithm to load the link register for us
    // (which would allow for its use as an extra temporary), but
    // since it's not in GPRInfo, we can't do it.

    // sp will point to head2 since the callee's prologue pushes the
    // call frame and link register.
    m_newFrameOffset = -2;
#elif CPU(X86_64)
    // We load the frame pointer manually, but we ask the
    // algorithm to move the return PC for us (it'd probably
    // require a write in the danger zone).
    addNew(VirtualRegister { 1 },
        ValueRecovery::displacedInJSStack(VirtualRegister(1), DataFormatJS));

    // sp will point to head1 since the callee's prologue pushes
    // the call frame register.
    m_newFrameOffset = -1;
#else
    UNREACHABLE_FOR_PLATFORM();
#endif

    if (verbose)
        dataLog("  Emitting code for computing the new frame base\n");

    // We compute the new frame base by first computing the top of the
    // old frame (taking into account an argument count that may be
    // higher than the number of parameters), then subtracting from it
    // the aligned new frame size (adjusted by the new frame offset).
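    // As a worked example (hypothetical values, assuming 2-register
    // stack alignment): with argumentCount = 5 and a header of
    // CallFrame::headerSizeInRegisters slots, the instructions below
    // compute roundUp(5 + headerSize) slots, scale by sizeof(Register)
    // to get the old frame's byte size, add fp to find the top of the
    // old frame, and subtract the aligned new frame size to obtain the
    // new frame base.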
    m_jit.load32(MacroAssembler::Address(GPRInfo::callFrameRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), m_newFrameBase);
    MacroAssembler::Jump argumentCountOK =
        m_jit.branch32(MacroAssembler::BelowOrEqual, m_newFrameBase,
            MacroAssembler::TrustedImm32(m_jit.codeBlock()->numParameters()));
    m_jit.add32(MacroAssembler::TrustedImm32(stackAlignmentRegisters() - 1 + CallFrame::headerSizeInRegisters), m_newFrameBase);
    m_jit.and32(MacroAssembler::TrustedImm32(-stackAlignmentRegisters()), m_newFrameBase);
    m_jit.mul32(MacroAssembler::TrustedImm32(sizeof(Register)), m_newFrameBase, m_newFrameBase);
    MacroAssembler::Jump done = m_jit.jump();
    argumentCountOK.link(&m_jit);
    m_jit.move(
        MacroAssembler::TrustedImm32(m_alignedOldFrameSize * sizeof(Register)),
        m_newFrameBase);
    done.link(&m_jit);

    m_jit.addPtr(GPRInfo::callFrameRegister, m_newFrameBase);
    m_jit.subPtr(
        MacroAssembler::TrustedImm32(
            (m_alignedNewFrameSize + m_newFrameOffset) * sizeof(Register)),
        m_newFrameBase);

    // We load the link register manually for architectures that have one.
#if CPU(ARM_THUMB2) || CPU(ARM64)
    m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, CallFrame::returnPCOffset()),
        MacroAssembler::linkRegister);
#if CPU(ARM64E)
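    // The return PC is signed on ARM64E; the fp adjustment below
    // reconstructs the value the PC was presumably signed against
    // (fp + sizeof(CallerFrameAndPC)) so that untagPtr can
    // authenticate it, and then restores fp.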
    m_jit.addPtr(MacroAssembler::TrustedImm32(sizeof(CallerFrameAndPC)), MacroAssembler::framePointerRegister);
    m_jit.untagPtr(MacroAssembler::framePointerRegister, MacroAssembler::linkRegister);
    m_jit.subPtr(MacroAssembler::TrustedImm32(sizeof(CallerFrameAndPC)), MacroAssembler::framePointerRegister);
#endif

#elif CPU(MIPS)
    m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, sizeof(void*)),
        MacroAssembler::returnAddressRegister);
#endif

    // We want the frame pointer to always point to a valid frame, and
    // we are going to trash the current one. Let's make it point to
    // our caller's frame, since that's what we want to end up with.
    m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister),
        MacroAssembler::framePointerRegister);

    if (verbose)
        dataLog("Preparing frame for tail call:\n", *this);

    prepareAny();

    if (verbose)
        dataLog("Ready for tail call!\n");
}

bool CallFrameShuffler::tryWrites(CachedRecovery& cachedRecovery)
{
    ASSERT(m_newFrameBase != InvalidGPRReg);

    // If the value is already set up correctly, we don't have
    // anything to do.
    if (isSlowPath() && cachedRecovery.recovery().isInJSStack()
        && cachedRecovery.targets().size() == 1
        && newAsOld(cachedRecovery.targets()[0]) == cachedRecovery.recovery().virtualRegister()) {
        cachedRecovery.clearTargets();
        if (!cachedRecovery.wantedJSValueRegs() && cachedRecovery.wantedFPR() == InvalidFPRReg)
            clearCachedRecovery(cachedRecovery.recovery());
        return true;
    }

    if (!canLoadAndBox(cachedRecovery))
        return false;

    emitLoad(cachedRecovery);
    emitBox(cachedRecovery);
    ASSERT(cachedRecovery.recovery().isInRegisters()
        || cachedRecovery.recovery().isConstant());

    if (verbose)
        dataLog("   * Storing ", cachedRecovery.recovery());
    for (size_t i = 0; i < cachedRecovery.targets().size(); ++i) {
        VirtualRegister target { cachedRecovery.targets()[i] };
        ASSERT(!isDangerNew(target));
        if (verbose)
            dataLog(!i ? " into " : ", and ", "NEW ", target);
        emitStore(cachedRecovery, addressForNew(target));
        setNew(target, nullptr);
    }
    if (verbose)
        dataLog("\n");
    cachedRecovery.clearTargets();
    if (!cachedRecovery.wantedJSValueRegs() && cachedRecovery.wantedFPR() == InvalidFPRReg)
        clearCachedRecovery(cachedRecovery.recovery());

    return true;
}

bool CallFrameShuffler::performSafeWrites()
{
    VirtualRegister firstSafe;
    VirtualRegister end { lastNew() + 1 };
    Vector<VirtualRegister> failures;

    // For each cachedRecovery that writes to the safe zone, if it
    // doesn't also write to the danger zone, we try to perform
    // the writes. This may free up danger slots, so we iterate
    // again until it no longer happens.
    //
    // Note that even though we have a while loop, we look at
    // each slot of the new call frame at most once, since in each
    // iteration beyond the first, we only process the portion of
    // the new call frame that was dangerous and became safe during
    // the previous iteration.
    do {
        firstSafe = dangerFrontier() + 1;
        if (verbose)
            dataLog("  Trying safe writes (between NEW ", firstSafe, " and NEW ", end - 1, ")\n");
        bool didProgress = false;
        for (VirtualRegister reg = firstSafe; reg < end; reg += 1) {
            CachedRecovery* cachedRecovery = getNew(reg);
            if (!cachedRecovery) {
                if (verbose)
                    dataLog("   + ", reg, " is OK.\n");
                continue;
            }
            if (!hasOnlySafeWrites(*cachedRecovery)) {
                if (verbose) {
                    dataLog("   - ", cachedRecovery->recovery(), " writes to NEW ", reg,
                        " but also has dangerous writes.\n");
                }
                continue;
            }
            if (cachedRecovery->wantedJSValueRegs()) {
                if (verbose) {
                    dataLog("   - ", cachedRecovery->recovery(), " writes to NEW ", reg,
                        " but is also needed in registers.\n");
                }
                continue;
            }
            if (cachedRecovery->wantedFPR() != InvalidFPRReg) {
                if (verbose) {
                    dataLog("   - ", cachedRecovery->recovery(), " writes to NEW ", reg,
                        " but is also needed in an FPR.\n");
                }
                continue;
            }
            if (!tryWrites(*cachedRecovery)) {
                if (verbose)
                    dataLog("   - Unable to write to NEW ", reg, " from ", cachedRecovery->recovery(), "\n");
                failures.append(reg);
            }
            didProgress = true;
        }
        end = firstSafe;

        // If we have cachedRecoveries that failed to write, it is
        // because they are on the stack and we didn't have enough
        // registers available at the time to load them into. If
        // we have a free register, we should try again because it
        // could free up some danger slots.
        if (didProgress && hasFreeRegister()) {
            Vector<VirtualRegister> stillFailing;
            for (VirtualRegister failed : failures) {
                CachedRecovery* cachedRecovery = getNew(failed);
                // It could have been handled later if it had
                // several targets.
                if (!cachedRecovery)
                    continue;

                ASSERT(hasOnlySafeWrites(*cachedRecovery)
                    && !cachedRecovery->wantedJSValueRegs()
                    && cachedRecovery->wantedFPR() == InvalidFPRReg);
                if (!tryWrites(*cachedRecovery))
                    stillFailing.append(failed);
            }
            failures = WTFMove(stillFailing);
        }
        if (verbose && firstSafe != dangerFrontier() + 1)
            dataLog("  We freed up danger slots!\n");
    } while (firstSafe != dangerFrontier() + 1);

    return failures.isEmpty();
}

void CallFrameShuffler::prepareAny()
{
    ASSERT(!isUndecided());

    updateDangerFrontier();
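    // The danger frontier is the highest slot of the new frame that
    // overlaps an old-frame slot whose value we still need to read;
    // slots above it can be written safely.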

    // First, we try to store any value that goes above the danger
    // frontier. This will never use more registers, since we only
    // load-and-store when we can ensure that any register used for
    // the load will be freed up after the stores (i.e., all stores
    // are above the danger frontier, and there is no wanted register).
    performSafeWrites();

    // At this point, we couldn't have more available registers than
    // we have without spilling: all values currently in registers
    // either require a write to the danger zone, or have a wanted
    // register, which means that in any case they will have to go
    // through registers again.

    // We now slowly free up the danger zone by first loading the old
    // value on the danger frontier, spilling as many registers as
    // needed to do so and ensuring that the corresponding slot in the
    // new frame is now ready to be written. Then, we store the old
    // value to its target location if possible (we could have failed
    // to load it previously due to high pressure). Finally, we write
    // to any of the newly safe slots that we can, which could free up
    // registers (hence why we do it eagerly).
    for (VirtualRegister reg = dangerFrontier(); reg >= firstNew(); reg -= 1) {
        if (reg == dangerFrontier()) {
            if (verbose)
                dataLog("  Next slot (NEW ", reg, ") is the danger frontier\n");
            CachedRecovery* cachedRecovery { getOld(newAsOld(dangerFrontier())) };
            ASSERT(cachedRecovery);
            ensureLoad(*cachedRecovery);
            emitLoad(*cachedRecovery);
            ensureBox(*cachedRecovery);
            emitBox(*cachedRecovery);
            if (hasOnlySafeWrites(*cachedRecovery))
                tryWrites(*cachedRecovery);
        } else if (verbose)
            dataLog("  Next slot is NEW ", reg, "\n");

        ASSERT(!isDangerNew(reg));
        CachedRecovery* cachedRecovery = getNew(reg);
        // This could be one of the header slots we don't care about.
        if (!cachedRecovery) {
            if (verbose)
                dataLog("   + ", reg, " is OK\n");
            continue;
        }

        if (canLoadAndBox(*cachedRecovery) && hasOnlySafeWrites(*cachedRecovery)
            && !cachedRecovery->wantedJSValueRegs()
            && cachedRecovery->wantedFPR() == InvalidFPRReg) {
            emitLoad(*cachedRecovery);
            emitBox(*cachedRecovery);
            bool writesOK = tryWrites(*cachedRecovery);
            ASSERT_UNUSED(writesOK, writesOK);
        } else if (verbose)
            dataLog("   - ", cachedRecovery->recovery(), " can't be handled just yet.\n");
    }
    ASSERT(dangerFrontier() < firstNew());

    // Now, the danger zone is empty, but we still have a couple of
    // things to do:
    //
    // 1) There could be remaining safe writes that failed earlier due
    //    to high register pressure and had nothing to do with the
    //    danger zone whatsoever.
    //
    // 2) Some wanted registers could have to be loaded (this could
    //    happen either when making a call to a new function with a
    //    lower number of arguments - since before this point we only
    //    load wanted registers when they are at the danger frontier -
    //    or if a wanted register got spilled).
    //
    // 3) Some wanted registers could have been loaded into the wrong
    //    registers.
    //
    // 4) We have to take care of some bookkeeping - namely, storing
    //    the argument count and updating the stack pointer.

    // At this point, we must have enough registers available for
    // handling 1). None of the loads can fail, because we have been
    // eagerly freeing up registers in all the previous phases - so
    // the only values that are in registers at this point must have
    // wanted registers.
    if (verbose)
        dataLog("  Danger zone is clear, performing remaining writes.\n");
    for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) {
        CachedRecovery* cachedRecovery { getNew(reg) };
        if (!cachedRecovery)
            continue;

        emitLoad(*cachedRecovery);
        emitBox(*cachedRecovery);
        bool writesOK = tryWrites(*cachedRecovery);
        ASSERT_UNUSED(writesOK, writesOK);
    }

#if USE(JSVALUE64)
    if (m_numberTagRegister != InvalidGPRReg && m_newRegisters[m_numberTagRegister])
        releaseGPR(m_numberTagRegister);
#endif

    // Handle 2) by loading all registers. We don't have to do any
    // writes, since they have been taken care of above.
    if (verbose)
        dataLog("  Loading wanted registers into registers\n");
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        CachedRecovery* cachedRecovery { m_newRegisters[reg] };
        if (!cachedRecovery)
            continue;

        emitLoad(*cachedRecovery);
        emitBox(*cachedRecovery);
        ASSERT(cachedRecovery->targets().isEmpty());
    }

#if USE(JSVALUE64)
    if (m_numberTagRegister != InvalidGPRReg)
        releaseGPR(m_numberTagRegister);
#endif

    // At this point, we have read everything we cared about from the
    // stack, and written everything we needed to write to the stack.
    if (verbose)
        dataLog("  Callee frame is fully set up\n");
    if (!ASSERT_DISABLED) {
        for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1)
            ASSERT_UNUSED(reg, !getNew(reg));

        for (CachedRecovery* cachedRecovery : m_cachedRecoveries) {
            ASSERT_UNUSED(cachedRecovery, cachedRecovery->targets().isEmpty());
            ASSERT(!cachedRecovery->recovery().isInJSStack());
        }
    }

    // We need to handle 4) first because it implies releasing
    // m_newFrameBase, which could be a wanted register.
    if (verbose)
        dataLog("   * Storing the argument count into ", VirtualRegister { CallFrameSlot::argumentCount }, "\n");
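    // The slot is written as two 32-bit halves: zero into the tag half
    // and the argument count into the payload half; zeroing the tag
    // half presumably keeps the full slot well-defined when it is read
    // back as 64 bits.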
    m_jit.store32(MacroAssembler::TrustedImm32(0),
        addressForNew(VirtualRegister { CallFrameSlot::argumentCount }).withOffset(TagOffset));
    RELEASE_ASSERT(m_numPassedArgs != UINT_MAX);
    m_jit.store32(MacroAssembler::TrustedImm32(m_numPassedArgs),
        addressForNew(VirtualRegister { CallFrameSlot::argumentCount }).withOffset(PayloadOffset));

    if (!isSlowPath()) {
        ASSERT(m_newFrameBase != MacroAssembler::stackPointerRegister);
        if (verbose)
            dataLog("  Releasing the new frame base pointer\n");
        m_jit.move(m_newFrameBase, MacroAssembler::stackPointerRegister);
        releaseGPR(m_newFrameBase);
    }

    // Finally we handle 3).
    if (verbose)
        dataLog("  Ensuring wanted registers are in the right register\n");
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        CachedRecovery* cachedRecovery { m_newRegisters[reg] };
        if (!cachedRecovery)
            continue;

        emitDisplace(*cachedRecovery);
    }
}

} // namespace JSC

#endif // ENABLE(JIT)