/*
 * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "PolymorphicAccess.h"

#if ENABLE(JIT)

#include "BinarySwitch.h"
#include "CCallHelpers.h"
#include "CodeBlock.h"
#include "FullCodeOrigin.h"
#include "Heap.h"
#include "JITOperations.h"
#include "JSCInlines.h"
#include "LinkBuffer.h"
#include "StructureStubClearingWatchpoint.h"
#include "StructureStubInfo.h"
#include "SuperSampler.h"
#include <wtf/CommaPrinter.h>
#include <wtf/ListDump.h>

namespace JSC {

namespace PolymorphicAccessInternal {
static const bool verbose = false;
}

void AccessGenerationResult::dump(PrintStream& out) const
{
    out.print(m_kind);
    if (m_code)
        out.print(":", m_code);
}

Watchpoint* AccessGenerationState::addWatchpoint(const ObjectPropertyCondition& condition)
{
    return WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
        watchpoints, jit->codeBlock(), stubInfo, condition);
}

void AccessGenerationState::restoreScratch()
{
    allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
}

void AccessGenerationState::succeed()
{
    restoreScratch();
    success.append(jit->jump());
}

const RegisterSet& AccessGenerationState::liveRegistersForCall()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();
    return m_liveRegistersForCall;
}

const RegisterSet& AccessGenerationState::liveRegistersToPreserveAtExceptionHandlingCallSite()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();
    return m_liveRegistersToPreserveAtExceptionHandlingCallSite;
}

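// The set computed here is the registers that never need to be spilled around a call made from a
// stub: the registers that do not need saving for a JS call, intersected with the registers that
// do not need saving for a C call.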
static RegisterSet calleeSaveRegisters()
{
    RegisterSet result = RegisterSet::registersToNotSaveForJSCall();
    result.filter(RegisterSet::registersToNotSaveForCCall());
    return result;
}

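// Computes, once per stub generation, both the registers live at the exception handling call site
// and the registers that must be preserved across any call the stub makes. The result is cached,
// so subsequent queries through the accessors above are cheap.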
const RegisterSet& AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling) {
        m_calculatedRegistersForCallAndExceptionHandling = true;

        m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
        m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
        if (m_needsToRestoreRegistersIfException)
            RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));

        m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
        m_liveRegistersForCall.exclude(calleeSaveRegisters());
    }
    return m_liveRegistersForCall;
}

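// Spills every register that is live for a call (plus any extras the caller requests) to the
// stack, and returns a SpillState recording what was saved and how many stack bytes it occupies,
// so the restore helpers below can undo the spill exactly.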
auto AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra) -> SpillState
{
    RegisterSet liveRegisters = liveRegistersForCall();
    liveRegisters.merge(extra);

    unsigned extraStackPadding = 0;
    unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegisters, extraStackPadding);
    return SpillState {
        WTFMove(liveRegisters),
        numberOfStackBytesUsedForRegisterPreservation
    };
}

void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException(const SpillState& spillState)
{
    // Even if we're a getter, we don't want to ignore the result value like we normally do,
    // because the getter threw and therefore didn't return a value that means anything.
    // Instead, we want to restore that register to what it was upon entering the getter
    // inline cache. The subtlety here is that if the base and the result are the same register,
    // and the getter threw, we want OSR exit to see the original base value, not the result
    // of the getter call.
    RegisterSet dontRestore = spillState.spilledRegisters;
    // As an optimization, we only need to restore what is live for exception handling. We can
    // construct the dontRestore set to accomplish this by having it contain only what is live
    // for the call but not live for exception handling. By ignoring things that are only live
    // at the call but not at the exception handler, we restore exactly what is live at the
    // exception handler.
    dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
    restoreLiveRegistersFromStackForCall(spillState, dontRestore);
}

void AccessGenerationState::restoreLiveRegistersFromStackForCall(const SpillState& spillState, const RegisterSet& dontRestore)
{
    unsigned extraStackPadding = 0;
    ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, spillState.spilledRegisters, dontRestore, spillState.numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
}

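// If this stub needs to restore registers when an exception is thrown, we must register a fresh
// call site index for exception handling; otherwise the stub can simply reuse the call site index
// of the instruction it patches.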
CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();

    if (!m_calculatedCallSiteIndex) {
        m_calculatedCallSiteIndex = true;

        if (m_needsToRestoreRegistersIfException)
            m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
        else
            m_callSiteIndex = originalCallSiteIndex();
    }

    return m_callSiteIndex;
}

const HandlerInfo& AccessGenerationState::originalExceptionHandler()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();

    RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
    HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
    RELEASE_ASSERT(exceptionHandler);
    return *exceptionHandler;
}

CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }

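// Emitted after a call that can throw, once we have manually checked for an exception. We first
// copy the callee saves into the entry frame's buffer, mirroring what genericUnwind() would do.
// Then we either jump to the optimizing JIT's original handler (after setting callFrameForCatch),
// or, if no registers need restoring, look up the handler the generic way and jump to it.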
void AccessGenerationState::emitExplicitExceptionHandler()
{
    restoreScratch();
    jit->pushToSave(GPRInfo::regT0);
    jit->loadPtr(&m_vm.topEntryFrame, GPRInfo::regT0);
    jit->copyCalleeSavesToEntryFrameCalleeSavesBuffer(GPRInfo::regT0);
    jit->popToRestore(GPRInfo::regT0);

    if (needsToRestoreRegistersIfException()) {
        // The JIT that produced the original exception handling call site expects the OSR exit
        // to be arrived at from genericUnwind. Therefore we must model what genericUnwind does
        // here: set callFrameForCatch and copy the callee saves.

        jit->storePtr(GPRInfo::callFrameRegister, m_vm.addressOfCallFrameForCatch());
        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();

        // We don't need to insert a new exception handler in the table
        // because we're doing a manual exception check here. I.e., we'll
        // never arrive here from genericUnwind().
        HandlerInfo originalHandler = originalExceptionHandler();
        jit->addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
            });
    } else {
        jit->setupArguments<decltype(lookupExceptionHandler)>(CCallHelpers::TrustedImmPtr(&m_vm), GPRInfo::callFrameRegister);
        CCallHelpers::Call lookupExceptionHandlerCall = jit->call(OperationPtrTag);
        jit->addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr<OperationPtrTag>(lookupExceptionHandler));
            });
        jit->jumpToExceptionHandler(m_vm);
    }
}


PolymorphicAccess::PolymorphicAccess() { }
PolymorphicAccess::~PolymorphicAccess() { }

AccessGenerationResult PolymorphicAccess::addCases(
    const GCSafeConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
    const Identifier& ident, Vector<std::unique_ptr<AccessCase>, 2> originalCasesToAdd)
{
    SuperSamplerScope superSamplerScope(false);

    // This method will add the originalCasesToAdd to the list one at a time while preserving the
    // invariants:
    // - If a newly added case canReplace() any existing case, then the existing case is removed before
    //   the new case is added. Removal doesn't change the order of the list. Any number of existing
    //   cases can be removed via the canReplace() rule.
    // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
    //   cascade through the cases in reverse order, you will get the most recent cases first.
    // - If this method fails (i.e. doesn't add the cases), then both the previous case list and the
    //   previous stub are kept intact and the new cases are destroyed. It's OK to attempt to add
    //   more things after failure.

    // First ensure that the originalCasesToAdd doesn't contain duplicates.
    Vector<std::unique_ptr<AccessCase>> casesToAdd;
    for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
        std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);

        // Add it only if it is not replaced by a subsequent case in the list.
        bool found = false;
        for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
            if (originalCasesToAdd[j]->canReplace(*myCase)) {
                found = true;
                break;
            }
        }

        if (found)
            continue;

        casesToAdd.append(WTFMove(myCase));
    }

    if (PolymorphicAccessInternal::verbose)
        dataLog("casesToAdd: ", listDump(casesToAdd), "\n");

    // If there aren't any cases to add, then fail on the grounds that there's no point to generating a
    // new stub that will be identical to the old one. Returning MadeNoChanges should tell the caller
    // to just keep doing what they were doing before.
    if (casesToAdd.isEmpty())
        return AccessGenerationResult::MadeNoChanges;

    if (stubInfo.accessType != AccessType::InstanceOf) {
        bool shouldReset = false;
        AccessGenerationResult resetResult(AccessGenerationResult::ResetStubAndFireWatchpoints);
        auto considerPolyProtoReset = [&] (Structure* a, Structure* b) {
            if (Structure::shouldConvertToPolyProto(a, b)) {
                // For now, we only reset if this is our first time invalidating this watchpoint.
                // The reason we don't immediately fire this watchpoint is that we may already be
                // watching the poly proto watchpoint, which, if fired, would destroy us. We let
                // whoever handles the result do a delayed fire.
                ASSERT(a->rareData()->sharedPolyProtoWatchpoint().get() == b->rareData()->sharedPolyProtoWatchpoint().get());
                if (a->rareData()->sharedPolyProtoWatchpoint()->isStillValid()) {
                    shouldReset = true;
                    resetResult.addWatchpointToFire(*a->rareData()->sharedPolyProtoWatchpoint(), StringFireDetail("Detected poly proto optimization opportunity."));
                }
            }
        };

        for (auto& caseToAdd : casesToAdd) {
            for (auto& existingCase : m_list) {
                Structure* a = caseToAdd->structure();
                Structure* b = existingCase->structure();
                considerPolyProtoReset(a, b);
            }
        }
        for (unsigned i = 0; i < casesToAdd.size(); ++i) {
            for (unsigned j = i + 1; j < casesToAdd.size(); ++j) {
                Structure* a = casesToAdd[i]->structure();
                Structure* b = casesToAdd[j]->structure();
                considerPolyProtoReset(a, b);
            }
        }

        if (shouldReset)
            return resetResult;
    }

    // Now add things to the new list. Note that at this point, we will still have old cases that
    // may be replaced by the new ones. That's fine. We will sort that out when we regenerate.
    for (auto& caseToAdd : casesToAdd) {
        commit(locker, vm, m_watchpoints, codeBlock, stubInfo, ident, *caseToAdd);
        m_list.append(WTFMove(caseToAdd));
    }

    if (PolymorphicAccessInternal::verbose)
        dataLog("After addCases: m_list: ", listDump(m_list), "\n");

    return AccessGenerationResult::Buffered;
}

AccessGenerationResult PolymorphicAccess::addCase(
    const GCSafeConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
    const Identifier& ident, std::unique_ptr<AccessCase> newAccess)
{
    Vector<std::unique_ptr<AccessCase>, 2> newAccesses;
    newAccesses.append(WTFMove(newAccess));
    return addCases(locker, vm, codeBlock, stubInfo, ident, WTFMove(newAccesses));
}

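// Returns false as soon as any cell referenced weakly by a case, or held in our own weak reference
// list, is no longer marked; in that event the stub references dead objects and cannot be reused.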
bool PolymorphicAccess::visitWeak(VM& vm) const
{
    for (unsigned i = 0; i < size(); ++i) {
        if (!at(i).visitWeak(vm))
            return false;
    }
    if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
        for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
            if (!vm.heap.isMarked(weakReference.get()))
                return false;
        }
    }
    return true;
}

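// Returns true only if every case reports that its structure transitions have been fully
// propagated to the visitor.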
bool PolymorphicAccess::propagateTransitions(SlotVisitor& visitor) const
{
    bool result = true;
    for (unsigned i = 0; i < size(); ++i)
        result &= at(i).propagateTransitions(visitor);
    return result;
}

void PolymorphicAccess::dump(PrintStream& out) const
{
    out.print(RawPointer(this), ":[");
    CommaPrinter comma;
    for (auto& entry : m_list)
        out.print(comma, *entry);
    out.print("]");
}

void PolymorphicAccess::commit(
    const GCSafeConcurrentJSLocker&, VM& vm, std::unique_ptr<WatchpointsOnStructureStubInfo>& watchpoints, CodeBlock* codeBlock,
    StructureStubInfo& stubInfo, const Identifier& ident, AccessCase& accessCase)
{
    // NOTE: We currently assume that this is relatively rare. It mainly arises for accesses to
    // properties on DOM nodes. For sure we cache many DOM node accesses, but even in
    // Real Pages (TM), we appear to spend most of our time caching accesses to properties on
    // vanilla objects or exotic objects from within JSC (like Arguments, those are super popular).
    // Those common kinds of JSC object accesses don't hit this case.

    for (WatchpointSet* set : accessCase.commit(vm, ident)) {
        Watchpoint* watchpoint =
            WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
                watchpoints, codeBlock, &stubInfo, ObjectPropertyCondition());

        set->add(watchpoint);
    }
}

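// Builds a brand new stub routine from the currently buffered cases. Cases that have already been
// generated are kept in m_list until the new code is successfully linked, so a failed regeneration
// leaves the previous stub and its data structures intact.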
AccessGenerationResult PolymorphicAccess::regenerate(
    const GCSafeConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, const Identifier& ident)
{
    SuperSamplerScope superSamplerScope(false);

    if (PolymorphicAccessInternal::verbose)
        dataLog("Regenerate with m_list: ", listDump(m_list), "\n");

    AccessGenerationState state(vm, codeBlock->globalObject());

    state.access = this;
    state.stubInfo = &stubInfo;
    state.ident = &ident;

    state.baseGPR = stubInfo.baseGPR();
    state.thisGPR = stubInfo.patch.thisGPR;
    state.valueRegs = stubInfo.valueRegs();

    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
    state.allocator = &allocator;
    allocator.lock(state.baseGPR);
    if (state.thisGPR != InvalidGPRReg)
        allocator.lock(state.thisGPR);
    allocator.lock(state.valueRegs);
#if USE(JSVALUE32_64)
    allocator.lock(stubInfo.patch.baseTagGPR);
#endif

    state.scratchGPR = allocator.allocateScratchGPR();

    CCallHelpers jit(codeBlock);
    state.jit = &jit;

    state.preservedReusedRegisterState =
        allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);

    // Regenerating is our opportunity to figure out what our list of cases should look like. We
    // do this here. The newly produced 'cases' list may be smaller than m_list. We don't edit
    // m_list in-place because we may still fail, in which case we want the PolymorphicAccess object
    // to be unmutated. For sure, we want it to hang onto any data structures that may be referenced
    // from the code of the current stub (aka previous).
    ListType cases;
    unsigned srcIndex = 0;
    unsigned dstIndex = 0;
    while (srcIndex < m_list.size()) {
        std::unique_ptr<AccessCase> someCase = WTFMove(m_list[srcIndex++]);

        // If the case had been generated, then we have to keep the original in m_list in case we
        // fail to regenerate. That case may have data structures that are used by the code that it
        // had generated. If the case had not been generated, then we want to remove it from m_list.
        bool isGenerated = someCase->state() == AccessCase::Generated;

        [&] () {
            if (!someCase->couldStillSucceed())
                return;

            // Figure out if this is replaced by any later case. Given two cases A and B where A
            // comes first in the case list, we know that A would have triggered first if we had
            // generated the cases in a cascade. That's why this loop asks B->canReplace(A) but not
            // A->canReplace(B). If A->canReplace(B) was true then A would never have requested
            // repatching in cases where Repatch.cpp would have then gone on to generate B. If that
            // did happen by some fluke, then we'd just miss the redundancy here, which wouldn't be
            // incorrect - just slow. However, if A's checks failed and Repatch.cpp concluded that
            // this new condition could be handled by B and B->canReplace(A), then this says that we
            // don't need A anymore.
            //
            // If we can generate a binary switch, then A->canReplace(B) == B->canReplace(A). So,
            // it doesn't matter that we only do the check in one direction.
            for (unsigned j = srcIndex; j < m_list.size(); ++j) {
                if (m_list[j]->canReplace(*someCase))
                    return;
            }

            if (isGenerated)
                cases.append(someCase->clone());
            else
                cases.append(WTFMove(someCase));
        }();

        if (isGenerated)
            m_list[dstIndex++] = WTFMove(someCase);
    }
    m_list.resize(dstIndex);

    bool generatedFinalCode = false;

    // If the resulting set of cases is so big that we would stop caching and this is InstanceOf,
    // then we want to generate the generic InstanceOf and then stop.
    if (cases.size() >= Options::maxAccessVariantListSize()
        && stubInfo.accessType == AccessType::InstanceOf) {
        while (!cases.isEmpty())
            m_list.append(cases.takeLast());
        cases.append(AccessCase::create(vm, codeBlock, AccessCase::InstanceOfGeneric));
        generatedFinalCode = true;
    }

    if (PolymorphicAccessInternal::verbose)
        dataLog("Optimized cases: ", listDump(cases), "\n");

    // At this point we're convinced that 'cases' contains the cases that we want to JIT now and we
    // won't change that set anymore.

    bool allGuardedByStructureCheck = true;
    bool hasJSGetterSetterCall = false;
    for (auto& newCase : cases) {
        commit(locker, vm, state.watchpoints, codeBlock, stubInfo, ident, *newCase);
        allGuardedByStructureCheck &= newCase->guardedByStructureCheck();
        if (newCase->type() == AccessCase::Getter || newCase->type() == AccessCase::Setter)
            hasJSGetterSetterCall = true;
    }

    if (cases.isEmpty()) {
        // This is super unlikely, but we make it legal anyway.
        state.failAndRepatch.append(jit.jump());
    } else if (!allGuardedByStructureCheck || cases.size() == 1) {
        // If there are any proxies in the list, we cannot just use a binary switch over the structure.
        // We need to resort to a cascade. A cascade also happens to be optimal if we have just one
        // case.
        CCallHelpers::JumpList fallThrough;

        // Cascade through the list, preferring newer entries.
        for (unsigned i = cases.size(); i--;) {
            fallThrough.link(&jit);
            fallThrough.clear();
            cases[i]->generateWithGuard(state, fallThrough);
        }
        state.failAndRepatch.append(fallThrough);
    } else {
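        // Every case is guarded purely by a structure check, so load the structure ID once and
        // dispatch with a binary switch instead of a linear cascade of guards.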
        jit.load32(
            CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
            state.scratchGPR);

        Vector<int64_t> caseValues(cases.size());
        for (unsigned i = 0; i < cases.size(); ++i)
            caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());

        BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
        while (binarySwitch.advance(jit))
            cases[binarySwitch.caseIndex()]->generate(state);
        state.failAndRepatch.append(binarySwitch.fallThrough());
    }

    if (!state.failAndIgnore.empty()) {
        state.failAndIgnore.link(&jit);

        // Make sure that the inline cache optimization code knows that we are taking slow path because
        // of something that isn't patchable. The slow path will decrement "countdown" and will only
        // patch things if the countdown reaches zero. We increment the slow path count here to ensure
        // that the slow path does not try to patch.
#if CPU(X86) || CPU(X86_64)
        jit.move(CCallHelpers::TrustedImmPtr(&stubInfo.countdown), state.scratchGPR);
        jit.add8(CCallHelpers::TrustedImm32(1), CCallHelpers::Address(state.scratchGPR));
#else
        jit.load8(&stubInfo.countdown, state.scratchGPR);
        jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
        jit.store8(state.scratchGPR, &stubInfo.countdown);
#endif
    }

    CCallHelpers::JumpList failure;
    if (allocator.didReuseRegisters()) {
        state.failAndRepatch.link(&jit);
        state.restoreScratch();
    } else
        failure = state.failAndRepatch;
    failure.append(jit.jump());

    CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
    CallSiteIndex callSiteIndexForExceptionHandling;
    if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
        // Emit the exception handler.
        // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
        // Note also that this is not reachable from a custom getter/setter. Custom getters/setters
        // have their own exception handling logic that doesn't go through genericUnwind.
        MacroAssembler::Label makeshiftCatchHandler = jit.label();

        int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
        AccessGenerationState::SpillState spillStateForJSGetterSetter = state.spillStateForJSGetterSetter();
        ASSERT(!spillStateForJSGetterSetter.isEmpty());
        stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
        stackPointerOffset -= spillStateForJSGetterSetter.numberOfStackBytesUsedForRegisterPreservation;

        jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
        jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

        state.restoreLiveRegistersFromStackForCallWithThrownException(spillStateForJSGetterSetter);
        state.restoreScratch();
        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();

        HandlerInfo oldHandler = state.originalExceptionHandler();
        CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
        jit.addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);

                HandlerInfo handlerToRegister = oldHandler;
                handlerToRegister.nativeCode = linkBuffer.locationOf<ExceptionHandlerPtrTag>(makeshiftCatchHandler);
                handlerToRegister.start = newExceptionHandlingCallSite.bits();
                handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
                codeBlock->appendExceptionHandler(handlerToRegister);
            });

        // We set these to indicate that the stub should remove itself from the CodeBlock's
        // exception handler table when it is deallocated.
        codeBlockThatOwnsExceptionHandlers = codeBlock;
        ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
        callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
    }

    LinkBuffer linkBuffer(jit, codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate()) {
        if (PolymorphicAccessInternal::verbose)
            dataLog("Did fail to allocate.\n");
        return AccessGenerationResult::GaveUp;
    }

    CodeLocationLabel<JSInternalPtrTag> successLabel = stubInfo.doneLocation();

    linkBuffer.link(state.success, successLabel);

    linkBuffer.link(failure, stubInfo.slowPathStartLocation());

    if (PolymorphicAccessInternal::verbose)
        dataLog(FullCodeOrigin(codeBlock, stubInfo.codeOrigin), ": Generating polymorphic access stub for ", listDump(cases), "\n");

    MacroAssemblerCodeRef<JITStubRoutinePtrTag> code = FINALIZE_CODE_FOR(
        codeBlock, linkBuffer, JITStubRoutinePtrTag,
        "%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data());

    bool doesCalls = false;
    Vector<JSCell*> cellsToMark;
    for (auto& entry : cases)
        doesCalls |= entry->doesCalls(&cellsToMark);

    m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
    m_watchpoints = WTFMove(state.watchpoints);
    if (!state.weakReferences.isEmpty())
        m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
    if (PolymorphicAccessInternal::verbose)
        dataLog("Returning: ", code.code(), "\n");

    m_list = WTFMove(cases);

    AccessGenerationResult::Kind resultKind;
    if (m_list.size() >= Options::maxAccessVariantListSize() || generatedFinalCode)
        resultKind = AccessGenerationResult::GeneratedFinalCode;
    else
        resultKind = AccessGenerationResult::GeneratedNewCode;

    return AccessGenerationResult(resultKind, code.code());
}

void PolymorphicAccess::aboutToDie()
{
    if (m_stubRoutine)
        m_stubRoutine->aboutToDie();
}

} // namespace JSC

namespace WTF {

using namespace JSC;

void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
{
    switch (kind) {
    case AccessGenerationResult::MadeNoChanges:
        out.print("MadeNoChanges");
        return;
    case AccessGenerationResult::GaveUp:
        out.print("GaveUp");
        return;
    case AccessGenerationResult::Buffered:
        out.print("Buffered");
        return;
    case AccessGenerationResult::GeneratedNewCode:
        out.print("GeneratedNewCode");
        return;
    case AccessGenerationResult::GeneratedFinalCode:
        out.print("GeneratedFinalCode");
        return;
    case AccessGenerationResult::ResetStubAndFireWatchpoints:
        out.print("ResetStubAndFireWatchpoints");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

void printInternal(PrintStream& out, AccessCase::AccessType type)
{
    switch (type) {
    case AccessCase::Load:
        out.print("Load");
        return;
    case AccessCase::Transition:
        out.print("Transition");
        return;
    case AccessCase::Replace:
        out.print("Replace");
        return;
    case AccessCase::Miss:
        out.print("Miss");
        return;
    case AccessCase::GetGetter:
        out.print("GetGetter");
        return;
    case AccessCase::Getter:
        out.print("Getter");
        return;
    case AccessCase::Setter:
        out.print("Setter");
        return;
    case AccessCase::CustomValueGetter:
        out.print("CustomValueGetter");
        return;
    case AccessCase::CustomAccessorGetter:
        out.print("CustomAccessorGetter");
        return;
    case AccessCase::CustomValueSetter:
        out.print("CustomValueSetter");
        return;
    case AccessCase::CustomAccessorSetter:
        out.print("CustomAccessorSetter");
        return;
    case AccessCase::IntrinsicGetter:
        out.print("IntrinsicGetter");
        return;
    case AccessCase::InHit:
        out.print("InHit");
        return;
    case AccessCase::InMiss:
        out.print("InMiss");
        return;
    case AccessCase::ArrayLength:
        out.print("ArrayLength");
        return;
    case AccessCase::StringLength:
        out.print("StringLength");
        return;
    case AccessCase::DirectArgumentsLength:
        out.print("DirectArgumentsLength");
        return;
    case AccessCase::ScopedArgumentsLength:
        out.print("ScopedArgumentsLength");
        return;
    case AccessCase::ModuleNamespaceLoad:
        out.print("ModuleNamespaceLoad");
        return;
    case AccessCase::InstanceOfHit:
        out.print("InstanceOfHit");
        return;
    case AccessCase::InstanceOfMiss:
        out.print("InstanceOfMiss");
        return;
    case AccessCase::InstanceOfGeneric:
        out.print("InstanceOfGeneric");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

void printInternal(PrintStream& out, AccessCase::State state)
{
    switch (state) {
    case AccessCase::Primordial:
        out.print("Primordial");
        return;
    case AccessCase::Committed:
        out.print("Committed");
        return;
    case AccessCase::Generated:
        out.print("Generated");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

} // namespace WTF

#endif // ENABLE(JIT)