/*
 * Copyright (C) 2014-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "PolymorphicAccess.h"

#if ENABLE(JIT)

#include "BinarySwitch.h"
#include "CCallHelpers.h"
#include "CodeBlock.h"
#include "FullCodeOrigin.h"
#include "Heap.h"
#include "JITOperations.h"
#include "JSCInlines.h"
#include "LinkBuffer.h"
#include "StructureStubClearingWatchpoint.h"
#include "StructureStubInfo.h"
#include "SuperSampler.h"
#include <wtf/CommaPrinter.h>
#include <wtf/ListDump.h>

namespace JSC {

namespace PolymorphicAccessInternal {
static constexpr bool verbose = false;
}

void AccessGenerationResult::dump(PrintStream& out) const
{
    out.print(m_kind);
    if (m_code)
        out.print(":", m_code);
}

void AccessGenerationState::installWatchpoint(const ObjectPropertyCondition& condition)
{
    WatchpointsOnStructureStubInfo::ensureReferenceAndInstallWatchpoint(
        watchpoints, jit->codeBlock(), stubInfo, condition);
}

void AccessGenerationState::restoreScratch()
{
    allocator->restoreReusedRegistersByPopping(*jit, preservedReusedRegisterState);
}

void AccessGenerationState::succeed()
{
    restoreScratch();
    success.append(jit->jump());
}

const RegisterSet& AccessGenerationState::liveRegistersForCall()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();
    return m_liveRegistersForCall;
}

const RegisterSet& AccessGenerationState::liveRegistersToPreserveAtExceptionHandlingCallSite()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();
    return m_liveRegistersToPreserveAtExceptionHandlingCallSite;
}

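// Registers in this set do not need to be preserved across either a JS call or a C call, so the
// register preservation logic below can always leave them alone.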
static RegisterSet calleeSaveRegisters()
{
    RegisterSet result = RegisterSet::registersToNotSaveForJSCall();
    result.filter(RegisterSet::registersToNotSaveForCCall());
    return result;
}

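// Computes, once per regeneration, the registers that are live across a call made from this stub
// and the subset that must be preserved for the exception handler; later queries reuse the cached
// sets.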
const RegisterSet& AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling) {
        m_calculatedRegistersForCallAndExceptionHandling = true;

        m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
        m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
        if (m_needsToRestoreRegistersIfException)
            RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));

        m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
        m_liveRegistersForCall.exclude(calleeSaveRegisters());
    }
    return m_liveRegistersForCall;
}

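// Spills every register that is live across the call (plus any extras the caller requests) to the
// stack and records what was spilled so that it can be restored after the call.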
auto AccessGenerationState::preserveLiveRegistersToStackForCall(const RegisterSet& extra) -> SpillState
{
    RegisterSet liveRegisters = liveRegistersForCall();
    liveRegisters.merge(extra);

    unsigned extraStackPadding = 0;
    unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegisters, extraStackPadding);
    return SpillState {
        WTFMove(liveRegisters),
        numberOfStackBytesUsedForRegisterPreservation
    };
}

void AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException(const SpillState& spillState)
{
    // Even if we're a getter, we don't want to ignore the result value like we normally do
    // because the getter threw, and therefore, didn't return a value that means anything.
    // Instead, we want to restore that register to what it was upon entering the getter
    // inline cache. The subtlety here is if the base and the result are the same register,
    // and the getter threw, we want OSR exit to see the original base value, not the result
    // of the getter call.
    RegisterSet dontRestore = spillState.spilledRegisters;
    // As an optimization here, we only need to restore what is live for exception handling.
    // We can construct the dontRestore set to accomplish this goal by having it contain only
    // what is live for call but not live for exception handling. By ignoring things that are
    // only live at the call but not the exception handler, we will only restore things live
    // at the exception handler.
    dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
    restoreLiveRegistersFromStackForCall(spillState, dontRestore);
}

void AccessGenerationState::restoreLiveRegistersFromStackForCall(const SpillState& spillState, const RegisterSet& dontRestore)
{
    unsigned extraStackPadding = 0;
    ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, spillState.spilledRegisters, dontRestore, spillState.numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
}

CallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();

    if (!m_calculatedCallSiteIndex) {
        m_calculatedCallSiteIndex = true;

        if (m_needsToRestoreRegistersIfException)
            m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
        else
            m_callSiteIndex = originalCallSiteIndex();
    }

    return m_callSiteIndex;
}

DisposableCallSiteIndex AccessGenerationState::callSiteIndexForExceptionHandling()
{
    RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
    RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
    RELEASE_ASSERT(m_calculatedCallSiteIndex);
    return DisposableCallSiteIndex::fromCallSiteIndex(m_callSiteIndex);
}

const HandlerInfo& AccessGenerationState::originalExceptionHandler()
{
    if (!m_calculatedRegistersForCallAndExceptionHandling)
        calculateLiveRegistersForCallAndExceptionHandling();

    RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
    HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
    RELEASE_ASSERT(exceptionHandler);
    return *exceptionHandler;
}

CallSiteIndex AccessGenerationState::originalCallSiteIndex() const { return stubInfo->callSiteIndex; }

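// Emits the code that runs when a call made from this stub throws: callee saves are copied back to
// the entry frame, and then we either jump to the optimizing JIT's existing handler (when registers
// must be restored for the exception path) or look up a handler and unwind directly.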
void AccessGenerationState::emitExplicitExceptionHandler()
{
    restoreScratch();
    jit->pushToSave(GPRInfo::regT0);
    jit->loadPtr(&m_vm.topEntryFrame, GPRInfo::regT0);
    jit->copyCalleeSavesToEntryFrameCalleeSavesBuffer(GPRInfo::regT0);
    jit->popToRestore(GPRInfo::regT0);

    if (needsToRestoreRegistersIfException()) {
        // The JIT that produced the original exception handling call site expects the OSR exit
        // to be reached via genericUnwind. Therefore we must model what genericUnwind does here,
        // i.e., set callFrameForCatch and copy the callee saves.

        jit->storePtr(GPRInfo::callFrameRegister, m_vm.addressOfCallFrameForCatch());
        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit->jump();

        // We don't need to insert a new exception handler in the table
        // because we're doing a manual exception check here. That is, we'll
        // never arrive here from genericUnwind().
        HandlerInfo originalHandler = originalExceptionHandler();
        jit->addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
            });
    } else {
        jit->setupArguments<decltype(operationLookupExceptionHandler)>(CCallHelpers::TrustedImmPtr(&m_vm));
        jit->prepareCallOperation(m_vm);
        CCallHelpers::Call lookupExceptionHandlerCall = jit->call(OperationPtrTag);
        jit->addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr<OperationPtrTag>(operationLookupExceptionHandler));
            });
        jit->jumpToExceptionHandler(m_vm);
    }
}


PolymorphicAccess::PolymorphicAccess() { }
PolymorphicAccess::~PolymorphicAccess() { }

AccessGenerationResult PolymorphicAccess::addCases(
    const GCSafeConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
    Vector<std::unique_ptr<AccessCase>, 2> originalCasesToAdd)
{
    SuperSamplerScope superSamplerScope(false);

    // This method will add the originalCasesToAdd to the list one at a time while preserving the
    // invariants:
    // - If a newly added case canReplace() any existing case, then the existing case is removed before
    //   the new case is added. Removal doesn't change order of the list. Any number of existing cases
    //   can be removed via the canReplace() rule.
    // - Cases in the list always appear in ascending order of time of addition. Therefore, if you
    //   cascade through the cases in reverse order, you will get the most recent cases first.
    // - If this method fails (i.e., returns without adding the cases), then both the previous case
    //   list and the previous stub are kept intact and the new cases are destroyed. It's OK to
    //   attempt to add more things after failure.

    // First ensure that the originalCasesToAdd doesn't contain duplicates.
    Vector<std::unique_ptr<AccessCase>> casesToAdd;
    for (unsigned i = 0; i < originalCasesToAdd.size(); ++i) {
        std::unique_ptr<AccessCase> myCase = WTFMove(originalCasesToAdd[i]);

        // Add it only if it is not replaced by the subsequent cases in the list.
        bool found = false;
        for (unsigned j = i + 1; j < originalCasesToAdd.size(); ++j) {
            if (originalCasesToAdd[j]->canReplace(*myCase)) {
                found = true;
                break;
            }
        }

        if (found)
            continue;

        casesToAdd.append(WTFMove(myCase));
    }

    if (PolymorphicAccessInternal::verbose)
        dataLog("casesToAdd: ", listDump(casesToAdd), "\n");

    // If there aren't any cases to add, then fail on the grounds that there's no point in generating
    // a new stub that will be identical to the old one. Returning MadeNoChanges tells the caller to
    // just keep doing what they were doing before.
    if (casesToAdd.isEmpty())
        return AccessGenerationResult::MadeNoChanges;

    if (stubInfo.accessType != AccessType::InstanceOf) {
        bool shouldReset = false;
        AccessGenerationResult resetResult(AccessGenerationResult::ResetStubAndFireWatchpoints);
        auto considerPolyProtoReset = [&] (Structure* a, Structure* b) {
            if (Structure::shouldConvertToPolyProto(a, b)) {
                // For now, we only reset if this is our first time invalidating this watchpoint.
                // The reason we don't immediately fire this watchpoint is that we may already be
                // watching the poly proto watchpoint which, if fired, would destroy us. We let
                // whoever handles the result do a delayed fire.
                ASSERT(a->rareData()->sharedPolyProtoWatchpoint().get() == b->rareData()->sharedPolyProtoWatchpoint().get());
                if (a->rareData()->sharedPolyProtoWatchpoint()->isStillValid()) {
                    shouldReset = true;
                    resetResult.addWatchpointToFire(*a->rareData()->sharedPolyProtoWatchpoint(), StringFireDetail("Detected poly proto optimization opportunity."));
                }
            }
        };

        for (auto& caseToAdd : casesToAdd) {
            for (auto& existingCase : m_list) {
                Structure* a = caseToAdd->structure();
                Structure* b = existingCase->structure();
                considerPolyProtoReset(a, b);
            }
        }
        for (unsigned i = 0; i < casesToAdd.size(); ++i) {
            for (unsigned j = i + 1; j < casesToAdd.size(); ++j) {
                Structure* a = casesToAdd[i]->structure();
                Structure* b = casesToAdd[j]->structure();
                considerPolyProtoReset(a, b);
            }
        }

        if (shouldReset)
            return resetResult;
    }

    // Now add things to the new list. Note that at this point, we will still have old cases that
    // may be replaced by the new ones. That's fine. We will sort that out when we regenerate.
    for (auto& caseToAdd : casesToAdd) {
        commit(locker, vm, m_watchpoints, codeBlock, stubInfo, *caseToAdd);
        m_list.append(WTFMove(caseToAdd));
    }

    if (PolymorphicAccessInternal::verbose)
        dataLog("After addCases: m_list: ", listDump(m_list), "\n");

    return AccessGenerationResult::Buffered;
}

AccessGenerationResult PolymorphicAccess::addCase(
    const GCSafeConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, std::unique_ptr<AccessCase> newAccess)
{
    Vector<std::unique_ptr<AccessCase>, 2> newAccesses;
    newAccesses.append(WTFMove(newAccess));
    return addCases(locker, vm, codeBlock, stubInfo, WTFMove(newAccesses));
}

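// Returns false as soon as any access case or weak reference refers to a cell that is no longer
// marked.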
bool PolymorphicAccess::visitWeak(VM& vm) const
{
    for (unsigned i = 0; i < size(); ++i) {
        if (!at(i).visitWeak(vm))
            return false;
    }
    if (Vector<WriteBarrier<JSCell>>* weakReferences = m_weakReferences.get()) {
        for (WriteBarrier<JSCell>& weakReference : *weakReferences) {
            if (!vm.heap.isMarked(weakReference.get()))
                return false;
        }
    }
    return true;
}

bool PolymorphicAccess::propagateTransitions(SlotVisitor& visitor) const
{
    bool result = true;
    for (unsigned i = 0; i < size(); ++i)
        result &= at(i).propagateTransitions(visitor);
    return result;
}

void PolymorphicAccess::dump(PrintStream& out) const
{
    out.print(RawPointer(this), ":[");
    CommaPrinter comma;
    for (auto& entry : m_list)
        out.print(comma, *entry);
    out.print("]");
}

void PolymorphicAccess::commit(
    const GCSafeConcurrentJSLocker&, VM& vm, std::unique_ptr<WatchpointsOnStructureStubInfo>& watchpoints, CodeBlock* codeBlock,
    StructureStubInfo& stubInfo, AccessCase& accessCase)
{
    // NOTE: We currently assume that this is relatively rare. It mainly arises for accesses to
    // properties on DOM nodes. For sure we cache many DOM node accesses, but even in
    // Real Pages (TM), we appear to spend most of our time caching accesses to properties on
    // vanilla objects or exotic objects from within JSC (like Arguments, those are super popular).
    // Those common kinds of JSC object accesses don't hit this case.

    for (WatchpointSet* set : accessCase.commit(vm)) {
        Watchpoint* watchpoint =
            WatchpointsOnStructureStubInfo::ensureReferenceAndAddWatchpoint(
                watchpoints, codeBlock, &stubInfo);

        set->add(watchpoint);
    }
}

AccessGenerationResult PolymorphicAccess::regenerate(
    const GCSafeConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo)
{
    SuperSamplerScope superSamplerScope(false);

    if (PolymorphicAccessInternal::verbose)
        dataLog("Regenerate with m_list: ", listDump(m_list), "\n");

    AccessGenerationState state(vm, codeBlock->globalObject());

    state.access = this;
    state.stubInfo = &stubInfo;

    state.baseGPR = stubInfo.baseGPR();
    state.u.thisGPR = stubInfo.patch.u.thisGPR;
    state.valueRegs = stubInfo.valueRegs();

    // Regenerating is our opportunity to figure out what our list of cases should look like. We
    // do this here. The newly produced 'cases' list may be smaller than m_list. We don't edit
    // m_list in-place because we may still fail, in which case we want the PolymorphicAccess object
    // to be unmutated. For sure, we want it to hang onto any data structures that may be referenced
    // from the code of the current stub (aka previous).
    ListType cases;
    unsigned srcIndex = 0;
    unsigned dstIndex = 0;
    while (srcIndex < m_list.size()) {
        std::unique_ptr<AccessCase> someCase = WTFMove(m_list[srcIndex++]);

        // If the case had been generated, then we have to keep the original in m_list in case we
        // fail to regenerate. That case may have data structures that are used by the code that it
        // had generated. If the case had not been generated, then we want to remove it from m_list.
        bool isGenerated = someCase->state() == AccessCase::Generated;

        [&] () {
            if (!someCase->couldStillSucceed())
                return;

            // Figure out if this is replaced by any later case. Given two cases A and B where A
            // comes first in the case list, we know that A would have triggered first if we had
            // generated the cases in a cascade. That's why this loop asks B->canReplace(A) but not
            // A->canReplace(B). If A->canReplace(B) was true then A would never have requested
            // repatching in cases where Repatch.cpp would have then gone on to generate B. If that
            // did happen by some fluke, then we'd just miss the redundancy here, which wouldn't be
            // incorrect - just slow. However, if A's checks failed and Repatch.cpp concluded that
            // this new condition could be handled by B and B->canReplace(A), then this says that we
            // don't need A anymore.
            //
            // If we can generate a binary switch, then A->canReplace(B) == B->canReplace(A). So,
            // it doesn't matter that we only do the check in one direction.
            for (unsigned j = srcIndex; j < m_list.size(); ++j) {
                if (m_list[j]->canReplace(*someCase))
                    return;
            }

            if (isGenerated)
                cases.append(someCase->clone());
            else
                cases.append(WTFMove(someCase));
        }();

        if (isGenerated)
            m_list[dstIndex++] = WTFMove(someCase);
    }
    m_list.resize(dstIndex);

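    // Lock every register the stub already relies on so the scratch allocator never hands one of
    // them out as a temporary.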
    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
    state.allocator = &allocator;
    allocator.lock(state.baseGPR);
    if (state.u.thisGPR != InvalidGPRReg)
        allocator.lock(state.u.thisGPR);
    allocator.lock(state.valueRegs);
#if USE(JSVALUE32_64)
    allocator.lock(stubInfo.patch.baseTagGPR);
#endif

    state.scratchGPR = allocator.allocateScratchGPR();

    for (auto& accessCase : cases) {
        if (accessCase->needsScratchFPR()) {
            state.scratchFPR = allocator.allocateScratchFPR();
            break;
        }
    }

    CCallHelpers jit(codeBlock);
    state.jit = &jit;

    state.preservedReusedRegisterState =
        allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);


    bool generatedFinalCode = false;

    // If the resulting set of cases is so big that we would stop caching and this is InstanceOf,
    // then we want to generate the generic InstanceOf and then stop.
    if (cases.size() >= Options::maxAccessVariantListSize()
        && stubInfo.accessType == AccessType::InstanceOf) {
        while (!cases.isEmpty())
            m_list.append(cases.takeLast());
        cases.append(AccessCase::create(vm, codeBlock, AccessCase::InstanceOfGeneric, Identifier()));
        generatedFinalCode = true;
    }

    if (PolymorphicAccessInternal::verbose)
        dataLog("Optimized cases: ", listDump(cases), "\n");

    // At this point we're convinced that 'cases' contains the cases that we want to JIT now and we
    // won't change that set anymore.

    bool allGuardedByStructureCheck = true;
    bool hasJSGetterSetterCall = false;
    bool needsInt32PropertyCheck = false;
    bool needsStringPropertyCheck = false;
    bool needsSymbolPropertyCheck = false;
    for (auto& newCase : cases) {
        if (!stubInfo.hasConstantIdentifier) {
            if (newCase->requiresIdentifierNameMatch()) {
                if (newCase->uid()->isSymbol())
                    needsSymbolPropertyCheck = true;
                else
                    needsStringPropertyCheck = true;
            } else if (newCase->requiresInt32PropertyCheck())
                needsInt32PropertyCheck = true;
        }
        commit(locker, vm, state.watchpoints, codeBlock, stubInfo, *newCase);
        allGuardedByStructureCheck &= newCase->guardedByStructureCheck(stubInfo);
        if (newCase->type() == AccessCase::Getter || newCase->type() == AccessCase::Setter)
            hasJSGetterSetterCall = true;
    }

    if (cases.isEmpty()) {
        // This is super unlikely, but we make it legal anyway.
        state.failAndRepatch.append(jit.jump());
    } else if (!allGuardedByStructureCheck || cases.size() == 1) {
        // If there are any proxies in the list, we cannot just use a binary switch over the structure.
        // We need to resort to a cascade. A cascade also happens to be optimal if we only have just
        // one case.
        CCallHelpers::JumpList fallThrough;
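        // Without a constant identifier we first dispatch on the kind of property key (int32,
        // string, or symbol) and then cascade through the matching cases, newest first.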
        if (needsInt32PropertyCheck || needsStringPropertyCheck || needsSymbolPropertyCheck) {
            if (needsInt32PropertyCheck) {
                CCallHelpers::Jump notInt32;

                if (!stubInfo.propertyIsInt32)
                    notInt32 = jit.branchIfNotInt32(state.u.propertyGPR);
                for (unsigned i = cases.size(); i--;) {
                    fallThrough.link(&jit);
                    fallThrough.clear();
                    if (cases[i]->requiresInt32PropertyCheck())
                        cases[i]->generateWithGuard(state, fallThrough);
                }

                if (needsStringPropertyCheck || needsSymbolPropertyCheck) {
                    if (notInt32.isSet())
                        notInt32.link(&jit);
                    fallThrough.link(&jit);
                    fallThrough.clear();
                } else {
                    if (notInt32.isSet())
                        state.failAndRepatch.append(notInt32);
                }
            }

            if (needsStringPropertyCheck) {
                GPRReg propertyGPR = state.u.propertyGPR;
                CCallHelpers::JumpList notString;
                if (!stubInfo.propertyIsString) {
                    notString.append(jit.branchIfNotCell(propertyGPR));
                    notString.append(jit.branchIfNotString(propertyGPR));
                }
                jit.loadPtr(MacroAssembler::Address(propertyGPR, JSString::offsetOfValue()), state.scratchGPR);

                state.failAndRepatch.append(jit.branchIfRopeStringImpl(state.scratchGPR));

                for (unsigned i = cases.size(); i--;) {
                    fallThrough.link(&jit);
                    fallThrough.clear();
                    if (cases[i]->requiresIdentifierNameMatch() && !cases[i]->uid()->isSymbol())
                        cases[i]->generateWithGuard(state, fallThrough);
                }

                if (needsSymbolPropertyCheck) {
                    notString.link(&jit);
                    fallThrough.link(&jit);
                    fallThrough.clear();
                } else
                    state.failAndRepatch.append(notString);
            }

            if (needsSymbolPropertyCheck) {
                CCallHelpers::JumpList notSymbol;
                if (!stubInfo.propertyIsSymbol) {
                    GPRReg propertyGPR = state.u.propertyGPR;
                    notSymbol.append(jit.branchIfNotCell(propertyGPR));
                    notSymbol.append(jit.branchIfNotSymbol(propertyGPR));
                }

                for (unsigned i = cases.size(); i--;) {
                    fallThrough.link(&jit);
                    fallThrough.clear();
                    if (cases[i]->requiresIdentifierNameMatch() && cases[i]->uid()->isSymbol())
                        cases[i]->generateWithGuard(state, fallThrough);
                }

                state.failAndRepatch.append(notSymbol);
            }
        } else {
            // Cascade through the list, preferring newer entries.
            for (unsigned i = cases.size(); i--;) {
                fallThrough.link(&jit);
                fallThrough.clear();
                cases[i]->generateWithGuard(state, fallThrough);
            }
        }

        state.failAndRepatch.append(fallThrough);

    } else {
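        // Every case is guarded purely by a structure check, so load the structure ID once and
        // dispatch with a binary switch over the known structures.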
        jit.load32(
            CCallHelpers::Address(state.baseGPR, JSCell::structureIDOffset()),
            state.scratchGPR);

        Vector<int64_t> caseValues(cases.size());
        for (unsigned i = 0; i < cases.size(); ++i)
            caseValues[i] = bitwise_cast<int32_t>(cases[i]->structure()->id());

        BinarySwitch binarySwitch(state.scratchGPR, caseValues, BinarySwitch::Int32);
        while (binarySwitch.advance(jit))
            cases[binarySwitch.caseIndex()]->generate(state);
        state.failAndRepatch.append(binarySwitch.fallThrough());
    }

    if (!state.failAndIgnore.empty()) {
        state.failAndIgnore.link(&jit);

        // Make sure that the inline cache optimization code knows that we are taking the slow path
        // because of something that isn't patchable. The slow path will decrement "countdown" and
        // will only patch things if the countdown reaches zero. We increment the slow path count
        // here to ensure that the slow path does not try to patch.
#if CPU(X86) || CPU(X86_64)
        jit.move(CCallHelpers::TrustedImmPtr(&stubInfo.countdown), state.scratchGPR);
        jit.add8(CCallHelpers::TrustedImm32(1), CCallHelpers::Address(state.scratchGPR));
#else
        jit.load8(&stubInfo.countdown, state.scratchGPR);
        jit.add32(CCallHelpers::TrustedImm32(1), state.scratchGPR);
        jit.store8(state.scratchGPR, &stubInfo.countdown);
#endif
    }

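    // If any registers were reused, the repatch path must restore them before leaving the stub, so
    // link it here; otherwise the failure jumps can be forwarded to the slow path directly.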
    CCallHelpers::JumpList failure;
    if (allocator.didReuseRegisters()) {
        state.failAndRepatch.link(&jit);
        state.restoreScratch();
    } else
        failure = state.failAndRepatch;
    failure.append(jit.jump());

    CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
    DisposableCallSiteIndex callSiteIndexForExceptionHandling;
    if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
        // Emit the exception handler.
        // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
        // Note also that this is not reachable from a custom getter/setter. Custom getters and setters
        // have their own exception handling logic that doesn't go through genericUnwind.
        MacroAssembler::Label makeshiftCatchHandler = jit.label();

        int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
        AccessGenerationState::SpillState spillStateForJSGetterSetter = state.spillStateForJSGetterSetter();
        ASSERT(!spillStateForJSGetterSetter.isEmpty());
        stackPointerOffset -= state.preservedReusedRegisterState.numberOfBytesPreserved;
        stackPointerOffset -= spillStateForJSGetterSetter.numberOfStackBytesUsedForRegisterPreservation;

        jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
        jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

        state.restoreLiveRegistersFromStackForCallWithThrownException(spillStateForJSGetterSetter);
        state.restoreScratch();
        CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();

        HandlerInfo oldHandler = state.originalExceptionHandler();
        DisposableCallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
        jit.addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);

                HandlerInfo handlerToRegister = oldHandler;
                handlerToRegister.nativeCode = linkBuffer.locationOf<ExceptionHandlerPtrTag>(makeshiftCatchHandler);
                handlerToRegister.start = newExceptionHandlingCallSite.bits();
                handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
                codeBlock->appendExceptionHandler(handlerToRegister);
            });

        // We set these so that the stub will remove itself from the CodeBlock's
        // exception handler table when it is deallocated.
        codeBlockThatOwnsExceptionHandlers = codeBlock;
        ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
        callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
    }

    LinkBuffer linkBuffer(jit, codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate()) {
        if (PolymorphicAccessInternal::verbose)
            dataLog("Did fail to allocate.\n");
        return AccessGenerationResult::GaveUp;
    }

    CodeLocationLabel<JSInternalPtrTag> successLabel = stubInfo.doneLocation();

    linkBuffer.link(state.success, successLabel);

    linkBuffer.link(failure, stubInfo.slowPathStartLocation());

    if (PolymorphicAccessInternal::verbose)
        dataLog(FullCodeOrigin(codeBlock, stubInfo.codeOrigin), ": Generating polymorphic access stub for ", listDump(cases), "\n");

    MacroAssemblerCodeRef<JITStubRoutinePtrTag> code = FINALIZE_CODE_FOR(
        codeBlock, linkBuffer, JITStubRoutinePtrTag,
        "%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data());

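    // Record whether any case makes a call and which cells its generated code needs to keep alive;
    // both feed into how the stub routine is created below.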
    bool doesCalls = false;
    Vector<JSCell*> cellsToMark;
    for (auto& entry : cases)
        doesCalls |= entry->doesCalls(&cellsToMark);

    m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, cellsToMark, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
    m_watchpoints = WTFMove(state.watchpoints);
    if (!state.weakReferences.isEmpty())
        m_weakReferences = makeUnique<Vector<WriteBarrier<JSCell>>>(WTFMove(state.weakReferences));
    if (PolymorphicAccessInternal::verbose)
        dataLog("Returning: ", code.code(), "\n");

    m_list = WTFMove(cases);

    AccessGenerationResult::Kind resultKind;
    if (m_list.size() >= Options::maxAccessVariantListSize() || generatedFinalCode)
        resultKind = AccessGenerationResult::GeneratedFinalCode;
    else
        resultKind = AccessGenerationResult::GeneratedNewCode;

    return AccessGenerationResult(resultKind, code.code());
}

void PolymorphicAccess::aboutToDie()
{
    if (m_stubRoutine)
        m_stubRoutine->aboutToDie();
}

} // namespace JSC

namespace WTF {

using namespace JSC;

void printInternal(PrintStream& out, AccessGenerationResult::Kind kind)
{
    switch (kind) {
    case AccessGenerationResult::MadeNoChanges:
        out.print("MadeNoChanges");
        return;
    case AccessGenerationResult::GaveUp:
        out.print("GaveUp");
        return;
    case AccessGenerationResult::Buffered:
        out.print("Buffered");
        return;
    case AccessGenerationResult::GeneratedNewCode:
        out.print("GeneratedNewCode");
        return;
    case AccessGenerationResult::GeneratedFinalCode:
        out.print("GeneratedFinalCode");
        return;
    case AccessGenerationResult::ResetStubAndFireWatchpoints:
        out.print("ResetStubAndFireWatchpoints");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

void printInternal(PrintStream& out, AccessCase::AccessType type)
{
    switch (type) {
    case AccessCase::Load:
        out.print("Load");
        return;
    case AccessCase::Transition:
        out.print("Transition");
        return;
    case AccessCase::Replace:
        out.print("Replace");
        return;
    case AccessCase::Miss:
        out.print("Miss");
        return;
    case AccessCase::GetGetter:
        out.print("GetGetter");
        return;
    case AccessCase::Getter:
        out.print("Getter");
        return;
    case AccessCase::Setter:
        out.print("Setter");
        return;
    case AccessCase::CustomValueGetter:
        out.print("CustomValueGetter");
        return;
    case AccessCase::CustomAccessorGetter:
        out.print("CustomAccessorGetter");
        return;
    case AccessCase::CustomValueSetter:
        out.print("CustomValueSetter");
        return;
    case AccessCase::CustomAccessorSetter:
        out.print("CustomAccessorSetter");
        return;
    case AccessCase::IntrinsicGetter:
        out.print("IntrinsicGetter");
        return;
    case AccessCase::InHit:
        out.print("InHit");
        return;
    case AccessCase::InMiss:
        out.print("InMiss");
        return;
    case AccessCase::ArrayLength:
        out.print("ArrayLength");
        return;
    case AccessCase::StringLength:
        out.print("StringLength");
        return;
    case AccessCase::DirectArgumentsLength:
        out.print("DirectArgumentsLength");
        return;
    case AccessCase::ScopedArgumentsLength:
        out.print("ScopedArgumentsLength");
        return;
    case AccessCase::ModuleNamespaceLoad:
        out.print("ModuleNamespaceLoad");
        return;
    case AccessCase::InstanceOfHit:
        out.print("InstanceOfHit");
        return;
    case AccessCase::InstanceOfMiss:
        out.print("InstanceOfMiss");
        return;
    case AccessCase::InstanceOfGeneric:
        out.print("InstanceOfGeneric");
        return;
    case AccessCase::IndexedInt32Load:
        out.print("IndexedInt32Load");
        return;
    case AccessCase::IndexedDoubleLoad:
        out.print("IndexedDoubleLoad");
        return;
    case AccessCase::IndexedContiguousLoad:
        out.print("IndexedContiguousLoad");
        return;
    case AccessCase::IndexedArrayStorageLoad:
        out.print("IndexedArrayStorageLoad");
        return;
    case AccessCase::IndexedScopedArgumentsLoad:
        out.print("IndexedScopedArgumentsLoad");
        return;
    case AccessCase::IndexedDirectArgumentsLoad:
        out.print("IndexedDirectArgumentsLoad");
        return;
    case AccessCase::IndexedTypedArrayInt8Load:
        out.print("IndexedTypedArrayInt8Load");
        return;
    case AccessCase::IndexedTypedArrayUint8Load:
        out.print("IndexedTypedArrayUint8Load");
        return;
    case AccessCase::IndexedTypedArrayUint8ClampedLoad:
        out.print("IndexedTypedArrayUint8ClampedLoad");
        return;
    case AccessCase::IndexedTypedArrayInt16Load:
        out.print("IndexedTypedArrayInt16Load");
        return;
    case AccessCase::IndexedTypedArrayUint16Load:
        out.print("IndexedTypedArrayUint16Load");
        return;
    case AccessCase::IndexedTypedArrayInt32Load:
        out.print("IndexedTypedArrayInt32Load");
        return;
    case AccessCase::IndexedTypedArrayUint32Load:
        out.print("IndexedTypedArrayUint32Load");
        return;
    case AccessCase::IndexedTypedArrayFloat32Load:
        out.print("IndexedTypedArrayFloat32Load");
        return;
    case AccessCase::IndexedTypedArrayFloat64Load:
        out.print("IndexedTypedArrayFloat64Load");
        return;
    case AccessCase::IndexedStringLoad:
        out.print("IndexedStringLoad");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

void printInternal(PrintStream& out, AccessCase::State state)
{
    switch (state) {
    case AccessCase::Primordial:
        out.print("Primordial");
        return;
    case AccessCase::Committed:
        out.print("Committed");
        return;
    case AccessCase::Generated:
        out.print("Generated");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

} // namespace WTF

#endif // ENABLE(JIT)