/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
25 | |
26 | #pragma once |
27 | |
28 | #include "ClassInfo.h" |
29 | #include "ConcurrentJSLock.h" |
30 | #include "IndexingType.h" |
31 | #include "JSCJSValue.h" |
32 | #include "JSCast.h" |
33 | #include "JSType.h" |
34 | #include "PropertyName.h" |
35 | #include "PropertyNameArray.h" |
36 | #include "PropertyOffset.h" |
37 | #include "PutPropertySlot.h" |
38 | #include "StructureIDBlob.h" |
39 | #include "StructureRareData.h" |
40 | #include "StructureTransitionTable.h" |
41 | #include "JSTypeInfo.h" |
42 | #include "Watchpoint.h" |
43 | #include "WriteBarrierInlines.h" |
44 | #include <wtf/PrintStream.h> |
45 | |
46 | namespace WTF { |
47 | |
48 | class UniquedStringImpl; |
49 | |
50 | } // namespace WTF |
51 | |
52 | namespace JSC { |
53 | |
54 | class DeferGC; |
55 | class ; |
56 | class PropertyNameArray; |
57 | class PropertyNameArrayData; |
58 | class PropertyTable; |
59 | class StructureChain; |
60 | class StructureShape; |
61 | class SlotVisitor; |
62 | class JSString; |
63 | struct DumpContext; |
64 | struct HashTable; |
65 | struct HashTableValue; |
66 | |
67 | // The out-of-line property storage capacity to use when first allocating out-of-line |
68 | // storage. Note that all objects start out without having any out-of-line storage; |
69 | // this comes into play only on the first property store that exhausts inline storage. |
70 | static constexpr unsigned initialOutOfLineCapacity = 4; |
71 | |
72 | // The factor by which to grow out-of-line storage when it is exhausted, after the |
73 | // initial allocation. |
74 | static constexpr unsigned outOfLineGrowthFactor = 2; |
75 | |
76 | struct PropertyMapEntry { |
77 | UniquedStringImpl* key; |
78 | PropertyOffset offset; |
79 | uint8_t attributes; |
80 | |
81 | PropertyMapEntry() |
82 | : key(nullptr) |
83 | , offset(invalidOffset) |
84 | , attributes(0) |
85 | { |
86 | } |
87 | |
88 | PropertyMapEntry(UniquedStringImpl* key, PropertyOffset offset, unsigned attributes) |
89 | : key(key) |
90 | , offset(offset) |
91 | , attributes(attributes) |
92 | { |
93 | ASSERT(this->attributes == attributes); |
94 | } |
95 | }; |
96 | |
// FireDetail carrying the Structure whose watchpoint set is being fired;
// dump() (defined out of line) describes that Structure for logging.
class StructureFireDetail : public FireDetail {
public:
    StructureFireDetail(const Structure* structure)
        : m_structure(structure)
    {
    }

    void dump(PrintStream& out) const override;

private:
    // Non-owning; NOTE(review): presumably kept alive by the caller for the
    // duration of the fire — confirm against the watchpoint firing sites.
    const Structure* m_structure;
};
109 | |
// Scope object that defers firing a Structure's transition watchpoints
// (see DeferredWatchpointFire); the exported destructor presumably performs
// the deferred fire — confirm against DeferredWatchpointFire.
class DeferredStructureTransitionWatchpointFire : public DeferredWatchpointFire {
    WTF_MAKE_NONCOPYABLE(DeferredStructureTransitionWatchpointFire);
public:
    JS_EXPORT_PRIVATE DeferredStructureTransitionWatchpointFire(VM&, Structure*);
    JS_EXPORT_PRIVATE ~DeferredStructureTransitionWatchpointFire();

    void dump(PrintStream& out) const override;

    // The Structure whose transition triggered this deferred fire.
    const Structure* structure() const { return m_structure; }

private:
    const Structure* m_structure;
};
123 | |
124 | class Structure final : public JSCell { |
125 | public: |
126 | friend class StructureTransitionTable; |
127 | |
128 | typedef JSCell Base; |
129 | static constexpr unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal; |
130 | |
131 | enum PolyProtoTag { PolyProto }; |
132 | static Structure* create(VM&, JSGlobalObject*, JSValue prototype, const TypeInfo&, const ClassInfo*, IndexingType = NonArray, unsigned inlineCapacity = 0); |
133 | static Structure* create(PolyProtoTag, VM&, JSGlobalObject*, JSObject* prototype, const TypeInfo&, const ClassInfo*, IndexingType = NonArray, unsigned inlineCapacity = 0); |
134 | |
135 | ~Structure(); |
136 | |
137 | template<typename CellType, SubspaceAccess> |
138 | static IsoSubspace* subspaceFor(VM& vm) |
139 | { |
140 | return &vm.structureSpace; |
141 | } |
142 | |
143 | JS_EXPORT_PRIVATE static bool isValidPrototype(JSValue); |
144 | |
145 | protected: |
146 | void finishCreation(VM& vm) |
147 | { |
148 | Base::finishCreation(vm); |
149 | ASSERT(m_prototype.get().isEmpty() || isValidPrototype(m_prototype.get())); |
150 | } |
151 | |
152 | void finishCreation(VM& vm, const Structure* previous) |
153 | { |
154 | this->finishCreation(vm); |
155 | if (previous->hasRareData()) { |
156 | const StructureRareData* previousRareData = previous->rareData(); |
157 | if (previousRareData->hasSharedPolyProtoWatchpoint()) { |
158 | ensureRareData(vm); |
159 | rareData()->setSharedPolyProtoWatchpoint(previousRareData->copySharedPolyProtoWatchpoint()); |
160 | } |
161 | } |
162 | } |
163 | |
164 | void finishCreation(VM& vm, CreatingEarlyCellTag) |
165 | { |
166 | Base::finishCreation(vm, this, CreatingEarlyCell); |
167 | ASSERT(m_prototype); |
168 | ASSERT(m_prototype.isNull()); |
169 | ASSERT(!vm.structureStructure); |
170 | } |
171 | |
172 | public: |
173 | StructureID id() const { return m_blob.structureID(); } |
174 | int32_t objectInitializationBlob() const { return m_blob.blobExcludingStructureID(); } |
175 | int64_t idBlob() const { return m_blob.blob(); } |
176 | |
177 | bool isProxy() const |
178 | { |
179 | JSType type = m_blob.type(); |
180 | return type == ImpureProxyType || type == PureForwardingProxyType || type == ProxyObjectType; |
181 | } |
182 | |
183 | static void dumpStatistics(); |
184 | |
185 | JS_EXPORT_PRIVATE static Structure* addPropertyTransition(VM&, Structure*, PropertyName, unsigned attributes, PropertyOffset&); |
186 | JS_EXPORT_PRIVATE static Structure* addNewPropertyTransition(VM&, Structure*, PropertyName, unsigned attributes, PropertyOffset&, PutPropertySlot::Context = PutPropertySlot::UnknownContext, DeferredStructureTransitionWatchpointFire* = nullptr); |
187 | static Structure* addPropertyTransitionToExistingStructureConcurrently(Structure*, UniquedStringImpl* uid, unsigned attributes, PropertyOffset&); |
188 | JS_EXPORT_PRIVATE static Structure* addPropertyTransitionToExistingStructure(Structure*, PropertyName, unsigned attributes, PropertyOffset&); |
189 | static Structure* removePropertyTransition(VM&, Structure*, PropertyName, PropertyOffset&); |
190 | static Structure* changePrototypeTransition(VM&, Structure*, JSValue prototype, DeferredStructureTransitionWatchpointFire&); |
191 | JS_EXPORT_PRIVATE static Structure* attributeChangeTransition(VM&, Structure*, PropertyName, unsigned attributes); |
192 | JS_EXPORT_PRIVATE static Structure* toCacheableDictionaryTransition(VM&, Structure*, DeferredStructureTransitionWatchpointFire* = nullptr); |
193 | static Structure* toUncacheableDictionaryTransition(VM&, Structure*); |
194 | JS_EXPORT_PRIVATE static Structure* sealTransition(VM&, Structure*); |
195 | JS_EXPORT_PRIVATE static Structure* freezeTransition(VM&, Structure*); |
196 | static Structure* preventExtensionsTransition(VM&, Structure*); |
197 | static Structure* nonPropertyTransition(VM&, Structure*, NonPropertyTransition); |
198 | JS_EXPORT_PRIVATE static Structure* nonPropertyTransitionSlow(VM&, Structure*, NonPropertyTransition); |
199 | |
200 | JS_EXPORT_PRIVATE bool isSealed(VM&); |
201 | JS_EXPORT_PRIVATE bool isFrozen(VM&); |
202 | bool isStructureExtensible() const { return !didPreventExtensions(); } |
203 | |
204 | JS_EXPORT_PRIVATE Structure* flattenDictionaryStructure(VM&, JSObject*); |
205 | |
206 | static constexpr bool needsDestruction = true; |
207 | static void destroy(JSCell*); |
208 | |
209 | // Versions that take a func will call it after making the change but while still holding |
210 | // the lock. The callback is not called if there is no change being made, like if you call |
211 | // removePropertyWithoutTransition() and the property is not found. |
212 | template<typename Func> |
213 | PropertyOffset addPropertyWithoutTransition(VM&, PropertyName, unsigned attributes, const Func&); |
214 | template<typename Func> |
215 | PropertyOffset removePropertyWithoutTransition(VM&, PropertyName, const Func&); |
216 | void setPrototypeWithoutTransition(VM&, JSValue prototype); |
217 | |
218 | bool isDictionary() const { return dictionaryKind() != NoneDictionaryKind; } |
219 | bool isUncacheableDictionary() const { return dictionaryKind() == UncachedDictionaryKind; } |
220 | |
221 | bool prototypeQueriesAreCacheable() |
222 | { |
223 | return !typeInfo().prohibitsPropertyCaching(); |
224 | } |
225 | |
226 | bool propertyAccessesAreCacheable() |
227 | { |
228 | return dictionaryKind() != UncachedDictionaryKind |
229 | && prototypeQueriesAreCacheable() |
230 | && !(typeInfo().getOwnPropertySlotIsImpure() && !typeInfo().newImpurePropertyFiresWatchpoints()); |
231 | } |
232 | |
233 | bool propertyAccessesAreCacheableForAbsence() |
234 | { |
235 | return !typeInfo().getOwnPropertySlotIsImpureForPropertyAbsence(); |
236 | } |
237 | |
238 | bool needImpurePropertyWatchpoint() |
239 | { |
240 | return propertyAccessesAreCacheable() |
241 | && typeInfo().getOwnPropertySlotIsImpure() |
242 | && typeInfo().newImpurePropertyFiresWatchpoints(); |
243 | } |
244 | |
245 | bool isImmutablePrototypeExoticObject() |
246 | { |
247 | return typeInfo().isImmutablePrototypeExoticObject(); |
248 | } |
249 | |
250 | // We use SlowPath in GetByStatus for structures that may get new impure properties later to prevent |
251 | // DFG from inlining property accesses since structures don't transition when a new impure property appears. |
252 | bool takesSlowPathInDFGForImpureProperty() |
253 | { |
254 | return typeInfo().getOwnPropertySlotIsImpure(); |
255 | } |
256 | |
257 | // Type accessors. |
258 | TypeInfo typeInfo() const { return m_blob.typeInfo(m_outOfLineTypeFlags); } |
259 | bool isObject() const { return typeInfo().isObject(); } |
260 | |
261 | IndexingType indexingType() const { return m_blob.indexingModeIncludingHistory() & AllWritableArrayTypes; } |
262 | IndexingType indexingMode() const { return m_blob.indexingModeIncludingHistory() & AllArrayTypes; } |
263 | IndexingType indexingModeIncludingHistory() const { return m_blob.indexingModeIncludingHistory(); } |
264 | |
265 | inline bool mayInterceptIndexedAccesses() const; |
266 | |
267 | bool holesMustForwardToPrototype(VM&, JSObject*) const; |
268 | |
269 | JSGlobalObject* globalObject() const { return m_globalObject.get(); } |
270 | |
271 | // NOTE: This method should only be called during the creation of structures, since the global |
272 | // object of a structure is presumed to be immutable in a bunch of places. |
273 | void setGlobalObject(VM&, JSGlobalObject*); |
274 | |
275 | ALWAYS_INLINE bool hasMonoProto() const |
276 | { |
277 | return !m_prototype.get().isEmpty(); |
278 | } |
279 | ALWAYS_INLINE bool hasPolyProto() const |
280 | { |
281 | return !hasMonoProto(); |
282 | } |
283 | ALWAYS_INLINE JSValue storedPrototype() const |
284 | { |
285 | ASSERT(hasMonoProto()); |
286 | return m_prototype.get(); |
287 | } |
288 | JSValue storedPrototype(const JSObject*) const; |
289 | JSObject* storedPrototypeObject(const JSObject*) const; |
290 | Structure* storedPrototypeStructure(const JSObject*) const; |
291 | |
292 | JSObject* storedPrototypeObject() const; |
293 | Structure* storedPrototypeStructure() const; |
294 | JSValue prototypeForLookup(JSGlobalObject*) const; |
295 | JSValue prototypeForLookup(JSGlobalObject*, JSCell* base) const; |
296 | StructureChain* prototypeChain(VM&, JSGlobalObject*, JSObject* base) const; |
297 | StructureChain* prototypeChain(JSGlobalObject*, JSObject* base) const; |
298 | static void visitChildren(JSCell*, SlotVisitor&); |
299 | |
300 | // A Structure is cheap to mark during GC if doing so would only add a small and bounded amount |
301 | // to our heap footprint. For example, if the structure refers to a global object that is not |
302 | // yet marked, then as far as we know, the decision to mark this Structure would lead to a large |
303 | // increase in footprint because no other object refers to that global object. This method |
304 | // returns true if all user-controlled (and hence unbounded in size) objects referenced from the |
305 | // Structure are already marked. |
306 | bool isCheapDuringGC(VM&); |
307 | |
308 | // Returns true if this structure is now marked. |
309 | bool markIfCheap(SlotVisitor&); |
310 | |
311 | bool hasRareData() const |
312 | { |
313 | return isRareData(m_previousOrRareData.get()); |
314 | } |
315 | |
316 | StructureRareData* rareData() |
317 | { |
318 | ASSERT(hasRareData()); |
319 | return static_cast<StructureRareData*>(m_previousOrRareData.get()); |
320 | } |
321 | |
322 | const StructureRareData* rareData() const |
323 | { |
324 | ASSERT(hasRareData()); |
325 | return static_cast<const StructureRareData*>(m_previousOrRareData.get()); |
326 | } |
327 | |
328 | const StructureRareData* rareDataConcurrently() const |
329 | { |
330 | JSCell* cell = m_previousOrRareData.get(); |
331 | if (isRareData(cell)) |
332 | return static_cast<StructureRareData*>(cell); |
333 | return nullptr; |
334 | } |
335 | |
336 | StructureRareData* ensureRareData(VM& vm) |
337 | { |
338 | if (!hasRareData()) |
339 | allocateRareData(vm); |
340 | return rareData(); |
341 | } |
342 | |
343 | Structure* previousID() const |
344 | { |
345 | ASSERT(structure()->classInfo() == info()); |
346 | // This is so written because it's used concurrently. We only load from m_previousOrRareData |
347 | // once, and this load is guaranteed atomic. |
348 | JSCell* cell = m_previousOrRareData.get(); |
349 | if (isRareData(cell)) |
350 | return static_cast<StructureRareData*>(cell)->previousID(); |
351 | return static_cast<Structure*>(cell); |
352 | } |
353 | bool transitivelyTransitionedFrom(Structure* structureToFind); |
354 | |
355 | PropertyOffset lastOffset() const { return m_offset; } |
356 | |
357 | void setLastOffset(PropertyOffset offset) { m_offset = offset; } |
358 | |
359 | static unsigned outOfLineCapacity(PropertyOffset lastOffset) |
360 | { |
361 | unsigned outOfLineSize = Structure::outOfLineSize(lastOffset); |
362 | |
363 | // This algorithm completely determines the out-of-line property storage growth algorithm. |
364 | // The JSObject code will only trigger a resize if the value returned by this algorithm |
365 | // changed between the new and old structure. So, it's important to keep this simple because |
366 | // it's on a fast path. |
367 | |
368 | if (!outOfLineSize) |
369 | return 0; |
370 | |
371 | if (outOfLineSize <= initialOutOfLineCapacity) |
372 | return initialOutOfLineCapacity; |
373 | |
374 | ASSERT(outOfLineSize > initialOutOfLineCapacity); |
375 | COMPILE_ASSERT(outOfLineGrowthFactor == 2, outOfLineGrowthFactor_is_two); |
376 | return WTF::roundUpToPowerOfTwo(outOfLineSize); |
377 | } |
378 | |
379 | static unsigned outOfLineSize(PropertyOffset lastOffset) |
380 | { |
381 | return numberOfOutOfLineSlotsForLastOffset(lastOffset); |
382 | } |
383 | |
384 | unsigned outOfLineCapacity() const |
385 | { |
386 | return outOfLineCapacity(m_offset); |
387 | } |
388 | unsigned outOfLineSize() const |
389 | { |
390 | return outOfLineSize(m_offset); |
391 | } |
392 | bool hasInlineStorage() const |
393 | { |
394 | return !!m_inlineCapacity; |
395 | } |
396 | unsigned inlineCapacity() const |
397 | { |
398 | return m_inlineCapacity; |
399 | } |
400 | unsigned inlineSize() const |
401 | { |
402 | return std::min<unsigned>(m_offset + 1, m_inlineCapacity); |
403 | } |
404 | unsigned totalStorageSize() const |
405 | { |
406 | return numberOfSlotsForLastOffset(m_offset, m_inlineCapacity); |
407 | } |
408 | unsigned totalStorageCapacity() const |
409 | { |
410 | ASSERT(structure()->classInfo() == info()); |
411 | return outOfLineCapacity() + inlineCapacity(); |
412 | } |
413 | |
414 | bool isValidOffset(PropertyOffset offset) const |
415 | { |
416 | return JSC::isValidOffset(offset) |
417 | && offset <= m_offset |
418 | && (offset < m_inlineCapacity || offset >= firstOutOfLineOffset); |
419 | } |
420 | |
421 | bool () const |
422 | { |
423 | return isTypedView(m_classInfo->typedArrayStorageType); |
424 | } |
425 | |
426 | bool () const |
427 | { |
428 | return hasIndexedProperties(indexingType()) |
429 | || hijacksIndexingHeader(); |
430 | } |
431 | |
432 | bool (const JSCell*) const; |
433 | |
434 | bool masqueradesAsUndefined(JSGlobalObject* lexicalGlobalObject); |
435 | |
436 | PropertyOffset get(VM&, PropertyName); |
437 | PropertyOffset get(VM&, PropertyName, unsigned& attributes); |
438 | |
439 | // This is a somewhat internalish method. It will call your functor while possibly holding the |
440 | // Structure's lock. There is no guarantee whether the lock is held or not in any particular |
441 | // call. So, you have to assume the worst. Also, the functor returns true if it wishes for you |
442 | // to continue or false if it's done. |
443 | template<typename Functor> |
444 | void forEachPropertyConcurrently(const Functor&); |
445 | |
446 | template<typename Functor> |
447 | void forEachProperty(VM&, const Functor&); |
448 | |
449 | PropertyOffset getConcurrently(UniquedStringImpl* uid); |
450 | PropertyOffset getConcurrently(UniquedStringImpl* uid, unsigned& attributes); |
451 | |
452 | Vector<PropertyMapEntry> getPropertiesConcurrently(); |
453 | |
454 | void setHasGetterSetterPropertiesWithProtoCheck(bool is__proto__) |
455 | { |
456 | setHasGetterSetterProperties(true); |
457 | if (!is__proto__) |
458 | setHasReadOnlyOrGetterSetterPropertiesExcludingProto(true); |
459 | } |
460 | |
461 | void setContainsReadOnlyProperties() { setHasReadOnlyOrGetterSetterPropertiesExcludingProto(true); } |
462 | |
463 | void setHasCustomGetterSetterPropertiesWithProtoCheck(bool is__proto__) |
464 | { |
465 | setHasCustomGetterSetterProperties(true); |
466 | if (!is__proto__) |
467 | setHasReadOnlyOrGetterSetterPropertiesExcludingProto(true); |
468 | } |
469 | |
470 | bool isEmpty() const |
471 | { |
472 | ASSERT(checkOffsetConsistency()); |
473 | return !JSC::isValidOffset(m_offset); |
474 | } |
475 | |
476 | void setCachedPropertyNameEnumerator(VM&, JSPropertyNameEnumerator*); |
477 | JSPropertyNameEnumerator* cachedPropertyNameEnumerator() const; |
478 | bool canCachePropertyNameEnumerator() const; |
479 | bool canAccessPropertiesQuicklyForEnumeration() const; |
480 | |
481 | void setCachedOwnKeys(VM&, JSImmutableButterfly*); |
482 | JSImmutableButterfly* cachedOwnKeys() const; |
483 | JSImmutableButterfly* cachedOwnKeysIgnoringSentinel() const; |
484 | bool canCacheOwnKeys() const; |
485 | |
486 | void getPropertyNamesFromStructure(VM&, PropertyNameArray&, EnumerationMode); |
487 | |
488 | JSString* objectToStringValue() |
489 | { |
490 | if (!hasRareData()) |
491 | return 0; |
492 | return rareData()->objectToStringValue(); |
493 | } |
494 | |
495 | void setObjectToStringValue(JSGlobalObject*, VM&, JSString* value, PropertySlot toStringTagSymbolSlot); |
496 | |
497 | const ClassInfo* classInfo() const { return m_classInfo; } |
498 | |
499 | static ptrdiff_t structureIDOffset() |
500 | { |
501 | return OBJECT_OFFSETOF(Structure, m_blob) + StructureIDBlob::structureIDOffset(); |
502 | } |
503 | |
504 | static ptrdiff_t prototypeOffset() |
505 | { |
506 | return OBJECT_OFFSETOF(Structure, m_prototype); |
507 | } |
508 | |
509 | static ptrdiff_t globalObjectOffset() |
510 | { |
511 | return OBJECT_OFFSETOF(Structure, m_globalObject); |
512 | } |
513 | |
514 | static ptrdiff_t classInfoOffset() |
515 | { |
516 | return OBJECT_OFFSETOF(Structure, m_classInfo); |
517 | } |
518 | |
519 | static ptrdiff_t indexingModeIncludingHistoryOffset() |
520 | { |
521 | return OBJECT_OFFSETOF(Structure, m_blob) + StructureIDBlob::indexingModeIncludingHistoryOffset(); |
522 | } |
523 | |
524 | static ptrdiff_t propertyTableUnsafeOffset() |
525 | { |
526 | return OBJECT_OFFSETOF(Structure, m_propertyTableUnsafe); |
527 | } |
528 | |
529 | static ptrdiff_t inlineCapacityOffset() |
530 | { |
531 | return OBJECT_OFFSETOF(Structure, m_inlineCapacity); |
532 | } |
533 | |
534 | static ptrdiff_t previousOrRareDataOffset() |
535 | { |
536 | return OBJECT_OFFSETOF(Structure, m_previousOrRareData); |
537 | } |
538 | |
539 | static Structure* createStructure(VM&); |
540 | |
541 | bool transitionWatchpointSetHasBeenInvalidated() const |
542 | { |
543 | return m_transitionWatchpointSet.hasBeenInvalidated(); |
544 | } |
545 | |
546 | bool transitionWatchpointSetIsStillValid() const |
547 | { |
548 | return m_transitionWatchpointSet.isStillValid(); |
549 | } |
550 | |
551 | bool dfgShouldWatchIfPossible() const |
552 | { |
553 | // FIXME: We would like to not watch things that are unprofitable to watch, like |
554 | // dictionaries. Unfortunately, we can't do such things: a dictionary could get flattened, |
555 | // in which case it will start to appear watchable and so the DFG will think that it is |
556 | // watching it. We should come up with a comprehensive story for not watching things that |
557 | // aren't profitable to watch. |
558 | // https://bugs.webkit.org/show_bug.cgi?id=133625 |
559 | |
560 | // - We don't watch Structures that either decided not to be watched, or whose predecessors |
561 | // decided not to be watched. This happens when a transition is fired while being watched. |
562 | if (transitionWatchpointIsLikelyToBeFired()) |
563 | return false; |
564 | |
565 | // - Don't watch Structures that had been dictionaries. |
566 | if (hasBeenDictionary()) |
567 | return false; |
568 | |
569 | return true; |
570 | } |
571 | |
572 | bool dfgShouldWatch() const |
573 | { |
574 | return dfgShouldWatchIfPossible() && transitionWatchpointSetIsStillValid(); |
575 | } |
576 | |
577 | void addTransitionWatchpoint(Watchpoint* watchpoint) const |
578 | { |
579 | ASSERT(transitionWatchpointSetIsStillValid()); |
580 | m_transitionWatchpointSet.add(watchpoint); |
581 | } |
582 | |
583 | void didTransitionFromThisStructure(DeferredStructureTransitionWatchpointFire* = nullptr) const; |
584 | |
585 | InlineWatchpointSet& transitionWatchpointSet() const |
586 | { |
587 | return m_transitionWatchpointSet; |
588 | } |
589 | |
590 | WatchpointSet* ensurePropertyReplacementWatchpointSet(VM&, PropertyOffset); |
591 | void startWatchingPropertyForReplacements(VM& vm, PropertyOffset offset) |
592 | { |
593 | ensurePropertyReplacementWatchpointSet(vm, offset); |
594 | } |
595 | void startWatchingPropertyForReplacements(VM&, PropertyName); |
596 | WatchpointSet* propertyReplacementWatchpointSet(PropertyOffset); |
597 | void didReplaceProperty(PropertyOffset); |
598 | void didCachePropertyReplacement(VM&, PropertyOffset); |
599 | |
600 | void startWatchingInternalPropertiesIfNecessary(VM& vm) |
601 | { |
602 | if (LIKELY(didWatchInternalProperties())) |
603 | return; |
604 | startWatchingInternalProperties(vm); |
605 | } |
606 | |
607 | Ref<StructureShape> toStructureShape(JSValue, bool& sawPolyProtoStructure); |
608 | |
609 | void dump(PrintStream&) const; |
610 | void dumpInContext(PrintStream&, DumpContext*) const; |
611 | void dumpBrief(PrintStream&, const CString&) const; |
612 | |
613 | static void (PrintStream&); |
614 | |
615 | ConcurrentJSLock& lock() { return m_lock; } |
616 | |
617 | unsigned propertyHash() const { return m_propertyHash; } |
618 | |
619 | static bool shouldConvertToPolyProto(const Structure* a, const Structure* b); |
620 | |
621 | struct PropertyHashEntry { |
622 | const HashTable* table; |
623 | const HashTableValue* value; |
624 | }; |
625 | Optional<PropertyHashEntry> findPropertyHashEntry(PropertyName) const; |
626 | |
627 | DECLARE_EXPORT_INFO; |
628 | |
629 | private: |
630 | typedef enum { |
631 | NoneDictionaryKind = 0, |
632 | CachedDictionaryKind = 1, |
633 | UncachedDictionaryKind = 2 |
634 | } DictionaryKind; |
635 | |
636 | public: |
637 | #define DEFINE_BITFIELD(type, lowerName, upperName, width, offset) \ |
638 | static constexpr uint32_t s_##lowerName##Shift = offset;\ |
639 | static constexpr uint32_t s_##lowerName##Mask = ((1 << (width - 1)) | ((1 << (width - 1)) - 1));\ |
640 | type lowerName() const { return static_cast<type>((m_bitField >> offset) & s_##lowerName##Mask); }\ |
641 | void set##upperName(type newValue) \ |
642 | {\ |
643 | m_bitField &= ~(s_##lowerName##Mask << offset);\ |
644 | m_bitField |= (newValue & s_##lowerName##Mask) << offset;\ |
645 | } |
646 | |
647 | DEFINE_BITFIELD(DictionaryKind, dictionaryKind, DictionaryKind, 2, 0); |
648 | DEFINE_BITFIELD(bool, isPinnedPropertyTable, IsPinnedPropertyTable, 1, 2); |
649 | DEFINE_BITFIELD(bool, hasGetterSetterProperties, HasGetterSetterProperties, 1, 3); |
650 | DEFINE_BITFIELD(bool, hasReadOnlyOrGetterSetterPropertiesExcludingProto, HasReadOnlyOrGetterSetterPropertiesExcludingProto, 1, 4); |
651 | DEFINE_BITFIELD(bool, isQuickPropertyAccessAllowedForEnumeration, IsQuickPropertyAccessAllowedForEnumeration, 1, 5); |
652 | DEFINE_BITFIELD(unsigned, attributesInPrevious, AttributesInPrevious, 14, 6); |
653 | DEFINE_BITFIELD(bool, didPreventExtensions, DidPreventExtensions, 1, 20); |
654 | DEFINE_BITFIELD(bool, didTransition, DidTransition, 1, 21); |
655 | DEFINE_BITFIELD(bool, staticPropertiesReified, StaticPropertiesReified, 1, 22); |
656 | DEFINE_BITFIELD(bool, hasBeenFlattenedBefore, HasBeenFlattenedBefore, 1, 23); |
657 | DEFINE_BITFIELD(bool, hasCustomGetterSetterProperties, HasCustomGetterSetterProperties, 1, 24); |
658 | DEFINE_BITFIELD(bool, didWatchInternalProperties, DidWatchInternalProperties, 1, 25); |
659 | DEFINE_BITFIELD(bool, transitionWatchpointIsLikelyToBeFired, TransitionWatchpointIsLikelyToBeFired, 1, 26); |
660 | DEFINE_BITFIELD(bool, hasBeenDictionary, HasBeenDictionary, 1, 27); |
661 | DEFINE_BITFIELD(bool, isAddingPropertyForTransition, IsAddingPropertyForTransition, 1, 28); |
662 | DEFINE_BITFIELD(bool, hasUnderscoreProtoPropertyExcludingOriginalProto, HasUnderscoreProtoPropertyExcludingOriginalProto, 1, 29); |
663 | |
664 | private: |
665 | friend class LLIntOffsetsExtractor; |
666 | |
667 | JS_EXPORT_PRIVATE Structure(VM&, JSGlobalObject*, JSValue prototype, const TypeInfo&, const ClassInfo*, IndexingType, unsigned inlineCapacity); |
668 | Structure(VM&); |
669 | Structure(VM&, Structure*, DeferredStructureTransitionWatchpointFire*); |
670 | |
671 | static Structure* create(VM&, Structure*, DeferredStructureTransitionWatchpointFire* = nullptr); |
672 | |
673 | static Structure* addPropertyTransitionToExistingStructureImpl(Structure*, UniquedStringImpl* uid, unsigned attributes, PropertyOffset&); |
674 | |
675 | // This will return the structure that has a usable property table, that property table, |
676 | // and the list of structures that we visited before we got to it. If it returns a |
677 | // non-null structure, it will also lock the structure that it returns; it is your job |
678 | // to unlock it. |
679 | void findStructuresAndMapForMaterialization(Vector<Structure*, 8>& structures, Structure*&, PropertyTable*&); |
680 | |
681 | static Structure* toDictionaryTransition(VM&, Structure*, DictionaryKind, DeferredStructureTransitionWatchpointFire* = nullptr); |
682 | |
683 | enum class ShouldPin { No, Yes }; |
684 | template<ShouldPin, typename Func> |
685 | PropertyOffset add(VM&, PropertyName, unsigned attributes, const Func&); |
686 | PropertyOffset add(VM&, PropertyName, unsigned attributes); |
687 | template<typename Func> |
688 | PropertyOffset remove(PropertyName, const Func&); |
689 | PropertyOffset remove(PropertyName); |
690 | |
691 | void checkConsistency(); |
692 | |
693 | // This may grab the lock, or not. Do not call when holding the Structure's lock. |
694 | PropertyTable* ensurePropertyTableIfNotEmpty(VM& vm) |
695 | { |
696 | if (PropertyTable* result = m_propertyTableUnsafe.get()) |
697 | return result; |
698 | if (!previousID()) |
699 | return nullptr; |
700 | return materializePropertyTable(vm); |
701 | } |
702 | |
703 | // This may grab the lock, or not. Do not call when holding the Structure's lock. |
704 | PropertyTable* ensurePropertyTable(VM& vm) |
705 | { |
706 | if (PropertyTable* result = m_propertyTableUnsafe.get()) |
707 | return result; |
708 | return materializePropertyTable(vm); |
709 | } |
710 | |
711 | PropertyTable* propertyTableOrNull() const |
712 | { |
713 | return m_propertyTableUnsafe.get(); |
714 | } |
715 | |
716 | // This will grab the lock. Do not call when holding the Structure's lock. |
717 | JS_EXPORT_PRIVATE PropertyTable* materializePropertyTable(VM&, bool setPropertyTable = true); |
718 | |
719 | void setPropertyTable(VM& vm, PropertyTable* table); |
720 | |
721 | PropertyTable* takePropertyTableOrCloneIfPinned(VM&); |
722 | PropertyTable* copyPropertyTableForPinning(VM&); |
723 | |
724 | void setPreviousID(VM&, Structure*); |
725 | |
726 | void clearPreviousID() |
727 | { |
728 | if (hasRareData()) |
729 | rareData()->clearPreviousID(); |
730 | else |
731 | m_previousOrRareData.clear(); |
732 | } |
733 | |
734 | int transitionCount() const |
735 | { |
736 | // Since the number of transitions is always the same as m_offset, we keep the size of Structure down by not storing both. |
737 | return numberOfSlotsForLastOffset(m_offset, m_inlineCapacity); |
738 | } |
739 | |
740 | bool isValid(JSGlobalObject*, StructureChain* cachedPrototypeChain, JSObject* base) const; |
741 | |
742 | // You have to hold the structure lock to do these. |
743 | JS_EXPORT_PRIVATE void pin(const AbstractLocker&, VM&, PropertyTable*); |
744 | void pinForCaching(const AbstractLocker&, VM&, PropertyTable*); |
745 | |
746 | bool isRareData(JSCell* cell) const |
747 | { |
748 | return cell && cell->structureID() != structureID(); |
749 | } |
750 | |
751 | template<typename DetailsFunc> |
752 | bool checkOffsetConsistency(PropertyTable*, const DetailsFunc&) const; |
753 | bool checkOffsetConsistency() const; |
754 | |
755 | JS_EXPORT_PRIVATE void allocateRareData(VM&); |
756 | |
757 | void startWatchingInternalProperties(VM&); |
758 | |
759 | static constexpr int s_maxTransitionLength = 64; |
760 | static constexpr int s_maxTransitionLengthForNonEvalPutById = 512; |
761 | |
762 | // These need to be properly aligned at the beginning of the 'Structure' |
763 | // part of the object. |
764 | StructureIDBlob m_blob; |
765 | TypeInfo::OutOfLineTypeFlags m_outOfLineTypeFlags; |
766 | |
767 | uint8_t m_inlineCapacity; |
768 | |
769 | ConcurrentJSLock m_lock; |
770 | |
771 | uint32_t m_bitField; |
772 | |
773 | WriteBarrier<JSGlobalObject> m_globalObject; |
774 | WriteBarrier<Unknown> m_prototype; |
775 | mutable WriteBarrier<StructureChain> m_cachedPrototypeChain; |
776 | |
777 | WriteBarrier<JSCell> m_previousOrRareData; |
778 | |
779 | RefPtr<UniquedStringImpl> m_nameInPrevious; |
780 | |
781 | const ClassInfo* m_classInfo; |
782 | |
783 | StructureTransitionTable m_transitionTable; |
784 | |
785 | // Should be accessed through ensurePropertyTable(). During GC, it may be set to 0 by another thread. |
786 | // During a Heap Snapshot GC we avoid clearing the table so it is safe to use. |
787 | WriteBarrier<PropertyTable> m_propertyTableUnsafe; |
788 | |
789 | mutable InlineWatchpointSet m_transitionWatchpointSet; |
790 | |
791 | COMPILE_ASSERT(firstOutOfLineOffset < 256, firstOutOfLineOffset_fits); |
792 | |
793 | // m_offset does not account for anonymous slots |
794 | PropertyOffset m_offset; |
795 | |
796 | uint32_t m_propertyHash; |
797 | |
798 | friend class VMInspector; |
799 | }; |
800 | |
801 | } // namespace JSC |
802 | |