1 | /* |
2 | * Copyright (C) 2008-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "Structure.h" |
28 | |
29 | #include "BuiltinNames.h" |
30 | #include "CodeBlock.h" |
31 | #include "DumpContext.h" |
32 | #include "JSCInlines.h" |
33 | #include "JSObject.h" |
34 | #include "JSPropertyNameEnumerator.h" |
35 | #include "Lookup.h" |
36 | #include "PropertyMapHashTable.h" |
37 | #include "PropertyNameArray.h" |
38 | #include "StructureChain.h" |
39 | #include "StructureRareDataInlines.h" |
40 | #include "WeakGCMapInlines.h" |
41 | #include <wtf/CommaPrinter.h> |
42 | #include <wtf/NeverDestroyed.h> |
43 | #include <wtf/ProcessID.h> |
44 | #include <wtf/RefPtr.h> |
45 | #include <wtf/Threading.h> |
46 | |
47 | #define DUMP_STRUCTURE_ID_STATISTICS 0 |
48 | |
49 | namespace JSC { |
50 | |
51 | #if DUMP_STRUCTURE_ID_STATISTICS |
52 | static HashSet<Structure*>& liveStructureSet = *(new HashSet<Structure*>); |
53 | #endif |
54 | |
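// While a StructureTransitionTable is in single-slot mode, its sole cached transition is held
// through a Weak reference. This owner's finalize() runs when that transition Structure dies:
// it releases the WeakImpl and resets the table to an empty single-slot state, so lookups never
// observe a dead transition.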
55 | class SingleSlotTransitionWeakOwner final : public WeakHandleOwner { |
56 | void finalize(Handle<Unknown>, void* context) override |
57 | { |
58 | StructureTransitionTable* table = reinterpret_cast<StructureTransitionTable*>(context); |
59 | ASSERT(table->isUsingSingleSlot()); |
60 | WeakSet::deallocate(table->weakImpl()); |
61 | table->m_data = StructureTransitionTable::UsingSingleSlotFlag; |
62 | } |
63 | }; |
64 | |
65 | static SingleSlotTransitionWeakOwner& singleSlotTransitionWeakOwner() |
66 | { |
67 | static NeverDestroyed<SingleSlotTransitionWeakOwner> owner; |
68 | return owner; |
69 | } |
70 | |
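// Note: in single-slot mode, m_data appears to hold the WeakImpl pointer tagged with
// UsingSingleSlotFlag (see setSingleTransition() below), with weakImpl() in the header stripping
// the tag. A missing or no-longer-live weak reference reads back as "no transition".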
71 | inline Structure* StructureTransitionTable::singleTransition() const |
72 | { |
73 | ASSERT(isUsingSingleSlot()); |
74 | if (WeakImpl* impl = this->weakImpl()) { |
75 | if (impl->state() == WeakImpl::Live) |
76 | return jsCast<Structure*>(impl->jsValue().asCell()); |
77 | } |
78 | return nullptr; |
79 | } |
80 | |
81 | inline void StructureTransitionTable::setSingleTransition(Structure* structure) |
82 | { |
83 | ASSERT(isUsingSingleSlot()); |
84 | if (WeakImpl* impl = this->weakImpl()) |
85 | WeakSet::deallocate(impl); |
86 | WeakImpl* impl = WeakSet::allocate(structure, &singleSlotTransitionWeakOwner(), this); |
87 | m_data = bitwise_cast<intptr_t>(impl) | UsingSingleSlotFlag; |
88 | } |
89 | |
90 | bool StructureTransitionTable::contains(UniquedStringImpl* rep, unsigned attributes) const |
91 | { |
92 | if (isUsingSingleSlot()) { |
93 | Structure* transition = singleTransition(); |
94 | return transition && transition->m_nameInPrevious == rep && transition->attributesInPrevious() == attributes; |
95 | } |
96 | return map()->get(std::make_pair(rep, attributes)); |
97 | } |
98 | |
99 | inline Structure* StructureTransitionTable::get(UniquedStringImpl* rep, unsigned attributes) const |
100 | { |
101 | if (isUsingSingleSlot()) { |
102 | Structure* transition = singleTransition(); |
        return (transition && transition->m_nameInPrevious == rep && transition->attributesInPrevious() == attributes) ? transition : nullptr;
104 | } |
105 | return map()->get(std::make_pair(rep, attributes)); |
106 | } |
107 | |
108 | void StructureTransitionTable::add(VM& vm, Structure* structure) |
109 | { |
110 | if (isUsingSingleSlot()) { |
111 | Structure* existingTransition = singleTransition(); |
112 | |
113 | // This handles the first transition being added. |
114 | if (!existingTransition) { |
115 | setSingleTransition(structure); |
116 | return; |
117 | } |
118 | |
119 | // This handles the second transition being added |
120 | // (or the first transition being despecified!) |
121 | setMap(new TransitionMap(vm)); |
122 | add(vm, existingTransition); |
123 | } |
124 | |
125 | // Add the structure to the map. |
126 | |
    // Newer versions of the STL have an std::make_pair function that takes rvalue references.
    // When either of the parameters is a bitfield, the C++ compiler will try to bind them as lvalues,
    // which is invalid. To work around this, use unary "+" to make the parameter an rvalue.
    // See https://bugs.webkit.org/show_bug.cgi?id=59261 for more details.
130 | map()->set(std::make_pair(structure->m_nameInPrevious.get(), +structure->attributesInPrevious()), structure); |
131 | } |
132 | |
133 | void Structure::dumpStatistics() |
134 | { |
135 | #if DUMP_STRUCTURE_ID_STATISTICS |
136 | unsigned numberLeaf = 0; |
137 | unsigned numberUsingSingleSlot = 0; |
138 | unsigned numberSingletons = 0; |
139 | unsigned numberWithPropertyMaps = 0; |
140 | unsigned totalPropertyMapsSize = 0; |
141 | |
142 | HashSet<Structure*>::const_iterator end = liveStructureSet.end(); |
143 | for (HashSet<Structure*>::const_iterator it = liveStructureSet.begin(); it != end; ++it) { |
144 | Structure* structure = *it; |
145 | |
146 | switch (structure->m_transitionTable.size()) { |
147 | case 0: |
148 | ++numberLeaf; |
149 | if (!structure->previousID()) |
150 | ++numberSingletons; |
151 | break; |
152 | |
153 | case 1: |
154 | ++numberUsingSingleSlot; |
155 | break; |
156 | } |
157 | |
158 | if (PropertyTable* table = structure->propertyTableOrNull()) { |
159 | ++numberWithPropertyMaps; |
160 | totalPropertyMapsSize += table->sizeInMemory(); |
161 | } |
162 | } |
163 | |
164 | dataLogF("Number of live Structures: %d\n" , liveStructureSet.size()); |
165 | dataLogF("Number of Structures using the single item optimization for transition map: %d\n" , numberUsingSingleSlot); |
166 | dataLogF("Number of Structures that are leaf nodes: %d\n" , numberLeaf); |
167 | dataLogF("Number of Structures that singletons: %d\n" , numberSingletons); |
168 | dataLogF("Number of Structures with PropertyMaps: %d\n" , numberWithPropertyMaps); |
169 | |
170 | dataLogF("Size of a single Structures: %d\n" , static_cast<unsigned>(sizeof(Structure))); |
171 | dataLogF("Size of sum of all property maps: %d\n" , totalPropertyMapsSize); |
172 | dataLogF("Size of average of all property maps: %f\n" , static_cast<double>(totalPropertyMapsSize) / static_cast<double>(liveStructureSet.size())); |
173 | #else |
174 | dataLogF("Dumping Structure statistics is not enabled.\n" ); |
175 | #endif |
176 | } |
177 | |
178 | Structure::Structure(VM& vm, JSGlobalObject* globalObject, JSValue prototype, const TypeInfo& typeInfo, const ClassInfo* classInfo, IndexingType indexingType, unsigned inlineCapacity) |
179 | : JSCell(vm, vm.structureStructure.get()) |
180 | , m_blob(vm.heap.structureIDTable().allocateID(this), indexingType, typeInfo) |
181 | , m_outOfLineTypeFlags(typeInfo.outOfLineTypeFlags()) |
182 | , m_inlineCapacity(inlineCapacity) |
183 | , m_bitField(0) |
184 | , m_globalObject(vm, this, globalObject, WriteBarrier<JSGlobalObject>::MayBeNull) |
185 | , m_prototype(vm, this, prototype) |
186 | , m_classInfo(classInfo) |
187 | , m_transitionWatchpointSet(IsWatched) |
188 | , m_offset(invalidOffset) |
189 | , m_propertyHash(0) |
190 | { |
191 | setDictionaryKind(NoneDictionaryKind); |
192 | setIsPinnedPropertyTable(false); |
193 | setHasGetterSetterProperties(classInfo->hasStaticSetterOrReadonlyProperties()); |
194 | setHasCustomGetterSetterProperties(false); |
195 | setHasReadOnlyOrGetterSetterPropertiesExcludingProto(classInfo->hasStaticSetterOrReadonlyProperties()); |
196 | setHasUnderscoreProtoPropertyExcludingOriginalProto(false); |
197 | setIsQuickPropertyAccessAllowedForEnumeration(true); |
198 | setAttributesInPrevious(0); |
199 | setDidPreventExtensions(false); |
200 | setDidTransition(false); |
201 | setStaticPropertiesReified(false); |
202 | setTransitionWatchpointIsLikelyToBeFired(false); |
203 | setHasBeenDictionary(false); |
204 | setIsAddingPropertyForTransition(false); |
205 | |
206 | ASSERT(inlineCapacity <= JSFinalObject::maxInlineCapacity()); |
207 | ASSERT(static_cast<PropertyOffset>(inlineCapacity) < firstOutOfLineOffset); |
208 | ASSERT(!hasRareData()); |
209 | ASSERT(hasReadOnlyOrGetterSetterPropertiesExcludingProto() || !m_classInfo->hasStaticSetterOrReadonlyProperties()); |
210 | ASSERT(hasGetterSetterProperties() || !m_classInfo->hasStaticSetterOrReadonlyProperties()); |
211 | ASSERT(!this->typeInfo().overridesGetCallData() || m_classInfo->methodTable.getCallData != &JSCell::getCallData); |
212 | } |
213 | |
const ClassInfo Structure::s_info = { "Structure", nullptr, nullptr, nullptr, CREATE_METHOD_TABLE(Structure) };
215 | |
216 | Structure::Structure(VM& vm) |
217 | : JSCell(CreatingEarlyCell) |
218 | , m_inlineCapacity(0) |
219 | , m_bitField(0) |
220 | , m_prototype(vm, this, jsNull()) |
221 | , m_classInfo(info()) |
222 | , m_transitionWatchpointSet(IsWatched) |
223 | , m_offset(invalidOffset) |
224 | , m_propertyHash(0) |
225 | { |
226 | setDictionaryKind(NoneDictionaryKind); |
227 | setIsPinnedPropertyTable(false); |
228 | setHasGetterSetterProperties(m_classInfo->hasStaticSetterOrReadonlyProperties()); |
229 | setHasCustomGetterSetterProperties(false); |
230 | setHasReadOnlyOrGetterSetterPropertiesExcludingProto(m_classInfo->hasStaticSetterOrReadonlyProperties()); |
231 | setHasUnderscoreProtoPropertyExcludingOriginalProto(false); |
232 | setIsQuickPropertyAccessAllowedForEnumeration(true); |
233 | setAttributesInPrevious(0); |
234 | setDidPreventExtensions(false); |
235 | setDidTransition(false); |
236 | setStaticPropertiesReified(false); |
237 | setTransitionWatchpointIsLikelyToBeFired(false); |
238 | setHasBeenDictionary(false); |
239 | setIsAddingPropertyForTransition(false); |
240 | |
241 | TypeInfo typeInfo = TypeInfo(CellType, StructureFlags); |
242 | m_blob = StructureIDBlob(vm.heap.structureIDTable().allocateID(this), 0, typeInfo); |
243 | m_outOfLineTypeFlags = typeInfo.outOfLineTypeFlags(); |
244 | |
245 | ASSERT(hasReadOnlyOrGetterSetterPropertiesExcludingProto() || !m_classInfo->hasStaticSetterOrReadonlyProperties()); |
246 | ASSERT(hasGetterSetterProperties() || !m_classInfo->hasStaticSetterOrReadonlyProperties()); |
247 | ASSERT(!this->typeInfo().overridesGetCallData() || m_classInfo->methodTable.getCallData != &JSCell::getCallData); |
248 | } |
249 | |
250 | Structure::Structure(VM& vm, Structure* previous, DeferredStructureTransitionWatchpointFire* deferred) |
251 | : JSCell(vm, vm.structureStructure.get()) |
252 | , m_inlineCapacity(previous->m_inlineCapacity) |
253 | , m_bitField(0) |
254 | , m_prototype(vm, this, previous->m_prototype.get()) |
255 | , m_classInfo(previous->m_classInfo) |
256 | , m_transitionWatchpointSet(IsWatched) |
257 | , m_offset(invalidOffset) |
258 | , m_propertyHash(previous->m_propertyHash) |
259 | { |
260 | setDictionaryKind(previous->dictionaryKind()); |
261 | setIsPinnedPropertyTable(false); |
262 | setHasBeenFlattenedBefore(previous->hasBeenFlattenedBefore()); |
263 | setHasGetterSetterProperties(previous->hasGetterSetterProperties()); |
264 | setHasCustomGetterSetterProperties(previous->hasCustomGetterSetterProperties()); |
265 | setHasReadOnlyOrGetterSetterPropertiesExcludingProto(previous->hasReadOnlyOrGetterSetterPropertiesExcludingProto()); |
266 | setHasUnderscoreProtoPropertyExcludingOriginalProto(previous->hasUnderscoreProtoPropertyExcludingOriginalProto()); |
267 | setIsQuickPropertyAccessAllowedForEnumeration(previous->isQuickPropertyAccessAllowedForEnumeration()); |
268 | setAttributesInPrevious(0); |
269 | setDidPreventExtensions(previous->didPreventExtensions()); |
270 | setDidTransition(true); |
271 | setStaticPropertiesReified(previous->staticPropertiesReified()); |
272 | setHasBeenDictionary(previous->hasBeenDictionary()); |
273 | setIsAddingPropertyForTransition(false); |
274 | |
275 | TypeInfo typeInfo = previous->typeInfo(); |
276 | m_blob = StructureIDBlob(vm.heap.structureIDTable().allocateID(this), previous->indexingModeIncludingHistory(), typeInfo); |
277 | m_outOfLineTypeFlags = typeInfo.outOfLineTypeFlags(); |
278 | |
279 | ASSERT(!previous->typeInfo().structureIsImmortal()); |
280 | setPreviousID(vm, previous); |
281 | |
282 | previous->didTransitionFromThisStructure(deferred); |
283 | |
284 | // Copy this bit now, in case previous was being watched. |
285 | setTransitionWatchpointIsLikelyToBeFired(previous->transitionWatchpointIsLikelyToBeFired()); |
286 | |
287 | if (previous->m_globalObject) |
288 | m_globalObject.set(vm, this, previous->m_globalObject.get()); |
289 | ASSERT(hasReadOnlyOrGetterSetterPropertiesExcludingProto() || !m_classInfo->hasStaticSetterOrReadonlyProperties()); |
290 | ASSERT(hasGetterSetterProperties() || !m_classInfo->hasStaticSetterOrReadonlyProperties()); |
291 | ASSERT(!this->typeInfo().overridesGetCallData() || m_classInfo->methodTable.getCallData != &JSCell::getCallData); |
292 | } |
293 | |
294 | Structure::~Structure() |
295 | { |
296 | if (typeInfo().structureIsImmortal()) |
297 | return; |
298 | Heap::heap(this)->structureIDTable().deallocateID(this, m_blob.structureID()); |
299 | } |
300 | |
301 | void Structure::destroy(JSCell* cell) |
302 | { |
303 | static_cast<Structure*>(cell)->Structure::~Structure(); |
304 | } |
305 | |
306 | Structure* Structure::create(PolyProtoTag, VM& vm, JSGlobalObject* globalObject, JSObject* prototype, const TypeInfo& typeInfo, const ClassInfo* classInfo, IndexingType indexingType, unsigned inlineCapacity) |
307 | { |
308 | Structure* result = create(vm, globalObject, prototype, typeInfo, classInfo, indexingType, inlineCapacity); |
309 | |
310 | unsigned oldOutOfLineCapacity = result->outOfLineCapacity(); |
311 | result->addPropertyWithoutTransition( |
312 | vm, vm.propertyNames->builtinNames().polyProtoName(), static_cast<unsigned>(PropertyAttribute::DontEnum), |
313 | [&] (const GCSafeConcurrentJSLocker&, PropertyOffset offset, PropertyOffset newLastOffset) { |
314 | RELEASE_ASSERT(Structure::outOfLineCapacity(newLastOffset) == oldOutOfLineCapacity); |
315 | RELEASE_ASSERT(offset == knownPolyProtoOffset); |
316 | RELEASE_ASSERT(isInlineOffset(knownPolyProtoOffset)); |
317 | result->m_prototype.setWithoutWriteBarrier(JSValue()); |
318 | result->setLastOffset(newLastOffset); |
319 | }); |
320 | |
321 | return result; |
322 | } |
323 | |
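// Walks the previousID chain, collecting every structure that lacks a property table, until it
// reaches an ancestor that still has one. That ancestor is returned with its lock still held
// (see the comment in the loop) so the caller can copy its table atomically; if no ancestor has
// a table, both 'structure' and 'table' come back null.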
324 | void Structure::findStructuresAndMapForMaterialization(Vector<Structure*, 8>& structures, Structure*& structure, PropertyTable*& table) |
325 | { |
326 | ASSERT(structures.isEmpty()); |
    table = nullptr;
328 | |
329 | for (structure = this; structure; structure = structure->previousID()) { |
330 | structure->m_lock.lock(); |
331 | |
332 | table = structure->propertyTableOrNull(); |
333 | if (table) { |
334 | // Leave the structure locked, so that the caller can do things to it atomically |
335 | // before it loses its property table. |
336 | return; |
337 | } |
338 | |
339 | structures.append(structure); |
340 | structure->m_lock.unlock(); |
341 | } |
342 | |
343 | ASSERT(!structure); |
344 | ASSERT(!table); |
345 | } |
346 | |
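// Rebuilds a property table for this structure: copy the nearest ancestor's table (or start from
// an empty one), then replay the intervening transitions oldest-to-newest, re-adding each
// property recorded in m_nameInPrevious / m_offset / attributesInPrevious. When setPropertyTable
// is true, the rebuilt table is also installed on this structure under the lock.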
347 | PropertyTable* Structure::materializePropertyTable(VM& vm, bool setPropertyTable) |
348 | { |
349 | ASSERT(structure(vm)->classInfo() == info()); |
350 | ASSERT(!isAddingPropertyForTransition()); |
351 | |
352 | DeferGC deferGC(vm.heap); |
353 | |
354 | Vector<Structure*, 8> structures; |
355 | Structure* structure; |
356 | PropertyTable* table; |
357 | |
358 | findStructuresAndMapForMaterialization(structures, structure, table); |
359 | |
360 | unsigned capacity = numberOfSlotsForLastOffset(m_offset, m_inlineCapacity); |
361 | if (table) { |
362 | table = table->copy(vm, capacity); |
363 | structure->m_lock.unlock(); |
364 | } else |
365 | table = PropertyTable::create(vm, capacity); |
366 | |
367 | // Must hold the lock on this structure, since we will be modifying this structure's |
368 | // property map. We don't want getConcurrently() to see the property map in a half-baked |
369 | // state. |
370 | GCSafeConcurrentJSLocker locker(m_lock, vm.heap); |
371 | if (setPropertyTable) |
372 | this->setPropertyTable(vm, table); |
373 | |
374 | for (size_t i = structures.size(); i--;) { |
375 | structure = structures[i]; |
376 | if (!structure->m_nameInPrevious) |
377 | continue; |
378 | PropertyMapEntry entry(structure->m_nameInPrevious.get(), structure->m_offset, structure->attributesInPrevious()); |
379 | table->add(entry, m_offset, PropertyTable::PropertyOffsetMustNotChange); |
380 | } |
381 | |
382 | checkOffsetConsistency( |
383 | table, |
384 | [&] () { |
385 | dataLog("Detected in materializePropertyTable.\n" ); |
386 | dataLog("Found structure = " , RawPointer(structure), "\n" ); |
387 | dataLog("structures = " ); |
388 | CommaPrinter comma; |
389 | for (Structure* structure : structures) |
390 | dataLog(comma, RawPointer(structure)); |
391 | dataLog("\n" ); |
392 | }); |
393 | |
394 | return table; |
395 | } |
396 | |
397 | Structure* Structure::addPropertyTransitionToExistingStructureImpl(Structure* structure, UniquedStringImpl* uid, unsigned attributes, PropertyOffset& offset) |
398 | { |
399 | ASSERT(!structure->isDictionary()); |
400 | ASSERT(structure->isObject()); |
401 | |
402 | if (Structure* existingTransition = structure->m_transitionTable.get(uid, attributes)) { |
403 | validateOffset(existingTransition->m_offset, existingTransition->inlineCapacity()); |
404 | offset = existingTransition->m_offset; |
405 | return existingTransition; |
406 | } |
407 | |
    return nullptr;
409 | } |
410 | |
411 | Structure* Structure::addPropertyTransitionToExistingStructure(Structure* structure, PropertyName propertyName, unsigned attributes, PropertyOffset& offset) |
412 | { |
413 | ASSERT(!isCompilationThread()); |
414 | return addPropertyTransitionToExistingStructureImpl(structure, propertyName.uid(), attributes, offset); |
415 | } |
416 | |
417 | Structure* Structure::addPropertyTransitionToExistingStructureConcurrently(Structure* structure, UniquedStringImpl* uid, unsigned attributes, PropertyOffset& offset) |
418 | { |
419 | ConcurrentJSLocker locker(structure->m_lock); |
420 | return addPropertyTransitionToExistingStructureImpl(structure, uid, attributes, offset); |
421 | } |
422 | |
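// Returns true if a missing indexed property (a "hole") on an object of this structure cannot
// simply be treated as undefined: either this structure may intercept indexed accesses, or some
// object in the prototype chain has indexed properties or may intercept indexed accesses itself.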
423 | bool Structure::holesMustForwardToPrototype(VM& vm, JSObject* base) const |
424 | { |
425 | ASSERT(base->structure(vm) == this); |
426 | |
427 | if (this->mayInterceptIndexedAccesses()) |
428 | return true; |
429 | |
430 | JSValue prototype = this->storedPrototype(base); |
431 | if (!prototype.isObject()) |
432 | return false; |
433 | JSObject* object = asObject(prototype); |
434 | |
435 | while (true) { |
436 | Structure& structure = *object->structure(vm); |
437 | if (hasIndexedProperties(object->indexingType()) || structure.mayInterceptIndexedAccesses()) |
438 | return true; |
439 | prototype = structure.storedPrototype(object); |
440 | if (!prototype.isObject()) |
441 | return false; |
442 | object = asObject(prototype); |
443 | } |
444 | |
445 | RELEASE_ASSERT_NOT_REACHED(); |
446 | return false; |
447 | } |
448 | |
449 | Structure* Structure::addPropertyTransition(VM& vm, Structure* structure, PropertyName propertyName, unsigned attributes, PropertyOffset& offset) |
450 | { |
451 | Structure* newStructure = addPropertyTransitionToExistingStructure( |
452 | structure, propertyName, attributes, offset); |
453 | if (newStructure) |
454 | return newStructure; |
455 | |
456 | return addNewPropertyTransition( |
457 | vm, structure, propertyName, attributes, offset, PutPropertySlot::UnknownContext); |
458 | } |
459 | |
460 | Structure* Structure::addNewPropertyTransition(VM& vm, Structure* structure, PropertyName propertyName, unsigned attributes, PropertyOffset& offset, PutPropertySlot::Context context, DeferredStructureTransitionWatchpointFire* deferred) |
461 | { |
462 | ASSERT(!structure->isDictionary()); |
463 | ASSERT(structure->isObject()); |
464 | ASSERT(!Structure::addPropertyTransitionToExistingStructure(structure, propertyName, attributes, offset)); |
465 | |
466 | int maxTransitionLength; |
467 | if (context == PutPropertySlot::PutById) |
468 | maxTransitionLength = s_maxTransitionLengthForNonEvalPutById; |
469 | else |
470 | maxTransitionLength = s_maxTransitionLength; |
471 | if (structure->transitionCount() > maxTransitionLength) { |
472 | ASSERT(!isCopyOnWrite(structure->indexingMode())); |
473 | Structure* transition = toCacheableDictionaryTransition(vm, structure, deferred); |
474 | ASSERT(structure != transition); |
475 | offset = transition->add(vm, propertyName, attributes); |
476 | return transition; |
477 | } |
478 | |
479 | Structure* transition = create(vm, structure, deferred); |
480 | |
481 | transition->m_cachedPrototypeChain.setMayBeNull(vm, transition, structure->m_cachedPrototypeChain.get()); |
482 | |
483 | // While we are adding the property, rematerializing the property table is super weird: we already |
484 | // have a m_nameInPrevious and attributesInPrevious but the m_offset is still wrong. If the |
485 | // materialization algorithm runs, it'll build a property table that already has the property but |
486 | // at a bogus offset. Rather than try to teach the materialization code how to create a table under |
487 | // those conditions, we just tell the GC not to blow the table away during this period of time. |
488 | // Holding the lock ensures that we either do this before the GC starts scanning the structure, in |
489 | // which case the GC will not blow the table away, or we do it after the GC already ran in which |
    // case all is well. If it weren't for the lock, the GC would have a TOCTOU bug: it could read
    // isAddingPropertyForTransition before we set it to true, and then blow the table away after.
492 | { |
493 | ConcurrentJSLocker locker(transition->m_lock); |
494 | transition->setIsAddingPropertyForTransition(true); |
495 | } |
496 | |
497 | transition->m_blob.setIndexingModeIncludingHistory(structure->indexingModeIncludingHistory() & ~CopyOnWrite); |
498 | transition->m_nameInPrevious = propertyName.uid(); |
499 | transition->setAttributesInPrevious(attributes); |
500 | transition->setPropertyTable(vm, structure->takePropertyTableOrCloneIfPinned(vm)); |
501 | transition->m_offset = structure->m_offset; |
502 | |
503 | offset = transition->add(vm, propertyName, attributes); |
504 | |
505 | // Now that everything is fine with the new structure's bookkeeping, the GC is free to blow the |
506 | // table away if it wants. We can now rebuild it fine. |
507 | WTF::storeStoreFence(); |
508 | transition->setIsAddingPropertyForTransition(false); |
509 | |
510 | checkOffset(transition->m_offset, transition->inlineCapacity()); |
511 | { |
512 | ConcurrentJSLocker locker(structure->m_lock); |
513 | DeferGC deferGC(vm.heap); |
514 | structure->m_transitionTable.add(vm, transition); |
515 | } |
516 | transition->checkOffsetConsistency(); |
517 | structure->checkOffsetConsistency(); |
518 | return transition; |
519 | } |
520 | |
521 | Structure* Structure::removePropertyTransition(VM& vm, Structure* structure, PropertyName propertyName, PropertyOffset& offset) |
522 | { |
523 | // NOTE: There are some good reasons why this goes directly to uncacheable dictionary rather than |
524 | // caching the removal. We can fix all of these things, but we must remember to do so, if we ever try |
525 | // to optimize this case. |
526 | // |
527 | // - Cached transitions usually steal the property table, and assume that this is possible because they |
528 | // can just rebuild the table by looking at past transitions. That code assumes that the table only |
529 | // grew and never shrank. To support removals, we'd have to change the property table materialization |
530 | // code to handle deletions. Also, we have logic to get the list of properties on a structure that |
531 | // lacks a property table by just looking back through the set of transitions since the last |
532 | // structure that had a pinned table. That logic would also have to be changed to handle cached |
533 | // removals. |
534 | // |
535 | ASSERT(!structure->isUncacheableDictionary()); |
536 | |
537 | Structure* transition = toUncacheableDictionaryTransition(vm, structure); |
538 | |
539 | offset = transition->remove(propertyName); |
540 | |
541 | transition->checkOffsetConsistency(); |
542 | return transition; |
543 | } |
544 | |
545 | Structure* Structure::changePrototypeTransition(VM& vm, Structure* structure, JSValue prototype, DeferredStructureTransitionWatchpointFire& deferred) |
546 | { |
547 | ASSERT(prototype.isObject() || prototype.isNull()); |
548 | |
549 | DeferGC deferGC(vm.heap); |
550 | Structure* transition = create(vm, structure, &deferred); |
551 | |
552 | transition->m_prototype.set(vm, transition, prototype); |
553 | |
554 | PropertyTable* table = structure->copyPropertyTableForPinning(vm); |
555 | transition->pin(holdLock(transition->m_lock), vm, table); |
556 | transition->m_offset = structure->m_offset; |
557 | |
558 | transition->checkOffsetConsistency(); |
559 | return transition; |
560 | } |
561 | |
562 | Structure* Structure::attributeChangeTransition(VM& vm, Structure* structure, PropertyName propertyName, unsigned attributes) |
563 | { |
564 | if (!structure->isUncacheableDictionary()) { |
565 | Structure* transition = create(vm, structure); |
566 | |
567 | PropertyTable* table = structure->copyPropertyTableForPinning(vm); |
568 | transition->pin(holdLock(transition->m_lock), vm, table); |
569 | transition->m_offset = structure->m_offset; |
570 | |
571 | structure = transition; |
572 | } |
573 | |
574 | PropertyMapEntry* entry = structure->ensurePropertyTable(vm)->get(propertyName.uid()); |
575 | ASSERT(entry); |
576 | entry->attributes = attributes; |
577 | |
578 | structure->checkOffsetConsistency(); |
579 | return structure; |
580 | } |
581 | |
582 | Structure* Structure::toDictionaryTransition(VM& vm, Structure* structure, DictionaryKind kind, DeferredStructureTransitionWatchpointFire* deferred) |
583 | { |
584 | ASSERT(!structure->isUncacheableDictionary()); |
585 | DeferGC deferGC(vm.heap); |
586 | |
587 | Structure* transition = create(vm, structure, deferred); |
588 | |
589 | PropertyTable* table = structure->copyPropertyTableForPinning(vm); |
590 | transition->pin(holdLock(transition->m_lock), vm, table); |
591 | transition->m_offset = structure->m_offset; |
592 | transition->setDictionaryKind(kind); |
593 | transition->setHasBeenDictionary(true); |
594 | |
595 | transition->checkOffsetConsistency(); |
596 | return transition; |
597 | } |
598 | |
599 | Structure* Structure::toCacheableDictionaryTransition(VM& vm, Structure* structure, DeferredStructureTransitionWatchpointFire* deferred) |
600 | { |
601 | return toDictionaryTransition(vm, structure, CachedDictionaryKind, deferred); |
602 | } |
603 | |
604 | Structure* Structure::toUncacheableDictionaryTransition(VM& vm, Structure* structure) |
605 | { |
606 | return toDictionaryTransition(vm, structure, UncachedDictionaryKind); |
607 | } |
608 | |
609 | Structure* Structure::sealTransition(VM& vm, Structure* structure) |
610 | { |
611 | return nonPropertyTransition(vm, structure, NonPropertyTransition::Seal); |
612 | } |
613 | |
614 | Structure* Structure::freezeTransition(VM& vm, Structure* structure) |
615 | { |
616 | return nonPropertyTransition(vm, structure, NonPropertyTransition::Freeze); |
617 | } |
618 | |
619 | Structure* Structure::preventExtensionsTransition(VM& vm, Structure* structure) |
620 | { |
621 | return nonPropertyTransition(vm, structure, NonPropertyTransition::PreventExtensions); |
622 | } |
623 | |
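// Used when building a transition. A pinned table must stay with its owner, so we hand out a
// copy; an unpinned table is stolen (detached under the lock), since the old structure can
// rematerialize it later from the transition chain. If there is no table at all, we materialize
// one without installing it on this structure.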
624 | PropertyTable* Structure::takePropertyTableOrCloneIfPinned(VM& vm) |
625 | { |
626 | // This must always return a property table. It can't return null. |
627 | PropertyTable* result = propertyTableOrNull(); |
628 | if (result) { |
629 | if (isPinnedPropertyTable()) |
630 | return result->copy(vm, result->size() + 1); |
631 | ConcurrentJSLocker locker(m_lock); |
632 | setPropertyTable(vm, nullptr); |
633 | return result; |
634 | } |
635 | bool setPropertyTable = false; |
636 | return materializePropertyTable(vm, setPropertyTable); |
637 | } |
638 | |
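// Handles transitions that do not add a named property: seal, freeze, preventExtensions, and
// indexing-mode changes. Cacheable results are keyed in the transition table by a null name plus
// the attribute bits. Seal and freeze rewrite the attributes of every existing entry, so those
// transitions pin a copied property table; other kinds take the usual steal-or-clone path.
// Dictionary structures never cache the new transition and pin its table instead.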
639 | Structure* Structure::nonPropertyTransitionSlow(VM& vm, Structure* structure, NonPropertyTransition transitionKind) |
640 | { |
641 | unsigned attributes = toAttributes(transitionKind); |
642 | IndexingType indexingModeIncludingHistory = newIndexingType(structure->indexingModeIncludingHistory(), transitionKind); |
643 | |
644 | Structure* existingTransition; |
    if (!structure->isDictionary() && (existingTransition = structure->m_transitionTable.get(nullptr, attributes))) {
646 | ASSERT(existingTransition->attributesInPrevious() == attributes); |
647 | ASSERT(existingTransition->indexingModeIncludingHistory() == indexingModeIncludingHistory); |
648 | return existingTransition; |
649 | } |
650 | |
651 | DeferGC deferGC(vm.heap); |
652 | |
653 | Structure* transition = create(vm, structure); |
654 | transition->setAttributesInPrevious(attributes); |
655 | transition->m_blob.setIndexingModeIncludingHistory(indexingModeIncludingHistory); |
656 | |
657 | if (preventsExtensions(transitionKind)) |
658 | transition->setDidPreventExtensions(true); |
659 | |
660 | if (setsDontDeleteOnAllProperties(transitionKind) |
661 | || setsReadOnlyOnNonAccessorProperties(transitionKind)) { |
662 | // We pin the property table on transitions that do wholesale editing of the property |
663 | // table, since our logic for walking the property transition chain to rematerialize the |
664 | // table doesn't know how to take into account such wholesale edits. |
665 | |
666 | PropertyTable* table = structure->copyPropertyTableForPinning(vm); |
667 | transition->pinForCaching(holdLock(transition->m_lock), vm, table); |
668 | transition->m_offset = structure->m_offset; |
669 | |
670 | table = transition->propertyTableOrNull(); |
671 | RELEASE_ASSERT(table); |
672 | for (auto& entry : *table) { |
673 | if (setsDontDeleteOnAllProperties(transitionKind)) |
674 | entry.attributes |= static_cast<unsigned>(PropertyAttribute::DontDelete); |
675 | if (setsReadOnlyOnNonAccessorProperties(transitionKind) && !(entry.attributes & PropertyAttribute::Accessor)) |
676 | entry.attributes |= static_cast<unsigned>(PropertyAttribute::ReadOnly); |
677 | } |
678 | } else { |
679 | transition->setPropertyTable(vm, structure->takePropertyTableOrCloneIfPinned(vm)); |
680 | transition->m_offset = structure->m_offset; |
681 | checkOffset(transition->m_offset, transition->inlineCapacity()); |
682 | } |
683 | |
684 | if (setsReadOnlyOnNonAccessorProperties(transitionKind) |
685 | && !transition->propertyTableOrNull()->isEmpty()) |
686 | transition->setHasReadOnlyOrGetterSetterPropertiesExcludingProto(true); |
687 | |
688 | if (structure->isDictionary()) { |
689 | PropertyTable* table = transition->ensurePropertyTable(vm); |
690 | transition->pin(holdLock(transition->m_lock), vm, table); |
691 | } else { |
692 | auto locker = holdLock(structure->m_lock); |
693 | structure->m_transitionTable.add(vm, transition); |
694 | } |
695 | |
696 | transition->checkOffsetConsistency(); |
697 | return transition; |
698 | } |
699 | |
700 | // In future we may want to cache this property. |
701 | bool Structure::isSealed(VM& vm) |
702 | { |
703 | if (isStructureExtensible()) |
704 | return false; |
705 | |
706 | PropertyTable* table = ensurePropertyTableIfNotEmpty(vm); |
707 | if (!table) |
708 | return true; |
709 | |
710 | PropertyTable::iterator end = table->end(); |
711 | for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter) { |
712 | if ((iter->attributes & PropertyAttribute::DontDelete) != static_cast<unsigned>(PropertyAttribute::DontDelete)) |
713 | return false; |
714 | } |
715 | return true; |
716 | } |
717 | |
718 | // In future we may want to cache this property. |
719 | bool Structure::isFrozen(VM& vm) |
720 | { |
721 | if (isStructureExtensible()) |
722 | return false; |
723 | |
724 | PropertyTable* table = ensurePropertyTableIfNotEmpty(vm); |
725 | if (!table) |
726 | return true; |
727 | |
728 | PropertyTable::iterator end = table->end(); |
729 | for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter) { |
730 | if (!(iter->attributes & PropertyAttribute::DontDelete)) |
731 | return false; |
732 | if (!(iter->attributes & (PropertyAttribute::ReadOnly | PropertyAttribute::Accessor))) |
733 | return false; |
734 | } |
735 | return true; |
736 | } |
737 | |
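// Turns a dictionary structure back into a normal one. For uncacheable dictionaries the property
// table can contain holes left by deletions, so the object's values are compacted into insertion
// order, every table offset is rewritten to match, and the now-unused inline and out-of-line
// storage is zeroed so the GC never sees stale pointers. The object's structure ID is nuked for
// the duration of the shuffle and restored afterwards.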
738 | Structure* Structure::flattenDictionaryStructure(VM& vm, JSObject* object) |
739 | { |
740 | checkOffsetConsistency(); |
741 | ASSERT(isDictionary()); |
742 | |
743 | GCSafeConcurrentJSLocker locker(m_lock, vm.heap); |
744 | |
745 | object->setStructureIDDirectly(nuke(id())); |
746 | WTF::storeStoreFence(); |
747 | |
748 | size_t beforeOutOfLineCapacity = this->outOfLineCapacity(); |
749 | if (isUncacheableDictionary()) { |
750 | PropertyTable* table = propertyTableOrNull(); |
751 | ASSERT(table); |
752 | |
753 | size_t propertyCount = table->size(); |
754 | |
755 | // Holds our values compacted by insertion order. |
756 | Vector<JSValue> values(propertyCount); |
757 | |
758 | // Copies out our values from their hashed locations, compacting property table offsets as we go. |
759 | unsigned i = 0; |
760 | PropertyTable::iterator end = table->end(); |
761 | m_offset = invalidOffset; |
762 | for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter, ++i) { |
763 | values[i] = object->getDirect(iter->offset); |
764 | m_offset = iter->offset = offsetForPropertyNumber(i, m_inlineCapacity); |
765 | } |
766 | |
767 | // Copies in our values to their compacted locations. |
768 | for (unsigned i = 0; i < propertyCount; i++) |
769 | object->putDirect(vm, offsetForPropertyNumber(i, m_inlineCapacity), values[i]); |
770 | |
771 | table->clearDeletedOffsets(); |
772 | |
773 | // We need to zero our unused property space; otherwise the GC might see a |
774 | // stale pointer when we add properties in the future. |
775 | memset( |
776 | object->inlineStorageUnsafe() + inlineSize(), |
777 | 0, |
778 | (inlineCapacity() - inlineSize()) * sizeof(EncodedJSValue)); |
779 | |
780 | Butterfly* butterfly = object->butterfly(); |
781 | size_t preCapacity = butterfly->indexingHeader()->preCapacity(this); |
782 | void* base = butterfly->base(preCapacity, beforeOutOfLineCapacity); |
783 | void* startOfPropertyStorageSlots = reinterpret_cast<EncodedJSValue*>(base) + preCapacity; |
784 | memset(startOfPropertyStorageSlots, 0, (beforeOutOfLineCapacity - outOfLineSize()) * sizeof(EncodedJSValue)); |
785 | checkOffsetConsistency(); |
786 | } |
787 | |
788 | setDictionaryKind(NoneDictionaryKind); |
789 | setHasBeenFlattenedBefore(true); |
790 | |
791 | size_t afterOutOfLineCapacity = this->outOfLineCapacity(); |
792 | |
793 | if (object->butterfly() && beforeOutOfLineCapacity != afterOutOfLineCapacity) { |
794 | ASSERT(beforeOutOfLineCapacity > afterOutOfLineCapacity); |
795 | // If the object had a Butterfly but after flattening/compacting we no longer have need of it, |
796 | // we need to zero it out because the collector depends on the Structure to know the size for copying. |
797 | if (!afterOutOfLineCapacity && !this->hasIndexingHeader(object)) |
798 | object->setButterfly(vm, nullptr); |
799 | // If the object was down-sized to the point where the base of the Butterfly is no longer within the |
800 | // first CopiedBlock::blockSize bytes, we'll get the wrong answer if we try to mask the base back to |
801 | // the CopiedBlock header. To prevent this case we need to memmove the Butterfly down. |
802 | else |
803 | object->shiftButterflyAfterFlattening(locker, vm, this, afterOutOfLineCapacity); |
804 | } |
805 | |
806 | WTF::storeStoreFence(); |
807 | object->setStructureIDDirectly(id()); |
808 | |
809 | // We need to do a writebarrier here because the GC thread might be scanning the butterfly while |
810 | // we are shuffling properties around. See: https://bugs.webkit.org/show_bug.cgi?id=166989 |
811 | vm.heap.writeBarrier(object); |
812 | |
813 | return this; |
814 | } |
815 | |
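// pin() and pinForCaching() both mark the property table as pinned: it stays materialized and is
// no longer handed off to future transitions. pin() additionally clears previousID, severing the
// transition history, while pinForCaching() leaves previousID intact and only clears
// m_nameInPrevious.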
816 | void Structure::pin(const AbstractLocker&, VM& vm, PropertyTable* table) |
817 | { |
818 | setIsPinnedPropertyTable(true); |
819 | setPropertyTable(vm, table); |
820 | clearPreviousID(); |
821 | m_nameInPrevious = nullptr; |
822 | } |
823 | |
824 | void Structure::pinForCaching(const AbstractLocker&, VM& vm, PropertyTable* table) |
825 | { |
826 | setIsPinnedPropertyTable(true); |
827 | setPropertyTable(vm, table); |
828 | m_nameInPrevious = nullptr; |
829 | } |
830 | |
831 | void Structure::allocateRareData(VM& vm) |
832 | { |
833 | ASSERT(!hasRareData()); |
834 | StructureRareData* rareData = StructureRareData::create(vm, previousID()); |
835 | WTF::storeStoreFence(); |
836 | m_previousOrRareData.set(vm, this, rareData); |
837 | ASSERT(hasRareData()); |
838 | } |
839 | |
840 | WatchpointSet* Structure::ensurePropertyReplacementWatchpointSet(VM& vm, PropertyOffset offset) |
841 | { |
842 | ASSERT(!isUncacheableDictionary()); |
843 | |
844 | // In some places it's convenient to call this with an invalid offset. So, we do the check here. |
845 | if (!isValidOffset(offset)) |
846 | return nullptr; |
847 | |
848 | if (!hasRareData()) |
849 | allocateRareData(vm); |
850 | ConcurrentJSLocker locker(m_lock); |
851 | StructureRareData* rareData = this->rareData(); |
852 | if (!rareData->m_replacementWatchpointSets) { |
853 | rareData->m_replacementWatchpointSets = |
854 | std::make_unique<StructureRareData::PropertyWatchpointMap>(); |
855 | WTF::storeStoreFence(); |
856 | } |
857 | auto result = rareData->m_replacementWatchpointSets->add(offset, nullptr); |
858 | if (result.isNewEntry) |
859 | result.iterator->value = adoptRef(new WatchpointSet(IsWatched)); |
860 | return result.iterator->value.get(); |
861 | } |
862 | |
863 | void Structure::startWatchingPropertyForReplacements(VM& vm, PropertyName propertyName) |
864 | { |
865 | ASSERT(!isUncacheableDictionary()); |
866 | |
867 | startWatchingPropertyForReplacements(vm, get(vm, propertyName)); |
868 | } |
869 | |
870 | void Structure::didCachePropertyReplacement(VM& vm, PropertyOffset offset) |
871 | { |
872 | RELEASE_ASSERT(isValidOffset(offset)); |
    ensurePropertyReplacementWatchpointSet(vm, offset)->fireAll(vm, "Did cache property replacement");
874 | } |
875 | |
876 | void Structure::startWatchingInternalProperties(VM& vm) |
877 | { |
878 | if (!isUncacheableDictionary()) { |
879 | startWatchingPropertyForReplacements(vm, vm.propertyNames->toString); |
880 | startWatchingPropertyForReplacements(vm, vm.propertyNames->valueOf); |
881 | } |
882 | setDidWatchInternalProperties(true); |
883 | } |
884 | |
885 | #if DUMP_PROPERTYMAP_STATS |
886 | |
887 | PropertyMapHashTableStats* propertyMapHashTableStats = 0; |
888 | |
889 | struct PropertyMapStatisticsExitLogger { |
890 | PropertyMapStatisticsExitLogger(); |
891 | ~PropertyMapStatisticsExitLogger(); |
892 | }; |
893 | |
894 | DEFINE_GLOBAL_FOR_LOGGING(PropertyMapStatisticsExitLogger, logger, ); |
895 | |
896 | PropertyMapStatisticsExitLogger::PropertyMapStatisticsExitLogger() |
897 | { |
898 | propertyMapHashTableStats = adoptPtr(new PropertyMapHashTableStats()).leakPtr(); |
899 | } |
900 | |
901 | PropertyMapStatisticsExitLogger::~PropertyMapStatisticsExitLogger() |
902 | { |
903 | unsigned finds = propertyMapHashTableStats->numFinds; |
904 | unsigned collisions = propertyMapHashTableStats->numCollisions; |
905 | dataLogF("\nJSC::PropertyMap statistics for process %d\n\n" , getCurrentProcessID()); |
906 | dataLogF("%d finds\n" , finds); |
907 | dataLogF("%d collisions (%.1f%%)\n" , collisions, 100.0 * collisions / finds); |
908 | dataLogF("%d lookups\n" , propertyMapHashTableStats->numLookups.load()); |
909 | dataLogF("%d lookup probings\n" , propertyMapHashTableStats->numLookupProbing.load()); |
910 | dataLogF("%d adds\n" , propertyMapHashTableStats->numAdds.load()); |
911 | dataLogF("%d removes\n" , propertyMapHashTableStats->numRemoves.load()); |
912 | dataLogF("%d rehashes\n" , propertyMapHashTableStats->numRehashes.load()); |
913 | dataLogF("%d reinserts\n" , propertyMapHashTableStats->numReinserts.load()); |
914 | } |
915 | |
916 | #endif |
917 | |
918 | PropertyTable* Structure::copyPropertyTableForPinning(VM& vm) |
919 | { |
920 | if (PropertyTable* table = propertyTableOrNull()) |
921 | return PropertyTable::clone(vm, *table); |
922 | bool setPropertyTable = false; |
923 | return materializePropertyTable(vm, setPropertyTable); |
924 | } |
925 | |
926 | PropertyOffset Structure::getConcurrently(UniquedStringImpl* uid, unsigned& attributes) |
927 | { |
928 | PropertyOffset result = invalidOffset; |
929 | |
930 | forEachPropertyConcurrently( |
931 | [&] (const PropertyMapEntry& candidate) -> bool { |
932 | if (candidate.key != uid) |
933 | return true; |
934 | |
935 | result = candidate.offset; |
936 | attributes = candidate.attributes; |
937 | return false; |
938 | }); |
939 | |
940 | return result; |
941 | } |
942 | |
943 | Vector<PropertyMapEntry> Structure::getPropertiesConcurrently() |
944 | { |
945 | Vector<PropertyMapEntry> result; |
946 | |
947 | forEachPropertyConcurrently( |
948 | [&] (const PropertyMapEntry& entry) -> bool { |
949 | result.append(entry); |
950 | return true; |
951 | }); |
952 | |
953 | return result; |
954 | } |
955 | |
956 | PropertyOffset Structure::add(VM& vm, PropertyName propertyName, unsigned attributes) |
957 | { |
958 | return add<ShouldPin::No>( |
959 | vm, propertyName, attributes, |
960 | [this] (const GCSafeConcurrentJSLocker&, PropertyOffset, PropertyOffset newLastOffset) { |
961 | setLastOffset(newLastOffset); |
962 | }); |
963 | } |
964 | |
965 | PropertyOffset Structure::remove(PropertyName propertyName) |
966 | { |
967 | return remove(propertyName, [] (const ConcurrentJSLocker&, PropertyOffset) { }); |
968 | } |
969 | |
970 | void Structure::getPropertyNamesFromStructure(VM& vm, PropertyNameArray& propertyNames, EnumerationMode mode) |
971 | { |
972 | PropertyTable* table = ensurePropertyTableIfNotEmpty(vm); |
973 | if (!table) |
974 | return; |
975 | |
976 | bool knownUnique = propertyNames.canAddKnownUniqueForStructure(); |
977 | |
978 | PropertyTable::iterator end = table->end(); |
979 | for (PropertyTable::iterator iter = table->begin(); iter != end; ++iter) { |
980 | ASSERT(!isQuickPropertyAccessAllowedForEnumeration() || !(iter->attributes & PropertyAttribute::DontEnum)); |
981 | ASSERT(!isQuickPropertyAccessAllowedForEnumeration() || !iter->key->isSymbol()); |
982 | if (!(iter->attributes & PropertyAttribute::DontEnum) || mode.includeDontEnumProperties()) { |
983 | if (iter->key->isSymbol() && !propertyNames.includeSymbolProperties()) |
984 | continue; |
985 | if (knownUnique) |
986 | propertyNames.addUnchecked(iter->key); |
987 | else |
988 | propertyNames.add(iter->key); |
989 | } |
990 | } |
991 | } |
992 | |
993 | void StructureFireDetail::dump(PrintStream& out) const |
994 | { |
995 | out.print("Structure transition from " , *m_structure); |
996 | } |
997 | |
998 | DeferredStructureTransitionWatchpointFire::DeferredStructureTransitionWatchpointFire(VM& vm, Structure* structure) |
999 | : DeferredWatchpointFire(vm) |
1000 | , m_structure(structure) |
1001 | { |
1002 | } |
1003 | |
1004 | DeferredStructureTransitionWatchpointFire::~DeferredStructureTransitionWatchpointFire() |
1005 | { |
1006 | fireAll(); |
1007 | } |
1008 | |
1009 | void DeferredStructureTransitionWatchpointFire::dump(PrintStream& out) const |
1010 | { |
1011 | out.print("Structure transition from " , *m_structure); |
1012 | } |
1013 | |
1014 | void Structure::didTransitionFromThisStructure(DeferredStructureTransitionWatchpointFire* deferred) const |
1015 | { |
1016 | // If the structure is being watched, and this is the kind of structure that the DFG would |
1017 | // like to watch, then make sure to note for all future versions of this structure that it's |
1018 | // unwise to watch it. |
1019 | if (m_transitionWatchpointSet.isBeingWatched()) |
1020 | const_cast<Structure*>(this)->setTransitionWatchpointIsLikelyToBeFired(true); |
1021 | |
1022 | if (deferred) { |
1023 | ASSERT(deferred->structure() == this); |
1024 | m_transitionWatchpointSet.fireAll(*vm(), deferred); |
1025 | } else |
1026 | m_transitionWatchpointSet.fireAll(*vm(), StructureFireDetail(this)); |
1027 | } |
1028 | |
1029 | void Structure::visitChildren(JSCell* cell, SlotVisitor& visitor) |
1030 | { |
1031 | Structure* thisObject = jsCast<Structure*>(cell); |
1032 | ASSERT_GC_OBJECT_INHERITS(thisObject, info()); |
1033 | |
1034 | Base::visitChildren(thisObject, visitor); |
1035 | |
1036 | ConcurrentJSLocker locker(thisObject->m_lock); |
1037 | |
1038 | visitor.append(thisObject->m_globalObject); |
1039 | if (!thisObject->isObject()) |
1040 | thisObject->m_cachedPrototypeChain.clear(); |
1041 | else { |
1042 | visitor.append(thisObject->m_prototype); |
1043 | visitor.append(thisObject->m_cachedPrototypeChain); |
1044 | } |
1045 | visitor.append(thisObject->m_previousOrRareData); |
1046 | |
1047 | if (thisObject->isPinnedPropertyTable() || thisObject->isAddingPropertyForTransition()) { |
1048 | // NOTE: This can interleave in pin(), in which case it may see a null property table. |
1049 | // That's fine, because then the barrier will fire and we will scan this again. |
1050 | visitor.append(thisObject->m_propertyTableUnsafe); |
1051 | } else if (visitor.isBuildingHeapSnapshot()) |
1052 | visitor.append(thisObject->m_propertyTableUnsafe); |
1053 | else if (thisObject->m_propertyTableUnsafe) |
1054 | thisObject->m_propertyTableUnsafe.clear(); |
1055 | } |
1056 | |
1057 | bool Structure::isCheapDuringGC(VM& vm) |
1058 | { |
1059 | // FIXME: We could make this even safer by returning false if this structure's property table |
1060 | // has any large property names. |
1061 | // https://bugs.webkit.org/show_bug.cgi?id=157334 |
1062 | |
1063 | return (!m_globalObject || vm.heap.isMarked(m_globalObject.get())) |
1064 | && (hasPolyProto() || !storedPrototypeObject() || vm.heap.isMarked(storedPrototypeObject())); |
1065 | } |
1066 | |
1067 | bool Structure::markIfCheap(SlotVisitor& visitor) |
1068 | { |
1069 | VM& vm = visitor.vm(); |
1070 | if (!isCheapDuringGC(vm)) |
1071 | return vm.heap.isMarked(this); |
1072 | |
1073 | visitor.appendUnbarriered(this); |
1074 | return true; |
1075 | } |
1076 | |
1077 | Ref<StructureShape> Structure::toStructureShape(JSValue value, bool& sawPolyProtoStructure) |
1078 | { |
1079 | Ref<StructureShape> baseShape = StructureShape::create(); |
1080 | RefPtr<StructureShape> curShape = baseShape.ptr(); |
1081 | Structure* curStructure = this; |
1082 | JSValue curValue = value; |
1083 | sawPolyProtoStructure = false; |
1084 | while (curStructure) { |
1085 | sawPolyProtoStructure |= curStructure->hasPolyProto(); |
1086 | curStructure->forEachPropertyConcurrently( |
1087 | [&] (const PropertyMapEntry& entry) -> bool { |
1088 | if (!PropertyName(entry.key).isPrivateName()) |
1089 | curShape->addProperty(*entry.key); |
1090 | return true; |
1091 | }); |
1092 | |
1093 | if (JSObject* curObject = curValue.getObject()) |
1094 | curShape->setConstructorName(JSObject::calculatedClassName(curObject)); |
1095 | else |
1096 | curShape->setConstructorName(curStructure->classInfo()->className); |
1097 | |
1098 | if (curStructure->isDictionary()) |
1099 | curShape->enterDictionaryMode(); |
1100 | |
1101 | curShape->markAsFinal(); |
1102 | |
1103 | if (!curValue.isObject()) |
1104 | break; |
1105 | |
1106 | JSObject* object = asObject(curValue); |
1107 | JSObject* prototypeObject = object->structure()->storedPrototypeObject(object); |
1108 | if (!prototypeObject) |
1109 | break; |
1110 | |
1111 | auto newShape = StructureShape::create(); |
1112 | curShape->setProto(newShape.copyRef()); |
1113 | curShape = WTFMove(newShape); |
1114 | curValue = prototypeObject; |
1115 | curStructure = prototypeObject->structure(); |
1116 | } |
1117 | |
1118 | return baseShape; |
1119 | } |
1120 | |
1121 | void Structure::dump(PrintStream& out) const |
1122 | { |
    out.print(RawPointer(this), ":[", classInfo()->className, ", {");

    CommaPrinter comma;

    const_cast<Structure*>(this)->forEachPropertyConcurrently(
        [&] (const PropertyMapEntry& entry) -> bool {
            out.print(comma, entry.key, ":", static_cast<int>(entry.offset));
            return true;
        });

    out.print("}, ", IndexingTypeDump(indexingMode()));

    if (hasPolyProto())
        out.print(", PolyProto offset:", knownPolyProtoOffset);
    else if (m_prototype.get().isCell())
        out.print(", Proto:", RawPointer(m_prototype.get().asCell()));

    switch (dictionaryKind()) {
    case NoneDictionaryKind:
        if (hasBeenDictionary())
            out.print(", Has been dictionary");
        break;
    case CachedDictionaryKind:
        out.print(", Dictionary");
        break;
    case UncachedDictionaryKind:
        out.print(", UncacheableDictionary");
        break;
    }

    if (transitionWatchpointSetIsStillValid())
        out.print(", Leaf");
    else if (transitionWatchpointIsLikelyToBeFired())
        out.print(", Shady leaf");

    out.print("]");
1159 | } |
1160 | |
1161 | void Structure::dumpInContext(PrintStream& out, DumpContext* context) const |
1162 | { |
1163 | if (context) |
1164 | context->structures.dumpBrief(this, out); |
1165 | else |
1166 | dump(out); |
1167 | } |
1168 | |
1169 | void Structure::dumpBrief(PrintStream& out, const CString& string) const |
1170 | { |
1171 | out.print("%" , string, ":" , classInfo()->className); |
1172 | } |
1173 | |
void Structure::dumpContextHeader(PrintStream& out)
{
    out.print("Structures:");
}
1178 | |
1179 | bool ClassInfo::hasStaticSetterOrReadonlyProperties() const |
1180 | { |
1181 | for (const ClassInfo* ci = this; ci; ci = ci->parentClass) { |
1182 | if (const HashTable* table = ci->staticPropHashTable) { |
1183 | if (table->hasSetterOrReadonlyProperties) |
1184 | return true; |
1185 | } |
1186 | } |
1187 | return false; |
1188 | } |
1189 | |
1190 | void Structure::setCachedPropertyNameEnumerator(VM& vm, JSPropertyNameEnumerator* enumerator) |
1191 | { |
1192 | ASSERT(!isDictionary()); |
1193 | if (!hasRareData()) |
1194 | allocateRareData(vm); |
1195 | rareData()->setCachedPropertyNameEnumerator(vm, enumerator); |
1196 | } |
1197 | |
1198 | JSPropertyNameEnumerator* Structure::cachedPropertyNameEnumerator() const |
1199 | { |
1200 | if (!hasRareData()) |
1201 | return nullptr; |
1202 | return rareData()->cachedPropertyNameEnumerator(); |
1203 | } |
1204 | |
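// The enumerator may only be cached when this structure and every structure in the cached
// prototype chain can cache their own keys; the loop below walks the chain up to its null
// terminator.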
1205 | bool Structure::canCachePropertyNameEnumerator() const |
1206 | { |
1207 | if (!this->canCacheOwnKeys()) |
1208 | return false; |
1209 | |
1210 | StructureChain* structureChain = m_cachedPrototypeChain.get(); |
1211 | ASSERT(structureChain); |
1212 | WriteBarrier<Structure>* structure = structureChain->head(); |
1213 | while (true) { |
1214 | if (!structure->get()) |
1215 | return true; |
1216 | if (!structure->get()->canCacheOwnKeys()) |
1217 | return false; |
1218 | structure++; |
1219 | } |
1220 | |
1221 | ASSERT_NOT_REACHED(); |
1222 | return true; |
1223 | } |
1224 | |
1225 | bool Structure::canAccessPropertiesQuicklyForEnumeration() const |
1226 | { |
1227 | if (!isQuickPropertyAccessAllowedForEnumeration()) |
1228 | return false; |
1229 | if (hasGetterSetterProperties()) |
1230 | return false; |
1231 | if (isUncacheableDictionary()) |
1232 | return false; |
1233 | return true; |
1234 | } |
1235 | |
1236 | } // namespace JSC |
1237 | |