/*
 * Copyright (C) 2007, 2008, 2012-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "ConcurrentJSLock.h"
#include "ConstantMode.h"
#include "InferredValue.h"
#include "JSObject.h"
#include "ScopedArgumentsTable.h"
#include "TypeLocation.h"
#include "VarOffset.h"
#include "Watchpoint.h"
#include <memory>
#include <wtf/HashTraits.h>
#include <wtf/text/UniquedStringImpl.h>

namespace JSC {

class SymbolTable;

static ALWAYS_INLINE int missingSymbolMarker() { return std::numeric_limits<int>::max(); }

// The bit twiddling in this class assumes that every register index is a
// reasonably small positive or negative number, and therefore has its high
// bits all set or all unset, so that it survives the shift by FlagBits
// (see isValidVarOffset() below).
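//
// For instance, using the flag constants defined in SymbolTableEntry below, a
// hypothetical read-only stack variable with raw offset 5 would pack (slim) as:
//
//     (5 << FlagBits) | StackKindBits | ReadOnlyFlag | NotNullFlag | SlimFlag
//         == 0x140 | 0x20 | 0x2 | 0x8 | 0x1 == 0x16B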

// In addition to implementing semantics-mandated variable attributes and
// implementation-mandated variable indexing, this class also implements
// watchpoints to be used for JIT optimizations. Because watchpoints are
// meant to be relatively rare, this class optimizes heavily for the case
// that they are not being used. To that end, this class uses the thin-fat
// idiom: either it is thin, in which case it contains an in-place encoded
// word that consists of attributes, the index, and a bit saying that it is
// thin; or it is fat, in which case it contains a pointer to a malloc'd
// data structure and a bit saying that it is fat. The malloc'd data
// structure is malloc'd anew upon copy, to preserve the property that
// in-place edits to a SymbolTableEntry do not manifest in any copies.
// However, the malloc'd FatEntry data structure contains a ref-counted
// pointer to a shared WatchpointSet. Thus, in-place edits of the
// WatchpointSet will manifest in all copies. Here's a picture:
//
// SymbolTableEntry --> FatEntry --> WatchpointSet
//
// If you make a copy of a SymbolTableEntry, you will have:
//
// original: SymbolTableEntry --> FatEntry --> WatchpointSet
// copy:     SymbolTableEntry --> FatEntry -----^

struct SymbolTableEntry {
    friend class CachedSymbolTableEntry;

private:
    static VarOffset varOffsetFromBits(intptr_t bits)
    {
        VarKind kind;
        intptr_t kindBits = bits & KindBitsMask;
        if (kindBits <= UnwatchableScopeKindBits)
            kind = VarKind::Scope;
        else if (kindBits == StackKindBits)
            kind = VarKind::Stack;
        else
            kind = VarKind::DirectArgument;
        return VarOffset::assemble(kind, static_cast<int>(bits >> FlagBits));
    }

    static ScopeOffset scopeOffsetFromBits(intptr_t bits)
    {
        ASSERT((bits & KindBitsMask) <= UnwatchableScopeKindBits);
        return ScopeOffset(static_cast<int>(bits >> FlagBits));
    }

public:

    // Use the SymbolTableEntry::Fast class, either via implicit cast or by calling
    // getFast(), when you (1) only care about isNull(), varOffset(), and isReadOnly(),
    // and (2) you are in a hot path where you need to minimize the number of times
    // that you branch on isFat() when getting the bits().
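    //
    // For example, a sketch (the surrounding `table`, `locker`, and `ident` are
    // assumed to be in scope, and `useVar` is a hypothetical consumer):
    //
    //     SymbolTableEntry::Fast entry = table->get(locker, ident.impl());
    //     if (!entry.isNull() && !entry.isReadOnly())
    //         useVar(entry.varOffset());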
    class Fast {
    public:
        Fast()
            : m_bits(SlimFlag)
        {
        }

        ALWAYS_INLINE Fast(const SymbolTableEntry& entry)
            : m_bits(entry.bits())
        {
        }

        bool isNull() const
        {
            return !(m_bits & ~SlimFlag);
        }

        VarOffset varOffset() const
        {
            return varOffsetFromBits(m_bits);
        }

        // Asserts if the offset is anything but a scope offset. This structures the assertions
        // in a way that may result in better code, even in release, than doing
        // varOffset().scopeOffset().
        ScopeOffset scopeOffset() const
        {
            return scopeOffsetFromBits(m_bits);
        }

        bool isReadOnly() const
        {
            return m_bits & ReadOnlyFlag;
        }

        bool isDontEnum() const
        {
            return m_bits & DontEnumFlag;
        }

        unsigned getAttributes() const
        {
            unsigned attributes = 0;
            if (isReadOnly())
                attributes |= PropertyAttribute::ReadOnly;
            if (isDontEnum())
                attributes |= PropertyAttribute::DontEnum;
            return attributes;
        }

        bool isFat() const
        {
            return !(m_bits & SlimFlag);
        }

    private:
        friend struct SymbolTableEntry;
        intptr_t m_bits;
    };

    SymbolTableEntry()
        : m_bits(SlimFlag)
    {
    }

    SymbolTableEntry(VarOffset offset)
        : m_bits(SlimFlag)
    {
        ASSERT(isValidVarOffset(offset));
        pack(offset, true, false, false);
    }

    SymbolTableEntry(VarOffset offset, unsigned attributes)
        : m_bits(SlimFlag)
    {
        ASSERT(isValidVarOffset(offset));
        pack(offset, true, attributes & PropertyAttribute::ReadOnly, attributes & PropertyAttribute::DontEnum);
    }

    ~SymbolTableEntry()
    {
        freeFatEntry();
    }

    SymbolTableEntry(const SymbolTableEntry& other)
        : m_bits(SlimFlag)
    {
        *this = other;
    }

    SymbolTableEntry& operator=(const SymbolTableEntry& other)
    {
        if (UNLIKELY(other.isFat()))
            return copySlow(other);
        freeFatEntry();
        m_bits = other.m_bits;
        return *this;
    }

    SymbolTableEntry(SymbolTableEntry&& other)
        : m_bits(SlimFlag)
    {
        swap(other);
    }

    SymbolTableEntry& operator=(SymbolTableEntry&& other)
    {
        swap(other);
        return *this;
    }

    void swap(SymbolTableEntry& other)
    {
        std::swap(m_bits, other.m_bits);
    }

    bool isNull() const
    {
        return !(bits() & ~SlimFlag);
    }

    VarOffset varOffset() const
    {
        return varOffsetFromBits(bits());
    }

    bool isWatchable() const
    {
        return (m_bits & KindBitsMask) == ScopeKindBits && VM::canUseJIT();
    }

    // Asserts if the offset is anything but a scope offset. This structures the assertions
    // in a way that may result in better code, even in release, than doing
    // varOffset().scopeOffset().
    ScopeOffset scopeOffset() const
    {
        return scopeOffsetFromBits(bits());
    }

    ALWAYS_INLINE Fast getFast() const
    {
        return Fast(*this);
    }

    ALWAYS_INLINE Fast getFast(bool& wasFat) const
    {
        Fast result;
        wasFat = isFat();
        if (wasFat)
            result.m_bits = fatEntry()->m_bits | SlimFlag;
        else
            result.m_bits = m_bits;
        return result;
    }

    unsigned getAttributes() const
    {
        return getFast().getAttributes();
    }

    void setAttributes(unsigned attributes)
    {
        pack(varOffset(), isWatchable(), attributes & PropertyAttribute::ReadOnly, attributes & PropertyAttribute::DontEnum);
    }

    bool isReadOnly() const
    {
        return bits() & ReadOnlyFlag;
    }

    ConstantMode constantMode() const
    {
        return modeForIsConstant(isReadOnly());
    }

    bool isDontEnum() const
    {
        return bits() & DontEnumFlag;
    }

    void disableWatching(VM& vm)
    {
        if (WatchpointSet* set = watchpointSet())
            set->invalidate(vm, "Disabling watching in symbol table");
        if (varOffset().isScope())
            pack(varOffset(), false, isReadOnly(), isDontEnum());
    }

    void prepareToWatch();

    // This watchpoint set is initialized clear, and goes through the following state transitions:
    //
    // First write to this var, in any scope that has this symbol table: Clear->IsWatched.
    //
    // Second write to this var, in any scope that has this symbol table: IsWatched->IsInvalidated.
    //
    // We ensure that we touch the set (i.e. trigger its state transition) after we do the write. This
    // means that if you're in the compiler thread, and you:
    //
    // 1) Observe that the set IsWatched and commit to adding your watchpoint.
    // 2) Load a value from any scope that has this watchpoint set.
    //
    // Then you can be sure that that value is either going to be the correct value for that var
    // forever, or the watchpoint set will be invalidated and your watchpoint will fire.
    //
    // It's possible to write a program that first creates multiple scopes with the same var, and then
    // initializes that var in just one of them. This means that a compilation could constant-fold to one
    // of the scopes that still has an undefined value for this variable. That's fine, because at that
    // point any write to any of the instances of that variable would fire the watchpoint.
    //
    // Note that watchpointSet() returns nullptr if JIT is disabled.
    WatchpointSet* watchpointSet()
    {
        if (!isFat())
            return nullptr;
        return fatEntry()->m_watchpoints.get();
    }
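
    // A sketch of the compiler-thread protocol described above (the names
    // `entry` and `scope`, and the fold step, are hypothetical):
    //
    //     WatchpointSet* set = entry.watchpointSet();
    //     if (set && set->state() == IsWatched) {
    //         JSValue value = scope->variableAt(entry.scopeOffset()).get();
    //         // Register a watchpoint on `set` before relying on `value`;
    //         // then it is safe to constant-fold, since any later write to
    //         // the var fires the set.
    //     }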

private:
    static const intptr_t SlimFlag = 0x1;
    static const intptr_t ReadOnlyFlag = 0x2;
    static const intptr_t DontEnumFlag = 0x4;
    static const intptr_t NotNullFlag = 0x8;
    static const intptr_t KindBitsMask = 0x30;
    static const intptr_t ScopeKindBits = 0x00;
    static const intptr_t UnwatchableScopeKindBits = 0x10;
    static const intptr_t StackKindBits = 0x20;
    static const intptr_t DirectArgumentKindBits = 0x30;
    static const intptr_t FlagBits = 6;
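
    // Layout of a slim entry's m_bits, as implied by the constants above:
    //
    //     bit 0     SlimFlag (set when slim; clear when m_bits is a FatEntry*)
    //     bit 1     ReadOnlyFlag
    //     bit 2     DontEnumFlag
    //     bit 3     NotNullFlag
    //     bits 4-5  VarKind (scope / unwatchable scope / stack / direct argument)
    //     bits 6+   VarOffset::rawOffset(), as a signed value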

    class FatEntry {
        WTF_MAKE_FAST_ALLOCATED;
    public:
        FatEntry(intptr_t bits)
            : m_bits(bits & ~SlimFlag)
        {
        }

        intptr_t m_bits; // always has SlimFlag clear; otherwise exactly matches what the bits would have been if this entry weren't fat.

        RefPtr<WatchpointSet> m_watchpoints;
    };

    SymbolTableEntry& copySlow(const SymbolTableEntry&);

    bool isFat() const
    {
        return !(m_bits & SlimFlag);
    }

    const FatEntry* fatEntry() const
    {
        ASSERT(isFat());
        return bitwise_cast<const FatEntry*>(m_bits);
    }

    FatEntry* fatEntry()
    {
        ASSERT(isFat());
        return bitwise_cast<FatEntry*>(m_bits);
    }

    FatEntry* inflate()
    {
        if (LIKELY(isFat()))
            return fatEntry();
        return inflateSlow();
    }

    FatEntry* inflateSlow();

    ALWAYS_INLINE intptr_t bits() const
    {
        if (isFat())
            return fatEntry()->m_bits;
        return m_bits;
    }

    ALWAYS_INLINE intptr_t& bits()
    {
        if (isFat())
            return fatEntry()->m_bits;
        return m_bits;
    }

    void freeFatEntry()
    {
        if (LIKELY(!isFat()))
            return;
        freeFatEntrySlow();
    }

    JS_EXPORT_PRIVATE void freeFatEntrySlow();

    void pack(VarOffset offset, bool isWatchable, bool readOnly, bool dontEnum)
    {
        ASSERT(!isFat());
        intptr_t& bitsRef = bits();
        bitsRef =
            (static_cast<intptr_t>(offset.rawOffset()) << FlagBits) | NotNullFlag | SlimFlag;
        if (readOnly)
            bitsRef |= ReadOnlyFlag;
        if (dontEnum)
            bitsRef |= DontEnumFlag;
        switch (offset.kind()) {
        case VarKind::Scope:
            if (isWatchable)
                bitsRef |= ScopeKindBits;
            else
                bitsRef |= UnwatchableScopeKindBits;
            break;
        case VarKind::Stack:
            bitsRef |= StackKindBits;
            break;
        case VarKind::DirectArgument:
            bitsRef |= DirectArgumentKindBits;
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    static bool isValidVarOffset(VarOffset offset)
    {
        return ((static_cast<intptr_t>(offset.rawOffset()) << FlagBits) >> FlagBits) == static_cast<intptr_t>(offset.rawOffset());
    }

    intptr_t m_bits;
};

struct SymbolTableIndexHashTraits : HashTraits<SymbolTableEntry> {
    static const bool needsDestruction = true;
};

class SymbolTable final : public JSCell {
    friend class CachedSymbolTable;

public:
    typedef JSCell Base;
    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;

    typedef HashMap<RefPtr<UniquedStringImpl>, SymbolTableEntry, IdentifierRepHash, HashTraits<RefPtr<UniquedStringImpl>>, SymbolTableIndexHashTraits> Map;
    typedef HashMap<RefPtr<UniquedStringImpl>, GlobalVariableID, IdentifierRepHash> UniqueIDMap;
    typedef HashMap<RefPtr<UniquedStringImpl>, RefPtr<TypeSet>, IdentifierRepHash> UniqueTypeSetMap;
    typedef HashMap<VarOffset, RefPtr<UniquedStringImpl>> OffsetToVariableMap;
    typedef Vector<SymbolTableEntry*> LocalToEntryVec;

    template<typename CellType, SubspaceAccess>
    static IsoSubspace* subspaceFor(VM& vm)
    {
        return &vm.symbolTableSpace;
    }

    static SymbolTable* create(VM& vm)
    {
        SymbolTable* symbolTable = new (NotNull, allocateCell<SymbolTable>(vm.heap)) SymbolTable(vm);
        symbolTable->finishCreation(vm);
        return symbolTable;
    }

    static const bool needsDestruction = true;
    static void destroy(JSCell*);

    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
    {
        return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
    }

    // You must hold the lock until after you're done with the iterator.
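    // For example (a sketch; `symbolTable` and `key` are assumed to be in scope):
    //
    //     ConcurrentJSLocker locker(symbolTable->m_lock);
    //     auto iter = symbolTable->find(locker, key);
    //     if (iter != symbolTable->end(locker))
    //         useEntry(iter->value); // hypothetical consumer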
    Map::iterator find(const ConcurrentJSLocker&, UniquedStringImpl* key)
    {
        return m_map.find(key);
    }

    Map::iterator find(const GCSafeConcurrentJSLocker&, UniquedStringImpl* key)
    {
        return m_map.find(key);
    }

    SymbolTableEntry get(const ConcurrentJSLocker&, UniquedStringImpl* key)
    {
        return m_map.get(key);
    }

    SymbolTableEntry get(UniquedStringImpl* key)
    {
        ConcurrentJSLocker locker(m_lock);
        return get(locker, key);
    }

    SymbolTableEntry inlineGet(const ConcurrentJSLocker&, UniquedStringImpl* key)
    {
        return m_map.inlineGet(key);
    }

    SymbolTableEntry inlineGet(UniquedStringImpl* key)
    {
        ConcurrentJSLocker locker(m_lock);
        return inlineGet(locker, key);
    }

    Map::iterator begin(const ConcurrentJSLocker&)
    {
        return m_map.begin();
    }

    Map::iterator end(const ConcurrentJSLocker&)
    {
        return m_map.end();
    }

    Map::iterator end(const GCSafeConcurrentJSLocker&)
    {
        return m_map.end();
    }

    size_t size(const ConcurrentJSLocker&) const
    {
        return m_map.size();
    }

    size_t size() const
    {
        ConcurrentJSLocker locker(m_lock);
        return size(locker);
    }

    ScopeOffset maxScopeOffset() const
    {
        return m_maxScopeOffset;
    }

    void didUseScopeOffset(ScopeOffset offset)
    {
        if (!m_maxScopeOffset || m_maxScopeOffset < offset)
            m_maxScopeOffset = offset;
    }

    void didUseVarOffset(VarOffset offset)
    {
        if (offset.isScope())
            didUseScopeOffset(offset.scopeOffset());
    }

    unsigned scopeSize() const
    {
        ScopeOffset maxScopeOffset = this->maxScopeOffset();

        // Do some calculation that relies on the invalid scope offset plus one wrapping around to zero.
        unsigned fastResult = maxScopeOffset.offsetUnchecked() + 1;

        // Assert that this works.
        ASSERT(fastResult == (!maxScopeOffset ? 0 : maxScopeOffset.offset() + 1));

        return fastResult;
    }

    ScopeOffset nextScopeOffset() const
    {
        return ScopeOffset(scopeSize());
    }
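
    // For example: if the highest scope offset ever used is ScopeOffset(3), then
    // scopeSize() is 4 and nextScopeOffset() is ScopeOffset(4); if no scope offset
    // was ever used, scopeSize() is 0.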

    ScopeOffset takeNextScopeOffset(const ConcurrentJSLocker&)
    {
        ScopeOffset result = nextScopeOffset();
        m_maxScopeOffset = result;
        return result;
    }

    ScopeOffset takeNextScopeOffset()
    {
        ConcurrentJSLocker locker(m_lock);
        return takeNextScopeOffset(locker);
    }

    template<typename Entry>
    void add(const ConcurrentJSLocker&, UniquedStringImpl* key, Entry&& entry)
    {
        RELEASE_ASSERT(!m_localToEntry);
        didUseVarOffset(entry.varOffset());
        Map::AddResult result = m_map.add(key, std::forward<Entry>(entry));
        ASSERT_UNUSED(result, result.isNewEntry);
    }

    template<typename Entry>
    void add(UniquedStringImpl* key, Entry&& entry)
    {
        ConcurrentJSLocker locker(m_lock);
        add(locker, key, std::forward<Entry>(entry));
    }

    template<typename Entry>
    void set(const ConcurrentJSLocker&, UniquedStringImpl* key, Entry&& entry)
    {
        RELEASE_ASSERT(!m_localToEntry);
        didUseVarOffset(entry.varOffset());
        m_map.set(key, std::forward<Entry>(entry));
    }

    template<typename Entry>
    void set(UniquedStringImpl* key, Entry&& entry)
    {
        ConcurrentJSLocker locker(m_lock);
        set(locker, key, std::forward<Entry>(entry));
    }

    bool contains(const ConcurrentJSLocker&, UniquedStringImpl* key)
    {
        return m_map.contains(key);
    }

    bool contains(UniquedStringImpl* key)
    {
        ConcurrentJSLocker locker(m_lock);
        return contains(locker, key);
    }

    // The principle behind ScopedArgumentsTable modifications is that we will create one and
    // leave it unlocked - thereby allowing in-place changes - until someone asks for a pointer to
    // the table. Then, we will lock it. Then both our future changes and their future changes
    // will first have to make a copy. This discipline means that usually when we create a
    // ScopedArguments object, we don't have to make a copy of the ScopedArgumentsTable - instead
    // we just take a reference to one that we already have.
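    //
    // A sketch of that discipline in action (the offsets are hypothetical):
    //
    //     table->setArgumentOffset(vm, 0, offsetA); // table unlocked: edits in place
    //     ScopedArgumentsTable* args = table->arguments(); // locks the table
    //     table->setArgumentOffset(vm, 0, offsetB); // now copies first; `args` still sees offsetA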

    uint32_t argumentsLength() const
    {
        if (!m_arguments)
            return 0;
        return m_arguments->length();
    }

    void setArgumentsLength(VM& vm, uint32_t length)
    {
        if (UNLIKELY(!m_arguments))
            m_arguments.set(vm, this, ScopedArgumentsTable::create(vm, length));
        else
            m_arguments.set(vm, this, m_arguments->setLength(vm, length));
    }

    ScopeOffset argumentOffset(uint32_t i) const
    {
        ASSERT_WITH_SECURITY_IMPLICATION(m_arguments);
        return m_arguments->get(i);
    }

    void setArgumentOffset(VM& vm, uint32_t i, ScopeOffset offset)
    {
        ASSERT_WITH_SECURITY_IMPLICATION(m_arguments);
        m_arguments.set(vm, this, m_arguments->set(vm, i, offset));
    }

    ScopedArgumentsTable* arguments() const
    {
        if (!m_arguments)
            return nullptr;
        m_arguments->lock();
        return m_arguments.get();
    }

    const LocalToEntryVec& localToEntry(const ConcurrentJSLocker&);
    SymbolTableEntry* entryFor(const ConcurrentJSLocker&, ScopeOffset);

    GlobalVariableID uniqueIDForVariable(const ConcurrentJSLocker&, UniquedStringImpl* key, VM&);
    GlobalVariableID uniqueIDForOffset(const ConcurrentJSLocker&, VarOffset, VM&);
    RefPtr<TypeSet> globalTypeSetForOffset(const ConcurrentJSLocker&, VarOffset, VM&);
    RefPtr<TypeSet> globalTypeSetForVariable(const ConcurrentJSLocker&, UniquedStringImpl* key, VM&);

    bool usesNonStrictEval() const { return m_usesNonStrictEval; }
    void setUsesNonStrictEval(bool usesNonStrictEval) { m_usesNonStrictEval = usesNonStrictEval; }

    bool isNestedLexicalScope() const { return m_nestedLexicalScope; }
    void markIsNestedLexicalScope() { ASSERT(scopeType() == LexicalScope); m_nestedLexicalScope = true; }

    enum ScopeType {
        VarScope,
        GlobalLexicalScope,
        LexicalScope,
        CatchScope,
        FunctionNameScope
    };
    void setScopeType(ScopeType type) { m_scopeType = type; }
    ScopeType scopeType() const { return static_cast<ScopeType>(m_scopeType); }

    SymbolTable* cloneScopePart(VM&);

    void prepareForTypeProfiling(const ConcurrentJSLocker&);

    CodeBlock* rareDataCodeBlock();
    void setRareDataCodeBlock(CodeBlock*);

    InferredValue<JSScope>& singleton() { return m_singleton; }

    void notifyCreation(VM& vm, JSScope* scope, const char* reason)
    {
        m_singleton.notifyWrite(vm, this, scope, reason);
    }

    static void visitChildren(JSCell*, SlotVisitor&);

    DECLARE_EXPORT_INFO;

    void finalizeUnconditionally(VM&);

private:
    JS_EXPORT_PRIVATE SymbolTable(VM&);
    ~SymbolTable();

    JS_EXPORT_PRIVATE void finishCreation(VM&);

    Map m_map;
    ScopeOffset m_maxScopeOffset;
public:
    mutable ConcurrentJSLock m_lock;
private:
    unsigned m_usesNonStrictEval : 1;
    unsigned m_nestedLexicalScope : 1; // Non-function LexicalScope.
    unsigned m_scopeType : 3; // ScopeType

    struct SymbolTableRareData {
        UniqueIDMap m_uniqueIDMap;
        OffsetToVariableMap m_offsetToVariableMap;
        UniqueTypeSetMap m_uniqueTypeSetMap;
        WriteBarrier<CodeBlock> m_codeBlock;
    };
    std::unique_ptr<SymbolTableRareData> m_rareData;

    WriteBarrier<ScopedArgumentsTable> m_arguments;
    InferredValue<JSScope> m_singleton;

    std::unique_ptr<LocalToEntryVec> m_localToEntry;
};

} // namespace JSC