1 | /* |
2 | * Copyright (C) 2017-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "VMInspector.h" |
28 | |
29 | #include "CodeBlock.h" |
30 | #include "CodeBlockSet.h" |
31 | #include "HeapInlines.h" |
32 | #include "HeapIterationScope.h" |
33 | #include "JSCInlines.h" |
34 | #include "MachineContext.h" |
35 | #include "MarkedSpaceInlines.h" |
36 | #include "StackVisitor.h" |
37 | #include <mutex> |
38 | #include <wtf/Expected.h> |
39 | |
40 | #if !OS(WINDOWS) |
41 | #include <unistd.h> |
42 | #endif |
43 | |
44 | namespace JSC { |
45 | |
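// Returns the process-wide VMInspector singleton. The instance is created
// lazily on first use and intentionally never destroyed; std::call_once makes
// the initialization thread-safe.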
46 | VMInspector& VMInspector::instance() |
47 | { |
48 | static VMInspector* manager; |
49 | static std::once_flag once; |
50 | std::call_once(once, [] { |
51 | manager = new VMInspector(); |
52 | }); |
53 | return *manager; |
54 | } |
55 | |
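// VMs add/remove themselves here so that iterate() can visit every live VM.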
56 | void VMInspector::add(VM* vm) |
57 | { |
58 | auto locker = holdLock(m_lock); |
59 | m_vmList.append(vm); |
60 | } |
61 | |
62 | void VMInspector::remove(VM* vm) |
63 | { |
64 | auto locker = holdLock(m_lock); |
65 | m_vmList.remove(vm); |
66 | } |
67 | |
68 | auto VMInspector::lock(Seconds timeout) -> Expected<Locker, Error> |
69 | { |
70 | // This function may be called from a signal handler (e.g. via visit()). Hence, |
71 | // it should only use APIs that are safe to call from signal handlers. This is |
72 | // why we use unistd.h's sleep() instead of its alternatives. |
73 | |
// We sleep(1) between tries below, i.e. one second per retry. Hence, the
// number of retries equals the timeout in seconds.
unsigned maxRetries = (timeout < Seconds::infinity()) ? timeout.value() : UINT_MAX;

Expected<Locker, Error> locker = Locker::tryLock(m_lock);
unsigned tryCount = 0;
while (!locker && tryCount < maxRetries) {
// We want the version of sleep from unistd.h. Cast to disambiguate.
#if !OS(WINDOWS)
(static_cast<unsigned (*)(unsigned)>(sleep))(1);
#endif
locker = Locker::tryLock(m_lock);
tryCount++;
}
86 | |
87 | if (!locker) |
88 | return makeUnexpected(Error::TimedOut); |
89 | return locker; |
90 | } |
91 | |
92 | #if ENABLE(JIT) |
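// Probes whether the given lock can be acquired without deadlocking. This
// matters because we may be running in a signal handler that interrupted the
// lock's holder. We make a few tryLock() attempts, and on success immediately
// release the lock so the caller can take it for real.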
93 | static bool ensureIsSafeToLock(Lock& lock) |
94 | { |
95 | unsigned maxRetries = 2; |
96 | unsigned tryCount = 0; |
97 | while (tryCount <= maxRetries) { |
98 | bool success = lock.tryLock(); |
99 | if (success) { |
100 | lock.unlock(); |
101 | return true; |
102 | } |
103 | tryCount++; |
104 | } |
105 | return false; |
106 | }; |
107 | #endif // ENABLE(JIT) |
108 | |
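// Answers whether machinePC points into JIT executable memory. Returns
// Error::TimedOut if the ExecutableAllocator's lock could not be safely taken.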
109 | auto VMInspector::isValidExecutableMemory(const VMInspector::Locker&, void* machinePC) -> Expected<bool, Error> |
110 | { |
111 | #if ENABLE(JIT) |
112 | bool found = false; |
113 | bool hasTimeout = false; |
114 | iterate([&] (VM&) -> FunctorStatus { |
115 | auto& allocator = ExecutableAllocator::singleton(); |
116 | auto& lock = allocator.getLock(); |
117 | |
118 | bool isSafeToLock = ensureIsSafeToLock(lock); |
119 | if (!isSafeToLock) { |
120 | hasTimeout = true; |
121 | return FunctorStatus::Continue; // Skip this VM. |
122 | } |
123 | |
124 | LockHolder executableAllocatorLocker(lock); |
125 | if (allocator.isValidExecutableMemory(executableAllocatorLocker, machinePC)) { |
126 | found = true; |
127 | return FunctorStatus::Done; |
128 | } |
129 | return FunctorStatus::Continue; |
130 | }); |
131 | |
132 | if (!found && hasTimeout) |
133 | return makeUnexpected(Error::TimedOut); |
134 | return found; |
135 | #else |
136 | UNUSED_PARAM(machinePC); |
137 | return false; |
138 | #endif |
139 | } |
140 | |
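// Searches each VM whose API lock the current thread holds for a JIT
// CodeBlock whose generated code contains machinePC. Returns nullptr if no
// match is found.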
141 | auto VMInspector::codeBlockForMachinePC(const VMInspector::Locker&, void* machinePC) -> Expected<CodeBlock*, Error> |
142 | { |
143 | #if ENABLE(JIT) |
144 | CodeBlock* codeBlock = nullptr; |
145 | bool hasTimeout = false; |
146 | iterate([&] (VM& vm) { |
147 | if (!vm.currentThreadIsHoldingAPILock()) |
148 | return FunctorStatus::Continue; |
149 | |
150 | // It is safe to call Heap::forEachCodeBlockIgnoringJITPlans here because: |
151 | // 1. CodeBlocks are added to the CodeBlockSet from the main thread before |
152 | // they are handed to the JIT plans. Those codeBlocks will have a null jitCode, |
153 | // but we check for that in our lambda functor. |
154 | // 2. We will acquire the CodeBlockSet lock before iterating. |
155 | // This ensures that a CodeBlock won't be GCed while we're iterating. |
156 | // 3. We do a tryLock on the CodeBlockSet's lock first to ensure that it is |
157 | // safe for the current thread to lock it before calling |
158 | // Heap::forEachCodeBlockIgnoringJITPlans(). Hence, there's no risk of |
159 | // re-entering the lock and deadlocking on it. |
160 | |
161 | auto& codeBlockSetLock = vm.heap.codeBlockSet().getLock(); |
162 | bool isSafeToLock = ensureIsSafeToLock(codeBlockSetLock); |
163 | if (!isSafeToLock) { |
164 | hasTimeout = true; |
165 | return FunctorStatus::Continue; // Skip this VM. |
166 | } |
167 | |
168 | auto locker = holdLock(codeBlockSetLock); |
169 | vm.heap.forEachCodeBlockIgnoringJITPlans(locker, [&] (CodeBlock* cb) { |
170 | JITCode* jitCode = cb->jitCode().get(); |
171 | if (!jitCode) { |
172 | // If the codeBlock is a replacement codeBlock which is in the process of being |
173 | // compiled, its jitCode will be null, and we can disregard it as a match for |
174 | // the machinePC we're searching for. |
175 | return; |
176 | } |
177 | |
178 | if (!JITCode::isJIT(jitCode->jitType())) |
179 | return; |
180 | |
181 | if (jitCode->contains(machinePC)) { |
182 | codeBlock = cb; |
183 | return; |
184 | } |
185 | }); |
186 | if (codeBlock) |
187 | return FunctorStatus::Done; |
188 | return FunctorStatus::Continue; |
189 | }); |
190 | |
191 | if (!codeBlock && hasTimeout) |
192 | return makeUnexpected(Error::TimedOut); |
193 | return codeBlock; |
194 | #else |
195 | UNUSED_PARAM(machinePC); |
196 | return nullptr; |
197 | #endif |
198 | } |
199 | |
200 | bool VMInspector::currentThreadOwnsJSLock(JSGlobalObject* globalObject) |
201 | { |
202 | return globalObject->vm().currentThreadIsHoldingAPILock(); |
203 | } |
204 | |
205 | static bool ensureCurrentThreadOwnsJSLock(JSGlobalObject* globalObject) |
206 | { |
207 | if (VMInspector::currentThreadOwnsJSLock(globalObject)) |
208 | return true; |
209 | dataLog("ERROR: current thread does not own the JSLock\n" ); |
210 | return false; |
211 | } |
212 | |
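// The functions below are debugging aids. They can be invoked from a
// debugger, e.g. (lldb) call JSC::VMInspector::gc(globalObject), assuming a
// globalObject is in scope at the breakpoint.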
213 | void VMInspector::gc(JSGlobalObject* globalObject) |
214 | { |
215 | VM& vm = globalObject->vm(); |
216 | if (!ensureCurrentThreadOwnsJSLock(globalObject)) |
217 | return; |
218 | vm.heap.collectNow(Sync, CollectionScope::Full); |
219 | } |
220 | |
221 | void VMInspector::edenGC(JSGlobalObject* globalObject) |
222 | { |
223 | VM& vm = globalObject->vm(); |
224 | if (!ensureCurrentThreadOwnsJSLock(globalObject)) |
225 | return; |
226 | vm.heap.collectSync(CollectionScope::Eden); |
227 | } |
228 | |
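// Conservative pointer test: true if ptr falls inside one of the heap's
// MarkedBlocks or inside a PreciseAllocation. It does not check liveness.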
229 | bool VMInspector::isInHeap(Heap* heap, void* ptr) |
230 | { |
231 | MarkedBlock* candidate = MarkedBlock::blockFor(ptr); |
232 | if (heap->objectSpace().blocks().set().contains(candidate)) |
233 | return true; |
234 | for (PreciseAllocation* allocation : heap->objectSpace().preciseAllocations()) { |
235 | if (allocation->contains(ptr)) |
236 | return true; |
237 | } |
238 | return false; |
239 | } |
240 | |
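// Functor for isValidCell() below: scans live cells for an exact address match.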
241 | struct CellAddressCheckFunctor : MarkedBlock::CountFunctor { |
242 | CellAddressCheckFunctor(JSCell* candidate) |
243 | : candidate(candidate) |
244 | { |
245 | } |
246 | |
247 | IterationStatus operator()(HeapCell* cell, HeapCell::Kind) const |
248 | { |
249 | if (cell == candidate) { |
250 | found = true; |
251 | return IterationStatus::Done; |
252 | } |
253 | return IterationStatus::Continue; |
254 | } |
255 | |
256 | JSCell* candidate; |
257 | mutable bool found { false }; |
258 | }; |
259 | |
260 | bool VMInspector::isValidCell(Heap* heap, JSCell* candidate) |
261 | { |
262 | HeapIterationScope iterationScope(*heap); |
263 | CellAddressCheckFunctor functor(candidate); |
264 | heap->objectSpace().forEachLiveCell(iterationScope, functor); |
265 | return functor.found; |
266 | } |
267 | |
268 | bool VMInspector::isValidCodeBlock(JSGlobalObject* globalObject, CodeBlock* candidate) |
269 | { |
270 | if (!ensureCurrentThreadOwnsJSLock(globalObject)) |
271 | return false; |
272 | |
273 | struct CodeBlockValidationFunctor { |
274 | CodeBlockValidationFunctor(CodeBlock* candidate) |
275 | : candidate(candidate) |
276 | { |
277 | } |
278 | |
279 | void operator()(CodeBlock* codeBlock) const |
280 | { |
281 | if (codeBlock == candidate) |
282 | found = true; |
283 | } |
284 | |
285 | CodeBlock* candidate; |
286 | mutable bool found { false }; |
287 | }; |
288 | |
289 | VM& vm = globalObject->vm(); |
290 | CodeBlockValidationFunctor functor(candidate); |
291 | vm.heap.forEachCodeBlock(functor); |
292 | return functor.found; |
293 | } |
294 | |
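// Returns the CodeBlock of the frameNumber'th frame (0 being the top frame)
// on the stack starting at topCallFrame, or nullptr if there is no such frame.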
CodeBlock* VMInspector::codeBlockForFrame(JSGlobalObject* globalObject, CallFrame* topCallFrame, unsigned frameNumber)
296 | { |
297 | VM& vm = globalObject->vm(); |
298 | if (!ensureCurrentThreadOwnsJSLock(globalObject)) |
299 | return nullptr; |
300 | |
301 | if (!topCallFrame) |
302 | return nullptr; |
303 | |
304 | struct FetchCodeBlockFunctor { |
305 | public: |
FetchCodeBlockFunctor(unsigned targetFrameNumber)
307 | : targetFrame(targetFrameNumber) |
308 | { |
309 | } |
310 | |
311 | StackVisitor::Status operator()(StackVisitor& visitor) const |
312 | { |
313 | auto currentFrame = nextFrame++; |
314 | if (currentFrame == targetFrame) { |
315 | codeBlock = visitor->codeBlock(); |
316 | return StackVisitor::Done; |
317 | } |
318 | return StackVisitor::Continue; |
319 | } |
320 | |
321 | unsigned targetFrame; |
322 | mutable unsigned nextFrame { 0 }; |
323 | mutable CodeBlock* codeBlock { nullptr }; |
324 | }; |
325 | |
326 | FetchCodeBlockFunctor functor(frameNumber); |
327 | topCallFrame->iterate(vm, functor); |
328 | return functor.codeBlock; |
329 | } |
330 | |
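// Stack-walking functor shared by dumpCallFrame() and dumpStack(): skips the
// first framesToSkip frames, then dumps either one frame (DumpOne) or all
// remaining frames (DumpAll).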
331 | class DumpFrameFunctor { |
332 | public: |
333 | enum Action { |
334 | DumpOne, |
335 | DumpAll |
336 | }; |
337 | |
338 | DumpFrameFunctor(Action action, unsigned framesToSkip) |
339 | : m_action(action) |
340 | , m_framesToSkip(framesToSkip) |
341 | { |
342 | } |
343 | |
344 | StackVisitor::Status operator()(StackVisitor& visitor) const |
345 | { |
346 | m_currentFrame++; |
347 | if (m_currentFrame > m_framesToSkip) { |
348 | visitor->dump(WTF::dataFile(), Indenter(2), [&] (PrintStream& out) { |
349 | out.print("[" , (m_currentFrame - m_framesToSkip - 1), "] " ); |
350 | }); |
351 | } |
352 | if (m_action == DumpOne && m_currentFrame > m_framesToSkip) |
353 | return StackVisitor::Done; |
354 | return StackVisitor::Continue; |
355 | } |
356 | |
357 | private: |
358 | Action m_action; |
359 | unsigned m_framesToSkip; |
360 | mutable unsigned m_currentFrame { 0 }; |
361 | }; |
362 | |
363 | void VMInspector::dumpCallFrame(JSGlobalObject* globalObject, CallFrame* callFrame, unsigned framesToSkip) |
364 | { |
365 | if (!ensureCurrentThreadOwnsJSLock(globalObject)) |
366 | return; |
367 | DumpFrameFunctor functor(DumpFrameFunctor::DumpOne, framesToSkip); |
368 | callFrame->iterate(globalObject->vm(), functor); |
369 | } |
370 | |
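// Dumps the stack slots of a single frame: the arguments, the frame header
// slots (ArgumentCount, Callee, CodeBlock, ReturnPC, CallerFrame), then the
// vars and temporaries. Host (non-JS) frames have no CodeBlock and are not
// supported.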
371 | void VMInspector::dumpRegisters(CallFrame* callFrame) |
372 | { |
373 | CodeBlock* codeBlock = callFrame->codeBlock(); |
374 | if (!codeBlock) { |
375 | dataLog("Dumping host frame registers not supported.\n" ); |
376 | return; |
377 | } |
378 | VM& vm = codeBlock->vm(); |
379 | auto valueAsString = [&] (JSValue v) -> CString { |
380 | if (!v.isCell() || VMInspector::isValidCell(&vm.heap, reinterpret_cast<JSCell*>(JSValue::encode(v)))) |
381 | return toCString(v); |
382 | return "" ; |
383 | }; |
384 | |
385 | dataLogF("Register frame: \n\n" ); |
386 | dataLogF("-----------------------------------------------------------------------------\n" ); |
387 | dataLogF(" use | address | value \n" ); |
388 | dataLogF("-----------------------------------------------------------------------------\n" ); |
389 | |
390 | const Register* it; |
391 | const Register* end; |
392 | |
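// Walk the argument slots from the last argument down to and including 'this'.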
393 | it = callFrame->registers() + CallFrameSlot::thisArgument + callFrame->argumentCount(); |
394 | end = callFrame->registers() + CallFrameSlot::thisArgument - 1; |
395 | while (it > end) { |
396 | JSValue v = it->jsValue(); |
397 | int registerNumber = it - callFrame->registers(); |
398 | String name = codeBlock->nameForRegister(VirtualRegister(registerNumber)); |
399 | dataLogF("[r% 3d %14s] | %10p | 0x%-16llx %s\n" , registerNumber, name.ascii().data(), it, (long long)JSValue::encode(v), valueAsString(v).data()); |
400 | --it; |
401 | } |
402 | |
403 | dataLogF("-----------------------------------------------------------------------------\n" ); |
404 | dataLogF("[ArgumentCount] | %10p | %lu \n" , it, (unsigned long) callFrame->argumentCount()); |
405 | |
406 | callFrame->iterate(vm, [&] (StackVisitor& visitor) { |
407 | if (visitor->callFrame() == callFrame) { |
408 | unsigned line = 0; |
409 | unsigned unusedColumn = 0; |
410 | visitor->computeLineAndColumn(line, unusedColumn); |
411 | dataLogF("[ReturnVPC] | %10p | %d (line %d)\n" , it, visitor->bytecodeIndex().offset(), line); |
412 | return StackVisitor::Done; |
413 | } |
414 | return StackVisitor::Continue; |
415 | }); |
416 | |
417 | --it; |
418 | dataLogF("[Callee] | %10p | 0x%-16llx %s\n" , it, (long long)callFrame->callee().rawPtr(), valueAsString(it->jsValue()).data()); |
419 | --it; |
420 | dataLogF("[CodeBlock] | %10p | 0x%-16llx " , it, (long long)codeBlock); |
421 | dataLogLn(codeBlock); |
422 | --it; |
423 | #if ENABLE(JIT) |
424 | AbstractPC pc = callFrame->abstractReturnPC(vm); |
425 | if (pc.hasJITReturnAddress()) |
426 | dataLogF("[ReturnPC] | %10p | %p \n" , it, pc.jitReturnAddress().value()); |
427 | --it; |
428 | #endif |
429 | dataLogF("[CallerFrame] | %10p | %p \n" , it, callFrame->callerFrame()); |
430 | --it; |
431 | dataLogF("-----------------------------------------------------------------------------\n" ); |
432 | |
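// Dump the frame's vars; slots above endOfCalleeSaves hold spilled
// callee-save registers rather than named locals.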
433 | size_t numberOfCalleeSaveSlots = codeBlock->calleeSaveSpaceAsVirtualRegisters(); |
434 | const Register* endOfCalleeSaves = it - numberOfCalleeSaveSlots; |
435 | |
436 | end = it - codeBlock->numVars(); |
437 | if (it != end) { |
438 | do { |
439 | JSValue v = it->jsValue(); |
440 | int registerNumber = it - callFrame->registers(); |
441 | String name = (it > endOfCalleeSaves) |
442 | ? "CalleeSaveReg" |
443 | : codeBlock->nameForRegister(VirtualRegister(registerNumber)); |
444 | dataLogF("[r% 3d %14s] | %10p | 0x%-16llx %s\n" , registerNumber, name.ascii().data(), it, (long long)JSValue::encode(v), valueAsString(v).data()); |
445 | --it; |
446 | } while (it != end); |
447 | } |
448 | dataLogF("-----------------------------------------------------------------------------\n" ); |
449 | |
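// Dump the remaining callee locals (the frame's temporaries).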
450 | end = it - codeBlock->numCalleeLocals() + codeBlock->numVars(); |
451 | if (it != end) { |
452 | do { |
453 | JSValue v = (*it).jsValue(); |
454 | int registerNumber = it - callFrame->registers(); |
455 | dataLogF("[r% 3d] | %10p | 0x%-16llx %s\n" , registerNumber, it, (long long)JSValue::encode(v), valueAsString(v).data()); |
456 | --it; |
457 | } while (it != end); |
458 | } |
459 | dataLogF("-----------------------------------------------------------------------------\n" ); |
460 | } |
461 | |
462 | void VMInspector::dumpStack(JSGlobalObject* globalObject, CallFrame* topCallFrame, unsigned framesToSkip) |
463 | { |
464 | if (!ensureCurrentThreadOwnsJSLock(globalObject)) |
465 | return; |
466 | if (!topCallFrame) |
467 | return; |
468 | DumpFrameFunctor functor(DumpFrameFunctor::DumpAll, framesToSkip); |
469 | topCallFrame->iterate(globalObject->vm(), functor); |
470 | } |
471 | |
472 | void VMInspector::dumpValue(JSValue value) |
473 | { |
474 | dataLogLn(value); |
475 | } |
476 | |
477 | void VMInspector::dumpCellMemory(JSCell* cell) |
478 | { |
479 | dumpCellMemoryToStream(cell, WTF::dataFile()); |
480 | } |
481 | |
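// RAII helper that raises the shared indentation level for the current scope;
// used by dumpCellMemoryToStream() below.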
482 | class IndentationScope { |
483 | public: |
484 | IndentationScope(unsigned& indentation) |
485 | : m_indentation(indentation) |
486 | { |
487 | ++m_indentation; |
488 | } |
489 | |
490 | ~IndentationScope() |
491 | { |
492 | --m_indentation; |
493 | } |
494 | |
495 | private: |
496 | unsigned& m_indentation; |
497 | }; |
498 | |
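// Prints a slot-by-slot view of the cell's memory: the header (structureID,
// indexing type, JSType, inline flags, cell state) and, for objects, the
// butterfly broken down into its preCapacity, property storage, indexing
// header, and indexed property sections.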
499 | void VMInspector::dumpCellMemoryToStream(JSCell* cell, PrintStream& out) |
500 | { |
501 | VM& vm = cell->vm(); |
502 | StructureID structureID = cell->structureID(); |
503 | Structure* structure = cell->structure(vm); |
504 | IndexingType indexingTypeAndMisc = cell->indexingTypeAndMisc(); |
505 | IndexingType indexingType = structure->indexingType(); |
506 | IndexingType indexingMode = structure->indexingMode(); |
507 | JSType type = cell->type(); |
508 | TypeInfo::InlineTypeFlags inlineTypeFlags = cell->inlineTypeFlags(); |
509 | CellState cellState = cell->cellState(); |
510 | size_t cellSize = cell->cellSize(); |
511 | size_t slotCount = cellSize / sizeof(EncodedJSValue); |
512 | |
513 | EncodedJSValue* slots = bitwise_cast<EncodedJSValue*>(cell); |
514 | unsigned indentation = 0; |
515 | |
516 | auto indent = [&] { |
for (unsigned i = 0; i < indentation; ++i)
out.print(" ");
519 | }; |
520 | |
521 | #define INDENT indent(), |
522 | |
523 | auto dumpSlot = [&] (EncodedJSValue* slots, unsigned index, const char* label = nullptr) { |
524 | out.print("[" , index, "] " , format("%p : 0x%016" PRIx64, &slots[index], slots[index])); |
525 | if (label) |
526 | out.print(" " , label); |
527 | out.print("\n" ); |
528 | }; |
529 | |
530 | out.printf("<%p, %s>\n" , cell, cell->className(vm)); |
531 | IndentationScope scope(indentation); |
532 | |
INDENT dumpSlot(slots, 0, "header");
534 | { |
535 | IndentationScope scope(indentation); |
536 | INDENT out.println("structureID " , format("%d 0x%" PRIx32, structureID, structureID), " structure " , RawPointer(structure)); |
537 | INDENT out.println("indexingTypeAndMisc " , format("%d 0x%" PRIx8, indexingTypeAndMisc, indexingTypeAndMisc), " " , IndexingTypeDump(indexingMode)); |
538 | INDENT out.println("type " , format("%d 0x%" PRIx8, type, type)); |
539 | INDENT out.println("flags " , format("%d 0x%" PRIx8, inlineTypeFlags, inlineTypeFlags)); |
540 | INDENT out.println("cellState " , format("%d" , cellState)); |
541 | } |
542 | |
543 | unsigned slotIndex = 1; |
544 | if (cell->isObject()) { |
545 | JSObject* obj = static_cast<JSObject*>(const_cast<JSCell*>(cell)); |
546 | Butterfly* butterfly = obj->butterfly(); |
547 | size_t butterflySize = obj->butterflyTotalSize(); |
548 | |
INDENT dumpSlot(slots, slotIndex, "butterfly");
550 | slotIndex++; |
551 | |
552 | if (butterfly) { |
553 | IndentationScope scope(indentation); |
554 | |
bool hasIndexingHeader = structure->hasIndexingHeader(cell);
556 | bool hasAnyArrayStorage = JSC::hasAnyArrayStorage(indexingType); |
557 | |
558 | size_t preCapacity = obj->butterflyPreCapacity(); |
559 | size_t propertyCapacity = structure->outOfLineCapacity(); |
560 | |
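// A butterfly grows in both directions: named properties live below the
// butterfly pointer and indexed properties above it, so the allocation's
// true base depends on whether there is an indexing header.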
561 | void* base = hasIndexingHeader |
562 | ? butterfly->base(preCapacity, propertyCapacity) |
563 | : butterfly->base(structure); |
564 | |
565 | unsigned publicLength = butterfly->publicLength(); |
566 | unsigned vectorLength = butterfly->vectorLength(); |
567 | size_t butterflyCellSize = MarkedSpace::optimalSizeFor(butterflySize); |
568 | |
569 | size_t endOfIndexedPropertiesIndex = butterflySize / sizeof(EncodedJSValue); |
570 | size_t endOfButterflyIndex = butterflyCellSize / sizeof(EncodedJSValue); |
571 | |
572 | INDENT out.println("base " , RawPointer(base)); |
573 | INDENT out.println("hasIndexingHeader " , (hasIndexingHeader ? "YES" : "NO" ), " hasAnyArrayStorage " , (hasAnyArrayStorage ? "YES" : "NO" )); |
574 | if (hasIndexingHeader) { |
575 | INDENT out.print("publicLength " , publicLength, " vectorLength " , vectorLength); |
576 | if (hasAnyArrayStorage) |
577 | out.print(" indexBias " , butterfly->arrayStorage()->m_indexBias); |
578 | out.print("\n" ); |
579 | } |
580 | INDENT out.println("preCapacity " , preCapacity, " propertyCapacity " , propertyCapacity); |
581 | |
582 | unsigned index = 0; |
583 | EncodedJSValue* slots = reinterpret_cast<EncodedJSValue*>(base); |
584 | |
585 | auto asVoidPtr = [] (void* p) { |
586 | return p; |
587 | }; |
588 | |
auto dumpSectionHeader = [&] (const char* name) {
out.println("<--- ", name);
591 | }; |
592 | |
593 | auto dumpSection = [&] (unsigned startIndex, unsigned endIndex, const char* name) -> unsigned { |
594 | for (unsigned index = startIndex; index < endIndex; ++index) { |
595 | if (name && index == startIndex) |
596 | INDENT dumpSectionHeader(name); |
597 | INDENT dumpSlot(slots, index); |
598 | } |
599 | return endIndex; |
600 | }; |
601 | |
602 | { |
603 | IndentationScope scope(indentation); |
604 | |
index = dumpSection(index, preCapacity, "preCapacity");
index = dumpSection(index, preCapacity + propertyCapacity, "propertyCapacity");

if (hasIndexingHeader)
index = dumpSection(index, index + 1, "indexingHeader");
610 | |
611 | INDENT dumpSectionHeader("butterfly" ); |
612 | if (hasAnyArrayStorage) { |
613 | RELEASE_ASSERT(asVoidPtr(butterfly->arrayStorage()) == asVoidPtr(&slots[index])); |
614 | RELEASE_ASSERT(ArrayStorage::vectorOffset() == 2 * sizeof(EncodedJSValue)); |
index = dumpSection(index, index + 2, "arrayStorage");
616 | } |
617 | |
index = dumpSection(index, endOfIndexedPropertiesIndex, "indexedProperties");
index = dumpSection(index, endOfButterflyIndex, "unallocated capacity");
620 | } |
621 | } |
622 | } |
623 | |
624 | for (; slotIndex < slotCount; ++slotIndex) |
625 | INDENT dumpSlot(slots, slotIndex); |
626 | |
627 | #undef INDENT |
628 | } |
629 | |
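// Logs each subspace's name together with the StringHasher hash of that name,
// formatted via RawPointer for easy comparison against values in other logs.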
630 | void VMInspector::dumpSubspaceHashes(VM* vm) |
631 | { |
632 | unsigned count = 0; |
633 | vm->heap.objectSpace().forEachSubspace([&] (const Subspace& subspace) -> IterationStatus { |
634 | const char* name = subspace.name(); |
635 | unsigned hash = StringHasher::computeHash(name); |
636 | void* hashAsPtr = reinterpret_cast<void*>(static_cast<uintptr_t>(hash)); |
637 | dataLogLn(" [" , count++, "] " , name, " Hash:" , RawPointer(hashAsPtr)); |
638 | return IterationStatus::Continue; |
639 | }); |
640 | dataLogLn(); |
641 | } |
642 | |
643 | } // namespace JSC |
644 | |