1 | /* |
2 | * Copyright (C) 2017 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' |
14 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, |
15 | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS |
17 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
18 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
19 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
20 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
21 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
22 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
23 | * THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "AsyncStackTrace.h" |
28 | |
29 | #include "ScriptCallStack.h" |
30 | #include <wtf/Ref.h> |
31 | |
32 | namespace Inspector { |
33 | |
34 | Ref<AsyncStackTrace> AsyncStackTrace::create(Ref<ScriptCallStack>&& callStack, bool singleShot, RefPtr<AsyncStackTrace> parent) |
35 | { |
36 | ASSERT(callStack->size()); |
37 | return adoptRef(*new AsyncStackTrace(WTFMove(callStack), singleShot, WTFMove(parent))); |
38 | } |
39 | |
40 | AsyncStackTrace::AsyncStackTrace(Ref<ScriptCallStack>&& callStack, bool singleShot, RefPtr<AsyncStackTrace> parent) |
41 | : m_callStack(WTFMove(callStack)) |
42 | , m_parent(parent) |
43 | , m_singleShot(singleShot) |
44 | { |
45 | if (m_parent) |
46 | m_parent->m_childCount++; |
47 | } |
48 | |
49 | AsyncStackTrace::~AsyncStackTrace() |
50 | { |
51 | if (m_parent) |
52 | remove(); |
53 | ASSERT(!m_childCount); |
54 | } |
55 | |
56 | bool AsyncStackTrace::isPending() const |
57 | { |
58 | return m_state == State::Pending; |
59 | } |
60 | |
61 | bool AsyncStackTrace::isLocked() const |
62 | { |
63 | return m_state == State::Pending || m_state == State::Active || m_childCount > 1; |
64 | } |
65 | |
66 | void AsyncStackTrace::willDispatchAsyncCall(size_t maxDepth) |
67 | { |
68 | ASSERT(m_state == State::Pending); |
69 | m_state = State::Active; |
70 | |
71 | truncate(maxDepth); |
72 | } |
73 | |
74 | void AsyncStackTrace::didDispatchAsyncCall() |
75 | { |
76 | ASSERT(m_state == State::Active || m_state == State::Canceled); |
77 | |
78 | if (m_state == State::Active && !m_singleShot) { |
79 | m_state = State::Pending; |
80 | return; |
81 | } |
82 | |
83 | m_state = State::Dispatched; |
84 | |
85 | if (!m_childCount) |
86 | remove(); |
87 | } |
88 | |
89 | void AsyncStackTrace::didCancelAsyncCall() |
90 | { |
91 | if (m_state == State::Canceled) |
92 | return; |
93 | |
94 | if (m_state == State::Pending && !m_childCount) |
95 | remove(); |
96 | |
97 | m_state = State::Canceled; |
98 | } |
99 | |
// Serializes this trace and its ancestor chain into the inspector protocol
// representation: a linked list of StackTrace objects, child-most first,
// each pointing at its parent via setParentStackTrace().
RefPtr<Protocol::Console::StackTrace> AsyncStackTrace::buildInspectorObject() const
{
    RefPtr<Protocol::Console::StackTrace> topStackTrace;
    RefPtr<Protocol::Console::StackTrace> previousStackTrace;

    // Walk from this node up through m_parent links, converting each
    // node's call stack into a protocol object.
    auto* stackTrace = this;
    while (stackTrace) {
        auto& callStack = stackTrace->m_callStack;
        ASSERT(callStack->size());

        auto protocolObject = Protocol::Console::StackTrace::create()
            .setCallFrames(callStack->buildInspectorArray())
            .release();

        // Propagate the truncation marker set by truncate().
        if (stackTrace->m_truncated)
            protocolObject->setTruncated(true);
        // A native top frame marks the boundary where script re-entered
        // from native code.
        if (callStack->at(0).isNative())
            protocolObject->setTopCallFrameIsBoundary(true);

        // The first object built (this node's own stack) is the one
        // returned to the caller.
        if (!topStackTrace)
            topStackTrace = protocolObject.ptr();

        // Link the previously-built (child) object to this (parent) one.
        if (previousStackTrace)
            previousStackTrace->setParentStackTrace(protocolObject.ptr());

        previousStackTrace = WTFMove(protocolObject);
        stackTrace = stackTrace->m_parent.get();
    }

    return topStackTrace;
}
131 | |
// Trims this node's ancestor chain so that the total number of call frames
// does not exceed maxDepth. Ancestors that are "locked" (see isLocked())
// must not be mutated, so when the cut point lies beyond a locked node the
// locked portion of the chain is cloned rather than modified in place.
void AsyncStackTrace::truncate(size_t maxDepth)
{
    AsyncStackTrace* lastUnlockedAncestor = nullptr;
    size_t depth = 0;

    // Find the deepest ancestor (newStackTraceRoot) that still fits within
    // maxDepth frames, remembering the last node whose parent is locked.
    auto* newStackTraceRoot = this;
    while (newStackTraceRoot) {
        depth += newStackTraceRoot->m_callStack->size();
        if (depth >= maxDepth)
            break;

        auto* parent = newStackTraceRoot->m_parent.get();
        if (!lastUnlockedAncestor && parent && parent->isLocked())
            lastUnlockedAncestor = newStackTraceRoot;

        newStackTraceRoot = parent;
    }

    // Nothing to cut: either the whole chain fits within maxDepth, or the
    // cut point is already the root.
    if (!newStackTraceRoot || !newStackTraceRoot->m_parent)
        return;

    if (!lastUnlockedAncestor) {
        // No locked nodes belong to the trace. The subtree at the new root
        // is moved to a new tree, and marked as truncated if necessary.
        newStackTraceRoot->m_truncated = true;
        newStackTraceRoot->remove();
        return;
    }

    // The new root has a locked descendent. Since truncating a stack trace
    // cannot mutate locked nodes or their ancestors, a new tree is created by
    // cloning the locked portion of the trace (the path from the locked node
    // to the new root). The subtree rooted at the last unlocked ancestor is
    // then appended to the new tree.
    auto* previousNode = lastUnlockedAncestor;

    // The subtree being truncated must be removed from it's parent before
    // updating its parent pointer chain.
    RefPtr<AsyncStackTrace> sourceNode = lastUnlockedAncestor->m_parent;
    lastUnlockedAncestor->remove();

    // Clone each locked ancestor up to (and including) newStackTraceRoot,
    // threading the clones together via m_parent. Clones are created as
    // single-shot with childCount 1: each has exactly one child in the new
    // chain and will never dispatch again.
    while (sourceNode) {
        previousNode->m_parent = AsyncStackTrace::create(sourceNode->m_callStack.copyRef(), true, nullptr);
        previousNode->m_parent->m_childCount = 1;
        previousNode = previousNode->m_parent.get();

        if (sourceNode.get() == newStackTraceRoot)
            break;

        sourceNode = sourceNode->m_parent;
    }

    // The topmost clone marks where the original chain was cut off.
    previousNode->m_truncated = true;
}
186 | |
187 | void AsyncStackTrace::remove() |
188 | { |
189 | if (!m_parent) |
190 | return; |
191 | |
192 | ASSERT(m_parent->m_childCount); |
193 | m_parent->m_childCount--; |
194 | m_parent = nullptr; |
195 | } |
196 | |
197 | } // namespace Inspector |
198 | |