/*
 * Copyright (C) 2017-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "CPU.h"
#include <wtf/HashMap.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Threading.h>

#if ENABLE(MASM_PROBE)

namespace JSC {

namespace Probe {

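// A Page buffers a s_pageSize-aligned span of stack memory. Reads and writes
// operate on a local snapshot (m_buffer) rather than on the stack itself:
// writes are recorded at chunk granularity in m_dirtyBits, and only the dirty
// chunks are copied back to the real stack when flushWrites() is called.
// "Logical" addresses refer to the real stack; "physical" addresses point
// into the snapshot buffer.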
class Page {
    WTF_MAKE_FAST_ALLOCATED;
public:
    Page(void* baseAddress);

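    // Round a pointer down to the base of its enclosing page or chunk. For
    // example, with the non-ASan sizes below (s_pageSize = 1024, s_chunkSize = 16):
    // baseAddressFor(0x1234) == 0x1000 and chunkAddressFor(0x1234) == 0x1230.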
    static void* baseAddressFor(void* p)
    {
        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(p) & ~s_pageMask);
    }
    static void* chunkAddressFor(void* p)
    {
        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(p) & ~s_chunkMask);
    }

    void* baseAddress() { return m_baseLogicalAddress; }

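    // Read a T from the snapshot at the given logical (stack) address. The
    // read goes through std::memcpy, so T only needs to be trivially copyable;
    // no alignment or strict-aliasing requirements are imposed on the address.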
    template<typename T>
    T get(void* logicalAddress)
    {
        void* from = physicalAddressFor(logicalAddress);
        typename std::remove_const<T>::type to { };
        std::memcpy(&to, from, sizeof(to)); // Use std::memcpy to avoid strict aliasing issues.
        return to;
    }
    template<typename T>
    T get(void* logicalBaseAddress, ptrdiff_t offset)
    {
        return get<T>(reinterpret_cast<uint8_t*>(logicalBaseAddress) + offset);
    }

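    // Write a T into the snapshot and mark every chunk it covers as dirty so
    // that flushWrites() will copy it back. Chunks are marked starting at the
    // chunk containing logicalAddress; a write that straddles one more chunk
    // boundary than its size implies would leave that last chunk unmarked, so
    // callers are expected to pass suitably aligned addresses.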
    template<typename T>
    void set(void* logicalAddress, T value)
    {
        if (sizeof(T) <= s_chunkSize)
            m_dirtyBits |= dirtyBitFor(logicalAddress);
        else {
            // Number of chunks covered by T, i.e. sizeof(T) rounded up to a
            // whole number of chunks.
            size_t numberOfChunks = roundUpToMultipleOf<s_chunkSize>(sizeof(T)) / s_chunkSize;
            uint8_t* dirtyAddress = reinterpret_cast<uint8_t*>(logicalAddress);
            for (size_t i = 0; i < numberOfChunks; ++i, dirtyAddress += s_chunkSize)
                m_dirtyBits |= dirtyBitFor(dirtyAddress);
        }
        void* to = physicalAddressFor(logicalAddress);
        std::memcpy(to, &value, sizeof(T)); // Use std::memcpy to avoid strict aliasing issues.
    }
    template<typename T>
    void set(void* logicalBaseAddress, ptrdiff_t offset, T value)
    {
        set<T>(reinterpret_cast<uint8_t*>(logicalBaseAddress) + offset, value);
    }

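    // Dirty chunks are copied back to the real stack by flushWrites() (defined
    // out of line); flushWritesIfNeeded() is the cheap inline check.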
    bool hasWritesToFlush() const { return !!m_dirtyBits; }
    void flushWritesIfNeeded()
    {
        if (m_dirtyBits)
            flushWrites();
    }

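    // Returns the lowest dirty chunk address in this page, i.e. how far down
    // the stack this page has been written to.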
    void* lowWatermarkFromVisitingDirtyChunks();

private:
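    // Map a logical address to the dirty bit of its chunk within this page.
    // For example, with s_chunkSizeShift = 4, an offset of 0x230 within the
    // page selects chunk 0x23 (35), i.e. bit 35 of m_dirtyBits.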
    uint64_t dirtyBitFor(void* logicalAddress)
    {
        uintptr_t offset = reinterpret_cast<uintptr_t>(logicalAddress) & s_pageMask;
        return static_cast<uint64_t>(1) << (offset >> s_chunkSizeShift);
    }

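    // Translate a logical (stack) address into the corresponding slot in
    // m_buffer. m_physicalAddressOffset is the constant delta between the two.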
    void* physicalAddressFor(void* logicalAddress)
    {
        return reinterpret_cast<uint8_t*>(logicalAddress) + m_physicalAddressOffset;
    }

    void flushWrites();

    void* m_baseLogicalAddress { nullptr };
    ptrdiff_t m_physicalAddressOffset;
    uint64_t m_dirtyBits { 0 };

#if ASAN_ENABLED
    // The ASan stack may contain poisoned words that may be manipulated at ASan's discretion.
    // We would never touch those words anyway, but let's ensure that the page size is set
    // such that the chunk size is guaranteed to be exactly sizeof(uintptr_t) so that we won't
    // inadvertently overwrite one of ASan's words on the stack when we copy back the dirty
    // chunks.
    // FIXME: we should consider using the same page size for both ASan and non-ASan builds.
    // https://bugs.webkit.org/show_bug.cgi?id=176961
    static constexpr size_t s_pageSize = 64 * sizeof(uintptr_t); // because there are 64 bits in m_dirtyBits.
#else // not ASAN_ENABLED
    static constexpr size_t s_pageSize = 1024;
#endif // ASAN_ENABLED
    static constexpr uintptr_t s_pageMask = s_pageSize - 1;
    static constexpr size_t s_chunksPerPage = sizeof(uint64_t) * 8; // number of bits in m_dirtyBits.
    static constexpr size_t s_chunkSize = s_pageSize / s_chunksPerPage;
    static constexpr uintptr_t s_chunkMask = s_chunkSize - 1;
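    // Worked out: non-ASan, s_chunkSize = 1024 / 64 = 16 bytes (shift 4);
    // ASan on 64-bit, s_chunkSize = (64 * 8) / 64 = 8 = sizeof(uintptr_t) (shift 3).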
#if ASAN_ENABLED
    static_assert(s_chunkSize == sizeof(uintptr_t), "bad chunkSize");
    static constexpr size_t s_chunkSizeShift = is64Bit() ? 3 : 2;
#else // not ASAN_ENABLED
    static constexpr size_t s_chunkSizeShift = 4;
#endif // ASAN_ENABLED
    static_assert(s_pageSize > s_chunkSize, "bad pageSize or chunkSize");
    static_assert(s_chunkSize == (1 << s_chunkSizeShift), "bad chunkSizeShift");

    typedef typename std::aligned_storage<s_pageSize, std::alignment_of<uintptr_t>::value>::type Buffer;
    Buffer m_buffer;
};

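// A Stack fronts the current thread's machine stack with a lazily populated
// cache of Pages, so a probe can speculatively read and write stack memory
// without touching the real stack until flushWrites().
//
// Illustrative usage sketch (not taken from the probe runtime; `sp` stands in
// for a captured stack pointer):
//
//     Probe::Stack stack; // snapshots the current thread's stack bounds
//     auto value = stack.get<uintptr_t>(sp, 8); // read through the page cache
//     stack.set<uintptr_t>(sp, 8, value + 1); // buffered write; marks a chunk dirty
//     if (stack.hasWritesToFlush())
//         stack.flushWrites(); // copy dirty chunks back to the real stack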
class Stack {
    WTF_MAKE_FAST_ALLOCATED;
public:
    Stack()
        : m_stackBounds(Thread::current().stack())
    { }
    Stack(Stack&& other);

    void* lowWatermarkFromVisitingDirtyPages();
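    // The low watermark is computed at chunk granularity because dirty memory
    // is flushed back chunk by chunk; the ASSERT checks the cheap answer
    // against a full walk of the dirty pages.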
    void* lowWatermark(void* stackPointer)
    {
        ASSERT(Page::chunkAddressFor(stackPointer) == lowWatermarkFromVisitingDirtyPages());
        return Page::chunkAddressFor(stackPointer);
    }

    template<typename T>
    T get(void* address)
    {
        Page* page = pageFor(address);
        return page->get<T>(address);
    }
    template<typename T>
    T get(void* logicalBaseAddress, ptrdiff_t offset)
    {
        return get<T>(reinterpret_cast<uint8_t*>(logicalBaseAddress) + offset);
    }

    template<typename T>
    void set(void* address, T value)
    {
        Page* page = pageFor(address);
        page->set<T>(address, value);
    }

    template<typename T>
    void set(void* logicalBaseAddress, ptrdiff_t offset, T value)
    {
        set<T>(reinterpret_cast<uint8_t*>(logicalBaseAddress) + offset, value);
    }

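    // Returns the Page covering the given address, allocating and caching it
    // in m_pages on first use (defined out of line).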
    JS_EXPORT_PRIVATE Page* ensurePageFor(void* address);

    void* savedStackPointer() const { return m_savedStackPointer; }
    void setSavedStackPointer(void* sp) { m_savedStackPointer = sp; }

    bool hasWritesToFlush();
    void flushWrites();

#if !ASSERT_DISABLED
    bool isValid() { return m_isValid; }
#endif

private:
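    // Fast path: most accesses hit the same page repeatedly, so check the
    // one-entry cache before falling back to the hash map lookup.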
    Page* pageFor(void* address)
    {
        if (LIKELY(Page::baseAddressFor(address) == m_lastAccessedPageBaseAddress))
            return m_lastAccessedPage;
        return ensurePageFor(address);
    }

    void* m_savedStackPointer { nullptr };

    // A cache of the last accessed page details for quick access.
    void* m_lastAccessedPageBaseAddress { nullptr };
    Page* m_lastAccessedPage { nullptr };

    StackBounds m_stackBounds;
    HashMap<void*, std::unique_ptr<Page>> m_pages;

#if !ASSERT_DISABLED
    bool m_isValid { true };
#endif
};

} // namespace Probe
} // namespace JSC

#endif // ENABLE(MASM_PROBE)