/*
 * Copyright (C) 2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ProbeStack.h"

#include <algorithm>
#include <cstring>
#include <limits>
#include <memory>
#include <wtf/StdLibExtras.h>

#if ENABLE(MASM_PROBE)

namespace JSC {
namespace Probe {
static void* const maxLowWatermark = reinterpret_cast<void*>(std::numeric_limits<uintptr_t>::max());

#if ASAN_ENABLED
// FIXME: we should consider using the copy function for both ASan and non-ASan builds.
// https://bugs.webkit.org/show_bug.cgi?id=176961
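// SUPPRESS_ASAN disables ASan instrumentation for this function so that it can
// read and write machine stack memory that ASan may consider poisoned.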
SUPPRESS_ASAN
static void copyStackPage(void* dst, void* src, size_t size)
{
    ASSERT(roundUpToMultipleOf<sizeof(uintptr_t)>(dst) == dst);
    ASSERT(roundUpToMultipleOf<sizeof(uintptr_t)>(src) == src);

    uintptr_t* dstPointer = reinterpret_cast<uintptr_t*>(dst);
    uintptr_t* srcPointer = reinterpret_cast<uintptr_t*>(src);
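    // Copy a word at a time; size is assumed to be a multiple of
    // sizeof(uintptr_t), which holds for the page- and chunk-granular
    // sizes that callers pass.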
    for (; size; size -= sizeof(uintptr_t))
        *dstPointer++ = *srcPointer++;
}
#else
#define copyStackPage(dst, src, size) std::memcpy(dst, src, size)
#endif

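// A Page shadows one machine stack page: the constructor snapshots the stack
// page into m_buffer and records the offset needed to translate a logical
// stack address into its physical location within the buffer.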
Page::Page(void* baseAddress)
    : m_baseLogicalAddress(baseAddress)
    , m_physicalAddressOffset(reinterpret_cast<uint8_t*>(&m_buffer) - reinterpret_cast<uint8_t*>(baseAddress))
{
    copyStackPage(&m_buffer, baseAddress, s_pageSize);
}

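// Walk the dirty bit vector, coalescing each run of consecutive dirty chunks
// so that the run is written back to the machine stack with a single copy.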
void Page::flushWrites()
{
    uint64_t dirtyBits = m_dirtyBits;
    size_t offset = 0;
    while (dirtyBits) {
        // Find start.
        if (dirtyBits & 1) {
            size_t startOffset = offset;
            // Find end.
            do {
                dirtyBits = dirtyBits >> 1;
                offset += s_chunkSize;
            } while (dirtyBits & 1);

            size_t size = offset - startOffset;
            uint8_t* src = reinterpret_cast<uint8_t*>(&m_buffer) + startOffset;
            uint8_t* dst = reinterpret_cast<uint8_t*>(m_baseLogicalAddress) + startOffset;
            copyStackPage(dst, src, size);
        }
        dirtyBits = dirtyBits >> 1;
        offset += s_chunkSize;
    }
    m_dirtyBits = 0;
}

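// Returns the logical address of the lowest dirty chunk in this page, or
// maxLowWatermark if no chunk is dirty.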
void* Page::lowWatermarkFromVisitingDirtyChunks()
{
    uint64_t dirtyBits = m_dirtyBits;
    size_t offset = 0;
    while (dirtyBits) {
        if (dirtyBits & 1)
            return reinterpret_cast<uint8_t*>(m_baseLogicalAddress) + offset;
        dirtyBits = dirtyBits >> 1;
        offset += s_chunkSize;
    }
    return maxLowWatermark;
}

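// Transfers the stack bounds and the shadow page map; the moved-from Stack is
// marked invalid so that debug assertions can catch any further use of it.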
Stack::Stack(Stack&& other)
    : m_stackBounds(WTFMove(other.m_stackBounds))
    , m_pages(WTFMove(other.m_pages))
{
    m_savedStackPointer = other.m_savedStackPointer;
#if !ASSERT_DISABLED
    other.m_isValid = false;
#endif
}

bool Stack::hasWritesToFlush()
{
    return std::any_of(m_pages.begin(), m_pages.end(), [] (auto& it) { return it.value->hasWritesToFlush(); });
}

void Stack::flushWrites()
{
    for (auto it = m_pages.begin(); it != m_pages.end(); ++it)
        it->value->flushWritesIfNeeded();
}

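// Slow path taken when an address misses the one-entry page cache: look up (or
// create) the shadow page for the address, and refill m_lastAccessedPage and
// m_lastAccessedPageBaseAddress.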
Page* Stack::ensurePageFor(void* address)
{
    // Since the machine stack is always allocated in units of whole pages, asserting
    // that the address is contained in the stack is sufficient to infer that its page
    // is also contained in the stack.
    RELEASE_ASSERT(m_stackBounds.contains(address));

    // We may have gotten here because of a cache miss. So, look up the page first
    // before allocating a new one.
    void* baseAddress = Page::baseAddressFor(address);
    auto it = m_pages.find(baseAddress);
    if (LIKELY(it != m_pages.end()))
        m_lastAccessedPage = it->value.get();
    else {
        std::unique_ptr<Page> page = makeUnique<Page>(baseAddress);
        auto result = m_pages.add(baseAddress, WTFMove(page));
        m_lastAccessedPage = result.iterator->value.get();
    }
    m_lastAccessedPageBaseAddress = baseAddress;
    return m_lastAccessedPage;
}

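// Computes the minimum dirty address across all shadow pages. A page whose
// base address is already above the current low watermark cannot lower it,
// so such pages are skipped without visiting their chunks.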
void* Stack::lowWatermarkFromVisitingDirtyPages()
{
    void* low = maxLowWatermark;
    for (auto it = m_pages.begin(); it != m_pages.end(); ++it) {
        Page& page = *it->value;
        if (!page.hasWritesToFlush() || low < page.baseAddress())
            continue;
        low = std::min(low, page.lowWatermarkFromVisitingDirtyChunks());
    }
    return low;
}

} // namespace Probe
} // namespace JSC

#endif // ENABLE(MASM_PROBE)