1 | /* |
2 | * Copyright (C) 2017-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #pragma once |
27 | |
28 | #include "IsoDirectory.h" |
29 | |
30 | namespace bmalloc { |
31 | |
// Base-class constructor: records the owning heap. Page storage and the
// eligibility/commit bookkeeping live in the IsoDirectory subclass.
template<typename Config>
IsoDirectoryBase<Config>::IsoDirectoryBase(IsoHeapImpl<Config>& heap)
    : m_heap(heap)
{
}
37 | |
38 | template<typename Config, unsigned passedNumPages> |
39 | IsoDirectory<Config, passedNumPages>::IsoDirectory(IsoHeapImpl<Config>& heap) |
40 | : IsoDirectoryBase<Config>(heap) |
41 | { |
42 | for (unsigned i = numPages; i--;) |
43 | m_pages[i] = nullptr; |
44 | } |
45 | |
// Hands out the first page that can accept an allocation. A page qualifies if
// it is eligible (has free cells) or not committed (can be created/recommitted).
// Returns Full when no page qualifies and OutOfMemory when page creation fails.
template<typename Config, unsigned passedNumPages>
EligibilityResult<Config> IsoDirectory<Config, passedNumPages>::takeFirstEligible()
{
    // m_firstEligibleOrDecommitted is a search hint: no qualifying page exists
    // below it, so the bit scan can start there instead of at zero.
    unsigned pageIndex = (m_eligible | ~m_committed).findBit(m_firstEligibleOrDecommitted, true);
    m_firstEligibleOrDecommitted = pageIndex;
    // The hint must never cause us to skip a qualifying page: a scan from 0
    // has to land on the same index.
    BASSERT((m_eligible | ~m_committed).findBit(0, true) == pageIndex);
    if (pageIndex >= numPages)
        return EligibilityKind::Full;

    Scavenger& scavenger = *Scavenger::get();
    scavenger.didStartGrowing();

    IsoPage<Config>* page = m_pages[pageIndex];

    if (!m_committed[pageIndex]) {
        scavenger.scheduleIfUnderMemoryPressure(IsoPageBase::pageSize);

        // It could be that we haven't even allocated a page yet. Do that now!
        if (!page) {
            page = IsoPage<Config>::tryCreate(*this, pageIndex);
            if (!page)
                return EligibilityKind::OutOfMemory;
            m_pages[pageIndex] = page;
        } else {
            // This means that we have a page that we previously allocated and that page just needs to be
            // committed.
            // Recommit the physical memory, then placement-new to reinitialize
            // the page header in place (the virtual range was kept reserved).
            vmAllocatePhysicalPages(page, IsoPageBase::pageSize);
            new (page) IsoPage<Config>(*this, pageIndex);
        }

        m_committed[pageIndex] = true;
        this->m_heap.didCommit(page, IsoPageBase::pageSize);
    } else {
        // Page was committed and empty: it is being put back into service, so
        // it no longer counts as freeable memory for the heap's accounting.
        if (m_empty[pageIndex])
            this->m_heap.isNoLongerFreeable(page, IsoPageBase::pageSize);
    }

    RELEASE_BASSERT(page);

    // Make the page non-empty and non-eligible.
    m_eligible[pageIndex] = false;
    m_empty[pageIndex] = false;
    return page;
}
90 | |
91 | template<typename Config, unsigned passedNumPages> |
92 | void IsoDirectory<Config, passedNumPages>::didBecome(IsoPage<Config>* page, IsoPageTrigger trigger) |
93 | { |
94 | static constexpr bool verbose = false; |
95 | unsigned pageIndex = page->index(); |
96 | switch (trigger) { |
97 | case IsoPageTrigger::Eligible: |
98 | if (verbose) |
99 | fprintf(stderr, "%p: %p did become eligible.\n" , this, page); |
100 | m_eligible[pageIndex] = true; |
101 | m_firstEligibleOrDecommitted = std::min(m_firstEligibleOrDecommitted, pageIndex); |
102 | this->m_heap.didBecomeEligibleOrDecommited(this); |
103 | return; |
104 | case IsoPageTrigger::Empty: |
105 | if (verbose) |
106 | fprintf(stderr, "%p: %p did become empty.\n" , this, page); |
107 | BASSERT(!!m_committed[pageIndex]); |
108 | this->m_heap.isNowFreeable(page, IsoPageBase::pageSize); |
109 | m_empty[pageIndex] = true; |
110 | Scavenger::get()->schedule(IsoPageBase::pageSize); |
111 | return; |
112 | } |
113 | BCRASH(); |
114 | } |
115 | |
// Called after the physical memory behind page `index` has been decommitted.
// Updates accounting so the slot is seen as "not committed" (and therefore a
// candidate for takeFirstEligible() to recommit).
template<typename Config, unsigned passedNumPages>
void IsoDirectory<Config, passedNumPages>::didDecommit(unsigned index)
{
    // FIXME: We could do this without grabbing the lock. I just doubt that it matters. This is not going
    // to be a frequently executed path, in the sense that decommitting perf will be dominated by the
    // syscall itself (which has to do many hard things).
    std::lock_guard<Mutex> locker(this->m_heap.lock);
    BASSERT(!!m_committed[index]);
    // The page's memory is gone, so it no longer counts as freeable...
    this->m_heap.isNoLongerFreeable(m_pages[index], IsoPageBase::pageSize);
    m_committed[index] = false;
    // ...and the slot becomes a decommitted candidate, so lower the search hint.
    m_firstEligibleOrDecommitted = std::min(m_firstEligibleOrDecommitted, index);
    this->m_heap.didBecomeEligibleOrDecommited(this);
    this->m_heap.didDecommit(m_pages[index], IsoPageBase::pageSize);
}
130 | |
131 | template<typename Config, unsigned passedNumPages> |
132 | void IsoDirectory<Config, passedNumPages>::scavengePage(size_t index, Vector<DeferredDecommit>& decommits) |
133 | { |
134 | // Make sure that this page is now off limits. |
135 | m_empty[index] = false; |
136 | m_eligible[index] = false; |
137 | decommits.push(DeferredDecommit(this, m_pages[index], index)); |
138 | } |
139 | |
140 | template<typename Config, unsigned passedNumPages> |
141 | void IsoDirectory<Config, passedNumPages>::scavenge(Vector<DeferredDecommit>& decommits) |
142 | { |
143 | (m_empty & m_committed).forEachSetBit( |
144 | [&] (size_t index) { |
145 | scavengePage(index, decommits); |
146 | }); |
147 | } |
148 | |
149 | template<typename Config, unsigned passedNumPages> |
150 | template<typename Func> |
151 | void IsoDirectory<Config, passedNumPages>::forEachCommittedPage(const Func& func) |
152 | { |
153 | m_committed.forEachSetBit( |
154 | [&] (size_t index) { |
155 | func(*m_pages[index]); |
156 | }); |
157 | } |
158 | |
159 | } // namespace bmalloc |
160 | |
161 | |