1 // mem/pagealloc.cc -- low-level page allocator
3 // This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
5 // Permission is hereby granted, free of charge, to any person obtaining a copy of
6 // this software and associated documentation files (the "Software"), to deal with
7 // the Software without restriction, including without limitation the rights to
8 // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
9 // of the Software, and to permit persons to whom the Software is furnished to do
10 // so, subject to the following conditions:
12 // * Redistributions of source code must retain the above copyright notice,
13 // this list of conditions and the following disclaimers.
15 // * Redistributions in binary form must reproduce the above copyright notice,
16 // this list of conditions and the following disclaimers in the
17 // documentation and/or other materials provided with the distribution.
19 // * The names of the Software's authors and/or contributors
20 // may not be used to endorse or promote products derived from
21 // this Software without specific prior written permission.
23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
25 // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 // CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
// SOFTWARE.
31 #include <kern/kernel.h>
32 #include <kern/pagealloc.h>
33 #include <kern/paging.h>
34 #include <kern/bitops.h>
35 #include <kern/list.h>
// Global table of Page structs, one per physical page frame, plus a
// pointer to the last entry.  init() indexes it as pages[page_number]
// (see the base/size bounds check there), so the table presumably spans
// all of physical memory up to Arch::mem_end -- confirm against the
// allocator's setup code.
Page *pages, *last_page;
// Return the length, in pages, of the free chunk that starts at "start".
// For a free chunk, free.otherside on the first page points at the last
// page of the chunk (free() and shrink_chunk() maintain the symmetric
// links), so the size is simply end - start + 1.
// NOTE(review): the braces of this body were lost in extraction; the
// statements below are preserved exactly as found.
inline size_t PageAllocZone::chunk_size(Page *start)
ptrdiff_t ret = start->free.otherside - start + 1;
// A chunk can never be larger than the zone that contains it.
assert(static_cast<size_t>(ret) <= zonesize);
return static_cast<size_t>(ret);
// Map a bin index to the dummy "head" Page anchoring that bin's list of
// free chunks.  Body not visible in this view -- presumably returns
// &bins[bin], given that alloc() recovers the index via "head - bins";
// confirm against the full source.
inline Page *PageAllocZone::bin_to_head(int bin)
// Minimum chunk size, in pages, held by the given bin.  Body not visible
// in this view; the callers pair it with ll_get_order_round_up/_down, so
// bins are presumably power-of-two sized (1 << bin) -- confirm.
inline uint PageAllocZone::bin_to_size(int bin)
// Bin to search when ALLOCATING "size" pages: round up to the next
// power-of-two order so that any chunk found in the bin is guaranteed to
// be at least "size" pages.
// NOTE(review): the remainder of this body (presumably a clamp to
// num_bins - 1 and the return) is missing from this view.
inline int PageAllocZone::size_to_bin_alloc(size_t size)
int bin = ll_get_order_round_up(size);
// Bin in which to FILE a free chunk of "size" pages: round DOWN, so the
// invariant "every chunk in bin b is >= bin_to_size(b) pages" holds.
// NOTE(review): the rest of this body is missing from this view.
inline int PageAllocZone::size_to_bin_free(size_t size)
int bin = ll_get_order_round_down(size);
// Unlink "chunk" from the free list of "bin", maintaining the bin
// forwarding scheme: an empty bin's head has free.otherside pointing at
// the head of the next non-empty (larger) bin, so alloc() can jump
// straight to a bin that can satisfy the request.
// NOTE(review): extraction dropped lines here, including the first
// clause of the for-loop header before L46's continuation.
inline void PageAllocZone::remove_chunk(Page *chunk, int bin)
chunk->chunk_rmap_list.del();
// If bin's list is empty, forward it to the next non-empty bin.
Page *head = bin_to_head(bin);
if (head->chunk_rmap_list.empty()) {
Page *newforward = bin_to_head(bin + 1)->free.otherside;
assert(head->free.otherside == head);
// Walk downward through smaller bins that forwarded to this (now
// empty) bin and retarget them at the next non-empty bin.
p - bins >= 0 && p->free.otherside == head; p--)
p->free.otherside = newforward;
// Push "chunk" onto the front of "bin"'s free list.  If the bin was
// previously empty (its head forwarded elsewhere), clear the forwarding
// on this bin and on any smaller bins that pointed past it, so lookups
// now stop here.
// NOTE(review): extraction dropped lines inside the do/while below
// (the declaration of "p" and the loop opening are missing).
inline void PageAllocZone::add_to_bin(Page *chunk, int bin)
Page *head = bin_to_head(bin);
Page *oldforward = head->free.otherside;
head->chunk_rmap_list.add_front(&chunk->chunk_rmap_list);
// Remove bin's forwarding if it was empty, along with smaller
// bins pointing past this bin.
if (oldforward != head) {
p->free.otherside = head;
} while (p - bins >= 0 && p->free.otherside == oldforward);
// Break off a piece from the end of the chunk, rather than the start.
// This way, only the size of the remaining chunk needs to be adjusted
// as long as it still fits in the same bin.
//
// start      -- first page of the free chunk being split
// num_pages  -- number of pages being carved off (from the tail)
// chunk_size -- current size of the chunk, in pages
// bin        -- bin the chunk currently lives in
// NOTE(review): the tail of this body (presumably returning the start
// of the carved-off piece) is missing from this view.
Page *PageAllocZone::shrink_chunk(Page *start, int num_pages,
size_t chunk_size, int bin)
size_t size_left = chunk_size - num_pages;
Page *newend = start + size_left - 1;
// Re-link the symmetric end pointers for the shrunken remainder.
start->free.otherside = newend;
newend->free.otherside = start;
// If the remainder no longer meets this bin's minimum size, move it
// to the (smaller) bin it now belongs in.
if (size_left < bin_to_size(bin)) {
remove_chunk(start, bin);
add_to_bin(start, size_to_bin_alloc(size_left));
// Allocate a contiguous run of num_pages pages from this zone.
// Takes the zone lock (IRQ-safe, recursive), finds the first non-empty
// bin large enough via the bin-forwarding pointers, splits the chunk if
// it is bigger than needed, and marks every allocated page InUse with a
// refcount of 1.
// NOTE(review): extraction dropped several lines here (e.g. the
// empty-list/out-of-memory branch around the L77 assert, and the
// function's closing/return) -- the surviving statements are preserved
// exactly as found.
struct Page *PageAllocZone::alloc(uint num_pages)
Lock::AutoSpinLockRecIRQ autolock(lock);
assert(num_pages > 0);
assert(num_pages <= bin_to_size(num_bins - 1));
// Round the request up to a bin; follow that bin's forwarding pointer
// to the head of the first bin that actually has chunks.
int bin = size_to_bin_alloc(num_pages);
Page *head = bin_to_head(bin)->free.otherside;
int realbin = head - bins;
Util::List *list = &head->chunk_rmap_list;
// Only the largest bin is allowed to be the final forwarding target.
assert(realbin == num_bins - 1);
// Take the first chunk on the list.
head = list->next->listentry(Page, chunk_rmap_list);
assert(head->flags & Page::Free);
size_t size = chunk_size(head);
assert(size >= bin_to_size(bin));
assert(size >= (uint)num_pages);
// Oversized chunk: carve our allocation off its tail.
if (size != num_pages)
head = shrink_chunk(head, num_pages, size, realbin);
remove_chunk(head, realbin);
// Mark each page of the allocation in-use with an initial reference.
for (Page *p = head; p != head + num_pages; p++) {
assert(p->flags & Page::Free);
assert(!(p->flags & Page::InUse));
assert(p->chunk_rmap_list.empty());
p->flags = (p->flags & ~Page::Free) | Page::InUse;
p->inuse.refcount = 1;
// Return a contiguous run of num_pages pages starting at "head" to the
// zone.  Marks each page Free, coalesces with any free chunk immediately
// before and/or after the run, fixes up the symmetric end pointers, and
// files the merged chunk in the bin matching its new size.
// NOTE(review): extraction dropped lines here too (e.g. the declaration
// of "bin" before L113 and several closing braces); statements are
// preserved exactly as found.
void PageAllocZone::free(struct Page *head, size_t num_pages)
Lock::AutoSpinLockRecIRQ autolock(lock);
assert(num_pages > 0);
// Flip each page from InUse to Free.
for (Page *p = head; p != head + num_pages; p++) {
assert(!(p->flags & Page::Free));
assert(p->chunk_rmap_list.empty());
p->flags = (p->flags & ~Page::InUse) | Page::Free;
// Combine the newly free chunk with any adjacent free chunks,
// regardless of what bin they are in.
Page *prevpage = head - 1;
Page *nextpage = head + num_pages;
// Merge with a free chunk ending immediately before us (bounds-checked
// against the start of the zone).
if (prevpage - pages >= start && (prevpage->flags & Page::Free)) {
// otherside of the last page of the previous chunk points at its
// first page.
Page *prevchunk = prevpage->free.otherside;
assert(prevchunk->flags & Page::Free);
Page *end = head + num_pages - 1;
prevchunk->free.otherside = end;
end->free.otherside = prevchunk;
size_t prevsize = head - prevchunk;
assert(prevsize > 0);
assert(prevsize < zonesize);
num_pages += prevsize;
bin = size_to_bin_free(prevsize);
// Merge with a free chunk starting immediately after us (bounds-checked
// against the end of the zone).  NOTE(review): "prevsize" here actually
// holds the NEXT chunk's size -- misleading name, kept as-is.
if (nextpage - pages <= end && nextpage->flags & Page::Free) {
size_t prevsize = chunk_size(nextpage);
num_pages += prevsize;
Page *end = nextpage->free.otherside;
remove_chunk(nextpage, size_to_bin_free(prevsize));
// Link the far end of the following chunk back to our head.
end->free.otherside = head;
head->free.otherside = end;
// File the (possibly merged) chunk under the bin for its final size.
int newbin = size_to_bin_free(num_pages);
remove_chunk(head, bin);
assert(head->free.otherside == head + num_pages - 1);
head->free.otherside = head + num_pages - 1;
add_to_bin(head, newbin);
// Initialize this zone to cover page numbers [base, base + size - 1].
// Verifies the range fits within physical memory, clears the per-page
// state, and sets up every bin empty with its forwarding pointer aimed
// at the largest bin.
// NOTE(review): extraction dropped lines here (e.g. the assignments of
// "start"/"zonesize" and part of the per-page loop body).
void PageAllocZone::init(uintptr_t base, size_t size)
// base/size are in pages, so compare against total physical page count.
assert(base + size <= Arch::mem_end / Arch::page_size);
end = base + size - 1;
// Every page in the zone must start out with no state.
for (Page *p = &pages[start]; p <= &pages[end]; p++) {
assert(p->flags == 0);
// All bins start empty; an empty bin forwards to the largest bin,
// which is the terminal forwarding target (see remove_chunk/alloc).
for (int i = 0; i < num_bins; i++) {
Page *bin = bin_to_head(i);
bin->free.otherside = bin_to_head(num_bins - 1);
bin->chunk_rmap_list.init();
// Allocate num_pages contiguous pages from the first zone in "zonelist"
// that can satisfy the request; throws OutOfMemory if none can.
// NOTE(review): the loop advancing through zonelist and the success
// return are missing from this view; only the first-zone attempt and
// the failure path survive.
Page *PageAlloc::alloc(uint num_pages, PageAllocZone *const *zonelist)
Page *ret = (*zonelist)->alloc(num_pages);
// FIXME: Deliver System.Traps.ReduceMemoryUsage first
throw_idl(OutOfMemory);
// Release this single page back to the allocator.  Thin convenience
// wrapper over PageAlloc::free() with a count of 1.
void Page::free_page()
PageAlloc::free(this, 1);
// The kernel's global page-allocator instance.
PageAlloc page_alloc;