1 // mem/pagealloc.cc -- low-level page allocator
3 // This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
5 // Permission is hereby granted, free of charge, to any person obtaining a copy of
6 // this software and associated documentation files (the "Software"), to deal with
7 // the Software without restriction, including without limitation the rights to
8 // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
9 // of the Software, and to permit persons to whom the Software is furnished to do
10 // so, subject to the following condition:
12 // The above copyright notice and this permission notice shall be
13 // included in all copies or substantial portions of the Software.
15 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17 // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 // CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
23 #include <kern/kernel.h>
24 #include <kern/pagealloc.h>
25 #include <kern/paging.h>
26 #include <kern/bitops.h>
27 #include <kern/list.h>
// Global array of per-page metadata, one Page per physical page frame
// (init() below indexes it by page number), plus a pointer to the last
// entry.  NOTE(review): definitions of the Page struct live in
// kern/pagealloc.h -- not visible in this chunk.
32 Page *pages, *last_page;
// Size in pages of the free chunk starting at "start".  The first page of
// a free chunk points at its last page via free.otherside (and vice
// versa), so the size is the pointer difference plus one.
// NOTE(review): lines are elided in this view (no visible braces and a
// probable assert on ret's lower bound); only comments added here.
33 inline size_t PageAllocZone::chunk_size(Page *start)
35 	ptrdiff_t ret = start->free.otherside - start + 1;
	// A chunk can never exceed the whole zone.
37 	assert(static_cast<size_t>(ret) <= zonesize);
38 	return static_cast<size_t>(ret);
// Return the sentinel list-head Page for free-list bin "bin".
// NOTE(review): the body is elided in this view -- presumably it indexes
// the zone's "bins" sentinel array (remove_chunk below computes
// "p - bins"); confirm against the full source.
41 inline Page *PageAllocZone::bin_to_head(int bin)
// Minimum chunk size, in pages, of bin "bin".
// NOTE(review): body elided in this view; since the size<->bin maps below
// use ll_get_order_*, this is presumably 1 << bin -- confirm against the
// full source.
46 inline uint PageAllocZone::bin_to_size(int bin)
// Bin to *allocate* from for a request of "size" pages: the order is
// rounded up so every chunk found in the chosen bin is guaranteed to be
// at least "size" pages.
51 inline int PageAllocZone::size_to_bin_alloc(size_t size)
53 	int bin = ll_get_order_round_up(size);
// NOTE(review): rest of the body elided in this view -- presumably clamps
// bin to num_bins - 1 and returns it (alloc() asserts realbin ==
// num_bins - 1 on overflow); confirm against the full source.
// Bin a *free* chunk of "size" pages belongs in: the order is rounded
// down so the bin never overstates the chunk's size.
61 inline int PageAllocZone::size_to_bin_free(size_t size)
63 	int bin = ll_get_order_round_down(size);
// NOTE(review): rest of the body elided in this view -- presumably clamps
// bin to num_bins - 1 and returns it; confirm against the full source.
// Unlink "chunk" from bin's free list.  Empty bins keep a forwarding
// pointer (in the sentinel's free.otherside) to the next non-empty bin
// so allocation can skip them in O(1); this maintains that invariant.
71 inline void PageAllocZone::remove_chunk(Page *chunk, int bin)
73 	chunk->chunk_rmap_list.del();
75 	// If bin's list is empty, forward it to the next non-empty bin.
77 	Page *head = bin_to_head(bin);
78 	if (head->chunk_rmap_list.empty()) {
79 		Page *newforward = bin_to_head(bin + 1)->free.otherside;
		// A non-empty bin's sentinel points at itself.
80 		assert(head->free.otherside == head);
// NOTE(review): the for-loop header is elided in this view; the visible
// continuation walks lower bins (p--) whose forwarding still points at
// this head and redirects them to the new forward target.
83 			p - bins >= 0 && p->free.otherside == head; p--)
84 			p->free.otherside = newforward;
// Insert "chunk" at the front of bin's free list, and since the bin is
// now non-empty, repoint any forwarding (from this bin and from smaller
// empty bins that forwarded past it) back at this bin's sentinel.
88 inline void PageAllocZone::add_to_bin(Page *chunk, int bin)
90 	Page *head = bin_to_head(bin);
91 	Page *oldforward = head->free.otherside;
93 	head->chunk_rmap_list.add_front(&chunk->chunk_rmap_list);
95 	// Remove bin's forwarding if it was empty, along with smaller
96 	// bins pointing past this bin.
98 	if (oldforward != head) {
// NOTE(review): the do-loop header (and the declaration/stepping of p)
// is elided in this view; the visible tail walks bins downward while
// they still forward to oldforward.
101 			p->free.otherside = head;
103 		} while (p - bins >= 0 && p->free.otherside == oldforward);
107 // Break off a piece from the end of the chunk, rather than the start.
108 // This way, only the size of the remaining chunk needs to be adjusted
109 // as long as it still fits in the same bin.
// Split "num_pages" pages off the tail of the free chunk at "start"
// (currently "chunk_size" pages, living in "bin").  The remainder keeps
// its bin unless it shrank below the bin's minimum, in which case it is
// re-binned.  NOTE(review): the return statement is elided in this view;
// presumably returns start + size_left, the broken-off tail -- confirm
// against the full source (alloc() uses the result as the allocation).
111 Page *PageAllocZone::shrink_chunk(Page *start, int num_pages,
112                                   size_t chunk_size, int bin)
114 	size_t size_left = chunk_size - num_pages;
115 	Page *newend = start + size_left - 1;
	// Re-link the remaining chunk's first/last pages to each other.
117 	start->free.otherside = newend;
118 	newend->free.otherside = start;
120 	if (size_left < bin_to_size(bin)) {
121 		remove_chunk(start, bin);
122 		add_to_bin(start, size_to_bin_alloc(size_left));
// Allocate num_pages contiguous pages from this zone.  Takes the zone
// lock (recursive, IRQ-safe) for the duration.  Each allocated page is
// flagged InUse with refcount 1.
// NOTE(review): several lines are elided in this view (opening brace,
// the empty-bin handling around the realbin assert, the return of head,
// and closing braces); only comments added here.
128 struct Page *PageAllocZone::alloc(uint num_pages)
130 	Lock::AutoSpinLockRecIRQ autolock(lock);
132 	assert(num_pages > 0);
	// Requests larger than the biggest bin can never be satisfied.
133 	assert(num_pages <= bin_to_size(num_bins - 1));
135 	int bin = size_to_bin_alloc(num_pages);
	// Follow the bin's forwarding pointer to the first bin that might
	// actually have a chunk (empty bins forward to larger ones).
136 	Page *head = bin_to_head(bin)->free.otherside;
137 	int realbin = head - bins;
138 	Util::List *list = &head->chunk_rmap_list;
// NOTE(review): branch header elided -- the assert below appears to be
// the "list is empty" (out of memory in-zone) path: only the last bin
// may have nowhere further to forward.
141 		assert(realbin == num_bins - 1);
	// First chunk on the chosen bin's list.
145 	head = list->next->listentry(Page, chunk_rmap_list);
147 	assert(head->flags & Page::Free);
149 	size_t size = chunk_size(head);
150 	assert(size >= bin_to_size(bin));
151 	assert(size >= (uint)num_pages);
	// Oversized chunk: split our piece off its tail.
153 	if (size != num_pages)
154 		head = shrink_chunk(head, num_pages, size, realbin);
156 	remove_chunk(head, realbin);
	// Mark every page of the allocation in-use with an initial refcount.
158 	for (Page *p = head; p != head + num_pages; p++) {
159 		assert(p->flags & Page::Free);
160 		assert(!(p->flags & Page::InUse));
161 		assert(p->chunk_rmap_list.empty());
162 		p->flags = (p->flags & ~Page::Free) | Page::InUse;
163 		p->inuse.refcount = 1;
// Return num_pages contiguous pages starting at "head" to this zone,
// coalescing with free neighbors on both sides before re-binning.
// Takes the zone lock (recursive, IRQ-safe) for the duration.
// NOTE(review): several lines are elided in this view (opening brace,
// the declaration of "bin", the "head = prevchunk" style rebasing after
// the backward merge, and closing braces); only comments added here.
169 void PageAllocZone::free(struct Page *head, size_t num_pages)
171 	Lock::AutoSpinLockRecIRQ autolock(lock);
173 	assert(num_pages > 0);
	// Flip every page from InUse to Free.
175 	for (Page *p = head; p != head + num_pages; p++) {
176 		assert(!(p->flags & Page::Free));
177 		assert(p->chunk_rmap_list.empty());
178 		p->flags = (p->flags & ~Page::InUse) | Page::Free;
181 	// Combine the newly free chunk with any adjacent free chunks,
182 	// regardless of what bin they are in.
184 	Page *prevpage = head - 1;
185 	Page *nextpage = head + num_pages;
	// Merge backward: the page just before us is free, so splice our
	// pages onto the end of its chunk.
189 	if (prevpage - pages >= start && (prevpage->flags & Page::Free)) {
		// A free chunk's last page points back at its first page.
190 		Page *prevchunk = prevpage->free.otherside;
191 		assert(prevchunk->flags & Page::Free);
193 		Page *end = head + num_pages - 1;
194 		prevchunk->free.otherside = end;
195 		end->free.otherside = prevchunk;
197 		size_t prevsize = head - prevchunk;
198 		assert(prevsize > 0);
199 		assert(prevsize < zonesize);
202 		num_pages += prevsize;
203 		bin = size_to_bin_free(prevsize);
	// Merge forward: the page just after us starts a free chunk; pull
	// it out of its bin and absorb it.
206 	if (nextpage - pages <= end && nextpage->flags & Page::Free) {
207 		size_t prevsize = chunk_size(nextpage);
208 		num_pages += prevsize;
209 		Page *end = nextpage->free.otherside;
211 		remove_chunk(nextpage, size_to_bin_free(prevsize));
213 		end->free.otherside = head;
214 		head->free.otherside = end;
217 	int newbin = size_to_bin_free(num_pages);
// NOTE(review): the condition guarding this remove_chunk (presumably
// "merged backward and the bin changed") is elided in this view.
221 		remove_chunk(head, bin);
222 		assert(head->free.otherside == head + num_pages - 1);
	// Link the final chunk's endpoints and file it in its bin.
225 	head->free.otherside = head + num_pages - 1;
226 	add_to_bin(head, newbin);
// Initialize this zone to cover page numbers [base, base + size), which
// must lie within physical memory.  All covered Page entries must be
// zeroed by the caller; the bins start empty, each forwarding to the
// last bin.  NOTE(review): several lines are elided in this view
// (opening brace, the assignments of start/zonesize, loop bodies and
// closing braces); only comments added here.
230 void PageAllocZone::init(uintptr_t base, size_t size)
	// base/size are in pages, so compare against total page count.
233 	assert(base + size <= Arch::mem_end / Arch::page_size);
237 	end = base + size - 1;
	// Sanity-check that every covered page starts with clear flags.
239 	for (Page *p = &pages[start]; p <= &pages[end]; p++) {
240 		assert(p->flags == 0);
	// Set up each bin's sentinel: empty list, forwarding to the
	// largest bin (nothing is free yet).
245 	for (int i = 0; i < num_bins; i++) {
246 		Page *bin = bin_to_head(i);
248 		bin->free.otherside = bin_to_head(num_bins - 1);
249 		bin->chunk_rmap_list.init();
// Allocate num_pages from the first zone in "zonelist" that can satisfy
// the request; throws OutOfMemory (IDL exception) if no zone can.
// NOTE(review): the loop over zonelist (and the success return of ret)
// is elided in this view -- presumably zonelist is a NULL-terminated
// array of zone pointers tried in order; confirm against the full
// source.
253 Page *PageAlloc::alloc(uint num_pages, PageAllocZone *const *zonelist)
256 	Page *ret = (*zonelist)->alloc(num_pages);
263 	// FIXME: Deliver System.Traps.ReduceMemoryUsage first
264 	throw_idl(OutOfMemory);
// Release this single page back to the page allocator.
// NOTE(review): braces elided in this view; only comments added.
267 void Page::free_page()
269 	PageAlloc::free(this, 1);
// Global page-allocator instance.
272 PageAlloc page_alloc;