// mem/pagealloc.cc -- low-level page allocator
//
// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
//
// This software is provided 'as-is', without any express or implied warranty.
// In no event will the authors or contributors be held liable for any damages
// arising from the use of this software.
//
// Permission is hereby granted to everyone, free of charge, to use, copy,
// modify, prepare derivative works of, publish, distribute, perform,
// sublicense, and/or sell copies of the Software, provided that the above
// copyright notice and disclaimer of warranty be included in all copies or
// substantial portions of this software.
#include <kern/kernel.h>
#include <kern/pagealloc.h>
#include <kern/paging.h>
#include <kern/bitops.h>
#include <kern/list.h>
24 Page *pages, *last_page;
25 inline size_t PageAllocZone::chunk_size(Page *start)
27 ptrdiff_t ret = start->free.otherside - start + 1;
29 assert(static_cast<size_t>(ret) <= zonesize);
30 return static_cast<size_t>(ret);
33 inline Page *PageAllocZone::bin_to_head(int bin)
38 inline uint PageAllocZone::bin_to_size(int bin)
43 inline int PageAllocZone::size_to_bin_alloc(size_t size)
45 int bin = ll_get_order_round_up(size);
53 inline int PageAllocZone::size_to_bin_free(size_t size)
55 int bin = ll_get_order_round_down(size);
63 inline void PageAllocZone::remove_chunk(Page *chunk, int bin)
65 chunk->chunk_rmap_list.del();
67 // If bin's list is empty, forward it to the next non-empty bin.
69 Page *head = bin_to_head(bin);
70 if (head->chunk_rmap_list.empty()) {
71 Page *newforward = bin_to_head(bin + 1)->free.otherside;
72 assert(head->free.otherside == head);
75 p - bins >= 0 && p->free.otherside == head; p--)
76 p->free.otherside = newforward;
80 inline void PageAllocZone::add_to_bin(Page *chunk, int bin)
82 Page *head = bin_to_head(bin);
83 Page *oldforward = head->free.otherside;
85 head->chunk_rmap_list.add_front(&chunk->chunk_rmap_list);
87 // Remove bin's forwarding if it was empty, along with smaller
88 // bins pointing past this bin.
90 if (oldforward != head) {
93 p->free.otherside = head;
95 } while (p - bins >= 0 && p->free.otherside == oldforward);
99 // Break off a piece from the end of the chunk, rather than the start.
100 // This way, only the size of the remaining chunk needs to be adjusted
101 // as long as it still fits in the same bin.
103 Page *PageAllocZone::shrink_chunk(Page *start, int num_pages,
104 size_t chunk_size, int bin)
106 size_t size_left = chunk_size - num_pages;
107 Page *newend = start + size_left - 1;
109 start->free.otherside = newend;
110 newend->free.otherside = start;
112 if (size_left < bin_to_size(bin)) {
113 remove_chunk(start, bin);
114 add_to_bin(start, size_to_bin_alloc(size_left));
120 struct Page *PageAllocZone::alloc(uint num_pages)
122 Lock::AutoSpinLockRecIRQ autolock(lock);
124 assert(num_pages > 0);
125 assert(num_pages <= bin_to_size(num_bins - 1));
127 int bin = size_to_bin_alloc(num_pages);
128 Page *head = bin_to_head(bin)->free.otherside;
129 int realbin = head - bins;
130 Util::List *list = &head->chunk_rmap_list;
133 assert(realbin == num_bins - 1);
137 head = list->next->listentry(Page, chunk_rmap_list);
139 assert(head->flags & Page::Free);
141 size_t size = chunk_size(head);
142 assert(size >= bin_to_size(bin));
143 assert(size >= (uint)num_pages);
145 if (size != num_pages)
146 head = shrink_chunk(head, num_pages, size, realbin);
148 remove_chunk(head, realbin);
150 for (Page *p = head; p != head + num_pages; p++) {
151 assert(p->flags & Page::Free);
152 assert(!(p->flags & Page::InUse));
153 assert(p->chunk_rmap_list.empty());
154 p->flags = (p->flags & ~Page::Free) | Page::InUse;
155 p->inuse.refcount = 1;
161 void PageAllocZone::free(struct Page *head, size_t num_pages)
163 Lock::AutoSpinLockRecIRQ autolock(lock);
165 assert(num_pages > 0);
167 for (Page *p = head; p != head + num_pages; p++) {
168 assert(!(p->flags & Page::Free));
169 assert(p->chunk_rmap_list.empty());
170 p->flags = (p->flags & ~Page::InUse) | Page::Free;
173 // Combine the newly free chunk with any adjacent free chunks,
174 // regardless of what bin they are in.
176 Page *prevpage = head - 1;
177 Page *nextpage = head + num_pages;
181 if (prevpage - pages >= start && (prevpage->flags & Page::Free)) {
182 Page *prevchunk = prevpage->free.otherside;
183 assert(prevchunk->flags & Page::Free);
185 Page *end = head + num_pages - 1;
186 prevchunk->free.otherside = end;
187 end->free.otherside = prevchunk;
189 size_t prevsize = head - prevchunk;
190 assert(prevsize > 0);
191 assert(prevsize < zonesize);
194 num_pages += prevsize;
195 bin = size_to_bin_free(prevsize);
198 if (nextpage - pages <= end && nextpage->flags & Page::Free) {
199 size_t prevsize = chunk_size(nextpage);
200 num_pages += prevsize;
201 Page *end = nextpage->free.otherside;
203 remove_chunk(nextpage, size_to_bin_free(prevsize));
205 end->free.otherside = head;
206 head->free.otherside = end;
209 int newbin = size_to_bin_free(num_pages);
213 remove_chunk(head, bin);
214 assert(head->free.otherside == head + num_pages - 1);
217 head->free.otherside = head + num_pages - 1;
218 add_to_bin(head, newbin);
222 void PageAllocZone::init(uintptr_t base, size_t size)
225 assert(base + size <= Arch::mem_end / Arch::page_size);
229 end = base + size - 1;
231 for (Page *p = &pages[start]; p <= &pages[end]; p++) {
232 assert(p->flags == 0);
237 for (int i = 0; i < num_bins; i++) {
238 Page *bin = bin_to_head(i);
240 bin->free.otherside = bin_to_head(num_bins - 1);
241 bin->chunk_rmap_list.init();
245 Page *PageAlloc::alloc(uint num_pages, PageAllocZone *const *zonelist)
248 Page *ret = (*zonelist)->alloc(num_pages);
255 // FIXME: Deliver System.Traps.ReduceMemoryUsage first
256 throw_idl(OutOfMemory);
259 void Page::free_page()
261 PageAlloc::free(this, 1);
264 PageAlloc page_alloc;