Switch to a simple X11-style license.
[polintos/scott/priv.git] / kernel / mem / pagealloc.cc
1 // mem/pagealloc.cc -- low-level page allocator
2 //
3 // This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
4 // 
5 // Permission is hereby granted, free of charge, to any person obtaining a copy of
6 // this software and associated documentation files (the "Software"), to deal with
7 // the Software without restriction, including without limitation the rights to
8 // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
9 // of the Software, and to permit persons to whom the Software is furnished to do
10 // so, subject to the following condition:
11 // 
12 // The above copyright notice and this permission notice shall be
13 // included in all copies or substantial portions of the Software.
14 // 
15 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17 // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
18 // CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
21 // SOFTWARE.
22
23 #include <kern/kernel.h>
24 #include <kern/pagealloc.h>
25 #include <kern/paging.h>
26 #include <kern/bitops.h>
27 #include <kern/list.h>
28 #include <kern/mem.h>
29 #include <limits.h>
30
namespace Mem {
	// One Page struct per physical page frame; this code indexes it as
	// pages[frame-number] (see PageAllocZone::init and free).
	// NOTE(review): last_page is not referenced in this file -- presumably
	// the end of the array, set by arch setup code; confirm in the header.
	Page *pages, *last_page;
33         inline size_t PageAllocZone::chunk_size(Page *start)
34         {
35                 ptrdiff_t ret = start->free.otherside - start + 1;
36                 assert(ret >= 1);
37                 assert(static_cast<size_t>(ret) <= zonesize);
38                 return static_cast<size_t>(ret);
39         }
40
41         inline Page *PageAllocZone::bin_to_head(int bin)
42         {
43                 return &bins[bin];
44         }
45
46         inline uint PageAllocZone::bin_to_size(int bin)
47         {
48                 return 1 << bin;
49         }
50         
51         inline int PageAllocZone::size_to_bin_alloc(size_t size)
52         {
53                 int bin = ll_get_order_round_up(size);
54                 
55                 if (bin >= num_bins)
56                         bin = num_bins - 1;
57                 
58                 return bin;
59         }
60
61         inline int PageAllocZone::size_to_bin_free(size_t size)
62         {
63                 int bin = ll_get_order_round_down(size);
64                 
65                 if (bin >= num_bins)
66                         bin = num_bins - 1;
67                 
68                 return bin;
69         }
70
	// Unlink "chunk" from the free list of "bin".  If that leaves the bin
	// empty, repoint its forwarding pointer (free.otherside of the bin
	// head) -- and that of any smaller bins that were forwarding here --
	// at the next non-empty bin, so alloc() can skip empty bins in O(1).
	inline void PageAllocZone::remove_chunk(Page *chunk, int bin)
	{
		chunk->chunk_rmap_list.del();
		
		// If bin's list is empty, forward it to the next non-empty bin.
		
		Page *head = bin_to_head(bin);
		if (head->chunk_rmap_list.empty()) {
			// NOTE(review): bin_to_head(bin + 1) reads one slot past
			// "bin"; when bin == num_bins - 1 this assumes the bins
			// array has an extra sentinel entry -- confirm in the
			// class declaration (kern/pagealloc.h).
			Page *newforward = bin_to_head(bin + 1)->free.otherside;
			assert(head->free.otherside == head);

			// A non-empty bin's head forwards to itself; bins that
			// forwarded to this (now empty) bin must be redirected
			// to the new target as well.  Walk downward until a bin
			// that forwards elsewhere (or the start of the array).
			for (Page *p = head;
			     p - bins >= 0 && p->free.otherside == head; p--)
				p->free.otherside = newforward;
		}
	}
87
	// Push "chunk" onto the front of "bin"'s free list.  If the bin was
	// empty (its head forwarded elsewhere), restore its self-forwarding
	// and redirect any smaller bins that were forwarding past it, so
	// searches from below now stop at this newly non-empty bin.
	inline void PageAllocZone::add_to_bin(Page *chunk, int bin)
	{
		Page *head = bin_to_head(bin);
		Page *oldforward = head->free.otherside;

		head->chunk_rmap_list.add_front(&chunk->chunk_rmap_list);
		
		// Remove bin's forwarding if it was empty, along with smaller
		// bins pointing past this bin.
		
		if (oldforward != head) {
			Page *p = head;
			do {
				// Every bin that shared the stale target now stops
				// here instead; quit at the first bin forwarding
				// somewhere else (or at the start of the array).
				p->free.otherside = head;
				p--;
			} while (p - bins >= 0 && p->free.otherside == oldforward);
		}
	}
106
107         // Break off a piece from the end of the chunk, rather than the start. 
108         // This way, only the size of the remaining chunk needs to be adjusted
109         // as long as it still fits in the same bin.
110         
111         Page *PageAllocZone::shrink_chunk(Page *start, int num_pages,
112                                           size_t chunk_size, int bin)
113         {
114                 size_t size_left = chunk_size - num_pages;
115                 Page *newend = start + size_left - 1;
116                 
117                 start->free.otherside = newend;
118                 newend->free.otherside = start;
119                 
120                 if (size_left < bin_to_size(bin)) {
121                         remove_chunk(start, bin);
122                         add_to_bin(start, size_to_bin_alloc(size_left));
123                 }
124                 
125                 return newend + 1;
126         }
127         
128         struct Page *PageAllocZone::alloc(uint num_pages)
129         {
130                 Lock::AutoSpinLockRecIRQ autolock(lock);
131         
132                 assert(num_pages > 0);
133                 assert(num_pages <= bin_to_size(num_bins - 1));
134         
135                 int bin = size_to_bin_alloc(num_pages);
136                 Page *head = bin_to_head(bin)->free.otherside;
137                 int realbin = head - bins;
138                 Util::List *list = &head->chunk_rmap_list;
139                 
140                 if (list->empty()) {
141                         assert(realbin == num_bins - 1);
142                         return NULL;
143                 }
144                 
145                 head = list->next->listentry(Page, chunk_rmap_list);
146                 
147                 assert(head->flags & Page::Free);
148                 
149                 size_t size = chunk_size(head);
150                 assert(size >= bin_to_size(bin));
151                 assert(size >= (uint)num_pages);
152                 
153                 if (size != num_pages)
154                         head = shrink_chunk(head, num_pages, size, realbin);
155                 else
156                         remove_chunk(head, realbin);
157
158                 for (Page *p = head; p != head + num_pages; p++) {
159                         assert(p->flags & Page::Free);
160                         assert(!(p->flags & Page::InUse));
161                         assert(p->chunk_rmap_list.empty());
162                         p->flags = (p->flags & ~Page::Free) | Page::InUse;
163                         p->inuse.refcount = 1;
164                 }
165
166                 return head;
167         }
168         
169         void PageAllocZone::free(struct Page *head, size_t num_pages)
170         {
171                 Lock::AutoSpinLockRecIRQ autolock(lock);
172
173                 assert(num_pages > 0);
174         
175                 for (Page *p = head; p != head + num_pages; p++) {
176                         assert(!(p->flags & Page::Free));
177                         assert(p->chunk_rmap_list.empty());
178                         p->flags = (p->flags & ~Page::InUse) | Page::Free;
179                 }
180                 
181                 // Combine the newly free chunk with any adjacent free chunks,
182                 // regardless of what bin they are in.
183
184                 Page *prevpage = head - 1;
185                 Page *nextpage = head + num_pages;
186                 
187                 int bin = -1;
188                                 
189                 if (prevpage - pages >= start && (prevpage->flags & Page::Free)) {
190                         Page *prevchunk = prevpage->free.otherside;
191                         assert(prevchunk->flags & Page::Free);
192
193                         Page *end = head + num_pages - 1;
194                         prevchunk->free.otherside = end;
195                         end->free.otherside = prevchunk;
196                         
197                         size_t prevsize = head - prevchunk;
198                         assert(prevsize > 0);
199                         assert(prevsize < zonesize);
200
201                         head = prevchunk;
202                         num_pages += prevsize;
203                         bin = size_to_bin_free(prevsize);
204                 }
205
206                 if (nextpage - pages <= end && nextpage->flags & Page::Free) {
207                         size_t prevsize = chunk_size(nextpage);
208                         num_pages += prevsize;
209                         Page *end = nextpage->free.otherside;
210                         
211                         remove_chunk(nextpage, size_to_bin_free(prevsize));
212                         
213                         end->free.otherside = head;
214                         head->free.otherside = end;
215                 }
216                 
217                 int newbin = size_to_bin_free(num_pages);
218                 
219                 if (bin != newbin) {
220                         if (bin != -1) {
221                                 remove_chunk(head, bin);
222                                 assert(head->free.otherside == head + num_pages - 1);
223                         }
224
225                         head->free.otherside = head + num_pages - 1;
226                         add_to_bin(head, newbin);
227                 }
228         }
229         
230         void PageAllocZone::init(uintptr_t base, size_t size)
231         {
232                 assert(size > 0);
233                 assert(base + size <= Arch::mem_end / Arch::page_size);
234         
235                 zonesize = size;
236                 start = base;
237                 end = base + size - 1;
238
239                 for (Page *p = &pages[start]; p <= &pages[end]; p++) {
240                         assert(p->flags == 0);
241                         new(p) Page;
242                         p->zone = this;
243                 }
244                 
245                 for (int i = 0; i < num_bins; i++) {
246                         Page *bin = bin_to_head(i);
247                         
248                         bin->free.otherside = bin_to_head(num_bins - 1);
249                         bin->chunk_rmap_list.init();
250                 }
251         }
252         
253         Page *PageAlloc::alloc(uint num_pages, PageAllocZone *const *zonelist)
254         {
255                 while (*zonelist) {
256                         Page *ret = (*zonelist)->alloc(num_pages);
257                         if (ret)
258                                 return ret;
259                         
260                         zonelist++;
261                 }
262                 
263                 // FIXME: Deliver System.Traps.ReduceMemoryUsage first
264                 throw_idl(OutOfMemory);
265         }
266         
	// Release this page back to the allocator as a single-page chunk.
	void Page::free_page()
	{
		PageAlloc::free(this, 1);
	}
271
	// The single global page-allocator front end.
	PageAlloc page_alloc;
}