]> git.buserror.net Git - polintos/scott/priv.git/blob - kernel/mem/pagealloc.cc
Initial checkin from Perforce.
[polintos/scott/priv.git] / kernel / mem / pagealloc.cc
1 // mem/pagealloc.cc -- low-level page allocator
2 //
3 // This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
4 // 
5 // Permission is hereby granted, free of charge, to any person obtaining a copy of
6 // this software and associated documentation files (the "Software"), to deal with
7 // the Software without restriction, including without limitation the rights to
8 // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
9 // of the Software, and to permit persons to whom the Software is furnished to do
10 // so, subject to the following conditions:
11 // 
12 //     * Redistributions of source code must retain the above copyright notice,
13 //       this list of conditions and the following disclaimers.
14 // 
15 //     * Redistributions in binary form must reproduce the above copyright notice,
16 //       this list of conditions and the following disclaimers in the
17 //       documentation and/or other materials provided with the distribution.
18 // 
19 //     * The names of the Software's authors and/or contributors
20 //       may not be used to endorse or promote products derived from
21 //       this Software without specific prior written permission.
22 // 
23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
25 // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
26 // CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
29 // SOFTWARE.
30
31 #include <kern/kernel.h>
32 #include <kern/pagealloc.h>
33 #include <kern/paging.h>
34 #include <kern/bitops.h>
35 #include <kern/list.h>
36 #include <kern/mem.h>
37 #include <limits.h>
38
namespace Mem {
	// Global table of Page structs, one entry per physical page frame
	// (indexed by page number -- see pages[start] in init()), plus a
	// pointer to its last entry.
	// NOTE(review): declared here but presumably populated by arch
	// setup code; not initialized anywhere in this file -- confirm.
	Page *pages, *last_page;
41         inline size_t PageAllocZone::chunk_size(Page *start)
42         {
43                 ptrdiff_t ret = start->free.otherside - start + 1;
44                 assert(ret >= 1);
45                 assert(static_cast<size_t>(ret) <= zonesize);
46                 return static_cast<size_t>(ret);
47         }
48
49         inline Page *PageAllocZone::bin_to_head(int bin)
50         {
51                 return &bins[bin];
52         }
53
54         inline uint PageAllocZone::bin_to_size(int bin)
55         {
56                 return 1 << bin;
57         }
58         
59         inline int PageAllocZone::size_to_bin_alloc(size_t size)
60         {
61                 int bin = ll_get_order_round_up(size);
62                 
63                 if (bin >= num_bins)
64                         bin = num_bins - 1;
65                 
66                 return bin;
67         }
68
69         inline int PageAllocZone::size_to_bin_free(size_t size)
70         {
71                 int bin = ll_get_order_round_down(size);
72                 
73                 if (bin >= num_bins)
74                         bin = num_bins - 1;
75                 
76                 return bin;
77         }
78
	// Unlink "chunk" from bin's free list.  Empty bins keep a forwarding
	// pointer (stored in the head's free.otherside) to the next larger
	// non-empty bin, so if this removal empties the bin, repoint its
	// forwarding -- and that of any smaller bins forwarding through it.
	inline void PageAllocZone::remove_chunk(Page *chunk, int bin)
	{
		chunk->chunk_rmap_list.del();
		
		// If bin's list is empty, forward it to the next non-empty bin.
		
		Page *head = bin_to_head(bin);
		if (head->chunk_rmap_list.empty()) {
			// bin+1's forwarding already names the nearest non-empty
			// bin above (or itself, if non-empty), so reuse it.
			// NOTE(review): reads bins[num_bins] when bin is the top
			// bin -- assumes a sentinel entry past the array; the
			// bins declaration is in the header, confirm there.
			Page *newforward = bin_to_head(bin + 1)->free.otherside;
			assert(head->free.otherside == head);

			// Walk downward, redirecting this bin and every lower
			// empty bin that forwarded to it.
			for (Page *p = head;
			     p - bins >= 0 && p->free.otherside == head; p--)
				p->free.otherside = newforward;
		}
	}
95
	// Push "chunk" onto the front of bin's free list.  If the bin was
	// empty (its head forwarded elsewhere), cancel that forwarding, and
	// likewise on every smaller empty bin that forwarded past this bin.
	inline void PageAllocZone::add_to_bin(Page *chunk, int bin)
	{
		Page *head = bin_to_head(bin);
		Page *oldforward = head->free.otherside;

		head->chunk_rmap_list.add_front(&chunk->chunk_rmap_list);
		
		// Remove bin's forwarding if it was empty, along with smaller
		// bins pointing past this bin.
		
		if (oldforward != head) {
			// Walk downward; stop at the first bin that forwarded
			// somewhere else (i.e. not through this bin) or at bin 0.
			Page *p = head;
			do {
				p->free.otherside = head;
				p--;
			} while (p - bins >= 0 && p->free.otherside == oldforward);
		}
	}
114
115         // Break off a piece from the end of the chunk, rather than the start. 
116         // This way, only the size of the remaining chunk needs to be adjusted
117         // as long as it still fits in the same bin.
118         
119         Page *PageAllocZone::shrink_chunk(Page *start, int num_pages,
120                                           size_t chunk_size, int bin)
121         {
122                 size_t size_left = chunk_size - num_pages;
123                 Page *newend = start + size_left - 1;
124                 
125                 start->free.otherside = newend;
126                 newend->free.otherside = start;
127                 
128                 if (size_left < bin_to_size(bin)) {
129                         remove_chunk(start, bin);
130                         add_to_bin(start, size_to_bin_alloc(size_left));
131                 }
132                 
133                 return newend + 1;
134         }
135         
	// Allocate a physically contiguous run of num_pages pages from this
	// zone.  On success returns the run's first Page, with every page
	// flagged InUse and refcount set to 1; returns NULL if no free chunk
	// is large enough.  Takes the zone spinlock (IRQ-safe, recursive).
	struct Page *PageAllocZone::alloc(uint num_pages)
	{
		Lock::AutoSpinLockRecIRQ autolock(lock);
	
		assert(num_pages > 0);
		assert(num_pages <= bin_to_size(num_bins - 1));
	
		// Follow the requested bin's forwarding pointer to the nearest
		// bin (possibly larger) that actually holds free chunks.
		int bin = size_to_bin_alloc(num_pages);
		Page *head = bin_to_head(bin)->free.otherside;
		int realbin = head - bins;
		Util::List *list = &head->chunk_rmap_list;
		
		if (list->empty()) {
			// Only the top bin may forward to an empty bin, meaning
			// the zone has nothing big enough.
			assert(realbin == num_bins - 1);
			return NULL;
		}
		
		// First chunk on the (non-empty) list.
		head = list->next->listentry(Page, chunk_rmap_list);
		
		assert(head->flags & Page::Free);
		
		size_t size = chunk_size(head);
		assert(size >= bin_to_size(bin));
		assert(size >= (uint)num_pages);
		
		// Exact fit: just unlink it.  Otherwise split num_pages off the
		// chunk's tail; shrink_chunk returns the piece broken off.
		if (size != num_pages)
			head = shrink_chunk(head, num_pages, size, realbin);
		else
			remove_chunk(head, realbin);

		// Transition each allocated page Free -> InUse with an initial
		// reference count of one.
		for (Page *p = head; p != head + num_pages; p++) {
			assert(p->flags & Page::Free);
			assert(!(p->flags & Page::InUse));
			assert(p->chunk_rmap_list.empty());
			p->flags = (p->flags & ~Page::Free) | Page::InUse;
			p->inuse.refcount = 1;
		}

		return head;
	}
176         
	// Return num_pages pages starting at "head" to this zone's free
	// pool, coalescing with any free chunk immediately below or above.
	// Takes the zone spinlock (IRQ-safe, recursive).
	void PageAllocZone::free(struct Page *head, size_t num_pages)
	{
		Lock::AutoSpinLockRecIRQ autolock(lock);

		assert(num_pages > 0);
	
		// Mark every page Free before touching the free lists.
		for (Page *p = head; p != head + num_pages; p++) {
			assert(!(p->flags & Page::Free));
			assert(p->chunk_rmap_list.empty());
			p->flags = (p->flags & ~Page::InUse) | Page::Free;
		}
		
		// Combine the newly free chunk with any adjacent free chunks,
		// regardless of what bin they are in.

		Page *prevpage = head - 1;
		Page *nextpage = head + num_pages;
		
		// Bin currently holding the (possibly grown) chunk, or -1 if it
		// is not yet on any free list.
		int bin = -1;
						
		if (prevpage - pages >= start && (prevpage->flags & Page::Free)) {
			// The page just below is the tail of a free chunk; its
			// free.otherside back-pointer locates that chunk's start.
			Page *prevchunk = prevpage->free.otherside;
			assert(prevchunk->flags & Page::Free);

			// NOTE(review): this local shadows the member "end"
			// (the zone's last page index) until the closing brace.
			Page *end = head + num_pages - 1;
			prevchunk->free.otherside = end;
			end->free.otherside = prevchunk;
			
			size_t prevsize = head - prevchunk;
			assert(prevsize > 0);
			assert(prevsize < zonesize);

			// Grow downward: the merged chunk now starts at prevchunk
			// and remains linked into prevchunk's old bin for now.
			head = prevchunk;
			num_pages += prevsize;
			bin = size_to_bin_free(prevsize);
		}

		if (nextpage - pages <= end && nextpage->flags & Page::Free) {
			// The page just above begins a free chunk; unlink it and
			// splice its pages onto the end of ours.
			size_t prevsize = chunk_size(nextpage);
			num_pages += prevsize;
			Page *end = nextpage->free.otherside;
			
			remove_chunk(nextpage, size_to_bin_free(prevsize));
			
			end->free.otherside = head;
			head->free.otherside = end;
		}
		
		int newbin = size_to_bin_free(num_pages);
		
		// Re-bin only if coalescing changed which bin the chunk belongs
		// in (or if it was never on a free list to begin with).
		if (bin != newbin) {
			if (bin != -1) {
				remove_chunk(head, bin);
				assert(head->free.otherside == head + num_pages - 1);
			}

			head->free.otherside = head + num_pages - 1;
			add_to_bin(head, newbin);
		}
	}
237         
238         void PageAllocZone::init(uintptr_t base, size_t size)
239         {
240                 assert(size > 0);
241                 assert(base + size <= Arch::mem_end / Arch::page_size);
242         
243                 zonesize = size;
244                 start = base;
245                 end = base + size - 1;
246
247                 for (Page *p = &pages[start]; p <= &pages[end]; p++) {
248                         assert(p->flags == 0);
249                         new(p) Page;
250                         p->zone = this;
251                 }
252                 
253                 for (int i = 0; i < num_bins; i++) {
254                         Page *bin = bin_to_head(i);
255                         
256                         bin->free.otherside = bin_to_head(num_bins - 1);
257                         bin->chunk_rmap_list.init();
258                 }
259         }
260         
261         Page *PageAlloc::alloc(uint num_pages, PageAllocZone *const *zonelist)
262         {
263                 while (*zonelist) {
264                         Page *ret = (*zonelist)->alloc(num_pages);
265                         if (ret)
266                                 return ret;
267                         
268                         zonelist++;
269                 }
270                 
271                 // FIXME: Deliver System.Traps.ReduceMemoryUsage first
272                 throw_idl(OutOfMemory);
273         }
274         
	// Convenience wrapper: release exactly one page (this one) through
	// the global page allocator.
	void Page::free_page()
	{
		PageAlloc::free(this, 1);
	}

	// The global page allocator instance.
	PageAlloc page_alloc;
281 }