// Source: kernel/mem/pagealloc.cc from polintos/scott/priv.git
// (git.buserror.net), snapshot at commit "minor doc updates".
1 // mem/pagealloc.cc -- low-level page allocator
2 //
3 // This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
4 // 
5 // This software is provided 'as-is', without any express or implied warranty.
6 // In no event will the authors or contributors be held liable for any damages
7 // arising from the use of this software.
8 // 
9 // Permission is hereby granted to everyone, free of charge, to use, copy,
10 // modify, prepare derivative works of, publish, distribute, perform,
11 // sublicense, and/or sell copies of the Software, provided that the above
12 // copyright notice and disclaimer of warranty be included in all copies or
13 // substantial portions of this software.
14
15 #include <kern/kernel.h>
16 #include <kern/pagealloc.h>
17 #include <kern/paging.h>
18 #include <kern/bitops.h>
19 #include <kern/list.h>
20 #include <kern/mem.h>
21 #include <limits.h>
22
namespace Mem {
	// Global page-frame database: one Page entry per physical page frame.
	// "pages" is the base of the array (indexed by physical page number --
	// see PageAllocZone::init, which constructs &pages[start..end] in
	// place) and "last_page" marks the final entry.
	Page *pages, *last_page;
25         inline size_t PageAllocZone::chunk_size(Page *start)
26         {
27                 ptrdiff_t ret = start->free.otherside - start + 1;
28                 assert(ret >= 1);
29                 assert(static_cast<size_t>(ret) <= zonesize);
30                 return static_cast<size_t>(ret);
31         }
32
33         inline Page *PageAllocZone::bin_to_head(int bin)
34         {
35                 return &bins[bin];
36         }
37
38         inline uint PageAllocZone::bin_to_size(int bin)
39         {
40                 return 1 << bin;
41         }
42         
43         inline int PageAllocZone::size_to_bin_alloc(size_t size)
44         {
45                 int bin = ll_get_order_round_up(size);
46                 
47                 if (bin >= num_bins)
48                         bin = num_bins - 1;
49                 
50                 return bin;
51         }
52
53         inline int PageAllocZone::size_to_bin_free(size_t size)
54         {
55                 int bin = ll_get_order_round_down(size);
56                 
57                 if (bin >= num_bins)
58                         bin = num_bins - 1;
59                 
60                 return bin;
61         }
62
	// Unlink "chunk" from the free list of "bin", maintaining the bin
	// forwarding pointers.  Each bin head's free.otherside acts as a
	// forwarding pointer to the head of the nearest bin at or above it
	// that is non-empty, so alloc() can locate a large-enough chunk
	// without scanning.
	inline void PageAllocZone::remove_chunk(Page *chunk, int bin)
	{
		chunk->chunk_rmap_list.del();
		
		// If bin's list is empty, forward it to the next non-empty bin.
		
		Page *head = bin_to_head(bin);
		if (head->chunk_rmap_list.empty()) {
			// Inherit the forwarding target of the next bin up.
			// NOTE(review): for bin == num_bins - 1 this evaluates
			// bin_to_head(num_bins); presumably bins[] carries a
			// sentinel entry past the last bin -- confirm against
			// the class declaration in the header.
			Page *newforward = bin_to_head(bin + 1)->free.otherside;
			assert(head->free.otherside == head);

			// Retarget this bin, plus any lower bins that were
			// forwarding to it.
			for (Page *p = head;
			     p - bins >= 0 && p->free.otherside == head; p--)
				p->free.otherside = newforward;
		}
	}
79
	// Insert "chunk" at the front of "bin"'s free list.  If the bin was
	// previously empty (its head forwarded elsewhere), repoint its
	// forwarding pointer -- and those of lower bins that forwarded past
	// it -- back at this bin, since it now has a chunk to offer.
	inline void PageAllocZone::add_to_bin(Page *chunk, int bin)
	{
		Page *head = bin_to_head(bin);
		Page *oldforward = head->free.otherside;

		head->chunk_rmap_list.add_front(&chunk->chunk_rmap_list);
		
		// Remove bin's forwarding if it was empty, along with smaller
		// bins pointing past this bin.
		
		if (oldforward != head) {
			Page *p = head;
			do {
				p->free.otherside = head;
				p--;
			} while (p - bins >= 0 && p->free.otherside == oldforward);
		}
	}
98
99         // Break off a piece from the end of the chunk, rather than the start. 
100         // This way, only the size of the remaining chunk needs to be adjusted
101         // as long as it still fits in the same bin.
102         
103         Page *PageAllocZone::shrink_chunk(Page *start, int num_pages,
104                                           size_t chunk_size, int bin)
105         {
106                 size_t size_left = chunk_size - num_pages;
107                 Page *newend = start + size_left - 1;
108                 
109                 start->free.otherside = newend;
110                 newend->free.otherside = start;
111                 
112                 if (size_left < bin_to_size(bin)) {
113                         remove_chunk(start, bin);
114                         add_to_bin(start, size_to_bin_alloc(size_left));
115                 }
116                 
117                 return newend + 1;
118         }
119         
	// Allocate a physically contiguous chunk of "num_pages" pages from
	// this zone (at most one max-bin-sized chunk per call).  Returns the
	// first Page of the chunk with every page flipped Free -> InUse at
	// refcount 1, or NULL if no large-enough free chunk exists.
	struct Page *PageAllocZone::alloc(uint num_pages)
	{
		// Zone lock; recursive spinlock with IRQs disabled, so this
		// path is callable from interrupt context.
		Lock::AutoSpinLockRecIRQ autolock(lock);
	
		assert(num_pages > 0);
		assert(num_pages <= bin_to_size(num_bins - 1));
	
		// The ideal bin's forwarding pointer leads straight to the
		// nearest bin (>= the ideal one) that actually has chunks.
		int bin = size_to_bin_alloc(num_pages);
		Page *head = bin_to_head(bin)->free.otherside;
		int realbin = head - bins;
		Util::List *list = &head->chunk_rmap_list;
		
		if (list->empty()) {
			// Forwarding only lands on an empty bin when every
			// bin up to the largest is empty: the zone cannot
			// satisfy this request.
			assert(realbin == num_bins - 1);
			return NULL;
		}
		
		// Take the first chunk on that bin's free list.
		head = list->next->listentry(Page, chunk_rmap_list);
		
		assert(head->flags & Page::Free);
		
		size_t size = chunk_size(head);
		assert(size >= bin_to_size(bin));
		assert(size >= (uint)num_pages);
		
		// Exact fit: unlink the whole chunk.  Oversized: carve our
		// piece off the chunk's tail and keep the remainder binned.
		if (size != num_pages)
			head = shrink_chunk(head, num_pages, size, realbin);
		else
			remove_chunk(head, realbin);

		// Mark each allocated page in-use with an initial reference.
		for (Page *p = head; p != head + num_pages; p++) {
			assert(p->flags & Page::Free);
			assert(!(p->flags & Page::InUse));
			assert(p->chunk_rmap_list.empty());
			p->flags = (p->flags & ~Page::Free) | Page::InUse;
			p->inuse.refcount = 1;
		}

		return head;
	}
160         
	// Return the chunk [head, head + num_pages) to the zone's free lists,
	// coalescing with adjacent free chunks.  The pages must currently be
	// non-Free and unlinked from any free list.
	void PageAllocZone::free(struct Page *head, size_t num_pages)
	{
		Lock::AutoSpinLockRecIRQ autolock(lock);

		assert(num_pages > 0);
	
		// Flip every page in the chunk from InUse to Free.
		for (Page *p = head; p != head + num_pages; p++) {
			assert(!(p->flags & Page::Free));
			assert(p->chunk_rmap_list.empty());
			p->flags = (p->flags & ~Page::InUse) | Page::Free;
		}
		
		// Combine the newly free chunk with any adjacent free chunks,
		// regardless of what bin they are in.

		Page *prevpage = head - 1;
		Page *nextpage = head + num_pages;
		
		// Bin the merged-with predecessor chunk currently occupies, or
		// -1 if no backward merge happened.
		int bin = -1;
				
		// Backward merge: prevpage's frame number must still be inside
		// the zone ("start" is the zone's first frame number).
		if (prevpage - pages >= start && (prevpage->flags & Page::Free)) {
			// A free chunk's last page points back at its first
			// page via free.otherside, yielding the whole
			// predecessor chunk.
			Page *prevchunk = prevpage->free.otherside;
			assert(prevchunk->flags & Page::Free);

			Page *end = head + num_pages - 1; // NB: shadows the zone member "end"
			prevchunk->free.otherside = end;
			end->free.otherside = prevchunk;
			
			size_t prevsize = head - prevchunk;
			assert(prevsize > 0);
			assert(prevsize < zonesize);

			head = prevchunk;
			num_pages += prevsize;
			bin = size_to_bin_free(prevsize);
		}

		// Forward merge: nextpage must be inside the zone (this "end"
		// is the zone member -- the last frame number).
		if (nextpage - pages <= end && nextpage->flags & Page::Free) {
			size_t prevsize = chunk_size(nextpage);
			num_pages += prevsize;
			Page *end = nextpage->free.otherside; // last page of next chunk; shadows member
			
			// The successor chunk is absorbed whole, so drop it
			// from its bin before relinking the end-pointers.
			remove_chunk(nextpage, size_to_bin_free(prevsize));
			
			end->free.otherside = head;
			head->free.otherside = end;
		}
		
		int newbin = size_to_bin_free(num_pages);
		
		// If the (possibly merged) chunk belongs in a different bin
		// than the predecessor already occupied, relink it; otherwise
		// the predecessor's existing bin entry covers the merged chunk.
		if (bin != newbin) {
			if (bin != -1) {
				remove_chunk(head, bin);
				assert(head->free.otherside == head + num_pages - 1);
			}

			head->free.otherside = head + num_pages - 1;
			add_to_bin(head, newbin);
		}
	}
221         
	// Attach this zone to the frame range [base, base + size), expressed
	// in page frames (not bytes), and reset all bins to empty.
	void PageAllocZone::init(uintptr_t base, size_t size)
	{
		assert(size > 0);
		assert(base + size <= Arch::mem_end / Arch::page_size);
	
		zonesize = size;
		start = base;
		end = base + size - 1;

		// Placement-construct the Page entries covering this zone; the
		// global pages[] array is indexed by physical frame number.
		for (Page *p = &pages[start]; p <= &pages[end]; p++) {
			assert(p->flags == 0);
			new(p) Page;
			p->zone = this;
		}
		
		// Every bin starts empty, so each bin's forwarding pointer
		// aims at the largest bin's head -- the state alloc() expects
		// when the zone has nothing free (see remove_chunk/add_to_bin
		// for how forwarding is maintained).
		for (int i = 0; i < num_bins; i++) {
			Page *bin = bin_to_head(i);
			
			bin->free.otherside = bin_to_head(num_bins - 1);
			bin->chunk_rmap_list.init();
		}
	}
244         
245         Page *PageAlloc::alloc(uint num_pages, PageAllocZone *const *zonelist)
246         {
247                 while (*zonelist) {
248                         Page *ret = (*zonelist)->alloc(num_pages);
249                         if (ret)
250                                 return ret;
251                         
252                         zonelist++;
253                 }
254                 
255                 // FIXME: Deliver System.Traps.ReduceMemoryUsage first
256                 throw_idl(OutOfMemory);
257         }
258         
	// Release this single page back to the allocator (a one-page chunk
	// fed through PageAlloc::free).
	void Page::free_page()
	{
		PageAlloc::free(this, 1);
	}
263
	// Global allocator front-end instance.
	PageAlloc page_alloc;
}