#ifndef _KERN_PAGEALLOC_H
#define _KERN_PAGEALLOC_H

#include <System/Mem.h>

#include <kern/kernel.h>

#include <arch/paging.h>
#include <arch/addrs.h>

#include <lowlevel/atomic.h>
#include <util/lock.h>
#include <util/list.h>		// for Util::List (include path assumed)

namespace Mem {

class PageAllocZone;
struct Page;
// This is an array of all pages in all zones.  For now, zones must be
// contiguous (or at least close enough to each other that it's not
// a big deal to waste the space for the intervening page structs).

extern Page *pages, *last_page;

static inline uintptr_t page_to_phys(Page *page);
static inline bool is_phys_page(Page *page)
{
	return page >= Mem::pages && page < last_page;
}
struct Page {
	// A page is reserved if it is neither Free nor InUse.
	// Free and InUse may not be set at the same time.

	static const u32 Free = 0x00000001;
	static const u32 InUse = 0x00000002;

	u32 flags;

	// The zone this page belongs to, so that it can be freed back
	// to the right zone (member placement assumed).
	PageAllocZone *zone;
	union {
		struct {
			// These fields are valid when a page is free, if it is the
			// first or last page of the chunk.

			// For the first page of a chunk, this points to the last
			// page of the chunk.  For the last page of the chunk, this
			// points to the first page of the chunk.  For a single-page
			// chunk, this points to itself.  For a bin's list head, this
			// points to the smallest bin at least as large as itself
			// that has an available chunk.

			Page *otherside;
		} free;

		struct {
			// These fields are valid when a page is in use (not free
			// or reserved).

			int32_t refcount;
		} inuse;
	};

	// chunk when free, rmap when in use
	//
	// chunk points to the last page of the previous chunk in
	// the bin and the first page of the next chunk in the bin.
	// Only valid for the first page of a chunk.  Prevchunk of the
	// first chunk and nextchunk of the last chunk point to the
	// bin's list head.
	//
	// rmap points to each mapping (as an RMapNode) of this page.
	// NOTE: rmap list currently unused

	Util::List chunk_rmap_list;
	void retain()
	{
		if (_UTIL_ASSERT_LEVEL >= 1) {
			if (!is_phys_page(this) || !(flags & InUse)) {
				printf("Page %p (phys %lx) retained flags %x, is_phys %d\n",
				       this, page_to_phys(this), flags,
				       (int)is_phys_page(this));
			}
		}

		// assert(flags & InUse);
		assert(inuse.refcount >= 1);
		ll_atomic_inc(&inuse.refcount);
	}
	void release()
	{
		if (_UTIL_ASSERT_LEVEL >= 1) {
			if (!is_phys_page(this) || !(flags & InUse)) {
				printf("Page %p (phys %lx) released flags %x, is_phys %d\n",
				       this, page_to_phys(this), flags,
				       (int)is_phys_page(this));
			}
		}

		// assert(flags & InUse);
		assert(inuse.refcount >= 1);
		if (ll_atomic_dec_and_test(&inuse.refcount))
			free_page();
	}

	// Return a page whose refcount has dropped to zero back to its
	// zone (helper name assumed).
	void free_page();
	int32_t get_refcount()
	{
		assert(is_phys_page(this));
		assert(flags & InUse);
		assert(inuse.refcount >= 1);
		return inuse.refcount;
	}
};
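// Illustrative sketch, not part of the allocator API: given the
// boundary-pointer invariant described in struct Page, the size of a
// free chunk falls out of pointer arithmetic on the page array.  The
// helper name is hypothetical; "first" is assumed to be the first page
// of a free chunk (not a bin list head), with the zone suitably locked.
static inline size_t example_free_chunk_pages(Page *first)
{
	// free.otherside of a chunk's first page points at its last
	// page, so last - first + 1 is the chunk length in pages.
	return first->free.otherside - first + 1;
}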
static inline Page *phys_to_page(uintptr_t phys)
{
	return pages + (phys >> Arch::page_shift);
}

static inline uintptr_t page_to_phys(Page *page)
{
	return (page - pages) << Arch::page_shift;
}

static inline Page *kvirt_to_page(void *kvirt)
{
	return phys_to_page(kvirt_to_phys(kvirt));
}

static inline void *page_to_kvirt(Page *page)
{
	return phys_to_kvirt(page_to_phys(page));
}
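// Illustrative helper (hypothetical, not part of the API): since pages
// is a flat array indexed by physical page frame number, the
// conversions above are inverses of each other for any valid page.
static inline bool example_page_roundtrip_ok(Page *page)
{
	// page -> phys -> page and page -> kvirt -> page both recover
	// the original page struct.
	return phys_to_page(page_to_phys(page)) == page &&
	       kvirt_to_page(page_to_kvirt(page)) == page;
}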
class PageAllocZone {
	enum {
		num_bins = CONF_MEM_MAX_PAGE_ALLOC_BITS
	};

	// List head for each bin
	Page bins[num_bins];

	// First and last pages of the zone
	Page *start, *end;
	size_t zonesize; // end - start + 1

	size_t chunk_size(Page *start);

	uint bin_to_size(int bin);
	int size_to_bin_alloc(size_t size);
	int size_to_bin_free(size_t size);

	Page *bin_to_head(int bin);

	void remove_chunk(Page *start, int bin);
	void add_to_bin(Page *chunk, int bin);
	Page *shrink_chunk(Page *start, int num_pages,
	                   size_t chunk_size, int bin);

public:
	// base and size are in pages; all pages must be reserved
	// (i.e. flags set to zero).

	void init(uintptr_t base, size_t size);

	Page *alloc(uint num_pages);
	void free(Page *head, size_t num_pages);
};
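// Illustrative boot-time sketch (hypothetical zone object and numbers,
// commented out): base and size are page numbers, and every page
// struct in the range must still be reserved (flags == 0).  Pages are
// then made available by freeing them into the allocator.
//
//	PageAllocZone isadma_zone;
//	isadma_zone.init(0, 4096);	// first 16MiB as 4KiB pages
//	PageAlloc::free(first_usable_page, num_usable_pages);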
class PageAlloc {
public:
	// #include "mem/pagealloc-server/Mem/PageAlloc.h"
	// Architectures must define lists of zones, in preference order,
	// to use for each type of allocation; these are indices into
	// Arch::pagezonelists.
	//
	// The ISA DMA zonelist is for ISA DMA buffers, which must be below
	// some platform-dependent limit (typically 16MiB).
	//
	// The DMA32 zonelist is for devices on a 32-bit bus (such as
	// ordinary PCI), so that buffers over 4GiB are not used unless
	// the platform provides a suitable mapping mechanism (such
	// as an IOMMU).
	//
	// An architecture may define additional zonelists for its internal
	// use, but it's better to pass them directly as pointers than to
	// define additional numbers, so as to avoid conflicts if the
	// generic list expands in the future.

	enum {
		zonelist_normal = 0,
		zonelist_dma32 = 1,
		zonelist_isadma = 2
	};
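	// Illustrative sketch (hypothetical port, commented out): each
	// entry of Arch::pagezonelists is assumed to be a NULL-terminated
	// array of zone pointers, ordered from most- to least-preferred,
	// so less capable allocations can fall back to more capable zones.
	//
	//	static PageAllocZone *normal_list[] = { &highzone, &dma32zone, &isazone, NULL };
	//	static PageAllocZone *dma32_list[] = { &dma32zone, &isazone, NULL };
	//	static PageAllocZone *isa_list[] = { &isazone, NULL };
	//
	//	PageAllocZone *const *const pagezonelists[3] = {
	//		normal_list, dma32_list, isa_list
	//	};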
	// Note that while num_pages does not have to be a power of 2 (and
	// the allocation size will not be rounded up to the next power of
	// 2), a request that is not a power of two may fail despite the
	// existence of a suitable chunk if there is no available chunk of
	// the next-higher power of 2.  num_pages may not be 0.

	static Page *alloc(uint num_pages, PageAllocZone *const *zonelist);
	static Page *alloc(uint num_pages, int zone = zonelist_normal)
	{
		assert(zone >= 0 && zone <= 2);
		return alloc(num_pages, Arch::pagezonelists[zone]);
	}
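	// Illustrative usage (assuming a NULL return on failure): a
	// three-page request is carved out of a four-page (2^2) chunk, so
	// it can fail even when a free three-page run exists but no
	// four-page chunk does.
	//
	//	Page *p = PageAlloc::alloc(3);
	//	if (p) {
	//		/* ... use the pages ... */
	//		PageAlloc::free(p, 3);
	//	}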
	// Any span of allocated pages may be freed; it does not have to
	// correspond to the size and start of the original allocation, and
	// it may be larger than the maximum allocation size (though this
	// is likely only useful during bootup when adding new chunks of
	// memory).  All pages must be in the same zone.  num_pages may not
	// be 0.

	static void free(Page *head, size_t num_pages)
	{
		head->zone->free(head, num_pages);
	}
};
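// Illustrative example (commented out): because any span of allocated
// pages may be freed, an allocation can be released piecemeal.
//
//	Page *p = PageAlloc::alloc(4);
//	PageAlloc::free(p, 1);		// release the first page now...
//	PageAlloc::free(p + 1, 3);	// ...and the rest later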
extern PageAlloc page_alloc;
static inline void *alloc_pages(int num,
                                int zone = PageAlloc::zonelist_normal)
{
	return Mem::page_to_kvirt(PageAlloc::alloc(num, zone));
}

static inline void free_pages(void *addr, int num)
{
	if (addr)
		PageAlloc::free(kvirt_to_page(addr), num);
}
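// Illustrative usage of the kernel-virtual wrappers (commented out):
// grab two pages for a scratch buffer and release them when done.
//
//	void *buf = Mem::alloc_pages(2);
//	/* ... use buf ... */
//	Mem::free_pages(buf, 2);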
static inline void retain_if_phys(ulong addr)
{
	Page *page = phys_to_page(addr);

	if (is_phys_page(page))
		page->retain();
}

static inline void release_if_phys(ulong addr)
{
	Page *page = phys_to_page(addr);

	if (is_phys_page(page))
		page->release();
}

}

#endif