#include <System/Mem.h>

#include <kern/kernel.h>

#include <arch/addrs.h>
#include <util/rbtree.h>
#include <util/list.h>
#include <util/lock.h>
#include <kernel/region.h>
#include <lowlevel/bitops.h>
// Used for allocating memory at boot time before the page allocator
// is running. Alignment must be a power of 2. Because nothing other
// than the kernel is guaranteed to be mapped from the beginning on
// all architectures, no generic code should use this until after
// Arch::arch_init() has run and set up physical memory mappings.
//
// This function may not be used after the page allocator has
// been initialized by architecture code.
//
// Architectures must provide Arch::next_free_bootmem, initialized
// to the first free piece of bootmem.
static inline void *get_bootmem(size_t size, size_t align)
{
	uintptr_t ret = (Arch::next_free_bootmem + align - 1) & ~(align - 1);
	Arch::next_free_bootmem = ret + size;
	return reinterpret_cast<void *>(ret);
}
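// Example use (an illustrative sketch, not part of the original
// source): grab a page-aligned, zeroed page for an early boot
// structure. The helper name and the call to memset() are assumptions
// for illustration; Arch::page_size is used elsewhere in this file.
static inline void *get_bootmem_page()
{
	void *page = get_bootmem(Arch::page_size, Arch::page_size);
	memset(page, 0, Arch::page_size);
	return page;
}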
typedef System::Mem::AddrSpace IAddrSpace;
typedef System::Mem::Mappable IMappable;
using System::Mem::Cacheable;
using System::Mem::Region;
using System::Mem::RegionWithOffset;
using System::Mem::AllocFlags;
using System::Mem::MapFlags;
using System::Mem::AccessFlags;
// This must be kept in sync with include/kern/generic-pte.h

// Readable, Writeable, and Executable are for permission only,
// not for implementing copy on write, swapping, etc.
// If set, then on a write access, the page is copied and this
// address space gets the new, anonymous version. The rmap list
// is then traversed; all downstream mappings will share the new
// copy.
// For vareas that directly map something other than an address
// space, the action to be taken on a write fault is
// mapping-specific.
// Do not allow the user to unmap or modify flags.
// Used for the shared user/kernel mappings.

#elif defined(BITFIELD_BE)
ulong pad:_LL_LONG_BYTES * 8 - 9;

#error Unspecified/unrecognized bitfield endianness
PTEFlags(ulong init) : raw(init)
{
}
using Arch::kvirt_to_phys;
using Arch::phys_to_kvirt;
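// Illustrative sketch (not part of the original source): how the
// copy-on-write bits described above might be consulted on a write
// fault. The helper is hypothetical, and the flag name FaultOnWrite
// is an assumption based on the comments; Valid is used as in
// get_mapping() below.
static inline bool needs_cow_break(PTEFlags flags)
{
	// A valid page marked fault-on-write must be copied before a
	// write can proceed; the writer then gets the new, anonymous
	// version.
	return flags.Valid && flags.FaultOnWrite;
}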
typedef Util::RBTree<VirtualArea, Region, u64> VirtualAreaTree;

// This linked list keeps track of the virtual areas that map this
// mappable (this is not transitive; vareas that map a varea that
// maps this mappable are not on this list).
//
// OPT: rbtree keyed on mapped address range?

Lock::SpinLock mappings_lock;
virtual void get_size(u64 *size) = 0;

virtual void get_block_size(u64 *block_size)
{
	*block_size = Arch::page_size;
}
// Register/unregister varea as mapping this mappable.

virtual void map(VirtualArea *varea);
virtual void unmap(VirtualArea *varea);
// Make the specified page available for mapping. This must be
// done before map() will succeed. It is possible (though
// unlikely) that the pages will be removed before map() is called,
// causing map() to return false. In such a case, pagein should be
// called again by the fault handler. If the mapping fails for
// other reasons (such as lack of permission, a hole in a stacked
// aspace, or an I/O error), then pagein() will throw a BadPageFault
// exception.

virtual void pagein(u64 vaddr, PTEFlags reqflags) = 0;
// Returns the physical address and flags associated with a given
// virtual address. If flags.Valid is not set, then phys and all
// other flags are undefined, and pagein() should be retried.
// rmap_lock must be held.

virtual void get_mapping(u64 vaddr, u64 *phys, PTEFlags *flags) = 0;
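// Example of the intended pagein()/get_mapping() sequence (an
// illustrative sketch, not from the original source); a fault handler
// retries until the mapping becomes valid, with pagein() throwing
// BadPageFault on hard errors:
//
//	u64 phys;
//	PTEFlags flags;
//
//	do {
//		mappable->pagein(vaddr, reqflags);
//		mappable->get_mapping(vaddr, &phys, &flags);
//	} while (!flags.Valid);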
#include <servers/mem/addrspace/Mem/Mappable.h>
// The red/black tree is used to find a region based on address.
//
// The linked list is kept in order and is used to iterate over
// vmas in a region (after looking up the starting point in the
// tree, unless the region is the entire address space).

VirtualAreaTree::Node rbtree_node;
Util::List list_node;
Util::List mappings_node;
// This is added to the virtual address to get the offset
// into the mappable.

u64 offset;

Region &region()
{
	return rbtree_node.value;
}
struct BadPageFault {
};
class ASpaceMappable : public Mappable {
	AddrSpace *aspace;

	static bool rec_pagein(AddrSpace *aspace, u64 vaddr,
	                       PTEFlags reqflags);

	ASpaceMappable (AddrSpace *ASPACE) : aspace(ASPACE)
	{
	}

	void get_size(u64 *size);

	virtual void pagein(u64 vaddr, PTEFlags reqflags);
	virtual void get_mapping(u64 vaddr, u64 *phys, PTEFlags *flags);

	friend class AddrSpace;
};
// OPT: Coalesce vareas when possible (except when setting flags to
// match surrounding vareas, as the flags are likely to change
// again if they've already changed).
//
// OPT: A subclass of AddrSpace that doesn't use
// VirtualArea::offset, but rather has its own virtual method that
// figures out offsets to the next level using its own data
// structures (such as filesystem block tables). This would avoid
// excessive vareas for fragmented files. Whether the excess of
// vareas is significant enough for this to be worthwhile remains
// to be seen.

VirtualAreaTree varea_tree;
Util::List varea_list;
// This defines the start and end of the aspace; mappings outside
// this range may not be done, and will not be returned by
// get_free_region(). For process aspaces, this goes from
// Arch::user_start to Arch::user_end. For non-proc aspaces, this
// goes from 0 to the size of the address space.

Region aspace_region;
// Returns true if there is a mapped region that overlaps the given
// region. If there is a collision, then the first overlapping
// varea is returned in va. Otherwise, va is set to the last mapped
// area before the region (if there are no areas, or the region is
// before the first area, va is set to NULL). The aspace lock
// must be held.

bool check_overlap(Region region, VirtualArea *&va);
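// Example (an illustrative sketch, not from the original source) of
// the intended use when inserting a new varea:
//
//	VirtualArea *va;
//
//	if (check_overlap(region, va))
//		return false; // collision; va is the first overlap
//
//	// No collision; va (possibly NULL) is the predecessor of the
//	// insertion point in the sorted varea list.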
// Finds a free region of the requested length and puts it in
// region. Returns true if an appropriate area is found. The prev
// pointer is set as va is in check_overlap. The aspace lock must
// be held.

bool get_free_region(ulong len, Region &region, VirtualArea *&prev);
// This is the value after the last region returned by
// get_free_region. If there was an intervening unmap for a lower
// address, then it is set to that address instead.

u64 cached_free_region;
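// For example (illustrative numbers): if get_free_region() last
// returned [0x10000, 0x20000), cached_free_region is 0x20000 and the
// next search starts there; if [0x8000, 0x9000) is later unmapped,
// cached_free_region drops to 0x8000 so the hole can be reused.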
static u64 rec_unmap(AddrSpace *aspace, Region region,
                     PTEFlags reqflags, VirtualArea *va);
// If there are multiple virtual areas that cover the specified region,
// split them at the region's boundaries. The first varea in the region
// (if any) is returned. The aspace lock must be held.

VirtualArea *split_varea(Region region);

void break_copy_on_write(VirtualArea *va, u64 vaddr, u64 phys);
bool map(VirtualArea *va, u64 vaddr, PTEFlags reqflags);
#include <servers/mem/addrspace/Mem/AddrSpace.h>

ASpaceMappable mappable;
PageTable *page_table;

AddrSpace(PageTable *ptbl = NULL);
// Returns true if the fault was "good"; otherwise, the caller
// should dump regs. exec should only be used if the CPU
// implements per-page exec protection; otherwise, treat it
// as a read.

bool handle_fault(ulong addr, bool write, bool exec, bool user);
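// Example (an illustrative sketch, not from the original source) of
// how an arch fault handler might use this; dump_regs_and_panic() is
// a hypothetical error path:
//
//	if (!aspace->handle_fault(fault_addr, is_write, is_exec,
//	                          from_user))
//		dump_regs_and_panic(regs);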
void get_mappable(IMappable *ma);
void clone(IAddrSpace *addrspace, u8 clone_is_real);

void map(IMappable ma, Region region, u64 *vstart, MapFlags mflags,
         int map_type = map_user);
void unmap(Region region, bool from_kernel = false);

void set_mapflags(Region region, MapFlags mflags);
void get_mapflags(Region region, MapFlags *mflags, uint8_t *all_same);
void get_mapping(Region region, IMappable *ma, u64 *offset);

void get_page_size(u32 *page_size);
void get_min_align(u32 *min_align);
void get_size(u64 *size);

friend class ASpaceMappable;
class ProcAddrSpace : public AddrSpace {
	ProcAddrSpace(void *page_table);

	ORB::IDSpace idspace;
};

extern Factory addr_space_factory, proc_addr_space_factory;

using ::System::RunTime::orbmm;
static inline bool page_aligned(u64 addr)
{
	return !(addr & (u64)(Arch::page_size - 1));
}

static inline u64 page_align(u64 addr)
{
	return addr & ~(u64)(Arch::page_size - 1);
}
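// Illustrative counterpart (not part of the original source): round an
// address up to the next page boundary. The helper name is
// hypothetical, shown for symmetry with page_align() above.
static inline u64 page_align_up(u64 addr)
{
	return page_align(addr + (u64)(Arch::page_size - 1));
}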
// FIXME: Valid user addr? Paging holes?
static inline bool valid_addr(uint64_t addr)
{
	if (sizeof(void *) == 8)
		return true;

	return (addr >> 32) == 0;
}