#include <kern/radix.h>
#include <util/radix.h>

Util::ListNoAutoInit head, tail;

head_offset = offsetof(RMapNode, head),
tail_offset = offsetof(RMapNode, tail),

// This lock protects the rmap chains and rmap tables. It also makes
// atomic the PageTable::get_mapping, RMapTable::map, PageTable::map
// sequence.
//
// OPT: This lock is acquired on all map/unmap activity; if/when this
// turns out to be a significant bottleneck, finer-grained locking can
// be used. I decided against doing it now because it would be
// somewhat complicated (but I believe do-able) to avoid all races,
// and I'd like to move on to implementing other things for now.

extern Lock::Lock rmap_lock;
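
// Illustrative only (not part of the real interface): the convention
// above means any lookup that must stay consistent with a subsequent
// rmap/page-table update is done inside a single rmap_lock critical
// section.  A minimal sketch, assuming Lock::Lock exposes
// lock()/unlock(); critical_section is a stand-in for e.g. a
// PageTable::get_mapping followed by an RMapTable::map.
static inline void example_with_rmap_lock(void (*critical_section)())
{
	rmap_lock.lock();
	critical_section();
	rmap_lock.unlock();
}
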
Util::PageRadixTree<RMapNode, u64> tree;

RMapNode *lookup(u64 vaddr, bool add = false)
{
	return tree.lookup(vaddr >> Arch::page_shift, add);
}
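
// Illustrative only (not part of the real interface): the radix tree
// is keyed by virtual page number (vaddr >> page_shift), so any two
// addresses within the same page resolve to the same RMapNode, and a
// missing entry presumably yields a null pointer when add is false.
// Sketch of a query under the rmap_lock convention described above,
// assuming the same lock()/unlock() interface as in the sketch there:
bool example_has_rmap_node(u64 vaddr)
{
	rmap_lock.lock();
	bool present = lookup(vaddr);
	rmap_lock.unlock();
	return present;
}
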
// rmap_lock must be held.
static void map(VirtualArea *downstream_va, PageTable *upstream_ptbl,
                u64 virtaddr, u64 upstream_vaddr);

void unmap(u64 virtaddr);
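
// Illustrative only (not part of the real interface): tearing down a
// region removes the reverse mapping of every page it covered.  The
// page size is derived from Arch::page_shift; start and len are
// assumed page-aligned, and the caller is assumed to hold rmap_lock,
// as map() requires.
void example_unmap_range(u64 start, u64 len)
{
	for (u64 va = start; va < start + len; va += (u64)1 << Arch::page_shift)
		unmap(va);
}
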
// Handle a copy-on-write fault for the specified page and all
// downstream mappings. All such mappings are set to the new page,
// and FaultOnWrite is cleared.
void break_copy_on_write(u64 virtaddr, Page *new_page);
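
// Illustrative only (not part of the real interface): a write fault on
// a FaultOnWrite page would allocate a private copy of the page, copy
// its contents, and then retarget every downstream mapping at the new
// page via break_copy_on_write().  example_alloc_page() and
// example_copy_page() are hypothetical stand-ins for whatever the page
// allocator actually provides, and the caller is assumed to hold
// rmap_lock, as with map().
Page *example_alloc_page();
void example_copy_page(Page *dst, Page *src);

void example_handle_cow_fault(u64 vaddr, Page *old_page)
{
	Page *new_page = example_alloc_page();
	example_copy_page(new_page, old_page);
	break_copy_on_write(vaddr, new_page);
}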