]> git.buserror.net Git - polintos/scott/priv.git/blob - kernel/include/kern/rmap.h
minor doc updates
[polintos/scott/priv.git] / kernel / include / kern / rmap.h
1 #ifndef _KERN_RMAP_H
2 #define _KERN_RMAP_H
3
4 #include <kern/mem.h>
5 #include <kern/radix.h>
6 #include <util/radix.h>
7
8 namespace Mem {
	// One reverse-map entry: records a virtual mapping of a physical
	// page so all mappings of that page can be found and updated
	// (e.g. on unmap or copy-on-write break).
	struct RMapNode {
		u64 vaddr;                        // virtual address of this mapping
		VirtualArea *va;                  // the VirtualArea containing the mapping
		// Intrusive list links chaining this node into the page's rmap
		// chain; NoAutoInit, so presumably initialized explicitly by the
		// rmap code when the node is linked — TODO confirm in rmap.cc.
		Util::ListNoAutoInit head, tail;
	};
14
	namespace RMap {
		// Byte offsets of the intrusive list links within RMapNode,
		// presumably used to recover the enclosing RMapNode from a
		// bare list-node pointer (container_of-style arithmetic) —
		// verify against the list users in rmap.cc.
		enum {
			head_offset = offsetof(RMapNode, head),
			tail_offset = offsetof(RMapNode, tail),
		};
	}
21         
22         // This lock protects the rmap chains and rmap tables.  It also makes
23         // atomic the PageTable::get_mapping, RMapTable::map, PageTable::map
24         // sequence.
25         //
26         // OPT: This lock is acquired on all map/unmap activity; if/when this
27         // turns out to be a significant bottleneck, finer-grained locking can
28         // be used.  I decided against doing it now because it would be
29         // somewhat complicated (but I believe do-able) to avoid all races,
30         // and I'd like to move on to implementing other things for now.
31         
32         extern Lock::Lock rmap_lock;
33         class Page;
34         
35         class RMapTable {
36                 Util::PageRadixTree<RMapNode, u64> tree;
37                 
38                 RMapNode *lookup(u64 vaddr, bool add = false)
39                 {
40                         return tree.lookup(vaddr >> Arch::page_shift, add);
41                 }
42                 
43         public:
44                 // rmap_lock must be held.
45                 static void map(VirtualArea *downstream_va, PageTable *upstream_ptbl,
46                                 u64 virtaddr, u64 upstream_vaddr);
47
48                 void unmap(u64 virtaddr);
49
50                 // Handle a copy-on-write for the specified page and all downstream
51                 // mappings.  All such mappings are set to the new page, and
52                 // FaultOnWrite is cleared.
53                 
54                 void break_copy_on_write(u64 virtaddr, Page *new_page);
55         };
56 };
57
58 #endif