kernel/include/kern/rmap.h
#ifndef _KERN_RMAP_H
#define _KERN_RMAP_H

#include <stddef.h> // for offsetof (assumed available as a freestanding header)
#include <kern/mem.h>
#include <kern/radix.h>

namespace Mem {
        // One node in the reverse-map tree: it records a single virtual
        // mapping of a page, so that every mapping of that page can be
        // found and updated (e.g. when breaking copy-on-write).
        struct RMapNode {
                u64 vaddr;                       // virtual address of the mapping
                VirtualArea *va;                 // virtual area containing the mapping
                Util::ListNoAutoInit head, tail; // links into the rmap chain
        };

        namespace RMap {
                // Byte offsets of the embedded list links, used to recover
                // the enclosing RMapNode from a chain pointer.
                enum {
                        head_offset = offsetof(RMapNode, head),
                        tail_offset = offsetof(RMapNode, tail),
                };
        }
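
        // A minimal sketch (not part of the original header) of how these
        // offsets can be used: recover the enclosing RMapNode from a pointer
        // to one of its embedded list links, container_of-style.  The helper
        // name is hypothetical.
        static inline RMapNode *rmap_node_from_head(Util::ListNoAutoInit *link)
        {
                return reinterpret_cast<RMapNode *>(
                       reinterpret_cast<char *>(link) - RMap::head_offset);
        }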

        // This lock protects the rmap chains and rmap tables.  It also makes
        // the PageTable::get_mapping, RMapTable::map, PageTable::map sequence
        // atomic.
        //
        // OPT: This lock is acquired on all map/unmap activity; if/when it
        // turns out to be a significant bottleneck, finer-grained locking
        // can be used.  I decided against doing that now because it would be
        // somewhat complicated (though I believe doable) to avoid all races,
        // and I'd like to move on to implementing other things.

        extern Lock::Lock rmap_lock;
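
        // Illustrative sketch (assumptions, not code from this kernel) of
        // the sequence rmap_lock makes atomic; the PageTable method
        // signatures and the lock-acquisition API are assumed:
        //
        //     rmap_lock.lock();                                  // assumed API
        //     pte = upstream_ptbl->get_mapping(upstream_vaddr);  // assumed signature
        //     RMapTable::map(downstream_va, upstream_ptbl, virtaddr, upstream_vaddr);
        //     downstream_ptbl->map(virtaddr, pte);               // assumed signature
        //     rmap_lock.unlock();
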
        class Page;

        class RMapTable {
                Util::RadixTree<RMapNode, u64> tree;

        public:
                // Record the reverse mapping of virtaddr in downstream_va,
                // copied from upstream_ptbl's mapping at upstream_vaddr.
                // rmap_lock must be held.
                static void map(VirtualArea *downstream_va, PageTable *upstream_ptbl,
                                u64 virtaddr, u64 upstream_vaddr);

                // Remove the reverse-map entry for virtaddr.  rmap_lock must
                // be held.
                void unmap(u64 virtaddr);

                // Handle a copy-on-write for the specified page and all
                // downstream mappings.  All such mappings are set to the new
                // page, and FaultOnWrite is cleared.
                void break_copy_on_write(u64 virtaddr, Page *new_page);
        };
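
        // Illustrative sketch (not from this kernel) of a copy-on-write
        // fault path ending in break_copy_on_write; alloc_page and
        // copy_page are hypothetical helpers, not declarations from this
        // file:
        //
        //     rmap_lock.lock();
        //     Page *new_page = alloc_page();                 // hypothetical
        //     copy_page(new_page, old_page);                 // hypothetical
        //     rmap_table->break_copy_on_write(virtaddr, new_page);
        //     rmap_lock.unlock();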
};

#endif