// mem/rmap.cc -- Reverse mapping from physical page frames (or
// intermediate address spaces) to mappers.
//
// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
//
// This software is provided 'as-is', without any express or implied warranty.
// In no event will the authors or contributors be held liable for any damages
// arising from the use of this software.
//
// Permission is hereby granted to everyone, free of charge, to use, copy,
// modify, prepare derivative works of, publish, distribute, perform,
// sublicense, and/or sell copies of the Software, provided that the above
// copyright notice and disclaimer of warranty be included in all copies or
// substantial portions of this software.

#include <kern/mem.h>
#include <kern/pagealloc.h>
#include <kern/rmap.h>
#include <kern/pagetable.h>
#include <kern/sched.h>
#include <kern/thread.h>

namespace Mem {
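        // Size of an RMapNode rounded up to a power of two.  The list walks
        // below assume nodes are allocated at this alignment, so that the
        // low bits of a pointer to an embedded list link tell whether it is
        // the node's head link or its tail link.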
        static const ulong rmap_node_len =
                1 << ll_get_order_round_up(sizeof(RMapNode));

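        // Record that dsva maps dsvaddr as a view of usvaddr in the upstream
        // page table usptbl (or of a physical page, if usptbl is NULL).  The
        // new node is linked directly behind the upstream node's head link,
        // placing it inside the upstream node's head/tail bracket so that it
        // is found when the upstream mapping is unmapped or cow-broken.
        // Serialization (presumably via rmap_lock) is left to the caller.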
        void RMapTable::map(VirtualArea *dsva, PageTable *usptbl,
                            u64 dsvaddr, u64 usvaddr)
        {
                RMapNode *dsrmap = dsva->aspace->page_table->
                                   rmap_table.lookup(dsvaddr, true);

                assert(!dsrmap->va);
                dsrmap->va = dsva;
                dsrmap->vaddr = page_align(dsvaddr);
                dsrmap->head.init();
                dsrmap->tail.init();

                if (usptbl) {
                        RMapNode *usrmap = usptbl->rmap_table.lookup(usvaddr);
                        assert(usrmap);
                        assert(usrmap->va->aspace->page_table == usptbl);

                        usrmap->head.add_front(&dsrmap->head);
                } else {
                        // FIXME: If it ends up being useful, link into the phys-page
                        // rmap list.
                }

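                // Close the new node's own bracket: its tail link goes right
                // after its head link, leaving the space between them for
                // any future downstream mappings of this mapping.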
                dsrmap->head.add_front(&dsrmap->tail);
        }

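        // Remove the mapping at virtaddr together with every mapping
        // downstream of it, by walking from this node's head link to its
        // tail link, unmapping each page and unlinking each node visited.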
        void RMapTable::unmap(u64 virtaddr)
        {
                Lock::AutoLock autolock(rmap_lock);
                RMapNode *head = lookup(virtaddr);

                if (!head || !head->va)
                        return;

                assert(head->vaddr == virtaddr);

                Util::ListNoAutoInit *node = &head->head, *oldnode;

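                // Distinguish head links from tail links by their offset
                // within the enclosing RMapNode; a head link identifies a
                // mapping to tear down, a tail link merely closes that
                // node's bracket of downstream mappings.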
                do {
                        ulong off = reinterpret_cast<ulong>(node) & (rmap_node_len - 1);
                        if (off == RMap::head_offset) {
                                RMapNode *rmap = node->listentry(RMapNode, head);

                                Region region = { rmap->vaddr,
                                                  rmap->vaddr + Arch::page_size - 1 };

                                rmap->va->aspace->page_table->unmap(region);
                                rmap->va = NULL;
                        } else {
                                assert(off == RMap::tail_offset);
                        }

                        oldnode = node;
                        node = node->next;
                        oldnode->del();
                } while (node != &head->tail);

                node->del();
        }

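        // new_page is the copy (presumably made by the caller on a write
        // fault) that should replace the copy-on-write page at virtaddr.
        // Remap this mapping and all of its downstream mappings to point at
        // new_page, clearing FaultOnWrite except where a mapping is still
        // COW relative to some intermediate mapping (tracked via still_cow).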
        void RMapTable::break_copy_on_write(u64 virtaddr, Page *new_page)
        {
                assert(rmap_lock.held_by_curthread());
                RMapNode *head = lookup(virtaddr);
                RMapNode *still_cow = NULL;

                assert(head && head->va);
                assert(head->vaddr == virtaddr);

                // If there are sibling or upstream mappings of this page,
                // detach this node and its downstreams from the rest of
                // the rmap list.

                if (head->head.prev != &head->tail) {
                        head->head.prev->next = head->tail.next;
                        head->tail.next->prev = head->head.prev;

                        head->head.prev = &head->tail;
                        head->tail.next = &head->head;
                }

                assert(head->tail.next == &head->head);
                Util::ListNoAutoInit *node = &head->head;

                do {
                        ulong off = reinterpret_cast<ulong>(node) & (rmap_node_len - 1);
                        if (off == RMap::head_offset) {
                                RMapNode *rmap = node->listentry(RMapNode, head);
                                RegionWithOffset region;

                                region.start = rmap->vaddr;
                                region.end = rmap->vaddr + Arch::page_size - 1;
                                region.offset = page_to_phys(new_page);

                                PTEFlags flags = rmap->va->flags;

                                // The faulting mapping always has PTE FaultOnWrite cleared;
                                // downstream mappings have PTE FaultOnWrite cleared if they
                                // are not downstream of a different mapping with VA
                                // FaultOnWrite set.  Downstream mappings should never have
                                // PTE FaultOnWrite cleared if VA FaultOnWrite is set; if such
                                // a downstream mapping had been cow-broken, it would have
                                // been removed from this physpage's rmap list.

                                if (flags.FaultOnWrite && node != &head->head && !still_cow)
                                        still_cow = rmap;

                                if (still_cow)
                                        flags.FaultOnWrite = 1;
                                else
                                        flags.FaultOnWrite = 0;

                                rmap->va->aspace->page_table->map(region, flags);
                        } else {
                                assert(off == RMap::tail_offset);

                                if (still_cow) {
                                        RMapNode *rmap = node->listentry(RMapNode, tail);

                                        // We've finished the downstreams of a COW mapping,
                                        // so stop marking pages as COW.

                                        if (rmap == still_cow)
                                                still_cow = NULL;
                                }
                        }

                        node = node->next;
                } while (node != &head->tail);

                assert(!still_cow);
        }

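        // Global lock protecting rmap list manipulation; unmap() acquires it
        // itself, while break_copy_on_write() asserts that the caller
        // already holds it.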
        Lock::Lock rmap_lock;
}