1 // mem/rmap.cc -- Reverse mapping from physical page frames (or
2 // intermediate address spaces) to mappers.
4 // This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
6 // This software is provided 'as-is', without any express or implied warranty.
7 // In no event will the authors or contributors be held liable for any damages
8 // arising from the use of this software.
10 // Permission is hereby granted to everyone, free of charge, to use, copy,
11 // modify, prepare derivative works of, publish, distribute, perform,
12 // sublicense, and/or sell copies of the Software, provided that the above
13 // copyright notice and disclaimer of warranty be included in all copies or
14 // substantial portions of this software.
17 #include <kern/pagealloc.h>
18 #include <kern/rmap.h>
19 #include <kern/pagetable.h>
20 #include <kern/sched.h>
21 #include <kern/thread.h>
// Allocation size of an RMapNode, rounded up to a power of two.  Because
// nodes are power-of-two sized and aligned, the offset of a list link
// within its node can be recovered by masking the link's address with
// (rmap_node_len - 1) — see the head_offset/tail_offset checks below.
24 static const ulong rmap_node_len =
25 1 << ll_get_order_round_up(sizeof(RMapNode));
// Record a new downstream mapping: the virtual area dsva maps, at
// dsvaddr, the page that upstream page table usptbl maps at usvaddr.
// The downstream rmap node is linked into the upstream node's list so
// that unmap() and break_copy_on_write() can walk every dependent
// mapping of the page.
//
// NOTE(review): callers presumably hold rmap_lock (unmap/COW-break
// assert or take it) — confirm at the call sites, which are not
// visible in this chunk.
27 void RMapTable::map(VirtualArea *dsva, PageTable *usptbl,
28 u64 dsvaddr, u64 usvaddr)
// Look up the downstream rmap node, allocating it if absent
// (the 'true' argument requests creation).
30 RMapNode *dsrmap = dsva->aspace->page_table->
31 rmap_table.lookup(dsvaddr, true);
// Record the page-aligned downstream virtual address in the node.
35 dsrmap->vaddr = page_align(dsvaddr);
// The upstream node must already exist — lookup without the
// create flag — and must belong to the upstream page table.
40 RMapNode *usrmap = usptbl->rmap_table.lookup(usvaddr);
42 assert(usrmap->va->aspace->page_table == usptbl);
// Splice the downstream node's head right after the upstream
// head, so a forward walk from upstream reaches all downstreams.
44 usrmap->head.add_front(&dsrmap->head);
46 // FIXME: If it ends up being useful, link into the phys-page
// The node's tail immediately follows its head; anything later
// inserted between them is downstream of *this* mapping.
50 dsrmap->head.add_front(&dsrmap->tail);
// Remove the mapping at virtaddr from this rmap table, and recursively
// unmap every downstream mapping linked after it: the walk visits each
// list link between head->head and head->tail, and for every node whose
// head link is encountered, unmaps that node's page-sized region from
// its owning page table.
53 void RMapTable::unmap(u64 virtaddr)
// rmap_lock serializes all rmap-list manipulation; released on return.
55 Lock::AutoLock autolock(rmap_lock);
56 RMapNode *head = lookup(virtaddr);
// No node, or a node with no virtual area attached: nothing mapped here.
58 if (!head || !head->va)
61 assert(head->vaddr == virtaddr);
63 Util::ListNoAutoInit *node = &head->head, *oldnode;
// A link's offset inside its power-of-two-sized node tells us whether
// it is the node's head link or its tail link.
66 ulong off = reinterpret_cast<ulong>(node) & (rmap_node_len - 1);
67 if (off == RMap::head_offset) {
// Head link: recover the owning node and unmap its page.
68 RMapNode *rmap = node->listentry(RMapNode, head);
// NOTE(review): 'Arch::page_shift - 1' as a region end looks
// suspicious — an inclusive one-page region would normally end at
// vaddr + page_size - 1.  Verify against Region/Arch definitions.
70 Region region = { rmap->vaddr,
71 rmap->vaddr + Arch::page_shift - 1 };
72 
73 rmap->va->aspace->page_table->unmap(region);
// Not a head link, so it must be a tail link.
76 assert(off == RMap::tail_offset);
// Loop until the walk reaches the starting node's own tail.
82 } while (node != &head->tail);
// Break copy-on-write at virtaddr: detach this node's sub-list from any
// sibling/upstream mappings, then re-point this mapping and all of its
// downstreams at new_page, clearing the FaultOnWrite PTE flag for
// mappings that are no longer COW and keeping it set for downstreams
// that sit under a still-COW virtual area.
//
// Caller must hold rmap_lock (asserted below, not taken here).
87 void RMapTable::break_copy_on_write(u64 virtaddr, Page *new_page)
89 assert(rmap_lock.held_by_curthread());
90 RMapNode *head = lookup(virtaddr);
// While non-NULL, we are walking downstreams of a mapping whose VA
// is still marked FaultOnWrite; those keep PTE FaultOnWrite set.
91 RMapNode *still_cow = NULL;
93 assert(head && head->va);
94 assert(head->vaddr == virtaddr);
96 // If there are sibling or upstream mappings of this page,
97 // detach the rmap list.
99 if (head->head.prev != &head->tail) {
// Bridge our predecessors/successors around the [head, tail] span...
100 head->head.prev->next = head->tail.next;
101 head->tail.next->prev = head->head.prev;
// ...and close our own span into a self-contained circular list.
103 head->head.prev = &head->tail;
104 head->tail.next = &head->head;
// After detachment the list must be circular through this node alone.
107 assert(head->tail.next == &head->head);
108 Util::ListNoAutoInit *node = &head->head;
// As in unmap(): mask the link address to learn whether this is a
// node's head link or tail link.
111 ulong off = reinterpret_cast<ulong>(node) & (rmap_node_len - 1);
112 if (off == RMap::head_offset) {
113 RMapNode *rmap = node->listentry(RMapNode, head);
114 RegionWithOffset region;
116 region.start = rmap->vaddr;
// NOTE(review): 'page_shift - 1' as an inclusive region end looks
// suspicious (page_size - 1 expected) — same concern as in unmap().
117 region.end = rmap->vaddr + Arch::page_shift - 1;
// Remap this region onto the new physical page.
118 region.offset = page_to_phys(new_page);
120 PTEFlags flags = rmap->va->flags;
122 // The faulting mapping always has PTE FaultOnWrite cleared;
123 // downstream mappings have PTE FaultOnWrite cleared if they
124 // are not downstream of different mapping with VA
125 // FaultOnWrite set. Downstream mappings should never have
126 // PTE FaultOnWrite clear if VA FaultOnWrite is set; if the
127 // downstream mapping had been cow-broken, it would have been
128 // removed from this physpage's rmap list.
// First COW-marked VA seen below the faulting node opens a
// still-COW region; everything until its tail stays FaultOnWrite.
130 if (flags.FaultOnWrite && node != &head->head && !still_cow)
134 flags.FaultOnWrite = 1;
136 flags.FaultOnWrite = 0;
// Install the (possibly updated) PTE pointing at new_page.
138 rmap->va->aspace->page_table->map(region, flags);
140 assert(off == RMap::tail_offset);
143 RMapNode *rmap = node->listentry(RMapNode, tail);
145 // We've finished the downstreams of a COW mapping,
146 // so stop marking pages as COW.
148 if (rmap == still_cow)
// Loop ends when the walk returns to the faulting node's tail.
154 } while (node != &head->tail);
// Global lock serializing all rmap-table list manipulation: taken by
// unmap() and asserted held in break_copy_on_write().
159 Lock::Lock rmap_lock;