1 // mem/rmap.cc -- Reverse mapping from physical page frames (or
2 // intermediate address spaces) to mappers.
4 // This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
6 // This software is provided 'as-is', without any express or implied warranty.
7 // In no event will the authors or contributors be held liable for any damages
8 // arising from the use of this software.
10 // Permission is hereby granted to everyone, free of charge, to use, copy,
11 // modify, prepare derivative works of, publish, distribute, perform,
12 // sublicense, and/or sell copies of the Software, provided that the above
13 // copyright notice and disclaimer of warranty be included in all copies or
14 // substantial portions of this software.
17 #include <kern/pagealloc.h>
18 #include <kern/rmap.h>
19 #include <kern/pagetable.h>
// Allocation granularity of an RMapNode: sizeof(RMapNode) rounded up
// to a power of two.  unmap() and break_copy_on_write() depend on this
// by masking a raw list-link pointer with (rmap_node_len - 1) to
// recover the link's offset inside its RMapNode, which distinguishes a
// node's "head" link from its "tail" link.
22 static const ulong rmap_node_len =
23 1 << ll_get_order_round_up(sizeof(RMapNode));
// Record a reverse mapping: downstream VirtualArea "dsva" maps, at
// dsvaddr in its own address space, the page that upstream page table
// "usptbl" maps at usvaddr.  The new downstream RMapNode is linked
// onto the upstream node's list so later changes to the upstream
// mapping can find and update every downstream mapping.
//
// NOTE(review): this listing is an excerpt; interior source lines
// (braces, locking, intervening statements) are not visible here.
25 void RMapTable::map(VirtualArea *dsva, PageTable *usptbl,
26 u64 dsvaddr, u64 usvaddr)
// Look up the downstream node for dsvaddr; the "true" argument
// presumably requests creation if absent -- confirm against the
// rmap tree's lookup() signature.
28 RMapNode *dsrmap = dsva->aspace->page_table->
29 rmap_table.tree.lookup(dsvaddr, true);
33 dsrmap->vaddr = page_align(dsvaddr);
// The upstream node must already exist and must belong to usptbl.
38 RMapNode *usrmap = usptbl->rmap_table.tree.lookup(usvaddr);
40 assert(usrmap->va->aspace->page_table == usptbl);
// Insert the downstream node's head link right after the upstream
// node's head link...
42 usrmap->head.add_front(&dsrmap->head);
44 // FIXME: If it ends up being useful, link into the phys-page
// ...and keep the node's own tail link immediately after its head,
// so each node contributes a head/tail pair to the shared list.
48 dsrmap->head.add_front(&dsrmap->tail);
// Tear down every mapping reachable from the rmap node at "virtaddr":
// walk the node's head/tail list and, for each head link encountered,
// unmap that node's page-sized region from its owner's page table.
//
// NOTE(review): this listing is an excerpt; the do-loop header, the
// early-return body, and the list-advance statements (using
// "oldnode") are not visible here.
51 void RMapTable::unmap(u64 virtaddr)
// Serialize against all other rmap operations for the whole walk.
53 Lock::AutoLock autolock(rmap_lock);
54 RMapNode *head = tree.lookup(virtaddr);
// Nothing to do if there is no node here, or it has no VirtualArea.
56 if (!head || !head->va)
59 assert(head->vaddr == virtaddr);
61 Util::ListNoAutoInit *node = &head->head, *oldnode;
// RMapNodes are power-of-two sized, so masking the link pointer
// yields its offset within the node: head link or tail link.
64 ulong off = reinterpret_cast<ulong>(node) & (rmap_node_len - 1);
65 if (off == RMap::head_offset) {
66 RMapNode *rmap = node->listentry(RMapNode, head);
68 Region region = { rmap->vaddr,
// NOTE(review): "Arch::page_shift" used as a length looks suspicious;
// a one-page region would normally end at vaddr + Arch::page_size - 1
// (page_shift being the log2 of the page size).  Confirm intent.
69 rmap->vaddr + Arch::page_shift - 1 };
71 rmap->va->aspace->page_table->unmap(region);
// Any link that is not a head link must be a tail link.
74 assert(off == RMap::tail_offset);
// Stop once the walk returns to the root node's tail link.
80 } while (node != &head->tail);
// Break copy-on-write at "virtaddr": re-point every mapping on this
// page's rmap list at "new_page", clearing the PTE FaultOnWrite flag
// for mappings that are no longer COW while keeping it set for
// mappings downstream of a VirtualArea that is itself still
// FaultOnWrite.
//
// Caller must already hold rmap_lock (asserted below).
//
// NOTE(review): this listing is an excerpt; the do-loop header, some
// branch bodies (e.g. assignment of "still_cow"), and the
// list-advance statements are not visible here.
85 void RMapTable::break_copy_on_write(u64 virtaddr, Page *new_page)
87 assert(rmap_lock.held_by_curthread());
88 RMapNode *head = tree.lookup(virtaddr);
// Non-NULL while walking the downstreams of a mapping whose VA is
// still FaultOnWrite; reset when that mapping's tail is reached.
89 RMapNode *still_cow = NULL;
91 assert(head && head->va);
92 assert(head->vaddr == virtaddr);
94 // If there are sibling or upstream mappings of this page,
95 // detach the rmap list.
97 if (head->head.prev != &head->tail) {
// Splice the (head..tail) span out of the larger list...
98 head->head.prev->next = head->tail.next;
99 head->tail.next->prev = head->head.prev;
// ...and close our own span into a self-contained ring.
101 head->head.prev = &head->tail;
102 head->tail.next = &head->head;
105 assert(head->tail.next == &head->head);
106 Util::ListNoAutoInit *node = &head->head;
// Head link or tail link?  (Power-of-two node size lets the mask
// recover the link's offset within its RMapNode.)
109 ulong off = reinterpret_cast<ulong>(node) & (rmap_node_len - 1);
110 if (off == RMap::head_offset) {
111 RMapNode *rmap = node->listentry(RMapNode, head);
112 RegionWithOffset region;
114 region.start = rmap->vaddr;
// NOTE(review): "Arch::page_shift" used as a length looks suspicious;
// a one-page region would normally end at start + Arch::page_size - 1.
// Confirm against the matching expression in unmap().
115 region.end = rmap->vaddr + Arch::page_shift - 1;
// Physical target is the replacement (non-COW) page.
116 region.offset = page_to_phys(new_page);
118 PTEFlags flags = rmap->va->flags;
120 // The faulting mapping always has PTE FaultOnWrite cleared;
121 // downstream mappings have PTE FaultOnWrite cleared if they
122 // are not downstream of a different mapping with VA
123 // FaultOnWrite set.  Downstream mappings should never have
124 // PTE FaultOnWrite clear if VA FaultOnWrite is set; if the
125 // downstream mapping had been cow-broken, it would have been
126 // removed from this physpage's rmap list.
128 if (flags.FaultOnWrite && node != &head->head && !still_cow)
132 flags.FaultOnWrite = 1;
134 flags.FaultOnWrite = 0;
// Install the mapping at the new physical page with updated flags.
136 rmap->va->aspace->page_table->map(region, flags);
// Any link that is not a head link must be a tail link.
138 assert(off == RMap::tail_offset);
141 RMapNode *rmap = node->listentry(RMapNode, tail);
143 // We've finished the downstreams of a COW mapping,
144 // so stop marking pages as COW.
146 if (rmap == still_cow)
152 } while (node != &head->tail);
// Global lock serializing reverse-map operations: acquired via
// AutoLock in RMapTable::unmap() and asserted held by the caller in
// RMapTable::break_copy_on_write().
157 Lock::Lock rmap_lock;