--- /dev/null
+#ifndef _KERN_RMAP_H
+#define _KERN_RMAP_H
+
+#include <kern/mem.h>
+#include <kern/radix.h>
+
+namespace Mem {
+ // One node in a reverse-mapping chain: records a single virtual
+ // mapping of a page so that all mappings can be found (and broken)
+ // from the page side, e.g. for unmap and copy-on-write.
+ // NOTE(review): the reason for *two* list links (head and tail) is
+ // not visible in this header -- confirm the chain topology against
+ // the rmap implementation file.
+ struct RMapNode {
+ u64 vaddr;  // Virtual address of this mapping.
+ VirtualArea *va;  // The VirtualArea this mapping belongs to.
+ // Intrusive list links; recovered from list pointers via the
+ // RMap::head_offset / RMap::tail_offset byte offsets below.
+ Util::ListNoAutoInit head, tail;
+ };
+
+ // Byte offsets of RMapNode's intrusive list links, for
+ // container_of-style recovery of the enclosing RMapNode from a
+ // bare list-node pointer.
+ // NOTE(review): offsetof on a non-standard-layout type is only
+ // conditionally supported in C++ -- this is fine only as long as
+ // Util::ListNoAutoInit keeps RMapNode standard-layout; confirm.
+ namespace RMap {
+ enum {
+ head_offset = offsetof(RMapNode, head),
+ tail_offset = offsetof(RMapNode, tail),
+ };
+ }
+
+ // This lock protects the rmap chains and rmap tables. It also makes
+ // atomic the PageTable::get_mapping, RMapTable::map, PageTable::map
+ // sequence.
+ //
+ // OPT: This lock is acquired on all map/unmap activity; if/when this
+ // turns out to be a significant bottleneck, finer-grained locking can
+ // be used. I decided against doing it now because it would be
+ // somewhat complicated (but I believe do-able) to avoid all races,
+ // and I'd like to move on to implementing other things for now.
+
+ extern Lock::Lock rmap_lock;
+ class Page;
+
+ // Reverse-mapping table: maps a virtual address to the chain of
+ // RMapNodes describing everywhere that address's page is mapped,
+ // stored in a radix tree keyed by u64 virtual address.
+ class RMapTable {
+ Util::RadixTree<RMapNode, u64> tree;
+
+ public:
+ // Record that upstream_vaddr in upstream_ptbl has been mapped at
+ // virtaddr in downstream_va's address space.
+ // rmap_lock must be held.
+ static void map(VirtualArea *downstream_va, PageTable *upstream_ptbl,
+ u64 virtaddr, u64 upstream_vaddr);
+
+ // Remove the reverse mapping for virtaddr.
+ // NOTE(review): presumably rmap_lock must be held here as well,
+ // by symmetry with map() -- confirm in the implementation.
+ void unmap(u64 virtaddr);
+
+ // Handle a copy-on-write for the specified page and all downstream
+ // mappings. All such mappings are set to the new page, and
+ // FaultOnWrite is cleared.
+ // NOTE(review): lock requirement not stated here; confirm whether
+ // the caller must hold rmap_lock.
+ void break_copy_on_write(u64 virtaddr, Page *new_page);
+ };
+};
+
+#endif