- // If the padded size of this changes, update rmap_shift
- // and the alignment check in RMapTable::unmap.
-
- // If the layout of this changes, update the offsets below.
- union RMapNode {
- struct {
- u64 vaddr;
- VirtualArea *va;
- Util::ListNoAutoInit head, tail;
- };
-
- long pad[8];
-
- enum {
- head_offset = sizeof(u64) + sizeof(void *),
- tail_offset = head_offset + sizeof(void *) * 2,
- };
- };
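-
- // A compile-time check along these lines (a sketch, not part of the
- // original: it assumes C++11 static_assert, offsetof from <cstddef>,
- // and that Util::ListNoAutoInit is two pointers, as the offsets above
- // imply) would catch drift between the enum values and the real layout:
-
- static_assert(offsetof(RMapNode, head) == RMapNode::head_offset,
-               "head_offset is out of sync with RMapNode's layout");
- static_assert(offsetof(RMapNode, tail) == RMapNode::tail_offset,
-               "tail_offset is out of sync with RMapNode's layout");
- static_assert(sizeof(RMapNode) == 8 * sizeof(long),
-               "padded size changed; update rmap_shift and the "
-               "alignment check in RMapTable::unmap");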
-
- // This lock protects the rmap chains and rmap tables. It also makes
- // the PageTable::get_mapping, RMapTable::map, PageTable::map sequence
- // atomic.
- //
- // OPT: This lock is acquired on all map/unmap activity; if/when it
- // turns out to be a significant bottleneck, finer-grained locking can
- // be used. I decided against doing that now because it would be
- // somewhat complicated (though I believe doable) to avoid all races,
- // and I'd like to move on to implementing other things.
-
- extern Lock::Lock rmap_lock;
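-
- // A sketch of the sequence the comment above refers to, showing why a
- // single hold of rmap_lock must span all three steps. The PageTable
- // signatures, downstream_ptbl, phys, and the lock()/unlock() methods
- // on Lock::Lock are hypothetical, for illustration only:
- //
- //   rmap_lock.lock();
- //
- //   // Nothing can map or unmap between the read and the two updates,
- //   // so the rmap chains and both page tables stay consistent.
- //   u64 phys = upstream_ptbl->get_mapping(upstream_vaddr);
- //   RMapTable::map(downstream_va, upstream_ptbl, virtaddr, upstream_vaddr);
- //   downstream_ptbl->map(virtaddr, phys);
- //
- //   rmap_lock.unlock();
-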
- class Page;
-
- class RMapTable {
- void *toplevel;
- int toplevel_shift;
-
- RMapNode *get_rmap(u64 virtaddr, bool add = false);
-
- public:
- RMapTable();
-
- // rmap_lock must be held.
- static void map(VirtualArea *downstream_va, PageTable *upstream_ptbl,
- u64 virtaddr, u64 upstream_vaddr);
-
- void unmap(u64 virtaddr);
-
- // Handle a copy-on-write fault for the specified page and all
- // downstream mappings. All such mappings are repointed to the new
- // page, and FaultOnWrite is cleared.
- void break_copy_on_write(u64 virtaddr, Page *new_page);
- };
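-
- // A sketch of a call site for break_copy_on_write (not the kernel's
- // real fault path: alloc_page, copy_page_contents, and the handler's
- // shape are hypothetical, it assumes Lock::Lock exposes lock()/unlock(),
- // and it assumes the caller takes rmap_lock, which protects the rmap
- // chains):
-
- Page *alloc_page();                             // hypothetical allocator
- void copy_page_contents(Page *dst, Page *src);  // hypothetical page copy
-
- void handle_write_fault(RMapTable *rmap, u64 virtaddr, Page *old_page)
- {
-     // Give the faulting mapping its own private copy of the page...
-     Page *new_page = alloc_page();
-     copy_page_contents(new_page, old_page);
-
-     // ...then repoint this mapping and all downstream mappings at the
-     // copy and clear FaultOnWrite, atomically with respect to other
-     // map/unmap activity.
-     rmap_lock.lock();
-     rmap->break_copy_on_write(virtaddr, new_page);
-     rmap_lock.unlock();
- }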