X-Git-Url: http://git.buserror.net/cgi-bin/gitweb.cgi?p=polintos%2Fscott%2Fpriv.git;a=blobdiff_plain;f=kernel%2Fmem%2Frmap.cc;h=14ca7f78348bb5ec121ea70f0bec42802e2d5355;hp=d9a82e36eecb750e88f5addd07e1a9de7fd7d1c1;hb=b4bfc871337ca32ce83407916a87db2524729ca9;hpb=77bf9a95a836b14a243953e1fbd28c7c1106c59a

diff --git a/kernel/mem/rmap.cc b/kernel/mem/rmap.cc
index d9a82e3..14ca7f7 100644
--- a/kernel/mem/rmap.cc
+++ b/kernel/mem/rmap.cc
@@ -15,101 +15,18 @@
 #include 
 #include 
-#include 
+#include 
+#include 
 
 namespace Mem {
-	using Util::round_up;
-
-	// static uint rmaps_per_page = Arch::page_size / sizeof(RMapNode);
+	static const ulong rmap_node_len =
+		1 << ll_get_order_round_up(sizeof(RMapNode));
 
-	// If RMapNode's length becomes something other than 8 longs,
-	// change "3" to the base-2 log of the number of longs.
-
-	static int rmap_shift = Arch::page_shift - _LL_LONG_LOGBYTES - 3;
-
-	// static int rmap_dirs_per_page = Arch::page_size / sizeof(RMapNode *);
-	static int rmap_dir_shift = Arch::page_shift - _LL_LONG_LOGBYTES;
-	static int rmap_lastlevel_shift = rmap_shift + Arch::page_shift;
-
-	static int rmap_dir_levels = (64 - rmap_lastlevel_shift - 1)
-	                             / rmap_dir_shift;
-
-	static int rmap_toplevel_shift = rmap_dir_shift * rmap_dir_levels
-	                               + rmap_lastlevel_shift;
-
-	static inline u64 addr_to_dir_offset(u64 addr, int shift)
-	{
-		return (addr >> shift) & ((1ULL << rmap_dir_shift) - 1);
-	}
-
-	static inline u64 addr_to_offset(u64 addr)
-	{
-		return (addr >> Arch::page_shift) & ((1ULL << rmap_shift) - 1);
-	}
-
-	RMapTable::RMapTable()
-	{
-		// All RMap tables must have at least one dir level, in order to
-		// simplify the code.  If it turns out that a lot of memory is
-		// wasted due to this, the code could be made more complex in order
-		// to allow one-level rmap tables.  Currently, on 4KiB-page systems,
-		// a page is wasted per under-512KiB aspace (32-bit) or under-256KiB
-		// aspace (64-bit).
-		//
-		// Dynamic levels would have to be implemented in generic-pte for
-		// the wastage here to be meaningful.
-
-		toplevel_shift = rmap_lastlevel_shift;
-		toplevel = Mem::alloc_pages(1);
-		bzero(toplevel, Arch::page_size);
-	}
-
-	RMapNode *RMapTable::get_rmap(u64 virtaddr, bool add)
-	{
-		assert(rmap_lock.held_by_curthread());
-		int shift = toplevel_shift;
-		void *table = toplevel;
-
-		while (toplevel_shift < rmap_toplevel_shift &&
-		       (virtaddr >> (toplevel_shift + rmap_dir_shift)))
-		{
-			if (!add)
-				return NULL;
-
-			shift += rmap_dir_shift;
-			toplevel_shift += rmap_dir_shift;
-
-			toplevel = Mem::alloc_pages(1);
-			bzero(toplevel, Arch::page_size);
-
-			static_cast<void **>(toplevel)[0] = table;
-			table = toplevel;
-		}
-
-		while (shift >= rmap_lastlevel_shift) {
-			int off = addr_to_dir_offset(virtaddr, shift);
-			void *new_table = static_cast<void **>(table)[off];
-
-			if (!new_table) {
-				new_table = Mem::alloc_pages(1);
-				bzero(new_table, Arch::page_size);
-				static_cast<void **>(table)[off] = new_table;
-			}
-
-			table = new_table;
-			shift -= rmap_dir_shift;
-		}
-
-		assert(shift + rmap_dir_shift - rmap_shift == Arch::page_shift);
-
-		int off = addr_to_offset(virtaddr);
-		return &static_cast<RMapNode *>(table)[off];
-	}
-
 	void RMapTable::map(VirtualArea *dsva, PageTable *usptbl,
 	                    u64 dsvaddr, u64 usvaddr)
 	{
 		RMapNode *dsrmap = dsva->aspace->page_table->
-		                   rmap_table.get_rmap(dsvaddr, true);
+		                   rmap_table.tree.lookup(dsvaddr, true);
 
 		assert(!dsrmap->va);
 		dsrmap->va = dsva;
@@ -118,7 +35,7 @@ namespace Mem {
 		dsrmap->tail.init();
 
 		if (usptbl) {
-			RMapNode *usrmap = usptbl->rmap_table.get_rmap(usvaddr);
+			RMapNode *usrmap = usptbl->rmap_table.tree.lookup(usvaddr);
 			assert(usrmap);
 			assert(usrmap->va->aspace->page_table == usptbl);
 
@@ -134,7 +51,7 @@ namespace Mem {
 	void RMapTable::unmap(u64 virtaddr)
 	{
 		Lock::AutoLock autolock(rmap_lock);
-		RMapNode *head = get_rmap(virtaddr);
+		RMapNode *head = tree.lookup(virtaddr);
 
 		if (!head || !head->va)
 			return;
@@ -144,8 +61,8 @@ namespace Mem {
 		Util::ListNoAutoInit *node = &head->head, *oldnode;
 
 		do {
-			ulong off = reinterpret_cast<ulong>(node) & (sizeof(RMapNode) - 1);
-			if (off == RMapNode::head_offset) {
+			ulong off = reinterpret_cast<ulong>(node) & (rmap_node_len - 1);
+			if (off == RMap::head_offset) {
 				RMapNode *rmap = node->listentry(RMapNode, head);
 
 				Region region = { rmap->vaddr,
@@ -154,7 +71,7 @@ namespace Mem {
 				rmap->va->aspace->page_table->unmap(region);
 				rmap->va = NULL;
 			} else {
-				assert(off == RMapNode::tail_offset);
+				assert(off == RMap::tail_offset);
 			}
 
 			oldnode = node;
@@ -168,7 +85,7 @@ namespace Mem {
 	void RMapTable::break_copy_on_write(u64 virtaddr, Page *new_page)
 	{
 		assert(rmap_lock.held_by_curthread());
-		RMapNode *head = get_rmap(virtaddr);
+		RMapNode *head = tree.lookup(virtaddr);
 		RMapNode *still_cow = NULL;
 
 		assert(head && head->va);
@@ -189,8 +106,8 @@ namespace Mem {
 		Util::ListNoAutoInit *node = &head->head;
 
 		do {
-			ulong off = reinterpret_cast<ulong>(node) & (sizeof(RMapNode) - 1);
-			if (off == RMapNode::head_offset) {
+			ulong off = reinterpret_cast<ulong>(node) & (rmap_node_len - 1);
+			if (off == RMap::head_offset) {
 				RMapNode *rmap = node->listentry(RMapNode, head);
 
 				RegionWithOffset region;
@@ -218,7 +135,7 @@ namespace Mem {
 					rmap->va->aspace->page_table->map(region, flags);
 			} else {
-				assert(off == RMapNode::tail_offset);
+				assert(off == RMap::tail_offset);
 
 				if (still_cow) {
 					RMapNode *rmap = node->listentry(RMapNode, tail);
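
Note on the rmap_node_len mask introduced by this patch: the list walks in unmap() and
break_copy_on_write() recover which embedded link (head or tail) a bare list pointer refers
to by masking off the pointer's low bits and comparing the result against RMap::head_offset
and RMap::tail_offset. That only works if every RMapNode occupies a power-of-two-sized,
equally aligned slot, so the old mask of sizeof(RMapNode) - 1, which was valid only while
the struct was manually kept at 8 longs, is replaced by rmap_node_len - 1, with the length
rounded up to a power of two via ll_get_order_round_up. The patch likewise replaces the
hand-rolled get_rmap() directory walk with a generic radix tree lookup (tree.lookup). The
following is a minimal, self-contained sketch of the offset trick under hypothetical names
(ListNode, Node, node_len, round_up_pow2, etc.); it illustrates the idea and is not the
kernel's actual code.

// Sketch of the embedded-link offset trick; all names here are
// hypothetical stand-ins, not the kernel's real types.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <new>

struct ListNode {
	ListNode *prev, *next;
};

struct Node {
	void *payload;
	ListNode head;   // first embedded link (plays the role of RMapNode::head)
	ListNode tail;   // second embedded link (plays the role of RMapNode::tail)
};

// Round a size up to a power of two; the patch's
// 1 << ll_get_order_round_up(sizeof(RMapNode)) computes the same thing.
constexpr std::size_t round_up_pow2(std::size_t x)
{
	std::size_t p = 1;
	while (p < x)
		p <<= 1;
	return p;
}

constexpr std::size_t node_len = round_up_pow2(sizeof(Node));
constexpr std::size_t head_offset = offsetof(Node, head);
constexpr std::size_t tail_offset = offsetof(Node, tail);

// If every Node sits in a node_len-aligned slot, the low bits of an
// embedded link's address are exactly its offset within the Node, so a
// bare ListNode pointer tells us which member it is...
static bool is_head_link(const ListNode *ln)
{
	std::uintptr_t off = reinterpret_cast<std::uintptr_t>(ln) & (node_len - 1);
	assert(off == head_offset || off == tail_offset);
	return off == head_offset;
}

// ...and lets us recover the containing Node (the kernel's listentry()
// macro serves this purpose in the diff above).
static Node *node_from_link(ListNode *ln)
{
	std::size_t off = is_head_link(ln) ? head_offset : tail_offset;
	return reinterpret_cast<Node *>(reinterpret_cast<char *>(ln) - off);
}

int main()
{
	// The alignment guarantee must come from the allocator: the kernel
	// gets it from its page allocator; std::aligned_alloc stands in here.
	void *slot = std::aligned_alloc(node_len, node_len);
	Node *n = new (slot) Node{};

	assert(is_head_link(&n->head));
	assert(!is_head_link(&n->tail));
	assert(node_from_link(&n->tail) == n);

	n->~Node();
	std::free(slot);
	return 0;
}

With the mask derived from the rounded-up length instead of the raw struct size, RMapNode
can grow or shrink without silently breaking the walk, which is what the old "change '3' to
the base-2 log of the number of longs" comment guarded against by hand.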