size is greater than specified in this field.
objlist_ptr 1 Pointer to the object list
- objlist_len 2 Length of the object list
+ objlist_len 2 Length of the object list, in IDs
The object list is a special segment that contains object IDs
rather than arbitrary data. Each object ID will be translated
into the destination ID-space, allocating new IDs when
- necessary.
+ necessary. The IDs themselves are 32 bits each, unsigned,
+ regardless of the pointer size.
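+ For example, an object list with an objlist_len of 4 occupies
+ 16 bytes whether the pointer size is 32 or 64 bits.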
num_segments 3 Number of data segments
segment.ptr 4+n*4 Pointer to data segment
segment.len 5+n*4 Length of data segment in bytes
segment.flags 6+n*4 Attributes of data segment
- reserved 7+n*4 Reserved for future use, and to
- ensure power-of-two indexing
+ reserved 7+n*4 Reserved for future use, and for
+ power-of-two indexing
Each segment describes data being transmitted to and/or from the
callee. For out segments, the caller may designate a buffer to hold
struct ParamInfoBlock {
uintptr_t buffer_size;
- void **objlist_ptr;
+ uintptr_t *objlist_ptr;
uintptr_t objlist_len;
- void **ptrlist_ptr;
- uintptr_t ptrlist_len;
uintptr_t num_segments;
struct Segment {
void *ptr;
uintptr_t len;
uintptr_t flags;
+ uintptr_t reserved;
} segments[0];
};
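+
+ // An illustrative sketch (not part of this interface): a caller
+ // passing one object ID and one data segment might build a PIB
+ // along these lines, where "id", "ids", and "buf" are hypothetical:
+ //
+ // struct { ParamInfoBlock pib; ParamInfoBlock::Segment seg; } p;
+ //
+ // p.pib.objlist_ptr = ids;             // array of 32-bit object IDs
+ // p.pib.objlist_len = 1;               // length in IDs, not bytes
+ // p.pib.num_segments = 1;
+ // p.pib.segments[0].ptr = buf;
+ // p.pib.segments[0].len = sizeof(buf);
+ // p.pib.segments[0].flags = 0;         // placeholder; real flags TBD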
}
//
// To throw an IDL exception of type Foo, do this:
// throw_idl(Foo, args, to, foo);
+//
+// FIXME: Instead, maybe a static throw method with noinline and
+// builtin_return_address.
#ifndef POLINTOS_NO_THROW_IDL
#define throw_idl(T, args...) do { \
Priv::tss.ss0 = 8;
asm volatile("ltr %w0" : : "r" (0x18) : "memory");
- init_thread->addr_space = new Mem::AddrSpace(x86_init_ptbl_l2);
+ init_thread->addr_space = new Mem::ProcAddrSpace(x86_init_ptbl_l2);
init_thread->active_addr_space = init_thread->addr_space;
}
#include <kern/pagetable.h>
namespace Arch {
- void set_aspace(Mem::AddrSpace *aspace)
+ void set_aspace(Mem::ProcAddrSpace *aspace)
{
u32 cr3 = Mem::kvirt_to_phys(aspace->page_table->toplevel);
asm volatile("movl %0, %%cr3" : : "r" (cr3) : "memory");
}
}
- void Thread::set_aspace(Mem::AddrSpace *aspace)
+ void Thread::set_aspace(Mem::ProcAddrSpace *aspace)
{
// FIXME: lock thread against scheduling; this temporary method should
// be gone before SMP anyway.
}
namespace Mem {
- class AddrSpace;
+ class ProcAddrSpace;
}
namespace Arch {
};
void switch_thread(Threads::Thread *dest, Threads::Thread *src);
- void set_aspace(Mem::AddrSpace *aspace);
+ void set_aspace(Mem::ProcAddrSpace *aspace);
namespace Priv {
struct TSS {
--- /dev/null
+// This is a generic pagetable implementation that most architectures
+// should be able to use as is, though architectures with weird paging
+// hardware can provide their own implementation. It corresponds to
+// mem/pagetable.cc.
+
+#ifndef _KERN_GENERIC_PAGETABLE_H
+#define _KERN_GENERIC_PAGETABLE_H
+
+#include <kern/mem.h>
+#include <util/lock.h>
+#include <arch/pagetable.h>
+#include <kern/pagetable.h>
+
+namespace Mem {
+ template<typename PTE>
+ class PageTableImpl : public PageTable {
+ public:
+ typedef Mem::PTEFlags Flags;
+ typedef System::Mem::Region Region;
+ typedef System::Mem::RegionWithOffset RegionWithOffset;
+ typedef typename PTE::VirtAddr VirtAddr;
+ typedef typename PTE::DirPTE DirPTE;
+
+ private:
+		// Lock ordering: the lock of any page table may nest inside
+		// (i.e. be acquired while holding) the lock of any aspace.
+
+ Lock::Lock lock;
+
+ // For non-process aspaces, the number of levels may be more or
+ // less than what the hardware provides (in particular, large file
+ // mappings on 32-bit targets will need more levels). For process
+ // aspaces, num_levels must equal PTE::num_levels. Levels for
+ // non-process address spaces can be added dynamically as needed.
+ // Non-proc aspaces may also use a different PTE format.
+
+ int num_levels;
+ int toplevel_shift, lastlevel_shift;
+
+ static uint pages_per_table()
+ {
+ return 1 << PTE::shift_per_level;
+ }
+
+ static uint pages_per_dtable()
+ {
+ return 1 << DirPTE::shift_per_level;
+ }
+
+ void end_map(RegionWithOffset region, PTE flags, void *table);
+
+ void end_unmap(Region region, void *table);
+
+ void end_set_flags(Region region, PTE flags, PTE mask, void *table);
+
+ void rec_map(RegionWithOffset region, PTE flags,
+ void *table, int shift);
+
+ void rec_unmap(Region region, void *table, int shift);
+
+ void rec_set_flags(Region region, PTE flags,
+ PTE mask, void *table, int shift);
+
+ public:
+ PageTableImpl(bool process);
+ PageTableImpl(void *table);
+
+ virtual ~PageTableImpl();
+
+ virtual void map(RegionWithOffset region, Flags flags);
+ virtual void unmap(Region region);
+ virtual void set_flags(Region region, Flags flags, Flags mask);
+ virtual void get_mapping(u64 addr, u64 *phys, Flags *flags);
+
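+		// Note that these sizes are in pages, not bytes: a stacked
+		// aspace spans the full 64-bit range, i.e. 2^(64 - page_shift)
+		// pages.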
+ virtual void get_size(u64 *size)
+ {
+ if (is_process)
+ *size = 1ULL << (PTE::num_levels * PTE::shift_per_level);
+ else
+ *size = 1ULL << (64 - PTE::page_shift);
+ }
+ };
+}
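+
+// For example (as in mem/addrspace.cc elsewhere in this patch), a
+// process aspace uses the native PTE format, while a stacked aspace
+// uses the generic 64-bit PTE:
+//
+//   page_table = new PageTableImpl<Arch::PTE>(true);
+//   page_table = new PageTableImpl<GenPTE>(false);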
+
+#endif
#include <System/Mem.h>
#include <kern/kernel.h>
+#include <kern/orb.h>
+
#include <arch/mem.h>
#include <arch/addrs.h>
}
};
- // If the padded size of this changes, update rmap_shift.
- // and the alignment check in RMapTable::unmap.
-
- // If the layout of this changes, update the offsets below.
- union RMapNode {
- struct {
- u64 vaddr;
- VirtualArea *va;
- Util::ListNoAutoInit head, tail;
- };
-
- long pad[8];
-
- enum {
- head_offset = sizeof(u64) + sizeof(void *),
- tail_offset = head_offset + sizeof(void *) * 2,
- };
- };
-
- // This lock protects the rmap chains and rmap tables. It also makes
- // atomic the PageTable::get_mapping, RMapTable::map, PageTable::map
- // sequence.
- //
- // OPT: This lock is acquired on all map/unmap activity; if/when this
- // turns out to be a significant bottleneck, finer-grained locking can
- // be used. I decided against doing it now because it would be
- // somewhat complicated (but I believe do-able) to avoid all races,
- // and I'd like to move on to implementing other things for now.
-
- extern Lock::Lock rmap_lock;
- class Page;
-
- class RMapTable {
- void *toplevel;
- int toplevel_shift;
-
- RMapNode *get_rmap(u64 virtaddr, bool add = false);
-
- public:
- RMapTable();
-
- // rmap_lock must be held.
- static void map(VirtualArea *downstream_va, PageTable *upstream_ptbl,
- u64 virtaddr, u64 upstream_vaddr);
-
- void unmap(u64 virtaddr);
-
- // Handle a copy-on-write for the specified page and all downstream
- // mappings. All such mappings are set to the new page, and
- // FaultOnWrite is cleared.
-
- void break_copy_on_write(u64 virtaddr, Page *new_page);
- };
-
- class PageTable {
- public:
- void *toplevel;
- RMapTable rmap_table;
- const bool is_process;
-
- typedef Mem::PTEFlags Flags;
- typedef System::Mem::Region Region;
- typedef System::Mem::RegionWithOffset RegionWithOffset;
-
- PageTable(bool process) : is_process(process)
- {
- }
-
- virtual ~PageTable()
- {
- }
-
- // Region is virtual, offset is physical
- virtual void map(RegionWithOffset region, Flags flags) = 0;
- virtual void unmap(Region region) = 0;
-
- // Sets the flags which are set in mask to their value in flags.
- // Flags not set in mask are untouched.
-
- virtual void set_flags(Region region, Flags flags, Flags mask) = 0;
-
- // Returns the physical address and flags associated with a given
- // virtual address. If flags.Valid is not set, then phys and all
- // other flags are undefined. This function is mainly used for
- // propagating stacked aspace PTEs.
-
- virtual void get_mapping(u64 vaddr, u64 *phys, Flags *flags) = 0;
-
- virtual void get_size(u64 *size) = 0;
-
- // This is called when a PTE is replaced. It handles refcounting,
- // dirty page queueing, and TLB invalidation. vaddr is only
- // valid for process address spaces, so it doesn't need to be
- // 64-bit (except on 64-bit hardware, of course). When it is
- // known that only flags are changing, set no_release so that
- // the page refcount is not decremented.
-
- void kill_pte(ulong vaddr, u64 physaddr, bool dirty, bool valid,
- bool no_release = false);
- };
struct BadPageFault {
};
VirtualAreaTree varea_tree;
Util::List varea_list;
Lock::Lock lock;
- bool is_process;
+
// This defines the start and end of the aspace; mappings outside
// this range may not be done, and will not be returned by
// get_free_region(). For process aspaces, this goes from
void break_copy_on_write(VirtualArea *va, u64 vaddr, u64 phys);
bool map(VirtualArea *va, u64 vaddr, PTEFlags reqflags);
+ protected:
+ bool is_process;
+
public:
#include <servers/mem/addrspace/Mem/AddrSpace.h>
ASpaceMappable mappable;
PageTable *page_table;
- AddrSpace(bool process);
- AddrSpace(void *page_table);
+ AddrSpace(PageTable *ptbl = NULL);
// Returns true if the fault was "good"; otherwise, the caller
// should dump regs. exec should only be used if the CPU
void get_page_size(u32 *page_size);
void get_min_align(u32 *min_align);
-
- void get_size(u64 *size)
- {
- page_table->get_size(size);
- }
-
- friend void Arch::set_aspace(AddrSpace *aspace);
+ void get_size(u64 *size);
+
friend class ASpaceMappable;
};
+
+ class ProcAddrSpace : public AddrSpace {
+ public:
+ ProcAddrSpace();
+ ProcAddrSpace(void *page_table);
+
+ ORB::IDSpace idspace;
+ };
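+
+	// The void * constructor is for bootup only; e.g. the x86 init
+	// code does: new Mem::ProcAddrSpace(x86_init_ptbl_l2).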
extern Factory addr_space_factory, proc_addr_space_factory;
#include <orb.h>
#include <util/list.h>
+#include <util/rbtree.h>
#include <System/Objects.h>
namespace Mem {
- class AddrSpace;
+ class ProcAddrSpace;
};
namespace Threads {
};
namespace ORB {
- typedef ulong ID;
+ typedef u32 ID;
struct CallFrame {
// Address Space and PC to return to
- Mem::AddrSpace *ret_aspace;
+ Mem::ProcAddrSpace *ret_aspace;
ulong ret_pc;
// Caller's PIB Pointer
CallFrame frames[0];
};
+
+ struct ObjectHdr;
+ struct Object;
+
+ typedef Util::RBTree<ObjectHdr, Object *, Object *> IDRMap;
+
+ struct ObjectHdr {
+ IDRMap::Node rbtree_node;
+ u32 id;
+
+ union {
+ struct {
+ u32 Pointer:1;
+ };
+
+ u32 flags;
+ };
+ };
+
+ struct Object : public ObjectHdr {
+ Mem::ProcAddrSpace *aspace;
+ uintptr_t entry;
+ };
+
+ struct ObjectPtr : public ObjectHdr {
+ Object *object;
+ };
+
+ class IDSpace {
+ // Reverse mapping of object pointers to local IDs
+ IDRMap id_rmap;
+
+ public:
+ Object *lookup(u32 id);
+ u32 rlookup(Object *obj);
+ };
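+
+	// Sketch of the intended use (the callers are added separately):
+	//
+	//   ORB::Object *obj = idspace.lookup(id);  // incoming ID -> object
+	//   u32 outid = idspace.rlookup(obj);       // object -> local ID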
}
#endif
-// This is a generic pagetable implementation that most architectures
-// should be able to use as is, though architectures with weird paging
-// hardware can provide their own implementation. It corresponds to
-// mem/pagetable.cc.
-
#ifndef _KERN_PAGETABLE_H
#define _KERN_PAGETABLE_H
#include <kern/mem.h>
-#include <util/lock.h>
-#include <arch/pagetable.h>
+#include <kern/rmap.h>
namespace Mem {
- template<typename PTE>
- class PageTableImpl : public PageTable {
+ class PageTable {
public:
+ void *toplevel;
+ RMapTable rmap_table;
+ const bool is_process;
+
typedef Mem::PTEFlags Flags;
typedef System::Mem::Region Region;
typedef System::Mem::RegionWithOffset RegionWithOffset;
- typedef typename PTE::VirtAddr VirtAddr;
- typedef typename PTE::DirPTE DirPTE;
-
- private:
- // The lock of any page table may nest in the lock of any
- // aspace.
- Lock::Lock lock;
-
- // For non-process aspaces, the number of levels may be more or
- // less than what the hardware provides (in particular, large file
- // mappings on 32-bit targets will need more levels). For process
- // aspaces, num_levels must equal PTE::num_levels. Levels for
- // non-process address spaces can be added dynamically as needed.
- // Non-proc aspaces may also use a different PTE format.
-
- int num_levels;
- int toplevel_shift, lastlevel_shift;
-
- static uint pages_per_table()
+ PageTable(bool process) : is_process(process)
{
- return 1 << PTE::shift_per_level;
}
-
- static uint pages_per_dtable()
+
+ virtual ~PageTable()
{
- return 1 << DirPTE::shift_per_level;
}
- void end_map(RegionWithOffset region, PTE flags, void *table);
-
- void end_unmap(Region region, void *table);
+ // Region is virtual, offset is physical
+ virtual void map(RegionWithOffset region, Flags flags) = 0;
+ virtual void unmap(Region region) = 0;
- void end_set_flags(Region region, PTE flags, PTE mask, void *table);
-
- void rec_map(RegionWithOffset region, PTE flags,
- void *table, int shift);
+ // Sets the flags which are set in mask to their value in flags.
+ // Flags not set in mask are untouched.
- void rec_unmap(Region region, void *table, int shift);
-
- void rec_set_flags(Region region, PTE flags,
- PTE mask, void *table, int shift);
-
- public:
- PageTableImpl(bool process);
- PageTableImpl(void *table);
+ virtual void set_flags(Region region, Flags flags, Flags mask) = 0;
- virtual ~PageTableImpl();
-
- virtual void map(RegionWithOffset region, Flags flags);
- virtual void unmap(Region region);
- virtual void set_flags(Region region, Flags flags, Flags mask);
- virtual void get_mapping(u64 addr, u64 *phys, Flags *flags);
-
- virtual void get_size(u64 *size)
- {
- if (is_process)
- *size = 1ULL << (PTE::num_levels * PTE::shift_per_level);
- else
- *size = 1ULL << (64 - PTE::page_shift);
- }
+ // Returns the physical address and flags associated with a given
+ // virtual address. If flags.Valid is not set, then phys and all
+ // other flags are undefined. This function is mainly used for
+ // propagating stacked aspace PTEs.
+
+ virtual void get_mapping(u64 vaddr, u64 *phys, Flags *flags) = 0;
+
+ virtual void get_size(u64 *size) = 0;
+
+ // This is called when a PTE is replaced. It handles refcounting,
+ // dirty page queueing, and TLB invalidation. vaddr is only
+ // valid for process address spaces, so it doesn't need to be
+ // 64-bit (except on 64-bit hardware, of course). When it is
+ // known that only flags are changing, set no_release so that
+ // the page refcount is not decremented.
+
+ void kill_pte(ulong vaddr, u64 physaddr, bool dirty, bool valid,
+ bool no_release = false);
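+
+		// E.g. a flags-only change, where the old page is kept, might be:
+		//   kill_pte(vaddr, oldphys, was_dirty, was_valid, true);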
};
}
--- /dev/null
+// Generic radix tree implementation. It's only kernel-specific because
+// it uses the kernel page allocator; each node takes up one page. The
+// depth of the tree is dynamically growable (but not currently
+// shrinkable).
+
+#ifndef _KERN_RADIX_H
+#define _KERN_RADIX_H
+
+#include <kern/types.h>
+#include <kern/pagealloc.h>
+#include <kern/compiler.h>
+
+namespace Util {
+ // Key must be an integer.
+ template <typename T, typename Key>
+ class RadixTree {
+ void *toplevel;
+ uint depth; // in bits
+
+ enum {
+			// Yuck... C++ doesn't allow function results to be used in
+			// constant expressions, even when the function could be
+			// evaluated at compile time, so these must be macros rather
+			// than enumerators.
+
+ #define RADIX_data_bits (ll_get_order_round_up(sizeof(T)))
+ #define RADIX_node_size (1 << RADIX_data_bits)
+
+ #define RADIX_final_shift (Arch::page_shift - RADIX_data_bits)
+ #define RADIX_entries_per_table (Arch::page_size / RADIX_node_size)
+
+ dir_shift = Arch::page_shift - _LL_LONG_LOGBYTES,
+ entries_per_dir = Arch::page_size / sizeof(void *)
+ };
+
+ static uint key_to_dir_offset(Key key, int shift)
+ {
+ return (key >> shift) & (entries_per_dir - 1);
+ }
+
+ static uint key_to_offset(Key key)
+ {
+ return key & (RADIX_entries_per_table - 1);
+ }
+
+ public:
+ T *lookup(Key key, bool add = false)
+ {
+ int shift = depth;
+ void *node = toplevel;
+ int new_shift = depth == 0 ? RADIX_final_shift : dir_shift;
+
+ while (unlikely(key_to_dir_offset(key, shift + new_shift) != 0)) {
+ if (!add)
+ return NULL;
+
+ toplevel = Mem::alloc_pages(1);
+ bzero(toplevel, Arch::page_size);
+
+ static_cast<void **>(toplevel)[0] = node;
+ node = toplevel;
+
+ shift += new_shift;
+ depth += new_shift;
+ new_shift = dir_shift;
+ }
+
+ while (shift >= RADIX_final_shift) {
+ int off = key_to_dir_offset(key, shift);
+ void *new_node = static_cast<void **>(node)[off];
+
+ if (!new_node) {
+ if (!add)
+ return NULL;
+
+ new_node = Mem::alloc_pages(1);
+ bzero(new_node, Arch::page_size);
+ static_cast<void **>(node)[off] = new_node;
+ }
+
+ shift -= dir_shift;
+ node = new_node;
+ }
+
+ assert(shift == RADIX_final_shift - dir_shift);
+ return static_cast<T *>(node) + key_to_offset(key);
+ }
+
+ RadixTree()
+ {
+ toplevel = Mem::alloc_pages(1);
+ bzero(toplevel, Arch::page_size);
+ depth = 0;
+ }
+ };
+}
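+
+// For example, kern/rmap.h (added in this patch) keys RMapNodes by
+// virtual address:
+//
+//   Util::RadixTree<RMapNode, u64> tree;
+//   RMapNode *n = tree.lookup(vaddr, true); // add a node if missing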
+
+#undef RADIX_data_bits
+#undef RADIX_node_size
+#undef RADIX_final_shift
+#undef RADIX_entries_per_table
+
+#endif
--- /dev/null
+#ifndef _KERN_RMAP_H
+#define _KERN_RMAP_H
+
+#include <kern/mem.h>
+#include <kern/radix.h>
+
+namespace Mem {
+ struct RMapNode {
+ u64 vaddr;
+ VirtualArea *va;
+ Util::ListNoAutoInit head, tail;
+ };
+
+ namespace RMap {
+ enum {
+ head_offset = offsetof(RMapNode, head),
+ tail_offset = offsetof(RMapNode, tail),
+ };
+ }
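+
+	// Because RMapNode is padded to a power-of-two size, code walking
+	// an rmap chain can mask a list-node pointer with (node size - 1)
+	// and compare the result against these offsets to tell a head from
+	// a tail (see RMapTable::unmap in mem/rmap.cc).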
+
+ // This lock protects the rmap chains and rmap tables. It also makes
+ // atomic the PageTable::get_mapping, RMapTable::map, PageTable::map
+ // sequence.
+ //
+ // OPT: This lock is acquired on all map/unmap activity; if/when this
+ // turns out to be a significant bottleneck, finer-grained locking can
+ // be used. I decided against doing it now because it would be
+ // somewhat complicated (but I believe do-able) to avoid all races,
+ // and I'd like to move on to implementing other things for now.
+
+ extern Lock::Lock rmap_lock;
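+
+	// Callers typically hold it via RAII, as in mem/rmap.cc:
+	//   Lock::AutoLock autolock(rmap_lock);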
+ class Page;
+
+ class RMapTable {
+ Util::RadixTree<RMapNode, u64> tree;
+
+ public:
+ // rmap_lock must be held.
+ static void map(VirtualArea *downstream_va, PageTable *upstream_ptbl,
+ u64 virtaddr, u64 upstream_vaddr);
+
+ void unmap(u64 virtaddr);
+
+ // Handle a copy-on-write for the specified page and all downstream
+ // mappings. All such mappings are set to the new page, and
+ // FaultOnWrite is cleared.
+
+ void break_copy_on_write(u64 virtaddr, Page *new_page);
+ };
+};
+
+#endif
extern int need_resched;
namespace Mem {
- class AddrSpace;
+ class ProcAddrSpace;
}
namespace Threads {
public:
Util::List threadlist_node;
- Mem::AddrSpace *addr_space, *active_addr_space;
+ Mem::ProcAddrSpace *addr_space, *active_addr_space;
enum {
name_len = 32
// FIXME: temp hack; use a Process later
- void set_aspace(Mem::AddrSpace *aspace);
+ void set_aspace(Mem::ProcAddrSpace *aspace);
};
enum {
#include <kern/mem.h>
#include <kern/paging.h>
+#include <kern/generic-pagetable.h>
#include <kern/pagetable.h>
#include <kern/pagealloc.h>
#include <kern/generic-pte.h>
void create(Object *obj)
{
- *obj = static_cast<IAddrSpace>(*(new AddrSpace(false)));
+ *obj = static_cast<IAddrSpace>(*(new AddrSpace));
}
};
void create(Object *obj)
{
- AddrSpace *as = new AddrSpace(true);
+ AddrSpace *as = new ProcAddrSpace;
Region region;
MapFlags mf = 0;
u64 vstart;
printf("vstart %llx\n", vstart);
as->map(anonmem, region, &vstart, mf);
- *obj = static_cast<IAddrSpace>(*(as));
+ *obj = static_cast<IAddrSpace>(*as);
}
};
AddrSpaceFactory real_addrspace_factory;
Factory addr_space_factory = real_addrspace_factory;
- AddrSpace::AddrSpace(bool process) : mappable(this)
+ AddrSpace::AddrSpace(PageTable *ptbl) : mappable(this)
{
init_iface();
- is_process = process;
+ is_process = false;
+ page_table = ptbl;
- // OPT: Allow optional use of the native PTE for stacked aspaces,
- // either because the native PTE is 64-bit, or because it's an
- // embedded system which does not need 64-bit storage.
-
- if (process)
- page_table = new PageTableImpl<Arch::PTE>(true);
- else
+ if (!ptbl)
page_table = new PageTableImpl<GenPTE>(false);
cached_free_region = Arch::user_start + Arch::page_size;
}
+ ProcAddrSpace::ProcAddrSpace() :
+ AddrSpace(new PageTableImpl<Arch::PTE>(true))
+ {
+ is_process = true;
+ }
+
// This should only be used once during bootup to initialize the
// kernel's address space with a static initial page table.
- AddrSpace::AddrSpace(void *ptbl_toplevel) : mappable(this)
+ ProcAddrSpace::ProcAddrSpace(void *ptbl_toplevel) :
+ AddrSpace(new PageTableImpl<Arch::PTE>(ptbl_toplevel))
{
- init_iface();
+ // FIXME: set cached_free_region to kernel virtual space
is_process = true;
- page_table = new PageTableImpl<Arch::PTE>(ptbl_toplevel);
-
- // FIXME: should be kernel virtual space
- cached_free_region = Arch::user_start + Arch::page_size;
}
void AddrSpace::get_mappable(IMappable *ma)
{
*min_align = Arch::page_mapping_min_align;
}
+
+ void AddrSpace::get_size(u64 *size)
+ {
+ page_table->get_size(size);
+ }
void Mappable::map(VirtualArea *varea)
{
void pagein(u64 vaddr, PTEFlags reqflags)
{
- // Doesn't need to do anything yet, though it may later
- // once high memory support is added.
}
void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
void pagein(u64 vaddr, PTEFlags reqflags)
{
- // Doesn't need to do anything yet, though it may later
- // once high memory support is added.
}
void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
#include <kern/mem.h>
#include <kern/pagealloc.h>
-#include <kern/pagetable.h>
+#include <kern/generic-pagetable.h>
#include <kern/generic-pte.h>
#include <lowlevel/atomic.h>
#include <util/misc.h>
#include <kern/mem.h>
#include <kern/pagealloc.h>
-#include <util/misc.h>
+#include <kern/rmap.h>
+#include <kern/pagetable.h>
namespace Mem {
- using Util::round_up;
- // static uint rmaps_per_page = Arch::page_size / sizeof(RMapNode);
+ static const ulong rmap_node_len =
+ 1 << ll_get_order_round_up(sizeof(RMapNode));
- // If RMapNode's length becomes something other than 8 longs,
- // change "3" to the base-2 log of the number of longs.
-
- static int rmap_shift = Arch::page_shift - _LL_LONG_LOGBYTES - 3;
-
- // static int rmap_dirs_per_page = Arch::page_size / sizeof(RMapNode *);
- static int rmap_dir_shift = Arch::page_shift - _LL_LONG_LOGBYTES;
- static int rmap_lastlevel_shift = rmap_shift + Arch::page_shift;
-
- static int rmap_dir_levels = (64 - rmap_lastlevel_shift - 1)
- / rmap_dir_shift;
-
- static int rmap_toplevel_shift = rmap_dir_shift * rmap_dir_levels
- + rmap_lastlevel_shift;
-
- static inline u64 addr_to_dir_offset(u64 addr, int shift)
- {
- return (addr >> shift) & ((1ULL << rmap_dir_shift) - 1);
- }
-
- static inline u64 addr_to_offset(u64 addr)
- {
- return (addr >> Arch::page_shift) & ((1ULL << rmap_shift) - 1);
- }
-
- RMapTable::RMapTable()
- {
- // All RMap tables must have at least one dir level, in order to
- // simplify the code. If it turns out that a lot of memory is
- // wasted due to this, the code could be made more complex in order
- // to allow one-level rmap tables. Currently, on 4KiB-page systems,
- // a page is wasted per under-512KiB aspace (32-bit) or under-256KiB
- // aspace (64-bit).
- //
- // Dynamic levels would have to be implemented in generic-pte for
- // the wastage here to be meaningful.
-
- toplevel_shift = rmap_lastlevel_shift;
- toplevel = Mem::alloc_pages(1);
- bzero(toplevel, Arch::page_size);
- }
-
- RMapNode *RMapTable::get_rmap(u64 virtaddr, bool add)
- {
- assert(rmap_lock.held_by_curthread());
- int shift = toplevel_shift;
- void *table = toplevel;
-
- while (toplevel_shift < rmap_toplevel_shift &&
- (virtaddr >> (toplevel_shift + rmap_dir_shift)))
- {
- if (!add)
- return NULL;
-
- shift += rmap_dir_shift;
- toplevel_shift += rmap_dir_shift;
-
- toplevel = Mem::alloc_pages(1);
- bzero(toplevel, Arch::page_size);
-
- static_cast<void **>(toplevel)[0] = table;
- table = toplevel;
- }
-
- while (shift >= rmap_lastlevel_shift) {
- int off = addr_to_dir_offset(virtaddr, shift);
- void *new_table = static_cast<void **>(table)[off];
-
- if (!new_table) {
- new_table = Mem::alloc_pages(1);
- bzero(new_table, Arch::page_size);
- static_cast<void **>(table)[off] = new_table;
- }
-
- table = new_table;
- shift -= rmap_dir_shift;
- }
-
- assert(shift + rmap_dir_shift - rmap_shift == Arch::page_shift);
-
- int off = addr_to_offset(virtaddr);
- return &static_cast<RMapNode *>(table)[off];
- }
-
void RMapTable::map(VirtualArea *dsva, PageTable *usptbl,
u64 dsvaddr, u64 usvaddr)
{
RMapNode *dsrmap = dsva->aspace->page_table->
- rmap_table.get_rmap(dsvaddr, true);
+ rmap_table.tree.lookup(dsvaddr, true);
assert(!dsrmap->va);
dsrmap->va = dsva;
dsrmap->tail.init();
if (usptbl) {
- RMapNode *usrmap = usptbl->rmap_table.get_rmap(usvaddr);
+ RMapNode *usrmap = usptbl->rmap_table.tree.lookup(usvaddr);
assert(usrmap);
assert(usrmap->va->aspace->page_table == usptbl);
void RMapTable::unmap(u64 virtaddr)
{
Lock::AutoLock autolock(rmap_lock);
- RMapNode *head = get_rmap(virtaddr);
+ RMapNode *head = tree.lookup(virtaddr);
if (!head || !head->va)
return;
Util::ListNoAutoInit *node = &head->head, *oldnode;
do {
- ulong off = reinterpret_cast<ulong>(node) & (sizeof(RMapNode) - 1);
- if (off == RMapNode::head_offset) {
+ ulong off = reinterpret_cast<ulong>(node) & (rmap_node_len - 1);
+ if (off == RMap::head_offset) {
RMapNode *rmap = node->listentry(RMapNode, head);
Region region = { rmap->vaddr,
rmap->va->aspace->page_table->unmap(region);
rmap->va = NULL;
} else {
- assert(off == RMapNode::tail_offset);
+ assert(off == RMap::tail_offset);
}
oldnode = node;
void RMapTable::break_copy_on_write(u64 virtaddr, Page *new_page)
{
assert(rmap_lock.held_by_curthread());
- RMapNode *head = get_rmap(virtaddr);
+ RMapNode *head = tree.lookup(virtaddr);
RMapNode *still_cow = NULL;
assert(head && head->va);
Util::ListNoAutoInit *node = &head->head;
do {
- ulong off = reinterpret_cast<ulong>(node) & (sizeof(RMapNode) - 1);
- if (off == RMapNode::head_offset) {
+ ulong off = reinterpret_cast<ulong>(node) & (rmap_node_len - 1);
+ if (off == RMap::head_offset) {
RMapNode *rmap = node->listentry(RMapNode, head);
RegionWithOffset region;
rmap->va->aspace->page_table->map(region, flags);
} else {
- assert(off == RMapNode::tail_offset);
+ assert(off == RMap::tail_offset);
if (still_cow) {
RMapNode *rmap = node->listentry(RMapNode, tail);
return &hdr->frames[0];
}
- return &hdr->frames[thread->orbstack_top += 1];
+ return &hdr->frames[++thread->orbstack_top];
+ }
+
+ u32 IDSpace::rlookup(Object *obj)
+ {
+#if 0
+		ObjectHdr *hdr = id_rmap.find(obj);
+
+ if (!hdr)
+ return 0;
+
+ return hdr->id;
+#endif
+ return 0;
}
}
-extern "C" void invoke_method(ulong objid, ulong methid,
- ParamInfoBlock *user_pib, ulong ret_pc)
+extern "C" void invoke_method(u32 objid, u32 methid, ParamInfoBlock *user_pib,
+ ulong ret_pc)
{
ParamInfoBlock pib = Arch::copyin(user_pib);
CallFrame *frame = new_frame(curthread);
frame->caller_user_pib = user_pib;
frame->ret_pc = ret_pc;
- printf("invoke_method: frame %p object %lx method %lx pib %p ret %lx\n",
+ printf("invoke_method: frame %p object %x method %x pib %p ret %lx\n",
frame, frame->object, frame->method, frame->caller_user_pib,
frame->ret_pc);
printf("aspace %p created\n", (void *)aspace);
- AddrSpace *kaspace = AddrSpace::classptr(aspace);
+ ProcAddrSpace *kaspace = static_cast<ProcAddrSpace *>(AddrSpace::classptr(aspace));
printf("kaspace %p\n", kaspace);