X-Git-Url: http://git.buserror.net/cgi-bin/gitweb.cgi?p=polintos%2Fscott%2Fpriv.git;a=blobdiff_plain;f=kernel%2Finclude%2Fkern%2Fmem.h;h=a143dfb7dcaa209f23085e13bf72f914697db246;hp=be628b4ccbb204adca4b0c63e1c764e44205b353;hb=1ac390fe1e18444008857b056c947710be9621a8;hpb=166dbb2f95c17d80568102b9f063c472e30d706b

diff --git a/kernel/include/kern/mem.h b/kernel/include/kern/mem.h
index be628b4..a143dfb 100644
--- a/kernel/include/kern/mem.h
+++ b/kernel/include/kern/mem.h
@@ -4,6 +4,7 @@
 #include
 #include
+#include
 #include
@@ -43,6 +44,20 @@ namespace Mem {
 	using System::Mem::AccessFlags;
 
 	union PTEFlags {
+		enum {
+			valid = 0x001,
+			writeable = 0x002,
+			readable = 0x004,
+			executable = 0x008,
+			user = 0x010,
+			accessed = 0x020,
+			dirty = 0x040,
+			uncached = 0x080,
+			faultonwrite = 0x100,
+			addressonly = 0x200,
+			protectedmap = 0x400,
+		};
+
 		struct {
 			// This must be kept in sync with include/kern/generic-pte.h
 
@@ -57,6 +72,7 @@ namespace Mem {
 			ulong User:1;
 			ulong Accessed:1;
 			ulong Dirty:1;
+			ulong Uncached:1;
 
 			// If set, then on a write access, the page is copied and this
 			// address space gets the new, anonymous version.  The rmap list
@@ -69,6 +85,12 @@ namespace Mem {
 
 			ulong FaultOnWrite:1;
 
+			// The address itself is being mapped, not the page located
+			// there.  Do not manipulate page reference counts.  This bit
+			// does not get propagated during copy-on-write.
+
+			ulong AddressOnly:1;
+
 			// VArea Only:
 			// Do not allow the user to unmap or modify flags.
 			// Used for the shared user/kernel mappings.
@@ -76,9 +98,11 @@ namespace Mem {
 
 			ulong Protected:1;
 #elif defined(BITFIELD_BE)
-			ulong pad:_LL_LONG_BYTES * 8 - 9;
+			ulong pad:_LL_LONG_BYTES * 8 - 11;
 			ulong Protected:1;
+			ulong AddressOnly:1;
 			ulong FaultOnWrite:1;
+			ulong Uncached:1;
 			ulong Dirty:1;
 			ulong Accessed:1;
 			ulong User:1;
@@ -198,108 +222,13 @@ namespace Mem {
 		}
 	};
 
-	// If the padded size of this changes, update rmap_shift.
-	// and the alignment check in RMapTable::unmap.
-
-	// If the layout of this changes, update the offsets below.
-	union RMapNode {
-		struct {
-			u64 vaddr;
-			VirtualArea *va;
-			Util::ListNoAutoInit head, tail;
-		};
-
-		long pad[8];
-
-		enum {
-			head_offset = sizeof(u64) + sizeof(void *),
-			tail_offset = head_offset + sizeof(void *) * 2,
-		};
-	};
-
-	// This lock protects the rmap chains and rmap tables.  It also makes
-	// atomic the PageTable::get_mapping, RMapTable::map, PageTable::map
-	// sequence.
-	//
-	// OPT: This lock is acquired on all map/unmap activity; if/when this
-	// turns out to be a significant bottleneck, finer-grained locking can
-	// be used.  I decided against doing it now because it would be
-	// somewhat complicated (but I believe do-able) to avoid all races,
-	// and I'd like to move on to implementing other things for now.
-
-	extern Lock::Lock rmap_lock;
 	class Page;
-
-	class RMapTable {
-		void *toplevel;
-		int toplevel_shift;
-
-		RMapNode *get_rmap(u64 virtaddr, bool add = false);
-
-	public:
-		RMapTable();
-
-		// rmap_lock must be held.
-		static void map(VirtualArea *downstream_va, PageTable *upstream_ptbl,
-		                u64 virtaddr, u64 upstream_vaddr);
-
-		void unmap(u64 virtaddr);
-
-		// Handle a copy-on-write for the specified page and all downstream
-		// mappings.  All such mappings are set to the new page, and
-		// FaultOnWrite is cleared.
-
-		void break_copy_on_write(u64 virtaddr, Page *new_page);
-	};
 
-	class PageTable {
-	public:
-		void *toplevel;
-		RMapTable rmap_table;
-		const bool is_process;
-
-		typedef Mem::PTEFlags Flags;
-		typedef System::Mem::Region Region;
-		typedef System::Mem::RegionWithOffset RegionWithOffset;
+	struct BadPageFault {
+		MemoryFault_ns::Cause cause;
 
-		PageTable(bool process) : is_process(process)
+		BadPageFault(MemoryFault_ns::Cause CAUSE) : cause(CAUSE)
 		{
 		}
-
-		virtual ~PageTable()
-		{
-		}
-
-		// Region is virtual, offset is physical
-		virtual void map(RegionWithOffset region, Flags flags) = 0;
-		virtual void unmap(Region region) = 0;
-
-		// Sets the flags which are set in mask to their value in flags.
-		// Flags not set in mask are untouched.
-
-		virtual void set_flags(Region region, Flags flags, Flags mask) = 0;
-
-		// Returns the physical address and flags associated with a given
-		// virtual address.  If flags.Valid is not set, then phys and all
-		// other flags are undefined.  This function is mainly used for
-		// propagating stacked aspace PTEs.
-
-		virtual void get_mapping(u64 vaddr, u64 *phys, Flags *flags) = 0;
-
-		virtual void get_size(u64 *size) = 0;
-
-		// This is called when a PTE is replaced.  It handles refcounting,
-		// dirty page queueing, and TLB invalidation.  vaddr is only
-		// valid for process address spaces, so it doesn't need to be
-		// 64-bit (except on 64-bit hardware, of course).  When it is
-		// known that only flags are changing, set no_release so that
-		// the page refcount is not decremented.
-
-		void kill_pte(ulong vaddr, u64 physaddr, bool dirty, bool valid,
-		              bool no_release = false);
-	};
-
-	struct BadPageFault {
 	};
 
 	class ASpaceMappable : public Mappable {
@@ -339,7 +268,7 @@ namespace Mem {
 		VirtualAreaTree varea_tree;
 		Util::List varea_list;
 		Lock::Lock lock;
-		bool is_process;
+
 		// This defines the start and end of the aspace; mappings outside
 		// this range may not be done, and will not be returned by
 		// get_free_region().  For process aspaces, this goes from
@@ -381,33 +310,29 @@ namespace Mem {
 		void break_copy_on_write(VirtualArea *va, u64 vaddr, u64 phys);
 		bool map(VirtualArea *va, u64 vaddr, PTEFlags reqflags);
 
+	protected:
+		bool is_process;
+
 	public:
 #include
 
 		ASpaceMappable mappable;
 		PageTable *page_table;
 
-		AddrSpace(bool process);
-		AddrSpace(void *page_table);
+		AddrSpace(PageTable *ptbl = NULL);
 
-		// Returns true if the fault was "good"; otherwise, the caller
-		// should dump regs.  exec should only be used if the CPU
-		// implements per-page exec protection; otherwise, treat it
-		// as a read.
+		// Returns negative if the fault was "good"; otherwise, a fault
+		// code corresponding to MemoryFault.Cause is returned.  exec
+		// should only be set if the CPU implements per-page exec
+		// protection; otherwise, treat it as a read.
 
-		bool handle_fault(ulong addr, bool write, bool exec, bool user);
+		int handle_fault(ulong addr, bool write, bool exec, bool user);
 
 		void get_mappable(IMappable *ma);
 		void clone(IAddrSpace *addrspace, u8 clone_is_real);
 
-		enum {
-			map_user,
-			map_protected,
-			map_kernel
-		};
-
 		void map(IMappable ma, Region region, u64 *vstart, MapFlags mflags,
-		         int map_type = map_user);
+		         PTEFlags set = 0, PTEFlags clear = 0);
 		void unmap(Region region, bool from_kernel = false);
 
 		void set_mapflags(Region region, MapFlags mflags);
@@ -416,16 +341,11 @@ namespace Mem {
 
 		void get_page_size(u32 *page_size);
 		void get_min_align(u32 *min_align);
-
-		void get_size(u64 *size)
-		{
-			page_table->get_size(size);
-		}
-
-		friend void Arch::set_aspace(AddrSpace *aspace);
+		void get_size(u64 *size);
+
 		friend class ASpaceMappable;
 	};
-
+
 	extern Factory addr_space_factory, proc_addr_space_factory;
 
 	using ::System::RunTime::orbmm;
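For context on the most visible interface change above: handle_fault() now returns an int rather than a bool, with a negative value meaning the fault was "good" (fixed up) and any other value being a MemoryFault.Cause code. Below is a minimal, self-contained sketch of how a trap handler might consume that convention. It is illustration only, not code from this repository: the Cause enumerator names, the on_page_fault() function, and the printf reporting are all assumptions.

#include <cstdio>

typedef unsigned long ulong;

namespace MemoryFault_ns {
	// Assumed cause values for illustration; the real enum is the
	// IDL-generated MemoryFault.Cause, which this header only names.
	enum Cause { no_page, protection };
}

// Minimal stand-in for Mem::AddrSpace, reduced to the new fault contract.
struct AddrSpace {
	int handle_fault(ulong addr, bool write, bool exec, bool user)
	{
		(void)addr; (void)write; (void)exec; (void)user;
		return MemoryFault_ns::no_page; // pretend no mapping was found
	}
};

// A trap handler checks the sign: negative means the fault was "good"
// (e.g. a copy-on-write or demand mapping was fixed up) and the thread
// can resume; anything non-negative describes a bad fault.
void on_page_fault(AddrSpace *as, ulong addr, bool write, bool exec, bool user)
{
	int cause = as->handle_fault(addr, write, exec, user);

	if (cause < 0)
		return; // good fault: resume the faulting thread

	std::printf("bad fault at 0x%lx, cause %d\n", addr, cause);
	// ...dump regs for kernel faults, or report the fault to the user
	// task, e.g. by constructing a BadPageFault(cause)...
}

int main()
{
	AddrSpace as;
	on_page_fault(&as, 0x1000, true, false, true);
	return 0;
}

Folding the cause into the return value, together with the new BadPageFault carrying a MemoryFault_ns::Cause, lets a single call site decide between resuming and reporting without querying the address space a second time.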