#include <System/Mem.h>
#include <kern/kernel.h>
+
#include <arch/mem.h>
#include <arch/addrs.h>
using System::Mem::AccessFlags;
union PTEFlags {
+ enum {
+ valid = 0x001,
+ writeable = 0x002,
+ readable = 0x004,
+ executable = 0x008,
+ user = 0x010,
+ accessed = 0x020,
+ dirty = 0x040,
+ uncached = 0x080,
+ faultonwrite = 0x100,
+ addressonly = 0x200,
+ protectedmap = 0x400,
+ };
+
struct {
// This must be kept in sync with include/kern/generic-pte.h
ulong User:1;
ulong Accessed:1;
ulong Dirty:1;
+ ulong Uncached:1;
// If set, then on a write access, the page is copied and this
// address space gets the new, anonymous version. The rmap list
// is then traversed to update any downstream mappings.
ulong FaultOnWrite:1;
+ // The address itself is being mapped, not the page located
+ // there. Do not manipulate page reference counts. This bit
+ // does not get propagated during copy-on-write.
+ ulong AddressOnly:1;
+
// VArea Only:
// Do not allow the user to unmap or modify flags.
// Used for the shared user/kernel mappings.
ulong Protected:1;
#elif defined(BITFIELD_BE)
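+ // Pad from the top of the word so that the 11 flag bits land in
+ // the low-order bits of the big-endian bitfield layout.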
- ulong pad:_LL_LONG_BYTES * 8 - 9;
+ ulong pad:_LL_LONG_BYTES * 8 - 11;
ulong Protected:1;
+ ulong AddressOnly:1;
ulong FaultOnWrite:1;
+ ulong Uncached:1;
ulong Dirty:1;
ulong Accessed:1;
ulong User:1;
}
};
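+
+ // A minimal usage sketch (illustrative only, not added by this
+ // patch; the helper name is hypothetical): the enum masks and the
+ // bitfield view alias the same bits, so flags can be composed as a
+ // mask and read back by name through the struct.
+
+ static inline ulong pte_user_flags(bool writeable, bool executable)
+ {
+ ulong flags = PTEFlags::valid | PTEFlags::readable | PTEFlags::user;
+
+ if (writeable)
+ flags |= PTEFlags::writeable;
+ if (executable)
+ flags |= PTEFlags::executable;
+
+ return flags;
+ }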
- // If the padded size of this changes, update rmap_shift
- // and the alignment check in RMapTable::unmap.
-
- // If the layout of this changes, update the offsets below.
- union RMapNode {
- struct {
- u64 vaddr;
- VirtualArea *va;
- Util::ListNoAutoInit head, tail;
- };
-
- long pad[8];
-
- enum {
- head_offset = sizeof(u64) + sizeof(void *),
- tail_offset = head_offset + sizeof(void *) * 2,
- };
- };
-
- // This lock protects the rmap chains and rmap tables. It also makes
- // atomic the PageTable::get_mapping, RMapTable::map, PageTable::map
- // sequence.
- //
- // OPT: This lock is acquired on all map/unmap activity; if/when this
- // turns out to be a significant bottleneck, finer-grained locking can
- // be used. I decided against doing it now because it would be
- // somewhat complicated (but I believe do-able) to avoid all races,
- // and I'd like to move on to implementing other things for now.
-
- extern Lock::Lock rmap_lock;
- class Page;
-
- class RMapTable {
- void *toplevel;
- int toplevel_shift;
-
- RMapNode *get_rmap(u64 virtaddr, bool add = false);
-
- public:
- RMapTable();
-
- // rmap_lock must be held.
- static void map(VirtualArea *downstream_va, PageTable *upstream_ptbl,
- u64 virtaddr, u64 upstream_vaddr);
-
- void unmap(u64 virtaddr);
-
- // Handle a copy-on-write for the specified page and all downstream
- // mappings. All such mappings are set to the new page, and
- // FaultOnWrite is cleared.
-
- void break_copy_on_write(u64 virtaddr, Page *new_page);
- };
- class PageTable {
- public:
- void *toplevel;
- RMapTable rmap_table;
- const bool is_process;
-
- typedef Mem::PTEFlags Flags;
- typedef System::Mem::Region Region;
- typedef System::Mem::RegionWithOffset RegionWithOffset;
+ struct BadPageFault {
+ MemoryFault_ns::Cause cause;
- PageTable(bool process) : is_process(process)
- {
- }
-
- virtual ~PageTable()
+ BadPageFault(MemoryFault_ns::Cause CAUSE) : cause(CAUSE)
{
}
-
- // Region is virtual, offset is physical
- virtual void map(RegionWithOffset region, Flags flags) = 0;
- virtual void unmap(Region region) = 0;
-
- // Sets the flags which are set in mask to their value in flags.
- // Flags not set in mask are untouched.
-
- virtual void set_flags(Region region, Flags flags, Flags mask) = 0;
-
- // Returns the physical address and flags associated with a given
- // virtual address. If flags.Valid is not set, then phys and all
- // other flags are undefined. This function is mainly used for
- // propagating stacked aspace PTEs.
-
- virtual void get_mapping(u64 vaddr, u64 *phys, Flags *flags) = 0;
-
- virtual void get_size(u64 *size) = 0;
-
- // This is called when a PTE is replaced. It handles refcounting,
- // dirty page queueing, and TLB invalidation. vaddr is only
- // valid for process address spaces, so it doesn't need to be
- // 64-bit (except on 64-bit hardware, of course). When it is
- // known that only flags are changing, set no_release so that
- // the page refcount is not decremented.
-
- void kill_pte(ulong vaddr, u64 physaddr, bool dirty, bool valid,
- bool no_release = false);
- };
-
- struct BadPageFault {
};
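+
+ // Sketch of the intended flow (the helper below is hypothetical;
+ // the real callers are the mapping and fault paths): code that
+ // detects a disallowed access throws BadPageFault, and the catcher
+ // reports bpf.cause as a MemoryFault instead of dumping registers.
+
+ static inline void assert_mappable(bool ok, MemoryFault_ns::Cause CAUSE)
+ {
+ if (!ok)
+ throw BadPageFault(CAUSE);
+ }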
class ASpaceMappable : public Mappable {
VirtualAreaTree varea_tree;
Util::List varea_list;
Lock::Lock lock;
- bool is_process;
+
// This defines the start and end of the aspace; mappings outside
// this range may not be done, and will not be returned by
// get_free_region(). For process aspaces, this goes from
void break_copy_on_write(VirtualArea *va, u64 vaddr, u64 phys);
bool map(VirtualArea *va, u64 vaddr, PTEFlags reqflags);
+ protected:
+ bool is_process;
+
public:
#include <servers/mem/addrspace/Mem/AddrSpace.h>
ASpaceMappable mappable;
PageTable *page_table;
- AddrSpace(bool process);
- AddrSpace(void *page_table);
+ AddrSpace(PageTable *ptbl = NULL);
- // Returns true if the fault was "good"; otherwise, the caller
- // should dump regs. exec should only be used if the CPU
- // implements per-page exec protection; otherwise, treat it
- // as a read.
+ // Returns negative if the fault was "good"; otherwise, a fault
+ // code corresponding to MemoryFault.Cause is returned. exec
+ // should only be set if the CPU implements per-page exec
+ // protection; otherwise, treat it as a read.
- bool handle_fault(ulong addr, bool write, bool exec, bool user);
+ int handle_fault(ulong addr, bool write, bool exec, bool user);
void get_mappable(IMappable *ma);
void clone(IAddrSpace *addrspace, u8 clone_is_real);
- enum {
- map_user,
- map_protected,
- map_kernel
- };
-
void map(IMappable ma, Region region, u64 *vstart, MapFlags mflags,
- int map_type = map_user);
+ PTEFlags set = 0, PTEFlags clear = 0);
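+
+ // e.g. (assumption, not taken from this patch: the old
+ // map_protected map_type is now expressed as an explicit mask):
+ //
+ // map(ma, region, &vstart, mflags, PTEFlags::protectedmap, 0);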
void unmap(Region region, bool from_kernel = false);
void set_mapflags(Region region, MapFlags mflags);
void get_page_size(u32 *page_size);
void get_min_align(u32 *min_align);
-
- void get_size(u64 *size)
- {
- page_table->get_size(size);
- }
-
- friend void Arch::set_aspace(AddrSpace *aspace);
+ void get_size(u64 *size);
+
friend class ASpaceMappable;
};
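+
+ // Caller sketch (hypothetical helper; the real trap path lives in
+ // the arch fault handlers). Per the handle_fault() comment above,
+ // a negative return means the fault was resolved; anything else is
+ // a MemoryFault.Cause code that should be reported.
+
+ static inline bool fault_was_handled(AddrSpace *as, ulong addr,
+ bool write, bool exec, bool user)
+ {
+ return as->handle_fault(addr, write, exec, user) < 0;
+ }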
-
+
extern Factory addr_space_factory, proc_addr_space_factory;
- using ::System::RunTime::orbmm;
-
static inline bool page_aligned(u64 addr)
{
return !(addr & (u64)(Arch::page_size - 1));