// other flags are undefined, and pagein() should be retried.
// rmap_lock must be held.
- virtual void get_entry(u64 vaddr, u64 *phys, PTEFlags *flags) = 0;
+ virtual void get_mapping(u64 vaddr, u64 *phys, PTEFlags *flags) = 0;
#include <servers/mem/addrspace/Mem/Mappable.h>
};
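
For reference, a minimal sketch of the retry contract described above,
seen from the caller's side. The lock()/unlock() helpers and the "ma"
pointer are placeholders, not API from this patch; vaddr and reqflags
are the caller's request.

    u64 phys;
    PTEFlags flags;

    for (;;) {
        // rmap_lock must be held across get_mapping and any map
        // that depends on its result.
        lock(rmap_lock);                 // placeholder primitive
        ma->get_mapping(vaddr, &phys, &flags);

        if (flags.Valid)
            break;      // phys and the other flags are defined;
                        // rmap_lock is still held, so a following
                        // map stays atomic with this lookup.

        // Not Valid: the other flags are undefined, so drop the
        // lock, bring the page in, and retry.
        unlock(rmap_lock);               // placeholder primitive
        ma->pagein(vaddr, reqflags);
    }
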
// This lock protects the rmap chains and rmap tables. It also makes
- // the PageTable::get_entry, RMapTable::map, PageTable::map
+ // the PageTable::get_mapping, RMapTable::map, PageTable::map
// sequence atomic.
//
// OPT: This lock is acquired on all map/unmap activity; if/when this
// other flags are undefined. This function is mainly used for
// propagating stacked aspace PTEs.
- virtual void get_entry(u64 vaddr, u64 *phys, Flags *flags) = 0;
+ virtual void get_mapping(u64 vaddr, u64 *phys, Flags *flags) = 0;
virtual void get_size(u64 *size) = 0;
// Unexported
virtual void pagein(u64 vaddr, PTEFlags reqflags);
- virtual void get_entry(u64 vaddr, u64 *phys, PTEFlags *flags);
+ virtual void get_mapping(u64 vaddr, u64 *phys, PTEFlags *flags);
friend class AddrSpace;
};
virtual void map(RegionWithOffset region, Flags flags);
virtual void unmap(Region region);
virtual void set_flags(Region region, Flags flags, Flags mask);
- virtual void get_entry(u64 addr, u64 *phys, Flags *flags);
+ virtual void get_mapping(u64 addr, u64 *phys, Flags *flags);
virtual void get_size(u64 *size)
{
new_page->release();
}
- void ASpaceMappable::get_entry(u64 vaddr, u64 *phys, PTEFlags *flags)
+ void ASpaceMappable::get_mapping(u64 vaddr, u64 *phys, PTEFlags *flags)
{
- aspace->page_table->get_entry(vaddr, phys, flags);
+ aspace->page_table->get_mapping(vaddr, phys, flags);
}
bool AddrSpace::map(VirtualArea *va, u64 vaddr, PTEFlags reqflags)
u64 phys;
PTEFlags flags;
- va->ma->get_entry(vaddr + va->offset, &phys, &flags);
+ va->ma->get_mapping(vaddr + va->offset, &phys, &flags);
PTEFlags newflags = flags & va->flags;
newflags.FaultOnWrite = flags.FaultOnWrite | va->flags.FaultOnWrite;
u64 oldphys;
PTEFlags oldflags;
- page_table->get_entry(vaddr, &oldphys, &oldflags);
+ page_table->get_mapping(vaddr, &oldphys, &oldflags);
if (oldflags.Valid &&
!(reqflags.Writeable && oldflags.FaultOnWrite))
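
To spell out the flag merge above: ordinary permissions intersect, but
FaultOnWrite is sticky in the other direction. A hedged illustration,
assuming PTEFlags permits integer initialization and bitwise AND/OR as
the surrounding code suggests:

    PTEFlags mflags = 0;    // from the backing mappable
    PTEFlags vflags = 0;    // from the virtual area

    // Backing store is readable and writeable, but copy-on-write:
    // writes must still fault.
    mflags.Valid = 1;
    mflags.Readable = 1;
    mflags.Writeable = 1;
    mflags.FaultOnWrite = 1;

    // The virtual area grants full read/write permission.
    vflags.Valid = 1;
    vflags.Readable = 1;
    vflags.Writeable = 1;

    PTEFlags merged = mflags & vflags;  // permissions: AND
    merged.FaultOnWrite = mflags.FaultOnWrite | vflags.FaultOnWrite;

    // merged is Writeable yet FaultOnWrite is set, so the PTE is
    // installed to fault on the first write, giving the fault path a
    // chance to copy the page before real write access is granted.
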
// once high memory support is added.
}
- void get_entry(u64 addr, u64 *phys, PTEFlags *flags)
+ void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
{
*phys = addr;
*flags = 0;
PhysMem real_physmem;
IMappable physmem = real_physmem;
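
PhysMem's get_mapping above is the identity on the address: it is a
mappable whose offset space is physical memory itself. A toy check,
assuming the get_mapping shown belongs to PhysMem as the surrounding
lines suggest (the address is arbitrary):

    u64 phys;
    PTEFlags flags;

    real_physmem.get_mapping(0x2000, &phys, &flags);
    assert(phys == 0x2000);    // offset and physical address coincide
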
+
+ class AnonMem : public Mappable {
+ public:
+ void get_size(u64 *size)
+ {
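+ // Anonymous memory can back any offset, so report the largest
+ // possible size: the whole address space in page-sized units.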
+ if (sizeof(long) == 8)
+ *size = 1ULL << (64 - Arch::page_shift);
+ else
+ *size = 1ULL << (32 - Arch::page_shift);
+ }
+
+ void pagein(u64 vaddr, PTEFlags reqflags)
+ {
+ // Doesn't need to do anything yet, though it may later
+ // once high memory support is added.
+ }
+
+ void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
+ {
+ Page *page = PageAlloc::alloc(1);
+
+ // OPT: Only zero if it was asked for.
+ // OPT: Eventually, have separate pagelists for zeroed and
+ // unzeroed memory, and a low-priority background thread
+ // that zeroes pages and moves them to the zeroed list.
+ bzero(page_to_kvirt(page), Arch::page_size);
+
+ *phys = page_to_phys(page);
+ *flags = 0;
+ flags->Valid = 1;
+ flags->Readable = 1;
+ flags->Writeable = 1;
+ flags->Executable = 1;
+ flags->User = 1;
+ }
+ };
+
+ AnonMem real_anonmem;
+ IMappable anonmem = real_anonmem;
}
#include <servers/mem/addrspace/footer.cc>
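
A closing note on the new AnonMem: get_mapping allocates and zeroes a
fresh page on every call, and nothing is remembered between calls. A
hedged usage sketch, using only identifiers from the patch (the
address is arbitrary):

    u64 phys;
    PTEFlags flags;

    // Two lookups of the same address return two different pages,
    // so the caller is expected to install the result in a page
    // table rather than repeat the lookup.
    real_anonmem.get_mapping(0x1000, &phys, &flags);

    // phys now refers to a zero-filled page, and flags has Valid,
    // Readable, Writeable, Executable, and User set.

This is also why the OPT note about pre-zeroed page lists matters: the
bzero() runs on every anonymous-page fault, squarely on the hot path.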