new_page->release();
}
- void ASpaceMappable::get_entry(u64 vaddr, u64 *phys, PTEFlags *flags)
+ // Renamed get_entry -> get_mapping (kept consistent with the Mappable
+ // interface renaming elsewhere in this patch).  Looks up the physical
+ // address and PTE flags currently mapped at vaddr by delegating to the
+ // owning address space's page table.
+ void ASpaceMappable::get_mapping(u64 vaddr, u64 *phys, PTEFlags *flags)
{
- aspace->page_table->get_entry(vaddr, phys, flags);
+ aspace->page_table->get_mapping(vaddr, phys, flags);
}
bool AddrSpace::map(VirtualArea *va, u64 vaddr, PTEFlags reqflags)
u64 phys;
PTEFlags flags;
- va->ma->get_entry(vaddr + va->offset, &phys, &flags);
+ va->ma->get_mapping(vaddr + va->offset, &phys, &flags);
PTEFlags newflags = flags & va->flags;
newflags.FaultOnWrite = flags.FaultOnWrite | va->flags.FaultOnWrite;
u64 oldphys;
PTEFlags oldflags;
- page_table->get_entry(vaddr, &oldphys, &oldflags);
+ page_table->get_mapping(vaddr, &oldphys, &oldflags);
if (oldflags.Valid &&
!(reqflags.Writeable && oldflags.FaultOnWrite))
// once high memory support is added.
}
- void get_entry(u64 addr, u64 *phys, PTEFlags *flags)
+ void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
{
*phys = addr;
*flags = 0;
PhysMem real_physmem;
IMappable physmem = real_physmem;
+
+ // Anonymous (demand-allocated) memory backing store.  Unlike the
+ // physical-memory mappable above, which reports an existing physical
+ // address unchanged, AnonMem hands out a freshly allocated, zeroed
+ // page each time a mapping is requested.
+ class AnonMem : public Mappable {
+ public:
+ // Report the mappable size in pages: the whole virtual address
+ // space, chosen by word size (sizeof(long) == 8 selects the
+ // 64-bit variant).
+ void get_size(u64 *size)
+ {
+ if (sizeof(long) == 8)
+ *size = 1ULL << (64 - Arch::page_shift);
+ else
+ *size = 1ULL << (32 - Arch::page_shift);
+ }
+
+ // No-op: anonymous pages need no pre-fault work in this version.
+ void pagein(u64 vaddr, PTEFlags reqflags)
+ {
+ // Doesn't need to do anything yet, though it may later
+ // once high memory support is added.
+ }
+
+ // Allocate and zero one new page, returning its physical address
+ // and full user RWX permissions.  NOTE(review): addr is ignored --
+ // every call yields a distinct page, so the caller is presumably
+ // responsible for recording the page and freeing it on unmap;
+ // confirm there is no leak path.
+ void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
+ {
+ Page *page = PageAlloc::alloc(1);
+
+ // OPT: Only zero if it was asked for.
+ // OPT: Eventually, have separate pagelists for zeroed and
+ // unzeroed memory, and a low-priority background thread
+ // that zeroes pages and moves them to the zeroed list.
+ bzero(page_to_kvirt(page), Arch::page_size);
+
+ *phys = page_to_phys(page);
+ // Clear all flag bits, then grant everything; FaultOnWrite is
+ // deliberately left unset so writes hit the page directly.
+ *flags = 0;
+ flags->Valid = 1;
+ flags->Readable = 1;
+ flags->Writeable = 1;
+ flags->Executable = 1;
+ flags->User = 1;
+ }
+ };
+
+ // Singleton instance and its exported Mappable handle.
+ AnonMem real_anonmem;
+ IMappable anonmem = real_anonmem;
}
#include <servers/mem/addrspace/footer.cc>