// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
+// so, subject to the following condition:
//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimers.
-//
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimers in the
-// documentation and/or other materials provided with the distribution.
-//
-// * The names of the Software's authors and/or contributors
-// may not be used to endorse or promote products derived from
-// this Software without specific prior written permission.
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
extern int rwshared_start, rwshared_page_end;
namespace Mem {
- extern IMappable physmem;
+ extern IMappable physmem, anonmem;
class AddrSpaceFactory {
public:
mf.access_IDLNS_Read = 1;
mf.access_IDLNS_Exec = 1;
- as->map(physmem, region, &vstart, mf,
- true, AddrSpace::map_protected);
+ as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);
region.start = kvirt_to_phys(&rwshared_start);
region.end = kvirt_to_phys(&rwshared_page_end);
mf.access_IDLNS_Write = 1;
mf.CopyOnWrite = 1;
- as->map(physmem, region, &vstart, mf,
- true, AddrSpace::map_protected);
+ as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);
- AllocFlags af = 0;
- vstart = Arch::stack_bottom;
- as->alloc_and_map(Arch::stack_top - vstart + 1, &vstart, af, mf);
+ // Leave the stack no-exec by default.
+ region.start = vstart = Arch::stack_bottom;
+ region.end = Arch::stack_top;
+ mf.CopyOnWrite = 0;
+ printf("vstart %llx\n", vstart);
+ as->map(anonmem, region, &vstart, mf);
*obj = static_cast<IAddrSpace>(*(as));
}
*addrspace = NULL;
}
- void AddrSpace::alloc_and_map(u64 len, u64 *vstart,
- AllocFlags aflags, MapFlags mflags)
- {
- // FIXME: implement
- }
-
bool AddrSpace::handle_fault(ulong vaddr, bool write, bool exec, bool user)
{
if (lock.held_by_curthread())
new_page->release();
}
- void ASpaceMappable::get_entry(u64 vaddr, u64 *phys, PTEFlags *flags)
+ // Look up the physical address and PTE flags currently mapped at
+ // vaddr by delegating to the owning address space's page table.
+ // (This hunk renames get_entry -> get_mapping, matching the
+ // page-table method rename elsewhere in this patch.)
+ void ASpaceMappable::get_mapping(u64 vaddr, u64 *phys, PTEFlags *flags)
{
- aspace->page_table->get_entry(vaddr, phys, flags);
+ aspace->page_table->get_mapping(vaddr, phys, flags);
}
bool AddrSpace::map(VirtualArea *va, u64 vaddr, PTEFlags reqflags)
u64 phys;
PTEFlags flags;
- va->ma->get_entry(vaddr + va->offset, &phys, &flags);
+ va->ma->get_mapping(vaddr + va->offset, &phys, &flags);
PTEFlags newflags = flags & va->flags;
newflags.FaultOnWrite = flags.FaultOnWrite | va->flags.FaultOnWrite;
u64 oldphys;
PTEFlags oldflags;
- page_table->get_entry(vaddr, &oldphys, &oldflags);
+ page_table->get_mapping(vaddr, &oldphys, &oldflags);
if (oldflags.Valid &&
!(reqflags.Writeable && oldflags.FaultOnWrite))
// gets its copy-on-write broken.
assert((oldflags & reqflags) == reqflags);
+ assert(!va->flags.FaultOnWrite || oldphys == phys);
return true;
}
}
void AddrSpace::map(IMappable ma, Region region, u64 *vstart,
- MapFlags mflags, bool from_kernel, int map_type)
+ MapFlags mflags, int map_type)
{
// FIXME: check alignment for VIPT caches
// FIXME: Implement the "Replace" map flag
if (*vstart != System::Mem::AddrSpace_ns::unspecified_start) {
vregion.start = *vstart;
- vregion.end = vregion.start + region.end - region.start + 1;
+ vregion.end = vregion.start + region.end - region.start;
if (is_process) {
if (!valid_addr(vregion.start))
}
if (*vstart == System::Mem::AddrSpace_ns::unspecified_start) {
- if (fixed)
+ if (fixed)
throw_idl(ResourceBusy, 2, countarray("varea overlap"));
if (!get_free_region(region.end - region.start + 1, vregion, prev))
// once high memory support is added.
}
- void get_entry(u64 addr, u64 *phys, PTEFlags *flags)
+ void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
{
*phys = addr;
*flags = 0;
PhysMem real_physmem;
IMappable physmem = real_physmem;
+
+ // Anonymous (demand-zero) memory backing store: hands out freshly
+ // allocated, zeroed pages on demand, in contrast to physmem which
+ // exposes existing physical memory. Used in this patch to back the
+ // process stack via as->map(anonmem, ...).
+ class AnonMem : public Mappable {
+ public:
+ // Report the mappable size in pages: the entire page-number space
+ // for the pointer width (2^(64 - page_shift) pages on an LP64
+ // target, 2^(32 - page_shift) otherwise). sizeof(long) is a
+ // compile-time constant, so the branch folds away.
+ void get_size(u64 *size)
+ {
+ if (sizeof(long) == 8)
+ *size = 1ULL << (64 - Arch::page_shift);
+ else
+ *size = 1ULL << (32 - Arch::page_shift);
+ }
+
+ void pagein(u64 vaddr, PTEFlags reqflags)
+ {
+ // Doesn't need to do anything yet, though it may later
+ // once high memory support is added.
+ }
+
+ // Allocate one zeroed page and return it with full RWX+User
+ // permissions; the VirtualArea's flags are expected to mask these
+ // down (e.g. the no-exec stack mapping set up by the factory).
+ // NOTE(review): a new page is allocated on every call with no
+ // record kept here -- presumably the caller installs the mapping
+ // so the same vaddr is not faulted in twice (a second call would
+ // leak the first page and discard its contents); verify against
+ // AddrSpace::handle_fault/map.
+ void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
+ {
+ Page *page = PageAlloc::alloc(1);
+
+ // OPT: Only zero if it was asked for.
+ // OPT: Eventually, have separate pagelists for zeroed and
+ // unzeroed memory, and a low-priority background thread
+ // that zeroes pages and moves them to the zeroed list.
+ bzero(page_to_kvirt(page), Arch::page_size);
+
+ *phys = page_to_phys(page);
+ *flags = 0;
+ flags->Valid = 1;
+ flags->Readable = 1;
+ flags->Writeable = 1;
+ flags->Executable = 1;
+ flags->User = 1;
+ }
+ };
+
+ AnonMem real_anonmem;
+ IMappable anonmem = real_anonmem;
}
#include <servers/mem/addrspace/footer.cc>