Remove Mem.AddrSpace.alloc_and_map(); a Mappable that allocates anonymous memory (AnonMem) is used instead.
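This commit routes anonymous (demand-allocated, zero-filled) memory through the same IMappable/map() path already used for physical memory, rather than keeping a dedicated alloc_and_map() entry point. A caller-side sketch, using only identifiers visible in the diff below; the region size, the flag choices, and the zero-initialization of MapFlags are illustrative assumptions, not part of the commit:

    // Sketch: map 16 pages of anonymous memory at an address chosen by the kernel.
    // Assumes Region::end is inclusive, as in the stack mapping in the diff.
    Region region;
    region.start = 0;
    region.end = 16 * Arch::page_size - 1;

    u64 vstart = System::Mem::AddrSpace_ns::unspecified_start;  // let map() pick a free area

    MapFlags mf = 0;                 // assumes MapFlags zero-initializes like PTEFlags
    mf.access_IDLNS_Read = 1;
    mf.access_IDLNS_Write = 1;

    // No pages are allocated here; AnonMem::get_mapping() (added in this diff)
    // hands back a freshly zeroed page when the fault path asks for a translation.
    as->map(anonmem, region, &vstart, mf);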
diff --git a/kernel/mem/addrspace.cc b/kernel/mem/addrspace.cc
index 324421f7f2b1f7fd1be271fb7f6c210c662cdb29..f014c5458c621d97b08a5efe3787d342852dd533 100644
--- a/kernel/mem/addrspace.cc
+++ b/kernel/mem/addrspace.cc
@@ -46,7 +46,7 @@ extern int roshared_start, roshared_page_end;
 extern int rwshared_start, rwshared_page_end;
 
 namespace Mem {
-       extern IMappable physmem;
+       extern IMappable physmem, anonmem;
 
        class AddrSpaceFactory {
        public:
@@ -86,8 +86,7 @@ namespace Mem {
                        mf.access_IDLNS_Read = 1;
                        mf.access_IDLNS_Exec = 1;
                        
-                       as->map(physmem, region, &vstart, mf,
-                               true, AddrSpace::map_protected);
+                       as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);
                        
                        region.start = kvirt_to_phys(&rwshared_start);
                        region.end = kvirt_to_phys(&rwshared_page_end);
@@ -96,12 +95,14 @@ namespace Mem {
                        mf.access_IDLNS_Write = 1;
                        mf.CopyOnWrite = 1;
                        
-                       as->map(physmem, region, &vstart, mf,
-                               true, AddrSpace::map_protected);
+                       as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);
                        
-                       AllocFlags af = 0;
-                       vstart = Arch::stack_bottom;
-                       as->alloc_and_map(Arch::stack_top - vstart + 1, &vstart, af, mf);
+                       // Leave the stack no-exec by default.
+                       region.start = vstart = Arch::stack_bottom;
+                       region.end = Arch::stack_top;
+                       mf.CopyOnWrite = 0;
+                       printf("vstart %llx\n", vstart);
+                       as->map(anonmem, region, &vstart, mf);
                        
                        *obj = static_cast<IAddrSpace>(*(as));
                }
@@ -154,12 +155,6 @@ namespace Mem {
                *addrspace = NULL;
        }
        
-       void AddrSpace::alloc_and_map(u64 len, u64 *vstart,
-                                     AllocFlags aflags, MapFlags mflags)
-       {
-               // FIXME: implement
-       }
-       
        bool AddrSpace::handle_fault(ulong vaddr, bool write, bool exec, bool user)
        {
                if (lock.held_by_curthread())
@@ -499,9 +494,9 @@ namespace Mem {
                new_page->release();
        }
 
-       void ASpaceMappable::get_entry(u64 vaddr, u64 *phys, PTEFlags *flags)
+       void ASpaceMappable::get_mapping(u64 vaddr, u64 *phys, PTEFlags *flags)
        {
-               aspace->page_table->get_entry(vaddr, phys, flags);
+               aspace->page_table->get_mapping(vaddr, phys, flags);
        }
        
        bool AddrSpace::map(VirtualArea *va, u64 vaddr, PTEFlags reqflags)
@@ -511,7 +506,7 @@ namespace Mem {
                        
                u64 phys;
                PTEFlags flags;
-               va->ma->get_entry(vaddr + va->offset, &phys, &flags);
+               va->ma->get_mapping(vaddr + va->offset, &phys, &flags);
 
                PTEFlags newflags = flags & va->flags;
                newflags.FaultOnWrite = flags.FaultOnWrite | va->flags.FaultOnWrite;
@@ -526,7 +521,7 @@ namespace Mem {
 
                u64 oldphys;
                PTEFlags oldflags;
-               page_table->get_entry(vaddr, &oldphys, &oldflags);
+               page_table->get_mapping(vaddr, &oldphys, &oldflags);
                
                if (oldflags.Valid &&
                    !(reqflags.Writeable && oldflags.FaultOnWrite))
@@ -542,6 +537,7 @@ namespace Mem {
                        // gets its copy-on-write broken.
 
                        assert((oldflags & reqflags) == reqflags);
+                       assert(!va->flags.FaultOnWrite || oldphys == phys);
                        return true;
                }
 
@@ -580,7 +576,7 @@ namespace Mem {
        }
        
        void AddrSpace::map(IMappable ma, Region region, u64 *vstart,
-                           MapFlags mflags, bool from_kernel, int map_type)
+                           MapFlags mflags, int map_type)
        {
                // FIXME: check alignment for VIPT caches
                // FIXME: Implement the "Replace" map flag
@@ -614,7 +610,7 @@ namespace Mem {
                
                if (*vstart != System::Mem::AddrSpace_ns::unspecified_start) {
                        vregion.start = *vstart;
-                       vregion.end = vregion.start + region.end - region.start + 1;
+                       vregion.end = vregion.start + region.end - region.start;
                
                        if (is_process) {
                                if (!valid_addr(vregion.start))
@@ -631,7 +627,7 @@ namespace Mem {
                }
                
                if (*vstart == System::Mem::AddrSpace_ns::unspecified_start) {
-                       if (fixed) 
+                       if (fixed)
                                throw_idl(ResourceBusy, 2, countarray("varea overlap"));
                        
                        if (!get_free_region(region.end - region.start + 1, vregion, prev))
@@ -826,7 +822,7 @@ namespace Mem {
                        // once high memory support is added.
                }
                
-               void get_entry(u64 addr, u64 *phys, PTEFlags *flags)
+               void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
                {
                        *phys = addr;
                        *flags = 0;
@@ -840,6 +836,45 @@ namespace Mem {
        
        PhysMem real_physmem;
        IMappable physmem = real_physmem;
+
+       class AnonMem : public Mappable {
+       public:
+               void get_size(u64 *size)
+               {
+                       if (sizeof(long) == 8)
+                               *size = 1ULL << (64 - Arch::page_shift);
+                       else
+                               *size = 1ULL << (32 - Arch::page_shift);
+               }
+       
+               void pagein(u64 vaddr, PTEFlags reqflags)
+               {
+                       // Doesn't need to do anything yet, though it may later
+                       // once high memory support is added.
+               }
+               
+               void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
+               {
+                       Page *page = PageAlloc::alloc(1);
+                       
+                       // OPT: Only zero if it was asked for.
+                       // OPT: Eventually, have separate pagelists for zeroed and
+                       // unzeroed memory, and a low-priority background thread
+                       // that zeroes pages and moves them to the zeroed list.
+                       bzero(page_to_kvirt(page), Arch::page_size);
+                       
+                       *phys = page_to_phys(page);
+                       *flags = 0;
+                       flags->Valid = 1;
+                       flags->Readable = 1;
+                       flags->Writeable = 1;
+                       flags->Executable = 1;
+                       flags->User = 1;
+               }
+       };
+       
+       AnonMem real_anonmem;
+       IMappable anonmem = real_anonmem;
 }
 
 #include <servers/mem/addrspace/footer.cc>
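For context on the OPT note inside AnonMem::get_mapping() above (separate pagelists for zeroed and unzeroed memory, plus a low-priority background thread that zeroes pages), here is a rough standalone sketch of that scheme in plain C++. It is purely illustrative: PagePool, alloc_zeroed(), free_page(), and zero_some() are hypothetical names, and nothing like them exists in this tree.

    #include <cstring>
    #include <mutex>
    #include <vector>

    // Hypothetical stand-in for the kernel's page frames.
    struct Page { unsigned char data[4096]; };

    class PagePool {
        std::mutex lock;
        std::vector<Page *> zeroed;    // pages already cleared, handed out fast
        std::vector<Page *> unzeroed;  // freed pages waiting for the zeroing thread

    public:
        // What an allocator like AnonMem::get_mapping() would call instead of
        // zeroing inline on every allocation.
        Page *alloc_zeroed()
        {
            std::lock_guard<std::mutex> g(lock);
            if (!zeroed.empty()) {
                Page *p = zeroed.back();
                zeroed.pop_back();
                return p;                                 // fast path: no memset needed
            }
            if (!unzeroed.empty()) {
                Page *p = unzeroed.back();
                unzeroed.pop_back();
                std::memset(p->data, 0, sizeof p->data);  // slow path: clear on demand
                return p;
            }
            return new Page();                            // value-initialized, i.e. zeroed
        }

        void free_page(Page *p)
        {
            std::lock_guard<std::mutex> g(lock);
            unzeroed.push_back(p);                        // defer clearing to the background thread
        }

        // One step of the low-priority background thread: clear a page outside
        // the lock, then move it onto the zeroed list.
        void zero_some()
        {
            Page *p;
            {
                std::lock_guard<std::mutex> g(lock);
                if (unzeroed.empty())
                    return;
                p = unzeroed.back();
                unzeroed.pop_back();
            }
            std::memset(p->data, 0, sizeof p->data);
            std::lock_guard<std::mutex> g(lock);
            zeroed.push_back(p);
        }
    };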