// mem/addrspace.cc -- System.Mem.AddrSpace
//
// OPT: Special AddrSpaces that only translate/export a linear block of
// another AddrSpace, and don't have individual entries for every page.
//
// OPT: Special VAreas that use their own translation mechanism instead
// of varea->offset, so that filesystem block tables (and similar things)
// don't need to have a VArea per block.
//
// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal with
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
//     * Redistributions of source code must retain the above copyright notice,
//       this list of conditions and the following disclaimers.
//
//     * Redistributions in binary form must reproduce the above copyright notice,
//       this list of conditions and the following disclaimers in the
//       documentation and/or other materials provided with the distribution.
//
//     * The names of the Software's authors and/or contributors
//       may not be used to endorse or promote products derived from
//       this Software without specific prior written permission.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
// SOFTWARE.

#include <kern/mem.h>
#include <kern/paging.h>
#include <kern/pagetable.h>
#include <kern/pagealloc.h>
#include <kern/generic-pte.h>
#include <kern/compiler.h>

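// These symbols (presumably provided by the linker or startup code) are
// used only for their addresses: they bracket the read-only and read-write
// pages shared with user processes, which are mapped below via
// kvirt_to_phys().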
extern int roshared_start, roshared_page_end;
extern int rwshared_start, rwshared_page_end;

namespace Mem {
        extern IMappable physmem, anonmem;

        class AddrSpaceFactory {
        public:
                #include <servers/mem/addrspace/Mem/AddrSpaceFactory.h>
                
                AddrSpaceFactory()
                {
                        init_iface();
                }
                
                void create(Object *obj)
                {
                        *obj = static_cast<IAddrSpace>(*(new AddrSpace(false)));
                }
        };

        class ProcAddrSpaceFactory {
        public:
                #include <servers/mem/addrspace/Mem/ProcAddrSpaceFactory.h>
                
                ProcAddrSpaceFactory()
                {
                        init_iface();
                }
                
                void create(Object *obj)
                {
                        AddrSpace *as = new AddrSpace(true);
                        Region region;
                        MapFlags mf = 0;
                        u64 vstart;

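                        // Every new process address space gets three fixed mappings:
                        // the shared read-only pages (mapped read/exec), the shared
                        // read-write pages (mapped copy-on-write), and an anonymous,
                        // non-executable stack region.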
                        region.start = kvirt_to_phys(&roshared_start);
                        region.end = kvirt_to_phys(&roshared_page_end);
                        vstart = Arch::roshared_map;
                        mf.Fixed = 1;
                        mf.access_IDLNS_Read = 1;
                        mf.access_IDLNS_Exec = 1;
                        
                        as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);
                        
                        region.start = kvirt_to_phys(&rwshared_start);
                        region.end = kvirt_to_phys(&rwshared_page_end);
                        vstart = Arch::rwshared_map;
                        mf.access_IDLNS_Exec = 0;
                        mf.access_IDLNS_Write = 1;
                        mf.CopyOnWrite = 1;
                        
                        as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);
                        
                        // Leave the stack no-exec by default.
                        region.start = vstart = Arch::stack_bottom;
                        region.end = Arch::stack_top;
                        mf.CopyOnWrite = 0;
                        printf("vstart %llx\n", vstart);
                        as->map(anonmem, region, &vstart, mf);
                        
                        *obj = static_cast<IAddrSpace>(*(as));
                }
        };
        
        ProcAddrSpaceFactory real_proc_addrspace_factory;
        Factory proc_addr_space_factory = real_proc_addrspace_factory;

        AddrSpaceFactory real_addrspace_factory;
        Factory addr_space_factory = real_addrspace_factory;

        AddrSpace::AddrSpace(bool process) : mappable(this)
        {
                init_iface();
                is_process = process;
                
                // OPT: Allow optional use of the native PTE for stacked aspaces,
                // either because the native PTE is 64-bit, or because it's an
                // embedded system which does not need 64-bit storage.
                
                if (process)
                        page_table = new PageTableImpl<Arch::PTE>(true);
                else
                        page_table = new PageTableImpl<GenPTE>(false);
                
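                // Skip the first page of user space so that page 0 stays
                // unmapped (likely to catch null pointer dereferences).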
                cached_free_region = Arch::user_start + Arch::page_size;
        }
        
        // This should only be used once during bootup to initialize the
        // kernel's address space with a static initial page table.
        
        AddrSpace::AddrSpace(void *ptbl_toplevel) : mappable(this)
        {
                init_iface();
                is_process = true;
                page_table = new PageTableImpl<Arch::PTE>(ptbl_toplevel);
                
                // FIXME: should be kernel virtual space
                cached_free_region = Arch::user_start + Arch::page_size;
        }

        void AddrSpace::get_mappable(IMappable *ma)
        {
                *ma = mappable;
        }

        void AddrSpace::clone(IAddrSpace *addrspace, uint8_t clone_is_real)
        {
                // FIXME: implement
                *addrspace = NULL;
        }
        
        bool AddrSpace::handle_fault(ulong vaddr, bool write, bool exec, bool user)
        {
                if (lock.held_by_curthread())
                        return false;

                assert(!(write && exec));
                PTEFlags reqflags;

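                // Translate the fault type into the permission bits that the
                // final mapping must provide.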
                if (user)
                        reqflags.User = 1;

                if (write)
                        reqflags.Writeable = 1;
                else if (exec)
                        reqflags.Executable = 1;
                else
                        reqflags.Readable = 1;
                
                reqflags.Valid = 1;

                try {
                        mappable.pagein(page_align(vaddr), reqflags);
                }

                catch (BadPageFault &bpf) {
                        // FIXME: retain info about nature of bpf
                        // to throw to user?
                        return false;
                }

                return true;
        }
        
        bool AddrSpace::check_overlap(Region region, VirtualArea *&va)
        {
                if (region.end < region.start)
                        return true;
        
                va = varea_tree.find_nearest(region.start);
                
                if (!va)
                        return false;
                
                // If region.start is in an existing region, that region will
                // be returned.

                if (region.end >= va->region().start &&
                    region.start <= va->region().end)
                        return true;
                
                // If it returns a region that's greater than region.start, and va
                // itself does not overlap, then prev does not overlap (or else
                // region.start would be in or before prev, and thus prev would
                // have been returned).
                
                // If it returns a region that's less than region.start, we still
                // need to check next, as region.end could be in (or beyond) that
                // region.
                
                if (va->list_node.next != &varea_list) {
                        VirtualArea *next =
                                va->list_node.next->listentry(VirtualArea, list_node);
                
                        if (region.end >= next->region().start &&
                            region.start <= next->region().end)
                        {
                                va = next;
                                return true;
                        }
                }

                VirtualArea *prev;
        
                if (va->list_node.prev != &varea_list)
                        prev = va->list_node.prev->listentry(VirtualArea, list_node);
                else
                        prev = NULL;
        
                if (region.start < va->region().start) {
                        assert(!prev || prev->region().end < region.start);
                        va = prev;
                }
                
                return false;
        }

        VirtualArea *AddrSpace::split_varea(Region region)
        {
                VirtualArea *start, *mid, *end;

                // check_overlap is used rather than varea_tree.find,
                // so that the first overlapping region can be returned,
                // as most (if not all) callers will need this anyway.
                
                if (!check_overlap(region, start))
                        return NULL;
                
                assert(start);
                assert(start->aspace == this);
                assert(start->region().end >= region.start);
                
                if (start->region().start < region.start) {
                        // There is a varea that straddles region.start;
                        // create a new varea "mid" for the overlapping part.
                
                        mid = new VirtualArea;
                        
                        mid->aspace = this;
                        mid->region().start = region.start;
                        
                        if (region.end > start->region().end)
                                mid->region().end = start->region().end;
                        else
                                mid->region().end = region.end;

                        mid->flags = start->flags;
                        mid->ma = start->ma;
                        mid->offset = start->offset;
                        
                        if (start->region().end > region.end) {
                                // The varea also straddles region.end; create a new
                                // varea "end" for the other side of the region.
                        
                                end = new VirtualArea;

                                end->aspace = this;
                                end->region().start = region.end + 1;
                                end->region().end = start->region().end;

                                end->flags = start->flags;
                                end->ma = start->ma;
                                end->offset = start->offset;
                        } else {
                                end = NULL;
                        }

                        start->region().end = region.start - 1;

                        varea_tree.add(mid);
                        mid->ma->map(mid);
                        
                        if (end) {
                                // Splits have already been done at both ends of the region,
                                // so there's no need to look up the ending address.
                                
                                varea_tree.add(end);
                                mid->ma->map(end);
                                return mid;
                        }
                        
                        start = mid;
                }
                
                if (start->region().end == region.end)
                        return start;
                
                if (start->region().end > region.end)
                        end = start;
                else {
                        end = varea_tree.find(region.end);
                        
                        if (!end)
                                return start;

                        assert(end->aspace == this);
                        assert(end->region().start <= region.end);
                        assert(end->region().end >= region.end);

                        if (end->region().end == region.end)
                                return start;
                }
                
                assert(end->region().end > region.end);
                
                // There is a varea that straddles region.end;
                // create a new varea "mid" for the overlapping part.
                
                mid = new VirtualArea;
                
                mid->aspace = this;
                mid->region().start = end->region().start;
                mid->region().end = region.end;

                mid->flags = end->flags;
                mid->ma = end->ma;
                mid->offset = end->offset;
                
                end->region().start = region.end + 1;

                varea_tree.add(mid);
                mid->ma->map(mid);
                
                return start;
        }
        
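        // Find a free virtual region of the given (page-aligned) length.
        // First try the cached hint; failing that, do a first-fit scan of the
        // gaps following each existing varea, and as a last resort retry once
        // from the bottom of the user address range.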
        bool AddrSpace::get_free_region(ulong len, Region &region,
                                        VirtualArea *&prev)
        {
                assert(page_aligned(len));
                assert(cached_free_region);
        
                region.start = cached_free_region;
                region.end = cached_free_region + len - 1;
                
                if (region.end <= Arch::user_end && !check_overlap(region, prev)) {
                        cached_free_region = region.end + 1;
                        return true;
                }
                
                for (Util::List *node = &prev->list_node; node != &varea_list;
                     node = node->next)
                {
                        VirtualArea *va = node->listentry(VirtualArea, list_node);
                        ulong end = Arch::user_end;
                        
                        if (node->next != &varea_list) {
                                VirtualArea *next = node->next->listentry(VirtualArea, list_node);
                                end = next->region().start - 1;
                        }
                        
                        assert(end > va->region().end);
                        
                        if (end - va->region().end >= len) {
                                region.start = va->region().end + 1;
                                region.end = region.start + len - 1;
                                
                                assert(page_aligned(region.start));
                                cached_free_region = region.end + 1;
                                return true;
                        }
                }
                
                if (cached_free_region != Arch::user_start + Arch::page_size) {
                        cached_free_region = Arch::user_start + Arch::page_size;
                        return get_free_region(len, region, prev);
                }
                
                return false;
        }

        // The "mapped" parameter is used to indicate whether the top-level
        // address space has had a mapping established.  If "mapped" is
        // false, but an exception is not thrown, then this method must
        // be called again to propagate the mapping along the aspace chain.
        //
        // FIXME: Between aspace locks, if aspace's mapping is revoked and
        // ma->aspace's mapping changes, a pagein could leak through and cause
        // a page load or a copy-on-write breaking.  This isn't a huge deal
        // (it doesn't affect the correctness of the code or give aspace
        // access to ma->aspace's new mapping), but it's unpleasant, and could
        // have an adverse impact on determinism.  If you have a real-time
        // application that can't tolerate the occasional spurious pagein or
        // copy-on-write breaking, then use an address space that hasn't
        // previously been exposed to recursive mappers.
        
        bool ASpaceMappable::rec_pagein(AddrSpace *aspace, u64 vaddr,
                                        PTEFlags reqflags)
        {
                bool mapped = true;
                
                // aspace->mappable.retain();
                
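                // Walk down the chain of stacked address spaces until either a
                // mapping is established in this aspace or a non-aspace mappable
                // (the final backing object) has been paged in.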
                while (true) {
                        Lock::DroppableAutoLock autolock(aspace->lock);
                        VirtualArea *va = aspace->varea_tree.find(vaddr);
                        
                        if (!va)
                                throw BadPageFault();

                        if ((va->flags & reqflags) != reqflags)
                                throw BadPageFault();
                        
                        if (aspace->map(va, vaddr, reqflags))
                                break;

                        mapped = false;
                        Mappable *ma = va->ma;
                        vaddr += va->offset;

                        // ma->retain();
                        autolock.unlock();
                        // aspace->mappable.release();

                        if (!ma->is_aspace) {
                                ma->pagein(vaddr, reqflags);
                                // ma->release();
                                break;
                        }
                        
                        aspace = static_cast<ASpaceMappable *>(ma)->aspace;
                }
                
                return mapped;
        }
        
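        // Retry until the mapping has propagated all the way back up to this
        // (top-level) address space; see the comment above rec_pagein().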
        void ASpaceMappable::pagein(u64 vaddr, PTEFlags reqflags)
        {
                while (!rec_pagein(aspace, vaddr, reqflags));
        }
        
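        // Break a copy-on-write mapping at vaddr.  If this is the only
        // remaining reference to the page, simply clear the FaultOnWrite
        // condition; otherwise copy the data into a freshly allocated page
        // and let the rmap code retarget the mapping at the copy.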
        void AddrSpace::break_copy_on_write(VirtualArea *va, u64 vaddr, u64 phys)
        {
                assert(lock.held_by_curthread());
                assert(rmap_lock.held_by_curthread());

                assert(va->flags.FaultOnWrite);
                assert(va->aspace == this);

                Page *old_page = phys_to_page(phys);

                Region region = { vaddr, vaddr + Arch::page_size - 1 };
        
                // If this is the only reference to the page left, then
                // nothing needs to be copied.  Just clear the COW condition.
                if (is_phys_page(old_page) && old_page->get_refcount() == 1) {
                        PTEFlags mask, flags;
                        mask.FaultOnWrite = 1;

                        page_table->set_flags(region, flags, mask);
                        return;
                }
                
                Page *new_page = PageAlloc::alloc(1);

                // FIXME -- highmem
                // OPT: It'd be better to do this without the rmap_lock held,
                // especially if rmap_lock is global rather than per-physpage.
                // I want to keep things simple for now and optimize later,
                // though.

                memcpy(page_to_kvirt(new_page), phys_to_kvirt(phys),
                       Arch::page_size);

                page_table->rmap_table.break_copy_on_write(region.start, new_page);
                new_page->release();
        }

        void ASpaceMappable::get_mapping(u64 vaddr, u64 *phys, PTEFlags *flags)
        {
                aspace->page_table->get_mapping(vaddr, phys, flags);
        }
        
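        // Attempt to map a single page within "va".  Returns false if the
        // mapping (or a copy-on-write break) must first be established in the
        // mappable underneath this one, in which case rec_pagein() moves down
        // the chain and retries.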
        bool AddrSpace::map(VirtualArea *va, u64 vaddr, PTEFlags reqflags)
        {
                Lock::AutoLock autolock(rmap_lock);
                assert(va->aspace == this);
                
                u64 phys;
                PTEFlags flags;
                va->ma->get_mapping(vaddr + va->offset, &phys, &flags);

                PTEFlags newflags = flags & va->flags;
                newflags.FaultOnWrite = flags.FaultOnWrite | va->flags.FaultOnWrite;
                
                if (!newflags.Valid) {
                        assert(va->flags.Valid);
                        return false;
                }
                
                if ((newflags & reqflags) != reqflags)
                        return false;

                u64 oldphys;
                PTEFlags oldflags;
                page_table->get_mapping(vaddr, &oldphys, &oldflags);
                
                if (oldflags.Valid &&
                    !(reqflags.Writeable && oldflags.FaultOnWrite))
                {
                        // If the existing mapping is valid, don't try to map it again.
                        // The existing mapping was put there possibly by a race, but
                        // more likely because a FaultOnWrite was handled upstream.
                        //
                        // FaultOnWrite handling is the only type of mapping change that
                        // can be done directly; all others must change the varea and do
                        // an rmap invalidation instead.  FaultOnWrite is special
                        // because we don't want to split vareas for every page that
                        // gets its copy-on-write broken.

                        assert((oldflags & reqflags) == reqflags);
                        assert(!va->flags.FaultOnWrite || oldphys == phys);
                        return true;
                }

                if (reqflags.Writeable && oldflags.FaultOnWrite)
                {
                        // The FaultOnWrite needs to be handled upstream.
                        if (!va->flags.FaultOnWrite)
                                return false;
                        
                        va->aspace->break_copy_on_write(va, vaddr, phys);
                } else {
                        assert(!oldflags.Valid);
                        PageTable *usptbl = NULL;
                        
                        if (va->ma->is_aspace) {
                                ASpaceMappable *asma = static_cast<ASpaceMappable *>(va->ma);
                                usptbl = asma->aspace->page_table;
                        }
                        
                        RMapTable::map(va, usptbl, vaddr, vaddr + va->offset);
                        
                        RegionWithOffset rwo;
                        rwo.start = vaddr;
                        rwo.end = vaddr + Arch::page_size - 1;
                        rwo.offset = phys;
                        
                        page_table->map(rwo, newflags);
                }
                
                return true;
        }

        void ASpaceMappable::get_size(u64 *size)
        {
                aspace->get_size(size);
        }
        
        void AddrSpace::map(IMappable ma, Region region, u64 *vstart,
                            MapFlags mflags, int map_type)
        {
                // FIXME: check alignment for VIPT caches
                // FIXME: Implement the "Replace" map flag
                
                if (mflags.Replace)
                        throw_idl(InvalidArgument, 3,
                                  countarray("Replace unimplemented"));
                
                Mappable *cma = Mappable::classptr(ma);
                if (!cma) {
                        // The given IMappable does not refer to a Mappable
                        // of this kernel.

                        throw_idl(InvalidArgument, 0, nullarray);
                }
                
                bool fixed = mflags.Fixed;
                
                if (is_process)
                        mflags.Fixed = 1;
                
                if (!page_aligned(region.start))
                        throw_idl(InvalidArgument, 1, countarray("unaligned start"));
                
                if (!page_aligned(region.end + 1))
                        throw_idl(InvalidArgument, 1, countarray("unaligned end"));
                
                Lock::AutoLock autolock(lock);
                Region vregion;
                VirtualArea *prev;
                
                if (*vstart != System::Mem::AddrSpace_ns::unspecified_start) {
                        vregion.start = *vstart;
                        vregion.end = vregion.start + region.end - region.start;
                
                        if (is_process) {
                                if (!valid_addr(vregion.start))
                                        throw_idl(InvalidArgument, 2,
                                                  countarray("invalid virtual start"));
                                
                                if (!valid_addr(vregion.end))
                                        throw_idl(InvalidArgument, 2,
                                                  countarray("invalid virtual end"));
                        }
                        
                        if (check_overlap(vregion, prev))
                                *vstart = System::Mem::AddrSpace_ns::unspecified_start;
                }
                
                if (*vstart == System::Mem::AddrSpace_ns::unspecified_start) {
                        if (fixed)
                                throw_idl(ResourceBusy, 2, countarray("varea overlap"));
                        
                        if (!get_free_region(region.end - region.start + 1, vregion, prev))
                                throw_idl(OutOfSpace, countarray("out of vspace"));
                        
                        *vstart = vregion.start;
                }
                
                VirtualArea *newva = new VirtualArea;
                newva->aspace = this;
                newva->region() = vregion;

                newva->flags.Valid = 1;
                newva->flags.User = map_type != map_kernel;
                newva->flags.Readable = mflags.access_IDLNS_Read;
                newva->flags.Writeable = mflags.access_IDLNS_Write;
                newva->flags.Executable = mflags.access_IDLNS_Exec;
                newva->flags.FaultOnWrite = mflags.CopyOnWrite;
                newva->flags.Protected = map_type != map_user;
                newva->ma = cma;
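                // "offset" converts an address in this aspace to an offset
                // within the mappable: ma_addr = vaddr + offset (see
                // rec_pagein() and the single-page AddrSpace::map() above).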
                newva->offset = region.start - vregion.start;

                varea_tree.add(newva);
                newva->ma->map(newva);
                
                if (prev) {
                        prev->list_node.add_front(&newva->list_node);
                } else {
                        varea_list.add_front(&newva->list_node);
                }
        }
        
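        // Unmap the given region, trimming or splitting any vareas that
        // straddle its edges and tearing down the corresponding page table
        // entries via the rmap table.  Vareas marked Protected are skipped
        // unless the request comes from the kernel.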
        void AddrSpace::unmap(Region region, bool from_kernel)
        {
                u64 orig_start = region.start;
        
                while (region.start <= region.end) {
                        Lock::DroppableAutoLock autolock(lock);
                        VirtualArea *va;

                        // If check_overlap returns false, then there are no vareas
                        // in the specified region, so there's nothing to unmap.
                        
                        if (!check_overlap(region, va))
                                return;
                        
                        if (va->flags.Protected && !from_kernel) {
                                region.start = va->list_node.next->
                                               listentry(VirtualArea, list_node)->region().start;

                                if (region.start <= orig_start)
                                        break;
                                
                                continue;
                        }
                        
                        u64 va_end = va->region().end;
                        u64 next_start = 0;
                        
                        if (va_end > region.end) {
                                u64 va_start = va->region().start;
                                va->region().start = region.end + 1;
                        
                                if (va_start < region.start) {
                                        VirtualArea *newva = new VirtualArea;

                                        newva->aspace = this;
                                        newva->region().start = va_start;
                                        newva->region().end = region.start - 1;

                                        newva->flags = va->flags;
                                        newva->ma = va->ma;
                                        newva->offset = va->offset;

                                        varea_tree.add(newva);
                                        newva->ma->map(newva);
                                }
                                
                                VirtualArea *nextva =
                                        va->list_node.next->listentry(VirtualArea, list_node);

                                next_start = nextva->region().start;
                        } else if (va->region().start < region.start) {
                                va->region().end = region.start - 1;
                        } else {
                                varea_tree.del(va);
                                va->ma->unmap(va);
                        }

                        // This is done after the varea removal, so that new faults
                        // don't map things in again.

                        // OPT: Skip RMap-based unmapping if nothing maps this aspace.
                        // OPT: Push this loop into the RMap code, allowing it to skip
                        // empty portions of the tables (as the pagetable code currently
                        // does).
                        
                        while (region.start <= va_end && region.start <= region.end) {
                                page_table->rmap_table.unmap(region.start);
                                region.start += Arch::page_size;

                                if (region.start <= orig_start)
                                        break;
                        }
                        
                        region.start = next_start;
                        
                        if (region.start <= orig_start)
                                break;
                }
        }
        
        void AddrSpace::set_mapflags(Region region, MapFlags mflags)
        {
                // FIXME: implement
                // Find varea, split if necessary, propagate change to stacked aspaces
        }
        
        void AddrSpace::get_mapflags(Region region, MapFlags *mflags, uint8_t *all_same)
        {
                // FIXME: implement
        }
        
        void AddrSpace::get_mapping(Region region, IMappable *ma, u64 *offset)
        {
                // FIXME: implement
        }
        
        void AddrSpace::get_page_size(u32 *page_size)
        {
                *page_size = Arch::page_size;
        }
        
        void AddrSpace::get_min_align(u32 *min_align)
        {
                *min_align = Arch::page_mapping_min_align;
        }
        
        void Mappable::map(VirtualArea *varea)
        {
                mappings_lock.lock_irq();
                mappings.add_back(&varea->mappings_node);
                mappings_lock.unlock_irq();
        }

        void Mappable::unmap(VirtualArea *varea)
        {
                mappings_lock.lock_irq();
                varea->mappings_node.del();
                mappings_lock.unlock_irq();
        }
        
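        // Called when a PTE is torn down or replaced: invalidate the TLB
        // entry for process page tables, mark the old page dirty (and retain
        // it for writeback) if needed, and drop the page reference unless the
        // caller asked us not to.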
        void PageTable::kill_pte(ulong vaddr, u64 paddr, bool dirty,
                                 bool valid, bool no_release)
        {
                Page *oldpage = phys_to_page(paddr);
                
                if (!is_phys_page(oldpage))
                        oldpage = NULL;

                if (is_process && valid) {
                        Arch::invalidate_tlb_entry(vaddr);
                        
                        if (oldpage && dirty &&
                            !ll_test_and_set(&oldpage->flags, PageFlags::bits::Dirty))
                        {
                                oldpage->retain();
                                // Queue page for writeback
                        }
                }
                
                if (!no_release && oldpage)
                        oldpage->release();
        }
        
        // FIXME: Add a special PTE flag to indicate that PhysMem mappings
        // don't mess with page refcounts.
        
        class PhysMem : public Mappable {
        public:
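                // The returned size appears to be expressed in pages rather
                // than bytes, so the full address range fits in a u64 even on
                // 64-bit targets.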
                void get_size(u64 *size)
                {
                        if (sizeof(long) == 8)
                                *size = 1ULL << (64 - Arch::page_shift);
                        else
                                *size = 1ULL << (32 - Arch::page_shift);
                }
        
                void pagein(u64 vaddr, PTEFlags reqflags)
                {
                        // Doesn't need to do anything yet, though it may later
                        // once high memory support is added.
                }
                
                void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
                {
                        *phys = addr;
                        *flags = 0;
                        flags->Valid = 1;
                        flags->Readable = 1;
                        flags->Writeable = 1;
                        flags->Executable = 1;
                        flags->User = 1;
                }
        };
        
        PhysMem real_physmem;
        IMappable physmem = real_physmem;

        class AnonMem : public Mappable {
        public:
                void get_size(u64 *size)
                {
                        if (sizeof(long) == 8)
                                *size = 1ULL << (64 - Arch::page_shift);
                        else
                                *size = 1ULL << (32 - Arch::page_shift);
                }
        
                void pagein(u64 vaddr, PTEFlags reqflags)
                {
                        // Doesn't need to do anything yet, though it may later
                        // once high memory support is added.
                }
                
                void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
                {
                        Page *page = PageAlloc::alloc(1);
                        
                        // OPT: Only zero if it was asked for.
                        // OPT: Eventually, have separate pagelists for zeroed and
                        // unzeroed memory, and a low-priority background thread
                        // that zeroes pages and moves them to the zeroed list.
                        bzero(page_to_kvirt(page), Arch::page_size);
                        
                        *phys = page_to_phys(page);
                        *flags = 0;
                        flags->Valid = 1;
                        flags->Readable = 1;
                        flags->Writeable = 1;
                        flags->Executable = 1;
                        flags->User = 1;
                }
        };
        
        AnonMem real_anonmem;
        IMappable anonmem = real_anonmem;
}

#include <servers/mem/addrspace/footer.cc>