// mem/addrspace.cc -- System.Mem.AddrSpace
//
// OPT: Special AddrSpaces that only translate/export a linear block of
// another AddrSpace, and don't have individual entries for every page.
//
// OPT: Special VAreas that use their own translation mechanism instead
// of varea->offset, so that filesystem block tables (and similar things)
// don't need to have a VArea per block.
//
// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal with
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
//     * Redistributions of source code must retain the above copyright notice,
//       this list of conditions and the following disclaimers.
//
//     * Redistributions in binary form must reproduce the above copyright notice,
//       this list of conditions and the following disclaimers in the
//       documentation and/or other materials provided with the distribution.
//
//     * The names of the Software's authors and/or contributors
//       may not be used to endorse or promote products derived from
//       this Software without specific prior written permission.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
// SOFTWARE.

#include <kern/mem.h>
#include <kern/paging.h>
#include <kern/pagetable.h>
#include <kern/pagealloc.h>
#include <kern/generic-pte.h>
#include <kern/compiler.h>

extern int roshared_start, roshared_page_end;
extern int rwshared_start, rwshared_page_end;

namespace Mem {
        extern IMappable physmem;

        class AddrSpaceFactory {
        public:
                #include <servers/mem/addrspace/Mem/AddrSpaceFactory.h>

                AddrSpaceFactory()
                {
                        init_iface();
                }

                void create(Object *obj)
                {
                        *obj = static_cast<IAddrSpace>(*(new AddrSpace(false)));
                }
        };

        class ProcAddrSpaceFactory {
        public:
                #include <servers/mem/addrspace/Mem/ProcAddrSpaceFactory.h>

                ProcAddrSpaceFactory()
                {
                        init_iface();
                }

                void create(Object *obj)
                {
                        AddrSpace *as = new AddrSpace(true);
                        Region region;
                        MapFlags mf = 0;
                        u64 vstart;

                        region.start = kvirt_to_phys(&roshared_start);
                        region.end = kvirt_to_phys(&roshared_page_end);
                        vstart = Arch::roshared_map;
                        mf.Fixed = 1;
                        mf.access_IDLNS_Read = 1;
                        mf.access_IDLNS_Exec = 1;

                        as->map(physmem, region, &vstart, mf,
                                true, AddrSpace::map_protected);

                        region.start = kvirt_to_phys(&rwshared_start);
                        region.end = kvirt_to_phys(&rwshared_page_end);
                        vstart = Arch::rwshared_map;
                        mf.access_IDLNS_Exec = 0;
                        mf.access_IDLNS_Write = 1;
                        mf.CopyOnWrite = 1;

                        as->map(physmem, region, &vstart, mf,
                                true, AddrSpace::map_protected);

                        AllocFlags af = 0;
                        vstart = Arch::stack_bottom;
                        as->alloc_and_map(Arch::stack_top - vstart + 1, &vstart, af, mf);

                        *obj = static_cast<IAddrSpace>(*(as));
                }
        };

        ProcAddrSpaceFactory real_proc_addrspace_factory;
        Factory proc_addr_space_factory = real_proc_addrspace_factory;

        AddrSpaceFactory real_addrspace_factory;
        Factory addr_space_factory = real_addrspace_factory;

        AddrSpace::AddrSpace(bool process) : mappable(this)
        {
                init_iface();
                is_process = process;

                // OPT: Allow optional use of the native PTE for stacked aspaces,
                // either because the native PTE is 64-bit, or because it's an
                // embedded system which does not need 64-bit storage.

                if (process)
                        page_table = new PageTableImpl<Arch::PTE>(true);
                else
                        page_table = new PageTableImpl<GenPTE>(false);

                cached_free_region = Arch::user_start + Arch::page_size;
        }

        // This should only be used once during bootup to initialize the
        // kernel's address space with a static initial page table.

        AddrSpace::AddrSpace(void *ptbl_toplevel) : mappable(this)
        {
                init_iface();
                is_process = true;
                page_table = new PageTableImpl<Arch::PTE>(ptbl_toplevel);

                // FIXME: should be kernel virtual space
                cached_free_region = Arch::user_start + Arch::page_size;
        }

        void AddrSpace::get_mappable(IMappable *ma)
        {
                *ma = mappable;
        }

        void AddrSpace::clone(IAddrSpace *addrspace, uint8_t clone_is_real)
        {
                // FIXME: implement
                *addrspace = NULL;
        }

        void AddrSpace::alloc_and_map(u64 len, u64 *vstart,
                                      AllocFlags aflags, MapFlags mflags)
        {
                // FIXME: implement
        }

        bool AddrSpace::handle_fault(ulong vaddr, bool write, bool exec, bool user)
        {
                if (lock.held_by_curthread())
                        return false;

                assert(!(write && exec));
                PTEFlags reqflags;

                if (user)
                        reqflags.User = 1;

                if (write)
                        reqflags.Writeable = 1;
                else if (exec)
                        reqflags.Executable = 1;
                else
                        reqflags.Readable = 1;

                reqflags.Valid = 1;

                try {
                        mappable.pagein(page_align(vaddr), reqflags);
                }

                catch (BadPageFault &bpf) {
                        // FIXME: retain info about nature of bpf
                        // to throw to user?
                        return false;
                }

                return true;
        }
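
        // A minimal usage sketch: an architecture's page-fault path would
        // typically decode its fault status into the (write, exec, user)
        // booleans and call handle_fault(), retrying the instruction on
        // success and raising an exception otherwise.  The names below
        // (arch_page_fault, curr_aspace) are hypothetical, for illustration
        // only:
        //
        //     void arch_page_fault(ulong vaddr, bool write, bool exec, bool user)
        //     {
        //             if (!curr_aspace->handle_fault(vaddr, write, exec, user)) {
        //                     // deliver a fault exception to the thread, or
        //                     // panic if the fault came from the kernel
        //             }
        //     }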

        bool AddrSpace::check_overlap(Region region, VirtualArea *&va)
        {
                if (region.end < region.start)
                        return true;

                va = varea_tree.find_nearest(region.start);

                if (!va)
                        return false;

                // If region.start is in an existing region, that region will
                // be returned.

                if (region.end >= va->region().start &&
                    region.start <= va->region().end)
                        return true;

                // If it returns a region that's greater than region.start, and va
                // itself does not overlap, then prev does not overlap (or else
                // region.start would be in or before prev, and thus prev would
                // have been returned).

                // If it returns a region that's less than region.start, we still
                // need to check next, as region.end could be in (or beyond) that
                // region.

                if (va->list_node.next != &varea_list) {
                        VirtualArea *next =
                                va->list_node.next->listentry(VirtualArea, list_node);

                        if (region.end >= next->region().start &&
                            region.start <= next->region().end)
                        {
                                va = next;
                                return true;
                        }
                }

                VirtualArea *prev;

                if (va->list_node.prev != &varea_list)
                        prev = va->list_node.prev->listentry(VirtualArea, list_node);
                else
                        prev = NULL;

                if (region.start < va->region().start) {
                        assert(!prev || prev->region().end < region.start);
                        va = prev;
                }

                return false;
        }
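
        // Behavior sketch of the check_overlap() contract, assuming 4KB pages
        // and two existing vareas covering [0x2000,0x3fff] and [0x8000,0x8fff]
        // (addresses are illustrative only):
        //
        //     {0x3000, 0x4fff} -> true,  va = the [0x2000,0x3fff] varea
        //     {0x5000, 0x7fff} -> false, va = the [0x2000,0x3fff] varea
        //                         (the nearest preceding varea, usable as a
        //                         list insertion point)
        //     {0x1000, 0x1fff} -> false, va = NULL (no preceding varea)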

        VirtualArea *AddrSpace::split_varea(Region region)
        {
                VirtualArea *start, *mid, *end;

                // check_overlap is used rather than varea_tree.find,
                // so that the first overlapping region can be returned,
                // as most (if not all) callers will need this anyway.

                if (!check_overlap(region, start))
                        return NULL;

                assert(start);
                assert(start->aspace == this);
                assert(start->region().end >= region.start);

                if (start->region().start < region.start) {
                        // There is a varea that straddles region.start;
                        // create a new varea "mid" for the overlapping part.

                        mid = new VirtualArea;

                        mid->aspace = this;
                        mid->region().start = region.start;

                        if (region.end > start->region().end)
                                mid->region().end = start->region().end;
                        else
                                mid->region().end = region.end;

                        mid->flags = start->flags;
                        mid->ma = start->ma;
                        mid->offset = start->offset;

                        if (start->region().end > region.end) {
                                // The varea also straddles region.end; create a new
                                // varea "end" for the other side of the region.

                                end = new VirtualArea;

                                end->aspace = this;
                                end->region().start = region.end + 1;
                                end->region().end = start->region().end;

                                end->flags = start->flags;
                                end->ma = start->ma;
                                end->offset = start->offset;
                        } else {
                                end = NULL;
                        }

                        start->region().end = region.start - 1;

                        varea_tree.add(mid);
                        mid->ma->map(mid);

                        if (end) {
                                // Splits have already been done at both ends of the region,
                                // so there's no need to look up the ending address.

                                varea_tree.add(end);
                                mid->ma->map(end);
                                return mid;
                        }

                        start = mid;
                }

                if (start->region().end == region.end)
                        return start;

                if (start->region().end > region.end)
                        end = start;
                else {
                        end = varea_tree.find(region.end);

                        if (!end)
                                return start;

                        assert(end->aspace == this);
                        assert(end->region().start <= region.end);
                        assert(end->region().end >= region.end);

                        if (end->region().end == region.end)
                                return start;
                }

                assert(end->region().end > region.end);

                // There is a varea that straddles region.end;
                // create a new varea "mid" for the overlapping part.

                mid = new VirtualArea;

                mid->aspace = this;
                mid->region().start = end->region().start;
                mid->region().end = region.end;

                mid->flags = end->flags;
                mid->ma = end->ma;
                mid->offset = end->offset;

                end->region().start = region.end + 1;

                varea_tree.add(mid);
                mid->ma->map(mid);

                return start;
        }
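
        // Worked example (illustrative addresses, 4KB pages): with a single
        // varea covering [0x2000,0x7fff], split_varea({0x3000,0x5fff}) should
        // leave three vareas that share the original ma, offset, and flags:
        //
        //     [0x2000,0x2fff]  the original varea, trimmed
        //     [0x3000,0x5fff]  "mid", exactly covering the requested region
        //     [0x6000,0x7fff]  "end"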

        bool AddrSpace::get_free_region(ulong len, Region &region,
                                        VirtualArea *&prev)
        {
                assert(page_aligned(len));
                assert(cached_free_region);

                region.start = cached_free_region;
                region.end = cached_free_region + len - 1;

                if (region.end <= Arch::user_end && !check_overlap(region, prev)) {
                        cached_free_region = region.end + 1;
                        return true;
                }

                for (Util::List *node = &prev->list_node; node != &varea_list;
                     node = node->next)
                {
                        VirtualArea *va = node->listentry(VirtualArea, list_node);
                        ulong end = Arch::user_end;

                        if (node->next != &varea_list) {
                                VirtualArea *next = node->next->listentry(VirtualArea, list_node);
                                end = next->region().start - 1;
                        }

                        assert(end > va->region().end);

                        if (end - va->region().end >= len) {
                                region.start = va->region().end + 1;
                                region.end = region.start + len - 1;

                                assert(page_aligned(region.start));
                                cached_free_region = region.end + 1;
                                return true;
                        }
                }

                if (cached_free_region != Arch::user_start + Arch::page_size) {
                        cached_free_region = Arch::user_start + Arch::page_size;
                        return get_free_region(len, region, prev);
                }

                return false;
        }
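
        // Allocation sketch: this is effectively a first-fit search that
        // starts at cached_free_region.  For example (illustrative addresses,
        // 4KB pages), with vareas at [user_start+0x1000, +0x1fff] and
        // [+0x4000, +0x4fff] and cached_free_region == user_start + 0x2000,
        // a request for 0x2000 bytes yields [+0x2000, +0x3fff] and advances
        // cached_free_region to +0x4000.  When the cached hint fails, the
        // search wraps back to user_start + page_size once before giving up.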

        // The "mapped" parameter is used to indicate whether the top-level
        // address space has had a mapping established.  If "mapped" is
        // false, but an exception is not thrown, then this method must
        // be called again to propagate the mapping along the aspace chain.
        //
        // FIXME: Between aspace locks, if aspace's mapping is revoked and
        // ma->aspace's mapping changes, a pagein could leak through and cause
        // a page load or a copy-on-write break.  This isn't a huge deal
        // (it doesn't affect the correctness of the code or give aspace
        // access to ma->aspace's new mapping), but it's unpleasant, and could
        // have an adverse impact on determinism.  If you have a real-time
        // application that can't tolerate the occasional spurious pagein or
        // copy-on-write break, then use an address space that hasn't
        // previously been exposed to recursive mappers.

        bool ASpaceMappable::rec_pagein(AddrSpace *aspace, u64 vaddr,
                                        PTEFlags reqflags)
        {
                bool mapped = true;

                // aspace->mappable.retain();

                while (true) {
                        Lock::DroppableAutoLock autolock(aspace->lock);
                        VirtualArea *va = aspace->varea_tree.find(vaddr);

                        if (!va)
                                throw BadPageFault();

                        if ((va->flags & reqflags) != reqflags)
                                throw BadPageFault();

                        if (aspace->map(va, vaddr, reqflags))
                                break;

                        mapped = false;
                        Mappable *ma = va->ma;
                        vaddr += va->offset;

                        // ma->retain();
                        autolock.unlock();
                        // aspace->mappable.release();

                        if (!ma->is_aspace) {
                                ma->pagein(vaddr, reqflags);
                                // ma->release();
                                break;
                        }

                        aspace = static_cast<ASpaceMappable *>(ma)->aspace;
                }

                return mapped;
        }

        void ASpaceMappable::pagein(u64 vaddr, PTEFlags reqflags)
        {
                while (!rec_pagein(aspace, vaddr, reqflags));
        }
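
        // Propagation sketch: if aspace A has a varea backed by another aspace
        // B (B exported via get_mappable() and mapped into A), rec_pagein
        // walks A -> B, lets B's own backing Mappable page the frame in, and
        // returns false as long as A's own page table does not yet have the
        // entry.  The loop above simply retries; each pass can install the
        // entry one level further up the chain until it reaches A.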

        void AddrSpace::break_copy_on_write(VirtualArea *va, u64 vaddr, u64 phys)
        {
                assert(lock.held_by_curthread());
                assert(rmap_lock.held_by_curthread());

                assert(va->flags.FaultOnWrite);
                assert(va->aspace == this);

                Page *old_page = phys_to_page(phys);

                Region region = { vaddr, vaddr + Arch::page_size - 1 };

                // If this is the only reference to the page left, then
                // nothing needs to be copied.  Just clear the COW condition.
                if (is_phys_page(old_page) && old_page->get_refcount() == 1) {
                        PTEFlags mask, flags;
                        mask.FaultOnWrite = 1;

                        page_table->set_flags(region, flags, mask);
                        return;
                }

                Page *new_page = PageAlloc::alloc(1);

                // FIXME -- highmem
                // OPT: It'd be better to do this without the rmap_lock held,
                // especially if rmap_lock is global rather than per-physpage.
                // I want to keep things simple for now and optimize later,
                // though.

                memcpy(page_to_kvirt(new_page), phys_to_kvirt(phys),
                       Arch::page_size);

                page_table->rmap_table.break_copy_on_write(region.start, new_page);
                new_page->release();
        }

        void ASpaceMappable::get_entry(u64 vaddr, u64 *phys, PTEFlags *flags)
        {
                aspace->page_table->get_entry(vaddr, phys, flags);
        }

        bool AddrSpace::map(VirtualArea *va, u64 vaddr, PTEFlags reqflags)
        {
                Lock::AutoLock autolock(rmap_lock);
                assert(va->aspace == this);

                u64 phys;
                PTEFlags flags;
                va->ma->get_entry(vaddr + va->offset, &phys, &flags);

                PTEFlags newflags = flags & va->flags;
                newflags.FaultOnWrite = flags.FaultOnWrite | va->flags.FaultOnWrite;

                if (!newflags.Valid) {
                        assert(va->flags.Valid);
                        return false;
                }

                if ((newflags & reqflags) != reqflags)
                        return false;

                u64 oldphys;
                PTEFlags oldflags;
                page_table->get_entry(vaddr, &oldphys, &oldflags);

                if (oldflags.Valid &&
                    !(reqflags.Writeable && oldflags.FaultOnWrite))
                {
                        // If the existing mapping is valid, don't try to map it again.
                        // The existing mapping was put there possibly by a race, but
                        // more likely because a FaultOnWrite was handled upstream.
                        //
                        // FaultOnWrite handling is the only type of mapping change that
                        // can be done directly; all others must change the varea and do
                        // an rmap invalidation instead.  FaultOnWrite is special
                        // because we don't want to split vareas for every page that
                        // gets its copy-on-write broken.

                        assert((oldflags & reqflags) == reqflags);
                        return true;
                }

                if (reqflags.Writeable && oldflags.FaultOnWrite)
                {
                        // The FaultOnWrite needs to be handled upstream.
                        if (!va->flags.FaultOnWrite)
                                return false;

                        va->aspace->break_copy_on_write(va, vaddr, phys);
                } else {
                        assert(!oldflags.Valid);
                        PageTable *usptbl = NULL;

                        if (va->ma->is_aspace) {
                                ASpaceMappable *asma = static_cast<ASpaceMappable *>(va->ma);
                                usptbl = asma->aspace->page_table;
                        }

                        RMapTable::map(va, usptbl, vaddr, vaddr + va->offset);

                        RegionWithOffset rwo;
                        rwo.start = vaddr;
                        rwo.end = vaddr + Arch::page_size - 1;
                        rwo.offset = phys;

                        page_table->map(rwo, newflags);
                }

                return true;
        }
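
        // Flag-combination example: the installed PTE is the intersection of
        // what the backing mappable grants and what the varea allows, with
        // FaultOnWrite OR'd in from either side.  For instance, an upstream
        // entry of Valid|Readable|Writeable combined with a CopyOnWrite varea
        // (Writeable and FaultOnWrite set) yields a PTE that is Writeable at
        // this layer but carries FaultOnWrite, so the page table
        // implementation is expected to write-protect it and the first write
        // fault is routed through break_copy_on_write() above.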

        void ASpaceMappable::get_size(u64 *size)
        {
                aspace->get_size(size);
        }

        void AddrSpace::map(IMappable ma, Region region, u64 *vstart,
                            MapFlags mflags, bool from_kernel, int map_type)
        {
                // FIXME: check alignment for VIPT caches
                // FIXME: Implement the "Replace" map flag

                if (mflags.Replace)
                        throw_idl(InvalidArgument, 3,
                                  countarray("Replace unimplemented"));

                Mappable *cma = Mappable::classptr(ma);
                if (!cma) {
                        // The given IMappable does not refer to a Mappable
                        // of this kernel.

                        throw_idl(InvalidArgument, 0, nullarray);
                }

                bool fixed = mflags.Fixed;

                if (is_process)
                        mflags.Fixed = 1;

                if (!page_aligned(region.start))
                        throw_idl(InvalidArgument, 1, countarray("unaligned start"));

                if (!page_aligned(region.end + 1))
                        throw_idl(InvalidArgument, 1, countarray("unaligned end"));

                Lock::AutoLock autolock(lock);
                Region vregion;
                VirtualArea *prev;

                if (*vstart != System::Mem::AddrSpace_ns::unspecified_start) {
                        vregion.start = *vstart;
                        vregion.end = vregion.start + (region.end - region.start);

                        if (is_process) {
                                if (!valid_addr(vregion.start))
                                        throw_idl(InvalidArgument, 2,
                                                  countarray("invalid virtual start"));

                                if (!valid_addr(vregion.end))
                                        throw_idl(InvalidArgument, 2,
                                                  countarray("invalid virtual end"));
                        }

                        if (check_overlap(vregion, prev))
                                *vstart = System::Mem::AddrSpace_ns::unspecified_start;
                }

                if (*vstart == System::Mem::AddrSpace_ns::unspecified_start) {
                        if (fixed)
                                throw_idl(ResourceBusy, 2, countarray("varea overlap"));

                        if (!get_free_region(region.end - region.start + 1, vregion, prev))
                                throw_idl(OutOfSpace, countarray("out of vspace"));

                        *vstart = vregion.start;
                }

                VirtualArea *newva = new VirtualArea;
                newva->aspace = this;
                newva->region() = vregion;

                newva->flags.Valid = 1;
                newva->flags.User = map_type != map_kernel;
                newva->flags.Readable = mflags.access_IDLNS_Read;
                newva->flags.Writeable = mflags.access_IDLNS_Write;
                newva->flags.Executable = mflags.access_IDLNS_Exec;
                newva->flags.FaultOnWrite = mflags.CopyOnWrite;
                newva->flags.Protected = map_type != map_user;
                newva->ma = cma;
                newva->offset = region.start - vregion.start;

                varea_tree.add(newva);
                newva->ma->map(newva);

                if (prev) {
                        prev->list_node.add_front(&newva->list_node);
                } else {
                        varea_list.add_front(&newva->list_node);
                }
        }
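
        // Offset example: newva->offset is computed so that, modulo 2^64,
        // vaddr + offset gives the position within the backing mappable.
        // E.g. mapping ma region [0x10000,0x10fff] at a chosen vstart of
        // 0x40000000 stores offset = 0x10000 - 0x40000000, so a fault at
        // 0x40000234 asks the mappable for entry 0x10234 (addresses are
        // illustrative only).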

        void AddrSpace::unmap(Region region, bool from_kernel)
        {
                u64 orig_start = region.start;

                while (region.start <= region.end) {
                        Lock::DroppableAutoLock autolock(lock);
                        VirtualArea *va;

                        // If check_overlap returns false, then there are no vareas
                        // in the specified region, so there's nothing to unmap.

                        if (!check_overlap(region, va))
                                return;

                        if (va->flags.Protected && !from_kernel) {
                                region.start = va->list_node.next->
                                               listentry(VirtualArea, list_node)->region().start;

                                if (region.start <= orig_start)
                                        break;

                                continue;
                        }

                        u64 va_end = va->region().end;
                        u64 next_start = 0;

                        if (va_end > region.end) {
                                u64 va_start = va->region().start;
                                va->region().start = region.end + 1;

                                if (va_start < region.start) {
                                        VirtualArea *newva = new VirtualArea;

                                        newva->aspace = this;
                                        newva->region().start = va_start;
                                        newva->region().end = region.start - 1;

                                        newva->flags = va->flags;
                                        newva->ma = va->ma;
                                        newva->offset = va->offset;

                                        varea_tree.add(newva);
                                        newva->ma->map(newva);
                                }

                                VirtualArea *nextva =
                                        va->list_node.next->listentry(VirtualArea, list_node);

                                next_start = nextva->region().start;
                        } else if (va->region().start < region.start) {
                                va->region().end = region.start - 1;
                        } else {
                                varea_tree.del(va);
                                va->ma->unmap(va);
                        }

                        // This is done after the varea removal, so that new faults
                        // don't map things in again.

                        // OPT: Skip RMap-based unmapping if nothing maps this aspace.
                        // OPT: Push this loop into the RMap code, allowing it to skip
                        // empty portions of the tables (as the pagetable code currently
                        // does).

                        while (region.start <= va_end && region.start <= region.end) {
                                page_table->rmap_table.unmap(region.start);
                                region.start += Arch::page_size;

                                if (region.start <= orig_start)
                                        break;
                        }

                        region.start = next_start;

                        if (region.start <= orig_start)
                                break;
                }
        }

        void AddrSpace::set_mapflags(Region region, MapFlags mflags)
        {
                // FIXME: implement
                // Find varea, split if necessary, propagate change to stacked aspaces
        }

        void AddrSpace::get_mapflags(Region region, MapFlags *mflags, uint8_t *all_same)
        {
                // FIXME: implement
        }

        void AddrSpace::get_mapping(Region region, IMappable *ma, u64 *offset)
        {
                // FIXME: implement
        }

        void AddrSpace::get_page_size(u32 *page_size)
        {
                *page_size = Arch::page_size;
        }

        void AddrSpace::get_min_align(u32 *min_align)
        {
                *min_align = Arch::page_mapping_min_align;
        }

        void Mappable::map(VirtualArea *varea)
        {
                mappings_lock.lock_irq();
                mappings.add_back(&varea->mappings_node);
                mappings_lock.unlock_irq();
        }

        void Mappable::unmap(VirtualArea *varea)
        {
                mappings_lock.lock_irq();
                varea->mappings_node.del();
                mappings_lock.unlock_irq();
        }

        void PageTable::kill_pte(ulong vaddr, u64 paddr, bool dirty,
                                 bool valid, bool no_release)
        {
                Page *oldpage = phys_to_page(paddr);

                if (!is_phys_page(oldpage))
                        oldpage = NULL;

                if (is_process && valid) {
                        Arch::invalidate_tlb_entry(vaddr);

                        if (oldpage && dirty &&
                            !ll_test_and_set(&oldpage->flags, PageFlags::bits::Dirty))
                        {
                                oldpage->retain();
                                // Queue page for writeback
                        }
                }

                if (!no_release && oldpage)
                        oldpage->release();
        }

        // FIXME: Add a special PTE flag to indicate that PhysMem mappings
        // don't mess with page refcounts.

        class PhysMem : public Mappable {
        public:
                void get_size(u64 *size)
                {
                        if (sizeof(long) == 8)
                                *size = 1ULL << (64 - Arch::page_shift);
                        else
                                *size = 1ULL << (32 - Arch::page_shift);
                }

                void pagein(u64 vaddr, PTEFlags reqflags)
                {
                        // Doesn't need to do anything yet, though it may later
                        // once high memory support is added.
                }

                void get_entry(u64 addr, u64 *phys, PTEFlags *flags)
                {
                        *phys = addr;
                        *flags = 0;
                        flags->Valid = 1;
                        flags->Readable = 1;
                        flags->Writeable = 1;
                        flags->Executable = 1;
                        flags->User = 1;
                }
        };

        PhysMem real_physmem;
        IMappable physmem = real_physmem;
}

#include <servers/mem/addrspace/footer.cc>