1 // mem/addrspace.cc -- System.Mem.AddrSpace
2 //
3 // OPT: Special AddrSpaces that only translate/export a linear block of
4 // another AddrSpace, and don't have individual entries for every page.
5 //
6 // OPT: Special VAreas that use their own translation mechanism instead
7 // of varea->offset, so that filesystem block tables (and similar things)
8 // don't need to have a VArea per block.
9 //
10 // This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
11 // 
12 // Permission is hereby granted, free of charge, to any person obtaining a copy of
13 // this software and associated documentation files (the "Software"), to deal with
14 // the Software without restriction, including without limitation the rights to
15 // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
16 // of the Software, and to permit persons to whom the Software is furnished to do
17 // so, subject to the following condition:
18 // 
19 // The above copyright notice and this permission notice shall be
20 // included in all copies or substantial portions of the Software.
21 // 
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
24 // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
25 // CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
28 // SOFTWARE.
29
30 #include <kern/mem.h>
31 #include <kern/paging.h>
32 #include <kern/pagetable.h>
33 #include <kern/pagealloc.h>
34 #include <kern/generic-pte.h>
35 #include <kern/compiler.h>
36
37 extern int roshared_start, roshared_page_end;
38 extern int rwshared_start, rwshared_page_end;
39
40 namespace Mem {
41         extern IMappable physmem, anonmem;
42
43         class AddrSpaceFactory {
44         public:
45                 #include <servers/mem/addrspace/Mem/AddrSpaceFactory.h>
46                 
47                 AddrSpaceFactory()
48                 {
49                         init_iface();
50                 }
51                 
52                 void create(Object *obj)
53                 {
54                         *obj = static_cast<IAddrSpace>(*(new AddrSpace(false)));
55                 }
56         };
57
58         class ProcAddrSpaceFactory {
59         public:
60                 #include <servers/mem/addrspace/Mem/ProcAddrSpaceFactory.h>
61                 
62                 ProcAddrSpaceFactory()
63                 {
64                         init_iface();
65                 }
66                 
67                 void create(Object *obj)
68                 {
69                         AddrSpace *as = new AddrSpace(true);
70                         Region region;
71                         MapFlags mf = 0;
72                         u64 vstart;
73
74                         region.start = kvirt_to_phys(&roshared_start);
75                         region.end = kvirt_to_phys(&roshared_page_end);
76                         vstart = Arch::roshared_map;
77                         mf.Fixed = 1;
78                         mf.access_IDLNS_Read = 1;
79                         mf.access_IDLNS_Exec = 1;
80                         
81                         as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);
82                         
83                         region.start = kvirt_to_phys(&rwshared_start);
84                         region.end = kvirt_to_phys(&rwshared_page_end);
85                         vstart = Arch::rwshared_map;
86                         mf.access_IDLNS_Exec = 0;
87                         mf.access_IDLNS_Write = 1;
88                         mf.CopyOnWrite = 1;
89                         
90                         as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);
91                         
92                         // Leave the stack no-exec by default.
93                         region.start = vstart = Arch::stack_bottom;
94                         region.end = Arch::stack_top;
95                         mf.CopyOnWrite = 0;
96                         printf("vstart %llx\n", vstart);
97                         as->map(anonmem, region, &vstart, mf);
98                         
99                         *obj = static_cast<IAddrSpace>(*(as));
100                 }
101         };
102         
103         ProcAddrSpaceFactory real_proc_addrspace_factory;
104         Factory proc_addr_space_factory = real_proc_addrspace_factory;
105
106         AddrSpaceFactory real_addrspace_factory;
107         Factory addr_space_factory = real_addrspace_factory;
108
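        // Illustrative sketch: how the factory objects above and
        // AddrSpace::map() (defined later in this file) are meant to be used
        // from inside the kernel, mirroring ProcAddrSpaceFactory::create().
        // The function name and the 16-page size are arbitrary, chosen only
        // for the example.

        static void example_map_anon()
        {
                AddrSpace *as = new AddrSpace(false);

                // Back 16 pages with anonymous memory, letting the kernel
                // choose the virtual address.
                Region region = { 0, 16 * Arch::page_size - 1 };
                u64 vstart = System::Mem::AddrSpace_ns::unspecified_start;

                MapFlags mf = 0;
                mf.access_IDLNS_Read = 1;
                mf.access_IDLNS_Write = 1;

                as->map(anonmem, region, &vstart, mf);
                // On return, vstart holds the virtual address that was chosen.
        }
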
109         AddrSpace::AddrSpace(bool process) : mappable(this)
110         {
111                 init_iface();
112                 is_process = process;
113                 
114                 // OPT: Allow optional use of the native PTE for stacked aspaces,
115                 // either because the native PTE is 64-bit, or because it's an
116                 // embedded system which does not need 64-bit storage.
117                 
118                 if (process)
119                         page_table = new PageTableImpl<Arch::PTE>(true);
120                 else
121                         page_table = new PageTableImpl<GenPTE>(false);
122                 
123                 cached_free_region = Arch::user_start + Arch::page_size;
124         }
125         
126         // This should only be used once during bootup to initialize the
127         // kernel's address space with a static initial page table.
128         
129         AddrSpace::AddrSpace(void *ptbl_toplevel) : mappable(this)
130         {
131                 init_iface();
132                 is_process = true;
133                 page_table = new PageTableImpl<Arch::PTE>(ptbl_toplevel);
134                 
135                 // FIXME: should be kernel virtual space
136                 cached_free_region = Arch::user_start + Arch::page_size;
137         }
138
139         void AddrSpace::get_mappable(IMappable *ma)
140         {
141                 *ma = mappable;
142         }
143
144         void AddrSpace::clone(IAddrSpace *addrspace, uint8_t clone_is_real)
145         {
146                 // FIXME: implement
147                 *addrspace = NULL;
148         }
149         
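        // A sketch of the expected caller (the architecture's page-fault
        // path is not in this file; "aspace", "fault_addr", "is_write",
        // "is_exec", and "from_user" are illustrative names decoded from
        // the trap state):
        //
        //      if (!aspace->handle_fault(fault_addr, is_write, is_exec, from_user))
        //              // unresolved fault: raise an exception to the user
        //              // thread, or treat a kernel-mode fault as fatal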
150         bool AddrSpace::handle_fault(ulong vaddr, bool write, bool exec, bool user)
151         {
152                 if (lock.held_by_curthread())
153                         return false;
154
155                 assert(!(write && exec));
156                 PTEFlags reqflags;
157
158                 if (user)
159                         reqflags.User = 1;
160
161                 if (write)
162                         reqflags.Writeable = 1;
163                 else if (exec)
164                         reqflags.Executable = 1;
165                 else
166                         reqflags.Readable = 1;
167                 
168                 reqflags.Valid = 1;
169
170                 try {
171                         mappable.pagein(page_align(vaddr), reqflags);
172                 }
173
174                 catch (BadPageFault &bpf) {
175                         // FIXME: retain info about nature of bpf
176                         // to throw to user?
177                         return false;
178                 }
179
180                 return true;
181         }
182         
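        // check_overlap() returns true if any varea overlaps "region" (and
        // sets "va" to an overlapping varea); a degenerate region whose end
        // precedes its start also returns true, with "va" untouched.  On a
        // false return, "va" is left pointing at the nearest varea that ends
        // before region.start, or NULL if there is none; get_free_region()
        // and map() rely on this when linking new vareas into the
        // address-ordered list.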
183         bool AddrSpace::check_overlap(Region region, VirtualArea *&va)
184         {
185                 if (region.end < region.start)
186                         return true;
187         
188                 va = varea_tree.find_nearest(region.start);
189                 
190                 if (!va)
191                         return false;
192                 
193                 // If region.start is in an existing region, that region will
194                 // be returned.
195
196                 if (region.end >= va->region().start &&
197                     region.start <= va->region().end)
198                         return true;
199                 
200                 // If it returns a region that's greater than region.start, and va
201                 // itself does not overlap, then prev does not overlap (or else
202                 // region.start would be in or before prev, and thus prev would
203                 // have been returned).
204                 
205                 // If it returns a region that's less than region.start, we still
206                 // need to check next, as region.end could be in (or beyond) that
207                 // region.
208                 
209                 if (va->list_node.next != &varea_list) {
210                         VirtualArea *next =
211                                 va->list_node.next->listentry(VirtualArea, list_node);
212                 
213                         if (region.end >= next->region().start &&
214                             region.start <= next->region().end)
215                         {
216                                 va = next;
217                                 return true;
218                         }
219                 }
220
221                 VirtualArea *prev;
222         
223                 if (va->list_node.prev != &varea_list)
224                         prev = va->list_node.prev->listentry(VirtualArea, list_node);
225                 else
226                         prev = NULL;
227         
228                 if (region.start < va->region().start) {
229                         assert(!prev || prev->region().end < region.start);
230                         va = prev;
231                 }
232                 
233                 return false;
234         }
235
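        // split_varea() makes region.start and region.end fall on varea
        // boundaries, splitting any varea that straddles either end of the
        // region.  It returns the first varea overlapping the region, or
        // NULL if nothing overlaps.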
236         VirtualArea *AddrSpace::split_varea(Region region)
237         {
238                 VirtualArea *start, *mid, *end;
239
240                 // check_overlap is used rather than varea_tree.find,
241                 // so that the first overlapping region can be returned,
242                 // as most (if not all) callers will need this anyway.
243                 
244                 if (!check_overlap(region, start))
245                         return NULL;
246                 
247                 assert(start);
248                 assert(start->aspace == this);
249                 assert(start->region().end >= region.start);
250                 
251                 if (start->region().start < region.start) {
252                         // There is a varea that straddles region.start;
253                         // create a new varea "mid" for the overlapping part.
254                 
255                         mid = new VirtualArea;
256                         
257                         mid->aspace = this;
258                         mid->region().start = region.start;
259                         
260                         if (region.end > start->region().end)
261                                 mid->region().end = start->region().end;
262                         else
263                                 mid->region().end = region.end;
264
265                         mid->flags = start->flags;
266                         mid->ma = start->ma;
267                         mid->offset = start->offset;
268                         
269                         if (start->region().end > region.end) {
270                                 // The varea also straddles region.end; create a new
271                                 // varea "end" for the other side of the region.
272                         
273                                 end = new VirtualArea;
274
275                                 end->aspace = this;
276                                 end->region().start = region.end + 1;
277                                 end->region().end = start->region().end;
278
279                                 end->flags = start->flags;
280                                 end->ma = start->ma;
281                                 end->offset = start->offset;
282                         } else {
283                                 end = NULL;
284                         }
285
286                         start->region().end = region.start - 1;
287
288                         varea_tree.add(mid);
289                         mid->ma->map(mid);
290                         
291                         if (end) {
292                                 // Splits have already been done at both ends of the region,
293                                 // so there's no need to look up the ending address.
294                                 
295                                 varea_tree.add(end);
296                                 mid->ma->map(end);
297                                 return mid;
298                         }
299                         
300                         start = mid;
301                 }
302                 
303                 if (start->region().end == region.end)
304                         return start;
305                 
306                 if (start->region().end > region.end)
307                         end = start;
308                 else {
309                         end = varea_tree.find(region.end);
310                         
311                         if (!end)
312                                 return start;
313
314                         assert(end->aspace == this);
315                         assert(end->region().start <= region.end);
316                         assert(end->region().end >= region.end);
317
318                         if (end->region().end == region.end)
319                                 return start;
320                 }
321                 
322                 assert(end->region().end > region.end);
323                 
324                 // There is a varea that straddles region.end;
325                 // create a new varea "mid" for the overlapping part.
326
327                 mid = new VirtualArea;
328
329                 mid->aspace = this;
332                 mid->region().start = end->region().start;
333                 mid->region().end = region.end;
334
335                 mid->flags = end->flags;
336                 mid->ma = end->ma;
337                 mid->offset = end->offset;
338
339                 end->region().start = region.end + 1;
340
341                 varea_tree.add(mid);
342                 mid->ma->map(mid);
343                 
344                 return start;
345         }
346         
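        // get_free_region() looks for a free, page-aligned region of the
        // given length: first at the cached hint (cached_free_region), then
        // by a first-fit walk of the varea list, and finally by resetting
        // the hint to the bottom of user space and retrying once.  On
        // success, "prev" is the varea that will precede the new region,
        // which map() uses as the list insertion point.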
347         bool AddrSpace::get_free_region(ulong len, Region &region,
348                                         VirtualArea *&prev)
349         {
350                 assert(page_aligned(len));
351                 assert(cached_free_region);
352         
353                 region.start = cached_free_region;
354                 region.end = cached_free_region + len - 1;
355                 
356                 if (region.end <= Arch::user_end && !check_overlap(region, prev)) {
357                         cached_free_region = region.end + 1;
358                         return true;
359                 }
360                 
361                 for (Util::List *node = &prev->list_node; node != &varea_list;
362                      node = node->next)
363                 {
364                         VirtualArea *va = node->listentry(VirtualArea, list_node);
365                         ulong end = Arch::user_end;
366                         
367                         if (node->next != &varea_list) {
368                                 VirtualArea *next = node->next->listentry(VirtualArea, list_node);
369                                 end = next->region().start - 1;
370                         }
371                         
372                         assert(end > va->region().end);
373                         
374                         if (end - va->region().end >= len) {
375                                 region.start = va->region().end + 1;
376                                 region.end = region.start + len - 1;
377
378                                 assert(page_aligned(region.start));
379                                 cached_free_region = region.end + 1;
                                // Keep prev accurate; map() uses it as the
                                // list insertion point for the new varea.
                                prev = va;
380                                 return true;
381                         }
382                 }
383                 
384                 if (cached_free_region != Arch::user_start + Arch::page_size) {
385                         cached_free_region = Arch::user_start + Arch::page_size;
386                         return get_free_region(len, region, prev);
387                 }
388                 
389                 return false;
390         }
391
392         // The "mapped" parameter is used to indicate whether the top-level
393         // address space has had a mapping established.  If "mapped" is
394         // false, but an exception is not thrown, then this method must
395         // be called again to propagate the mapping along the aspace chain.
396         //
397         // FIXME: Between aspace locks, if aspace's mapping is revoked and
398         // ma->aspace's mapping changes, a pagein could leak through and cause
399         // a page load or a copy-on-write breaking.  This isn't a huge deal
400         // (it doesn't affect the correctness of the code or give aspace
401         // access to ma->aspace's new mapping), but it's unpleasant, and could
402         // have an adverse impact on determinism.  If you have a real-time
403         // application that can't tolerate the occasional spurious pagein or
404         // copy-on-write breaking, then use an address space that hasn't
405         // previously been exposed to recursive mappers.
406         
407         bool ASpaceMappable::rec_pagein(AddrSpace *aspace, u64 vaddr,
408                                         PTEFlags reqflags)
409         {
410                 bool mapped = true;
411                 
412                 // aspace->mappable.retain();
413                 
414                 while (true) {
415                         Lock::DroppableAutoLock autolock(aspace->lock);
416                         VirtualArea *va = aspace->varea_tree.find(vaddr);
417                         
418                         if (!va)
419                                 throw BadPageFault();
420
421                         if ((va->flags & reqflags) != reqflags)
422                                 throw BadPageFault();
423                         
424                         if (aspace->map(va, vaddr, reqflags))
425                                 break;
426
427                         mapped = false;
428                         Mappable *ma = va->ma;
429                         vaddr += va->offset;
430
431                         // ma->retain();
432                         autolock.unlock();
433                         // aspace->mappable.release();
434
435                         if (!ma->is_aspace) {
436                                 ma->pagein(vaddr, reqflags);
437                                 // ma->release();
438                                 break;
439                         }
440                         
441                         aspace = static_cast<ASpaceMappable *>(ma)->aspace;
442                 }
443                 
444                 return mapped;
445         }
446         
447         void ASpaceMappable::pagein(u64 vaddr, PTEFlags reqflags)
448         {
449                 while (!rec_pagein(aspace, vaddr, reqflags));
450         }
451         
452         void AddrSpace::break_copy_on_write(VirtualArea *va, u64 vaddr, u64 phys)
453         {
454                 assert(lock.held_by_curthread());
455                 assert(rmap_lock.held_by_curthread());
456
457                 assert(va->flags.FaultOnWrite);
458                 assert(va->aspace == this);
459
460                 Page *old_page = phys_to_page(phys);
461
462                 Region region = { vaddr, vaddr + Arch::page_size - 1 };
463         
464                 // If this is the only reference to the page left, then
465                 // nothing needs to be copied.  Just clear the COW condition.
466                 if (is_phys_page(old_page) && old_page->get_refcount() == 1) {
467                         PTEFlags mask, flags;
468                         mask.FaultOnWrite = 1;
469
470                         page_table->set_flags(region, flags, mask);
471                         return;
472                 }
473                 
474                 Page *new_page = PageAlloc::alloc(1);
475
476                 // FIXME -- highmem
477                 // OPT: It'd be better to do this without the rmap_lock held,
478                 // especially if rmap_lock is global rather than per-physpage.
479                 // I want to keep things simple for now and optimize later,
480                 // though.
481
482                 memcpy(page_to_kvirt(new_page), phys_to_kvirt(phys),
483                        Arch::page_size);
484
485                 page_table->rmap_table.break_copy_on_write(region.start, new_page);
486                 new_page->release();
487         }
488
489         void ASpaceMappable::get_mapping(u64 vaddr, u64 *phys, PTEFlags *flags)
490         {
491                 aspace->page_table->get_mapping(vaddr, phys, flags);
492         }
493         
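        // map(VirtualArea *, ...) tries to install a final page table entry
        // for vaddr, taking the physical address and flags from the varea's
        // backing mappable.  It returns false if the backing mappable does
        // not yet provide a mapping that satisfies reqflags (or if a
        // copy-on-write must first be broken upstream); rec_pagein() then
        // descends to the backing mappable and retries.  It returns true
        // once this page table holds a mapping satisfying reqflags.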
494         bool AddrSpace::map(VirtualArea *va, u64 vaddr, PTEFlags reqflags)
495         {
496                 Lock::AutoLock autolock(rmap_lock);
497                 assert(va->aspace == this);
498                         
499                 u64 phys;
500                 PTEFlags flags;
501                 va->ma->get_mapping(vaddr + va->offset, &phys, &flags);
502
503                 PTEFlags newflags = flags & va->flags;
504                 newflags.FaultOnWrite = flags.FaultOnWrite | va->flags.FaultOnWrite;
505                 
506                 if (!newflags.Valid) {
507                         assert(va->flags.Valid);
508                         return false;
509                 }
510                 
511                 if ((newflags & reqflags) != reqflags)
512                         return false;
513
514                 u64 oldphys;
515                 PTEFlags oldflags;
516                 page_table->get_mapping(vaddr, &oldphys, &oldflags);
517                 
518                 if (oldflags.Valid &&
519                     !(reqflags.Writeable && oldflags.FaultOnWrite))
520                 {
521                         // If the existing mapping is valid, don't try to map it again.
522                         // The existing mapping was put there possibly by a race, but
523                         // more likely because a FaultOnWrite was handled upstream.
524                         //
525                         // FaultOnWrite handling is the only type of mapping change that
526                         // can be done directly; all others must change the varea and do
527                         // an rmap invalidation instead.  FaultOnWrite is special
528                         // because we don't want to split vareas for every page that
529                         // gets its copy-on-write broken.
530
531                         assert((oldflags & reqflags) == reqflags);
532                         assert(!va->flags.FaultOnWrite || oldphys == phys);
533                         return true;
534                 }
535
536                 if (reqflags.Writeable && oldflags.FaultOnWrite)
537                 {
538                         // The FaultOnWrite needs to be handled upstream.
539                         if (!va->flags.FaultOnWrite)
540                                 return false;
541                         
542                         va->aspace->break_copy_on_write(va, vaddr, phys);
543                 } else {
544                         assert(!oldflags.Valid);
545                         PageTable *usptbl = NULL;
546                         
547                         if (va->ma->is_aspace) {
548                                 ASpaceMappable *asma = static_cast<ASpaceMappable *>(va->ma);
549                                 usptbl = asma->aspace->page_table;
550                         }
551                         
552                         RMapTable::map(va, usptbl, vaddr, vaddr + va->offset);
553                         
554                         RegionWithOffset rwo;
555                         rwo.start = vaddr;
556                         rwo.end = vaddr + Arch::page_size - 1;
557                         rwo.offset = phys;
558                         
559                         page_table->map(rwo, newflags);
560                 }
561                 
562                 return true;
563         }
564
565         void ASpaceMappable::get_size(u64 *size)
566         {
567                 aspace->get_size(size);
568         }
569         
570         void AddrSpace::map(IMappable ma, Region region, u64 *vstart,
571                             MapFlags mflags, int map_type)
572         {
573                 // FIXME: check alignment for VIPT caches
574                 // FIXME: Implement the "Replace" map flag
575                 
576                 if (mflags.Replace)
577                         throw_idl(InvalidArgument, 3,
578                                   countarray("Replace unimplemented"));
579                 
580                 Mappable *cma = Mappable::classptr(ma);
581                 if (!cma) {
582                         // The given IMappable does not refer to a Mappable
583                         // of this kernel.
584
585                         throw_idl(InvalidArgument, 0, nullarray);
586                 }
587                 
588                 bool fixed = mflags.Fixed;
589                 
590                 if (is_process)
591                         mflags.Fixed = 1;
592                 
593                 if (!page_aligned(region.start))
594                         throw_idl(InvalidArgument, 1, countarray("unaligned start"));
595                 
596                 if (!page_aligned(region.end + 1))
597                         throw_idl(InvalidArgument, 1, countarray("unaligned end"));
598                 
599                 Lock::AutoLock autolock(lock);
600                 Region vregion;
601                 VirtualArea *prev;
602                 
603                 if (*vstart != System::Mem::AddrSpace_ns::unspecified_start) {
604                         vregion.start = *vstart;
605                         vregion.end = vregion.start + region.end - region.start;
606                 
607                         if (is_process) {
608                                 if (!valid_addr(vregion.start))
609                                         throw_idl(InvalidArgument, 2,
610                                                   countarray("invalid virtual start"));
611                                 
612                                 if (!valid_addr(vregion.end))
613                                         throw_idl(InvalidArgument, 2,
614                                                   countarray("invalid virtual end"));
615                         }
616                         
617                         if (check_overlap(vregion, prev))
618                                 *vstart = System::Mem::AddrSpace_ns::unspecified_start;
619                 }
620                 
621                 if (*vstart == System::Mem::AddrSpace_ns::unspecified_start) {
622                         if (fixed)
623                                 throw_idl(ResourceBusy, 2, countarray("varea overlap"));
624                         
625                         if (!get_free_region(region.end - region.start + 1, vregion, prev))
626                                 throw_idl(OutOfSpace, countarray("out of vspace"));
627                         
628                         *vstart = vregion.start;
629                 }
630                 
631                 VirtualArea *newva = new VirtualArea;
632                 newva->aspace = this;
633                 newva->region() = vregion;
634
635                 newva->flags.Valid = 1;
636                 newva->flags.User = map_type != map_kernel;
637                 newva->flags.Readable = mflags.access_IDLNS_Read;
638                 newva->flags.Writeable = mflags.access_IDLNS_Write;
639                 newva->flags.Executable = mflags.access_IDLNS_Exec;
640                 newva->flags.FaultOnWrite = mflags.CopyOnWrite;
641                 newva->flags.Protected = map_type != map_user;
642                 newva->ma = cma;
643                 newva->offset = region.start - vregion.start;
644
645                 varea_tree.add(newva);
646                 newva->ma->map(newva);
647                 
648                 if (prev) {
649                         prev->list_node.add_front(&newva->list_node);
650                 } else {
651                         varea_list.add_front(&newva->list_node);
652                 }
653         }
654         
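        // unmap() removes all mappings in "region".  Vareas that only
        // partially overlap the region are trimmed or split, protected
        // vareas are skipped unless the request comes from the kernel, and
        // the RMap entries for each affected page are torn down only after
        // the varea bookkeeping so that new faults cannot re-map the pages.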
655         void AddrSpace::unmap(Region region, bool from_kernel)
656         {
657                 u64 orig_start = region.start;
658         
659                 while (region.start <= region.end) {
660                         Lock::DroppableAutoLock autolock(lock);
661                         VirtualArea *va;
662
663                         // If check_overlap returns false, then there are no vareas
664                         // in the specified region, so there's nothing to unmap.
665                         
666                         if (!check_overlap(region, va))
667                                 return;
668                         
669                         if (va->flags.Protected && !from_kernel) {
670                                 region.start = va->list_node.next->
671                                                listentry(VirtualArea, list_node)->region().start;
672
673                                 if (region.start <= orig_start)
674                                         break;
675                                 
676                                 continue;
677                         }
678                         
679                         u64 va_end = va->region().end;
680                         u64 next_start = 0;
681                         
682                         if (va_end > region.end) {
683                                 u64 va_start = va->region().start;
684                                 va->region().start = region.end + 1;
685                         
686                                 if (va_start < region.start) {
687                                         VirtualArea *newva = new VirtualArea;
688
689                                         newva->aspace = this;
690                                         newva->region().start = va_start;
691                                         newva->region().end = region.start - 1;
692         
693                                         newva->flags = va->flags;
694                                         newva->ma = va->ma;
695                                         newva->offset = va->offset;
696
697                                         varea_tree.add(newva);
698                                         newva->ma->map(newva);
699                                 }
700                                 
701                                 VirtualArea *nextva =
702                                         va->list_node.next->listentry(VirtualArea, list_node);
703
704                                 next_start = nextva->region().start;
705                         } else if (va->region().start < region.start) {
706                                 va->region().end = region.start - 1;
707                         } else {
708                                 varea_tree.del(va);
709                                 va->ma->unmap(va);
710                         }
711
712                         // This is done after the varea removal, so that new faults
713                         // don't map things in again.
714
715                         // OPT: Skip RMap-based unmapping if nothing maps this aspace.
716                         // OPT: Push this loop into the RMap code, allowing it to skip
717                         // empty portions of the tables (as the pagetable code currently
718                         // does).
719                         
720                         while (region.start <= va_end && region.start <= region.end) {
721                                 page_table->rmap_table.unmap(region.start);
722                                 region.start += Arch::page_size;
723
724                                 if (region.start <= orig_start)
725                                         break;
726                         }
727                         
728                         region.start = next_start;
729                         
730                         if (region.start <= orig_start)
731                                 break;
732                 }
733         }
734         
735         void AddrSpace::set_mapflags(Region region, MapFlags mflags)
736         {
737                 // FIXME: implement
738                 // Find varea, split if necessary, propagate change to stacked aspaces
739         }
740         
741         void AddrSpace::get_mapflags(Region region, MapFlags *mflags, uint8_t *all_same)
742         {
743                 // FIXME: implement
744         }
745         
746         void AddrSpace::get_mapping(Region region, IMappable *ma, u64 *offset)
747         {
748                 // FIXME: implement
749         }
750         
751         void AddrSpace::get_page_size(u32 *page_size)
752         {
753                 *page_size = Arch::page_size;
754         }
755         
756         void AddrSpace::get_min_align(u32 *min_align)
757         {
758                 *min_align = Arch::page_mapping_min_align;
759         }
760         
761         void Mappable::map(VirtualArea *varea)
762         {
763                 mappings_lock.lock_irq();
764                 mappings.add_back(&varea->mappings_node);
765                 mappings_lock.unlock_irq();
766         }
767
768         void Mappable::unmap(VirtualArea *varea)
769         {
770                 mappings_lock.lock_irq();
771                 varea->mappings_node.del();
772                 mappings_lock.unlock_irq();
773         }
774         
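        // kill_pte() tears down the bookkeeping for a translation that is
        // being removed or replaced: for process address spaces, a valid old
        // entry has its TLB entry invalidated, and a page dirtied for the
        // first time is retained so it can be queued for writeback; unless
        // no_release is set, the reference held by the old mapping is
        // dropped.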
775         void PageTable::kill_pte(ulong vaddr, u64 paddr, bool dirty,
776                                  bool valid, bool no_release)
777         {
778                 Page *oldpage = phys_to_page(paddr);
779                 
780                 if (!is_phys_page(oldpage))
781                         oldpage = NULL;
782
783                 if (is_process && valid) {
784                         Arch::invalidate_tlb_entry(vaddr);
785                                 
786                         if (oldpage && dirty &&
787                             !ll_test_and_set(&oldpage->flags, PageFlags::bits::Dirty))
788                         {
789                                 oldpage->retain();
790                                 // Queue page for writeback
791                         }
792                 }
793                 
794                 if (!no_release && oldpage)
795                         oldpage->release();
796         }
797         
798         // FIXME: Add a special PTE flag to indicate that PhysMem mappings
799         // don't mess with page refcounts.
800         
801         class PhysMem : public Mappable {
802         public:
803                 void get_size(u64 *size)
804                 {
805                         if (sizeof(long) == 8)
806                                 *size = 1ULL << (64 - Arch::page_shift);
807                         else
808                                 *size = 1ULL << (32 - Arch::page_shift);
809                 }
810         
811                 void pagein(u64 vaddr, PTEFlags reqflags)
812                 {
813                         // Doesn't need to do anything yet, though it may later
814                         // once high memory support is added.
815                 }
816                 
817                 void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
818                 {
819                         *phys = addr;
820                         *flags = 0;
821                         flags->Valid = 1;
822                         flags->Readable = 1;
823                         flags->Writeable = 1;
824                         flags->Executable = 1;
825                         flags->User = 1;
826                 }
827         };
828         
829         PhysMem real_physmem;
830         IMappable physmem = real_physmem;
831
832         class AnonMem : public Mappable {
833         public:
834                 void get_size(u64 *size)
835                 {
836                         if (sizeof(long) == 8)
837                                 *size = 1ULL << (64 - Arch::page_shift);
838                         else
839                                 *size = 1ULL << (32 - Arch::page_shift);
840                 }
841         
842                 void pagein(u64 vaddr, PTEFlags reqflags)
843                 {
844                         // Doesn't need to do anything yet, though it may later
845                         // once high memory support is added.
846                 }
847                 
848                 void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
849                 {
850                         Page *page = PageAlloc::alloc(1);
851                         
852                         // OPT: Only zero if it was asked for.
853                         // OPT: Eventually, have separate pagelists for zeroed and
854                         // unzeroed memory, and a low-priority background thread
855                         // that zeroes pages and moves them to the zeroed list.
856                         bzero(page_to_kvirt(page), Arch::page_size);
857                         
858                         *phys = page_to_phys(page);
859                         *flags = 0;
860                         flags->Valid = 1;
861                         flags->Readable = 1;
862                         flags->Writeable = 1;
863                         flags->Executable = 1;
864                         flags->User = 1;
865                 }
866         };
867         
868         AnonMem real_anonmem;
869         IMappable anonmem = real_anonmem;
870 }
871
872 #include <servers/mem/addrspace/footer.cc>