// mem/addrspace.cc -- System.Mem.AddrSpace
//
// OPT: Special AddrSpaces that only translate/export a linear block of
// another AddrSpace, and don't have individual entries for every page.
//
// OPT: Special VAreas that use their own translation mechanism instead
// of varea->offset, so that filesystem block tables (and similar things)
// don't need to have a VArea per block.
//
// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
//
// This software is provided 'as-is', without any express or implied warranty.
// In no event will the authors or contributors be held liable for any damages
// arising from the use of this software.
//
// Permission is hereby granted to everyone, free of charge, to use, copy,
// modify, prepare derivative works of, publish, distribute, perform,
// sublicense, and/or sell copies of the Software, provided that the above
// copyright notice and disclaimer of warranty be included in all copies or
// substantial portions of this software.

#include <kern/mem.h>
#include <kern/paging.h>
#include <kern/generic-pagetable.h>
#include <kern/pagetable.h>
#include <kern/pagealloc.h>
#include <kern/generic-pte.h>
#include <kern/compiler.h>
#include <kern/process.h>
#include <kern/thread.h>

extern int roshared_start, roshared_page_end;
extern int rwshared_start, rwshared_page_end;

namespace Mem {
        extern IMappable physmem, anonmem;

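        // Factories for creating plain and process address spaces.  The
        // IDL-generated interface glue is pulled in via the #include at the
        // top of each class body.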
        class AddrSpaceFactory {
        public:
                #include <servers/mem/addrspace/Mem/AddrSpaceFactory.h>

                AddrSpaceFactory()
                {
                        init_iface();
                }

                void create(Object *obj)
                {
                        *obj = static_cast<IAddrSpace>(*(new AddrSpace));
                }
        };

        class ProcAddrSpaceFactory {
        public:
                #include <servers/mem/addrspace/Mem/ProcAddrSpaceFactory.h>

                ProcAddrSpaceFactory()
                {
                        init_iface();
                }

                void create(Object *obj)
                {
                        AddrSpace *as = new ProcAddrSpace;
                        Region region;
                        MapFlags mf = 0;
                        u64 vstart;

                        region.start = kvirt_to_phys(&roshared_start);
                        region.end = kvirt_to_phys(&roshared_page_end);
                        vstart = Arch::roshared_map;
                        mf.Fixed = 1;
                        mf.access_IDLNS_Read = 1;
                        mf.access_IDLNS_Exec = 1;

                        as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);

                        region.start = kvirt_to_phys(&rwshared_start);
                        region.end = kvirt_to_phys(&rwshared_page_end);
                        vstart = Arch::rwshared_map;
                        mf.access_IDLNS_Exec = 0;
                        mf.access_IDLNS_Write = 1;
                        mf.CopyOnWrite = 1;

                        as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);

                        // Leave the stack no-exec by default.
                        region.start = vstart = Arch::stack_bottom;
                        region.end = Arch::stack_top;
                        mf.CopyOnWrite = 0;
                        printf("vstart %llx\n", vstart);
                        as->map(anonmem, region, &vstart, mf);

                        *obj = static_cast<IAddrSpace>(*as);
                }
        };

        ProcAddrSpaceFactory real_proc_addrspace_factory;
        Factory proc_addr_space_factory = real_proc_addrspace_factory;

        AddrSpaceFactory real_addrspace_factory;
        Factory addr_space_factory = real_addrspace_factory;

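        // Create an address space around the given page table, or around a
        // newly allocated generic page table if none is supplied.  The
        // free-region search starts one page above Arch::user_start.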
        AddrSpace::AddrSpace(PageTable *ptbl) : mappable(this)
        {
                init_iface();
                is_process = false;
                page_table = ptbl;

                if (!ptbl)
                        page_table = new PageTableImpl<GenPTE>(false);

                cached_free_region = Arch::user_start + Arch::page_size;
        }

        ProcAddrSpace::ProcAddrSpace() :
        AddrSpace(new PageTableImpl<Arch::PTE>(true))
        {
                is_process = true;
        }

        // This should only be used once during bootup to initialize the
        // kernel's address space with a static initial page table.

        ProcAddrSpace::ProcAddrSpace(void *ptbl_toplevel) :
        AddrSpace(new PageTableImpl<Arch::PTE>(ptbl_toplevel))
        {
                // FIXME: set cached_free_region to kernel virtual space
                is_process = true;
        }

        void AddrSpace::get_mappable(IMappable *ma)
        {
                *ma = mappable;
        }

        void AddrSpace::clone(IAddrSpace *addrspace, uint8_t clone_is_real)
        {
                // FIXME: implement
                *addrspace = NULL;
        }

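        // Called from the arch fault handler.  Translates the fault into a
        // set of required PTE flags and asks the mappable chain to page the
        // address in.  Returns false if the fault could not be handled here
        // (the address space lock is already held by the current thread, or
        // the mappable chain rejected the access).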
        bool AddrSpace::handle_fault(ulong vaddr, bool write, bool exec, bool user)
        {
                if (lock.held_by_curthread())
                        return false;

                assert(!(write && exec));
                PTEFlags reqflags;

                if (user)
                        reqflags.User = 1;

                if (write)
                        reqflags.Writeable = 1;
                else if (exec)
                        reqflags.Executable = 1;
                else
                        reqflags.Readable = 1;

                reqflags.Valid = 1;

                try {
                        mappable.pagein(page_align(vaddr), reqflags);
                }

                catch (BadPageFault &bpf) {
                        // FIXME: retain info about nature of bpf
                        // to throw to user?
                        return false;
                }

                return true;
        }

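        // Returns true if "region" overlaps an existing virtual area, with
        // "va" set to the first overlapping area.  Returns false if there is
        // no overlap, with "va" set to the nearest area below region.start
        // (or NULL if there is none), so that callers can link a new area
        // into the list without another lookup.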
        bool AddrSpace::check_overlap(Region region, VirtualArea *&va)
        {
                if (region.end < region.start)
                        return true;

                va = varea_tree.find_nearest(region.start);

                if (!va)
                        return false;

                // If region.start is in an existing region, that region will
                // be returned.

                if (region.end >= va->region().start &&
                    region.start <= va->region().end)
                        return true;

                // If it returns a region that's greater than region.start, and va
                // itself does not overlap, then prev does not overlap (or else
                // region.start would be in or before prev, and thus prev would
                // have been returned).

                // If it returns a region that's less than region.start, we still
                // need to check next, as region.end could be in (or beyond) that
                // region.

                if (va->list_node.next != &varea_list) {
                        VirtualArea *next =
                                va->list_node.next->listentry(VirtualArea, list_node);

                        if (region.end >= next->region().start &&
                            region.start <= next->region().end)
                        {
                                va = next;
                                return true;
                        }
                }

                VirtualArea *prev;

                if (va->list_node.prev != &varea_list)
                        prev = va->list_node.prev->listentry(VirtualArea, list_node);
                else
                        prev = NULL;

                if (region.start < va->region().start) {
                        assert(!prev || prev->region().end < region.start);
                        va = prev;
                }

                return false;
        }

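        // Split any virtual areas that straddle the boundaries of "region",
        // so that afterwards every area lies either entirely inside or
        // entirely outside of it.  Returns the first area that overlaps the
        // region, or NULL if nothing overlaps.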
        VirtualArea *AddrSpace::split_varea(Region region)
        {
                VirtualArea *start, *mid, *end;

                // check_overlap is used rather than varea_tree.find,
                // so that the first overlapping region can be returned,
                // as most (if not all) callers will need this anyway.

                if (!check_overlap(region, start))
                        return NULL;

                assert(start);
                assert(start->aspace == this);
                assert(start->region().end >= region.start);

                if (start->region().start < region.start) {
                        // There is a varea that straddles region.start;
                        // create a new varea "mid" for the overlapping part.

                        mid = new VirtualArea;

                        mid->aspace = this;
                        mid->region().start = region.start;

                        if (region.end > start->region().end)
                                mid->region().end = start->region().end;
                        else
                                mid->region().end = region.end;

                        mid->flags = start->flags;
                        mid->ma = start->ma;
                        mid->offset = start->offset;

                        if (start->region().end > region.end) {
                                // The varea also straddles region.end; create a new
                                // varea "end" for the other side of the region.

                                end = new VirtualArea;

                                end->aspace = this;
                                end->region().start = region.end + 1;
                                end->region().end = start->region().end;

                                end->flags = start->flags;
                                end->ma = start->ma;
                                end->offset = start->offset;
                        } else {
                                end = NULL;
                        }

                        start->region().end = region.start - 1;

                        varea_tree.add(mid);
                        mid->ma->map(mid);

                        if (end) {
                                // Splits have already been done at both ends of the region,
                                // so there's no need to look up the ending address.

                                varea_tree.add(end);
                                mid->ma->map(end);
                                return mid;
                        }

                        start = mid;
                }

                if (start->region().end == region.end)
                        return start;

                if (start->region().end > region.end)
                        end = start;
                else {
                        end = varea_tree.find(region.end);

                        if (!end)
                                return start;

                        assert(end->aspace == this);
                        assert(end->region().start <= region.end);
                        assert(end->region().end >= region.end);

                        if (end->region().end == region.end)
                                return start;
                }

                assert(end->region().end > region.end);

                // There is a varea that straddles region.end;
                // create a new varea "mid" for the part inside the region.

                mid = new VirtualArea;

                mid->aspace = this;
                mid->region().start = end->region().start;
                mid->region().end = region.end;

                mid->flags = end->flags;
                mid->ma = end->ma;
                mid->offset = end->offset;

                end->region().start = region.end + 1;

                varea_tree.add(mid);
                mid->ma->map(mid);

                return start;
        }

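        // Find a free, page-aligned virtual region of length "len", first at
        // cached_free_region and then by walking the varea list looking for
        // a large enough gap.  The search wraps around to the bottom of user
        // space once before failing.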
        bool AddrSpace::get_free_region(ulong len, Region &region,
                                        VirtualArea *&prev)
        {
                assert(page_aligned(len));
                assert(cached_free_region);

                region.start = cached_free_region;
                region.end = cached_free_region + len - 1;

                if (region.end <= Arch::user_end && !check_overlap(region, prev)) {
                        cached_free_region = region.end + 1;
                        return true;
                }

                for (Util::List *node = &prev->list_node; node != &varea_list;
                     node = node->next)
                {
                        VirtualArea *va = node->listentry(VirtualArea, list_node);
                        ulong end = Arch::user_end;

                        if (node->next != &varea_list) {
                                VirtualArea *next = node->next->listentry(VirtualArea, list_node);
                                end = next->region().start - 1;
                        }

                        assert(end > va->region().end);

                        if (end - va->region().end >= len) {
                                region.start = va->region().end + 1;
                                region.end = region.start + len - 1;

                                assert(page_aligned(region.start));
                                cached_free_region = region.end + 1;
                                return true;
                        }
                }

                if (cached_free_region != Arch::user_start + Arch::page_size) {
                        cached_free_region = Arch::user_start + Arch::page_size;
                        return get_free_region(len, region, prev);
                }

                return false;
        }

        // The "mapped" return value indicates whether the top-level
        // address space has had a mapping established.  If "mapped" is
        // false, but an exception is not thrown, then this method must
        // be called again to propagate the mapping along the aspace chain.
        //
        // FIXME: Between aspace locks, if aspace's mapping is revoked and
        // ma->aspace's mapping changes, a pagein could leak through and cause
        // a page load or a copy-on-write breaking.  This isn't a huge deal
        // (it doesn't affect the correctness of the code or give aspace
        // access to ma->aspace's new mapping), but it's unpleasant, and could
        // have an adverse impact on determinism.  If you have a real-time
        // application that can't tolerate the occasional spurious pagein or
        // copy-on-write breaking, then use an address space that hasn't
        // previously been exposed to recursive mappers.

        bool ASpaceMappable::rec_pagein(AddrSpace *aspace, u64 vaddr,
                                        PTEFlags reqflags)
        {
                bool mapped = true;

                // aspace->mappable.retain();

                while (true) {
                        Lock::DroppableAutoLock autolock(aspace->lock);
                        VirtualArea *va = aspace->varea_tree.find(vaddr);

                        if (!va)
                                throw BadPageFault();

                        if ((va->flags & reqflags) != reqflags)
                                throw BadPageFault();

                        if (aspace->map(va, vaddr, reqflags))
                                break;

                        mapped = false;
                        Mappable *ma = va->ma;
                        vaddr += va->offset;

                        // ma->retain();
                        autolock.unlock();
                        // aspace->mappable.release();

                        if (!ma->is_aspace) {
                                ma->pagein(vaddr, reqflags);
                                // ma->release();
                                break;
                        }

                        aspace = static_cast<ASpaceMappable *>(ma)->aspace;
                }

                return mapped;
        }

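        // Walk down the chain of stacked address spaces until the backing
        // mappable has paged the address in, then retry until the mapping
        // has propagated all the way back up to this address space.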
        void ASpaceMappable::pagein(u64 vaddr, PTEFlags reqflags)
        {
                while (!rec_pagein(aspace, vaddr, reqflags));
        }

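        // Replace a FaultOnWrite mapping of "phys" at "vaddr" with a private
        // copy of the page.  If this is the only remaining reference to the
        // page, no copy is needed and the FaultOnWrite flag is simply
        // cleared.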
        void AddrSpace::break_copy_on_write(VirtualArea *va, u64 vaddr, u64 phys)
        {
                assert(lock.held_by_curthread());
                assert(rmap_lock.held_by_curthread());

                assert(va->flags.FaultOnWrite);
                assert(va->aspace == this);

                Page *old_page = phys_to_page(phys);

                Region region = { vaddr, vaddr + Arch::page_size - 1 };

                // If this is the only reference to the page left, then
                // nothing needs to be copied.  Just clear the COW condition.
                if (is_phys_page(old_page) && old_page->get_refcount() == 1) {
                        PTEFlags mask, flags;
                        mask.FaultOnWrite = 1;

                        page_table->set_flags(region, flags, mask);
                        return;
                }

                Page *new_page = PageAlloc::alloc(1);

                // FIXME -- highmem
                // OPT: It'd be better to do this without the rmap_lock held,
                // especially if rmap_lock is global rather than per-physpage.
                // I want to keep things simple for now and optimize later,
                // though.

                memcpy(page_to_kvirt(new_page), phys_to_kvirt(phys),
                       Arch::page_size);

                page_table->rmap_table.break_copy_on_write(region.start, new_page);
                new_page->release();
        }

        void ASpaceMappable::get_mapping(u64 vaddr, u64 *phys, PTEFlags *flags)
        {
                aspace->page_table->get_mapping(vaddr, phys, flags);
        }

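        // Try to establish a single-page mapping at "vaddr" according to
        // "va".  Returns false if the backing mappable does not yet have a
        // suitable mapping, or if a FaultOnWrite must be handled further up
        // the chain; in either case rec_pagein() will recurse into the
        // backer and try again.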
        bool AddrSpace::map(VirtualArea *va, u64 vaddr, PTEFlags reqflags)
        {
                Lock::AutoLock autolock(rmap_lock);
                assert(va->aspace == this);

                u64 phys;
                PTEFlags flags;
                va->ma->get_mapping(vaddr + va->offset, &phys, &flags);

                PTEFlags newflags = flags & va->flags;
                newflags.FaultOnWrite = flags.FaultOnWrite | va->flags.FaultOnWrite;

                if (!newflags.Valid) {
                        assert(va->flags.Valid);
                        return false;
                }

                if ((newflags & reqflags) != reqflags)
                        return false;

                u64 oldphys;
                PTEFlags oldflags;
                page_table->get_mapping(vaddr, &oldphys, &oldflags);

                if (oldflags.Valid &&
                    !(reqflags.Writeable && oldflags.FaultOnWrite))
                {
                        // If the existing mapping is valid, don't try to map it again.
                        // The existing mapping was put there possibly by a race, but
                        // more likely because a FaultOnWrite was handled upstream.
                        //
                        // FaultOnWrite handling is the only type of mapping change that
                        // can be done directly; all others must change the varea and do
                        // an rmap invalidation instead.  FaultOnWrite is special
                        // because we don't want to split vareas for every page that
                        // gets its copy-on-write broken.

                        assert((oldflags & reqflags) == reqflags);
                        assert(!va->flags.FaultOnWrite || oldphys == phys);
                        return true;
                }

                if (reqflags.Writeable && oldflags.FaultOnWrite)
                {
                        // The FaultOnWrite needs to be handled upstream.
                        if (!va->flags.FaultOnWrite)
                                return false;

                        va->aspace->break_copy_on_write(va, vaddr, phys);
                } else {
                        assert(!oldflags.Valid);
                        PageTable *usptbl = NULL;

                        if (va->ma->is_aspace) {
                                ASpaceMappable *asma = static_cast<ASpaceMappable *>(va->ma);
                                usptbl = asma->aspace->page_table;
                        }

                        RMapTable::map(va, usptbl, vaddr, vaddr + va->offset);

                        RegionWithOffset rwo;
                        rwo.start = vaddr;
                        rwo.end = vaddr + Arch::page_size - 1;
                        rwo.offset = phys;

                        page_table->map(rwo, newflags);
                }

                return true;
        }

        void ASpaceMappable::get_size(u64 *size)
        {
                aspace->get_size(size);
        }

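        // Map "region" of "ma" into this address space, either at the
        // caller-supplied *vstart or at a freely chosen address (returned
        // through *vstart).  Only a virtual area is created here; the actual
        // page mappings are established lazily, on fault.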
        void AddrSpace::map(IMappable ma, Region region, u64 *vstart,
                            MapFlags mflags, int map_type)
        {
                // FIXME: check alignment for VIPT caches
                // FIXME: Implement the "Replace" map flag

                if (mflags.Replace)
                        throw_idl(InvalidArgument, 3,
                                  countarray("Replace unimplemented"));

                Mappable *cma = Mappable::classptr(ma);
                if (!cma) {
                        // The given IMappable does not refer to a Mappable
                        // of this kernel.

                        throw_idl(InvalidArgument, 0, nullarray);
                }

                bool fixed = mflags.Fixed;

                if (is_process)
                        mflags.Fixed = 1;

                if (!page_aligned(region.start))
                        throw_idl(InvalidArgument, 1, countarray("unaligned start"));

                if (!page_aligned(region.end + 1))
                        throw_idl(InvalidArgument, 1, countarray("unaligned end"));

                Lock::AutoLock autolock(lock);
                Region vregion;
                VirtualArea *prev;

                if (*vstart != System::Mem::AddrSpace_ns::unspecified_start) {
                        vregion.start = *vstart;
                        vregion.end = vregion.start + region.end - region.start;

                        if (is_process) {
                                if (!valid_addr(vregion.start))
                                        throw_idl(InvalidArgument, 2,
                                                  countarray("invalid virtual start"));

                                if (!valid_addr(vregion.end))
                                        throw_idl(InvalidArgument, 2,
                                                  countarray("invalid virtual end"));
                        }

                        if (check_overlap(vregion, prev))
                                *vstart = System::Mem::AddrSpace_ns::unspecified_start;
                }

                if (*vstart == System::Mem::AddrSpace_ns::unspecified_start) {
                        if (fixed)
                                throw_idl(ResourceBusy, 2, countarray("varea overlap"));

                        if (!get_free_region(region.end - region.start + 1, vregion, prev))
                                throw_idl(OutOfSpace, countarray("out of vspace"));

                        *vstart = vregion.start;
                }

                VirtualArea *newva = new VirtualArea;
                newva->aspace = this;
                newva->region() = vregion;

                newva->flags.Valid = 1;
                newva->flags.User = map_type != map_kernel;
                newva->flags.Readable = mflags.access_IDLNS_Read;
                newva->flags.Writeable = mflags.access_IDLNS_Write;
                newva->flags.Executable = mflags.access_IDLNS_Exec;
                newva->flags.FaultOnWrite = mflags.CopyOnWrite;
                newva->flags.Protected = map_type != map_user;
                newva->ma = cma;
                newva->offset = region.start - vregion.start;

                varea_tree.add(newva);
                newva->ma->map(newva);

                if (prev) {
                        prev->list_node.add_front(&newva->list_node);
                } else {
                        varea_list.add_front(&newva->list_node);
                }
        }

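        // Remove all virtual areas overlapping "region", splitting any areas
        // that straddle its boundaries.  Protected areas are skipped unless
        // the request comes from the kernel.  After each varea is removed,
        // its pages are unmapped through the rmap table.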
        void AddrSpace::unmap(Region region, bool from_kernel)
        {
                u64 orig_start = region.start;

                while (region.start <= region.end) {
                        Lock::DroppableAutoLock autolock(lock);
                        VirtualArea *va;

                        // If check_overlap returns false, then there are no vareas
                        // in the specified region, so there's nothing to unmap.

                        if (!check_overlap(region, va))
                                return;

                        if (va->flags.Protected && !from_kernel) {
                                region.start = va->list_node.next->
                                               listentry(VirtualArea, list_node)->region().start;

                                if (region.start <= orig_start)
                                        break;

                                continue;
                        }

                        u64 va_end = va->region().end;
                        u64 next_start = 0;

                        if (va_end > region.end) {
                                u64 va_start = va->region().start;
                                va->region().start = region.end + 1;

                                if (va_start < region.start) {
                                        VirtualArea *newva = new VirtualArea;

                                        newva->aspace = this;
                                        newva->region().start = va_start;
                                        newva->region().end = region.start - 1;

                                        newva->flags = va->flags;
                                        newva->ma = va->ma;
                                        newva->offset = va->offset;

                                        varea_tree.add(newva);
                                        newva->ma->map(newva);
                                }

                                VirtualArea *nextva =
                                        va->list_node.next->listentry(VirtualArea, list_node);

                                next_start = nextva->region().start;
                        } else if (va->region().start < region.start) {
                                va->region().end = region.start - 1;
                        } else {
                                varea_tree.del(va);
                                va->ma->unmap(va);
                        }

                        // This is done after the varea removal, so that new faults
                        // don't map things in again.

                        // OPT: Skip RMap-based unmapping if nothing maps this aspace.
                        // OPT: Push this loop into the RMap code, allowing it to skip
                        // empty portions of the tables (as the pagetable code currently
                        // does).

                        while (region.start <= va_end && region.start <= region.end) {
                                page_table->rmap_table.unmap(region.start);
                                region.start += Arch::page_size;

                                if (region.start <= orig_start)
                                        break;
                        }

                        region.start = next_start;

                        if (region.start <= orig_start)
                                break;
                }
        }

        void AddrSpace::set_mapflags(Region region, MapFlags mflags)
        {
                // FIXME: implement
                // Find varea, split if necessary, propagate change to stacked aspaces
        }

        void AddrSpace::get_mapflags(Region region, MapFlags *mflags, uint8_t *all_same)
        {
                // FIXME: implement
        }

        void AddrSpace::get_mapping(Region region, IMappable *ma, u64 *offset)
        {
                // FIXME: implement
        }

        void AddrSpace::get_page_size(u32 *page_size)
        {
                *page_size = Arch::page_size;
        }

        void AddrSpace::get_min_align(u32 *min_align)
        {
                *min_align = Arch::page_mapping_min_align;
        }

        void AddrSpace::get_size(u64 *size)
        {
                page_table->get_size(size);
        }

        void Mappable::map(VirtualArea *varea)
        {
                mappings_lock.lock_irq();
                mappings.add_back(&varea->mappings_node);
                mappings_lock.unlock_irq();
        }

        void Mappable::unmap(VirtualArea *varea)
        {
                mappings_lock.lock_irq();
                varea->mappings_node.del();
                mappings_lock.unlock_irq();
        }

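        // Called when a PTE is torn down.  For process address spaces with a
        // valid PTE, invalidates the TLB entry and, if the mapping was dirty,
        // marks the old page dirty and retains it for writeback.  The
        // reference held by the mapping is then dropped unless no_release is
        // set.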
        void PageTable::kill_pte(ulong vaddr, u64 paddr, bool dirty,
                                 bool valid, bool no_release)
        {
                Page *oldpage = phys_to_page(paddr);

                if (!is_phys_page(oldpage))
                        oldpage = NULL;

                if (is_process && valid) {
                        Arch::invalidate_tlb_entry(vaddr);

                        if (oldpage && dirty &&
                            !ll_test_and_set(&oldpage->flags, PageFlags::bits::Dirty))
                        {
                                oldpage->retain();
                                // Queue page for writeback
                        }
                }

                if (!no_release && oldpage)
                        oldpage->release();
        }

        // FIXME: Add a special PTE flag to indicate that PhysMem mappings
        // don't mess with page refcounts.

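        // Mappable covering all of physical memory: addresses map 1:1 to
        // physical addresses with full permissions, and pagein is a no-op
        // since there is nothing to bring in.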
        class PhysMem : public Mappable {
        public:
                void get_size(u64 *size)
                {
                        if (sizeof(long) == 8)
                                *size = 1ULL << (64 - Arch::page_shift);
                        else
                                *size = 1ULL << (32 - Arch::page_shift);
                }

                void pagein(u64 vaddr, PTEFlags reqflags)
                {
                }

                void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
                {
                        *phys = addr;
                        *flags = 0;
                        flags->Valid = 1;
                        flags->Readable = 1;
                        flags->Writeable = 1;
                        flags->Executable = 1;
                        flags->User = 1;
                }
        };

        PhysMem real_physmem;
        IMappable physmem = real_physmem;

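        // Anonymous memory: each get_mapping() call hands back a freshly
        // allocated, zero-filled page with full permissions.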
        class AnonMem : public Mappable {
        public:
                void get_size(u64 *size)
                {
                        if (sizeof(long) == 8)
                                *size = 1ULL << (64 - Arch::page_shift);
                        else
                                *size = 1ULL << (32 - Arch::page_shift);
                }

                void pagein(u64 vaddr, PTEFlags reqflags)
                {
                }

                void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
                {
                        Page *page = PageAlloc::alloc(1);

                        // OPT: Only zero if it was asked for.
                        // OPT: Eventually, have separate pagelists for zeroed and
                        // unzeroed memory, and a low-priority background thread
                        // that zeroes pages and moves them to the zeroed list.
                        bzero(page_to_kvirt(page), Arch::page_size);

                        *phys = page_to_phys(page);
                        *flags = 0;
                        flags->Valid = 1;
                        flags->Readable = 1;
                        flags->Writeable = 1;
                        flags->Executable = 1;
                        flags->User = 1;
                }
        };

        AnonMem real_anonmem;
        IMappable anonmem = real_anonmem;
}

#include <servers/mem/addrspace/footer.cc>