// mem/addrspace.cc -- System.Mem.AddrSpace
//
// OPT: Special AddrSpaces that only translate/export a linear block of
// another AddrSpace, and don't have individual entries for every page.
//
// OPT: Special VAreas that use their own translation mechanism instead
// of varea->offset, so that filesystem block tables (and similar things)
// don't need to have a VArea per block.
//
// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
//
// This software is provided 'as-is', without any express or implied warranty.
// In no event will the authors or contributors be held liable for any damages
// arising from the use of this software.
//
// Permission is hereby granted to everyone, free of charge, to use, copy,
// modify, prepare derivative works of, publish, distribute, perform,
// sublicense, and/or sell copies of the Software, provided that the above
// copyright notice and disclaimer of warranty be included in all copies or
// substantial portions of this software.

#include <kern/mem.h>
#include <kern/paging.h>
#include <kern/generic-pagetable.h>
#include <kern/pagetable.h>
#include <kern/pagealloc.h>
#include <kern/generic-pte.h>
#include <kern/compiler.h>

extern int roshared_start, roshared_page_end;
extern int rwshared_start, rwshared_page_end;

namespace Mem {
        extern IMappable physmem, anonmem;

        class AddrSpaceFactory {
        public:
                #include <servers/mem/addrspace/Mem/AddrSpaceFactory.h>

                AddrSpaceFactory()
                {
                        init_iface();
                }

                void create(Object *obj)
                {
                        *obj = static_cast<IAddrSpace>(*(new AddrSpace));
                }
        };

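        // ProcAddrSpaceFactory creates the initial address space for a new
        // process: it maps the shared read-only and read-write kernel/user
        // pages at their architecture-defined addresses (as protected
        // mappings), and maps an anonymous stack region, which is left
        // non-executable by default.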
        class ProcAddrSpaceFactory {
        public:
                #include <servers/mem/addrspace/Mem/ProcAddrSpaceFactory.h>

                ProcAddrSpaceFactory()
                {
                        init_iface();
                }

                void create(Object *obj)
                {
                        AddrSpace *as = new ProcAddrSpace;
                        Region region;
                        MapFlags mf = 0;
                        u64 vstart;

                        region.start = kvirt_to_phys(&roshared_start);
                        region.end = kvirt_to_phys(&roshared_page_end);
                        vstart = Arch::roshared_map;
                        mf.Fixed = 1;
                        mf.access_IDLNS_Read = 1;
                        mf.access_IDLNS_Exec = 1;

                        as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);

                        region.start = kvirt_to_phys(&rwshared_start);
                        region.end = kvirt_to_phys(&rwshared_page_end);
                        vstart = Arch::rwshared_map;
                        mf.access_IDLNS_Exec = 0;
                        mf.access_IDLNS_Write = 1;
                        mf.CopyOnWrite = 1;

                        as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);

                        // Leave the stack no-exec by default.
                        region.start = vstart = Arch::stack_bottom;
                        region.end = Arch::stack_top;
                        mf.CopyOnWrite = 0;
                        printf("vstart %llx\n", vstart);
                        as->map(anonmem, region, &vstart, mf);

                        *obj = static_cast<IAddrSpace>(*as);
                }
        };

        ProcAddrSpaceFactory real_proc_addrspace_factory;
        Factory proc_addr_space_factory = real_proc_addrspace_factory;

        AddrSpaceFactory real_addrspace_factory;
        Factory addr_space_factory = real_addrspace_factory;

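        // If no page table is supplied, a generic (software) page table is
        // created; ProcAddrSpace passes in an arch page table instead.
        // cached_free_region starts one page above Arch::user_start,
        // presumably so the lowest user page stays unmapped and null
        // pointer dereferences fault.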
        AddrSpace::AddrSpace(PageTable *ptbl) : mappable(this)
        {
                init_iface();
                is_process = false;
                page_table = ptbl;

                if (!ptbl)
                        page_table = new PageTableImpl<GenPTE>(false);

                cached_free_region = Arch::user_start + Arch::page_size;
        }

        ProcAddrSpace::ProcAddrSpace() :
        AddrSpace(new PageTableImpl<Arch::PTE>(true))
        {
                is_process = true;
        }

        // This should only be used once during bootup to initialize the
        // kernel's address space with a static initial page table.

        ProcAddrSpace::ProcAddrSpace(void *ptbl_toplevel) :
        AddrSpace(new PageTableImpl<Arch::PTE>(ptbl_toplevel))
        {
                // FIXME: set cached_free_region to kernel virtual space
                is_process = true;
        }

        void AddrSpace::get_mappable(IMappable *ma)
        {
                *ma = mappable;
        }

        void AddrSpace::clone(IAddrSpace *addrspace, uint8_t clone_is_real)
        {
                // FIXME: implement
                *addrspace = NULL;
        }

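        // Handle a page fault at vaddr.  Returns true if the fault was
        // resolved by paging the address in, and false if it should be
        // treated as an error (bad address or insufficient permission).
        // If the current thread already holds this address space's lock,
        // the fault cannot be handled here and is reported as an error.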
        bool AddrSpace::handle_fault(ulong vaddr, bool write, bool exec, bool user)
        {
                if (lock.held_by_curthread())
                        return false;

                assert(!(write && exec));
                PTEFlags reqflags;

                if (user)
                        reqflags.User = 1;

                if (write)
                        reqflags.Writeable = 1;
                else if (exec)
                        reqflags.Executable = 1;
                else
                        reqflags.Readable = 1;

                reqflags.Valid = 1;

                try {
                        mappable.pagein(page_align(vaddr), reqflags);
                }

                catch (BadPageFault &bpf) {
                        // FIXME: retain info about nature of bpf
                        // to throw to user?
                        return false;
                }

                return true;
        }

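        // Check whether "region" overlaps any existing virtual area.  If so,
        // return true with "va" set to the first overlapping varea.  If not,
        // return false with "va" set to the nearest varea preceding the
        // region (or NULL if there is none); callers use this as the
        // insertion point into the sorted varea list.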
        bool AddrSpace::check_overlap(Region region, VirtualArea *&va)
        {
                if (region.end < region.start)
                        return true;

                va = varea_tree.find_nearest(region.start);

                if (!va)
                        return false;

                // If region.start is in an existing region, that region will
                // be returned.

                if (region.end >= va->region().start &&
                    region.start <= va->region().end)
                        return true;

                // If it returns a region that's greater than region.start, and va
                // itself does not overlap, then prev does not overlap (or else
                // region.start would be in or before prev, and thus prev would
                // have been returned).

                // If it returns a region that's less than region.start, we still
                // need to check next, as region.end could be in (or beyond) that
                // region.

                if (va->list_node.next != &varea_list) {
                        VirtualArea *next =
                                va->list_node.next->listentry(VirtualArea, list_node);

                        if (region.end >= next->region().start &&
                            region.start <= next->region().end)
                        {
                                va = next;
                                return true;
                        }
                }

                VirtualArea *prev;

                if (va->list_node.prev != &varea_list)
                        prev = va->list_node.prev->listentry(VirtualArea, list_node);
                else
                        prev = NULL;

                if (region.start < va->region().start) {
                        assert(!prev || prev->region().end < region.start);
                        va = prev;
                }

                return false;
        }

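        // Split existing vareas so that no varea crosses either boundary of
        // "region", and return the first varea overlapping the region (or
        // NULL if nothing overlaps).  Callers can then operate on whole
        // vareas that lie entirely inside the region.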
        VirtualArea *AddrSpace::split_varea(Region region)
        {
                VirtualArea *start, *mid, *end;

                // check_overlap is used rather than varea_tree.find,
                // so that the first overlapping region can be returned,
                // as most (if not all) callers will need this anyway.

                if (!check_overlap(region, start))
                        return NULL;

                assert(start);
                assert(start->aspace == this);
                assert(start->region().end >= region.start);

                if (start->region().start < region.start) {
                        // There is a varea that straddles region.start;
                        // create a new varea "mid" for the overlapping part.

                        mid = new VirtualArea;

                        mid->aspace = this;
                        mid->region().start = region.start;

                        if (region.end > start->region().end)
                                mid->region().end = start->region().end;
                        else
                                mid->region().end = region.end;

                        mid->flags = start->flags;
                        mid->ma = start->ma;
                        mid->offset = start->offset;

                        if (start->region().end > region.end) {
                                // The varea also straddles region.end; create a new
                                // varea "end" for the other side of the region.

                                end = new VirtualArea;

                                end->aspace = this;
                                end->region().start = region.end + 1;
                                end->region().end = start->region().end;

                                end->flags = start->flags;
                                end->ma = start->ma;
                                end->offset = start->offset;
                        } else {
                                end = NULL;
                        }

                        start->region().end = region.start - 1;

                        varea_tree.add(mid);
                        mid->ma->map(mid);

                        if (end) {
                                // Splits have already been done at both ends of the region,
                                // so there's no need to look up the ending address.

                                varea_tree.add(end);
                                mid->ma->map(end);
                                return mid;
                        }

                        start = mid;
                }

                if (start->region().end == region.end)
                        return start;

                if (start->region().end > region.end)
                        end = start;
                else {
                        end = varea_tree.find(region.end);

                        if (!end)
                                return start;

                        assert(end->aspace == this);
                        assert(end->region().start <= region.end);
                        assert(end->region().end >= region.end);

                        if (end->region().end == region.end)
                                return start;
                }

                assert(end->region().end > region.end);

                // There is a varea that straddles region.end;
                // create a new varea "mid" for the overlapping part.

                mid = new VirtualArea;

                mid->aspace = this;
                mid->region().start = end->region().start;
                mid->region().end = region.end;

                mid->flags = end->flags;
                mid->ma = end->ma;
                mid->offset = end->offset;

                end->region().start = region.end + 1;

                varea_tree.add(mid);
                mid->ma->map(mid);

                return start;
        }

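        // Find a free virtual region of the given page-aligned length.
        // The search first tries cached_free_region, then falls back to a
        // first-fit scan of the gaps between existing vareas, and finally
        // wraps back to the bottom of user space and retries once.  On
        // success, "prev" is set to the varea preceding the chosen region
        // and cached_free_region is advanced past the returned region.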
        bool AddrSpace::get_free_region(ulong len, Region &region,
                                        VirtualArea *&prev)
        {
                assert(page_aligned(len));
                assert(cached_free_region);

                region.start = cached_free_region;
                region.end = cached_free_region + len - 1;

                if (region.end <= Arch::user_end && !check_overlap(region, prev)) {
                        cached_free_region = region.end + 1;
                        return true;
                }

                for (Util::List *node = &prev->list_node; node != &varea_list;
                     node = node->next)
                {
                        VirtualArea *va = node->listentry(VirtualArea, list_node);
                        ulong end = Arch::user_end;

                        if (node->next != &varea_list) {
                                VirtualArea *next = node->next->listentry(VirtualArea, list_node);
                                end = next->region().start - 1;
                        }

                        assert(end > va->region().end);

                        if (end - va->region().end >= len) {
                                region.start = va->region().end + 1;
                                region.end = region.start + len - 1;

                                assert(page_aligned(region.start));
                                cached_free_region = region.end + 1;
                                return true;
                        }
                }

                if (cached_free_region != Arch::user_start + Arch::page_size) {
                        cached_free_region = Arch::user_start + Arch::page_size;
                        return get_free_region(len, region, prev);
                }

                return false;
        }

        // The "mapped" return value indicates whether the top-level
        // address space has had a mapping established.  If "mapped" is
        // false, but an exception is not thrown, then this method must
        // be called again to propagate the mapping along the aspace chain.
        //
        // FIXME: Between aspace locks, if aspace's mapping is revoked and
        // ma->aspace's mapping changes, a pagein could leak through and cause
        // a page load or a copy-on-write breaking.  This isn't a huge deal
        // (it doesn't affect the correctness of the code or give aspace
        // access to ma->aspace's new mapping), but it's unpleasant, and could
        // have an adverse impact on determinism.  If you have a real-time
        // application that can't tolerate the occasional spurious pagein or
        // copy-on-write breaking, then use an address space that hasn't
        // previously been exposed to recursive mappers.

        bool ASpaceMappable::rec_pagein(AddrSpace *aspace, u64 vaddr,
                                        PTEFlags reqflags)
        {
                bool mapped = true;

                // aspace->mappable.retain();

                while (true) {
                        Lock::DroppableAutoLock autolock(aspace->lock);
                        VirtualArea *va = aspace->varea_tree.find(vaddr);

                        if (!va)
                                throw BadPageFault();

                        if ((va->flags & reqflags) != reqflags)
                                throw BadPageFault();

                        if (aspace->map(va, vaddr, reqflags))
                                break;

                        mapped = false;
                        Mappable *ma = va->ma;
                        vaddr += va->offset;

                        // ma->retain();
                        autolock.unlock();
                        // aspace->mappable.release();

                        if (!ma->is_aspace) {
                                ma->pagein(vaddr, reqflags);
                                // ma->release();
                                break;
                        }

                        aspace = static_cast<ASpaceMappable *>(ma)->aspace;
                }

                return mapped;
        }

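        // Repeat rec_pagein until the mapping has been propagated all the
        // way down the chain of stacked address spaces into this address
        // space's own page table.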
        void ASpaceMappable::pagein(u64 vaddr, PTEFlags reqflags)
        {
                while (!rec_pagein(aspace, vaddr, reqflags));
        }

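        // Break copy-on-write for the page backing vaddr, currently mapped
        // to the (possibly shared) physical address phys.  If the page has
        // no other references, the FaultOnWrite condition is simply cleared;
        // otherwise the contents are copied into a freshly allocated page
        // and the mappings tracked by the rmap table are repointed to it.
        // Both the aspace lock and the rmap lock must be held.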
        void AddrSpace::break_copy_on_write(VirtualArea *va, u64 vaddr, u64 phys)
        {
                assert(lock.held_by_curthread());
                assert(rmap_lock.held_by_curthread());

                assert(va->flags.FaultOnWrite);
                assert(va->aspace == this);

                Page *old_page = phys_to_page(phys);

                Region region = { vaddr, vaddr + Arch::page_size - 1 };

                // If this is the only reference to the page left, then
                // nothing needs to be copied.  Just clear the COW condition.
                if (is_phys_page(old_page) && old_page->get_refcount() == 1) {
                        PTEFlags mask, flags;
                        mask.FaultOnWrite = 1;

                        page_table->set_flags(region, flags, mask);
                        return;
                }

                Page *new_page = PageAlloc::alloc(1);

                // FIXME -- highmem
                // OPT: It'd be better to do this without the rmap_lock held,
                // especially if rmap_lock is global rather than per-physpage.
                // I want to keep things simple for now and optimize later,
                // though.

                memcpy(page_to_kvirt(new_page), phys_to_kvirt(phys),
                       Arch::page_size);

                page_table->rmap_table.break_copy_on_write(region.start, new_page);
                new_page->release();
        }

        void ASpaceMappable::get_mapping(u64 vaddr, u64 *phys, PTEFlags *flags)
        {
                aspace->page_table->get_mapping(vaddr, phys, flags);
        }

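        // Try to establish a page table entry for the single page at vaddr
        // within varea "va", requesting at least "reqflags".  Returns true
        // if a usable mapping now exists (including the case where
        // copy-on-write had to be broken), and false if the upstream
        // mappable has no valid mapping yet and must page the address in
        // first.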
        bool AddrSpace::map(VirtualArea *va, u64 vaddr, PTEFlags reqflags)
        {
                Lock::AutoLock autolock(rmap_lock);
                assert(va->aspace == this);

                u64 phys;
                PTEFlags flags;
                va->ma->get_mapping(vaddr + va->offset, &phys, &flags);

                PTEFlags newflags = flags & va->flags;
                newflags.FaultOnWrite = flags.FaultOnWrite | va->flags.FaultOnWrite;

                if (!newflags.Valid) {
                        assert(va->flags.Valid);
                        return false;
                }

                if ((newflags & reqflags) != reqflags)
                        return false;

                u64 oldphys;
                PTEFlags oldflags;
                page_table->get_mapping(vaddr, &oldphys, &oldflags);

                if (oldflags.Valid &&
                    !(reqflags.Writeable && oldflags.FaultOnWrite))
                {
                        // If the existing mapping is valid, don't try to map it again.
                        // The existing mapping was put there possibly by a race, but
                        // more likely because a FaultOnWrite was handled upstream.
                        //
                        // FaultOnWrite handling is the only type of mapping change that
                        // can be done directly; all others must change the varea and do
                        // an rmap invalidation instead.  FaultOnWrite is special
                        // because we don't want to split vareas for every page that
                        // gets its copy-on-write broken.

                        assert((oldflags & reqflags) == reqflags);
                        assert(!va->flags.FaultOnWrite || oldphys == phys);
                        return true;
                }

                if (reqflags.Writeable && oldflags.FaultOnWrite)
                {
                        // The FaultOnWrite needs to be handled upstream.
                        if (!va->flags.FaultOnWrite)
                                return false;

                        va->aspace->break_copy_on_write(va, vaddr, phys);
                } else {
                        assert(!oldflags.Valid);
                        PageTable *usptbl = NULL;

                        if (va->ma->is_aspace) {
                                ASpaceMappable *asma = static_cast<ASpaceMappable *>(va->ma);
                                usptbl = asma->aspace->page_table;
                        }

                        RMapTable::map(va, usptbl, vaddr, vaddr + va->offset);

                        RegionWithOffset rwo;
                        rwo.start = vaddr;
                        rwo.end = vaddr + Arch::page_size - 1;
                        rwo.offset = phys;

                        page_table->map(rwo, newflags);
                }

                return true;
        }

        void ASpaceMappable::get_size(u64 *size)
        {
                aspace->get_size(size);
        }

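        // Map "region" of the given mappable into this address space.  On
        // entry, *vstart is either a caller-requested virtual address or
        // unspecified_start; on return it holds the address actually used.
        // The new varea's offset converts an address in this aspace into
        // the corresponding address in the mappable (ma_addr = vaddr +
        // offset).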
        void AddrSpace::map(IMappable ma, Region region, u64 *vstart,
                            MapFlags mflags, int map_type)
        {
                // FIXME: check alignment for VIPT caches
                // FIXME: Implement the "Replace" map flag

                if (mflags.Replace)
                        throw_idl(InvalidArgument, 3,
                                  countarray("Replace unimplemented"));

                Mappable *cma = Mappable::classptr(ma);
                if (!cma) {
                        // The given IMappable does not refer to a Mappable
                        // of this kernel.

                        throw_idl(InvalidArgument, 0, nullarray);
                }

                bool fixed = mflags.Fixed;

                if (is_process)
                        mflags.Fixed = 1;

                if (!page_aligned(region.start))
                        throw_idl(InvalidArgument, 1, countarray("unaligned start"));

                if (!page_aligned(region.end + 1))
                        throw_idl(InvalidArgument, 1, countarray("unaligned end"));

                Lock::AutoLock autolock(lock);
                Region vregion;
                VirtualArea *prev;

                if (*vstart != System::Mem::AddrSpace_ns::unspecified_start) {
                        vregion.start = *vstart;
                        vregion.end = vregion.start + region.end - region.start;

                        if (is_process) {
                                if (!valid_addr(vregion.start))
                                        throw_idl(InvalidArgument, 2,
                                                  countarray("invalid virtual start"));

                                if (!valid_addr(vregion.end))
                                        throw_idl(InvalidArgument, 2,
                                                  countarray("invalid virtual end"));
                        }

                        if (check_overlap(vregion, prev))
                                *vstart = System::Mem::AddrSpace_ns::unspecified_start;
                }

                if (*vstart == System::Mem::AddrSpace_ns::unspecified_start) {
                        if (fixed)
                                throw_idl(ResourceBusy, 2, countarray("varea overlap"));

                        if (!get_free_region(region.end - region.start + 1, vregion, prev))
                                throw_idl(OutOfSpace, countarray("out of vspace"));

                        *vstart = vregion.start;
                }

                VirtualArea *newva = new VirtualArea;
                newva->aspace = this;
                newva->region() = vregion;

                newva->flags.Valid = 1;
                newva->flags.User = map_type != map_kernel;
                newva->flags.Readable = mflags.access_IDLNS_Read;
                newva->flags.Writeable = mflags.access_IDLNS_Write;
                newva->flags.Executable = mflags.access_IDLNS_Exec;
                newva->flags.FaultOnWrite = mflags.CopyOnWrite;
                newva->flags.Protected = map_type != map_user;
                newva->ma = cma;
                newva->offset = region.start - vregion.start;

                varea_tree.add(newva);
                newva->ma->map(newva);

                if (prev) {
                        prev->list_node.add_front(&newva->list_node);
                } else {
                        varea_list.add_front(&newva->list_node);
                }
        }

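        // Unmap "region", handling one overlapping varea per pass through
        // the loop and re-acquiring the lock each time.  Vareas that
        // straddle a boundary of the region are trimmed or split rather
        // than removed, and protected vareas are skipped unless the request
        // comes from the kernel.  The comparisons against orig_start guard
        // against address wrap-around at the top of the address space.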
        void AddrSpace::unmap(Region region, bool from_kernel)
        {
                u64 orig_start = region.start;

                while (region.start <= region.end) {
                        Lock::DroppableAutoLock autolock(lock);
                        VirtualArea *va;

                        // If check_overlap returns false, then there are no vareas
                        // in the specified region, so there's nothing to unmap.

                        if (!check_overlap(region, va))
                                return;

                        if (va->flags.Protected && !from_kernel) {
                                region.start = va->list_node.next->
                                               listentry(VirtualArea, list_node)->region().start;

                                if (region.start <= orig_start)
                                        break;

                                continue;
                        }

                        u64 va_end = va->region().end;
                        u64 next_start = 0;

                        if (va_end > region.end) {
                                u64 va_start = va->region().start;
                                va->region().start = region.end + 1;

                                if (va_start < region.start) {
                                        VirtualArea *newva = new VirtualArea;

                                        newva->aspace = this;
                                        newva->region().start = va_start;
                                        newva->region().end = region.start - 1;

                                        newva->flags = va->flags;
                                        newva->ma = va->ma;
                                        newva->offset = va->offset;

                                        varea_tree.add(newva);
                                        newva->ma->map(newva);
                                }

                                VirtualArea *nextva =
                                        va->list_node.next->listentry(VirtualArea, list_node);

                                next_start = nextva->region().start;
                        } else if (va->region().start < region.start) {
                                va->region().end = region.start - 1;
                        } else {
                                varea_tree.del(va);
                                va->ma->unmap(va);
                        }

                        // This is done after the varea removal, so that new faults
                        // don't map things in again.

                        // OPT: Skip RMap-based unmapping if nothing maps this aspace.
                        // OPT: Push this loop into the RMap code, allowing it to skip
                        // empty portions of the tables (as the pagetable code currently
                        // does).

                        while (region.start <= va_end && region.start <= region.end) {
                                page_table->rmap_table.unmap(region.start);
                                region.start += Arch::page_size;

                                if (region.start <= orig_start)
                                        break;
                        }

                        if (next_start)
                                region.start = next_start;

                        if (region.start <= orig_start)
                                break;
                }
        }

        void AddrSpace::set_mapflags(Region region, MapFlags mflags)
        {
                // FIXME: implement
                // Find varea, split if necessary, propagate change to stacked aspaces
        }

        void AddrSpace::get_mapflags(Region region, MapFlags *mflags, uint8_t *all_same)
        {
                // FIXME: implement
        }

        void AddrSpace::get_mapping(Region region, IMappable *ma, u64 *offset)
        {
                // FIXME: implement
        }

        void AddrSpace::get_page_size(u32 *page_size)
        {
                *page_size = Arch::page_size;
        }

        void AddrSpace::get_min_align(u32 *min_align)
        {
                *min_align = Arch::page_mapping_min_align;
        }

        void AddrSpace::get_size(u64 *size)
        {
                page_table->get_size(size);
        }

        void Mappable::map(VirtualArea *varea)
        {
                mappings_lock.lock_irq();
                mappings.add_back(&varea->mappings_node);
                mappings_lock.unlock_irq();
        }

        void Mappable::unmap(VirtualArea *varea)
        {
                mappings_lock.lock_irq();
                varea->mappings_node.del();
                mappings_lock.unlock_irq();
        }

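        // Tear down a single PTE that mapped vaddr to paddr.  For process
        // address spaces the TLB entry is invalidated and, if the hardware
        // dirty bit was set, the page is marked Dirty and retained for
        // eventual writeback.  The page's reference is dropped unless
        // no_release is set; physical addresses that don't correspond to a
        // managed Page are ignored.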
        void PageTable::kill_pte(ulong vaddr, u64 paddr, bool dirty,
                                 bool valid, bool no_release)
        {
                Page *oldpage = phys_to_page(paddr);

                if (!is_phys_page(oldpage))
                        oldpage = NULL;

                if (is_process && valid) {
                        Arch::invalidate_tlb_entry(vaddr);

                        if (oldpage && dirty &&
                            !ll_test_and_set(&oldpage->flags, PageFlags::bits::Dirty))
                        {
                                oldpage->retain();
                                // Queue page for writeback
                        }
                }

                if (!no_release && oldpage)
                        oldpage->release();
        }

        // FIXME: Add a special PTE flag to indicate that PhysMem mappings
        // don't mess with page refcounts.

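        // PhysMem exposes all of physical memory as a Mappable: get_mapping()
        // returns the requested address unchanged as the physical address,
        // with full permissions, and pagein() is a no-op since there is
        // nothing to load.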
        class PhysMem : public Mappable {
        public:
                void get_size(u64 *size)
                {
                        if (sizeof(long) == 8)
                                *size = 1ULL << (64 - Arch::page_shift);
                        else
                                *size = 1ULL << (32 - Arch::page_shift);
                }

                void pagein(u64 vaddr, PTEFlags reqflags)
                {
                }

                void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
                {
                        *phys = addr;
                        *flags = 0;
                        flags->Valid = 1;
                        flags->Readable = 1;
                        flags->Writeable = 1;
                        flags->Executable = 1;
                        flags->User = 1;
                }
        };

        PhysMem real_physmem;
        IMappable physmem = real_physmem;

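        // AnonMem provides demand-allocated anonymous memory: each
        // get_mapping() call hands out a freshly allocated, zero-filled
        // page with full permissions.  As with PhysMem, pagein() is a no-op;
        // the page is allocated when the mapping is queried.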
        class AnonMem : public Mappable {
        public:
                void get_size(u64 *size)
                {
                        if (sizeof(long) == 8)
                                *size = 1ULL << (64 - Arch::page_shift);
                        else
                                *size = 1ULL << (32 - Arch::page_shift);
                }

                void pagein(u64 vaddr, PTEFlags reqflags)
                {
                }

                void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
                {
                        Page *page = PageAlloc::alloc(1);

                        // OPT: Only zero if it was asked for.
                        // OPT: Eventually, have separate pagelists for zeroed and
                        // unzeroed memory, and a low-priority background thread
                        // that zeroes pages and moves them to the zeroed list.
                        bzero(page_to_kvirt(page), Arch::page_size);

                        *phys = page_to_phys(page);
                        *flags = 0;
                        flags->Valid = 1;
                        flags->Readable = 1;
                        flags->Writeable = 1;
                        flags->Executable = 1;
                        flags->User = 1;
                }
        };

        AnonMem real_anonmem;
        IMappable anonmem = real_anonmem;
}

#include <servers/mem/addrspace/footer.cc>