// mem/addrspace.cc -- System.Mem.AddrSpace
//
// OPT: Special AddrSpaces that only translate/export a linear block of
// another AddrSpace, and don't have individual entries for every page.
//
// OPT: Special VAreas that use their own translation mechanism instead
// of varea->offset, so that filesystem block tables (and similar things)
// don't need to have a VArea per block.
//
// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
//
// This software is provided 'as-is', without any express or implied warranty.
// In no event will the authors or contributors be held liable for any damages
// arising from the use of this software.
//
// Permission is hereby granted to everyone, free of charge, to use, copy,
// modify, prepare derivative works of, publish, distribute, perform,
// sublicense, and/or sell copies of the Software, provided that the above
// copyright notice and disclaimer of warranty be included in all copies or
// substantial portions of this software.

#include <kern/mem.h>
#include <kern/paging.h>
#include <kern/pagetable.h>
#include <kern/pagealloc.h>
#include <kern/generic-pte.h>
#include <kern/compiler.h>

extern int roshared_start, roshared_page_end;
extern int rwshared_start, rwshared_page_end;

namespace Mem {
        extern IMappable physmem, anonmem;

        class AddrSpaceFactory {
        public:
                #include <servers/mem/addrspace/Mem/AddrSpaceFactory.h>

                AddrSpaceFactory()
                {
                        init_iface();
                }

                void create(Object *obj)
                {
                        *obj = static_cast<IAddrSpace>(*(new AddrSpace(false)));
                }
        };

        class ProcAddrSpaceFactory {
        public:
                #include <servers/mem/addrspace/Mem/ProcAddrSpaceFactory.h>

                ProcAddrSpaceFactory()
                {
                        init_iface();
                }

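                // Create a process address space pre-populated with the
                // shared read-only and read-write kernel-provided pages and
                // an anonymous stack region (left non-executable).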
                void create(Object *obj)
                {
                        AddrSpace *as = new AddrSpace(true);
                        Region region;
                        MapFlags mf = 0;
                        u64 vstart;

                        region.start = kvirt_to_phys(&roshared_start);
                        region.end = kvirt_to_phys(&roshared_page_end);
                        vstart = Arch::roshared_map;
                        mf.Fixed = 1;
                        mf.access_IDLNS_Read = 1;
                        mf.access_IDLNS_Exec = 1;

                        as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);

                        region.start = kvirt_to_phys(&rwshared_start);
                        region.end = kvirt_to_phys(&rwshared_page_end);
                        vstart = Arch::rwshared_map;
                        mf.access_IDLNS_Exec = 0;
                        mf.access_IDLNS_Write = 1;
                        mf.CopyOnWrite = 1;

                        as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);

                        // Leave the stack no-exec by default.
                        region.start = vstart = Arch::stack_bottom;
                        region.end = Arch::stack_top;
                        mf.CopyOnWrite = 0;
                        printf("vstart %llx\n", vstart);
                        as->map(anonmem, region, &vstart, mf);

                        *obj = static_cast<IAddrSpace>(*(as));
                }
        };

        ProcAddrSpaceFactory real_proc_addrspace_factory;
        Factory proc_addr_space_factory = real_proc_addrspace_factory;

        AddrSpaceFactory real_addrspace_factory;
        Factory addr_space_factory = real_addrspace_factory;

        AddrSpace::AddrSpace(bool process) : mappable(this)
        {
                init_iface();
                is_process = process;

                // OPT: Allow optional use of the native PTE for stacked aspaces,
                // either because the native PTE is 64-bit, or because it's an
                // embedded system which does not need 64-bit storage.

                if (process)
                        page_table = new PageTableImpl<Arch::PTE>(true);
                else
                        page_table = new PageTableImpl<GenPTE>(false);

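                // Start the free-region hint one page above the bottom of user
                // space (presumably so that null-pointer dereferences fault).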
                cached_free_region = Arch::user_start + Arch::page_size;
        }

        // This should only be used once during bootup to initialize the
        // kernel's address space with a static initial page table.

        AddrSpace::AddrSpace(void *ptbl_toplevel) : mappable(this)
        {
                init_iface();
                is_process = true;
                page_table = new PageTableImpl<Arch::PTE>(ptbl_toplevel);

                // FIXME: should be kernel virtual space
                cached_free_region = Arch::user_start + Arch::page_size;
        }

        void AddrSpace::get_mappable(IMappable *ma)
        {
                *ma = mappable;
        }

        void AddrSpace::clone(IAddrSpace *addrspace, uint8_t clone_is_real)
        {
                // FIXME: implement
                *addrspace = NULL;
        }

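        // Translate a hardware page fault into the required PTE flags and ask
        // the backing mappable chain to page the address in.  Returns false if
        // the fault cannot be satisfied (no matching varea or insufficient
        // access), leaving it to the caller to treat it as a bad fault.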
        bool AddrSpace::handle_fault(ulong vaddr, bool write, bool exec, bool user)
        {
                if (lock.held_by_curthread())
                        return false;

                assert(!(write && exec));
                PTEFlags reqflags;

                if (user)
                        reqflags.User = 1;

                if (write)
                        reqflags.Writeable = 1;
                else if (exec)
                        reqflags.Executable = 1;
                else
                        reqflags.Readable = 1;

                reqflags.Valid = 1;

                try {
                        mappable.pagein(page_align(vaddr), reqflags);
                }

                catch (BadPageFault &bpf) {
                        // FIXME: retain info about nature of bpf
                        // to throw to user?
                        return false;
                }

                return true;
        }

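        // Returns true if "region" overlaps an existing virtual area, with
        // "va" set to the first overlapping area.  Returns false otherwise,
        // with "va" set to the nearest virtual area entirely below the region
        // (or NULL if there is none).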
        bool AddrSpace::check_overlap(Region region, VirtualArea *&va)
        {
                if (region.end < region.start)
                        return true;

                va = varea_tree.find_nearest(region.start);

                if (!va)
                        return false;

                // If region.start is in an existing region, that region will
                // be returned.

                if (region.end >= va->region().start &&
                    region.start <= va->region().end)
                        return true;

                // If it returns a region that's greater than region.start, and va
                // itself does not overlap, then prev does not overlap (or else
                // region.start would be in or before prev, and thus prev would
                // have been returned).

                // If it returns a region that's less than region.start, we still
                // need to check next, as region.end could be in (or beyond) that
                // region.

                if (va->list_node.next != &varea_list) {
                        VirtualArea *next =
                                va->list_node.next->listentry(VirtualArea, list_node);

                        if (region.end >= next->region().start &&
                            region.start <= next->region().end)
                        {
                                va = next;
                                return true;
                        }
                }

                VirtualArea *prev;

                if (va->list_node.prev != &varea_list)
                        prev = va->list_node.prev->listentry(VirtualArea, list_node);
                else
                        prev = NULL;

                if (region.start < va->region().start) {
                        assert(!prev || prev->region().end < region.start);
                        va = prev;
                }

                return false;
        }

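        // Split any virtual areas that straddle the boundaries of "region",
        // so that region.start and region.end fall exactly on varea
        // boundaries.  Returns the first varea overlapping the region, or
        // NULL if the region overlaps no vareas.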
        VirtualArea *AddrSpace::split_varea(Region region)
        {
                VirtualArea *start, *mid, *end;

                // check_overlap is used rather than varea_tree.find,
                // so that the first overlapping region can be returned,
                // as most (if not all) callers will need this anyway.

                if (!check_overlap(region, start))
                        return NULL;

                assert(start);
                assert(start->aspace == this);
                assert(start->region().end >= region.start);

                if (start->region().start < region.start) {
                        // There is a varea that straddles region.start;
                        // create a new varea "mid" for the overlapping part.

                        mid = new VirtualArea;

                        mid->aspace = this;
                        mid->region().start = region.start;

                        if (region.end > start->region().end)
                                mid->region().end = start->region().end;
                        else
                                mid->region().end = region.end;

                        mid->flags = start->flags;
                        mid->ma = start->ma;
                        mid->offset = start->offset;

                        if (start->region().end > region.end) {
                                // The varea also straddles region.end; create a new
                                // varea "end" for the other side of the region.

                                end = new VirtualArea;

                                end->aspace = this;
                                end->region().start = region.end + 1;
                                end->region().end = start->region().end;

                                end->flags = start->flags;
                                end->ma = start->ma;
                                end->offset = start->offset;
                        } else {
                                end = NULL;
                        }

                        start->region().end = region.start - 1;

                        varea_tree.add(mid);
                        mid->ma->map(mid);

                        if (end) {
                                // Splits have already been done at both ends of the region,
                                // so there's no need to look up the ending address.

                                varea_tree.add(end);
                                mid->ma->map(end);
                                return mid;
                        }

                        start = mid;
                }

                if (start->region().end == region.end)
                        return start;

                if (start->region().end > region.end)
                        end = start;
                else {
                        end = varea_tree.find(region.end);

                        if (!end)
                                return start;

                        assert(end->aspace == this);
                        assert(end->region().start <= region.end);
                        assert(end->region().end >= region.end);

                        if (end->region().end == region.end)
                                return start;
                }

                assert(end->region().end > region.end);

                // There is a varea that straddles region.end;
                // create a new varea "mid" for the overlapping part.

                mid = new VirtualArea;

                mid->aspace = this;
                mid->region().start = end->region().start;
                mid->region().end = region.end;

                mid->flags = end->flags;
                mid->ma = end->ma;
                mid->offset = end->offset;

                end->region().start = region.end + 1;

                varea_tree.add(mid);
                mid->ma->map(mid);

                return start;
        }

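        // Find a free virtual region of "len" bytes.  The cached_free_region
        // hint is tried first; failing that, the varea list is walked looking
        // for a large enough gap.  On success, "region" is filled in and
        // "prev" is set to the varea preceding it, for the caller's list
        // insertion.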
        bool AddrSpace::get_free_region(ulong len, Region &region,
                                        VirtualArea *&prev)
        {
                assert(page_aligned(len));
                assert(cached_free_region);

                region.start = cached_free_region;
                region.end = cached_free_region + len - 1;

                if (region.end <= Arch::user_end && !check_overlap(region, prev)) {
                        cached_free_region = region.end + 1;
                        return true;
                }

                for (Util::List *node = &prev->list_node; node != &varea_list;
                     node = node->next)
                {
                        VirtualArea *va = node->listentry(VirtualArea, list_node);
                        ulong end = Arch::user_end;

                        if (node->next != &varea_list) {
                                VirtualArea *next = node->next->listentry(VirtualArea, list_node);
                                end = next->region().start - 1;
                        }

                        assert(end > va->region().end);

                        if (end - va->region().end >= len) {
                                region.start = va->region().end + 1;
                                region.end = region.start + len - 1;

                                assert(page_aligned(region.start));
                                cached_free_region = region.end + 1;

                                // Report the varea that now precedes the
                                // returned region, not the stale value left
                                // over from the cached-hint check above.
                                prev = va;
                                return true;
                        }
                }

                if (cached_free_region != Arch::user_start + Arch::page_size) {
                        cached_free_region = Arch::user_start + Arch::page_size;
                        return get_free_region(len, region, prev);
                }

                return false;
        }

        // The "mapped" parameter is used to indicate whether the top-level
        // address space has had a mapping established.  If "mapped" is
        // false, but an exception is not thrown, then this method must
        // be called again to propagate the mapping along the aspace chain.
        //
        // FIXME: Between aspace locks, if aspace's mapping is revoked and
        // ma->aspace's mapping changes, a pagein could leak through and cause
        // a page load or a copy-on-write breaking.  This isn't a huge deal
        // (it doesn't affect the correctness of the code or give aspace
        // access to ma->aspace's new mapping), but it's unpleasant, and could
        // have an adverse impact on determinism.  If you have a real-time
        // application that can't tolerate the occasional spurious pagein or
        // copy-on-write breaking, then use an address space that hasn't
        // previously been exposed to recursive mappers.

        bool ASpaceMappable::rec_pagein(AddrSpace *aspace, u64 vaddr,
                                        PTEFlags reqflags)
        {
                bool mapped = true;

                // aspace->mappable.retain();

                while (true) {
                        Lock::DroppableAutoLock autolock(aspace->lock);
                        VirtualArea *va = aspace->varea_tree.find(vaddr);

                        if (!va)
                                throw BadPageFault();

                        if ((va->flags & reqflags) != reqflags)
                                throw BadPageFault();

                        if (aspace->map(va, vaddr, reqflags))
                                break;

                        mapped = false;
                        Mappable *ma = va->ma;
                        vaddr += va->offset;

                        // ma->retain();
                        autolock.unlock();
                        // aspace->mappable.release();

                        if (!ma->is_aspace) {
                                ma->pagein(vaddr, reqflags);
                                // ma->release();
                                break;
                        }

                        aspace = static_cast<ASpaceMappable *>(ma)->aspace;
                }

                return mapped;
        }

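        // rec_pagein() returns false when it only established a mapping
        // further up the aspace chain, so retry until the mapping has
        // propagated all the way down to this address space.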
        void ASpaceMappable::pagein(u64 vaddr, PTEFlags reqflags)
        {
                while (!rec_pagein(aspace, vaddr, reqflags));
        }

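        // Replace the copy-on-write mapping at vaddr with a private copy of
        // the page.  If this is the only remaining reference to the page, the
        // FaultOnWrite flag is simply cleared instead.  Called with both the
        // aspace lock and the rmap lock held.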
        void AddrSpace::break_copy_on_write(VirtualArea *va, u64 vaddr, u64 phys)
        {
                assert(lock.held_by_curthread());
                assert(rmap_lock.held_by_curthread());

                assert(va->flags.FaultOnWrite);
                assert(va->aspace == this);

                Page *old_page = phys_to_page(phys);

                Region region = { vaddr, vaddr + Arch::page_size - 1 };

                // If this is the only reference to the page left, then
                // nothing needs to be copied.  Just clear the COW condition.
                if (is_phys_page(old_page) && old_page->get_refcount() == 1) {
                        PTEFlags mask, flags;
                        mask.FaultOnWrite = 1;

                        page_table->set_flags(region, flags, mask);
                        return;
                }

                Page *new_page = PageAlloc::alloc(1);

                // FIXME -- highmem
                // OPT: It'd be better to do this without the rmap_lock held,
                // especially if rmap_lock is global rather than per-physpage.
                // I want to keep things simple for now and optimize later,
                // though.

                memcpy(page_to_kvirt(new_page), phys_to_kvirt(phys),
                       Arch::page_size);

                page_table->rmap_table.break_copy_on_write(region.start, new_page);
                new_page->release();
        }

        void ASpaceMappable::get_mapping(u64 vaddr, u64 *phys, PTEFlags *flags)
        {
                aspace->page_table->get_mapping(vaddr, phys, flags);
        }

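        // Try to establish a page table mapping for the single page at vaddr,
        // using whatever mapping the varea's backing mappable currently has
        // for it.  Returns false if the backing mappable does not yet have a
        // suitable mapping, or if a copy-on-write fault must be handled
        // further up the chain; the caller then pages in upstream and retries.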
        bool AddrSpace::map(VirtualArea *va, u64 vaddr, PTEFlags reqflags)
        {
                Lock::AutoLock autolock(rmap_lock);
                assert(va->aspace == this);

                u64 phys;
                PTEFlags flags;
                va->ma->get_mapping(vaddr + va->offset, &phys, &flags);

                PTEFlags newflags = flags & va->flags;
                newflags.FaultOnWrite = flags.FaultOnWrite | va->flags.FaultOnWrite;

                if (!newflags.Valid) {
                        assert(va->flags.Valid);
                        return false;
                }

                if ((newflags & reqflags) != reqflags)
                        return false;

                u64 oldphys;
                PTEFlags oldflags;
                page_table->get_mapping(vaddr, &oldphys, &oldflags);

                if (oldflags.Valid &&
                    !(reqflags.Writeable && oldflags.FaultOnWrite))
                {
                        // If the existing mapping is valid, don't try to map it again.
                        // The existing mapping was put there possibly by a race, but
                        // more likely because a FaultOnWrite was handled upstream.
                        //
                        // FaultOnWrite handling is the only type of mapping change that
                        // can be done directly; all others must change the varea and do
                        // an rmap invalidation instead.  FaultOnWrite is special
                        // because we don't want to split vareas for every page that
                        // gets its copy-on-write broken.

                        assert((oldflags & reqflags) == reqflags);
                        assert(!va->flags.FaultOnWrite || oldphys == phys);
                        return true;
                }

                if (reqflags.Writeable && oldflags.FaultOnWrite)
                {
                        // The FaultOnWrite needs to be handled upstream.
                        if (!va->flags.FaultOnWrite)
                                return false;

                        va->aspace->break_copy_on_write(va, vaddr, phys);
                } else {
                        assert(!oldflags.Valid);
                        PageTable *usptbl = NULL;

                        if (va->ma->is_aspace) {
                                ASpaceMappable *asma = static_cast<ASpaceMappable *>(va->ma);
                                usptbl = asma->aspace->page_table;
                        }

                        RMapTable::map(va, usptbl, vaddr, vaddr + va->offset);

                        RegionWithOffset rwo;
                        rwo.start = vaddr;
                        rwo.end = vaddr + Arch::page_size - 1;
                        rwo.offset = phys;

                        page_table->map(rwo, newflags);
                }

                return true;
        }

        void ASpaceMappable::get_size(u64 *size)
        {
                aspace->get_size(size);
        }

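        // Map "region" of mappable "ma" into this address space at *vstart,
        // or at a newly allocated free region if *vstart is unspecified_start
        // or overlaps an existing varea (Fixed mappings fail with ResourceBusy
        // instead).  A new varea recording the flags and mappable offset is
        // created and linked into the tree and list.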
        void AddrSpace::map(IMappable ma, Region region, u64 *vstart,
                            MapFlags mflags, int map_type)
        {
                // FIXME: check alignment for VIPT caches
                // FIXME: Implement the "Replace" map flag

                if (mflags.Replace)
                        throw_idl(InvalidArgument, 3,
                                  countarray("Replace unimplemented"));

                Mappable *cma = Mappable::classptr(ma);
                if (!cma) {
                        // The given IMappable does not refer to a Mappable
                        // of this kernel.

                        throw_idl(InvalidArgument, 0, nullarray);
                }

                bool fixed = mflags.Fixed;

                if (is_process)
                        mflags.Fixed = 1;

                if (!page_aligned(region.start))
                        throw_idl(InvalidArgument, 1, countarray("unaligned start"));

                if (!page_aligned(region.end + 1))
                        throw_idl(InvalidArgument, 1, countarray("unaligned end"));

                Lock::AutoLock autolock(lock);
                Region vregion;
                VirtualArea *prev;

                if (*vstart != System::Mem::AddrSpace_ns::unspecified_start) {
                        vregion.start = *vstart;
                        vregion.end = vregion.start + region.end - region.start;

                        if (is_process) {
                                if (!valid_addr(vregion.start))
                                        throw_idl(InvalidArgument, 2,
                                                  countarray("invalid virtual start"));

                                if (!valid_addr(vregion.end))
                                        throw_idl(InvalidArgument, 2,
                                                  countarray("invalid virtual end"));
                        }

                        if (check_overlap(vregion, prev))
                                *vstart = System::Mem::AddrSpace_ns::unspecified_start;
                }

                if (*vstart == System::Mem::AddrSpace_ns::unspecified_start) {
                        if (fixed)
                                throw_idl(ResourceBusy, 2, countarray("varea overlap"));

                        if (!get_free_region(region.end - region.start + 1, vregion, prev))
                                throw_idl(OutOfSpace, countarray("out of vspace"));

                        *vstart = vregion.start;
                }

                VirtualArea *newva = new VirtualArea;
                newva->aspace = this;
                newva->region() = vregion;

                newva->flags.Valid = 1;
                newva->flags.User = map_type != map_kernel;
                newva->flags.Readable = mflags.access_IDLNS_Read;
                newva->flags.Writeable = mflags.access_IDLNS_Write;
                newva->flags.Executable = mflags.access_IDLNS_Exec;
                newva->flags.FaultOnWrite = mflags.CopyOnWrite;
                newva->flags.Protected = map_type != map_user;
                newva->ma = cma;
                newva->offset = region.start - vregion.start;

                varea_tree.add(newva);
                newva->ma->map(newva);

                if (prev) {
                        prev->list_node.add_front(&newva->list_node);
                } else {
                        varea_list.add_front(&newva->list_node);
                }
        }

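        // Remove all vareas overlapping "region", splitting any vareas that
        // straddle its boundaries.  Protected vareas are skipped unless the
        // request comes from the kernel.  Existing page mappings are torn
        // down through the rmap table after each varea is removed, so that
        // new faults cannot re-establish them.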
        void AddrSpace::unmap(Region region, bool from_kernel)
        {
                u64 orig_start = region.start;

                while (region.start <= region.end) {
                        Lock::DroppableAutoLock autolock(lock);
                        VirtualArea *va;

                        // If check_overlap returns false, then there are no vareas
                        // in the specified region, so there's nothing to unmap.

                        if (!check_overlap(region, va))
                                return;

                        if (va->flags.Protected && !from_kernel) {
                                region.start = va->list_node.next->
                                               listentry(VirtualArea, list_node)->region().start;

                                if (region.start <= orig_start)
                                        break;

                                continue;
                        }

                        u64 va_end = va->region().end;
                        u64 next_start = 0;

                        if (va_end > region.end) {
                                u64 va_start = va->region().start;
                                va->region().start = region.end + 1;

                                if (va_start < region.start) {
                                        VirtualArea *newva = new VirtualArea;

                                        newva->aspace = this;
                                        newva->region().start = va_start;
                                        newva->region().end = region.start - 1;

                                        newva->flags = va->flags;
                                        newva->ma = va->ma;
                                        newva->offset = va->offset;

                                        varea_tree.add(newva);
                                        newva->ma->map(newva);
                                }

                                VirtualArea *nextva =
                                        va->list_node.next->listentry(VirtualArea, list_node);

                                next_start = nextva->region().start;
                        } else if (va->region().start < region.start) {
                                va->region().end = region.start - 1;
                        } else {
                                varea_tree.del(va);
                                va->ma->unmap(va);
                        }

                        // This is done after the varea removal, so that new faults
                        // don't map things in again.

                        // OPT: Skip RMap-based unmapping if nothing maps this aspace.
                        // OPT: Push this loop into the RMap code, allowing it to skip
                        // empty portions of the tables (as the pagetable code currently
                        // does).

                        while (region.start <= va_end && region.start <= region.end) {
                                page_table->rmap_table.unmap(region.start);
                                region.start += Arch::page_size;

                                if (region.start <= orig_start)
                                        break;
                        }

                        region.start = next_start;

                        if (region.start <= orig_start)
                                break;
                }
        }

        void AddrSpace::set_mapflags(Region region, MapFlags mflags)
        {
                // FIXME: implement
                // Find varea, split if necessary, propagate change to stacked aspaces
        }

        void AddrSpace::get_mapflags(Region region, MapFlags *mflags, uint8_t *all_same)
        {
                // FIXME: implement
        }

        void AddrSpace::get_mapping(Region region, IMappable *ma, u64 *offset)
        {
                // FIXME: implement
        }

        void AddrSpace::get_page_size(u32 *page_size)
        {
                *page_size = Arch::page_size;
        }

        void AddrSpace::get_min_align(u32 *min_align)
        {
                *min_align = Arch::page_mapping_min_align;
        }

        void Mappable::map(VirtualArea *varea)
        {
                mappings_lock.lock_irq();
                mappings.add_back(&varea->mappings_node);
                mappings_lock.unlock_irq();
        }

        void Mappable::unmap(VirtualArea *varea)
        {
                mappings_lock.lock_irq();
                varea->mappings_node.del();
                mappings_lock.unlock_irq();
        }

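        // Invoked when a PTE is destroyed or overwritten.  For process
        // address spaces the TLB entry is invalidated; a newly-dirtied page
        // is retained (pending writeback) before the mapping's page reference
        // is dropped (unless no_release is set).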
        void PageTable::kill_pte(ulong vaddr, u64 paddr, bool dirty,
                                 bool valid, bool no_release)
        {
                Page *oldpage = phys_to_page(paddr);

                if (!is_phys_page(oldpage))
                        oldpage = NULL;

                if (is_process && valid) {
                        Arch::invalidate_tlb_entry(vaddr);

                        if (oldpage && dirty &&
                            !ll_test_and_set(&oldpage->flags, PageFlags::bits::Dirty))
                        {
                                oldpage->retain();
                                // Queue page for writeback
                        }
                }

                if (!no_release && oldpage)
                        oldpage->release();
        }

        // FIXME: Add a special PTE flag to indicate that PhysMem mappings
        // don't mess with page refcounts.

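        // Trivial mappable exposing all of physical memory one-to-one:
        // get_mapping() returns the address unchanged, with full permissions.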
        class PhysMem : public Mappable {
        public:
                void get_size(u64 *size)
                {
                        if (sizeof(long) == 8)
                                *size = 1ULL << (64 - Arch::page_shift);
                        else
                                *size = 1ULL << (32 - Arch::page_shift);
                }

                void pagein(u64 vaddr, PTEFlags reqflags)
                {
                        // Doesn't need to do anything yet, though it may later
                        // once high memory support is added.
                }

                void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
                {
                        *phys = addr;
                        *flags = 0;
                        flags->Valid = 1;
                        flags->Readable = 1;
                        flags->Writeable = 1;
                        flags->Executable = 1;
                        flags->User = 1;
                }
        };

        PhysMem real_physmem;
        IMappable physmem = real_physmem;

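        // Anonymous memory: each get_mapping() call hands back a freshly
        // allocated, zeroed page with full permissions.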
        class AnonMem : public Mappable {
        public:
                void get_size(u64 *size)
                {
                        if (sizeof(long) == 8)
                                *size = 1ULL << (64 - Arch::page_shift);
                        else
                                *size = 1ULL << (32 - Arch::page_shift);
                }

                void pagein(u64 vaddr, PTEFlags reqflags)
                {
                        // Doesn't need to do anything yet, though it may later
                        // once high memory support is added.
                }

                void get_mapping(u64 addr, u64 *phys, PTEFlags *flags)
                {
                        Page *page = PageAlloc::alloc(1);

                        // OPT: Only zero if it was asked for.
                        // OPT: Eventually, have separate pagelists for zeroed and
                        // unzeroed memory, and a low-priority background thread
                        // that zeroes pages and moves them to the zeroed list.
                        bzero(page_to_kvirt(page), Arch::page_size);

                        *phys = page_to_phys(page);
                        *flags = 0;
                        flags->Valid = 1;
                        flags->Readable = 1;
                        flags->Writeable = 1;
                        flags->Executable = 1;
                        flags->User = 1;
                }
        };

        AnonMem real_anonmem;
        IMappable anonmem = real_anonmem;
}

#include <servers/mem/addrspace/footer.cc>