1 // Generic 64-bit PTE, mainly useful for non-process aspaces on 32-bit hosts,
2 // so that files, disks, etc. can address >4GiB. 64-bit hosts will probably
3 // want to use Arch::PTE instead, especially if direct copying between page
4 // tables of similar PTEs is implemented. 32-bit embedded systems may also
5 // wish to use Arch::PTE (for both less code and smaller page tables) if all
6 // storage can be addressed with 32 bits.
8 #ifndef _KERN_GENERIC_PTE_H
9 #define _KERN_GENERIC_PTE_H
11 #include <lowlevel/misc.h>
12 #include <lowlevel/atomic.h>
// NOTE(review): fragmentary listing — interior lines (class header, braces)
// are missing from this view.  Comments only; code is unchanged.
// Native-word physical address for the host arch; the 64-bit generic types
// below widen VirtAddr/PhysAddr to u64.
17 typedef ulong PhysAddr;
// GenDirPTE: directory-level entry whose payload ("raw") is a pointer to the
// next-level table.  Default construction yields an empty (NULL) entry.
// NOTE(review): NULL on a void* member — presumably pre-C++11 code; nullptr
// would be preferred if the toolchain allows it.
21 GenDirPTE() : raw(NULL)
// Adopt an existing next-level-table pointer as-is.
25 GenDirPTE(void *RAW) : raw(RAW)
// Directory index covering 'addr' at this level: shift out the lower levels,
// then mask to the entries-per-table count.  Entry size is one machine
// pointer (sizeof(void *)), so pages_per_table is page_size / pointer size.
29 static uint addr_to_offset(VirtAddr addr, int shift)
31 int pages_per_table = Arch::page_size / sizeof(void *);
32 return (addr >> shift) & (pages_per_table - 1);
// Wrap 'addr' (a next-level table) in a directory entry.
40 static GenDirPTE set_table(void *addr)
42 return GenDirPTE(addr);
// Store this entry at table[offset].  Body not visible in this view —
// presumably a plain or atomic pointer store; confirm in the full file.
50 void set_pte(GenDirPTE *table, uint offset)
// Bits resolved per directory level: log2(entries per table), i.e.
// page_shift minus log2(sizeof(long)) (entries are pointer-sized).
56 shift_per_level = Arch::page_shift - _LL_LONG_LOGBYTES,
// Generic 64-bit address types and the directory-entry type used at
// non-leaf levels of this page table.
61 typedef u64 VirtAddr, PhysAddr;
62 typedef GenDirPTE DirPTE;
// Leaf-PTE flag bits, declared as bitfields over the PTE word.  C++ leaves
// bitfield allocation order implementation-defined, so the same flags are
// declared in opposite order for little- vs big-endian bitfield targets so
// that each flag lands on the same physical bit.  NOTE(review): only two of
// the flags are visible here; the full list lies in the elided lines.
69 PhysAddr Executable:1;
73 PhysAddr FaultOnWrite:1;
// Big-endian bitfield layout: same flags, reversed declaration order.
75 #elif defined(BITFIELD_BE)
77 PhysAddr FaultOnWrite:1;
81 PhysAddr Executable:1;
// Hard build error if the port did not define BITFIELD_LE/BITFIELD_BE —
// guessing the layout would silently corrupt PTE flag positions.
86 #error Unspecified/unrecognized bitfield endianness
// NOTE(review): fragmentary listing — method bodies are truncated; comments
// only, code unchanged.  GenPTE packs a leaf entry into a single u64 "raw":
// the frame address occupies the bits above page_shift, flags occupy the
// low (page-offset) bits, as the masks below demonstrate.
// Construct from a raw 64-bit PTE value.
92 GenPTE(PhysAddr init) : raw(init)
// Leaf-table index for 'addr': entries here are PhysAddr-sized (u64),
// unlike GenDirPTE where entries are pointer-sized.
105 static uint addr_to_offset(VirtAddr addr, int shift)
107 int pages_per_table = page_size / sizeof(PhysAddr);
108 return (addr >> shift) & (pages_per_table - 1);
// Physical frame address: raw with the low flag bits masked off.
111 PhysAddr pte_to_addr()
// The u64 cast keeps the mask 64-bit even when page_size is a narrower int.
113 return raw & ~((u64)page_size - 1);
// Inverse of pte_to_addr: a PTE holding 'phys' with all flag bits clear.
116 static GenPTE addr_to_pte(PhysAddr phys)
118 return phys & ~((u64)page_size - 1);
// Convert the arch-independent flag/mask pair into this PTE's encoding.
// The plain casts suggest Mem::PTEFlags already matches the in-PTE bit
// layout here — presumably by design; confirm against Mem::PTEFlags.
121 static void flags_to_pte(Mem::PTEFlags flagsin,
122 Mem::PTEFlags maskin,
126 flagsout = (PhysAddr)flagsin;
127 maskout = (PhysAddr)maskin;
// Return a copy with the masked flag bits replaced by 'flags'.
130 GenPTE set_flags(GenPTE mask, GenPTE flags)
132 return (raw & ~mask) | flags;
// Extract just the flag bits (everything below the frame address).
135 Mem::PTEFlags pte_to_flags()
137 return raw & (page_size - 1);
// Store this entry at table[offset]; body not visible in this view.
140 void set_pte(GenPTE *table, uint offset)
// Swap this entry into table[offset], returning the previous value.
// NOTE(review): only a plain read of the old value is visible here — no
// atomic exchange shown; verify the elided lines if callers rely on
// atomicity (the file does include lowlevel/atomic.h).
145 GenPTE xchg_pte(GenPTE *table, uint offset)
147 GenPTE old = table[offset];
159 // FIXME: decide how to handle stacked dirty pages
// Geometry constants for the generic 64-bit page table (enum header not
// visible in this view).
164 page_size = Arch::page_size,
165 page_shift = Arch::page_shift,
// Leaf level: entries are u64 (sizeof(PhysAddr) for the generic types).
167 pages_per_table = page_size / sizeof(PhysAddr),
// NOTE(review): magic 3 is presumably log2(sizeof(u64)) — the leaf
// counterpart of GenDirPTE's _LL_LONG_LOGBYTES; a named constant would be
// clearer and safer.
168 shift_per_level = page_shift - 3,
// Total levels = 1 leaf + 1 first directory + ceil(remaining VA bits /
// directory shift), with the ceiling done via the usual (x - 1) / y trick
// over the bits left after the page offset and the leaf level.
169 num_levels = 2 + (64 - page_shift - shift_per_level - 1) /
170 DirPTE::shift_per_level,
// Stacked (non-process) aspaces have no kernel mapping window, so the
// kmap range is deliberately empty.
172 // kmap is ignored for stacked aspaces
173 kmap_start = 0, kmap_end = 0