// arch/x64/mem.cc -- x64 misc. memory management
//
// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal with
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following condition:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
// SOFTWARE.
#include <kern/kernel.h>
#include <kern/pagealloc.h>
#include <kern/libc.h>
// Initial page tables have the first 4 MiB mapped, using large pages.

__attribute__((aligned(4096))) u64 x64_init_ptbl_l2[512] = {
	0x87,
	0x200087,
};
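// 0x87 in each entry above is present | writable | user | large page
// (bit 7); the second entry maps physical address 0x200000.  The global
// bit (0x100) is deliberately left clear until early_adjust_mappings()
// below, which depends on these pages being non-global at first.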
// The ORing of 7 into these entries will be done in entry.S;
// doing it here causes the compiler to emit runtime code
// to do it.

__attribute__((aligned(4096))) u64 x64_init_ptbl_l3[512] = {
	reinterpret_cast<u64>(x64_init_ptbl_l2) - KERNEL_START
};

__attribute__((aligned(4096))) u64 x64_init_ptbl_l4[512] = {
	reinterpret_cast<u64>(x64_init_ptbl_l3) - KERNEL_START
};
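// Subtracting KERNEL_START converts each table's link-time virtual
// address to the physical address the MMU requires; per the comment
// above, entry.S ORs in the low flag bits (7) before enabling paging.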
extern int _end;	// linker-provided symbol marking the end of the kernel image

using Mem::PageAllocZone;

namespace Arch {

PageAllocZone *pagezones[3];

namespace Priv {

PageAllocZone isadmazone, dma32zone, highzone;

PageAllocZone *isadmazonelist[2] = { &isadmazone, NULL };
PageAllocZone *dma32zonelist[3];
PageAllocZone *normalzonelist[4];

}

PageAllocZone **pagezonelists[3] = { Priv::normalzonelist,
                                     Priv::dma32zonelist,
                                     Priv::isadmazonelist };
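// Each list is a NULL-terminated search order for one allocation
// constraint: normal allocations can fall back from the high zone all
// the way down to the ISA DMA zone, 32-bit DMA allocations skip the
// high zone, and ISA DMA allocations are confined to the lowest zone.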
uintptr_t next_free_bootmem = reinterpret_cast<uintptr_t>(&_end) -
                              KERNEL_START + PHYSMEM_START;
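// Bootmem allocations start immediately past the kernel image: &_end is
// converted from its link-time virtual address to a physical address by
// subtracting KERNEL_START, then re-expressed inside the kernel's linear
// mapping of physical memory by adding PHYSMEM_START.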
namespace Priv {

void early_adjust_mappings()
{
	using Mem::get_bootmem;

	// Clear low-address mappings and invalidate TLB
	x64_init_ptbl_l4[0] = 0;
	x64_init_ptbl_l3[0] = 0;
	asm volatile("movq %0, %%cr3" : : "r" (kvirt_to_phys(x64_init_ptbl_l4)));
	// Mark the ktext mapping global now that it's not mapped at address
	// zero.

	x64_init_ptbl_l2[0] |= 0x100;
	x64_init_ptbl_l2[1] |= 0x100;
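	// Bit 8 (0x100) is the global bit: once CR4.PGE is enabled, these
	// translations survive CR3 reloads on address space switches.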
	u64 l3phys = kvirt_to_phys(get_bootmem(page_size, page_size));
	u64 *l3 = static_cast<u64 *>(phys_to_ktext(l3phys));
	bzero(l3, page_size);
	x64_init_ptbl_l4[0x100] = l3phys | 7;
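	// L4 slot 0x100 (256) is the first slot of the sign-extended upper
	// half of the 48-bit address space, and each L4 slot covers 512 GiB;
	// this is presumably where PHYSMEM_START points.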
	u64 l2phys = kvirt_to_phys(get_bootmem(page_size, page_size));
	u64 *l2 = static_cast<u64 *>(phys_to_ktext(l2phys));
	bzero(l2, page_size);
	l3[0] = l2phys | 7;
	// Map at least as much as is mapped in ktext, so that
	// things like the VGA driver can use it early without
	// having to know about phys_to_ktext, and so map_physmem
	// doesn't need to special-case the use of phys_to_ktext
	// for the first couple pages.

	l2[0] = 0x187;		// mirror the two 2 MiB ktext pages,
	l2[1] = 0x200187;	// global + large + user + writable + present
}

void map_physmem()
{
	using Mem::get_bootmem;

	// phys_to_ktext can be used for the first
	// 2MiB-minus-size-of-kernel of bootmem allocations.
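	// Map every 2 MiB chunk of physical memory at PHYSMEM_START,
	// allocating intermediate page tables from bootmem as needed.
	// mem_end counts 4 KiB pages, so dividing by 512 converts it to
	// 2 MiB units; chunks 0 and 1 were already mapped by
	// early_adjust_mappings().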
	for (uintptr_t physpage = 2; physpage <= (mem_end - 1) / 512; physpage++) {
		uintptr_t virtpage = physpage + (PHYSMEM_START >> 21);
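		// virtpage indexes 2 MiB pages of virtual address space:
		// bits 18 and up select the L4 slot, bits 9-17 the L3 slot,
		// and bits 0-8 the L2 slot.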
		u64 l3phys = x64_init_ptbl_l4[(virtpage >> 18) & 511] &
		             ~(page_size - 1);
		u64 *l3;

		if (!l3phys) {
			// No L3 table covers this range yet; allocate and
			// install one.
			l3 = static_cast<u64 *>(get_bootmem(page_size, page_size));
			bzero(l3, page_size);
			x64_init_ptbl_l4[(virtpage >> 18) & 511] =
				kvirt_to_phys(l3) | 7;
		} else {
			l3 = static_cast<u64 *>(phys_to_kvirt(l3phys));
		}
		u64 l2phys = l3[(virtpage >> 9) & 511] & ~(page_size - 1);
		u64 *l2;

		if (!l2phys) {
			l2 = static_cast<u64 *>(get_bootmem(page_size, page_size));
			bzero(l2, page_size);
			l3[(virtpage >> 9) & 511] = kvirt_to_phys(l2) | 7;
		} else {
			l2 = static_cast<u64 *>(phys_to_kvirt(l2phys));
		}

		l2[virtpage & 511] = (physpage << 21) | 0x187;
	}
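	// Each iteration above installed a 2 MiB mapping with flags 0x187:
	// present | writable | user | large page (bit 7) | global (bit 8).
	// All of physical memory is now reachable through the PHYSMEM_START
	// window, which is presumably what phys_to_kvirt relies on.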
	// One Mem::Page struct per 4 KiB physical page frame.
	size_t pages_size = mem_end * sizeof(Mem::Page);
	Mem::pages = static_cast<Mem::Page *>(get_bootmem(pages_size, 8));
	Mem::last_page = Mem::pages + mem_end - 1;
	bzero(Mem::pages, pages_size);
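	// Carve physical memory into allocator zones.  highzonestart and
	// dma32zonestart are page-number boundaries (presumably 4 GiB and
	// 16 MiB, the 32-bit DMA and ISA DMA limits); each zone is pushed
	// onto the lists that are allowed to allocate from it, and a zone
	// boundary is clamped to mem_end when the machine has less memory.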
	int normal = 0, dma = 0;
	uintptr_t highstart = highzonestart;
	uintptr_t dma32start = dma32zonestart;

	if (mem_end > highstart) {
		normalzonelist[normal++] = &highzone;
		highzone.init(highstart, mem_end - highstart);
	} else {
		highstart = mem_end;
	}

	if (mem_end > dma32start) {
		normalzonelist[normal++] = &dma32zone;
		dma32zonelist[dma++] = &dma32zone;
		dma32zone.init(dma32start, highstart - dma32start);
	} else {
		dma32start = mem_end;
	}

	normalzonelist[normal++] = &isadmazone;
	dma32zonelist[dma++] = &isadmazone;
	isadmazone.init(mem_start, dma32start);
}

}
}