1 // arch/x64/mem.cc -- x64 misc. memory management
3 // This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
5 // Permission is hereby granted, free of charge, to any person obtaining a copy of
6 // this software and associated documentation files (the "Software"), to deal with
7 // the Software without restriction, including without limitation the rights to
8 // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
9 // of the Software, and to permit persons to whom the Software is furnished to do
10 // so, subject to the following conditions:
12 // * Redistributions of source code must retain the above copyright notice,
13 // this list of conditions and the following disclaimers.
15 // * Redistributions in binary form must reproduce the above copyright notice,
16 // this list of conditions and the following disclaimers in the
17 // documentation and/or other materials provided with the distribution.
19 // * The names of the Software's authors and/or contributors
20 // may not be used to endorse or promote products derived from
21 // this Software without specific prior written permission.
23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
25 // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 // CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
31 #include <kern/kernel.h>
33 #include <kern/pagealloc.h>
34 #include <kern/libc.h>
36 // Initial page tables have the first 4 MiB mapped, using large pages.
// NOTE(review): lines are missing from this view of the file (the three
// array initializers below are never closed here); code left byte-identical.
//
// Page-table levels follow the x86-64 scheme: L4 = PML4, L3 = PDPT,
// L2 = PD.  Each table is one 4-KiB page of 512 64-bit entries.
38 __attribute__((aligned(4096))) u64 x64_init_ptbl_l2[512] = {
43 // The ORing of 7 into these entries will be done in entry.S;
44 // doing it here causes the compiler to emit runtime code
// (the initializers must be link-time constants; 7 == Present|Write|User
// in the low attribute bits of a page-table entry).
//
// L3 (PDPT): first entry points at the L2 table's load-time physical
// address (kernel virtual address minus KERNEL_START).
47 __attribute__((aligned(4096))) u64 x64_init_ptbl_l3[512] = {
48 reinterpret_cast<u64>(x64_init_ptbl_l2) - KERNEL_START
// L4 (PML4): first entry points at the L3 table's physical address.
51 __attribute__((aligned(4096))) u64 x64_init_ptbl_l4[512] = {
52 reinterpret_cast<u64>(x64_init_ptbl_l3) - KERNEL_START
57 using Mem::PageAllocZone;
// Physical-page allocator zones: ISA DMA (lowest memory), 32-bit DMA,
// and "high" memory above that.
// NOTE(review): lines are missing from this view -- the pagezonelists
// initializer appears to lack its Priv::dma32zonelist middle entry, and
// any enclosing namespace braces are not visible; code left untouched.
62 PageAllocZone *pagezones[3];
65 PageAllocZone isadmazone, dma32zone, highzone;
// Per-request fallback lists: most-preferred zone first, NULL-terminated
// (the extra array slots hold the terminator / optional zones).
67 PageAllocZone *isadmazonelist[2] = { &isadmazone, NULL };
68 PageAllocZone *dma32zonelist[3];
69 PageAllocZone *normalzonelist[4];
72 PageAllocZone **pagezonelists[3] = { Priv::normalzonelist,
74 Priv::isadmazonelist };
// First free byte of boot-time memory: everything past the kernel image
// (_end), translated from kernel-text space into the PHYSMEM_START
// linear-mapping window.
76 uintptr_t next_free_bootmem = reinterpret_cast<uintptr_t>(&_end) -
77 KERNEL_START + PHYSMEM_START;
80 void early_adjust_mappings()
// Runs very early, still on the boot page tables: drops the low-memory
// identity mapping and begins building the permanent kernel mappings.
// NOTE(review): the opening brace and the tail of this function are not
// visible in this view; code left byte-identical.
82 using Mem::get_bootmem;
84 // Clear low-address mappings and invalidate TLB
85 x64_init_ptbl_l4[0] = 0;
86 x64_init_ptbl_l3[0] = 0;
// Reloading CR3 flushes all non-global TLB entries.
87 asm volatile("movq %0, %%cr3" : : "r" (kvirt_to_phys(x64_init_ptbl_l4)));
89 // Mark the ktext mapping global now that it's not mapped at address
// zero any more; 0x100 is the Global bit in a PDE/PTE, which keeps these
// translations across CR3 reloads.
92 x64_init_ptbl_l2[0] |= 0x100;
93 x64_init_ptbl_l2[1] |= 0x100;
// Allocate a fresh L3 (PDPT) page from bootmem for the physical-memory
// window and install it at L4 slot 0x100 (the PHYSMEM_START region);
// 7 == Present|Write|User.  phys_to_ktext is used because the physmem
// window itself is not mapped yet.
95 u64 l3phys = kvirt_to_phys(get_bootmem(page_size, page_size));
96 u64 *l3 = static_cast<u64 *>(phys_to_ktext(l3phys));
98 x64_init_ptbl_l4[0x100] = l3phys | 7;
// A fresh, zeroed L2 (PD) page beneath it.  NOTE(review): the code that
// installs l2 into l3 and maps the first large pages presumably follows
// in the missing lines -- confirm against the full file.
100 u64 l2phys = kvirt_to_phys(get_bootmem(page_size, page_size));
101 u64 *l2 = static_cast<u64 *>(phys_to_ktext(l2phys));
102 bzero(l2, page_size);
105 // Map at least as much as is mapped in ktext, so that
106 // things like the VGA driver can use it early without
107 // having to know about phys_to_ktext, and so map_physmem
108 // doesn't need to special-case the use of phys_to_ktext
109 // for the first couple pages.
117 using Mem::get_bootmem;
// NOTE(review): this function's signature and opening fall outside this
// view; it appears to be the main physical-memory init -- it maps all of
// RAM into the PHYSMEM_START window, allocates the page array, and sets
// up the allocator zones.  Code left byte-identical.
119 // phys_to_ktext can be used for the first
120 // 2MiB-minus-size-of-kernel of bootmem allocations.
// Map every 2-MiB frame of physical RAM.  mem_end looks like a count of
// 4-KiB pages, so (mem_end - 1) / 512 is the index of the last 2-MiB
// large page; frames 0 and 1 (the first 4 MiB) are already mapped by the
// initial page tables, hence physpage starts at 2.
122 for (uintptr_t physpage = 2; physpage <= (mem_end - 1) / 512; physpage++)
124 uintptr_t virtpage = physpage + (PHYSMEM_START >> 21);
// Walk the L4 -> L3 -> L2 chain for this virtual 2-MiB page, allocating
// and zeroing intermediate tables on demand (7 == Present|Write|User).
// NOTE(review): the mask on the L4 entry and the if/else surrounding the
// allocation below are in lines missing from this view.
127 u64 l3phys = x64_init_ptbl_l4[(virtpage >> 18) & 511] &
131 l3 = static_cast<u64 *>(get_bootmem(page_size, page_size));
132 bzero(l3, page_size);
133 x64_init_ptbl_l4[(virtpage >> 18) & 511] =
134 kvirt_to_phys(l3) | 7;
136 l3 = static_cast<u64 *>(phys_to_kvirt(l3phys));
// Same on-demand walk for the L2 (PD) level.
140 u64 l2phys = l3[(virtpage >> 9) & 511] & ~(page_size - 1);
143 l2 = static_cast<u64 *>(get_bootmem(page_size, page_size));
144 bzero(l2, page_size);
145 l3[(virtpage >> 9) & 511] = kvirt_to_phys(l2) | 7;
147 l2 = static_cast<u64 *>(phys_to_kvirt(l2phys));
// Final 2-MiB mapping: 0x187 == Global|PageSize(2MiB)|User|Write|Present.
150 l2[virtpage & 511] = (physpage << 21) | 0x187;
// Allocate and clear the struct-Page array covering all physical pages.
153 size_t pages_size = mem_end * sizeof(Mem::Page);
154 Mem::pages = static_cast<Mem::Page *>(get_bootmem(pages_size, 8));
// NOTE(review): suspected bug -- pages_size is a BYTE count, but it is
// added to a Mem::Page pointer, so the arithmetic scales by
// sizeof(Mem::Page) a second time; last_page likely should be
// Mem::pages + mem_end - 1.  Confirm the declared type of Mem::last_page
// before changing.
155 Mem::last_page = Mem::pages + pages_size - 1;
156 bzero(Mem::pages, pages_size);
// Populate the zone fallback lists, most-preferred first, according to
// how much RAM exists; highzonestart/dma32zonestart look like page-number
// boundaries between the zones.
158 int normal = 0, dma = 0;
159 uintptr_t highstart = highzonestart;
160 uintptr_t dma32start = dma32zonestart;
162 if (mem_end > highstart) {
163 normalzonelist[normal++] = &highzone;
164 highzone.init(highstart, mem_end - highstart);
// NOTE(review): an else branch (presumably clamping highstart to
// mem_end) appears to be in lines missing from this view.
169 if (mem_end > dma32start) {
170 normalzonelist[normal++] = &dma32zone;
171 dma32zonelist[dma++] = &dma32zone;
172 dma32zone.init(dma32start, highstart - dma32start);
174 dma32start = mem_end;
// The ISA-DMA zone (lowest memory) is always present and least preferred.
177 normalzonelist[normal++] = &isadmazone;
178 dma32zonelist[dma++] = &isadmazone;
180 isadmazone.init(mem_start, dma32start);