1 // arch/x86/mem.cc -- x86 paging and misc. memory management
3 // This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
5 // Permission is hereby granted, free of charge, to any person obtaining a copy of
6 // this software and associated documentation files (the "Software"), to deal with
7 // the Software without restriction, including without limitation the rights to
8 // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
9 // of the Software, and to permit persons to whom the Software is furnished to do
10 // so, subject to the following conditions:
12 // * Redistributions of source code must retain the above copyright notice,
13 // this list of conditions and the following disclaimers.
15 // * Redistributions in binary form must reproduce the above copyright notice,
16 // this list of conditions and the following disclaimers in the
17 // documentation and/or other materials provided with the distribution.
19 // * The names of the Software's authors and/or contributors
20 // may not be used to endorse or promote products derived from
21 // this Software without specific prior written permission.
23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
25 // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 // CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
31 #include <kern/kernel.h>
33 #include <kern/pagealloc.h>
34 #include <kern/libc.h>
36 // Initial page tables have the first 4 MiB mapped, using large pages.
// Boot-time page directory (L2 table): 1024 entries, each covering 4 MiB of
// virtual address space; 4096-byte alignment is an x86 hardware requirement
// for a page directory.  (Initializer body not visible in this excerpt.)
38 __attribute__((aligned(4096))) u32 x86_init_ptbl_l2[1024] = {
44 using Mem::PageAllocZone;
// zonelist(x) == x*(x+1)/2 - 1 — triangular-number offset of zone x's
// fallback list within pagezonelists_real[] (the lists are packed end to
// end; see the listpos[] bookkeeping below).
// NOTE(review): zonelist(0) expands to -1, so zone 0's segment would begin
// at pagezonelists_real[-1] — verify the indexing for the lowest zone.
50 #define zonelist(x) ((x) * ((x) + 1) / 2 - 1)
// One page-allocator zone per physical memory region type; num_zones is
// declared elsewhere in the project.
52 PageAllocZone pagezones[num_zones];
// Backing storage for all per-zone fallback lists, packed contiguously.
53 PageAllocZone *pagezonelists_real[zonelist(num_zones + 1)];
// Per-zone list heads; each points at that zone's segment inside
// pagezonelists_real (filled in by the init code below).
56 PageAllocZone **pagezonelists[Priv::num_zones];
// Boot-time bump allocator: next free byte starts just past the kernel
// image (_end is the linker-provided end-of-kernel symbol).
58 uintptr_t next_free_bootmem = reinterpret_cast<uintptr_t>(&_end);
// Drop the boot-time identity mapping of low memory and mark the kernel
// mapping global.  Called once, early, after the kernel is running at its
// high virtual address (so the low alias is no longer needed).
63 void early_adjust_mappings()
65 using Mem::get_bootmem;
67 // Clear low-address mapping and invalidate TLB
68 x86_init_ptbl_l2[0] = 0;
// Reloading CR3 with the page-directory base flushes the (non-global)
// TLB entries, discarding the stale identity mapping.
69 asm volatile("movl %0, %%cr3" : : "r" (kvirt_to_phys(x86_init_ptbl_l2)));
71 // Mark the ktext mapping global now that it's not mapped at address
72 // zero. FIXME: check for and enable PGE
// 0x100 is the Global bit (bit 8) of a page-directory entry; entry 0x200
// covers virtual 0x200 << 22 == 0x80000000 — presumably where the kernel
// text is mapped (TODO confirm against the linker script / PHYSMEM layout).
74 x86_init_ptbl_l2[0x200] |= 0x100;
// Interior of the boot-time memory-manager initialization (the enclosing
// function's signature is above this excerpt): maps physical memory into
// the kernel's linear window, allocates the page-frame array, and builds
// the per-zone allocator fallback lists.
79 using Mem::get_bootmem;
81 // phys_to_ktext can be used for the first
82 // 4MiB-minus-size-of-kernel of bootmem allocations.
// Map each remaining 4 MiB physical page (page 0 is already covered by the
// initial tables) into the window starting at PHYSMEM_START.
84 for (uintptr_t physpage = 1; physpage <= (mem_end - 1) / (4096*1024);
87 uintptr_t virtpage = physpage + (PHYSMEM_START >> 22);
// 0x187 = Global | PS (4 MiB page) | User | Writable | Present (standard
// x86 PDE bits); "& 1023" wraps the index into the 1024-entry directory.
88 x86_init_ptbl_l2[virtpage & 1023] = (physpage << 22) | 0x187;
// Size in bytes of the Mem::Page bookkeeping array — one entry per
// physical page frame.
91 size_t pages_size = (mem_end / page_size) * sizeof(Mem::Page);
92 Mem::pages = static_cast<Mem::Page *>(get_bootmem(pages_size, 4));
// NOTE(review): pages_size is a BYTE count, but this pointer arithmetic
// scales by sizeof(Mem::Page), so last_page overshoots the array unless
// sizeof(Mem::Page) == 1.  Should this be
// Mem::pages + (mem_end / page_size) - 1?
93 Mem::last_page = Mem::pages + pages_size - 1;
94 bzero(Mem::pages, pages_size);
// Next free slot within each zone's segment of pagezonelists_real.
96 int listpos[num_zones];
// Walk zones from highest to lowest, initializing each zone that overlaps
// [mem_start, mem_end] and appending it to the fallback lists of all
// zones at or above it.
98 for (int i = num_zones - 1; i >= 0; i--) {
99 listpos[i] = zonelist(i);
// pagezonelists is ordered highest zone first (index 0 == zone
// num_zones-1), each head pointing at that zone's segment.
100 pagezonelists[num_zones - 1 - i] = &pagezonelists_real[listpos[i]];
102 u64 rstart = mem_zone_regions[i].start;
103 u64 rend = mem_zone_regions[i].end;
// Region overlaps available memory?  (The "+ 1" below suggests rend is
// inclusive — TODO confirm against mem_zone_regions' definition.)
105 if (mem_start <= rend && mem_end >= rstart) {
// Clamp the region to available memory (clamping statements elided in
// this excerpt).
106 if (rstart < mem_start)
111 ulong page_start = rstart / page_size;
112 ulong page_len = (rend - rstart + 1) / page_size;
114 pagezones[i].init(page_start, page_len);
// Zone i is a valid fallback for every zone j >= i: append it to each
// such list, asserting we stay within list j's segment.
116 for (int j = i; j < num_zones; j++) {
117 assert(listpos[j] < zonelist(j + 1));
118 pagezonelists_real[listpos[j]++] = &pagezones[i];