// arch/x64/mem.cc -- x64 misc. memory management
//
// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
//
// This software is provided 'as-is', without any express or implied warranty.
// In no event will the authors or contributors be held liable for any damages
// arising from the use of this software.
//
// Permission is hereby granted to everyone, free of charge, to use, copy,
// modify, prepare derivative works of, publish, distribute, perform,
// sublicense, and/or sell copies of the Software, provided that the above
// copyright notice and disclaimer of warranty be included in all copies or
// substantial portions of this software.

#include <kern/kernel.h>
#include <kern/mem.h>
#include <kern/pagealloc.h>
#include <kern/libc.h>
// Initial page tables have the first 4 MiB mapped, using large pages.
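// Each entry is a 2 MiB large-page PDE: 0x87 sets the present, writable,
// user, and page-size (PS) bits, and 0x00200087 maps the second 2 MiB at
// physical address 0x200000.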

__attribute__((aligned(4096))) u64 x64_init_ptbl_l2[512] = {
	0x87,
	0x00200087,
};

// The ORing of 7 (present | writable | user) into these entries is
// done in entry.S; doing it here would cause the compiler to emit
// runtime code to do it.

__attribute__((aligned(4096))) u64 x64_init_ptbl_l3[512] = {
	reinterpret_cast<u64>(x64_init_ptbl_l2) - KERNEL_START
};

__attribute__((aligned(4096))) u64 x64_init_ptbl_l4[512] = {
	reinterpret_cast<u64>(x64_init_ptbl_l3) - KERNEL_START
};

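// _end is placed by the linker at the end of the kernel image; boot-time
// allocation begins just past it (see next_free_bootmem below).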
extern int _end;

using Mem::PageAllocZone;

namespace Arch {
	size_t mem_end;

	PageAllocZone *pagezones[3];

	namespace Priv {
		PageAllocZone isadmazone, dma32zone, highzone;

		PageAllocZone *isadmazonelist[2] = { &isadmazone, NULL };
		PageAllocZone *dma32zonelist[3];
		PageAllocZone *normalzonelist[4];
	}
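
	// Each zone list is a NULL-terminated search chain, most-preferred
	// zone first; normalzonelist and dma32zonelist are populated by
	// map_physmem().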
	PageAllocZone **pagezonelists[3] = { Priv::normalzonelist,
	                                     Priv::dma32zonelist,
	                                     Priv::isadmazonelist };

	uintptr_t next_free_bootmem = reinterpret_cast<uintptr_t>(&_end) -
	                              KERNEL_START + PHYSMEM_START;

	namespace Priv {
		void early_adjust_mappings()
		{
			using Mem::get_bootmem;

			// Clear the low-address mappings and invalidate the TLB
			// by reloading CR3.
			x64_init_ptbl_l4[0] = 0;
			x64_init_ptbl_l3[0] = 0;
			asm volatile("movq %0, %%cr3" : : "r" (kvirt_to_phys(x64_init_ptbl_l4)));

			// Mark the ktext mapping global now that it's not mapped
			// at address zero.
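			// Bit 8 is the Global (G) flag: global TLB entries survive
			// CR3 reloads, which is only safe now that the duplicate
			// mapping at address zero has been removed.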

			x64_init_ptbl_l2[0] |= 0x100;
			x64_init_ptbl_l2[1] |= 0x100;

			u64 l3phys = kvirt_to_phys(get_bootmem(page_size, page_size));
			u64 *l3 = static_cast<u64 *>(phys_to_ktext(l3phys));
			bzero(l3, page_size);
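			// L4 slot 0x100 is the first entry of the upper canonical
			// half (vaddr 0xffff800000000000); presumably PHYSMEM_START
			// resides there.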
			x64_init_ptbl_l4[0x100] = l3phys | 7;

			u64 l2phys = kvirt_to_phys(get_bootmem(page_size, page_size));
			u64 *l2 = static_cast<u64 *>(phys_to_ktext(l2phys));
			bzero(l2, page_size);
			l3[0] = l2phys | 7;

			// Map at least as much as is mapped in ktext, so that
			// things like the VGA driver can use it early without
			// having to know about phys_to_ktext, and so map_physmem
			// doesn't need to special-case the use of phys_to_ktext
			// for the first couple of pages.

			// Same two large-page mappings as ktext, plus the global bit.
			l2[0] = 0x187;
			l2[1] = 0x00200187;
		}

		void map_physmem()
		{
			using Mem::get_bootmem;

			// phys_to_ktext can be used for the first 2 MiB (minus the
			// size of the kernel) of bootmem allocations.

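			// mem_end counts 4 KiB pages, so (mem_end - 1) / 512 is the
			// index of the last 2 MiB large page; pages 0 and 1 were
			// already mapped by early_adjust_mappings().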
			for (uintptr_t physpage = 2; physpage <= (mem_end - 1) / 512; physpage++)
			{
				uintptr_t virtpage = physpage + (PHYSMEM_START >> 21);
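				// virtpage is a 2 MiB page number: bits 18-26 index
				// the L4 table, bits 9-17 the L3 table, and bits 0-8
				// the L2 table.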

				u64 *l3;
				u64 l3phys = x64_init_ptbl_l4[(virtpage >> 18) & 511] &
				             ~(page_size - 1);

				if (!l3phys) {
					l3 = static_cast<u64 *>(get_bootmem(page_size, page_size));
					bzero(l3, page_size);
					x64_init_ptbl_l4[(virtpage >> 18) & 511] =
						kvirt_to_phys(l3) | 7;
				} else {
					l3 = static_cast<u64 *>(phys_to_kvirt(l3phys));
				}

				u64 *l2;
				u64 l2phys = l3[(virtpage >> 9) & 511] & ~(page_size - 1);

				if (!l2phys) {
					l2 = static_cast<u64 *>(get_bootmem(page_size, page_size));
					bzero(l2, page_size);
					l3[(virtpage >> 9) & 511] = kvirt_to_phys(l2) | 7;
				} else {
					l2 = static_cast<u64 *>(phys_to_kvirt(l2phys));
				}

				l2[virtpage & 511] = (physpage << 21) | 0x187;
			}

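			// Allocate the page-frame database: one Mem::Page entry per
			// 4 KiB physical page.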
			size_t pages_size = mem_end * sizeof(Mem::Page);
			Mem::pages = static_cast<Mem::Page *>(get_bootmem(pages_size, 8));
			Mem::last_page = Mem::pages + mem_end - 1; // pointer arithmetic is in Page units, not bytes
			bzero(Mem::pages, pages_size);

			int normal = 0, dma = 0;
			uintptr_t highstart = highzonestart;
			uintptr_t dma32start = dma32zonestart;

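			// Carve physical memory into zones from the top down; a zone
			// is skipped if this machine has no memory in its range.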
			if (mem_end > highstart) {
				normalzonelist[normal++] = &highzone;
				highzone.init(highstart, mem_end - highstart);
			} else {
				highstart = mem_end;
			}

			if (mem_end > dma32start) {
				normalzonelist[normal++] = &dma32zone;
				dma32zonelist[dma++] = &dma32zone;
				dma32zone.init(dma32start, highstart - dma32start);
			} else {
				dma32start = mem_end;
			}

			normalzonelist[normal++] = &isadmazone;
			dma32zonelist[dma++] = &isadmazone;

			isadmazone.init(mem_start, dma32start - mem_start); // init(start, length), as above
		}
	}
}