// arch/x64/mem.cc -- x64 misc. memory management
//
// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
// 
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal with
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following condition:
// 
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
// 
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
// SOFTWARE.

#include <kern/kernel.h>
#include <kern/mem.h>
#include <kern/pagealloc.h>
#include <kern/libc.h>

// Initial page tables have the first 4 MiB mapped, using large pages.

__attribute__((aligned(4096))) u64 x64_init_ptbl_l2[512] = {
        0x87,
        0x00200087,
};
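
// For reference, the x86-64 page-table entry flag bits used in this file
// decode as follows: 0x001 P (present), 0x002 R/W (writable), 0x004 U/S
// (user), 0x080 PS (large page), 0x100 G (global).  So 0x87 is a present,
// writable 2-MiB mapping; 0x187 is the same plus the global bit; and the
// 7 ORed into table pointers is P|R/W|U/S.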

// The ORing of 7 into these entries is done in entry.S; doing it
// here would cause the compiler to emit runtime code to do it.

__attribute__((aligned(4096))) u64 x64_init_ptbl_l3[512] = {
        reinterpret_cast<u64>(x64_init_ptbl_l2) - KERNEL_START
};

__attribute__((aligned(4096))) u64 x64_init_ptbl_l4[512] = {
        reinterpret_cast<u64>(x64_init_ptbl_l3) - KERNEL_START
};
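
// Note that entry.S presumably also installs high (KERNEL_START-based)
// aliases of these tables: early_adjust_mappings() below clears the
// low-address L4/L3 entries while the kernel keeps executing, which only
// works if the kernel text is reachable through a high mapping as well.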

extern int _end;

using Mem::PageAllocZone;

namespace Arch {
        size_t mem_end;
        
        PageAllocZone *pagezones[3];

        namespace Priv {
                PageAllocZone isadmazone, dma32zone, highzone;
        
                PageAllocZone *isadmazonelist[2] = { &isadmazone, NULL };
                PageAllocZone *dma32zonelist[3];
                PageAllocZone *normalzonelist[4];
        }
        
        // Per-allocation-type fallback lists, most-preferred zone first;
        // the unfilled lists are populated in map_physmem().
        PageAllocZone **pagezonelists[3] = { Priv::normalzonelist,
                                             Priv::dma32zonelist,
                                             Priv::isadmazonelist };

        uintptr_t next_free_bootmem = reinterpret_cast<uintptr_t>(&_end) -
                                      KERNEL_START + PHYSMEM_START;
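        // (_end marks the link-time end of the kernel image, a
        // KERNEL_START-based address; subtracting KERNEL_START converts it
        // to a physical address, and adding PHYSMEM_START relocates it into
        // the physical-memory direct-map window, where bootmem allocations
        // are handed out.)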

        namespace Priv {
                void early_adjust_mappings()
                {
                        using Mem::get_bootmem;
                
                        // Clear the low-address mappings and invalidate the
                        // TLB by reloading CR3.
                        x64_init_ptbl_l4[0] = 0;
                        x64_init_ptbl_l3[0] = 0;
                        asm volatile("movq %0, %%cr3" : : "r" (kvirt_to_phys(x64_init_ptbl_l4)));
                        
                        // Mark the ktext mapping global now that it's not
                        // mapped at address zero.
                        
                        x64_init_ptbl_l2[0] |= 0x100;
                        x64_init_ptbl_l2[1] |= 0x100;
                        
                        // Install L3 and L2 tables for the physmem window;
                        // L4 slot 0x100 corresponds to PHYSMEM_START.
                        u64 l3phys = kvirt_to_phys(get_bootmem(page_size, page_size));
                        u64 *l3 = static_cast<u64 *>(phys_to_ktext(l3phys));
                        bzero(l3, page_size);
                        x64_init_ptbl_l4[0x100] = l3phys | 7;

                        u64 l2phys = kvirt_to_phys(get_bootmem(page_size, page_size));
                        u64 *l2 = static_cast<u64 *>(phys_to_ktext(l2phys));
                        bzero(l2, page_size);
                        l3[0] = l2phys | 7;

                        // Map at least as much as is mapped in ktext, so that
                        // things like the VGA driver can use it early without
                        // having to know about phys_to_ktext, and so map_physmem
                        // doesn't need to special-case the use of phys_to_ktext
                        // for the first couple of pages.
                        
                        l2[0] = 0x187;
                        l2[1] = 0x00200187;
                }
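
                // For reference, map_physmem() below works in units of
                // 2-MiB pages.  A virtual address selects table entries as
                //
                //     l4 index = (vaddr >> 39) & 511
                //     l3 index = (vaddr >> 30) & 511
                //     l2 index = (vaddr >> 21) & 511
                //
                // and since virtpage == vaddr >> 21, those become
                // (virtpage >> 18) & 511, (virtpage >> 9) & 511, and
                // virtpage & 511 in the loop.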

                void map_physmem()
                {
                        using Mem::get_bootmem;
                
                        // phys_to_ktext can be used for the first
                        // 2MiB-minus-size-of-kernel of bootmem allocations.
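                        //
                        // The loop below starts at physpage 2 because the
                        // first two 2-MiB pages are already mapped by
                        // early_adjust_mappings(); mem_end is in page-sized
                        // (4-KiB) units, so dividing by 512 converts it to
                        // 2-MiB units.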
                
                        for (uintptr_t physpage = 2; physpage <= (mem_end - 1) / 512; physpage++)
                        {
                                uintptr_t virtpage = physpage + (PHYSMEM_START >> 21);
                                
                                u64 *l3;
                                u64 l3phys = x64_init_ptbl_l4[(virtpage >> 18) & 511] &
                                             ~(page_size - 1);
                                
                                if (!l3phys) {
                                        l3 = static_cast<u64 *>(get_bootmem(page_size, page_size));
                                        bzero(l3, page_size);
                                        x64_init_ptbl_l4[(virtpage >> 18) & 511] =
                                                kvirt_to_phys(l3) | 7;
                                } else {
                                        l3 = static_cast<u64 *>(phys_to_kvirt(l3phys));
                                }
                                
                                u64 *l2;
                                u64 l2phys = l3[(virtpage >> 9) & 511] & ~(page_size - 1);

                                if (!l2phys) {
                                        l2 = static_cast<u64 *>(get_bootmem(page_size, page_size));
                                        bzero(l2, page_size);
                                        l3[(virtpage >> 9) & 511] = kvirt_to_phys(l2) | 7;
                                } else {
                                        l2 = static_cast<u64 *>(phys_to_kvirt(l2phys));
                                }
                                
                                // Global, writable 2-MiB mapping.
                                l2[virtpage & 511] = (physpage << 21) | 0x187;
                        }
                        
                        // One Mem::Page struct per page frame, indexed by PFN.
                        size_t pages_size = mem_end * sizeof(Mem::Page);
                        Mem::pages = static_cast<Mem::Page *>(get_bootmem(pages_size, 8));
                        // Pointer arithmetic is in Page-sized units, so index
                        // by element count, not by byte size.
                        Mem::last_page = Mem::pages + mem_end - 1;
                        bzero(Mem::pages, pages_size);
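
                        // Zone layout, as set up below: [mem_start,
                        // dma32zonestart) is the ISA-DMA zone,
                        // [dma32zonestart, highzonestart) is the 32-bit-DMA
                        // zone, and [highzonestart, mem_end) is the high
                        // zone.  Zones whose start lies at or beyond mem_end
                        // are skipped, and each list falls back to the
                        // more-constrained zones.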
                        
                        int normal = 0, dma = 0;
                        uintptr_t highstart = highzonestart;
                        uintptr_t dma32start = dma32zonestart;
                        
                        if (mem_end > highstart) {
                                normalzonelist[normal++] = &highzone;
                                highzone.init(highstart, mem_end - highstart);
                        } else {
                                highstart = mem_end;
                        }
                        
                        if (mem_end > dma32start) {
                                normalzonelist[normal++] = &dma32zone;
                                dma32zonelist[dma++] = &dma32zone;
                                dma32zone.init(dma32start, highstart - dma32start);
                        } else {
                                dma32start = mem_end;
                        }

                        normalzonelist[normal++] = &isadmazone;
                        dma32zonelist[dma++] = &isadmazone;
                        
                        isadmazone.init(mem_start, dma32start);
                }
        }
}
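
// For orientation (a sketch inferred from usage in this file, not the
// real definitions, which live elsewhere in the tree): the conversion
// helpers used above are assumed to behave roughly as
//
//     phys_to_kvirt(p)  ->  PHYSMEM_START + p   // physmem direct map
//     phys_to_ktext(p)  ->  KERNEL_START + p    // kernel-image window
//     kvirt_to_phys(v)  ->  the inverse, according to which window the
//                           address v falls in
//
// with page_size presumably 4096.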