// arch/x64/mem.cc -- x64 misc. memory management
//
// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal with
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
//     * Redistributions of source code must retain the above copyright notice,
//       this list of conditions and the following disclaimers.
//
//     * Redistributions in binary form must reproduce the above copyright notice,
//       this list of conditions and the following disclaimers in the
//       documentation and/or other materials provided with the distribution.
//
//     * The names of the Software's authors and/or contributors
//       may not be used to endorse or promote products derived from
//       this Software without specific prior written permission.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
// SOFTWARE.

#include <kern/kernel.h>
#include <kern/mem.h>
#include <kern/pagealloc.h>
#include <kern/libc.h>

// Initial page tables have the first 4 MiB mapped, using large pages.
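// Each entry below is a 2 MiB large-page PDE: bit 0 (present), bit 1
// (writable), bit 2 (user), and bit 7 (page size) are set, so 0x87 maps
// physical address 0 and 0x00200087 maps physical address 0x200000.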

__attribute__((aligned(4096))) u64 x64_init_ptbl_l2[512] = {
        0x87,
        0x00200087,
};

// The ORing of 7 into these entries will be done in entry.S;
// doing it here causes the compiler to emit runtime code
// to do it.
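// (7 = present | writable | user, as above; until entry.S ORs it in,
// these entries hold only the tables' physical addresses.)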

__attribute__((aligned(4096))) u64 x64_init_ptbl_l3[512] = {
        reinterpret_cast<u64>(x64_init_ptbl_l2) - KERNEL_START
};

__attribute__((aligned(4096))) u64 x64_init_ptbl_l4[512] = {
        reinterpret_cast<u64>(x64_init_ptbl_l3) - KERNEL_START
};

extern int _end;

using Mem::PageAllocZone;

namespace Arch {
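        // Top of physical memory, in 4 KiB pages (see map_physmem() below,
        // which walks physical memory up to mem_end).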
        size_t mem_end;
        
        PageAllocZone *pagezones[3];

        namespace Priv {
                PageAllocZone isadmazone, dma32zone, highzone;
        
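                // Fallback lists for the allocator, in decreasing order of
                // preference; each array is sized to leave a terminating NULL
                // (static storage zero-initializes the slots that
                // map_physmem() does not fill in).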
                PageAllocZone *isadmazonelist[2] = { &isadmazone, NULL };
                PageAllocZone *dma32zonelist[3];
                PageAllocZone *normalzonelist[4];
        }
        
        PageAllocZone **pagezonelists[3] = { Priv::normalzonelist,
                                             Priv::dma32zonelist,
                                             Priv::isadmazonelist };
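        // A caller is expected to walk a zone list in order until an
        // allocation succeeds; a minimal sketch, assuming a hypothetical
        // alloc_pages() method on PageAllocZone (illustrative, not
        // necessarily this kernel's actual interface):
        //
        //     for (PageAllocZone **z = pagezonelists[zone]; *z; z++)
        //             if (Mem::Page *page = (*z)->alloc_pages(count))
        //                     return page;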
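        // First free bootmem address: the end of the loaded kernel image,
        // translated from the kernel-text mapping into the physmem mapping.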
        uintptr_t next_free_bootmem = reinterpret_cast<uintptr_t>(&_end) -
                                      KERNEL_START + PHYSMEM_START;

        namespace Priv {
                void early_adjust_mappings()
                {
                        using Mem::get_bootmem;
                
                        // Clear low-address mappings and invalidate TLB
                        x64_init_ptbl_l4[0] = 0;
                        x64_init_ptbl_l3[0] = 0;
                        asm volatile("movq %0, %%cr3" : : "r" (kvirt_to_phys(x64_init_ptbl_l4)));
                        
                        // Mark the ktext mapping global now that it's not mapped at address
                        // zero.
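                        // Bit 8 (0x100) is the Global bit: with CR4.PGE set,
                        // global translations survive the CR3 reload above,
                        // which is why it is set only after the low-address
                        // identity mapping has been flushed.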
                        
                        x64_init_ptbl_l2[0] |= 0x100;
                        x64_init_ptbl_l2[1] |= 0x100;
                        
                        u64 l3phys = kvirt_to_phys(get_bootmem(page_size, page_size));
                        u64 *l3 = static_cast<u64 *>(phys_to_ktext(l3phys));
                        bzero(l3, page_size);
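                        // Slot 0x100 (256) is the first PML4 entry of the
                        // upper canonical half (0xffff800000000000), which is
                        // presumably where PHYSMEM_START lives.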
                        x64_init_ptbl_l4[0x100] = l3phys | 7;
        
                        u64 l2phys = kvirt_to_phys(get_bootmem(page_size, page_size));
                        u64 *l2 = static_cast<u64 *>(phys_to_ktext(l2phys));
                        bzero(l2, page_size);
                        l3[0] = l2phys | 7;
        
                        // Map at least as much as is mapped in ktext, so that
                        // things like the VGA driver can use it early without
                        // having to know about phys_to_ktext, and so map_physmem
                        // doesn't need to special-case the use of phys_to_ktext
                        // for the first couple pages.
                        
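                        // 0x187 = global | 2 MiB page size | user | writable |
                        // present: the same large-page flags the ktext mapping
                        // now carries.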
                        l2[0] = 0x187;
                        l2[1] = 0x00200187;
                }
        
                void map_physmem()
                {
                        using Mem::get_bootmem;
                
                        // phys_to_ktext can be used for the first
                        // 2MiB-minus-size-of-kernel of bootmem allocations.
                
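                        // physpage counts 2 MiB large pages, while mem_end is
                        // in 4 KiB pages (512 per large page); pages 0 and 1
                        // were already mapped by early_adjust_mappings().
                        // Each PML4 slot covers 2^18 large pages and each
                        // PDPT slot 2^9, hence the shifts by 18 and 9 below.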
                        for (uintptr_t physpage = 2; physpage <= (mem_end - 1) / 512; physpage++)
                        {
                                uintptr_t virtpage = physpage + (PHYSMEM_START >> 21);
                                
                                u64 *l3;
                                u64 l3phys = x64_init_ptbl_l4[(virtpage >> 18) & 511] &
                                             ~(page_size - 1);
                                
                                if (!l3phys) {
                                        l3 = static_cast<u64 *>(get_bootmem(page_size, page_size));
                                        bzero(l3, page_size);
                                        x64_init_ptbl_l4[(virtpage >> 18) & 511] =
                                                kvirt_to_phys(l3) | 7;
                                } else {
                                        l3 = static_cast<u64 *>(phys_to_kvirt(l3phys));
                                }
                                
                                u64 *l2;
                                u64 l2phys = l3[(virtpage >> 9) & 511] & ~(page_size - 1);
        
                                if (!l2phys) {
                                        l2 = static_cast<u64 *>(get_bootmem(page_size, page_size));
                                        bzero(l2, page_size);
                                        l3[(virtpage >> 9) & 511] = kvirt_to_phys(l2) | 7;
                                } else {
                                        l2 = static_cast<u64 *>(phys_to_kvirt(l2phys));
                                }
                                
                                l2[virtpage & 511] = (physpage << 21) | 0x187;
                        }
                        
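                        // Allocate the page-frame database: one Mem::Page per
                        // 4 KiB physical page up to mem_end. Note that
                        // pages_size is in bytes, so last_page must be
                        // computed from mem_end, not from pages_size.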
                        size_t pages_size = mem_end * sizeof(Mem::Page);
                        Mem::pages = static_cast<Mem::Page *>(get_bootmem(pages_size, 8));
                        Mem::last_page = Mem::pages + mem_end - 1;
                        bzero(Mem::pages, pages_size);
                        
                        int normal = 0, dma = 0;
                        uintptr_t highstart = highzonestart;
                        uintptr_t dma32start = dma32zonestart;
                        
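                        // Carve physical memory into up to three zones:
                        // [mem_start, dma32start) for ISA DMA (presumably
                        // below 16 MiB), [dma32start, highstart) for 32-bit
                        // DMA (presumably below 4 GiB), and [highstart,
                        // mem_end) above that. Zones are appended high-first,
                        // so the normal list prefers high memory.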
                        if (mem_end > highstart) {
                                normalzonelist[normal++] = &highzone;
                                highzone.init(highstart, mem_end - highstart);
                        } else {
                                highstart = mem_end;
                        }
                        
                        if (mem_end > dma32start) {
                                normalzonelist[normal++] = &dma32zone;
                                dma32zonelist[dma++] = &dma32zone;
                                dma32zone.init(dma32start, highstart - dma32start);
                        } else {
                                dma32start = mem_end;
                        }
        
                        normalzonelist[normal++] = &isadmazone;
                        dma32zonelist[dma++] = &isadmazone;
                        
                        isadmazone.init(mem_start, dma32start - mem_start);
                }
        }
}