// arch/x86/entry.S - x64 entry points (booting and traps)
//
// This software is copyright (c) 2006 Scott Wood.
//
// This software is provided 'as-is', without any express or implied warranty.
// In no event will the authors or contributors be held liable for any damages
// arising from the use of this software.
//
// Permission is hereby granted to everyone, free of charge, to use, copy,
// modify, prepare derivative works of, publish, distribute, perform,
// sublicense, and/or sell copies of the Software, provided that the above
// copyright notice and disclaimer of warranty be included in all copies or
// substantial portions of this software.

#include

        .org 0
        .code32
        .global _start
        .align 4
multiboot_hdr:
        .long 0x1badb002                   // Multiboot magic

        // Multiboot flags:
        // bit 0: 4KiB-align all boot modules
        // bit 1: must include memory size and map
        // bit 2: must include video mode table
        // bit 16: load addresses in this header are valid
        //         and should be used instead of the ELF header
        .long 0x00010003

        // checksum: -(magic + flags), update if flags change
        .long 0xe4514ffb

        .long multiboot_hdr - KERNEL_START // header_addr
        .long 0x00200000                   // load_addr
        .long 0                            // load_end_addr: load whole file
        .long bss_end - KERNEL_START       // bss_end_addr
        .long _start - KERNEL_START        // entry_addr

_start:
        cld
        cmpl $0x2badb002, %eax // magic left in %eax by a multiboot loader
        jne no_multiboot

        lgdt x64_gdtr_phys + 6 - KERNEL_START
        ljmp $0x10, $using_our_gdt - KERNEL_START

using_our_gdt:
        movw $0x08, %ax
        movw %ax, %ds
        movw %ax, %es
        movw %ax, %fs
        movw %ax, %gs
        movw %ax, %ss

        movl %ebx, %esi // Save the multiboot pointer somewhere
                        // it won't be clobbered by CPUID

        // This gives 256 bytes to Threads::Thread; if it gets larger
        // this needs to be updated (as well as the code at high_vaddr).
        movl $x64_init_stack + 3840 - KERNEL_START, %esp

        // Test for CPUID by checking whether the ID flag
        // (EFLAGS bit 21) can be toggled
        pushfl
        popl %eax
        movl %eax, %ebx
        xorl $0x00200000, %eax
        pushl %eax
        popfl
        pushfl
        popl %eax

        cmpl %eax, %ebx
        je no_long_mode

        // Test for long mode (CPUID leaf 0x80000001, EDX bit 29)
        movl $0x80000000, %eax
        cpuid
        cmpl $0x80000000, %eax
        jbe no_long_mode
        movl $0x80000001, %eax
        cpuid
        btl $29, %edx
        jnc no_long_mode

        movl $0xc0000080, %ecx // Extended Feature Enable Register (EFER)
        xorl %edx, %edx
        movl $0x100, %eax      // Enable long mode
        wrmsr

        // enable PAE (CR4 bit 5)
        movl %cr4, %eax
        btsl $5, %eax
        movl %eax, %cr4

        // Set page table attributes (present, writable, user)
        orl $7, x64_init_ptbl_l4 - KERNEL_START
        orl $7, x64_init_ptbl_l3 - KERNEL_START
//      orl $7, x64_init_ptbl_l2 - KERNEL_START

        // Load the initial page table
        movl $x64_init_ptbl_l4 - KERNEL_START, %eax
        movl %eax, %cr3

        // enable paging, kernel write-protect,
        // and internal floating point error handling
        movl %cr0, %eax
        orl $0x80010020, %eax
        movl %eax, %cr0

        ljmp $0x18, $in_code64 - KERNEL_START

        .code64
in_code64:
        // Set up high page tables for the 0xffffffff80000000 mapping,
        // reusing the tables previously used for the low identity
        // mapping.
        movq x64_init_ptbl_l4 - KERNEL_START, %rax
        movq %rax, x64_init_ptbl_l4 - KERNEL_START + 0xff8

        movq x64_init_ptbl_l3 - KERNEL_START, %rax
        movq %rax, x64_init_ptbl_l3 - KERNEL_START + 0xff0

        lgdt x64_gdtr + 6
        movl %esi, x64_boot_info_phys

        movq $high_vaddr, %rax
        jmp *%rax

high_vaddr:
        movq $x64_init_stack + 3840, %rsp
        jmp start_kernel

        .code32
no_multiboot:
        movl $no_multiboot_str - KERNEL_START, %esi
        jmp do_print
no_long_mode:
        movl $no_long_mode_str - KERNEL_START, %esi
do_print:
        movl $0xb8000, %edi // VGA text framebuffer

        movb (%esi), %al
        xorl %ecx, %ecx
1:      movb %al, (%edi, %ecx, 2)
        movb $14, 1(%edi, %ecx, 2) // It's not at the cursor, so use
                                   // yellow to make it stand out.
        incl %ecx
        movb (%esi, %ecx), %al
        testb %al, %al
        jnz 1b

2:      jmp 2b

no_long_mode_str:
        .string "This CPU does not support long (64-bit) mode. Use a 32-bit kernel."

no_multiboot_str:
        .string "Unrecognized bootloader; a multiboot-compliant loader is required."

        .code64

.macro pushvolatilesnordi
        push %rax
        push %rcx
        push %rdx
        push %rsi
        push %r8
        push %r9
        push %r10
        push %r11
.endm

.macro pushvolatiles
        push %rdi // must mirror the final "pop %rdi" in popvolatiles
        pushvolatilesnordi
.endm

.macro popvolatiles
        pop %r11
        pop %r10
        pop %r9
        pop %r8
        pop %rsi
        pop %rdx
        pop %rcx
        pop %rax
        pop %rdi
.endm

// Non-volatile registers must be pushed if the handler will
// need to access all of the interrupted code's registers,
// such as when producing an error dump.  Does not include
// r15, as that is usually swapped with the error code.
.macro pushall
        push %r14
        push %r13
        push %r12
        push %r11
        push %r10
        push %r9
        push %r8
        push %rdi
        push %rsi
        push %rbp
        push %rsp
        push %rbx
        push %rdx
        push %rcx
        push %rax
.endm

.macro pushallnoerr
        push %r15
        pushall
.endm

.macro popall
        pop %rax
        pop %rcx
        pop %rdx
        pop %rbx
        addq $8, %rsp // skip the saved (dummy) %rsp slot
        pop %rbp
        pop %rsi
        pop %rdi
        pop %r8
        pop %r9
        pop %r10
        pop %r11
        pop %r12
        pop %r13
        pop %r14
        pop %r15
.endm

        .global x64_diverr
x64_diverr:
        pushallnoerr

        movq %rsp, %rdi
        call x64_do_diverr

        popall
        iretq

        .global x64_invalid_insn
x64_invalid_insn:
        pushallnoerr

        movq %rsp, %rdi
        call x64_do_invalid_insn

        popall
        iretq

        .global x64_gpf
x64_gpf:
        xchgq %r15, (%rsp) // get error code, saving r15 in its place
        pushall

        movq %rsp, %rdi
        movq %r15, %rsi
        call x64_do_gpf

        popall
        iretq

        .global x64_page_fault
x64_page_fault:
        xchgq %r15, (%rsp) // get error code, saving r15 in its place
        pushall

        movq %rsp, %rdi
        movq %cr2, %rsi // faulting address
        movq %r15, %rdx
        call x64_do_page_fault

        popall
        iretq

        .global x64_irq
x64_irq:
        pushvolatilesnordi
        subq $8, %rsp // Keep the stack frame 16-byte aligned

        call x64_do_irq

        movl need_resched, %eax
        testl %eax, %eax
        jnz x64_reschedule

x64_ret_irq:
        addq $8, %rsp
        popvolatiles
        iretq

x64_reschedule:
        // The cli is to make sure interrupts don't get re-enabled in
        // this thread context between the schedule and the ret from
        // IRQ.
        cli
        call schedule
        jmp x64_ret_irq

        .section ".irqs","x"
        .global x64_irqs
x64_irqs:
        .text

// Generate one stub per vector: each stub saves %rdi, loads its vector
// number, and jumps to the common x64_irq code.  The stub addresses are
// collected into a table (x64_irqs) in the ".irqs" section.
.macro irq from,to
1:      push %rdi
        movq $\from, %rdi
        jmp x64_irq

        .section ".irqs","x"
        .quad 1b
        .text

        .if \to-\from
        irq (\from+1),\to
        .endif
.endm

.macro irq16 from,to
        irq \from,(\from+15)

        .if \to-\from
        irq16 (\from+16),\to
        .endif
.endm

        irq16 0,240 // vectors 0 through 255

        .global x64_new_thread
x64_new_thread:
        // Zero the GPRs so a new thread starts with a clean state
        xorq %rax, %rax
        xorq %rbx, %rbx
        xorq %rcx, %rcx
        xorq %rdx, %rdx
        xorq %rbp, %rbp
        xorq %r8, %r8
        xorq %r9, %r9
        xorq %r10, %r10
        xorq %r11, %r11
        xorq %r12, %r12
        xorq %r13, %r13
        xorq %r14, %r14
        xorq %r15, %r15

        call sched_new_thread
        pop %rsi // thread entry point
        pop %rdi // argument to the thread function
        call *%rsi
        call exit_thread
        ud2a // exit_thread should not return; trap if it does
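
// For reference: the C handlers called above (x64_do_diverr,
// x64_do_invalid_insn, x64_do_gpf, x64_do_page_fault) receive a pointer
// to the registers saved by pushall as their first argument (%rdi).
// Below is a minimal sketch of a matching C struct, assuming the pushall
// layout in this file; the struct and field names are illustrative only
// and are not defined here:
//
//      struct x64_saved_regs {
//              uint64_t rax, rcx, rdx, rbx;
//              uint64_t rsp;             // dummy slot; popall skips it
//              uint64_t rbp, rsi, rdi;
//              uint64_t r8, r9, r10, r11, r12, r13, r14;
//              uint64_t r15;             // on error-code traps, this slot
//                                        // holds the interrupted r15,
//                                        // swapped in by the xchgq
//              uint64_t rip, cs, rflags; // hardware-pushed frame
//              uint64_t orig_rsp, ss;
//      };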