/*
 * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Distributed under the terms of the MIT License.
 */


#include "x86_syscalls.h"

#include <KernelExport.h>

#ifdef _COMPAT_MODE
#	include <commpage_compat.h>
#endif
#include <cpu.h>
#ifdef _COMPAT_MODE
#	include <elf.h>
#endif
#include <smp.h>


// SYSCALL handler (in interrupts.S).
extern "C" void x86_64_syscall_entry(void);


#ifdef _COMPAT_MODE

// SYSCALL/SYSENTER handlers (in entry_compat.S).
extern "C" {
	void x86_64_syscall32_entry(void);
	void x86_64_sysenter32_entry(void);
}


void (*gX86SetSyscallStack)(addr_t stackTop) = NULL;


// User syscall assembly stubs, copied into the commpage below.
extern "C" void x86_user_syscall_sysenter(void);
extern unsigned int x86_user_syscall_sysenter_end;
extern "C" void x86_user_syscall_syscall(void);
extern unsigned int x86_user_syscall_syscall_end;

extern "C" void x86_sysenter32_userspace_thread_exit(void);
extern unsigned int x86_sysenter32_userspace_thread_exit_end;

#endif // _COMPAT_MODE


static void
init_syscall_registers(void* dummy, int cpuNum)
{
	// Enable SYSCALL (EFER.SCE = 1).
	x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER)
		| IA32_MSR_EFER_SYSCALL);

	// Flags to clear upon entry: we want interrupts disabled and the carry,
	// trap, nested task, direction and alignment check flags cleared.
	x86_write_msr(IA32_MSR_FMASK, X86_EFLAGS_INTERRUPT | X86_EFLAGS_DIRECTION
		| X86_EFLAGS_NESTED_TASK | X86_EFLAGS_CARRY | X86_EFLAGS_TRAP
		| X86_EFLAGS_ALIGNMENT_CHECK);

	// Entry point address.
	x86_write_msr(IA32_MSR_LSTAR, (addr_t)x86_64_syscall_entry);

#ifdef _COMPAT_MODE
	// Compat (32-bit) syscall entry point address.
	x86_write_msr(IA32_MSR_CSTAR, (addr_t)x86_64_syscall32_entry);
#endif

	// Segments that will be set upon entry and return. This is very strange
	// and requires a specific ordering of segments in the GDT. Upon SYSCALL
	// entry:
	// - CS is set to IA32_STAR[47:32]
	// - SS is set to IA32_STAR[47:32] + 8
	// Upon SYSRET return to 64-bit code:
	// - CS is set to IA32_STAR[63:48] + 16
	// - SS is set to IA32_STAR[63:48] + 8
	// From this we get:
	// - Entry CS  = KERNEL_CODE_SELECTOR
	// - Entry SS  = KERNEL_CODE_SELECTOR + 8  = KERNEL_DATA_SELECTOR
	// - Return CS = USER32_CODE_SELECTOR + 16 = USER_CODE_SELECTOR
	// - Return SS = USER32_CODE_SELECTOR + 8  = USER_DATA_SELECTOR
	x86_write_msr(IA32_MSR_STAR, ((uint64)(USER32_CODE_SELECTOR) << 48)
		| ((uint64)(KERNEL_CODE_SELECTOR) << 32));
}
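// As a worked illustration of the STAR layout above (the selector values
// here are assumptions for the example, not taken from this file): with
// KERNEL_CODE_SELECTOR = 0x08 and USER32_CODE_SELECTOR = 0x1b, IA32_STAR
// would hold 0x001b000800000000, yielding entry CS/SS = 0x08/0x10 and
// 64-bit return CS/SS = 0x2b/0x23.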

// #pragma mark -


void
x86_initialize_syscall(void)
{
	// SYSCALL/SYSRET are always available on x86_64 so we just use them, no
	// need to use the commpage. Tell all CPUs to initialize the SYSCALL MSRs;
	// call_all_cpus_sync() runs the init function on every CPU and returns
	// only once all of them have finished.
	call_all_cpus_sync(&init_syscall_registers, NULL);
}


#ifdef _COMPAT_MODE

static void
set_intel_syscall_stack(addr_t stackTop)
{
	// SYSENTER loads the kernel stack pointer from this MSR, so it must be
	// updated with the current thread's kernel stack top.
	x86_write_msr(IA32_MSR_SYSENTER_ESP, stackTop);
}


static void
init_intel_syscall_registers(void* dummy, int cpuNum)
{
	x86_write_msr(IA32_MSR_SYSENTER_CS, KERNEL_CODE_SELECTOR);
	x86_write_msr(IA32_MSR_SYSENTER_ESP, 0);
	x86_write_msr(IA32_MSR_SYSENTER_EIP, (addr_t)x86_64_sysenter32_entry);

	gX86SetSyscallStack = &set_intel_syscall_stack;
}


void
x86_compat_initialize_syscall(void)
{
	// For 32-bit syscalls, fill the commpage with the right mechanism.
	call_all_cpus_sync(&init_intel_syscall_registers, NULL);

	void* syscallCode = (void*)&x86_user_syscall_sysenter;
	void* syscallCodeEnd = &x86_user_syscall_sysenter_end;

	// TODO check AMD for sysenter: AMD CPUs do not support SYSENTER in long
	// mode, so they should get the SYSCALL stub (x86_user_syscall_syscall)
	// instead.

	// Fill in the table entry.
	size_t len = (size_t)((addr_t)syscallCodeEnd - (addr_t)syscallCode);
	addr_t position = fill_commpage_compat_entry(COMMPAGE_ENTRY_X86_SYSCALL,
		syscallCode, len);

	image_id image = get_commpage_compat_image();
	elf_add_memory_image_symbol(image, "commpage_compat_syscall", position,
		len, B_SYMBOL_TYPE_TEXT);

	void* threadExitCode = (void*)&x86_sysenter32_userspace_thread_exit;
	void* threadExitCodeEnd = &x86_sysenter32_userspace_thread_exit_end;

	len = (size_t)((addr_t)threadExitCodeEnd - (addr_t)threadExitCode);
	position = fill_commpage_compat_entry(COMMPAGE_ENTRY_X86_THREAD_EXIT,
		threadExitCode, len);

	elf_add_memory_image_symbol(image, "commpage_compat_thread_exit",
		position, len, B_SYMBOL_TYPE_TEXT);
}

#endif // _COMPAT_MODE
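// A minimal sketch of how gX86SetSyscallStack is meant to be consumed (the
// wrapper and the kernel_stack_top field are illustrative assumptions; the
// real call site, presumably in the context-switch path, is outside this
// file):
//
//	static inline void
//	update_sysenter_stack(Thread* thread)
//	{
//		// NULL until init_intel_syscall_registers() has installed
//		// set_intel_syscall_stack(); callers simply skip the
//		// IA32_MSR_SYSENTER_ESP update in that case.
//		if (gX86SetSyscallStack != NULL)
//			gX86SetSyscallStack(thread->kernel_stack_top);
//	}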