/*
 * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Distributed under the terms of the MIT License.
 */


// x86_64 system call entry initialization: programs the MSRs that control
// the 64-bit SYSCALL instruction on every CPU and, when _COMPAT_MODE is
// enabled, sets up the 32-bit SYSCALL/SYSENTER compatibility path plus the
// userspace syscall stubs in the compat commpage.

#include "x86_syscalls.h"

#include <KernelExport.h>

#ifdef _COMPAT_MODE
#	include <commpage_compat.h>
#endif
#include <cpu.h>
#ifdef _COMPAT_MODE
#	include <elf.h>
#endif
#include <smp.h>


// SYSCALL handler (in interrupts.S).
extern "C" void x86_64_syscall_entry(void);


#ifdef _COMPAT_MODE

// SYSCALL/SYSENTER handlers (in entry_compat.S).
extern "C" {
	void x86_64_syscall32_entry(void);
	void x86_64_sysenter32_entry(void);
}


// Hook installed by the compat initialization below; NULL until
// x86_compat_initialize_syscall() has run. NOTE(review): presumably called
// on thread switch so SYSENTER enters on the new thread's kernel stack —
// confirm against the callers of gX86SetSyscallStack.
void (*gX86SetSyscallStack)(addr_t stackTop) = NULL;


// user syscall assembly stubs
// Each stub is paired with an *_end marker symbol so its byte length can be
// computed below when copying it into the commpage.
extern "C" void x86_user_syscall_sysenter(void);
extern unsigned int x86_user_syscall_sysenter_end;
// NOTE(review): the SYSCALL-based stub is declared but not yet used here —
// see the "TODO check AMD for sysenter" below.
extern "C" void x86_user_syscall_syscall(void);
extern unsigned int x86_user_syscall_syscall_end;

extern "C" void x86_sysenter32_userspace_thread_exit(void);
extern unsigned int x86_sysenter32_userspace_thread_exit_end;

#endif // _COMPAT_MODE


// Per-CPU setup of the MSRs controlling 64-bit SYSCALL/SYSRET. Runs on every
// CPU via call_all_cpus_sync(); both parameters are required by that callback
// signature and are unused.
static void
init_syscall_registers(void* dummy, int cpuNum)
{
	// Enable SYSCALL (EFER.SCE = 1).
	x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER)
		| IA32_MSR_EFER_SYSCALL);

	// Flags to clear upon entry. Want interrupts disabled and the direction
	// flag cleared.
	x86_write_msr(IA32_MSR_FMASK, X86_EFLAGS_INTERRUPT | X86_EFLAGS_DIRECTION
		| X86_EFLAGS_ALIGNMENT_CHECK);

	// Entry point address.
	x86_write_msr(IA32_MSR_LSTAR, (addr_t)x86_64_syscall_entry);

#ifdef _COMPAT_MODE
	// Syscall compat entry point address (used when SYSCALL is executed in
	// 32-bit compatibility mode).
	x86_write_msr(IA32_MSR_CSTAR, (addr_t)x86_64_syscall32_entry);
#endif

	// Segments that will be set upon entry and return. This is very strange
	// and requires a specific ordering of segments in the GDT. Upon entry:
	//  - CS is set to IA32_STAR[47:32]
	//  - SS is set to IA32_STAR[47:32] + 8
	// Upon return:
	//  - CS is set to IA32_STAR[63:48] + 16
	//  - SS is set to IA32_STAR[63:48] + 8
	// From this we get:
	//  - Entry CS  = KERNEL_CODE_SELECTOR
	//  - Entry SS  = KERNEL_CODE_SELECTOR + 8  = KERNEL_DATA_SELECTOR
	//  - Return CS = USER32_CODE_SELECTOR + 16 = USER_CODE_SELECTOR
	//  - Return SS = USER32_CODE_SELECTOR + 8  = USER_DATA_SELECTOR
	x86_write_msr(IA32_MSR_STAR, ((uint64)(USER32_CODE_SELECTOR) << 48)
		| ((uint64)(KERNEL_CODE_SELECTOR) << 32));
}


// #pragma mark -


void
x86_initialize_syscall(void)
{
	// SYSCALL/SYSRET are always available on x86_64 so we just use them, no
	// need to use the commpage. Tell all CPUs to initialize the SYSCALL MSRs.
	call_all_cpus_sync(&init_syscall_registers, NULL);
}


#ifdef _COMPAT_MODE

// Repoints the SYSENTER kernel stack pointer MSR at the given stack top.
// Installed as the gX86SetSyscallStack hook (runs on the current CPU only).
static void
set_intel_syscall_stack(addr_t stackTop)
{
	x86_write_msr(IA32_MSR_SYSENTER_ESP, stackTop);
}


// Per-CPU setup of the SYSENTER MSRs for the 32-bit compat path. Runs on
// every CPU via call_all_cpus_sync(); both parameters are unused (callback
// signature). SYSENTER_ESP starts at 0 here — the real per-thread stack top
// is supplied later through the gX86SetSyscallStack hook. Reassigning the
// hook once per CPU is redundant but harmless (same value every time).
static void
init_intel_syscall_registers(void* dummy, int cpuNum)
{
	x86_write_msr(IA32_MSR_SYSENTER_CS, KERNEL_CODE_SELECTOR);
	x86_write_msr(IA32_MSR_SYSENTER_ESP, 0);
	x86_write_msr(IA32_MSR_SYSENTER_EIP, (addr_t)x86_64_sysenter32_entry);

	gX86SetSyscallStack = &set_intel_syscall_stack;
}


// Sets up the 32-bit syscall mechanism: programs the SYSENTER MSRs on all
// CPUs, copies the userspace SYSENTER stub and the thread-exit stub into the
// compat commpage, and registers ELF symbols for both so the copied code can
// be resolved by address (e.g. in stack traces).
void
x86_compat_initialize_syscall(void)
{
	// for 32-bit syscalls, fill the commpage with the right mechanism
	call_all_cpus_sync(&init_intel_syscall_registers, NULL);

	void* syscallCode = (void *)&x86_user_syscall_sysenter;
	void* syscallCodeEnd = &x86_user_syscall_sysenter_end;

	// TODO check AMD for sysenter
	// (the SYSCALL-based stub x86_user_syscall_syscall is declared above for
	// that case but is not selected here yet)

	// fill in the table entry; length is the distance between the stub's
	// start symbol and its *_end marker
	size_t len = (size_t)((addr_t)syscallCodeEnd - (addr_t)syscallCode);
	addr_t position = fill_commpage_compat_entry(COMMPAGE_ENTRY_X86_SYSCALL,
		syscallCode, len);

	image_id image = get_commpage_compat_image();
	elf_add_memory_image_symbol(image, "commpage_compat_syscall", position,
		len, B_SYMBOL_TYPE_TEXT);

	// Same procedure for the userspace thread-exit stub.
	void* threadExitCode = (void *)&x86_sysenter32_userspace_thread_exit;
	void* threadExitCodeEnd = &x86_sysenter32_userspace_thread_exit_end;

	len = (size_t)((addr_t)threadExitCodeEnd - (addr_t)threadExitCode);
	position = fill_commpage_compat_entry(COMMPAGE_ENTRY_X86_THREAD_EXIT,
		threadExitCode, len);

	elf_add_memory_image_symbol(image, "commpage_compat_thread_exit",
		position, len, B_SYMBOL_TYPE_TEXT);
}

#endif // _COMPAT_MODE