/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2007, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the MIT License.
 */


#include "x86_syscalls.h"

#include <string.h>

#include <KernelExport.h>

#include <commpage.h>
#include <cpu.h>
#include <elf.h>
#include <smp.h>


// user syscall assembly stubs
extern "C" void x86_user_syscall_int(void);
extern unsigned int x86_user_syscall_int_end;
extern "C" void x86_user_syscall_sysenter(void);
extern unsigned int x86_user_syscall_sysenter_end;

// sysenter handler
extern "C" void x86_sysenter();


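// Hook for setting the kernel stack to be used on syscall entry. It is
// installed by the CPU specific init code below; currently it is only needed
// for sysenter, which takes its kernel stack from IA32_MSR_SYSENTER_ESP.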
void (*gX86SetSyscallStack)(addr_t stackTop) = NULL;


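// end markers of the optimized memcpy()/memset() implementations; used below
// to determine how many bytes to copy into the commpage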
extern int memcpy_end;
extern int memset_end;


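// Returns whether the given feature bit of the given feature word is set on
// every CPU in the system.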
static bool
all_cpus_have_feature(enum x86_feature_type type, int feature)
{
	int i;
	int cpuCount = smp_get_num_cpus();

	for (i = 0; i < cpuCount; i++) {
		if (!(gCPU[i].arch.feature[type] & feature))
			return false;
	}

	return true;
}


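// Updates IA32_MSR_SYSENTER_ESP so that sysenter enters the kernel on the
// given stack.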
static void
set_intel_syscall_stack(addr_t stackTop)
{
	x86_write_msr(IA32_MSR_SYSENTER_ESP, stackTop);
}


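// Called on every CPU (via call_all_cpus_sync()) to program the sysenter MSRs
// with the kernel code selector and the sysenter entry point, and to install
// the hook for setting the per-thread syscall stack.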
static void
init_intel_syscall_registers(void* dummy, int cpuNum)
{
	x86_write_msr(IA32_MSR_SYSENTER_CS, KERNEL_CODE_SELECTOR);
	x86_write_msr(IA32_MSR_SYSENTER_ESP, 0);
	x86_write_msr(IA32_MSR_SYSENTER_EIP, (addr_t)x86_sysenter);

	gX86SetSyscallStack = &set_intel_syscall_stack;
}


#if 0
static void
init_amd_syscall_registers(void* dummy, int cpuNum)
{
	// TODO: ...
}
#endif


// #pragma mark -


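// Chooses the fastest syscall mechanism supported by all CPUs, copies the
// matching userland stub together with the optimized memcpy()/memset() into
// the commpage, and adds symbols for the copied entries.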
void
x86_initialize_syscall(void)
{
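	// by default, use the plain software interrupt based stub, which works on
	// every CPU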
	void* syscallCode = (void*)&x86_user_syscall_int;
	void* syscallCodeEnd = &x86_user_syscall_int_end;

	// check for sysenter/sysexit support; early P6 CPUs (family 6, model < 3,
	// stepping < 3) report the SEP feature but don't actually implement the
	// sysenter MSRs, so they are excluded here
	if (all_cpus_have_feature(FEATURE_COMMON, IA32_FEATURE_SEP)
		&& !(gCPU[0].arch.family == 6 && gCPU[0].arch.model < 3
			&& gCPU[0].arch.stepping < 3)) {
		// Intel sysenter/sysexit
		dprintf("x86_initialize_syscall(): sysenter/sysexit supported\n");

		// the code to be used in userland
		syscallCode = (void*)&x86_user_syscall_sysenter;
		syscallCodeEnd = &x86_user_syscall_sysenter_end;

		// tell all CPUs to init their sysenter/sysexit related registers
		call_all_cpus_sync(&init_intel_syscall_registers, NULL);
	} else if (all_cpus_have_feature(FEATURE_EXT_AMD,
			IA32_FEATURE_AMD_EXT_SYSCALL)) {
		// AMD syscall/sysret
		dprintf("x86_initialize_syscall(): syscall/sysret supported "
			"-- not yet by Haiku, though\n");
	} else {
		// no special syscall support
		dprintf("x86_initialize_syscall(): no special syscall support\n");
	}

	// fill in the table entry
	size_t len = (size_t)((addr_t)syscallCodeEnd - (addr_t)syscallCode);
	addr_t position = fill_commpage_entry(COMMPAGE_ENTRY_X86_SYSCALL,
		syscallCode, len);

	// put the optimized functions into the commpage
	size_t memcpyLen = (addr_t)&memcpy_end - (addr_t)memcpy;
	addr_t memcpyPosition = fill_commpage_entry(COMMPAGE_ENTRY_X86_MEMCPY,
		(const void*)memcpy, memcpyLen);
	size_t memsetLen = (addr_t)&memset_end - (addr_t)memset;
	addr_t memsetPosition = fill_commpage_entry(COMMPAGE_ENTRY_X86_MEMSET,
		(const void*)memset, memsetLen);

	// add symbols for the copied entries to the commpage image
	image_id image = get_commpage_image();
	elf_add_memory_image_symbol(image, "commpage_memcpy", memcpyPosition,
		memcpyLen, B_SYMBOL_TYPE_TEXT);
	elf_add_memory_image_symbol(image, "commpage_memset", memsetPosition,
		memsetLen, B_SYMBOL_TYPE_TEXT);
	elf_add_memory_image_symbol(image, "commpage_syscall", position, len,
		B_SYMBOL_TYPE_TEXT);
}