xref: /haiku/src/system/kernel/arch/x86/32/arch.S (revision 14b32de1d5efe99b4c6d4ef8c25df47eb009cf0f)
/*
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2012, Rene Gollent, rene@gollent.com.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Copyright 2002, Michael Noisternig. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <asm_defs.h>

#include <arch/x86/descriptors.h>

#include "asm_offsets.h"
#include "syscall_numbers.h"


.text
21
/* void x86_fnsave(void *fpu_state); */
/* Save the legacy x87 FPU state into the given buffer. Note that FNSAVE
   also reinitializes the FPU after storing its state. %ecx is used as
   scratch; only caller-saved registers are touched (cdecl). */
FUNCTION(x86_fnsave):
	movl	4(%esp), %ecx	/* fpu_state buffer */
	fnsave	(%ecx)
	ret
FUNCTION_END(x86_fnsave)
28
/* void x86_fxsave(void *fpu_state); */
/* Save the FPU/MMX/SSE state via FXSAVE into the given buffer.
   NOTE(review): FXSAVE requires a 16-byte aligned buffer -- the caller
   must guarantee that. Only caller-saved registers are touched. */
FUNCTION(x86_fxsave):
	movl	4(%esp), %ecx	/* fpu_state buffer */
	fxsave	(%ecx)
	ret
FUNCTION_END(x86_fxsave)
35
/* void x86_frstor(const void *fpu_state); */
/* Reload the legacy x87 FPU state from a buffer previously filled by
   x86_fnsave(). Only caller-saved registers are touched. */
FUNCTION(x86_frstor):
	movl	4(%esp), %ecx	/* saved fpu_state buffer */
	frstor	(%ecx)
	ret
FUNCTION_END(x86_frstor)
42
/* void x86_fxrstor(const void *fpu_state); */
/* Reload the FPU/MMX/SSE state from a buffer previously filled by
   x86_fxsave(). NOTE(review): the buffer must be 16-byte aligned, as
   required by FXRSTOR. */
FUNCTION(x86_fxrstor):
	movl	4(%esp), %ecx	/* saved fpu_state buffer */
	fxrstor	(%ecx)
	ret
FUNCTION_END(x86_fxrstor)
49
/* void x86_noop_swap(void *old_fpu_state, const void *new_fpu_state); */
/* Do-nothing FPU swap routine, sharing the signature of
   x86_fnsave_swap()/x86_fxsave_swap() so it can be installed through the
   same function pointer when no FPU state needs swapping. */
FUNCTION(x86_noop_swap):
	nop
	ret
FUNCTION_END(x86_noop_swap)
55
/* void x86_fnsave_swap(void *old_fpu_state, const void *new_fpu_state); */
/* Swap x87 FPU contexts using the legacy FNSAVE/FRSTOR pair: store the
   current state into old_fpu_state, then load new_fpu_state. Neither
   instruction modifies general-purpose registers, so both pointers can
   be fetched up front. */
FUNCTION(x86_fnsave_swap):
	movl	4(%esp), %ecx	/* old_fpu_state */
	movl	8(%esp), %edx	/* new_fpu_state */
	fnsave	(%ecx)
	frstor	(%edx)
	ret
FUNCTION_END(x86_fnsave_swap)
64
/* void x86_fxsave_swap(void *old_fpu_state, const void *new_fpu_state); */
/* Swap FPU/MMX/SSE contexts using FXSAVE/FXRSTOR: store the current
   state into old_fpu_state, then load new_fpu_state.
   NOTE(review): both buffers must be 16-byte aligned per the FXSAVE/
   FXRSTOR specification. */
FUNCTION(x86_fxsave_swap):
	movl	4(%esp), %ecx	/* old_fpu_state */
	movl	8(%esp), %edx	/* new_fpu_state */
	fxsave	(%ecx)
	fxrstor	(%edx)
	ret
FUNCTION_END(x86_fxsave_swap)
73
/* uint32 x86_get_stack_frame(); */
/* Return the current frame pointer (%ebp), i.e. the caller's stack
   frame, for stack-trace walking. */
FUNCTION(x86_get_stack_frame):
	movl	%ebp, %eax
	ret
FUNCTION_END(x86_get_stack_frame)
79
/* uint64 x86_read_msr(uint32 register); */
/* Read a model-specific register. RDMSR takes the MSR index in %ecx and
   returns the value in %edx:%eax -- which is exactly the cdecl 64-bit
   return convention, so no further moves are needed. */
FUNCTION(x86_read_msr):
	movl	4(%esp), %ecx	/* MSR index */
	rdmsr
	ret
FUNCTION_END(x86_read_msr)
86
/* void x86_write_msr(uint32 register, uint64 value); */
/* Write a model-specific register. WRMSR takes the MSR index in %ecx
   and the 64-bit value in %edx:%eax. */
FUNCTION(x86_write_msr):
	movl	4(%esp), %ecx	/* MSR index */
	movl	8(%esp), %eax	/* low 32 bits of value */
	movl	12(%esp), %edx	/* high 32 bits of value */
	wrmsr
	ret
FUNCTION_END(x86_write_msr)
95
/* void x86_context_switch(struct arch_thread* oldState,
	struct arch_thread* newState); */
/* Switch kernel stacks between two threads. All GP registers are saved
   on the old thread's stack with PUSHA; only %esp and %ss are stored in
   the thread structure (at offsets 0 and 4 of current_stack). The `ret`
   then returns to whatever x86_context_switch call originally suspended
   the new thread. */
FUNCTION(x86_context_switch):
	pusha					/* pushes 8 words onto the stack */
	movl	36(%esp),%eax	/* oldState: 32 bytes of pusha + 4 bytes return address */
	movl	%esp,(%eax)		/* save %esp in oldState->current_stack */
	pushl	%ss
	popl	%edx			/* %ss cannot be moved to memory directly via %edx-free path */
	movl	%edx,4(%eax)	/* save %ss right after %esp */
	movl	40(%esp),%eax	/* get new newState->current_stack */
	lss		(%eax),%esp		/* atomically load new %esp and %ss */
	popa					/* restore the new thread's registers */
	ret						/* continue on the new thread's stack */
FUNCTION_END(x86_context_switch)
110
/* void x86_swap_pgdir(uint32 newPageDir); */
/* Install a new page directory. Writing %cr3 also flushes all non-global
   TLB entries as a side effect. */
FUNCTION(x86_swap_pgdir):
	movl	4(%esp), %ecx	/* physical address of the new page directory */
	movl	%ecx, %cr3
	ret
FUNCTION_END(x86_swap_pgdir)
117
/* thread exit stub */
/* Trampoline executed in userspace when a thread's entry function
   returns; x86_end_userspace_thread_exit marks the end of the code,
   which suggests the stub is copied elsewhere (presumably onto the
   userspace stack), so it must remain position independent and
   self-contained -- do not change its instructions. */
	.align 4
FUNCTION(x86_userspace_thread_exit):
	pushl	%eax			/* entry function's return value = exit status */
	sub		$4, %esp		/* NOTE(review): extra slot below the status --
							   confirm against the int 99 syscall entry's
							   expected stack layout */
	movl	$1, %ecx		/* presumably the syscall argument count -- verify */
	lea		(%esp), %edx	/* pointer to the arguments on the stack */
	movl	$SYSCALL_EXIT_THREAD, %eax
	int		$99				/* Haiku syscall vector; does not return */
	.align 4
FUNCTION_END(x86_userspace_thread_exit)
SYMBOL(x86_end_userspace_thread_exit):
130
131
/* Descriptor for an IDT with limit 0 and base 0, i.e. no valid entries. */
null_idt_descr:
	.word	0				/* limit */
	.word	0,0				/* base */

/* Reboot the machine by forcing a triple fault: with an empty IDT
   loaded, the interrupt below cannot be dispatched, nor can the
   resulting double fault, which resets the CPU. */
FUNCTION(x86_reboot):
	lidt	null_idt_descr
	int		$0
done:
	jmp		done			/* never reached -- spin just in case */
FUNCTION_END(x86_reboot)
142
143
/* status_t arch_cpu_user_memcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
/* Copy `size` bytes between kernel and userspace buffers. Returns 0 on
   success or -1 if a page fault could not be resolved: while the copy
   runs, *faultHandler is redirected to the local error label, where the
   fault handler presumably resumes execution with the registers
   otherwise intact (so %eax/%edx below must stay live across the whole
   copy -- the string instructions do not clobber them). */
FUNCTION(arch_cpu_user_memcpy):
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%edi	/* dest */
	movl	16(%esp),%esi	/* source */
	movl	20(%esp),%ecx	/* count */

	/* set the fault handler, keeping the previous one in %eax */
	movl	24(%esp),%edx	/* fault handler */
	movl	(%edx),%eax
	movl	$.L_user_memcpy_error, (%edx)

	/* move by words */
	cld
	shrl	$2,%ecx			/* count / 4 dwords */
	rep
	movsl

	/* move any remaining data by bytes */
	movl	20(%esp),%ecx
	andl	$3,%ecx			/* count % 4 trailing bytes */
	rep
	movsb

	/* restore the old fault handler */
	movl	%eax,(%edx)
	xor		%eax,%eax		/* return 0 (B_OK) */

	popl	%edi
	popl	%esi
	ret

	/* error condition: reached via the fault handler */
.L_user_memcpy_error:
	/* restore the old fault handler */
	movl	%eax,(%edx)
	movl	$-1,%eax	/* return a generic error, the wrapper routine will deal with it */
	popl	%edi
	popl	%esi
	ret
FUNCTION_END(arch_cpu_user_memcpy)
186
187
/* status_t arch_cpu_user_memset(void *to, char c, size_t count, addr_t *faultHandler) */
/* Fill `count` bytes at the (userspace) address `to` with the byte `c`.
   Returns 0 on success or -1 if a fault occurred while writing: while
   the fill runs, *faultHandler is redirected to the local error label.
   The previous handler is preserved in %esi (callee-saved here, and not
   touched by STOS). */
FUNCTION(arch_cpu_user_memset):
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%edi	/* dest */
	movb	16(%esp),%al	/* c */
	movl	20(%esp),%ecx	/* count */

	/* set the fault handler, keeping the previous one in %esi */
	movl	24(%esp),%edx	/* fault handler */
	movl	(%edx),%esi
	movl	$.L_user_memset_error, (%edx)

	/* fill forward; clear the direction flag explicitly instead of
	   relying on the caller's DF state (matches arch_cpu_user_memcpy) */
	cld
	rep
	stosb

	/* restore the old fault handler */
	movl	%esi,(%edx)
	xor		%eax,%eax		/* return 0 (B_OK) */

	popl	%edi
	popl	%esi
	ret

	/* error condition: reached via the fault handler */
.L_user_memset_error:
	/* restore the old fault handler */
	movl	%esi,(%edx)
	movl	$-1,%eax	/* return a generic error, the wrapper routine will deal with it */
	popl	%edi
	popl	%esi
	ret
FUNCTION_END(arch_cpu_user_memset)
221
222
/* ssize_t arch_cpu_user_strlcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
/* strlcpy() with fault protection: copy at most size - 1 bytes from the
   user string `from`, always null-terminate when size > 0, and return
   strlen(from) (so truncation can be detected), or -1 if a fault
   occurred. *faultHandler is redirected to the local error label for
   the duration; the old handler is kept in %ebx (callee-saved). */
FUNCTION(arch_cpu_user_strlcpy):
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	movl	16(%esp),%edi	/* dest */
	movl	20(%esp),%esi	/* source */
	movl	24(%esp),%ecx	/* count */

	/* set the fault handler, keeping the previous one in %ebx */
	movl	28(%esp),%edx	/* fault handler */
	movl	(%edx),%ebx
	movl	$.L_user_strlcpy_error, (%edx)

	/* Check for 0 length */
	cmp		$0,%ecx
	je		.L_user_strlcpy_source_count

	/* Copy at most count - 1 bytes */
	dec		%ecx

	/* If count is now 0, skip straight to null terminating
	   as our loop will otherwise overflow */
	jnz		.L_user_strlcpy_copy_begin
	movb	$0,(%edi)
	jmp		.L_user_strlcpy_source_count

.L_user_strlcpy_copy_begin:
	cld
.L_user_strlcpy_copy_loop:
	/* move data by bytes, stopping after the terminator was copied */
	lodsb
	stosb
	test %al,%al
	jz .L_user_strlcpy_source_done
	loop .L_user_strlcpy_copy_loop

	/* count - 1 bytes copied without hitting the terminator:
	   null terminate the (truncated) destination */
	movb	$0,(%edi)
	dec		%esi

	/* count the remaining bytes in src to compute strlen(source) */
.L_user_strlcpy_source_count:
	cld						/* the size == 0 path never executed cld above */
	not		%ecx			/* %ecx was 0 -- make the scan length "unlimited" */
	xor		%al,%al
	/* SCASB scans ES:(%edi), so the rest of the source has to be walked
	   through %edi, and the final position propagated back into %esi for
	   the length computation below. (Previously the code scanned the
	   *dest* buffer and left %esi unadjusted, returning a bogus length
	   whenever the size == 0 or truncation path was taken.) */
	movl	%esi,%edi
	repnz
	scasb
	movl	%edi,%esi

.L_user_strlcpy_source_done:
	/* %esi points one past the source's null terminator on every path,
	   so strlen(source) = %esi - source - 1 */
	movl	%esi,%eax
	subl	20(%esp),%eax
	dec		%eax
	/* restore the old fault handler */
	movl	%ebx,(%edx)

	popl	%ebx
	popl	%edi
	popl	%esi
	ret

	/* error condition: reached via the fault handler */
.L_user_strlcpy_error:
	/* restore the old fault handler */
	movl	%ebx,(%edx)
	movl	$-1,%eax	/* return a generic error, the wrapper routine will deal with it */
	popl	%ebx
	popl	%edi
	popl	%esi
	ret
FUNCTION_END(arch_cpu_user_strlcpy)
293
294
/*!	\fn void arch_debug_call_with_fault_handler(cpu_ent* cpu,
		jmp_buf jumpBuffer, void (*function)(void*), void* parameter)

	Called by debug_call_with_fault_handler() to do the dirty work of setting
	the fault handler and calling the function. If the function causes a page
	fault, the arch_debug_call_with_fault_handler() calls longjmp() with the
	given \a jumpBuffer. Otherwise it returns normally.

	debug_call_with_fault_handler() has already saved the CPU's fault_handler
	and fault_handler_stack_pointer and will reset them later, so
	arch_debug_call_with_fault_handler() doesn't need to care about it.

	\param cpu The \c cpu_ent for the current CPU.
	\param jumpBuffer Buffer to be used for longjmp().
	\param function The function to be called.
	\param parameter The parameter to be passed to the function to be called.
*/
FUNCTION(arch_debug_call_with_fault_handler):
	push	%ebp
	movl	%esp, %ebp

	// Set fault handler address, and fault handler stack pointer address. We
	// don't need to save the previous values, since that's done by the caller.
	movl	8(%ebp), %eax	// cpu to %eax
	lea		1f, %edx		// absolute address of the fault label below
	movl	%edx, CPU_ENT_fault_handler(%eax)
	movl	%ebp, CPU_ENT_fault_handler_stack_pointer(%eax)

	// call the function: function(parameter), cdecl
	movl	20(%ebp), %eax	// parameter
	push	%eax
	movl	16(%ebp), %eax	// function
	call	*%eax

	// regular return
	movl	%ebp, %esp		// discard the pushed argument
	pop		%ebp
	ret

	// fault -- return via longjmp(jumpBuffer, 1)
1:
	movl	%ebp, %esp		// restore %esp (the fault handler presumably
							// restored %ebp from the saved
							// fault_handler_stack_pointer -- see above)
	pushl	$1
	movl	12(%ebp), %eax	// jumpBuffer
	pushl	%eax
	call	longjmp			// does not return
FUNCTION_END(arch_debug_call_with_fault_handler)
342