/*
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2012, Rene Gollent, rene@gollent.com.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Copyright 2002, Michael Noisternig. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <asm_defs.h>

#include "asm_offsets.h"
#include "syscall_numbers.h"


.text


/* void x86_fxsave(void* fpuState); */
FUNCTION(x86_fxsave):
	fxsave	(%rdi)
	ret
FUNCTION_END(x86_fxsave)


/* void x86_fxrstor(const void* fpuState); */
FUNCTION(x86_fxrstor):
	fxrstor	(%rdi)
	ret
FUNCTION_END(x86_fxrstor)


/* void x86_noop_swap(void *oldFpuState, const void *newFpuState); */
FUNCTION(x86_noop_swap):
	nop
	ret
FUNCTION_END(x86_noop_swap)


/* void x86_fxsave_swap(void* oldFpuState, const void* newFpuState); */
FUNCTION(x86_fxsave_swap):
	fxsave	(%rdi)
	fxrstor	(%rsi)
	ret
FUNCTION_END(x86_fxsave_swap)


/* addr_t x86_get_stack_frame(); */
FUNCTION(x86_get_stack_frame):
	mov		%rbp, %rax
	ret
FUNCTION_END(x86_get_stack_frame)


/* void x86_64_thread_entry(); */
FUNCTION(x86_64_thread_entry):
	// Clear the frame pointer so that stack traces terminate here.
	xorq	%rbp, %rbp

	// The entry point and its argument sit at the top of the stack. Remember
	// their location, pop them, and realign the stack to 16 bytes minus 8 --
	// as if a return address had just been pushed, which is what the ABI
	// guarantees on function entry.
	movq	%rsp, %rax
	addq	$16, %rsp
	andq	$0xfffffffffffffff0, %rsp
	subq	$8, %rsp

	// Load the argument and jump to the entry point.
	movq	8(%rax), %rdi
	jmp		*(%rax)
FUNCTION_END(x86_64_thread_entry)


/* void x86_swap_pgdir(uint64 newPageDir); */
FUNCTION(x86_swap_pgdir):
	movq	%rdi, %cr3
	ret
FUNCTION_END(x86_swap_pgdir)


/* thread exit stub: invokes the exit_thread syscall with the thread's return
   value (rax) as the exit status. x86_end_userspace_thread_exit marks the end
   of the stub. */
.align 8
FUNCTION(x86_userspace_thread_exit):
	movq	%rax, %rdi
	movq	$SYSCALL_EXIT_THREAD, %rax
	syscall
.align 8
FUNCTION_END(x86_userspace_thread_exit)
SYMBOL(x86_end_userspace_thread_exit):


null_idt_descr:
	.word	0
	.quad	0

FUNCTION(x86_reboot):
	// Load an empty IDT and trigger an interrupt: the resulting triple fault
	// resets the machine.
	lidt	null_idt_descr
	int		$0
done:
	jmp		done
FUNCTION_END(x86_reboot)


/* status_t arch_cpu_user_memcpy(void* to, const void* from, size_t size,
	addr_t* faultHandler) */
FUNCTION(arch_cpu_user_memcpy):
	// faultHandler -> r8, size -> rcx.
	movq	%rcx, %r8
	movq	%rdx, %rcx

	// Set the fault handler, preserving the old one in rax.
	movq	(%r8), %rax
	movq	$.L_user_memcpy_error, (%r8)

	// Move by quadwords.
	cld
	movq	%rcx, %r9
	shrq	$3, %rcx
	rep		movsq

	// Move any remaining data by bytes.
	movq	%r9, %rcx
	andq	$7, %rcx
	rep		movsb

	// Restore the old fault handler and return.
	movq	%rax, (%r8)
	xorl	%eax, %eax
	ret

.L_user_memcpy_error:
	// Restore the old fault handler. Return a generic error; the wrapper
	// routine will deal with it.
	movq	%rax, (%r8)
	movl	$-1, %eax
	ret
FUNCTION_END(arch_cpu_user_memcpy)


/* status_t arch_cpu_user_memset(void* to, char c, size_t count,
	addr_t* faultHandler) */
FUNCTION(arch_cpu_user_memset):
	// c -> al, faultHandler -> r8, count -> rcx.
	movw	%si, %ax
	movq	%rcx, %r8
	movq	%rdx, %rcx

	// Set the fault handler, preserving the old one in rdx.
	movq	(%r8), %rdx
	movq	$.L_user_memset_error, (%r8)

	rep		stosb

	// Restore the old fault handler and return.
	movq	%rdx, (%r8)
	xorl	%eax, %eax
	ret

.L_user_memset_error:
	// Restore the old fault handler. Return a generic error; the wrapper
	// routine will deal with it.
	movq	%rdx, (%r8)
	movl	$-1, %eax
	ret
FUNCTION_END(arch_cpu_user_memset)


/* ssize_t arch_cpu_user_strlcpy(void* to, const void* from, size_t size,
	addr_t* faultHandler) */
FUNCTION(arch_cpu_user_strlcpy):
	// faultHandler -> r8, size -> rcx, source -> r9 (original value needed to
	// calculate the return value).
	movq	%rcx, %r8
	movq	%rdx, %rcx
	movq	%rsi, %r9

	// Set the fault handler, preserving the old one in rax.
	movq	(%r8), %rax
	movq	$.L_user_strlcpy_error, (%r8)
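
	// What follows implements strlcpy() semantics: copy at most size - 1
	// bytes, NUL-terminate the destination whenever size > 0, and return
	// strlen(from) so the caller can detect truncation. Roughly, in C (an
	// illustrative sketch only, not part of the original source; "origFrom"
	// stands for the source pointer saved in r9):
	//
	//	if (size != 0) {
	//		while (--size != 0) {
	//			if ((*to++ = *from++) == '\0')
	//				return from - origFrom - 1;
	//		}
	//		*to = '\0';
	//	}
	//	while (*from++ != '\0')
	//		;
	//	return from - origFrom - 1;

	// Check for 0 length.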
	cmp		$0, %rcx
	je		.L_user_strlcpy_source_count

	// Copy at most size - 1 bytes.
	dec		%rcx

	// If the count is now 0, skip straight to null terminating, as our loop
	// would otherwise overflow.
	jnz		.L_user_strlcpy_copy_begin
	movb	$0, (%rdi)
	jmp		.L_user_strlcpy_source_count

.L_user_strlcpy_copy_begin:
	cld
.L_user_strlcpy_copy_loop:
	// Move the data by bytes.
	lodsb
	stosb
	test	%al, %al
	jz		.L_user_strlcpy_source_done
	loop	.L_user_strlcpy_copy_loop

	// Null terminate the string.
	movb	$0, (%rdi)
	dec		%rsi

.L_user_strlcpy_source_count:
	// Count the remaining bytes in the source.
	not		%rcx		# %rcx was 0 and is now max.
	xor		%al, %al
	movq	%rsi, %rdi
	repnz	scasb
	movq	%rdi, %rsi

.L_user_strlcpy_source_done:
	// Restore the old fault handler.
	movq	%rax, (%r8)

	// Calculate the total string length and return it.
	movq	%rsi, %rax
	subq	%r9, %rax
	dec		%rax
	ret

.L_user_strlcpy_error:
	// Restore the old fault handler. Return a generic error; the wrapper
	// routine will deal with it.
	movq	%rax, (%r8)
	movq	$-1, %rax
	ret
FUNCTION_END(arch_cpu_user_strlcpy)


/*!	\fn void arch_debug_call_with_fault_handler(cpu_ent* cpu,
		jmp_buf jumpBuffer, void (*function)(void*), void* parameter)

	Called by debug_call_with_fault_handler() to do the dirty work of setting
	the fault handler and calling the function. If the function causes a page
	fault, arch_debug_call_with_fault_handler() calls longjmp() with the given
	\a jumpBuffer. Otherwise it returns normally.

	debug_call_with_fault_handler() has already saved the CPU's fault_handler
	and fault_handler_stack_pointer and will reset them later, so
	arch_debug_call_with_fault_handler() doesn't need to care about them.

	\param cpu The \c cpu_ent for the current CPU.
	\param jumpBuffer Buffer to be used for longjmp().
	\param function The function to be called.
	\param parameter The parameter to be passed to the function to be called.
*/
FUNCTION(arch_debug_call_with_fault_handler):
	push	%rbp
	movq	%rsp, %rbp

	// Preserve the jump buffer address for the fault return.
	push	%rsi

	// Set the fault handler address and the fault handler stack pointer. We
	// don't need to save the previous values, since that's done by the caller.
	movq	$.L_debug_call_fault_handler, CPU_ENT_fault_handler(%rdi)
	movq	%rbp, CPU_ENT_fault_handler_stack_pointer(%rdi)

	// Call the function.
	movq	%rcx, %rdi
	call	*%rdx

	// Regular return.
	movq	%rbp, %rsp
	pop		%rbp
	ret

.L_debug_call_fault_handler:
	// Fault -- return via longjmp(jumpBuffer, 1).
	movq	%rbp, %rsp
	movq	-8(%rsp), %rdi	# the jump buffer pushed above
	movq	$1, %rsi
	call	longjmp
FUNCTION_END(arch_debug_call_with_fault_handler)
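
/*	For illustration, the C side described in the doc comment above might
	drive this routine roughly as follows. This is a sketch only, not the
	actual debug_call_with_fault_handler() implementation; the saving and
	restoring of cpu->fault_handler and cpu->fault_handler_stack_pointer is
	elided:

		jmp_buf jumpBuffer;
		if (setjmp(jumpBuffer) == 0) {
			// First pass: install the fault handler and call the function.
			arch_debug_call_with_fault_handler(cpu, jumpBuffer, function,
				parameter);
		} else {
			// The function faulted; we arrived here via the longjmp() above.
		}
*/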