/*
 * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */
#ifndef _KERNEL_ARCH_GENERIC_USER_MEMORY_H
#define _KERNEL_ARCH_GENERIC_USER_MEMORY_H


#include <atomic>

#include <setjmp.h>
#include <string.h>

#include <thread.h>

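// Generic, setjmp()/longjmp() based implementation of the fault-guarded
// user memory access primitives (arch_cpu_user_memcpy() and friends).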
namespace {

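// RAII guard that installs HandleFault() as the current thread's fault
// handler for the duration of a guarded access, saving the previously
// installed handler and its jmp_buf state and restoring both on
// destruction. The atomic_signal_fence() calls act as compiler barriers,
// keeping the handler swap from being reordered across the guarded code.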
struct FaultHandlerGuard {
	FaultHandlerGuard()
	{
		old_handler = thread_get_current_thread()->fault_handler;
		if (old_handler != nullptr) {
			memcpy(old_handler_state,
				thread_get_current_thread()->fault_handler_state,
				sizeof(jmp_buf));
		}
		thread_get_current_thread()->fault_handler = HandleFault;
		std::atomic_signal_fence(std::memory_order_acq_rel);
	}


	~FaultHandlerGuard()
	{
		std::atomic_signal_fence(std::memory_order_acq_rel);
		thread_get_current_thread()->fault_handler = old_handler;
		if (old_handler != nullptr) {
			memcpy(thread_get_current_thread()->fault_handler_state,
				old_handler_state,
				sizeof(jmp_buf));
		}
	}

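	// Called when a fault occurs while the guard is active; jumps back to
	// the setjmp() in user_access() below.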
	[[noreturn]] static void HandleFault()
	{
		longjmp(thread_get_current_thread()->fault_handler_state, 1);
	}

	void (*old_handler)(void);
	jmp_buf old_handler_state;
};

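// Runs function() with user memory access enabled and the thread's fault
// handler temporarily pointing at FaultHandlerGuard::HandleFault(). Returns
// true if function() completed normally, false if a fault occurred and
// control came back through longjmp(). Since longjmp() skips destructors of
// objects on function()'s stack, function() must not create objects with
// non-trivial destructors there (see the TODO below). The arch_cpu_user_*()
// wrappers below are the typical callers.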
template<typename Function>
bool user_access(Function function)
{
	FaultHandlerGuard guard;
	// TODO: try { } catch (...) { } would be much nicer, wouldn't it?
	// And faster... And the world wouldn't end in a terrible disaster if
	// function() or anything it calls created an object with a non-trivial
	// destructor on the stack.
	auto fail = setjmp(thread_get_current_thread()->fault_handler_state);
	if (fail == 0) {
		arch_cpu_enable_user_access();
		function();
		arch_cpu_disable_user_access();
		return true;
	}
	arch_cpu_disable_user_access();
	return false;
}

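// Fault-guarded memcpy(): returns B_OK on success or B_ERROR if accessing
// the memory faulted.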
inline status_t
arch_cpu_user_memcpy(void* dst, const void* src, size_t n)
{
	return user_access([=] { memcpy(dst, src, n); }) ? B_OK : B_ERROR;
}

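// Fault-guarded memset(): fills n bytes of the buffer with v; returns B_OK
// on success or B_ERROR if the access faulted.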
inline status_t
arch_cpu_user_memset(void* dst, char v, size_t n)
{
	return user_access([=] { memset(dst, v, n); }) ? B_OK : B_ERROR;
}

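// Fault-guarded strlcpy(): returns the length of the source string on
// success (like strlcpy()), or B_ERROR if the access faulted.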
inline ssize_t
arch_cpu_user_strlcpy(char* dst, const char* src, size_t n)
{
	ssize_t result;
	return user_access([=, &result] { result = strlcpy(dst, src, n); })
		? result : B_ERROR;
}

}	// namespace


#endif	// _KERNEL_ARCH_GENERIC_USER_MEMORY_H