xref: /haiku/headers/private/kernel/user_debugger.h (revision 909af08f4328301fbdef1ffb41f566c3b5bec0c7)
1 /*
2  * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Userland debugger support.
6  */
7 #ifndef _KERNEL_USER_DEBUGGER_H
8 #define _KERNEL_USER_DEBUGGER_H
9 
10 
11 #include <debugger.h>
12 
13 #include <arch/user_debugger.h>
14 
15 #include <timer.h>
16 
17 
18 // limits
19 #define B_DEBUG_MIN_PROFILE_INTERVAL			10			/* in us */
20 #define B_DEBUG_STACK_TRACE_DEPTH				128
21 #define	B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD	70			/* in % */
22 
23 
24 struct BreakpointManager;
25 struct ConditionVariable;
26 struct function_profile_info;
27 
28 namespace BKernel {
29 	struct Thread;
30 }
31 
32 using BKernel::Thread;
33 
34 
// Team related debugging data.
//
// Locking policy:
// 1) When accessing the structure it must be made sure that the structure
//    (i.e. the struct Team it lives in) isn't deleted. Thus one either needs to
//    get a team reference, lock the team, or one accesses the structure from a
//    thread of that team.
// 2) Access to the `flags' field is atomic. Reading via atomic_get()
//    requires no further locks (in addition to 1) that is). Writing requires
//    `lock' to be held and must be done atomically, too
//    (atomic_{set,and,or}()). Reading with `lock' being held doesn't need to
//    be done atomically.
// 3) Access to all other fields (read or write) requires `lock' to be held.
// 4) Locking order is scheduler lock -> Team -> Thread -> team_debug_info::lock
//    -> thread_debug_info::lock.
//
struct team_debug_info {
	spinlock	lock;
		// Guards the remaining fields. Should always be the innermost lock
		// to be acquired/released, save for thread_debug_info::lock.

	int32		flags;
		// B_TEAM_DEBUG_* flags (see below and <debugger.h>).
		// Set atomically. So reading atomically is OK, even when the lock is
		// not held (at least if it is certain, that the team struct won't go).

	team_id		debugger_team;
		// team of the installed debugger
	port_id		debugger_port;
		// port debug events are written to
	thread_id	nub_thread;
		// the team's debug nub thread
	port_id		nub_port;
		// the port the nub thread is waiting on for commands from the debugger
	sem_id		debugger_write_lock;
		// synchronizes writes to the debugger port with the setting (but not
		// clearing) of the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag
	thread_id	causing_thread;
		// thread that caused the debugger to be attached; -1 for manual
		// debugger attachment (or no debugger installed)
	int32		image_event;
		// counter incremented whenever an image is created/deleted

	struct ConditionVariable* debugger_changed_condition;
		// Set to a condition variable when going to change the debugger. Anyone
		// who wants to change the debugger as well, needs to wait until the
		// condition variable is unset again (waiting for the condition and
		// rechecking again). The field and the condition variable is protected
		// by 'lock'. After setting a condition variable the team is
		// guaranteed not to be deleted (until it is unset); it might be
		// removed from the team hash table, though.

	struct BreakpointManager* breakpoint_manager;
		// manages hard- and software breakpoints

	struct arch_team_debug_info	arch_info;
		// architecture specific part (see <arch/user_debugger.h>)
};
88 
// Thread related debugging data.
//
// Locking policy:
// 1) When accessing the structure it must be made sure that the structure
//    (i.e. the struct Thread it lives in) isn't deleted. Thus one either needs
//    to get a thread reference, lock the thread, or one accesses the structure
//    of the current thread.
// 2) Access to the `flags' field is atomic. Reading via atomic_get()
//    requires no further locks (in addition to 1) that is). Writing requires
//    `lock' to be held and must be done atomically, too
//    (atomic_{set,and,or}()). Reading with `lock' being held doesn't need to
//    be done atomically.
// 3) Access to all other fields (read or write) requires `lock' to be held.
// 4) Locking order is scheduler lock -> Team -> Thread -> team_debug_info::lock
//    -> thread_debug_info::lock.
//
struct thread_debug_info {
	spinlock	lock;
		// Guards the remaining fields. Should always be the innermost lock
		// to be acquired/released.

	int32		flags;
		// B_THREAD_DEBUG_* flags (see below and <debugger.h>).
		// Set atomically. So reading atomically is OK, even when the lock is
		// not held (at least if it is certain, that the thread struct won't
		// go).
	port_id		debug_port;
		// the port the thread is waiting on for commands from the nub thread

	sigset_t	ignore_signals;
		// the signals the debugger is not interested in
	sigset_t	ignore_signals_once;
		// the signals the debugger wishes not to be notified of, when they
		// occur the next time

	// profiling related part; if samples != NULL, the thread is profiled
	struct {
		bigtime_t		interval;
			// sampling interval
		area_id			sample_area;
			// cloned sample buffer area
		addr_t*			samples;
			// sample buffer
		int32			max_samples;
			// maximum number of samples the buffer can hold
		int32			flush_threshold;
			// number of samples at which the buffer is flushed (if possible)
		int32			sample_count;
			// number of samples the buffer currently holds
		int32			stack_depth;
			// number of return addresses to record per timer interval
		int32			dropped_ticks;
			// number of ticks that had to be dropped when the sample buffer
			// was full and couldn't be flushed
		int32			image_event;
			// number of the image event when the first sample was written into
			// the buffer
		int32			last_image_event;
			// number of the image event when the last sample was written into
			// the buffer
		bool			variable_stack_depth;
			// record a variable number of samples per hit
		bool			profile_kernel;
			// record samples in kernel stack frames
		bool			flush_needed;
			// indicates that a flush of the sample buffer is needed
		union {
			// mutually exclusive depending on whether the thread is currently
			// scheduled on a CPU
			bigtime_t	interval_left;
				// when unscheduled: the time left of the current sampling
				// interval
			bigtime_t	timer_end;
				// when running: the absolute time the timer is supposed to go
				// off
		};
		timer*			installed_timer;
			// when running and being profiled: the CPU's profiling timer
	} profile;

	struct arch_thread_debug_info	arch_info;
		// architecture specific part (see <arch/user_debugger.h>)
};
168 
// Convenience wrappers for acquiring/releasing a team_debug_info's spinlock.
// `info' is the structure itself (not a pointer); see the locking policy above.
#define GRAB_TEAM_DEBUG_INFO_LOCK(info)		acquire_spinlock(&(info).lock)
#define RELEASE_TEAM_DEBUG_INFO_LOCK(info)	release_spinlock(&(info).lock)
171 
// team debugging flags (user-specifiable flags are in <debugger.h>)
enum {
	B_TEAM_DEBUG_DEBUGGER_INSTALLED		= 0x0001,	// a debugger is attached
	B_TEAM_DEBUG_DEBUGGER_HANDOVER		= 0x0002,	// marked for hand-over
	B_TEAM_DEBUG_DEBUGGER_HANDING_OVER	= 0x0004,	// handing over
	B_TEAM_DEBUG_DEBUGGER_DISABLED		= 0x0008,	// debugging disallowed

	B_TEAM_DEBUG_KERNEL_FLAG_MASK		= 0xffff,
		// mask of the kernel-internal (non-user-specifiable) flags

	B_TEAM_DEBUG_DEFAULT_FLAGS			= 0,
	B_TEAM_DEBUG_INHERITED_FLAGS		= B_TEAM_DEBUG_DEBUGGER_DISABLED
		// the only flag passed on to child teams
};
184 
// thread debugging flags (user-specifiable flags are in <debugger.h>)
enum {
	B_THREAD_DEBUG_INITIALIZED			= 0x0001,	// debug info is set up
	B_THREAD_DEBUG_DYING				= 0x0002,	// thread is exiting
	B_THREAD_DEBUG_STOP					= 0x0004,	// thread shall stop
	B_THREAD_DEBUG_STOPPED				= 0x0008,	// thread is stopped
	B_THREAD_DEBUG_SINGLE_STEP			= 0x0010,	// single-stepping active
	B_THREAD_DEBUG_NOTIFY_SINGLE_STEP	= 0x0020,

	B_THREAD_DEBUG_NUB_THREAD			= 0x0040,	// marks the nub thread

	B_THREAD_DEBUG_KERNEL_FLAG_MASK		= 0xffff,
		// mask of the kernel-internal (non-user-specifiable) flags

	B_THREAD_DEBUG_DEFAULT_FLAGS		= 0,
};
200 
// messages sent from the debug nub thread to a debugged thread
// (message data is the matching member of debugged_thread_message_data below)
typedef enum {
	B_DEBUGGED_THREAD_MESSAGE_CONTINUE	= 0,	// resume execution
	B_DEBUGGED_THREAD_SET_CPU_STATE,			// overwrite the CPU state
	B_DEBUGGED_THREAD_GET_CPU_STATE,			// report the CPU state
	B_DEBUGGED_THREAD_DEBUGGER_CHANGED,			// a new debugger took over
} debugged_thread_message;
208 
// data for B_DEBUGGED_THREAD_MESSAGE_CONTINUE
typedef struct {
	uint32	handle_event;	// how to handle the event that stopped the thread
	bool	single_step;	// whether to single-step from here
} debugged_thread_continue;

// data for B_DEBUGGED_THREAD_GET_CPU_STATE
typedef struct {
	port_id	reply_port;		// port the CPU state shall be sent to
} debugged_thread_get_cpu_state;

// data for B_DEBUGGED_THREAD_SET_CPU_STATE
typedef struct {
	debug_cpu_state	cpu_state;	// the new CPU state (from <debugger.h>)
} debugged_thread_set_cpu_state;

// union of all message payloads; which member is valid depends on the
// debugged_thread_message code sent along with it
typedef union {
	debugged_thread_continue		continue_thread;
	debugged_thread_set_cpu_state	set_cpu_state;
	debugged_thread_get_cpu_state	get_cpu_state;
} debugged_thread_message_data;
227 
228 
229 // internal messages sent to the nub thread
230 typedef enum {
231 	B_DEBUG_MESSAGE_HANDED_OVER		= -1,
232 } debug_nub_kernel_message;
233 
234 
235 #ifdef __cplusplus
236 extern "C" {
237 #endif
238 
// service calls

// Resets the team debug info to its no-debugger state; `initLock' indicates
// whether the spinlock shall also be (re-)initialized.
// NOTE(review): semantics inferred from parameter names — confirm in
// src/system/kernel/debug/user_debugger.cpp.
void clear_team_debug_info(struct team_debug_info *info, bool initLock);

// Thread debug info lifecycle: init on thread creation, clear (`dying'
// indicates the thread is exiting), and final destruction.
void init_thread_debug_info(struct thread_debug_info *info);
void clear_thread_debug_info(struct thread_debug_info *info, bool dying);
void destroy_thread_debug_info(struct thread_debug_info *info);

// Called around exec*() to tear down/re-establish debugging state for the
// current team.
void user_debug_prepare_for_exec();
void user_debug_finish_after_exec();

// One-time initialization of the userland debugger support at boot.
void init_user_debug();
251 
252 
253 // debug event callbacks
254 
255 void user_debug_pre_syscall(uint32 syscall, void *args);
256 void user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
257 		bigtime_t startTime);
258 bool user_debug_exception_occurred(debug_exception_type exception, int signal);
259 bool user_debug_handle_signal(int signal, struct sigaction *handler,
260 		siginfo_t *info, bool deadly);
261 void user_debug_stop_thread();
262 void user_debug_team_created(team_id teamID);
263 void user_debug_team_deleted(team_id teamID, port_id debuggerPort, status_t status,
264 		int signal, team_usage_info* usageInfo);
265 void user_debug_team_exec();
266 void user_debug_update_new_thread_flags(Thread* thread);
267 void user_debug_thread_created(thread_id threadID);
268 void user_debug_thread_deleted(team_id teamID, thread_id threadID, status_t status);
269 void user_debug_thread_exiting(Thread* thread);
270 void user_debug_image_created(const image_info *imageInfo);
271 void user_debug_image_deleted(const image_info *imageInfo);
272 void user_debug_breakpoint_hit(bool software);
273 void user_debug_watchpoint_hit();
274 void user_debug_single_stepped();
275 
276 void user_debug_thread_unscheduled(Thread* thread);
277 void user_debug_thread_scheduled(Thread* thread);
278 
279 
280 // syscalls
281 
282 void		_user_debugger(const char *message);
283 int			_user_disable_debugger(int state);
284 
285 status_t	_user_install_default_debugger(port_id debuggerPort);
286 port_id		_user_install_team_debugger(team_id team, port_id debuggerPort);
287 status_t	_user_remove_team_debugger(team_id team);
288 status_t	_user_debug_thread(thread_id thread);
289 void		_user_wait_for_debugger(void);
290 
291 status_t	_user_set_debugger_breakpoint(void *address, uint32 type,
292 				int32 length, bool watchpoint);
293 status_t	_user_clear_debugger_breakpoint(void *address, bool watchpoint);
294 
295 ssize_t		_user_get_stack_trace(size_t addressCount, addr_t* returnAddresses);
296 
297 #ifdef __cplusplus
298 }	// extern "C"
299 #endif
300 
301 
302 #endif	// _KERNEL_USER_DEBUGGER_H
303