/*
 * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 *
 * Userland debugger support.
 */
#ifndef _KERNEL_USER_DEBUGGER_H
#define _KERNEL_USER_DEBUGGER_H


#include <debugger.h>

#include <arch/user_debugger.h>

#include <timer.h>


// limits
#define B_DEBUG_MIN_PROFILE_INTERVAL			10			/* in us */
#define B_DEBUG_STACK_TRACE_DEPTH				128
#define B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD	70			/* in % */
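
// Worked example (illustrative, with assumed numbers): at the default
// threshold of 70%, a sample buffer that can hold 10000 samples is flushed
// once it contains 10000 * B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD / 100
// = 7000 samples.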


struct BreakpointManager;
struct ConditionVariable;
struct function_profile_info;

namespace BKernel {
	struct Thread;
}

using BKernel::Thread;


// Team related debugging data.
//
// Locking policy:
// 1) When accessing the structure one must ensure that the structure
//    (i.e. the struct Team it lives in) isn't deleted. Thus one either needs
//    to get a team reference, lock the team, or access the structure from a
//    thread of that team.
// 2) Access to the `flags' field is atomic. Reading via atomic_get()
//    requires no further locks (beyond those in point 1). Writing requires
//    `lock' to be held and must be done atomically, too
//    (atomic_{set,and,or}()). Reading with `lock' held doesn't need to be
//    done atomically.
// 3) Access to all other fields (read or write) requires `lock' to be held.
// 4) The locking order is scheduler lock -> Team -> Thread
//    -> team_debug_info::lock -> thread_debug_info::lock.
//
struct team_debug_info {
	spinlock	lock;
		// Guards the remaining fields. Should always be the innermost lock
		// to be acquired/released, save for thread_debug_info::lock.

	int32		flags;
		// Set atomically, so reading atomically is OK even when the lock is
		// not held (at least if it is certain that the team struct won't go
		// away).
	team_id		debugger_team;
	port_id		debugger_port;
	thread_id	nub_thread;
	port_id		nub_port;
		// the port the nub thread is waiting on for commands from the debugger
	sem_id		debugger_write_lock;
		// synchronizes writes to the debugger port with the setting (but not
		// clearing) of the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag
	thread_id	causing_thread;
		// thread that caused the debugger to be attached; -1 for manual
		// debugger attachment (or no debugger installed)
	int32		image_event;
		// counter incremented whenever an image is created/deleted

	struct ConditionVariable* debugger_changed_condition;
		// Set to a condition variable when the debugger is about to be
		// changed. Anyone else who wants to change the debugger needs to
		// wait until the field is unset again (waiting for the condition
		// variable and rechecking). The field and the condition variable
		// are protected by 'lock'. After the condition variable has been
		// set, the team is guaranteed not to be deleted (until it is unset
		// again); it might be removed from the team hash table, though.

	struct BreakpointManager* breakpoint_manager;
		// manages hard- and software breakpoints

	struct arch_team_debug_info	arch_info;
};
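
// A minimal usage sketch of the flags protocol above (illustrative only, not
// part of the original header; `teamDebugInfo' is a hypothetical pointer and
// the caller is assumed to keep the Team alive as per point 1):
//
//	// lock-free read:
//	int32 teamDebugFlags = atomic_get(&teamDebugInfo->flags);
//
//	// write: hold `lock' (with interrupts disabled) and update atomically:
//	cpu_status state = disable_interrupts();
//	acquire_spinlock(&teamDebugInfo->lock);
//	atomic_or(&teamDebugInfo->flags, B_TEAM_DEBUG_DEBUGGER_INSTALLED);
//	release_spinlock(&teamDebugInfo->lock);
//	restore_interrupts(state);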

// Thread related debugging data.
//
// Locking policy:
// 1) When accessing the structure one must ensure that the structure
//    (i.e. the struct Thread it lives in) isn't deleted. Thus one either
//    needs to get a thread reference, lock the thread, or access the
//    structure of the current thread.
// 2) Access to the `flags' field is atomic. Reading via atomic_get()
//    requires no further locks (beyond those in point 1). Writing requires
//    `lock' to be held and must be done atomically, too
//    (atomic_{set,and,or}()). Reading with `lock' held doesn't need to be
//    done atomically.
// 3) Access to all other fields (read or write) requires `lock' to be held.
// 4) The locking order is scheduler lock -> Team -> Thread
//    -> team_debug_info::lock -> thread_debug_info::lock.
//
struct thread_debug_info {
	spinlock	lock;
		// Guards the remaining fields. Should always be the innermost lock
		// to be acquired/released.

	int32		flags;
		// Set atomically, so reading atomically is OK even when the lock is
		// not held (at least if it is certain that the thread struct won't
		// go away).
	port_id		debug_port;
		// the port the thread is waiting on for commands from the nub thread

	sigset_t	ignore_signals;
		// the signals the debugger is not interested in
	sigset_t	ignore_signals_once;
		// the signals the debugger does not wish to be notified of the next
		// time they occur

	// profiling related part; if samples != NULL, the thread is profiled
	struct {
		bigtime_t		interval;
			// sampling interval
		area_id			sample_area;
			// cloned sample buffer area
		addr_t*			samples;
			// sample buffer
		int32			max_samples;
			// maximum number of samples the buffer can hold
		int32			flush_threshold;
			// number of samples at which the buffer is flushed (if possible)
		int32			sample_count;
			// number of samples the buffer currently holds
		int32			stack_depth;
			// number of return addresses to record per timer interval
		int32			dropped_ticks;
			// number of ticks that had to be dropped when the sample buffer was
			// full and couldn't be flushed
		int32			image_event;
			// number of the image event when the first sample was written into
			// the buffer
		int32			last_image_event;
			// number of the image event when the last sample was written into
			// the buffer
		bool			variable_stack_depth;
			// record a variable number of samples per hit
		bool			buffer_full;
			// indicates that the sample buffer is full
		union {
			bigtime_t	interval_left;
				// when unscheduled: the time left of the current sampling
				// interval
			bigtime_t	timer_end;
				// when running: the absolute time the timer is supposed to go
				// off
		};
		timer*			installed_timer;
			// when running and being profiled: the CPU's profiling timer
	} profile;

	struct arch_thread_debug_info	arch_info;
};
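
// Sketch (illustrative; the helper is hypothetical): per the comment above
// the profile struct, a thread is being profiled iff its sample buffer is
// set:
//
//	static inline bool
//	is_thread_profiled(const struct thread_debug_info* info)
//	{
//		return info->profile.samples != NULL;
//	}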

#define GRAB_TEAM_DEBUG_INFO_LOCK(info)		acquire_spinlock(&(info).lock)
#define RELEASE_TEAM_DEBUG_INFO_LOCK(info)	release_spinlock(&(info).lock)
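
// Usage sketch for the macros above (illustrative): as with any spinlock,
// interrupts must be disabled while it is held, and per the locking order it
// should be the innermost lock taken (save for thread_debug_info::lock):
//
//	cpu_status state = disable_interrupts();
//	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
//	// ... access fields guarded by the lock ...
//	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
//	restore_interrupts(state);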

// team debugging flags (user-specifiable flags are in <debugger.h>)
enum {
	B_TEAM_DEBUG_DEBUGGER_INSTALLED		= 0x0001,
	B_TEAM_DEBUG_DEBUGGER_HANDOVER		= 0x0002,	// marked for hand-over
	B_TEAM_DEBUG_DEBUGGER_HANDING_OVER	= 0x0004,	// handing over
	B_TEAM_DEBUG_DEBUGGER_DISABLED		= 0x0008,

	B_TEAM_DEBUG_KERNEL_FLAG_MASK		= 0xffff,

	B_TEAM_DEBUG_DEFAULT_FLAGS			= 0,
	B_TEAM_DEBUG_INHERITED_FLAGS		= B_TEAM_DEBUG_DEBUGGER_DISABLED
};
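
// Illustrative use of the mask (assuming, as the comment above suggests, that
// the user-specifiable flags from <debugger.h> occupy the bits above the
// kernel mask):
//
//	int32 kernelFlags = flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK;
//	int32 userFlags = flags & ~(int32)B_TEAM_DEBUG_KERNEL_FLAG_MASK;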

// thread debugging flags (user-specifiable flags are in <debugger.h>)
enum {
	B_THREAD_DEBUG_INITIALIZED			= 0x0001,
	B_THREAD_DEBUG_DYING				= 0x0002,
	B_THREAD_DEBUG_STOP					= 0x0004,
	B_THREAD_DEBUG_STOPPED				= 0x0008,
	B_THREAD_DEBUG_SINGLE_STEP			= 0x0010,
	B_THREAD_DEBUG_NOTIFY_SINGLE_STEP	= 0x0020,

	B_THREAD_DEBUG_NUB_THREAD			= 0x0040,	// marks the nub thread

	B_THREAD_DEBUG_KERNEL_FLAG_MASK		= 0xffff,

	B_THREAD_DEBUG_DEFAULT_FLAGS		= 0
};

// messages sent from the debug nub thread to a debugged thread
typedef enum {
	B_DEBUGGED_THREAD_MESSAGE_CONTINUE	= 0,
	B_DEBUGGED_THREAD_SET_CPU_STATE,
	B_DEBUGGED_THREAD_GET_CPU_STATE,
	B_DEBUGGED_THREAD_DEBUGGER_CHANGED
} debugged_thread_message;

typedef struct {
	uint32	handle_event;
	bool	single_step;
} debugged_thread_continue;

typedef struct {
	port_id	reply_port;
} debugged_thread_get_cpu_state;

typedef struct {
	debug_cpu_state	cpu_state;
} debugged_thread_set_cpu_state;

typedef union {
	debugged_thread_continue		continue_thread;
	debugged_thread_set_cpu_state	set_cpu_state;
	debugged_thread_get_cpu_state	get_cpu_state;
} debugged_thread_message_data;
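
// Receive-side sketch (illustrative; `threadDebugInfo' is a hypothetical
// pointer): a stopped thread blocks on its debug_port and dispatches on the
// message code:
//
//	debugged_thread_message_data message;
//	int32 code;
//	ssize_t bytesRead = read_port(threadDebugInfo->debug_port, &code,
//		&message, sizeof(message));
//	if (bytesRead >= 0 && code == B_DEBUGGED_THREAD_MESSAGE_CONTINUE
//		&& message.continue_thread.single_step) {
//		// arrange for a single step before resuming the thread
//	}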


// internal messages sent to the nub thread
typedef enum {
	B_DEBUG_MESSAGE_HANDED_OVER		= -1
} debug_nub_kernel_message;


#ifdef __cplusplus
extern "C" {
#endif

// service calls

void clear_team_debug_info(struct team_debug_info *info, bool initLock);

void init_thread_debug_info(struct thread_debug_info *info);
void clear_thread_debug_info(struct thread_debug_info *info, bool dying);
void destroy_thread_debug_info(struct thread_debug_info *info);

void user_debug_prepare_for_exec();
void user_debug_finish_after_exec();

void init_user_debug();


// debug event callbacks

void user_debug_pre_syscall(uint32 syscall, void *args);
void user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
		bigtime_t startTime);
bool user_debug_exception_occurred(debug_exception_type exception, int signal);
bool user_debug_handle_signal(int signal, struct sigaction *handler,
		siginfo_t *info, bool deadly);
void user_debug_stop_thread();
void user_debug_team_created(team_id teamID);
void user_debug_team_deleted(team_id teamID, port_id debuggerPort,
		status_t status, team_usage_info* usageInfo);
void user_debug_team_exec();
void user_debug_update_new_thread_flags(Thread* thread);
void user_debug_thread_created(thread_id threadID);
void user_debug_thread_deleted(team_id teamID, thread_id threadID,
		status_t status);
void user_debug_thread_exiting(Thread* thread);
void user_debug_image_created(const image_info *imageInfo);
void user_debug_image_deleted(const image_info *imageInfo);
void user_debug_breakpoint_hit(bool software);
void user_debug_watchpoint_hit();
void user_debug_single_stepped();

void user_debug_thread_unscheduled(Thread* thread);
void user_debug_thread_scheduled(Thread* thread);


// syscalls

void		_user_debugger(const char *message);
int			_user_disable_debugger(int state);

status_t	_user_install_default_debugger(port_id debuggerPort);
port_id		_user_install_team_debugger(team_id team, port_id debuggerPort);
status_t	_user_remove_team_debugger(team_id team);
status_t	_user_debug_thread(thread_id thread);
void		_user_wait_for_debugger(void);

status_t	_user_set_debugger_breakpoint(void *address, uint32 type,
				int32 length, bool watchpoint);
status_t	_user_clear_debugger_breakpoint(void *address, bool watchpoint);

ssize_t		_user_get_stack_trace(size_t addressCount, addr_t* returnAddresses);

#ifdef __cplusplus
}	// extern "C"
#endif


#endif	// _KERNEL_USER_DEBUGGER_H
301