xref: /haiku/src/system/kernel/debug/user_debugger.cpp (revision 445d4fd926c569e7b9ae28017da86280aaecbae2)
1 /*
2  * Copyright 2005-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2015, Rene Gollent, rene@gollent.com.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 
8 #include <errno.h>
9 #include <signal.h>
10 #include <stdlib.h>
11 #include <stdio.h>
12 #include <string.h>
13 
14 #include <algorithm>
15 
16 #include <arch/debug.h>
17 #include <arch/user_debugger.h>
18 #include <core_dump.h>
19 #include <cpu.h>
20 #include <debugger.h>
21 #include <kernel.h>
22 #include <KernelExport.h>
23 #include <kscheduler.h>
24 #include <ksignal.h>
25 #include <ksyscalls.h>
26 #include <port.h>
27 #include <sem.h>
28 #include <team.h>
29 #include <thread.h>
30 #include <thread_types.h>
31 #include <user_debugger.h>
32 #include <vm/vm.h>
33 #include <vm/vm_types.h>
34 
35 #include <AutoDeleter.h>
36 #include <util/AutoLock.h>
37 #include <util/ThreadAutoLock.h>
38 
39 #include "BreakpointManager.h"
40 
41 
42 //#define TRACE_USER_DEBUGGER
43 #ifdef TRACE_USER_DEBUGGER
44 #	define TRACE(x) dprintf x
45 #else
46 #	define TRACE(x) ;
47 #endif
48 
49 
50 // TODO: Since the introduction of team_debug_info::debugger_changed_condition
51 // there's some potential for simplifications. E.g. clear_team_debug_info() and
52 // destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus
53 // arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()).
54 
55 
56 static port_id sDefaultDebuggerPort = -1;
57 	// accessed atomically
58 
59 static timer sProfilingTimers[SMP_MAX_CPUS];
60 	// a profiling timer for each CPU -- used when a profiled thread is running
61 	// on that CPU
62 
63 
64 static void schedule_profiling_timer(Thread* thread, bigtime_t interval);
65 static int32 profiling_event(timer* unused);
66 static status_t ensure_debugger_installed();
67 static void get_team_debug_info(team_debug_info &teamDebugInfo);
68 
69 
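/*!	Writes a message to the given \a port, blocking in a way that can only be
	interrupted by a kill signal (a thin wrapper around write_port_etc() with
	B_KILL_CAN_INTERRUPT).
*/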
70 static inline status_t
71 kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
72 	size_t bufferSize)
73 {
74 	return write_port_etc(port, code, buffer, bufferSize, B_KILL_CAN_INTERRUPT,
75 		0);
76 }
77 
78 
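/*!	Sends a message to the team's debugger \a port while holding the team's
	debugger write lock. After acquiring the lock the team debug info is
	re-checked; nothing is sent if the debugger has changed in the meantime or
	a handover to a new debugger is pending. If \a dontWait is \c true, both
	the lock acquisition and the port write use a zero relative timeout
	instead of blocking.
*/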
79 static status_t
80 debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
81 	bool dontWait)
82 {
83 	TRACE(("debugger_write(): thread: %" B_PRId32 ", team %" B_PRId32 ", "
84 		"port: %" B_PRId32 ", code: %" B_PRIx32 ", message: %p, size: %lu, "
85 		"dontWait: %d\n", thread_get_current_thread()->id,
86 		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
87 		dontWait));
88 
89 	status_t error = B_OK;
90 
91 	// get the team debug info
92 	team_debug_info teamDebugInfo;
93 	get_team_debug_info(teamDebugInfo);
94 	sem_id writeLock = teamDebugInfo.debugger_write_lock;
95 
96 	// get the write lock
97 	TRACE(("debugger_write(): acquiring write lock...\n"));
98 	error = acquire_sem_etc(writeLock, 1,
99 		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
100 	if (error != B_OK) {
101 		TRACE(("debugger_write() done1: %" B_PRIx32 "\n", error));
102 		return error;
103 	}
104 
105 	// re-get the team debug info
106 	get_team_debug_info(teamDebugInfo);
107 
108 	if (teamDebugInfo.debugger_port != port
109 		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
110 		// The debugger has changed in the meantime or we are about to be
111 		// handed over to a new debugger. In either case we don't send the
112 		// message.
113 		TRACE(("debugger_write(): %s\n",
114 			(teamDebugInfo.debugger_port != port ? "debugger port changed"
115 				: "handover flag set")));
116 	} else {
117 		TRACE(("debugger_write(): writing to port...\n"));
118 
119 		error = write_port_etc(port, code, buffer, bufferSize,
120 			dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
121 	}
122 
123 	// release the write lock
124 	release_sem(writeLock);
125 
126 	TRACE(("debugger_write() done: %" B_PRIx32 "\n", error));
127 
128 	return error;
129 }
130 
131 
132 /*!	Updates the thread::flags field according to what user debugger flags are
133 	set for the thread.
134 	Interrupts must be disabled and the thread's debug info lock must be held.
135 */
136 static void
137 update_thread_user_debug_flag(Thread* thread)
138 {
139 	if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0)
140 		atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD);
141 	else
142 		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD);
143 }
144 
145 
146 /*!	Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the
147 	given thread.
148 	Interrupts must be disabled and the thread debug info lock must be held.
149 */
150 static void
151 update_thread_breakpoints_flag(Thread* thread)
152 {
153 	Team* team = thread->team;
154 
155 	if (arch_has_breakpoints(&team->debug_info.arch_info))
156 		atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
157 	else
158 		atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
159 }
160 
161 
162 /*!	Updates the Thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
163 	threads of the current team.
164 */
165 static void
166 update_threads_breakpoints_flag()
167 {
168 	Team* team = thread_get_current_thread()->team;
169 
170 	TeamLocker teamLocker(team);
171 
172 	Thread* thread = team->thread_list;
173 
174 	if (arch_has_breakpoints(&team->debug_info.arch_info)) {
175 		for (; thread != NULL; thread = thread->team_next)
176 			atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
177 	} else {
178 		for (; thread != NULL; thread = thread->team_next)
179 			atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
180 	}
181 }
182 
183 
184 /*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of the
185 	given thread, which must be the current thread.
186 */
187 static void
188 update_thread_debugger_installed_flag(Thread* thread)
189 {
190 	Team* team = thread->team;
191 
192 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
193 		atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
194 	else
195 		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
196 }
197 
198 
199 /*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all
200 	threads of the given team.
201 	The team's lock must be held.
202 */
203 static void
204 update_threads_debugger_installed_flag(Team* team)
205 {
206 	Thread* thread = team->thread_list;
207 
208 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
209 		for (; thread != NULL; thread = thread->team_next)
210 			atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
211 	} else {
212 		for (; thread != NULL; thread = thread->team_next)
213 			atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
214 	}
215 }
216 
217 
218 /**
219  *	For the first initialization the function must be called with \a initLock
220  *	set to \c true. If another thread might access the structure at the same
221  *	time, `lock' must be held when calling the function.
222  */
223 void
224 clear_team_debug_info(struct team_debug_info *info, bool initLock)
225 {
226 	if (info) {
227 		arch_clear_team_debug_info(&info->arch_info);
228 		atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
229 		info->debugger_team = -1;
230 		info->debugger_port = -1;
231 		info->nub_thread = -1;
232 		info->nub_port = -1;
233 		info->debugger_write_lock = -1;
234 		info->causing_thread = -1;
235 		info->image_event = 0;
236 		info->breakpoint_manager = NULL;
237 
238 		if (initLock) {
239 			B_INITIALIZE_SPINLOCK(&info->lock);
240 			info->debugger_changed_condition = NULL;
241 		}
242 	}
243 }
244 
245 /**
246  *  `lock' must not be held nor may interrupts be disabled.
247  *  \a info must not be a member of a team struct (or the team struct must no
248  *  longer be accessible, i.e. the team should already be removed).
249  *
250  *	In case the team is still accessible, the procedure is:
251  *	1. get `lock'
252  *	2. copy the team debug info on stack
253  *	3. call clear_team_debug_info() on the team debug info
254  *	4. release `lock'
255  *	5. call destroy_team_debug_info() on the copied team debug info
256  */
257 static void
258 destroy_team_debug_info(struct team_debug_info *info)
259 {
260 	if (info) {
261 		arch_destroy_team_debug_info(&info->arch_info);
262 
263 		// delete the breakpoint manager
264 		delete info->breakpoint_manager;
265 		info->breakpoint_manager = NULL;
266 
267 		// delete the debugger port write lock
268 		if (info->debugger_write_lock >= 0) {
269 			delete_sem(info->debugger_write_lock);
270 			info->debugger_write_lock = -1;
271 		}
272 
273 		// delete the nub port
274 		if (info->nub_port >= 0) {
275 			set_port_owner(info->nub_port, B_CURRENT_TEAM);
276 			delete_port(info->nub_port);
277 			info->nub_port = -1;
278 		}
279 
280 		// wait for the nub thread
281 		if (info->nub_thread >= 0) {
282 			if (info->nub_thread != thread_get_current_thread()->id) {
283 				int32 result;
284 				wait_for_thread(info->nub_thread, &result);
285 			}
286 
287 			info->nub_thread = -1;
288 		}
289 
290 		atomic_set(&info->flags, 0);
291 		info->debugger_team = -1;
292 		info->debugger_port = -1;
293 		info->causing_thread = -1;
294 		info->image_event = -1;
295 	}
296 }
297 
298 
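/*!	Initializes the given thread debug info to its default (not debugged)
	state: the lock is initialized, the architecture specific info cleared,
	and the debug port, signal ignore counters, and profiling fields reset.
*/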
299 void
300 init_thread_debug_info(struct thread_debug_info *info)
301 {
302 	if (info) {
303 		B_INITIALIZE_SPINLOCK(&info->lock);
304 		arch_clear_thread_debug_info(&info->arch_info);
305 		info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
306 		info->debug_port = -1;
307 		info->ignore_signals = 0;
308 		info->ignore_signals_once = 0;
309 		info->profile.sample_area = -1;
310 		info->profile.samples = NULL;
311 		info->profile.buffer_full = false;
312 		info->profile.installed_timer = NULL;
313 	}
314 }
315 
316 
317 /*!	Clears the debug info for the current thread.
318 	Invoked with thread debug info lock being held.
319 */
320 void
321 clear_thread_debug_info(struct thread_debug_info *info, bool dying)
322 {
323 	if (info) {
324 		// cancel profiling timer
325 		if (info->profile.installed_timer != NULL) {
326 			cancel_timer(info->profile.installed_timer);
327 			info->profile.installed_timer = NULL;
328 		}
329 
330 		arch_clear_thread_debug_info(&info->arch_info);
331 		atomic_set(&info->flags,
332 			B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ? B_THREAD_DEBUG_DYING : 0));
333 		info->debug_port = -1;
334 		info->ignore_signals = 0;
335 		info->ignore_signals_once = 0;
336 		info->profile.sample_area = -1;
337 		info->profile.samples = NULL;
338 		info->profile.buffer_full = false;
339 	}
340 }
341 
342 
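/*!	Frees the resources associated with the given thread debug info: the
	profiling sample area is unlocked and deleted, the architecture specific
	info destroyed, and the thread's debug port deleted.
*/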
343 void
344 destroy_thread_debug_info(struct thread_debug_info *info)
345 {
346 	if (info) {
347 		area_id sampleArea = info->profile.sample_area;
348 		if (sampleArea >= 0) {
349 			area_info areaInfo;
350 			if (get_area_info(sampleArea, &areaInfo) == B_OK) {
351 				unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
352 				delete_area(sampleArea);
353 			}
354 		}
355 
356 		arch_destroy_thread_debug_info(&info->arch_info);
357 
358 		if (info->debug_port >= 0) {
359 			delete_port(info->debug_port);
360 			info->debug_port = -1;
361 		}
362 
363 		info->ignore_signals = 0;
364 		info->ignore_signals_once = 0;
365 
366 		atomic_set(&info->flags, 0);
367 	}
368 }
369 
370 
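/*!	Prepares a change of the debugger for the team given by \a teamID: the
	team is looked up (\c B_CURRENT_TEAM is resolved to the caller's team),
	the kernel team is refused, and \a condition is installed as the team's
	debugger_changed_condition. If another debugger change is already in
	progress, the function waits for it to finish and retries. On success the
	team is returned in \a team and finish_debugger_change() must be called
	when done.
*/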
371 static status_t
372 prepare_debugger_change(team_id teamID, ConditionVariable& condition,
373 	Team*& team)
374 {
375 	// We look up the team by ID, even in the case of the current team, so we
376 	// can be sure that the team is not already dying.
377 	if (teamID == B_CURRENT_TEAM)
378 		teamID = thread_get_current_thread()->team->id;
379 
380 	while (true) {
381 		// get the team
382 		team = Team::GetAndLock(teamID);
383 		if (team == NULL)
384 			return B_BAD_TEAM_ID;
385 		BReference<Team> teamReference(team, true);
386 		TeamLocker teamLocker(team, true);
387 
388 		// don't allow messing with the kernel team
389 		if (team == team_get_kernel_team())
390 			return B_NOT_ALLOWED;
391 
392 		// check whether the condition is already set
393 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
394 
395 		if (team->debug_info.debugger_changed_condition == NULL) {
396 			// nobody there yet -- set our condition variable and be done
397 			team->debug_info.debugger_changed_condition = &condition;
398 			return B_OK;
399 		}
400 
401 		// we'll have to wait
402 		ConditionVariableEntry entry;
403 		team->debug_info.debugger_changed_condition->Add(&entry);
404 
405 		debugInfoLocker.Unlock();
406 		teamLocker.Unlock();
407 
408 		entry.Wait();
409 	}
410 }
411 
412 
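/*!	Same as the team ID based version above, but for an already known
	\a team: installs \a condition as the team's debugger_changed_condition,
	waiting for a possibly in-progress debugger change to finish first.
*/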
413 static void
414 prepare_debugger_change(Team* team, ConditionVariable& condition)
415 {
416 	while (true) {
417 		// check whether the condition is already set
418 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
419 
420 		if (team->debug_info.debugger_changed_condition == NULL) {
421 			// nobody there yet -- set our condition variable and be done
422 			team->debug_info.debugger_changed_condition = &condition;
423 			return;
424 		}
425 
426 		// we'll have to wait
427 		ConditionVariableEntry entry;
428 		team->debug_info.debugger_changed_condition->Add(&entry);
429 
430 		debugInfoLocker.Unlock();
431 
432 		entry.Wait();
433 	}
434 }
435 
436 
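/*!	Finishes a debugger change prepared with prepare_debugger_change():
	unsets the team's debugger_changed_condition and wakes up all threads
	waiting on it. Typical usage (sketch, cf. thread_hit_debug_event()):
	\code
	ConditionVariable condition;
	condition.Init(team, "debug change condition");
	prepare_debugger_change(team, condition);
	// ... inspect or modify team->debug_info ...
	finish_debugger_change(team);
	\endcode
*/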
437 static void
438 finish_debugger_change(Team* team)
439 {
440 	// unset our condition variable and notify all threads waiting on it
441 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
442 
443 	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
444 	team->debug_info.debugger_changed_condition = NULL;
445 
446 	condition->NotifyAll();
447 }
448 
449 
450 void
451 user_debug_prepare_for_exec()
452 {
453 	Thread *thread = thread_get_current_thread();
454 	Team *team = thread->team;
455 
456 	// If a debugger is installed for the team and the thread debug stuff
457 	// initialized, change the ownership of the debug port for the thread
458 	// to the kernel team, since exec_team() deletes all ports owned by this
459 	// team. We change the ownership back later.
460 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
461 		// get the port
462 		port_id debugPort = -1;
463 
464 		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
465 
466 		if ((thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0)
467 			debugPort = thread->debug_info.debug_port;
468 
469 		threadDebugInfoLocker.Unlock();
470 
471 		// set the new port ownership
472 		if (debugPort >= 0)
473 			set_port_owner(debugPort, team_get_kernel_team_id());
474 	}
475 }
476 
477 
478 void
479 user_debug_finish_after_exec()
480 {
481 	Thread *thread = thread_get_current_thread();
482 	Team *team = thread->team;
483 
484 	// If a debugger is installed for the team and the thread debug stuff
485 	// initialized for this thread, change the ownership of its debug port
486 	// back to this team.
487 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
488 		// get the port
489 		port_id debugPort = -1;
490 
491 		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
492 
493 		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
494 			debugPort = thread->debug_info.debug_port;
495 
496 		threadDebugInfoLocker.Unlock();
497 
498 		// set the new port ownership
499 		if (debugPort >= 0)
500 			set_port_owner(debugPort, team->id);
501 	}
502 }
503 
504 
505 void
506 init_user_debug()
507 {
508 	#ifdef ARCH_INIT_USER_DEBUG
509 		ARCH_INIT_USER_DEBUG();
510 	#endif
511 }
512 
513 
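/*!	Copies the current thread's team debug info into \a teamDebugInfo,
	holding the team debug info lock with interrupts disabled while copying.
*/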
514 static void
515 get_team_debug_info(team_debug_info &teamDebugInfo)
516 {
517 	Thread *thread = thread_get_current_thread();
518 
519 	cpu_status state = disable_interrupts();
520 	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
521 
522 	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));
523 
524 	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
525 	restore_interrupts(state);
526 }
527 
528 
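/*!	The actual debug loop for a thread that has hit a debug event: a
	per-thread debug port is created if the thread doesn't have one yet, the
	thread is marked stopped, the event \a message is sent to the team's
	debugger (if one is installed), and commands arriving on the debug port
	are processed until the debugger tells the thread to continue or the
	debugger changes. In the latter case \a restart is set to \c true and the
	caller is expected to invoke the function again. Returns the
	B_THREAD_DEBUG_* handling result requested by the debugger, or an error
	code.
*/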
529 static status_t
530 thread_hit_debug_event_internal(debug_debugger_message event,
531 	const void *message, int32 size, bool requireDebugger, bool &restart)
532 {
533 	restart = false;
534 	Thread *thread = thread_get_current_thread();
535 
536 	TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", event: %" B_PRIu32
537 		", message: %p, size: %" B_PRId32 "\n", thread->id, (uint32)event,
538 		message, size));
539 
540 	// check whether there's a debug port already
541 	bool setPort = !(atomic_get(&thread->debug_info.flags)
542 		& B_THREAD_DEBUG_INITIALIZED);
543 
544 	// create a port, if there is none yet
545 	port_id port = -1;
546 	if (setPort) {
547 		char nameBuffer[128];
548 		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %" B_PRId32,
549 			thread->id);
550 
551 		port = create_port(1, nameBuffer);
552 		if (port < 0) {
553 			dprintf("thread_hit_debug_event(): Failed to create debug port: "
554 				"%s\n", strerror(port));
555 			return port;
556 		}
557 	}
558 
559 	// check the debug info structures once more: get the debugger port, set
560 	// the thread's debug port, and update the thread's debug flags
561 	port_id deletePort = port;
562 	port_id debuggerPort = -1;
563 	port_id nubPort = -1;
564 	status_t error = B_OK;
565 	cpu_status state = disable_interrupts();
566 	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
567 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
568 
569 	uint32 threadFlags = thread->debug_info.flags;
570 	threadFlags &= ~B_THREAD_DEBUG_STOP;
571 	bool debuggerInstalled
572 		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
573 	if (thread->id == thread->team->debug_info.nub_thread) {
574 		// Ugh, we're the nub thread. We shouldn't be here.
575 		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %" B_PRId32
576 			"\n", thread->id));
577 
578 		error = B_ERROR;
579 	} else if (debuggerInstalled || !requireDebugger) {
580 		if (debuggerInstalled) {
581 			debuggerPort = thread->team->debug_info.debugger_port;
582 			nubPort = thread->team->debug_info.nub_port;
583 		}
584 
585 		if (setPort) {
586 			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
587 				// someone created a port for us (the port we've created will
588 				// be deleted below)
589 				port = thread->debug_info.debug_port;
590 			} else {
591 				thread->debug_info.debug_port = port;
592 				deletePort = -1;	// keep the port
593 				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
594 			}
595 		} else {
596 			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
597 				port = thread->debug_info.debug_port;
598 			} else {
599 				// someone deleted our port
600 				error = B_ERROR;
601 			}
602 		}
603 	} else
604 		error = B_ERROR;
605 
606 	// update the flags
607 	if (error == B_OK)
608 		threadFlags |= B_THREAD_DEBUG_STOPPED;
609 	atomic_set(&thread->debug_info.flags, threadFlags);
610 
611 	update_thread_user_debug_flag(thread);
612 
613 	threadDebugInfoLocker.Unlock();
614 	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
615 	restore_interrupts(state);
616 
617 	// delete the superfluous port
618 	if (deletePort >= 0)
619 		delete_port(deletePort);
620 
621 	if (error != B_OK) {
622 		TRACE(("thread_hit_debug_event() error: thread: %" B_PRId32 ", error: "
623 			"%" B_PRIx32 "\n", thread->id, error));
624 		return error;
625 	}
626 
627 	// send a message to the debugger port
628 	if (debuggerInstalled) {
629 		// update the message's origin info first
630 		debug_origin *origin = (debug_origin *)message;
631 		origin->thread = thread->id;
632 		origin->team = thread->team->id;
633 		origin->nub_port = nubPort;
634 
635 		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", sending "
636 			"message to debugger port %" B_PRId32 "\n", thread->id,
637 			debuggerPort));
638 
639 		error = debugger_write(debuggerPort, event, message, size, false);
640 	}
641 
642 	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
643 	bool singleStep = false;
644 
645 	if (error == B_OK) {
646 		bool done = false;
647 		while (!done) {
648 			// read a command from the debug port
649 			int32 command;
650 			debugged_thread_message_data commandMessage;
651 			ssize_t commandMessageSize = read_port_etc(port, &command,
652 				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
653 				0);
654 
655 			if (commandMessageSize < 0) {
656 				error = commandMessageSize;
657 				TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed "
658 					"to receive message from port %" B_PRId32 ": %" B_PRIx32 "\n",
659 					thread->id, port, error));
660 				break;
661 			}
662 
663 			switch (command) {
664 				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
665 					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
666 						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
667 						thread->id));
668 					result = commandMessage.continue_thread.handle_event;
669 
670 					singleStep = commandMessage.continue_thread.single_step;
671 					done = true;
672 					break;
673 
674 				case B_DEBUGGED_THREAD_SET_CPU_STATE:
675 				{
676 					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
677 						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
678 						thread->id));
679 					arch_set_debug_cpu_state(
680 						&commandMessage.set_cpu_state.cpu_state);
681 
682 					break;
683 				}
684 
685 				case B_DEBUGGED_THREAD_GET_CPU_STATE:
686 				{
687 					port_id replyPort = commandMessage.get_cpu_state.reply_port;
688 
689 					// prepare the message
690 					debug_nub_get_cpu_state_reply replyMessage;
691 					replyMessage.error = B_OK;
692 					replyMessage.message = event;
693 					arch_get_debug_cpu_state(&replyMessage.cpu_state);
694 
695 					// send it
696 					error = kill_interruptable_write_port(replyPort, event,
697 						&replyMessage, sizeof(replyMessage));
698 
699 					break;
700 				}
701 
702 				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
703 				{
704 					// Check whether the debugger really changed, i.e. is
705 					// different from the one we know.
706 					team_debug_info teamDebugInfo;
707 					get_team_debug_info(teamDebugInfo);
708 
709 					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
710 						if (!debuggerInstalled
711 							|| teamDebugInfo.debugger_port != debuggerPort) {
712 							// debugger was installed or has changed: restart
713 							// this function
714 							restart = true;
715 							done = true;
716 						}
717 					} else {
718 						if (debuggerInstalled) {
719 							// debugger is gone: continue the thread normally
720 							done = true;
721 						}
722 					}
723 
724 					break;
725 				}
726 			}
727 		}
728 	} else {
729 		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed to send "
730 			"message to debugger port %" B_PRId32 ": %" B_PRIx32 "\n",
731 			thread->id, debuggerPort, error));
732 	}
733 
734 	// update the thread debug info
735 	bool destroyThreadInfo = false;
736 	thread_debug_info threadDebugInfo;
737 
738 	state = disable_interrupts();
739 	threadDebugInfoLocker.Lock();
740 
741 	// check whether the team is still being debugged
742 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
743 	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
744 		// update the single-step flag
745 		if (singleStep) {
746 			atomic_or(&thread->debug_info.flags,
747 				B_THREAD_DEBUG_SINGLE_STEP);
748 			atomic_or(&thread->flags, THREAD_FLAGS_SINGLE_STEP);
749 		} else {
750 			atomic_and(&thread->debug_info.flags,
751 				~(int32)B_THREAD_DEBUG_SINGLE_STEP);
752 		}
753 
754 		// unset the "stopped" state
755 		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);
756 
757 		update_thread_user_debug_flag(thread);
758 
759 	} else {
760 		// the debugger is gone: cleanup our info completely
761 		threadDebugInfo = thread->debug_info;
762 		clear_thread_debug_info(&thread->debug_info, false);
763 		destroyThreadInfo = true;
764 	}
765 
766 	threadDebugInfoLocker.Unlock();
767 	restore_interrupts(state);
768 
769 	// enable/disable single stepping
770 	arch_update_thread_single_step();
771 
772 	if (destroyThreadInfo)
773 		destroy_thread_debug_info(&threadDebugInfo);
774 
775 	return (error == B_OK ? result : error);
776 }
777 
778 
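/*!	Notifies the team's debugger of the given debug event and blocks the
	current thread in the debug loop until it is told to continue, restarting
	the loop whenever the debugger changes. Before returning, the team's
	breakpoint manager is given a chance to prepare for continuing at the
	interrupted instruction.
*/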
779 static status_t
780 thread_hit_debug_event(debug_debugger_message event, const void *message,
781 	int32 size, bool requireDebugger)
782 {
783 	status_t result;
784 	bool restart;
785 	do {
786 		restart = false;
787 		result = thread_hit_debug_event_internal(event, message, size,
788 			requireDebugger, restart);
789 	} while (result >= 0 && restart);
790 
791 	// Prepare to continue -- we install a debugger change condition, so no one
792 	// will change the debugger while we're playing with the breakpoint manager.
793 	// TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
794 	Team* team = thread_get_current_thread()->team;
795 	ConditionVariable debugChangeCondition;
796 	debugChangeCondition.Init(team, "debug change condition");
797 	prepare_debugger_change(team, debugChangeCondition);
798 
799 	if (team->debug_info.breakpoint_manager != NULL) {
800 		bool isSyscall;
801 		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
802 		if (pc != NULL && !isSyscall)
803 			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
804 	}
805 
806 	finish_debugger_change(team);
807 
808 	return result;
809 }
810 
811 
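/*!	Like thread_hit_debug_event(), but used for events that must not be lost
	(crashes, breakpoints, single steps, etc.): ensure_debugger_installed()
	is called first, so that a debugger is installed for the team before the
	debug loop is entered.
*/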
812 static status_t
813 thread_hit_serious_debug_event(debug_debugger_message event,
814 	const void *message, int32 messageSize)
815 {
816 	// ensure that a debugger is installed for this team
817 	status_t error = ensure_debugger_installed();
818 	if (error != B_OK) {
819 		Thread *thread = thread_get_current_thread();
820 		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
821 			"thread: %" B_PRId32 " (%s): %s\n", thread->id, thread->name,
822 			strerror(error));
823 		return error;
824 	}
825 
826 	// enter the debug loop
827 	return thread_hit_debug_event(event, message, messageSize, true);
828 }
829 
830 
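/*!	Called before a syscall is executed: if a debugger is installed and
	pre-syscall tracing is enabled for the team or the thread, a
	B_DEBUGGER_MESSAGE_PRE_SYSCALL event carrying the syscall number and its
	arguments is delivered to the debugger.
*/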
831 void
832 user_debug_pre_syscall(uint32 syscall, void *args)
833 {
834 	// check whether a debugger is installed
835 	Thread *thread = thread_get_current_thread();
836 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
837 	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
838 		return;
839 
840 	// check whether pre-syscall tracing is enabled for team or thread
841 	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
842 	if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
843 			&& !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
844 		return;
845 	}
846 
847 	// prepare the message
848 	debug_pre_syscall message;
849 	message.syscall = syscall;
850 
851 	// copy the syscall args
852 	if (syscall < (uint32)kSyscallCount) {
853 		if (kSyscallInfos[syscall].parameter_size > 0)
854 			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
855 	}
856 
857 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
858 		sizeof(message), true);
859 }
860 
861 
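/*!	Called after a syscall has been executed: if a debugger is installed and
	post-syscall tracing is enabled for the team or the thread, a
	B_DEBUGGER_MESSAGE_POST_SYSCALL event carrying the syscall number,
	arguments, return value, and timing is delivered to the debugger.
*/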
862 void
863 user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
864 	bigtime_t startTime)
865 {
866 	// check whether a debugger is installed
867 	Thread *thread = thread_get_current_thread();
868 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
869 	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
870 		return;
871 
872 	// check whether post-syscall tracing is enabled for team or thread
873 	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
874 	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
875 			&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
876 		return;
877 	}
878 
879 	// prepare the message
880 	debug_post_syscall message;
881 	message.start_time = startTime;
882 	message.end_time = system_time();
883 	message.return_value = returnValue;
884 	message.syscall = syscall;
885 
886 	// copy the syscall args
887 	if (syscall < (uint32)kSyscallCount) {
888 		if (kSyscallInfos[syscall].parameter_size > 0)
889 			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
890 	}
891 
892 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
893 		sizeof(message), true);
894 }
895 
896 
897 /**	\brief To be called when an unhandled processor exception (error/fault)
898  *		   occurred.
899  *	\param exception The debug_exception_type value identifying the kind of
900  *		   fault.
901  *	\param signal The signal corresponding to the exception.
902  *	\return \c true, if the caller shall continue normally, i.e. usually send
903  *			a deadly signal. \c false, if the debugger insists on continuing
904  *			the program (e.g. because it has removed the cause of the problem).
905  */
906 bool
907 user_debug_exception_occurred(debug_exception_type exception, int signal)
908 {
909 	// First check whether there's a signal handler installed for the signal.
910 	// If so, we don't want to install a debugger for the team. We always send
911 	// the signal instead. An already installed debugger will be notified if
912 	// it has requested signal notifications.
913 	struct sigaction signalAction;
914 	if (sigaction(signal, NULL, &signalAction) == 0
915 		&& signalAction.sa_handler != SIG_DFL) {
916 		return true;
917 	}
918 
919 	// prepare the message
920 	debug_exception_occurred message;
921 	message.exception = exception;
922 	message.signal = signal;
923 
924 	status_t result = thread_hit_serious_debug_event(
925 		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
926 	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
927 }
928 
929 
930 bool
931 user_debug_handle_signal(int signal, struct sigaction *handler, siginfo_t *info,
932 	bool deadly)
933 {
934 	// check whether a debugger is installed and interested in signals
935 	Thread *thread = thread_get_current_thread();
936 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
937 	if (~teamDebugFlags
938 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
939 		return true;
940 	}
941 
942 	// prepare the message
943 	debug_signal_received message;
944 	message.signal = signal;
945 	message.handler = *handler;
946 	message.info = *info;
947 	message.deadly = deadly;
948 
949 	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
950 		&message, sizeof(message), true);
951 	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
952 }
953 
954 
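/*!	Called when the current thread shall stop for the debugger: if the stop
	is actually an emulated single-step notification, it is forwarded to
	user_debug_single_stepped(); otherwise the debug loop is entered with a
	B_DEBUGGER_MESSAGE_THREAD_DEBUGGED event.
*/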
955 void
956 user_debug_stop_thread()
957 {
958 	// check whether this is actually an emulated single-step notification
959 	Thread* thread = thread_get_current_thread();
960 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
961 
962 	bool singleStepped = false;
963 	if ((atomic_and(&thread->debug_info.flags,
964 				~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
965 			& B_THREAD_DEBUG_NOTIFY_SINGLE_STEP) != 0) {
966 		singleStepped = true;
967 	}
968 
969 	threadDebugInfoLocker.Unlock();
970 
971 	if (singleStepped) {
972 		user_debug_single_stepped();
973 	} else {
974 		debug_thread_debugged message;
975 		thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED,
976 			&message, sizeof(message));
977 	}
978 }
979 
980 
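/*!	Called when the current team has created a new team: notifies the
	debugger with a B_DEBUGGER_MESSAGE_TEAM_CREATED event, if it has
	requested team creation events.
*/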
981 void
982 user_debug_team_created(team_id teamID)
983 {
984 	// check whether a debugger is installed and interested in team creation
985 	// events
986 	Thread *thread = thread_get_current_thread();
987 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
988 	if (~teamDebugFlags
989 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
990 		return;
991 	}
992 
993 	// prepare the message
994 	debug_team_created message;
995 	message.new_team = teamID;
996 
997 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
998 		sizeof(message), true);
999 }
1000 
1001 
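/*!	Called when a debugged team has been deleted: sends an asynchronous
	B_DEBUGGER_MESSAGE_TEAM_DELETED message directly to the given debugger
	port, since the team is already gone at this point.
*/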
1002 void
1003 user_debug_team_deleted(team_id teamID, port_id debuggerPort)
1004 {
1005 	if (debuggerPort >= 0) {
1006 		TRACE(("user_debug_team_deleted(team: %" B_PRId32 ", debugger port: "
1007 			"%" B_PRId32 ")\n", teamID, debuggerPort));
1008 
1009 		debug_team_deleted message;
1010 		message.origin.thread = -1;
1011 		message.origin.team = teamID;
1012 		message.origin.nub_port = -1;
1013 		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
1014 			sizeof(message), B_RELATIVE_TIMEOUT, 0);
1015 	}
1016 }
1017 
1018 
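/*!	Called when the current team has completed an exec(): notifies the
	debugger with a B_DEBUGGER_MESSAGE_TEAM_EXEC event including the updated
	image event counter, if it has requested team creation events.
*/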
1019 void
1020 user_debug_team_exec()
1021 {
1022 	// check whether a debugger is installed and interested in team creation
1023 	// events
1024 	Thread *thread = thread_get_current_thread();
1025 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1026 	if (~teamDebugFlags
1027 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
1028 		return;
1029 	}
1030 
1031 	// prepare the message
1032 	debug_team_exec message;
1033 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1034 		+ 1;
1035 
1036 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
1037 		sizeof(message), true);
1038 }
1039 
1040 
1041 /*!	Called by a new userland thread to update the debugging related flags of
1042 	\c Thread::flags before the thread first enters userland.
1043 	\param thread The calling thread.
1044 */
1045 void
1046 user_debug_update_new_thread_flags(Thread* thread)
1047 {
1048 	// lock it and update its flags
1049 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1050 
1051 	update_thread_user_debug_flag(thread);
1052 	update_thread_breakpoints_flag(thread);
1053 	update_thread_debugger_installed_flag(thread);
1054 }
1055 
1056 
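/*!	Called when the current team has created a new thread: notifies the
	debugger with a B_DEBUGGER_MESSAGE_THREAD_CREATED event, if it has
	requested thread events.
*/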
1057 void
1058 user_debug_thread_created(thread_id threadID)
1059 {
1060 	// check whether a debugger is installed and interested in thread events
1061 	Thread *thread = thread_get_current_thread();
1062 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1063 	if (~teamDebugFlags
1064 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
1065 		return;
1066 	}
1067 
1068 	// prepare the message
1069 	debug_thread_created message;
1070 	message.new_thread = threadID;
1071 
1072 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message,
1073 		sizeof(message), true);
1074 }
1075 
1076 
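/*!	Called when a thread of a debugged team has been deleted: sends a
	B_DEBUGGER_MESSAGE_THREAD_DELETED message to the team's debugger, if it
	has requested thread events. Since the dying thread already belongs to
	the kernel team, the message is written directly under the debugger
	write lock instead of via debugger_write().
*/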
1077 void
1078 user_debug_thread_deleted(team_id teamID, thread_id threadID)
1079 {
1080 	// Things are a bit complicated here, since this thread no longer belongs to
1081 	// the debugged team (but to the kernel). So we can't use debugger_write().
1082 
1083 	// get the team debug flags and debugger port
1084 	Team* team = Team::Get(teamID);
1085 	if (team == NULL)
1086 		return;
1087 	BReference<Team> teamReference(team, true);
1088 
1089 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
1090 
1091 	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
1092 	port_id debuggerPort = team->debug_info.debugger_port;
1093 	sem_id writeLock = team->debug_info.debugger_write_lock;
1094 
1095 	debugInfoLocker.Unlock();
1096 
1097 	// check whether a debugger is installed and interested in thread events
1098 	if (~teamDebugFlags
1099 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
1100 		return;
1101 	}
1102 
1103 	// acquire the debugger write lock
1104 	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
1105 	if (error != B_OK)
1106 		return;
1107 
1108 	// re-get the team debug info -- we need to check whether anything changed
1109 	debugInfoLocker.Lock();
1110 
1111 	teamDebugFlags = atomic_get(&team->debug_info.flags);
1112 	port_id newDebuggerPort = team->debug_info.debugger_port;
1113 
1114 	debugInfoLocker.Unlock();
1115 
1116 	// Send the message only if the debugger hasn't changed in the meantime or
1117 	// Send the message only if the debugger hasn't changed in the meantime
1118 	// or no handover to a new debugger is in progress.
1119 		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
1120 		debug_thread_deleted message;
1121 		message.origin.thread = threadID;
1122 		message.origin.team = teamID;
1123 		message.origin.nub_port = -1;
1124 
1125 		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
1126 			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
1127 	}
1128 
1129 	// release the debugger write lock
1130 	release_sem(writeLock);
1131 }
1132 
1133 
1134 /*!	Called for a thread that is about to die, cleaning up all user debug
1135 	facilities installed for the thread.
1136 	\param thread The current thread, the one that is going to die.
1137 */
1138 void
1139 user_debug_thread_exiting(Thread* thread)
1140 {
1141 	// thread is the current thread, so using team is safe
1142 	Team* team = thread->team;
1143 
1144 	InterruptsLocker interruptsLocker;
1145 
1146 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1147 
1148 	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
1149 	port_id debuggerPort = team->debug_info.debugger_port;
1150 
1151 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1152 
1153 	// check whether a debugger is installed
1154 	if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
1155 		|| debuggerPort < 0) {
1156 		return;
1157 	}
1158 
1159 	// detach the profile info and mark the thread dying
1160 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1161 
1162 	thread_debug_info& threadDebugInfo = thread->debug_info;
1163 	if (threadDebugInfo.profile.samples == NULL)
1164 		return;
1165 
1166 	area_id sampleArea = threadDebugInfo.profile.sample_area;
1167 	int32 sampleCount = threadDebugInfo.profile.sample_count;
1168 	int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
1169 	int32 stackDepth = threadDebugInfo.profile.stack_depth;
1170 	bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
1171 	int32 imageEvent = threadDebugInfo.profile.image_event;
1172 	threadDebugInfo.profile.sample_area = -1;
1173 	threadDebugInfo.profile.samples = NULL;
1174 	threadDebugInfo.profile.buffer_full = false;
1175 
1176 	atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);
1177 
1178 	threadDebugInfoLocker.Unlock();
1179 	interruptsLocker.Unlock();
1180 
1181 	// notify the debugger
1182 	debug_profiler_update message;
1183 	message.origin.thread = thread->id;
1184 	message.origin.team = thread->team->id;
1185 	message.origin.nub_port = -1;	// asynchronous message
1186 	message.sample_count = sampleCount;
1187 	message.dropped_ticks = droppedTicks;
1188 	message.stack_depth = stackDepth;
1189 	message.variable_stack_depth = variableStackDepth;
1190 	message.image_event = imageEvent;
1191 	message.stopped = true;
1192 	debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
1193 		&message, sizeof(message), false);
1194 
1195 	if (sampleArea >= 0) {
1196 		area_info areaInfo;
1197 		if (get_area_info(sampleArea, &areaInfo) == B_OK) {
1198 			unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
1199 			delete_area(sampleArea);
1200 		}
1201 	}
1202 }
1203 
1204 
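/*!	Called when an image has been loaded into the current team: notifies the
	debugger with a B_DEBUGGER_MESSAGE_IMAGE_CREATED event, if it has
	requested image events.
*/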
1205 void
1206 user_debug_image_created(const image_info *imageInfo)
1207 {
1208 	// check whether a debugger is installed and interested in image events
1209 	Thread *thread = thread_get_current_thread();
1210 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1211 	if (~teamDebugFlags
1212 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
1213 		return;
1214 	}
1215 
1216 	// prepare the message
1217 	debug_image_created message;
1218 	memcpy(&message.info, imageInfo, sizeof(image_info));
1219 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1220 		+ 1;
1221 
1222 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
1223 		sizeof(message), true);
1224 }
1225 
1226 
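/*!	Called when an image has been removed from the current team: notifies
	the debugger with a B_DEBUGGER_MESSAGE_IMAGE_DELETED event, if it has
	requested image events.
*/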
1227 void
1228 user_debug_image_deleted(const image_info *imageInfo)
1229 {
1230 	// check whether a debugger is installed and interested in image events
1231 	Thread *thread = thread_get_current_thread();
1232 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1233 	if (~teamDebugFlags
1234 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
1235 		return;
1236 	}
1237 
1238 	// prepare the message
1239 	debug_image_deleted message;
1240 	memcpy(&message.info, imageInfo, sizeof(image_info));
1241 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1242 		+ 1;
1243 
1244 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message,
1245 		sizeof(message), true);
1246 }
1247 
1248 
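/*!	Called when the current thread has hit a breakpoint: enters the debug
	loop with a B_DEBUGGER_MESSAGE_BREAKPOINT_HIT event carrying the current
	CPU state, installing a debugger first, if necessary.
*/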
1249 void
1250 user_debug_breakpoint_hit(bool software)
1251 {
1252 	// prepare the message
1253 	debug_breakpoint_hit message;
1254 	arch_get_debug_cpu_state(&message.cpu_state);
1255 
1256 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
1257 		sizeof(message));
1258 }
1259 
1260 
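/*!	Called when the current thread has triggered a watchpoint: enters the
	debug loop with a B_DEBUGGER_MESSAGE_WATCHPOINT_HIT event carrying the
	current CPU state, installing a debugger first, if necessary.
*/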
1261 void
1262 user_debug_watchpoint_hit()
1263 {
1264 	// prepare the message
1265 	debug_watchpoint_hit message;
1266 	arch_get_debug_cpu_state(&message.cpu_state);
1267 
1268 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
1269 		sizeof(message));
1270 }
1271 
1272 
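/*!	Called when the current thread has completed a single step: clears the
	thread's single-step flag and enters the debug loop with a
	B_DEBUGGER_MESSAGE_SINGLE_STEP event carrying the current CPU state.
*/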
1273 void
1274 user_debug_single_stepped()
1275 {
1276 	// clear the single-step thread flag
1277 	Thread* thread = thread_get_current_thread();
1278 	atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_SINGLE_STEP);
1279 
1280 	// prepare the message
1281 	debug_single_step message;
1282 	arch_get_debug_cpu_state(&message.cpu_state);
1283 
1284 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
1285 		sizeof(message));
1286 }
1287 
1288 
1289 /*!	Schedules the profiling timer for the current thread.
1290 	The caller must hold the thread's debug info lock.
1291 	\param thread The current thread.
1292 	\param interval The time after which the timer should fire.
1293 */
1294 static void
1295 schedule_profiling_timer(Thread* thread, bigtime_t interval)
1296 {
1297 	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
1298 	thread->debug_info.profile.installed_timer = timer;
1299 	thread->debug_info.profile.timer_end = system_time() + interval;
1300 	add_timer(timer, &profiling_event, interval, B_ONE_SHOT_RELATIVE_TIMER);
1301 }
1302 
1303 
1304 /*!	Samples the current thread's instruction pointer/stack trace.
1305 	The caller must hold the current thread's debug info lock.
1306 	\param flushBuffer Return parameter: Set to \c true when the sampling
1307 		buffer must be flushed.
1308 */
1309 static bool
1310 profiling_do_sample(bool& flushBuffer)
1311 {
1312 	Thread* thread = thread_get_current_thread();
1313 	thread_debug_info& debugInfo = thread->debug_info;
1314 
1315 	if (debugInfo.profile.samples == NULL)
1316 		return false;
1317 
1318 	// Check whether the buffer is full or an image event occurred since the
1319 	// last sample was taken.
1320 	int32 maxSamples = debugInfo.profile.max_samples;
1321 	int32 sampleCount = debugInfo.profile.sample_count;
1322 	int32 stackDepth = debugInfo.profile.stack_depth;
1323 	int32 imageEvent = thread->team->debug_info.image_event;
1324 	if (debugInfo.profile.sample_count > 0) {
1325 		if (debugInfo.profile.last_image_event < imageEvent
1326 			&& debugInfo.profile.variable_stack_depth
1327 			&& sampleCount + 2 <= maxSamples) {
1328 			// an image event occurred, but we use variable stack depth and
1329 			// have enough room in the buffer to indicate an image event
1330 			addr_t* event = debugInfo.profile.samples + sampleCount;
1331 			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
1332 			event[1] = imageEvent;
1333 			sampleCount += 2;
1334 			debugInfo.profile.sample_count = sampleCount;
1335 			debugInfo.profile.last_image_event = imageEvent;
1336 		}
1337 
1338 		if (debugInfo.profile.last_image_event < imageEvent
1339 			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
1340 			if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
1341 				flushBuffer = true;
1342 				return true;
1343 			}
1344 
1345 			// We can't flush the buffer now, since we interrupted a kernel
1346 			// function. If the buffer is not full yet, we add the samples,
1347 			// otherwise we have to drop them.
1348 			if (maxSamples - sampleCount < stackDepth) {
1349 				debugInfo.profile.dropped_ticks++;
1350 				return true;
1351 			}
1352 		}
1353 	} else {
1354 		// first sample -- set the image event
1355 		debugInfo.profile.image_event = imageEvent;
1356 		debugInfo.profile.last_image_event = imageEvent;
1357 	}
1358 
1359 	// get the samples
1360 	addr_t* returnAddresses = debugInfo.profile.samples
1361 		+ debugInfo.profile.sample_count;
1362 	if (debugInfo.profile.variable_stack_depth) {
1363 		// variable sample count per hit
1364 		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
1365 			stackDepth - 1, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);
1366 
1367 		debugInfo.profile.sample_count += *returnAddresses + 1;
1368 	} else {
1369 		// fixed sample count per hit
1370 		if (stackDepth > 1) {
1371 			int32 count = arch_debug_get_stack_trace(returnAddresses,
1372 				stackDepth, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);
1373 
1374 			for (int32 i = count; i < stackDepth; i++)
1375 				returnAddresses[i] = 0;
1376 		} else
1377 			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);
1378 
1379 		debugInfo.profile.sample_count += stackDepth;
1380 	}
1381 
1382 	return true;
1383 }
1384 
1385 
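/*!	Post-interrupt callback invoked when the current thread's profiling
	sample buffer needs to be flushed: sends a
	B_DEBUGGER_MESSAGE_PROFILER_UPDATE event to the debugger, resets the
	buffer and, if the thread is still being profiled, takes the pending
	sample and reschedules the profiling timer.
*/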
1386 static void
1387 profiling_buffer_full(void*)
1388 {
1389 	// It is undefined whether the function is called with interrupts enabled
1390 	// or disabled. We are allowed to enable interrupts, though. First make
1391 	// sure interrupts are disabled.
1392 	disable_interrupts();
1393 
1394 	Thread* thread = thread_get_current_thread();
1395 	thread_debug_info& debugInfo = thread->debug_info;
1396 
1397 	SpinLocker threadDebugInfoLocker(debugInfo.lock);
1398 
1399 	if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
1400 		int32 sampleCount = debugInfo.profile.sample_count;
1401 		int32 droppedTicks = debugInfo.profile.dropped_ticks;
1402 		int32 stackDepth = debugInfo.profile.stack_depth;
1403 		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
1404 		int32 imageEvent = debugInfo.profile.image_event;
1405 
1406 		// notify the debugger
1407 		debugInfo.profile.sample_count = 0;
1408 		debugInfo.profile.dropped_ticks = 0;
1409 
1410 		threadDebugInfoLocker.Unlock();
1411 		enable_interrupts();
1412 
1413 		// prepare the message
1414 		debug_profiler_update message;
1415 		message.sample_count = sampleCount;
1416 		message.dropped_ticks = droppedTicks;
1417 		message.stack_depth = stackDepth;
1418 		message.variable_stack_depth = variableStackDepth;
1419 		message.image_event = imageEvent;
1420 		message.stopped = false;
1421 
1422 		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
1423 			sizeof(message), false);
1424 
1425 		disable_interrupts();
1426 		threadDebugInfoLocker.Lock();
1427 
1428 		// do the sampling and reschedule timer, if still profiling this thread
1429 		bool flushBuffer;
1430 		if (profiling_do_sample(flushBuffer)) {
1431 			debugInfo.profile.buffer_full = false;
1432 			schedule_profiling_timer(thread, debugInfo.profile.interval);
1433 		}
1434 	}
1435 
1436 	threadDebugInfoLocker.Unlock();
1437 	enable_interrupts();
1438 }
1439 
1440 
1441 /*!	Profiling timer event callback.
1442 	Called with interrupts disabled.
1443 */
1444 static int32
1445 profiling_event(timer* /*unused*/)
1446 {
1447 	Thread* thread = thread_get_current_thread();
1448 	thread_debug_info& debugInfo = thread->debug_info;
1449 
1450 	SpinLocker threadDebugInfoLocker(debugInfo.lock);
1451 
1452 	bool flushBuffer = false;
1453 	if (profiling_do_sample(flushBuffer)) {
1454 		if (flushBuffer) {
1455 			// The sample buffer needs to be flushed; we'll have to notify the
1456 			// debugger. We can't do that right here. Instead we set a post
1457 			// interrupt callback doing that for us, and don't reschedule the
1458 			// timer yet.
1459 			thread->post_interrupt_callback = profiling_buffer_full;
1460 			debugInfo.profile.installed_timer = NULL;
1461 			debugInfo.profile.buffer_full = true;
1462 		} else
1463 			schedule_profiling_timer(thread, debugInfo.profile.interval);
1464 	} else
1465 		debugInfo.profile.installed_timer = NULL;
1466 
1467 	return B_HANDLED_INTERRUPT;
1468 }
1469 
1470 
1471 /*!	Called by the scheduler when a debugged thread has been unscheduled.
1472 	The scheduler lock is being held.
1473 */
1474 void
1475 user_debug_thread_unscheduled(Thread* thread)
1476 {
1477 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1478 
1479 	// if running, cancel the profiling timer
1480 	struct timer* timer = thread->debug_info.profile.installed_timer;
1481 	if (timer != NULL) {
1482 		// track remaining time
1483 		bigtime_t left = thread->debug_info.profile.timer_end - system_time();
1484 		thread->debug_info.profile.interval_left = max_c(left, 0);
1485 		thread->debug_info.profile.installed_timer = NULL;
1486 
1487 		// cancel timer
1488 		threadDebugInfoLocker.Unlock();
1489 			// not necessary, but doesn't harm and reduces contention
1490 		cancel_timer(timer);
1491 			// since invoked on the same CPU, this will not possibly wait for
1492 			// an already called timer hook
1493 	}
1494 }
1495 
1496 
1497 /*!	Called by the scheduler when a debugged thread has been scheduled.
1498 	The scheduler lock is being held.
1499 */
1500 void
1501 user_debug_thread_scheduled(Thread* thread)
1502 {
1503 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1504 
1505 	if (thread->debug_info.profile.samples != NULL
1506 		&& !thread->debug_info.profile.buffer_full) {
1507 		// install profiling timer
1508 		schedule_profiling_timer(thread,
1509 			thread->debug_info.profile.interval_left);
1510 	}
1511 }
1512 
1513 
1514 /*!	\brief Called by the debug nub thread of a team to broadcast a message to
1515 		all threads of the team that are initialized for debugging (and
1516 		thus have a debug port).
1517 */
1518 static void
1519 broadcast_debugged_thread_message(Thread *nubThread, int32 code,
1520 	const void *message, int32 size)
1521 {
1522 	// iterate through the threads
1523 	thread_info threadInfo;
1524 	int32 cookie = 0;
1525 	while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
1526 			== B_OK) {
1527 		// get the thread and lock it
1528 		Thread* thread = Thread::GetAndLock(threadInfo.thread);
1529 		if (thread == NULL)
1530 			continue;
1531 
1532 		BReference<Thread> threadReference(thread, true);
1533 		ThreadLocker threadLocker(thread, true);
1534 
1535 		// get the thread's debug port
1536 		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1537 
1538 		port_id threadDebugPort = -1;
1539 		if (thread && thread != nubThread && thread->team == nubThread->team
1540 			&& (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
1541 			&& (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
1542 			threadDebugPort = thread->debug_info.debug_port;
1543 		}
1544 
1545 		threadDebugInfoLocker.Unlock();
1546 		threadLocker.Unlock();
1547 
1548 		// send the message to the thread
1549 		if (threadDebugPort >= 0) {
1550 			status_t error = kill_interruptable_write_port(threadDebugPort,
1551 				code, message, size);
1552 			if (error != B_OK) {
1553 				TRACE(("broadcast_debugged_thread_message(): Failed to send "
1554 					"message to thread %" B_PRId32 ": %" B_PRIx32 "\n",
1555 					thread->id, error));
1556 			}
1557 		}
1558 	}
1559 }
1560 
1561 
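/*!	Cleans up when the team's debug nub thread terminates: if the team still
	lists \a nubThread as its nub thread, the team debug info is cleared and
	destroyed (removing all breakpoints), the threads' debugger-installed
	flags are updated, and all debugged threads are notified that the
	debugger is gone.
*/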
1562 static void
1563 nub_thread_cleanup(Thread *nubThread)
1564 {
1565 	TRACE(("nub_thread_cleanup(%" B_PRId32 "): debugger port: %" B_PRId32 "\n",
1566 		nubThread->id, nubThread->team->debug_info.debugger_port));
1567 
1568 	ConditionVariable debugChangeCondition;
1569 	debugChangeCondition.Init(nubThread->team, "debug change condition");
1570 	prepare_debugger_change(nubThread->team, debugChangeCondition);
1571 
1572 	team_debug_info teamDebugInfo;
1573 	bool destroyDebugInfo = false;
1574 
1575 	TeamLocker teamLocker(nubThread->team);
1576 		// required by update_threads_debugger_installed_flag()
1577 
1578 	cpu_status state = disable_interrupts();
1579 	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1580 
1581 	team_debug_info &info = nubThread->team->debug_info;
1582 	if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
1583 		&& info.nub_thread == nubThread->id) {
1584 		teamDebugInfo = info;
1585 		clear_team_debug_info(&info, false);
1586 		destroyDebugInfo = true;
1587 	}
1588 
1589 	// update the thread::flags fields
1590 	update_threads_debugger_installed_flag(nubThread->team);
1591 
1592 	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1593 	restore_interrupts(state);
1594 
1595 	teamLocker.Unlock();
1596 
1597 	if (destroyDebugInfo)
1598 		teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();
1599 
1600 	finish_debugger_change(nubThread->team);
1601 
1602 	if (destroyDebugInfo)
1603 		destroy_team_debug_info(&teamDebugInfo);
1604 
1605 	// notify all threads that the debugger is gone
1606 	broadcast_debugged_thread_message(nubThread,
1607 		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
1608 }
1609 
1610 
1611 /**	\brief Debug nub thread helper function that returns the debug port of
1612  *		   a thread of the same team.
1613  */
1614 static status_t
1615 debug_nub_thread_get_thread_debug_port(Thread *nubThread,
1616 	thread_id threadID, port_id &threadDebugPort)
1617 {
1618 	threadDebugPort = -1;
1619 
1620 	// get the thread
1621 	Thread* thread = Thread::GetAndLock(threadID);
1622 	if (thread == NULL)
1623 		return B_BAD_THREAD_ID;
1624 	BReference<Thread> threadReference(thread, true);
1625 	ThreadLocker threadLocker(thread, true);
1626 
1627 	// get the debug port
1628 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1629 
1630 	if (thread->team != nubThread->team)
1631 		return B_BAD_VALUE;
1632 	if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) == 0)
1633 		return B_BAD_THREAD_STATE;
1634 
1635 	threadDebugPort = thread->debug_info.debug_port;
1636 
1637 	threadDebugInfoLocker.Unlock();
1638 
1639 	if (threadDebugPort < 0)
1640 		return B_ERROR;
1641 
1642 	return B_OK;
1643 }
1644 
1645 
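/*!	Entry function of a team's debug nub thread: reads commands sent by the
	debugger from the team's nub port and processes them (memory access,
	breakpoints and watchpoints, CPU state, signal masks, profiling, etc.)
	until the port is deleted or the thread is interrupted by a kill signal,
	in which case nub_thread_cleanup() is performed.
*/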
1646 static status_t
1647 debug_nub_thread(void *)
1648 {
1649 	Thread *nubThread = thread_get_current_thread();
1650 
1651 	// check whether we're still the current nub thread and get our port
1652 	cpu_status state = disable_interrupts();
1653 	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1654 
1655 	if (nubThread->team->debug_info.nub_thread != nubThread->id) {
1656 		RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1657 		restore_interrupts(state);
1658 		return 0;
1659 	}
1660 
1661 	port_id port = nubThread->team->debug_info.nub_port;
1662 	sem_id writeLock = nubThread->team->debug_info.debugger_write_lock;
1663 	BreakpointManager* breakpointManager
1664 		= nubThread->team->debug_info.breakpoint_manager;
1665 
1666 	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1667 	restore_interrupts(state);
1668 
1669 	TRACE(("debug_nub_thread() thread: %" B_PRId32 ", team %" B_PRId32 ", nub "
1670 		"port: %" B_PRId32 "\n", nubThread->id, nubThread->team->id, port));
1671 
1672 	// notify all threads that a debugger has been installed
1673 	broadcast_debugged_thread_message(nubThread,
1674 		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
1675 
1676 	// command processing loop
1677 	while (true) {
1678 		int32 command;
1679 		debug_nub_message_data message;
1680 		ssize_t messageSize = read_port_etc(port, &command, &message,
1681 			sizeof(message), B_KILL_CAN_INTERRUPT, 0);
1682 
1683 		if (messageSize < 0) {
1684 			// The port is no longer valid or we were interrupted by a kill
1685 			// signal: If we are still listed in the team's debug info as nub
1686 			// thread, we need to update that.
1687 			nub_thread_cleanup(nubThread);
1688 
1689 			TRACE(("nub thread %" B_PRId32 ": terminating: %lx\n",
1690 				nubThread->id, messageSize));
1691 
1692 			return messageSize;
1693 		}
1694 
1695 		bool sendReply = false;
1696 		union {
1697 			debug_nub_read_memory_reply			read_memory;
1698 			debug_nub_write_memory_reply		write_memory;
1699 			debug_nub_get_cpu_state_reply		get_cpu_state;
1700 			debug_nub_set_breakpoint_reply		set_breakpoint;
1701 			debug_nub_set_watchpoint_reply		set_watchpoint;
1702 			debug_nub_get_signal_masks_reply	get_signal_masks;
1703 			debug_nub_get_signal_handler_reply	get_signal_handler;
1704 			debug_nub_start_profiler_reply		start_profiler;
1705 			debug_profiler_update				profiler_update;
1706 			debug_nub_write_core_file_reply		write_core_file;
1707 		} reply;
1708 		int32 replySize = 0;
1709 		port_id replyPort = -1;
1710 
1711 		// process the command
1712 		switch (command) {
1713 			case B_DEBUG_MESSAGE_READ_MEMORY:
1714 			{
1715 				// get the parameters
1716 				replyPort = message.read_memory.reply_port;
1717 				void *address = message.read_memory.address;
1718 				int32 size = message.read_memory.size;
1719 				status_t result = B_OK;
1720 
1721 				// check the parameters
1722 				if (!BreakpointManager::CanAccessAddress(address, false))
1723 					result = B_BAD_ADDRESS;
1724 				else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE)
1725 					result = B_BAD_VALUE;
1726 
1727 				// read the memory
1728 				size_t bytesRead = 0;
1729 				if (result == B_OK) {
1730 					result = breakpointManager->ReadMemory(address,
1731 						reply.read_memory.data, size, bytesRead);
1732 				}
1733 				reply.read_memory.error = result;
1734 
1735 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_READ_MEMORY: "
1736 					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
1737 					", result: %" B_PRIx32 ", read: %ld\n", nubThread->id,
1738 					replyPort, address, size, result, bytesRead));
1739 
1740 				// send only as much data as necessary
1741 				reply.read_memory.size = bytesRead;
1742 				replySize = reply.read_memory.data + bytesRead - (char*)&reply;
1743 				sendReply = true;
1744 				break;
1745 			}
1746 
1747 			case B_DEBUG_MESSAGE_WRITE_MEMORY:
1748 			{
1749 				// get the parameters
1750 				replyPort = message.write_memory.reply_port;
1751 				void *address = message.write_memory.address;
1752 				int32 size = message.write_memory.size;
1753 				const char *data = message.write_memory.data;
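				// number of data bytes actually received with the message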
1754 				int32 realSize = (char*)&message + messageSize - data;
1755 				status_t result = B_OK;
1756 
1757 				// check the parameters
1758 				if (!BreakpointManager::CanAccessAddress(address, true))
1759 					result = B_BAD_ADDRESS;
1760 				else if (size <= 0 || size > realSize)
1761 					result = B_BAD_VALUE;
1762 
1763 				// write the memory
1764 				size_t bytesWritten = 0;
1765 				if (result == B_OK) {
1766 					result = breakpointManager->WriteMemory(address, data, size,
1767 						bytesWritten);
1768 				}
1769 				reply.write_memory.error = result;
1770 
1771 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_WRITE_MEMORY: "
1772 					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
1773 					", result: %" B_PRIx32 ", written: %ld\n", nubThread->id,
1774 					replyPort, address, size, result, bytesWritten));
1775 
1776 				reply.write_memory.size = bytesWritten;
1777 				sendReply = true;
1778 				replySize = sizeof(debug_nub_write_memory_reply);
1779 				break;
1780 			}
1781 
1782 			case B_DEBUG_MESSAGE_SET_TEAM_FLAGS:
1783 			{
1784 				// get the parameters
1785 				int32 flags = message.set_team_flags.flags
1786 					& B_TEAM_DEBUG_USER_FLAG_MASK;
1787 
1788 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_TEAM_FLAGS"
1789 					": flags: %" B_PRIx32 "\n", nubThread->id, flags));
1790 
1791 				Team *team = thread_get_current_thread()->team;
1792 
1793 				// set the flags
1794 				cpu_status state = disable_interrupts();
1795 				GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1796 
1797 				flags |= team->debug_info.flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK;
1798 				atomic_set(&team->debug_info.flags, flags);
1799 
1800 				RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1801 				restore_interrupts(state);
1802 
1803 				break;
1804 			}
1805 
1806 			case B_DEBUG_MESSAGE_SET_THREAD_FLAGS:
1807 			{
1808 				// get the parameters
1809 				thread_id threadID = message.set_thread_flags.thread;
1810 				int32 flags = message.set_thread_flags.flags
1811 					& B_THREAD_DEBUG_USER_FLAG_MASK;
1812 
1813 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_THREAD_FLAGS"
1814 					": thread: %" B_PRId32 ", flags: %" B_PRIx32 "\n",
1815 					nubThread->id, threadID, flags));
1816 
1817 				// set the flags
1818 				Thread* thread = Thread::GetAndLock(threadID);
1819 				if (thread == NULL)
1820 					break;
1821 				BReference<Thread> threadReference(thread, true);
1822 				ThreadLocker threadLocker(thread, true);
1823 
1824 				InterruptsSpinLocker threadDebugInfoLocker(
1825 					thread->debug_info.lock);
1826 
1827 				if (thread->team == thread_get_current_thread()->team) {
1828 					flags |= thread->debug_info.flags
1829 						& B_THREAD_DEBUG_KERNEL_FLAG_MASK;
1830 					atomic_set(&thread->debug_info.flags, flags);
1831 				}
1832 
1833 				break;
1834 			}
1835 
1836 			case B_DEBUG_MESSAGE_CONTINUE_THREAD:
1837 			{
1838 				// get the parameters
1839 				thread_id threadID;
1840 				uint32 handleEvent;
1841 				bool singleStep;
1842 
1843 				threadID = message.continue_thread.thread;
1844 				handleEvent = message.continue_thread.handle_event;
1845 				singleStep = message.continue_thread.single_step;
1846 
1847 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CONTINUE_THREAD"
1848 					": thread: %" B_PRId32 ", handle event: %" B_PRIu32 ", "
1849 					"single step: %d\n", nubThread->id, threadID, handleEvent,
1850 					singleStep));
1851 
1852 				// find the thread and get its debug port
1853 				port_id threadDebugPort = -1;
1854 				status_t result = debug_nub_thread_get_thread_debug_port(
1855 					nubThread, threadID, threadDebugPort);
1856 
1857 				// send a message to the debugged thread
1858 				if (result == B_OK) {
1859 					debugged_thread_continue commandMessage;
1860 					commandMessage.handle_event = handleEvent;
1861 					commandMessage.single_step = singleStep;
1862 
1863 					result = write_port(threadDebugPort,
1864 						B_DEBUGGED_THREAD_MESSAGE_CONTINUE,
1865 						&commandMessage, sizeof(commandMessage));
1866 				} else if (result == B_BAD_THREAD_STATE) {
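					// The thread isn't stopped in the debugger. If it is
					// merely suspended, resume it directly.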
1867 					Thread* thread = Thread::GetAndLock(threadID);
1868 					if (thread == NULL)
1869 						break;
1870 
1871 					BReference<Thread> threadReference(thread, true);
1872 					ThreadLocker threadLocker(thread, true);
1873 					if (thread->state == B_THREAD_SUSPENDED) {
1874 						threadLocker.Unlock();
1875 						resume_thread(threadID);
1876 						break;
1877 					}
1878 				}
1879 
1880 				break;
1881 			}
1882 
1883 			case B_DEBUG_MESSAGE_SET_CPU_STATE:
1884 			{
1885 				// get the parameters
1886 				thread_id threadID = message.set_cpu_state.thread;
1887 				const debug_cpu_state &cpuState
1888 					= message.set_cpu_state.cpu_state;
1889 
1890 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_CPU_STATE"
1891 					": thread: %" B_PRId32 "\n", nubThread->id, threadID));
1892 
1893 				// find the thread and get its debug port
1894 				port_id threadDebugPort = -1;
1895 				status_t result = debug_nub_thread_get_thread_debug_port(
1896 					nubThread, threadID, threadDebugPort);
1897 
1898 				// send a message to the debugged thread
1899 				if (result == B_OK) {
1900 					debugged_thread_set_cpu_state commandMessage;
1901 					memcpy(&commandMessage.cpu_state, &cpuState,
1902 						sizeof(debug_cpu_state));
1903 					write_port(threadDebugPort,
1904 						B_DEBUGGED_THREAD_SET_CPU_STATE,
1905 						&commandMessage, sizeof(commandMessage));
1906 				}
1907 
1908 				break;
1909 			}
1910 
1911 			case B_DEBUG_MESSAGE_GET_CPU_STATE:
1912 			{
1913 				// get the parameters
1914 				thread_id threadID = message.get_cpu_state.thread;
1915 				replyPort = message.get_cpu_state.reply_port;
1916 
1917 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_CPU_STATE"
1918 					": thread: %" B_PRId32 "\n", nubThread->id, threadID));
1919 
1920 				// find the thread and get its debug port
1921 				port_id threadDebugPort = -1;
1922 				status_t result = debug_nub_thread_get_thread_debug_port(
1923 					nubThread, threadID, threadDebugPort);
1924 
1925 				// send a message to the debugged thread
1926 				if (threadDebugPort >= 0) {
1927 					debugged_thread_get_cpu_state commandMessage;
1928 					commandMessage.reply_port = replyPort;
1929 					result = write_port(threadDebugPort,
1930 						B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage,
1931 						sizeof(commandMessage));
1932 				}
1933 
1934 				// send a reply to the debugger in case of error
1935 				if (result != B_OK) {
1936 					reply.get_cpu_state.error = result;
1937 					sendReply = true;
1938 					replySize = sizeof(reply.get_cpu_state);
1939 				}
1940 
1941 				break;
1942 			}
1943 
1944 			case B_DEBUG_MESSAGE_SET_BREAKPOINT:
1945 			{
1946 				// get the parameters
1947 				replyPort = message.set_breakpoint.reply_port;
1948 				void *address = message.set_breakpoint.address;
1949 
1950 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_BREAKPOINT"
1951 					": address: %p\n", nubThread->id, address));
1952 
1953 				// check the address
1954 				status_t result = B_OK;
1955 				if (address == NULL
1956 					|| !BreakpointManager::CanAccessAddress(address, false)) {
1957 					result = B_BAD_ADDRESS;
1958 				}
1959 
1960 				// set the breakpoint
1961 				if (result == B_OK)
1962 					result = breakpointManager->InstallBreakpoint(address);
1963 
1964 				if (result == B_OK)
1965 					update_threads_breakpoints_flag();
1966 
1967 				// prepare the reply
1968 				reply.set_breakpoint.error = result;
1969 				replySize = sizeof(reply.set_breakpoint);
1970 				sendReply = true;
1971 
1972 				break;
1973 			}
1974 
1975 			case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT:
1976 			{
1977 				// get the parameters
1978 				void *address = message.clear_breakpoint.address;
1979 
1980 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_BREAKPOINT"
1981 					": address: %p\n", nubThread->id, address));
1982 
1983 				// check the address
1984 				status_t result = B_OK;
1985 				if (address == NULL
1986 					|| !BreakpointManager::CanAccessAddress(address, false)) {
1987 					result = B_BAD_ADDRESS;
1988 				}
1989 
1990 				// clear the breakpoint
1991 				if (result == B_OK)
1992 					result = breakpointManager->UninstallBreakpoint(address);
1993 
1994 				if (result == B_OK)
1995 					update_threads_breakpoints_flag();
1996 
1997 				break;
1998 			}
1999 
2000 			case B_DEBUG_MESSAGE_SET_WATCHPOINT:
2001 			{
2002 				// get the parameters
2003 				replyPort = message.set_watchpoint.reply_port;
2004 				void *address = message.set_watchpoint.address;
2005 				uint32 type = message.set_watchpoint.type;
2006 				int32 length = message.set_watchpoint.length;
2007 
2008 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_WATCHPOINT"
2009 					": address: %p, type: %" B_PRIu32 ", length: %" B_PRId32 "\n",
2010 					nubThread->id, address, type, length));
2011 
2012 				// check the address and size
2013 				status_t result = B_OK;
2014 				if (address == NULL
2015 					|| !BreakpointManager::CanAccessAddress(address, false)) {
2016 					result = B_BAD_ADDRESS;
2017 				}
2018 				if (length < 0)
2019 					result = B_BAD_VALUE;
2020 
2021 				// set the watchpoint
2022 				if (result == B_OK) {
2023 					result = breakpointManager->InstallWatchpoint(address, type,
2024 						length);
2025 				}
2026 
2027 				if (result == B_OK)
2028 					update_threads_breakpoints_flag();
2029 
2030 				// prepare the reply
2031 				reply.set_watchpoint.error = result;
2032 				replySize = sizeof(reply.set_watchpoint);
2033 				sendReply = true;
2034 
2035 				break;
2036 			}
2037 
2038 			case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT:
2039 			{
2040 				// get the parameters
2041 				void *address = message.clear_watchpoint.address;
2042 
2043 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_WATCHPOINT"
2044 					": address: %p\n", nubThread->id, address));
2045 
2046 				// check the address
2047 				status_t result = B_OK;
2048 				if (address == NULL
2049 					|| !BreakpointManager::CanAccessAddress(address, false)) {
2050 					result = B_BAD_ADDRESS;
2051 				}
2052 
2053 				// clear the watchpoint
2054 				if (result == B_OK)
2055 					result = breakpointManager->UninstallWatchpoint(address);
2056 
2057 				if (result == B_OK)
2058 					update_threads_breakpoints_flag();
2059 
2060 				break;
2061 			}
2062 
2063 			case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS:
2064 			{
2065 				// get the parameters
2066 				thread_id threadID = message.set_signal_masks.thread;
2067 				uint64 ignore = message.set_signal_masks.ignore_mask;
2068 				uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask;
2069 				uint32 ignoreOp = message.set_signal_masks.ignore_op;
2070 				uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op;
2071 
2072 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_MASKS"
2073 					": thread: %" B_PRId32 ", ignore: %" B_PRIx64 " (op: %"
2074 					B_PRIu32 "), ignore once: %" B_PRIx64 " (op: %" B_PRIu32
2075 					")\n", nubThread->id, threadID, ignore, ignoreOp,
2076 					ignoreOnce, ignoreOnceOp));
2077 
2078 				// set the masks
2079 				Thread* thread = Thread::GetAndLock(threadID);
2080 				if (thread == NULL)
2081 					break;
2082 				BReference<Thread> threadReference(thread, true);
2083 				ThreadLocker threadLocker(thread, true);
2084 
2085 				InterruptsSpinLocker threadDebugInfoLocker(
2086 					thread->debug_info.lock);
2087 
2088 				if (thread->team == thread_get_current_thread()->team) {
2089 					thread_debug_info &threadDebugInfo = thread->debug_info;
2090 					// set ignore mask
2091 					switch (ignoreOp) {
2092 						case B_DEBUG_SIGNAL_MASK_AND:
2093 							threadDebugInfo.ignore_signals &= ignore;
2094 							break;
2095 						case B_DEBUG_SIGNAL_MASK_OR:
2096 							threadDebugInfo.ignore_signals |= ignore;
2097 							break;
2098 						case B_DEBUG_SIGNAL_MASK_SET:
2099 							threadDebugInfo.ignore_signals = ignore;
2100 							break;
2101 					}
2102 
2103 					// set ignore once mask
2104 					switch (ignoreOnceOp) {
2105 						case B_DEBUG_SIGNAL_MASK_AND:
2106 							threadDebugInfo.ignore_signals_once &= ignoreOnce;
2107 							break;
2108 						case B_DEBUG_SIGNAL_MASK_OR:
2109 							threadDebugInfo.ignore_signals_once |= ignoreOnce;
2110 							break;
2111 						case B_DEBUG_SIGNAL_MASK_SET:
2112 							threadDebugInfo.ignore_signals_once = ignoreOnce;
2113 							break;
2114 					}
2115 				}
2116 
2117 				break;
2118 			}
2119 
2120 			case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS:
2121 			{
2122 				// get the parameters
2123 				replyPort = message.get_signal_masks.reply_port;
2124 				thread_id threadID = message.get_signal_masks.thread;
2125 				status_t result = B_OK;
2126 
2127 				// get the masks
2128 				uint64 ignore = 0;
2129 				uint64 ignoreOnce = 0;
2130 
2131 				Thread* thread = Thread::GetAndLock(threadID);
2132 				if (thread != NULL) {
2133 					BReference<Thread> threadReference(thread, true);
2134 					ThreadLocker threadLocker(thread, true);
2135 
2136 					InterruptsSpinLocker threadDebugInfoLocker(
2137 						thread->debug_info.lock);
2138 
2139 					ignore = thread->debug_info.ignore_signals;
2140 					ignoreOnce = thread->debug_info.ignore_signals_once;
2141 				} else
2142 					result = B_BAD_THREAD_ID;
2143 
2144 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_MASKS"
2145 					": reply port: %" B_PRId32 ", thread: %" B_PRId32 ", "
2146 					"ignore: %" B_PRIx64 ", ignore once: %" B_PRIx64 ", result: "
2147 					"%" B_PRIx32 "\n", nubThread->id, replyPort, threadID,
2148 					ignore, ignoreOnce, result));
2149 
2150 				// prepare the message
2151 				reply.get_signal_masks.error = result;
2152 				reply.get_signal_masks.ignore_mask = ignore;
2153 				reply.get_signal_masks.ignore_once_mask = ignoreOnce;
2154 				replySize = sizeof(reply.get_signal_masks);
2155 				sendReply = true;
2156 				break;
2157 			}
2158 
2159 			case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER:
2160 			{
2161 				// get the parameters
2162 				int signal = message.set_signal_handler.signal;
2163 				struct sigaction &handler = message.set_signal_handler.handler;
2164 
2165 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER"
2166 					": signal: %d, handler: %p\n", nubThread->id, signal,
2167 					handler.sa_handler));
2168 
2169 				// set the handler
2170 				sigaction(signal, &handler, NULL);
2171 
2172 				break;
2173 			}
2174 
2175 			case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER:
2176 			{
2177 				// get the parameters
2178 				replyPort = message.get_signal_handler.reply_port;
2179 				int signal = message.get_signal_handler.signal;
2180 				status_t result = B_OK;
2181 
2182 				// get the handler
2183 				if (sigaction(signal, NULL, &reply.get_signal_handler.handler)
2184 						!= 0) {
2185 					result = errno;
2186 				}
2187 
2188 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER"
2189 					": reply port: %" B_PRId32 ", signal: %d, handler: %p\n",
2190 					nubThread->id, replyPort, signal,
2191 					reply.get_signal_handler.handler.sa_handler));
2192 
2193 				// prepare the message
2194 				reply.get_signal_handler.error = result;
2195 				replySize = sizeof(reply.get_signal_handler);
2196 				sendReply = true;
2197 				break;
2198 			}
2199 
2200 			case B_DEBUG_MESSAGE_PREPARE_HANDOVER:
2201 			{
2202 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_PREPARE_HANDOVER"
2203 					"\n", nubThread->id));
2204 
2205 				Team *team = nubThread->team;
2206 
2207 				// Acquire the debugger write lock. As soon as we have it and
2208 				// have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread
2209 				// will write anything to the debugger port anymore.
2210 				status_t result = acquire_sem_etc(writeLock, 1,
2211 					B_KILL_CAN_INTERRUPT, 0);
2212 				if (result == B_OK) {
2213 					// set the respective team debug flag
2214 					cpu_status state = disable_interrupts();
2215 					GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2216 
2217 					atomic_or(&team->debug_info.flags,
2218 						B_TEAM_DEBUG_DEBUGGER_HANDOVER);
2219 					BreakpointManager* breakpointManager
2220 						= team->debug_info.breakpoint_manager;
2221 
2222 					RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2223 					restore_interrupts(state);
2224 
2225 					// remove all installed breakpoints
2226 					breakpointManager->RemoveAllBreakpoints();
2227 
2228 					release_sem(writeLock);
2229 				} else {
2230 					// We probably got a SIGKILL. If so, we will terminate when
2231 					// reading the next message fails.
2232 				}
2233 
2234 				break;
2235 			}
2236 
2237 			case B_DEBUG_MESSAGE_HANDED_OVER:
2238 			{
2239 				// notify all threads that the debugger has changed
2240 				broadcast_debugged_thread_message(nubThread,
2241 					B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
2242 
2243 				break;
2244 			}
2245 
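			// Start profiling a thread: clone and lock the sample area
			// supplied by the debugger, attach the profiling info to the
			// target thread (unless it is already being profiled), and reply
			// with the actually used interval and the current image event.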
2246 			case B_DEBUG_START_PROFILER:
2247 			{
2248 				// get the parameters
2249 				thread_id threadID = message.start_profiler.thread;
2250 				replyPort = message.start_profiler.reply_port;
2251 				area_id sampleArea = message.start_profiler.sample_area;
2252 				int32 stackDepth = message.start_profiler.stack_depth;
2253 				bool variableStackDepth
2254 					= message.start_profiler.variable_stack_depth;
2255 				bigtime_t interval = max_c(message.start_profiler.interval,
2256 					B_DEBUG_MIN_PROFILE_INTERVAL);
2257 				status_t result = B_OK;
2258 
2259 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_START_PROFILER: "
2260 					"thread: %" B_PRId32 ", sample area: %" B_PRId32 "\n",
2261 					nubThread->id, threadID, sampleArea));
2262 
2263 				if (stackDepth < 1)
2264 					stackDepth = 1;
2265 				else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH)
2266 					stackDepth = B_DEBUG_STACK_TRACE_DEPTH;
2267 
2268 				// if the stack depth is variable, provide for an extra entry
2269 				// per hit (to store the number of samples)
2270 				if (variableStackDepth)
2271 					stackDepth++;
2272 
2273 				// clone the sample area
2274 				area_info areaInfo;
2275 				if (result == B_OK)
2276 					result = get_area_info(sampleArea, &areaInfo);
2277 
2278 				area_id clonedSampleArea = -1;
2279 				void* samples = NULL;
2280 				if (result == B_OK) {
2281 					clonedSampleArea = clone_area("profiling samples", &samples,
2282 						B_ANY_KERNEL_ADDRESS,
2283 						B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
2284 						sampleArea);
2285 					if (clonedSampleArea >= 0) {
2286 						// we need the memory locked
2287 						result = lock_memory(samples, areaInfo.size,
2288 							B_READ_DEVICE);
2289 						if (result != B_OK) {
2290 							delete_area(clonedSampleArea);
2291 							clonedSampleArea = -1;
2292 						}
2293 					} else
2294 						result = clonedSampleArea;
2295 				}
2296 
2297 				// get the thread and set the profile info
2298 				int32 imageEvent = nubThread->team->debug_info.image_event;
2299 				if (result == B_OK) {
2300 					Thread* thread = Thread::GetAndLock(threadID);
2301 					BReference<Thread> threadReference(thread, true);
2302 					ThreadLocker threadLocker(thread, true);
2303 
2304 					if (thread != NULL && thread->team == nubThread->team) {
2305 						thread_debug_info &threadDebugInfo = thread->debug_info;
2306 
2307 						InterruptsSpinLocker threadDebugInfoLocker(
2308 							threadDebugInfo.lock);
2309 
2310 						if (threadDebugInfo.profile.samples == NULL) {
2311 							threadDebugInfo.profile.interval = interval;
2312 							threadDebugInfo.profile.sample_area
2313 								= clonedSampleArea;
2314 							threadDebugInfo.profile.samples = (addr_t*)samples;
2315 							threadDebugInfo.profile.max_samples
2316 								= areaInfo.size / sizeof(addr_t);
2317 							threadDebugInfo.profile.flush_threshold
2318 								= threadDebugInfo.profile.max_samples
2319 									* B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD
2320 									/ 100;
2321 							threadDebugInfo.profile.sample_count = 0;
2322 							threadDebugInfo.profile.dropped_ticks = 0;
2323 							threadDebugInfo.profile.stack_depth = stackDepth;
2324 							threadDebugInfo.profile.variable_stack_depth
2325 								= variableStackDepth;
2326 							threadDebugInfo.profile.buffer_full = false;
2327 							threadDebugInfo.profile.interval_left = interval;
2328 							threadDebugInfo.profile.installed_timer = NULL;
2329 							threadDebugInfo.profile.image_event = imageEvent;
2330 							threadDebugInfo.profile.last_image_event
2331 								= imageEvent;
2332 						} else
2333 							result = B_BAD_VALUE;
2334 					} else
2335 						result = B_BAD_THREAD_ID;
2336 				}
2337 
2338 				// on error unlock and delete the sample area
2339 				if (result != B_OK) {
2340 					if (clonedSampleArea >= 0) {
2341 						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
2342 						delete_area(clonedSampleArea);
2343 					}
2344 				}
2345 
2346 				// send a reply to the debugger
2347 				reply.start_profiler.error = result;
2348 				reply.start_profiler.interval = interval;
2349 				reply.start_profiler.image_event = imageEvent;
2350 				sendReply = true;
2351 				replySize = sizeof(reply.start_profiler);
2352 
2353 				break;
2354 			}
2355 
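			// Stop profiling a thread: detach the profiling info from the
			// thread, reply with a final profiler update reporting the
			// remaining sample count, and unlock and delete the cloned sample
			// area.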
2356 			case B_DEBUG_STOP_PROFILER:
2357 			{
2358 				// get the parameters
2359 				thread_id threadID = message.stop_profiler.thread;
2360 				replyPort = message.stop_profiler.reply_port;
2361 				status_t result = B_OK;
2362 
2363 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_STOP_PROFILER: "
2364 					"thread: %" B_PRId32 "\n", nubThread->id, threadID));
2365 
2366 				area_id sampleArea = -1;
2367 				addr_t* samples = NULL;
2368 				int32 sampleCount = 0;
2369 				int32 stackDepth = 0;
2370 				bool variableStackDepth = false;
2371 				int32 imageEvent = 0;
2372 				int32 droppedTicks = 0;
2373 
2374 				// get the thread and detach the profile info
2375 				Thread* thread = Thread::GetAndLock(threadID);
2376 				BReference<Thread> threadReference(thread, true);
2377 				ThreadLocker threadLocker(thread, true);
2378 
2379 				if (thread && thread->team == nubThread->team) {
2380 					thread_debug_info &threadDebugInfo = thread->debug_info;
2381 
2382 					InterruptsSpinLocker threadDebugInfoLocker(
2383 						threadDebugInfo.lock);
2384 
2385 					if (threadDebugInfo.profile.samples != NULL) {
2386 						sampleArea = threadDebugInfo.profile.sample_area;
2387 						samples = threadDebugInfo.profile.samples;
2388 						sampleCount = threadDebugInfo.profile.sample_count;
2389 						droppedTicks = threadDebugInfo.profile.dropped_ticks;
2390 						stackDepth = threadDebugInfo.profile.stack_depth;
2391 						variableStackDepth
2392 							= threadDebugInfo.profile.variable_stack_depth;
2393 						imageEvent = threadDebugInfo.profile.image_event;
2394 						threadDebugInfo.profile.sample_area = -1;
2395 						threadDebugInfo.profile.samples = NULL;
2396 						threadDebugInfo.profile.buffer_full = false;
2397 						threadDebugInfo.profile.dropped_ticks = 0;
2398 					} else
2399 						result = B_BAD_VALUE;
2400 				} else
2401 					result = B_BAD_THREAD_ID;
2402 
2403 				threadLocker.Unlock();
2404 
2405 				// prepare the reply
2406 				if (result == B_OK) {
2407 					reply.profiler_update.origin.thread = threadID;
2408 					reply.profiler_update.image_event = imageEvent;
2409 					reply.profiler_update.stack_depth = stackDepth;
2410 					reply.profiler_update.variable_stack_depth
2411 						= variableStackDepth;
2412 					reply.profiler_update.sample_count = sampleCount;
2413 					reply.profiler_update.dropped_ticks = droppedTicks;
2414 					reply.profiler_update.stopped = true;
2415 				} else
2416 					reply.profiler_update.origin.thread = result;
2417 
2418 				replySize = sizeof(debug_profiler_update);
2419 				sendReply = true;
2420 
2421 				if (sampleArea >= 0) {
2422 					area_info areaInfo;
2423 					if (get_area_info(sampleArea, &areaInfo) == B_OK) {
2424 						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
2425 						delete_area(sampleArea);
2426 					}
2427 				}
2428 
2429 				break;
2430 			}
2431 
2432 			case B_DEBUG_WRITE_CORE_FILE:
2433 			{
2434 				// get the parameters
2435 				replyPort = message.write_core_file.reply_port;
2436 				char* path = message.write_core_file.path;
2437 				path[sizeof(message.write_core_file.path) - 1] = '\0';
2438 
2439 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_WRITE_CORE_FILE"
2440 					": path: %s\n", nubThread->id, path));
2441 
2442 				// write the core file
2443 				status_t result = core_dump_write_core_file(path, false);
2444 
2445 				// prepare the reply
2446 				reply.write_core_file.error = result;
2447 				replySize = sizeof(reply.write_core_file);
2448 				sendReply = true;
2449 
2450 				break;
2451 			}
2452 		}
2453 
2454 		// send the reply, if necessary
2455 		if (sendReply) {
2456 			status_t error = kill_interruptable_write_port(replyPort, command,
2457 				&reply, replySize);
2458 
2459 			if (error != B_OK) {
2460 				// The debugger port either no longer exists or we were
2461 				// interrupted by a kill signal. In either case we terminate.
2462 				TRACE(("nub thread %" B_PRId32 ": failed to send reply to port "
2463 					"%" B_PRId32 ": %s\n", nubThread->id, replyPort,
2464 					strerror(error)));
2465 
2466 				nub_thread_cleanup(nubThread);
2467 				return error;
2468 			}
2469 		}
2470 	}
2471 }
2472 
2473 
2474 /**	\brief Helper function for install_team_debugger() that sets up the team
2475 		   and thread debug infos.
2476 
2477 	The caller must hold the team's lock as well as the team debug info lock.
2478 
2479 	The function also clears the arch-specific team and thread debug infos
2480 	(including, among other things, formerly set break- and watchpoints).
2481  */
2482 static void
2483 install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam,
2484 	port_id debuggerPort, port_id nubPort, thread_id nubThread,
2485 	sem_id debuggerPortWriteLock, thread_id causingThread)
2486 {
2487 	atomic_set(&team->debug_info.flags,
2488 		B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED);
2489 	team->debug_info.nub_port = nubPort;
2490 	team->debug_info.nub_thread = nubThread;
2491 	team->debug_info.debugger_team = debuggerTeam;
2492 	team->debug_info.debugger_port = debuggerPort;
2493 	team->debug_info.debugger_write_lock = debuggerPortWriteLock;
2494 	team->debug_info.causing_thread = causingThread;
2495 
2496 	arch_clear_team_debug_info(&team->debug_info.arch_info);
2497 
2498 	// set the user debug flags and signal masks of all threads to the default
2499 	for (Thread *thread = team->thread_list; thread;
2500 			thread = thread->team_next) {
2501 		SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
2502 
2503 		if (thread->id == nubThread) {
2504 			atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD);
2505 		} else {
2506 			int32 flags = thread->debug_info.flags
2507 				& ~B_THREAD_DEBUG_USER_FLAG_MASK;
2508 			atomic_set(&thread->debug_info.flags,
2509 				flags | B_THREAD_DEBUG_DEFAULT_FLAGS);
2510 			thread->debug_info.ignore_signals = 0;
2511 			thread->debug_info.ignore_signals_once = 0;
2512 
2513 			arch_clear_thread_debug_info(&thread->debug_info.arch_info);
2514 		}
2515 	}
2516 
2517 	// update the thread::flags fields
2518 	update_threads_debugger_installed_flag(team);
2519 }
2520 
2521 
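/**	\brief Installs a debugger for the given team.

	If \a useDefault is \c true, the default debugger port is used instead of
	\a debuggerPort. If a debugger is installed already, the existing nub port
	is returned when \a dontReplace is \c true; otherwise a handover prepared
	by the old debugger is completed, or the call fails. If no debugger is
	installed yet, the debugger write lock, the nub port, the breakpoint
	manager, and the nub thread are created.

	\return The ID of the team's nub port on success, an error code otherwise.
 */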
2522 static port_id
2523 install_team_debugger(team_id teamID, port_id debuggerPort,
2524 	thread_id causingThread, bool useDefault, bool dontReplace)
2525 {
2526 	TRACE(("install_team_debugger(team: %" B_PRId32 ", port: %" B_PRId32 ", "
2527 		"default: %d, dontReplace: %d)\n", teamID, debuggerPort, useDefault,
2528 		dontReplace));
2529 
2530 	if (useDefault)
2531 		debuggerPort = atomic_get(&sDefaultDebuggerPort);
2532 
2533 	// get the debugger team
2534 	port_info debuggerPortInfo;
2535 	status_t error = get_port_info(debuggerPort, &debuggerPortInfo);
2536 	if (error != B_OK) {
2537 		TRACE(("install_team_debugger(): Failed to get debugger port info: "
2538 			"%" B_PRIx32 "\n", error));
2539 		return error;
2540 	}
2541 	team_id debuggerTeam = debuggerPortInfo.team;
2542 
2543 	// Check the debugger team: It must neither be the kernel team nor the
2544 	// debugged team.
2545 	if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) {
2546 		TRACE(("install_team_debugger(): Can't debug kernel or debugger team. "
2547 			"debugger: %" B_PRId32 ", debugged: %" B_PRId32 "\n", debuggerTeam,
2548 			teamID));
2549 		return B_NOT_ALLOWED;
2550 	}
2551 
2552 	// get the team
2553 	Team* team;
2554 	ConditionVariable debugChangeCondition;
2555 	debugChangeCondition.Init(NULL, "debug change condition");
2556 	error = prepare_debugger_change(teamID, debugChangeCondition, team);
2557 	if (error != B_OK)
2558 		return error;
2559 
2560 	// get the real team ID
2561 	teamID = team->id;
2562 
2563 	// check if a debugger is already installed
2564 
2565 	bool done = false;
2566 	port_id result = B_ERROR;
2567 	bool handOver = false;
2568 	port_id oldDebuggerPort = -1;
2569 	port_id nubPort = -1;
2570 
2571 	TeamLocker teamLocker(team);
2572 	cpu_status state = disable_interrupts();
2573 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2574 
2575 	int32 teamDebugFlags = team->debug_info.flags;
2576 
2577 	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2578 		// There's already a debugger installed.
2579 		if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) {
2580 			if (dontReplace) {
2581 				// We're fine with already having a debugger.
2582 				error = B_OK;
2583 				done = true;
2584 				result = team->debug_info.nub_port;
2585 			} else {
2586 				// a handover to another debugger is requested
2587 				// Set the handing-over flag -- we'll clear both flags after
2588 				// having sent the handed-over message to the new debugger.
2589 				atomic_or(&team->debug_info.flags,
2590 					B_TEAM_DEBUG_DEBUGGER_HANDING_OVER);
2591 
2592 				oldDebuggerPort = team->debug_info.debugger_port;
2593 				result = nubPort = team->debug_info.nub_port;
2594 				if (causingThread < 0)
2595 					causingThread = team->debug_info.causing_thread;
2596 
2597 				// set the new debugger
2598 				install_team_debugger_init_debug_infos(team, debuggerTeam,
2599 					debuggerPort, nubPort, team->debug_info.nub_thread,
2600 					team->debug_info.debugger_write_lock, causingThread);
2601 
2602 				handOver = true;
2603 				done = true;
2604 			}
2605 		} else {
2606 			// there's already a debugger installed
2607 			error = (dontReplace ? B_OK : B_BAD_VALUE);
2608 			done = true;
2609 			result = team->debug_info.nub_port;
2610 		}
2611 	} else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0
2612 		&& useDefault) {
2613 		// No debugger is installed yet, disable_debugger() has been invoked,
2614 		// and we would be installing the default debugger. Just fail.
2615 		error = B_BAD_VALUE;
2616 	}
2617 
2618 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2619 	restore_interrupts(state);
2620 	teamLocker.Unlock();
2621 
2622 	if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) {
2623 		// The old debugger must just have died. Just proceed as
2624 		// if there was no debugger installed. We may still be too
2625 		// early, in which case we'll fail, but this race condition
2626 		// should be unbelievably rare and relatively harmless.
2627 		handOver = false;
2628 		done = false;
2629 	}
2630 
2631 	if (handOver) {
2632 		// prepare the handed-over message
2633 		debug_handed_over notification;
2634 		notification.origin.thread = -1;
2635 		notification.origin.team = teamID;
2636 		notification.origin.nub_port = nubPort;
2637 		notification.debugger = debuggerTeam;
2638 		notification.debugger_port = debuggerPort;
2639 		notification.causing_thread = causingThread;
2640 
2641 		// notify the new debugger
2642 		error = write_port_etc(debuggerPort,
2643 			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
2644 			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
2645 		if (error != B_OK) {
2646 			dprintf("install_team_debugger(): Failed to send message to new "
2647 				"debugger: %s\n", strerror(error));
2648 		}
2649 
2650 		// clear the handed-over and handing-over flags
2651 		state = disable_interrupts();
2652 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2653 
2654 		atomic_and(&team->debug_info.flags,
2655 			~(B_TEAM_DEBUG_DEBUGGER_HANDOVER
2656 				| B_TEAM_DEBUG_DEBUGGER_HANDING_OVER));
2657 
2658 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2659 		restore_interrupts(state);
2660 
2661 		finish_debugger_change(team);
2662 
2663 		// notify the nub thread
2664 		kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER,
2665 			NULL, 0);
2666 
2667 		// notify the old debugger
2668 		error = write_port_etc(oldDebuggerPort,
2669 			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
2670 			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
2671 		if (error != B_OK) {
2672 			TRACE(("install_team_debugger(): Failed to send message to old "
2673 				"debugger: %s\n", strerror(error)));
2674 		}
2675 
2676 		TRACE(("install_team_debugger() done: handed over to debugger: team: "
2677 			"%" B_PRId32 ", port: %" B_PRId32 "\n", debuggerTeam,
2678 			debuggerPort));
2679 
2680 		return result;
2681 	}
2682 
2683 	if (done || error != B_OK) {
2684 		TRACE(("install_team_debugger() done1: %" B_PRId32 "\n",
2685 			(error == B_OK ? result : error)));
2686 		finish_debugger_change(team);
2687 		return (error == B_OK ? result : error);
2688 	}
2689 
2690 	// create the debugger write lock semaphore
2691 	char nameBuffer[B_OS_NAME_LENGTH];
2692 	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debugger port "
2693 		"write", teamID);
2694 	sem_id debuggerWriteLock = create_sem(1, nameBuffer);
2695 	if (debuggerWriteLock < 0)
2696 		error = debuggerWriteLock;
2697 
2698 	// create the nub port
2699 	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug", teamID);
2700 	if (error == B_OK) {
2701 		nubPort = create_port(1, nameBuffer);
2702 		if (nubPort < 0)
2703 			error = nubPort;
2704 		else
2705 			result = nubPort;
2706 	}
2707 
2708 	// make the debugger team the port owner; thus we'll know when the debugger
2709 	// is gone and can clean up
2710 	if (error == B_OK)
2711 		error = set_port_owner(nubPort, debuggerTeam);
2712 
2713 	// create the breakpoint manager
2714 	BreakpointManager* breakpointManager = NULL;
2715 	if (error == B_OK) {
2716 		breakpointManager = new(std::nothrow) BreakpointManager;
2717 		if (breakpointManager != NULL)
2718 			error = breakpointManager->Init();
2719 		else
2720 			error = B_NO_MEMORY;
2721 	}
2722 
2723 	// spawn the nub thread
2724 	thread_id nubThread = -1;
2725 	if (error == B_OK) {
2726 		snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug task",
2727 			teamID);
2728 		nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer,
2729 			B_NORMAL_PRIORITY, NULL, teamID);
2730 		if (nubThread < 0)
2731 			error = nubThread;
2732 	}
2733 
2734 	// now adjust the debug info accordingly
2735 	if (error == B_OK) {
2736 		TeamLocker teamLocker(team);
2737 		state = disable_interrupts();
2738 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2739 
2740 		team->debug_info.breakpoint_manager = breakpointManager;
2741 		install_team_debugger_init_debug_infos(team, debuggerTeam,
2742 			debuggerPort, nubPort, nubThread, debuggerWriteLock,
2743 			causingThread);
2744 
2745 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2746 		restore_interrupts(state);
2747 	}
2748 
2749 	finish_debugger_change(team);
2750 
2751 	// if everything went fine, resume the nub thread, otherwise clean up
2752 	if (error == B_OK) {
2753 		resume_thread(nubThread);
2754 	} else {
2755 		// delete port and terminate thread
2756 		if (nubPort >= 0) {
2757 			set_port_owner(nubPort, B_CURRENT_TEAM);
2758 			delete_port(nubPort);
2759 		}
2760 		if (nubThread >= 0) {
2761 			int32 result;
2762 			wait_for_thread(nubThread, &result);
2763 		}
2764 
2765 		delete breakpointManager;
2766 	}
2767 
2768 	TRACE(("install_team_debugger() done2: %" B_PRId32 "\n",
2769 		(error == B_OK ? result : error)));
2770 	return (error == B_OK ? result : error);
2771 }
2772 
2773 
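/**	\brief Makes sure a debugger is installed for the current team.

	Installs the default debugger via install_team_debugger(); an already
	installed debugger is left untouched.
 */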
2774 static status_t
2775 ensure_debugger_installed()
2776 {
2777 	port_id port = install_team_debugger(B_CURRENT_TEAM, -1,
2778 		thread_get_current_thread_id(), true, true);
2779 	return port >= 0 ? B_OK : port;
2780 }
2781 
2782 
2783 // #pragma mark -
2784 
2785 
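/**	\brief Implements the debugger() syscall.

	Makes sure a debugger is installed for the current team (the team is
	terminated if that fails) and reports a B_DEBUGGER_MESSAGE_DEBUGGER_CALL
	event carrying the user-supplied message.
 */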
2786 void
2787 _user_debugger(const char *userMessage)
2788 {
2789 	// install the default debugger if there is none yet
2790 	status_t error = ensure_debugger_installed();
2791 	if (error != B_OK) {
2792 		// time to commit suicide
2793 		char buffer[128];
2794 		ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer));
2795 		if (length >= 0) {
2796 			dprintf("_user_debugger(): Failed to install debugger. Message is: "
2797 				"`%s'\n", buffer);
2798 		} else {
2799 			dprintf("_user_debugger(): Failed to install debugger. Message is: "
2800 				"%p (%s)\n", userMessage, strerror(length));
2801 		}
2802 		_user_exit_team(1);
2803 	}
2804 
2805 	// prepare the message
2806 	debug_debugger_call message;
2807 	message.message = (void*)userMessage;
2808 
2809 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message,
2810 		sizeof(message), true);
2811 }
2812 
2813 
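/**	\brief Implements the disable_debugger() syscall.

	Sets or clears the team's B_TEAM_DEBUG_DEBUGGER_DISABLED flag according to
	\a state; cf. the TODO below regarding the return value.
 */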
2814 int
2815 _user_disable_debugger(int state)
2816 {
2817 	Team *team = thread_get_current_thread()->team;
2818 
2819 	TRACE(("_user_disable_debugger(%d): team: %" B_PRId32 "\n", state,
2820 		team->id));
2821 
2822 	cpu_status cpuState = disable_interrupts();
2823 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2824 
2825 	int32 oldFlags;
2826 	if (state) {
2827 		oldFlags = atomic_or(&team->debug_info.flags,
2828 			B_TEAM_DEBUG_DEBUGGER_DISABLED);
2829 	} else {
2830 		oldFlags = atomic_and(&team->debug_info.flags,
2831 			~B_TEAM_DEBUG_DEBUGGER_DISABLED);
2832 	}
2833 
2834 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2835 	restore_interrupts(cpuState);
2836 
2837 	// TODO: Check whether the return value is really the old state.
2838 	return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED);
2839 }
2840 
2841 
2842 status_t
2843 _user_install_default_debugger(port_id debuggerPort)
2844 {
2845 	// Do not allow non-root processes to install a default debugger.
2846 	if (geteuid() != 0)
2847 		return B_PERMISSION_DENIED;
2848 
2849 	// if a port was supplied, check whether it is valid
2850 	if (debuggerPort >= 0) {
2851 		port_info portInfo;
2852 		status_t error = get_port_info(debuggerPort, &portInfo);
2853 		if (error != B_OK)
2854 			return error;
2855 
2856 		// the debugger team must not be the kernel team
2857 		if (portInfo.team == team_get_kernel_team_id())
2858 			return B_NOT_ALLOWED;
2859 	}
2860 
2861 	atomic_set(&sDefaultDebuggerPort, debuggerPort);
2862 
2863 	return B_OK;
2864 }
2865 
2866 
2867 port_id
2868 _user_install_team_debugger(team_id teamID, port_id debuggerPort)
2869 {
2870 	if (geteuid() != 0 && team_geteuid(teamID) != geteuid())
2871 		return B_PERMISSION_DENIED;
2872 
2873 	return install_team_debugger(teamID, debuggerPort, -1, false, false);
2874 }
2875 
2876 
2877 status_t
2878 _user_remove_team_debugger(team_id teamID)
2879 {
2880 	Team* team;
2881 	ConditionVariable debugChangeCondition;
2882 	debugChangeCondition.Init(NULL, "debug change condition");
2883 	status_t error = prepare_debugger_change(teamID, debugChangeCondition,
2884 		team);
2885 	if (error != B_OK)
2886 		return error;
2887 
2888 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2889 
2890 	thread_id nubThread = -1;
2891 	port_id nubPort = -1;
2892 
2893 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2894 		// there's a debugger installed
2895 		nubThread = team->debug_info.nub_thread;
2896 		nubPort = team->debug_info.nub_port;
2897 	} else {
2898 		// no debugger installed
2899 		error = B_BAD_VALUE;
2900 	}
2901 
2902 	debugInfoLocker.Unlock();
2903 
2904 	// Delete the nub port -- this will cause the nub thread to terminate and
2905 	// remove the debugger.
2906 	if (nubPort >= 0)
2907 		delete_port(nubPort);
2908 
2909 	finish_debugger_change(team);
2910 
2911 	// wait for the nub thread
2912 	if (nubThread >= 0)
2913 		wait_for_thread(nubThread, NULL);
2914 
2915 	return error;
2916 }
2917 
2918 
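/**	\brief Implements the debug_thread() syscall.

	Marks the given thread with B_THREAD_DEBUG_STOP and sends it
	SIGNAL_DEBUG_THREAD, so that it enters the debugger as soon as possible.
	Fails for threads of the kernel team, for dying threads, and for the nub
	thread.
 */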
2919 status_t
2920 _user_debug_thread(thread_id threadID)
2921 {
2922 	TRACE(("[%" B_PRId32 "] _user_debug_thread(%" B_PRId32 ")\n",
2923 		find_thread(NULL), threadID));
2924 
2925 	// get the thread
2926 	Thread* thread = Thread::GetAndLock(threadID);
2927 	if (thread == NULL)
2928 		return B_BAD_THREAD_ID;
2929 	BReference<Thread> threadReference(thread, true);
2930 	ThreadLocker threadLocker(thread, true);
2931 
2932 	// we can't debug the kernel team
2933 	if (thread->team == team_get_kernel_team())
2934 		return B_NOT_ALLOWED;
2935 
2936 	InterruptsLocker interruptsLocker;
2937 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
2938 
2939 	// If the thread is already dying, it's too late to debug it.
2940 	if ((thread->debug_info.flags & B_THREAD_DEBUG_DYING) != 0)
2941 		return B_BAD_THREAD_ID;
2942 
2943 	// don't debug the nub thread
2944 	if ((thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) != 0)
2945 		return B_NOT_ALLOWED;
2946 
2947 	// already marked stopped or being told to stop?
2948 	if ((thread->debug_info.flags
2949 			& (B_THREAD_DEBUG_STOPPED | B_THREAD_DEBUG_STOP)) != 0) {
2950 		return B_OK;
2951 	}
2952 
2953 	// set the flag that tells the thread to stop as soon as possible
2954 	atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP);
2955 
2956 	update_thread_user_debug_flag(thread);
2957 
2958 	// send the thread a SIGNAL_DEBUG_THREAD, so it is interrupted (or
2959 	// continued)
2960 	threadDebugInfoLocker.Unlock();
2961 	ReadSpinLocker teamLocker(thread->team_lock);
2962 	SpinLocker locker(thread->team->signal_lock);
2963 
2964 	send_signal_to_thread_locked(thread, SIGNAL_DEBUG_THREAD, NULL, 0);
2965 
2966 	return B_OK;
2967 }
2968 
2969 
2970 void
2971 _user_wait_for_debugger(void)
2972 {
2973 	debug_thread_debugged message = {};
2974 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message,
2975 		sizeof(message), false);
2976 }
2977 
2978 
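/**	\brief Sets a breakpoint or watchpoint for the calling team.

	Only allowed as long as no debugger is installed; the break/watchpoint is
	set directly via the arch support rather than through a BreakpointManager.
 */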
2979 status_t
2980 _user_set_debugger_breakpoint(void *address, uint32 type, int32 length,
2981 	bool watchpoint)
2982 {
2983 	// check the address and size
2984 	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
2985 		return B_BAD_ADDRESS;
2986 	if (watchpoint && length < 0)
2987 		return B_BAD_VALUE;
2988 
2989 	// check whether a debugger is installed already
2990 	team_debug_info teamDebugInfo;
2991 	get_team_debug_info(teamDebugInfo);
2992 	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2993 		return B_BAD_VALUE;
2994 
2995 	// We can't help it: there's a small but relatively harmless race condition
2996 	// here, since a debugger could be installed in the meantime. The worst case
2997 	// is that we install a break/watchpoint the debugger doesn't know about.
2998 
2999 	// set the break/watchpoint
3000 	status_t result;
3001 	if (watchpoint)
3002 		result = arch_set_watchpoint(address, type, length);
3003 	else
3004 		result = arch_set_breakpoint(address);
3005 
3006 	if (result == B_OK)
3007 		update_threads_breakpoints_flag();
3008 
3009 	return result;
3010 }
3011 
3012 
3013 status_t
3014 _user_clear_debugger_breakpoint(void *address, bool watchpoint)
3015 {
3016 	// check the address
3017 	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
3018 		return B_BAD_ADDRESS;
3019 
3020 	// check whether a debugger is installed already
3021 	team_debug_info teamDebugInfo;
3022 	get_team_debug_info(teamDebugInfo);
3023 	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
3024 		return B_BAD_VALUE;
3025 
3026 	// We can't help it: there's a small but relatively harmless race condition
3027 	// here, since a debugger could be installed in the meantime. The worst case
3028 	// is that we clear a break/watchpoint the debugger has just installed.
3029 
3030 	// clear the break/watchpoint
3031 	status_t result;
3032 	if (watchpoint)
3033 		result = arch_clear_watchpoint(address);
3034 	else
3035 		result = arch_clear_breakpoint(address);
3036 
3037 	if (result == B_OK)
3038 		update_threads_breakpoints_flag();
3039 
3040 	return result;
3041 }
3042