xref: /haiku/src/system/kernel/debug/user_debugger.cpp (revision 6f80a9801fedbe7355c4360bd204ba746ec3ec2d)
1 /*
2  * Copyright 2005-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2015, Rene Gollent, rene@gollent.com.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 
8 #include <errno.h>
9 #include <signal.h>
10 #include <stdlib.h>
11 #include <stdio.h>
12 #include <string.h>
13 
14 #include <algorithm>
15 
16 #include <arch/debug.h>
17 #include <arch/user_debugger.h>
18 #include <core_dump.h>
19 #include <cpu.h>
20 #include <debugger.h>
21 #include <kernel.h>
22 #include <KernelExport.h>
23 #include <kscheduler.h>
24 #include <ksignal.h>
25 #include <ksyscalls.h>
26 #include <port.h>
27 #include <sem.h>
28 #include <team.h>
29 #include <thread.h>
30 #include <thread_types.h>
31 #include <user_debugger.h>
32 #include <vm/vm.h>
33 #include <vm/vm_types.h>
34 
35 #include <AutoDeleter.h>
36 #include <util/AutoLock.h>
37 #include <util/ThreadAutoLock.h>
38 
39 #include "BreakpointManager.h"
40 
41 
42 //#define TRACE_USER_DEBUGGER
43 #ifdef TRACE_USER_DEBUGGER
44 #	define TRACE(x) dprintf x
45 #else
46 #	define TRACE(x) ;
47 #endif
48 
49 
50 // TODO: Since the introduction of team_debug_info::debugger_changed_condition
51 // there's some potential for simplifications. E.g. clear_team_debug_info() and
52 // destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus
53 // arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()).
54 
55 
56 static port_id sDefaultDebuggerPort = -1;
57 	// accessed atomically
58 
59 static timer sProfilingTimers[SMP_MAX_CPUS];
60 	// a profiling timer for each CPU -- used when a profiled thread is running
61 	// on that CPU
62 
63 
64 static void schedule_profiling_timer(Thread* thread, bigtime_t interval);
65 static int32 profiling_event(timer* unused);
66 static status_t ensure_debugger_installed();
67 static void get_team_debug_info(team_debug_info &teamDebugInfo);
68 
69 
70 static inline status_t
71 kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
72 	size_t bufferSize)
73 {
74 	return write_port_etc(port, code, buffer, bufferSize, B_KILL_CAN_INTERRUPT,
75 		0);
76 }
77 
78 
79 static status_t
80 debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
81 	bool dontWait)
82 {
83 	TRACE(("debugger_write(): thread: %" B_PRId32 ", team %" B_PRId32 ", "
84 		"port: %" B_PRId32 ", code: %" B_PRIx32 ", message: %p, size: %lu, "
85 		"dontWait: %d\n", thread_get_current_thread()->id,
86 		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
87 		dontWait));
88 
89 	status_t error = B_OK;
90 
91 	// get the team debug info
92 	team_debug_info teamDebugInfo;
93 	get_team_debug_info(teamDebugInfo);
94 	sem_id writeLock = teamDebugInfo.debugger_write_lock;
95 
96 	// get the write lock
97 	TRACE(("debugger_write(): acquiring write lock...\n"));
98 	error = acquire_sem_etc(writeLock, 1,
99 		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
100 	if (error != B_OK) {
101 		TRACE(("debugger_write() done1: %" B_PRIx32 "\n", error));
102 		return error;
103 	}
104 
105 	// re-get the team debug info
106 	get_team_debug_info(teamDebugInfo);
107 
108 	if (teamDebugInfo.debugger_port != port
109 		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
110 		// The debugger has changed in the meantime or we are about to be
111 		// handed over to a new debugger. In either case we don't send the
112 		// message.
113 		TRACE(("debugger_write(): %s\n",
114 			(teamDebugInfo.debugger_port != port ? "debugger port changed"
115 				: "handover flag set")));
116 	} else {
117 		TRACE(("debugger_write(): writing to port...\n"));
118 
119 		error = write_port_etc(port, code, buffer, bufferSize,
120 			dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
121 	}
122 
123 	// release the write lock
124 	release_sem(writeLock);
125 
126 	TRACE(("debugger_write() done: %" B_PRIx32 "\n", error));
127 
128 	return error;
129 }
130 
131 
132 /*!	Updates the thread::flags field according to what user debugger flags are
133 	set for the thread.
134 	Interrupts must be disabled and the thread's debug info lock must be held.
135 */
136 static void
137 update_thread_user_debug_flag(Thread* thread)
138 {
139 	if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0)
140 		atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD);
141 	else
142 		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD);
143 }
144 
145 
146 /*!	Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the
147 	given thread.
148 	Interrupts must be disabled and the thread debug info lock must be held.
149 */
150 static void
151 update_thread_breakpoints_flag(Thread* thread)
152 {
153 	Team* team = thread->team;
154 
155 	if (arch_has_breakpoints(&team->debug_info.arch_info))
156 		atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
157 	else
158 		atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
159 }
160 
161 
162 /*!	Updates the Thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
163 	threads of the current team.
164 */
165 static void
166 update_threads_breakpoints_flag()
167 {
168 	Team* team = thread_get_current_thread()->team;
169 
170 	TeamLocker teamLocker(team);
171 
172 	Thread* thread = team->thread_list;
173 
174 	if (arch_has_breakpoints(&team->debug_info.arch_info)) {
175 		for (; thread != NULL; thread = thread->team_next)
176 			atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
177 	} else {
178 		for (; thread != NULL; thread = thread->team_next)
179 			atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
180 	}
181 }
182 
183 
184 /*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of the
185 	given thread, which must be the current thread.
186 */
187 static void
188 update_thread_debugger_installed_flag(Thread* thread)
189 {
190 	Team* team = thread->team;
191 
192 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
193 		atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
194 	else
195 		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
196 }
197 
198 
199 /*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all
200 	threads of the given team.
201 	The team's lock must be held.
202 */
203 static void
204 update_threads_debugger_installed_flag(Team* team)
205 {
206 	Thread* thread = team->thread_list;
207 
208 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
209 		for (; thread != NULL; thread = thread->team_next)
210 			atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
211 	} else {
212 		for (; thread != NULL; thread = thread->team_next)
213 			atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
214 	}
215 }
216 
217 
218 /**
219  *	For the first initialization the function must be called with \a initLock
220  *	set to \c true. If another thread might access the structure at the same
221  *	time, `lock' must be held when calling the function.
222  */
223 void
224 clear_team_debug_info(struct team_debug_info *info, bool initLock)
225 {
226 	if (info) {
227 		arch_clear_team_debug_info(&info->arch_info);
228 		atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
229 		info->debugger_team = -1;
230 		info->debugger_port = -1;
231 		info->nub_thread = -1;
232 		info->nub_port = -1;
233 		info->debugger_write_lock = -1;
234 		info->causing_thread = -1;
235 		info->image_event = 0;
236 		info->breakpoint_manager = NULL;
237 
238 		if (initLock) {
239 			B_INITIALIZE_SPINLOCK(&info->lock);
240 			info->debugger_changed_condition = NULL;
241 		}
242 	}
243 }
244 
245 /**
246  *  `lock' must not be held nor may interrupts be disabled.
247  *  \a info must not be a member of a team struct (or the team struct must no
248  *  longer be accessible, i.e. the team should already be removed).
249  *
250  *	In case the team is still accessible, the procedure is:
251  *	1. get `lock'
252  *	2. copy the team debug info on stack
253  *	3. call clear_team_debug_info() on the team debug info
254  *	4. release `lock'
255  *	5. call destroy_team_debug_info() on the copied team debug info
256  */
257 static void
258 destroy_team_debug_info(struct team_debug_info *info)
259 {
260 	if (info) {
261 		arch_destroy_team_debug_info(&info->arch_info);
262 
263 		// delete the breakpoint manager
264 		delete info->breakpoint_manager;
265 		info->breakpoint_manager = NULL;
266 
267 		// delete the debugger port write lock
268 		if (info->debugger_write_lock >= 0) {
269 			delete_sem(info->debugger_write_lock);
270 			info->debugger_write_lock = -1;
271 		}
272 
273 		// delete the nub port
274 		if (info->nub_port >= 0) {
275 			set_port_owner(info->nub_port, B_CURRENT_TEAM);
276 			delete_port(info->nub_port);
277 			info->nub_port = -1;
278 		}
279 
280 		// wait for the nub thread
281 		if (info->nub_thread >= 0) {
282 			if (info->nub_thread != thread_get_current_thread()->id) {
283 				int32 result;
284 				wait_for_thread(info->nub_thread, &result);
285 			}
286 
287 			info->nub_thread = -1;
288 		}
289 
290 		atomic_set(&info->flags, 0);
291 		info->debugger_team = -1;
292 		info->debugger_port = -1;
293 		info->causing_thread = -1;
294 		info->image_event = -1;
295 	}
296 }
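

// Illustrative sketch (not part of the original file): when the team struct is
// still accessible, the procedure described above is effectively what
// nub_thread_cleanup() does. Assuming a valid Team* `team', it would look
// roughly like this:
//
//	team_debug_info copiedInfo;
//	cpu_status state = disable_interrupts();
//	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);		// 1. get `lock'
//	copiedInfo = team->debug_info;						// 2. copy on the stack
//	clear_team_debug_info(&team->debug_info, false);	// 3. clear the original
//	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);		// 4. release `lock'
//	restore_interrupts(state);
//	destroy_team_debug_info(&copiedInfo);				// 5. destroy the copy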
297 
298 
299 void
300 init_thread_debug_info(struct thread_debug_info *info)
301 {
302 	if (info) {
303 		B_INITIALIZE_SPINLOCK(&info->lock);
304 		arch_clear_thread_debug_info(&info->arch_info);
305 		info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
306 		info->debug_port = -1;
307 		info->ignore_signals = 0;
308 		info->ignore_signals_once = 0;
309 		info->profile.sample_area = -1;
310 		info->profile.samples = NULL;
311 		info->profile.buffer_full = false;
312 		info->profile.installed_timer = NULL;
313 	}
314 }
315 
316 
317 /*!	Clears the debug info for the current thread.
318 	Invoked with thread debug info lock being held.
319 */
320 void
321 clear_thread_debug_info(struct thread_debug_info *info, bool dying)
322 {
323 	if (info) {
324 		// cancel profiling timer
325 		if (info->profile.installed_timer != NULL) {
326 			cancel_timer(info->profile.installed_timer);
327 			info->profile.installed_timer = NULL;
328 		}
329 
330 		arch_clear_thread_debug_info(&info->arch_info);
331 		atomic_set(&info->flags,
332 			B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ? B_THREAD_DEBUG_DYING : 0));
333 		info->debug_port = -1;
334 		info->ignore_signals = 0;
335 		info->ignore_signals_once = 0;
336 		info->profile.sample_area = -1;
337 		info->profile.samples = NULL;
338 		info->profile.buffer_full = false;
339 	}
340 }
341 
342 
343 void
344 destroy_thread_debug_info(struct thread_debug_info *info)
345 {
346 	if (info) {
347 		area_id sampleArea = info->profile.sample_area;
348 		if (sampleArea >= 0) {
349 			area_info areaInfo;
350 			if (get_area_info(sampleArea, &areaInfo) == B_OK) {
351 				unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
352 				delete_area(sampleArea);
353 			}
354 		}
355 
356 		arch_destroy_thread_debug_info(&info->arch_info);
357 
358 		if (info->debug_port >= 0) {
359 			delete_port(info->debug_port);
360 			info->debug_port = -1;
361 		}
362 
363 		info->ignore_signals = 0;
364 		info->ignore_signals_once = 0;
365 
366 		atomic_set(&info->flags, 0);
367 	}
368 }
369 
370 
371 static status_t
372 prepare_debugger_change(team_id teamID, ConditionVariable& condition,
373 	Team*& team)
374 {
375 	// We look up the team by ID, even in the case of the current team, so we
376 	// can be sure that the team is not already dying.
377 	if (teamID == B_CURRENT_TEAM)
378 		teamID = thread_get_current_thread()->team->id;
379 
380 	while (true) {
381 		// get the team
382 		team = Team::GetAndLock(teamID);
383 		if (team == NULL)
384 			return B_BAD_TEAM_ID;
385 		BReference<Team> teamReference(team, true);
386 		TeamLocker teamLocker(team, true);
387 
388 		// don't allow messing with the kernel team
389 		if (team == team_get_kernel_team())
390 			return B_NOT_ALLOWED;
391 
392 		// check whether the condition is already set
393 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
394 
395 		if (team->debug_info.debugger_changed_condition == NULL) {
396 			// nobody there yet -- set our condition variable and be done
397 			team->debug_info.debugger_changed_condition = &condition;
398 			return B_OK;
399 		}
400 
401 		// we'll have to wait
402 		ConditionVariableEntry entry;
403 		team->debug_info.debugger_changed_condition->Add(&entry);
404 
405 		debugInfoLocker.Unlock();
406 		teamLocker.Unlock();
407 
408 		entry.Wait();
409 	}
410 }
411 
412 
413 static void
414 prepare_debugger_change(Team* team, ConditionVariable& condition)
415 {
416 	while (true) {
417 		// check whether the condition is already set
418 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
419 
420 		if (team->debug_info.debugger_changed_condition == NULL) {
421 			// nobody there yet -- set our condition variable and be done
422 			team->debug_info.debugger_changed_condition = &condition;
423 			return;
424 		}
425 
426 		// we'll have to wait
427 		ConditionVariableEntry entry;
428 		team->debug_info.debugger_changed_condition->Add(&entry);
429 
430 		debugInfoLocker.Unlock();
431 
432 		entry.Wait();
433 	}
434 }
435 
436 
437 static void
438 finish_debugger_change(Team* team)
439 {
440 	// unset our condition variable and notify all threads waiting on it
441 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
442 
443 	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
444 	team->debug_info.debugger_changed_condition = NULL;
445 
446 	condition->NotifyAll();
447 }
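

// Usage sketch: callers bracket a debugger change with these two helpers, so
// that no one else can install or remove a debugger in the meantime, e.g. (as
// in thread_hit_debug_event() further below):
//
//	ConditionVariable condition;
//	condition.Init(team, "debug change condition");
//	prepare_debugger_change(team, condition);
//	// ... inspect or modify team->debug_info ...
//	finish_debugger_change(team);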
448 
449 
450 void
451 user_debug_prepare_for_exec()
452 {
453 	Thread *thread = thread_get_current_thread();
454 	Team *team = thread->team;
455 
456 	// If a debugger is installed for the team and the thread debug stuff
457 	// initialized, change the ownership of the debug port for the thread
458 	// to the kernel team, since exec_team() deletes all ports owned by this
459 	// team. We change the ownership back later.
460 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
461 		// get the port
462 		port_id debugPort = -1;
463 
464 		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
465 
466 		if ((thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0)
467 			debugPort = thread->debug_info.debug_port;
468 
469 		threadDebugInfoLocker.Unlock();
470 
471 		// set the new port ownership
472 		if (debugPort >= 0)
473 			set_port_owner(debugPort, team_get_kernel_team_id());
474 	}
475 }
476 
477 
478 void
479 user_debug_finish_after_exec()
480 {
481 	Thread *thread = thread_get_current_thread();
482 	Team *team = thread->team;
483 
484 	// If a debugger is installed for the team and the thread debug stuff
485 	// initialized for this thread, change the ownership of its debug port
486 	// back to this team.
487 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
488 		// get the port
489 		port_id debugPort = -1;
490 
491 		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
492 
493 		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
494 			debugPort = thread->debug_info.debug_port;
495 
496 		threadDebugInfoLocker.Unlock();
497 
498 		// set the new port ownership
499 		if (debugPort >= 0)
500 			set_port_owner(debugPort, team->id);
501 	}
502 }
503 
504 
505 void
506 init_user_debug()
507 {
508 	#ifdef ARCH_INIT_USER_DEBUG
509 		ARCH_INIT_USER_DEBUG();
510 	#endif
511 }
512 
513 
514 static void
515 get_team_debug_info(team_debug_info &teamDebugInfo)
516 {
517 	Thread *thread = thread_get_current_thread();
518 
519 	cpu_status state = disable_interrupts();
520 	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
521 
522 	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));
523 
524 	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
525 	restore_interrupts(state);
526 }
527 
528 
529 static status_t
530 thread_hit_debug_event_internal(debug_debugger_message event,
531 	const void *message, int32 size, bool requireDebugger, bool &restart)
532 {
533 	restart = false;
534 	Thread *thread = thread_get_current_thread();
535 
536 	TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", event: %" B_PRIu32
537 		", message: %p, size: %" B_PRId32 "\n", thread->id, (uint32)event,
538 		message, size));
539 
540 	// check if there's a debug port already
541 	bool setPort = !(atomic_get(&thread->debug_info.flags)
542 		& B_THREAD_DEBUG_INITIALIZED);
543 
544 	// create a port, if there is none yet
545 	port_id port = -1;
546 	if (setPort) {
547 		char nameBuffer[128];
548 		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %" B_PRId32,
549 			thread->id);
550 
551 		port = create_port(1, nameBuffer);
552 		if (port < 0) {
553 			dprintf("thread_hit_debug_event(): Failed to create debug port: "
554 				"%s\n", strerror(port));
555 			return port;
556 		}
557 	}
558 
559 	// check the debug info structures once more: get the debugger port, set
560 	// the thread's debug port, and update the thread's debug flags
561 	port_id deletePort = port;
562 	port_id debuggerPort = -1;
563 	port_id nubPort = -1;
564 	status_t error = B_OK;
565 	cpu_status state = disable_interrupts();
566 	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
567 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
568 
569 	uint32 threadFlags = thread->debug_info.flags;
570 	threadFlags &= ~B_THREAD_DEBUG_STOP;
571 	bool debuggerInstalled
572 		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
573 	if (thread->id == thread->team->debug_info.nub_thread) {
574 		// Ugh, we're the nub thread. We shouldn't be here.
575 		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %" B_PRId32
576 			"\n", thread->id));
577 
578 		error = B_ERROR;
579 	} else if (debuggerInstalled || !requireDebugger) {
580 		if (debuggerInstalled) {
581 			debuggerPort = thread->team->debug_info.debugger_port;
582 			nubPort = thread->team->debug_info.nub_port;
583 		}
584 
585 		if (setPort) {
586 			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
587 				// someone created a port for us (the port we've created will
588 				// be deleted below)
589 				port = thread->debug_info.debug_port;
590 			} else {
591 				thread->debug_info.debug_port = port;
592 				deletePort = -1;	// keep the port
593 				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
594 			}
595 		} else {
596 			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
597 				port = thread->debug_info.debug_port;
598 			} else {
599 				// someone deleted our port
600 				error = B_ERROR;
601 			}
602 		}
603 	} else
604 		error = B_ERROR;
605 
606 	// update the flags
607 	if (error == B_OK)
608 		threadFlags |= B_THREAD_DEBUG_STOPPED;
609 	atomic_set(&thread->debug_info.flags, threadFlags);
610 
611 	update_thread_user_debug_flag(thread);
612 
613 	threadDebugInfoLocker.Unlock();
614 	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
615 	restore_interrupts(state);
616 
617 	// delete the superfluous port
618 	if (deletePort >= 0)
619 		delete_port(deletePort);
620 
621 	if (error != B_OK) {
622 		TRACE(("thread_hit_debug_event() error: thread: %" B_PRId32 ", error: "
623 			"%" B_PRIx32 "\n", thread->id, error));
624 		return error;
625 	}
626 
627 	// send a message to the debugger port
628 	if (debuggerInstalled) {
629 		// update the message's origin info first
630 		debug_origin *origin = (debug_origin *)message;
631 		origin->thread = thread->id;
632 		origin->team = thread->team->id;
633 		origin->nub_port = nubPort;
634 
635 		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", sending "
636 			"message to debugger port %" B_PRId32 "\n", thread->id,
637 			debuggerPort));
638 
639 		error = debugger_write(debuggerPort, event, message, size, false);
640 	}
641 
642 	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
643 	bool singleStep = false;
644 
645 	if (error == B_OK) {
646 		bool done = false;
647 		while (!done) {
648 			// read a command from the debug port
649 			int32 command;
650 			debugged_thread_message_data commandMessage;
651 			ssize_t commandMessageSize = read_port_etc(port, &command,
652 				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
653 				0);
654 
655 			if (commandMessageSize < 0) {
656 				error = commandMessageSize;
657 				TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed "
658 					"to receive message from port %" B_PRId32 ": %" B_PRIx32 "\n",
659 					thread->id, port, error));
660 				break;
661 			}
662 
663 			switch (command) {
664 				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
665 					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
666 						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
667 						thread->id));
668 					result = commandMessage.continue_thread.handle_event;
669 
670 					singleStep = commandMessage.continue_thread.single_step;
671 					done = true;
672 					break;
673 
674 				case B_DEBUGGED_THREAD_SET_CPU_STATE:
675 				{
676 					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
677 						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
678 						thread->id));
679 					arch_set_debug_cpu_state(
680 						&commandMessage.set_cpu_state.cpu_state);
681 
682 					break;
683 				}
684 
685 				case B_DEBUGGED_THREAD_GET_CPU_STATE:
686 				{
687 					port_id replyPort = commandMessage.get_cpu_state.reply_port;
688 
689 					// prepare the message
690 					debug_nub_get_cpu_state_reply replyMessage;
691 					replyMessage.error = B_OK;
692 					replyMessage.message = event;
693 					arch_get_debug_cpu_state(&replyMessage.cpu_state);
694 
695 					// send it
696 					error = kill_interruptable_write_port(replyPort, event,
697 						&replyMessage, sizeof(replyMessage));
698 
699 					break;
700 				}
701 
702 				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
703 				{
704 					// Check whether the debugger really changed, i.e. is
705 					// different from the one we know.
706 					team_debug_info teamDebugInfo;
707 					get_team_debug_info(teamDebugInfo);
708 
709 					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
710 						if (!debuggerInstalled
711 							|| teamDebugInfo.debugger_port != debuggerPort) {
712 							// debugger was installed or has changed: restart
713 							// this function
714 							restart = true;
715 							done = true;
716 						}
717 					} else {
718 						if (debuggerInstalled) {
719 							// debugger is gone: continue the thread normally
720 							done = true;
721 						}
722 					}
723 
724 					break;
725 				}
726 			}
727 		}
728 	} else {
729 		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed to send "
730 			"message to debugger port %" B_PRId32 ": %" B_PRIx32 "\n",
731 			thread->id, debuggerPort, error));
732 	}
733 
734 	// update the thread debug info
735 	bool destroyThreadInfo = false;
736 	thread_debug_info threadDebugInfo;
737 
738 	state = disable_interrupts();
739 	threadDebugInfoLocker.Lock();
740 
741 	// check if the team is still being debugged
742 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
743 	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
744 		// update the single-step flag
745 		if (singleStep) {
746 			atomic_or(&thread->debug_info.flags,
747 				B_THREAD_DEBUG_SINGLE_STEP);
748 			atomic_or(&thread->flags, THREAD_FLAGS_SINGLE_STEP);
749 		} else {
750 			atomic_and(&thread->debug_info.flags,
751 				~(int32)B_THREAD_DEBUG_SINGLE_STEP);
752 		}
753 
754 		// unset the "stopped" state
755 		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);
756 
757 		update_thread_user_debug_flag(thread);
758 
759 	} else {
760 		// the debugger is gone: clean up our info completely
761 		threadDebugInfo = thread->debug_info;
762 		clear_thread_debug_info(&thread->debug_info, false);
763 		destroyThreadInfo = true;
764 	}
765 
766 	threadDebugInfoLocker.Unlock();
767 	restore_interrupts(state);
768 
769 	// enable/disable single stepping
770 	arch_update_thread_single_step();
771 
772 	if (destroyThreadInfo)
773 		destroy_thread_debug_info(&threadDebugInfo);
774 
775 	return (error == B_OK ? result : error);
776 }
777 
778 
779 static status_t
780 thread_hit_debug_event(debug_debugger_message event, const void *message,
781 	int32 size, bool requireDebugger)
782 {
783 	status_t result;
784 	bool restart;
785 	do {
786 		restart = false;
787 		result = thread_hit_debug_event_internal(event, message, size,
788 			requireDebugger, restart);
789 	} while (result >= 0 && restart);
790 
791 	// Prepare to continue -- we install a debugger change condition, so no one
792 	// will change the debugger while we're playing with the breakpoint manager.
793 	// TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
794 	Team* team = thread_get_current_thread()->team;
795 	ConditionVariable debugChangeCondition;
796 	debugChangeCondition.Init(team, "debug change condition");
797 	prepare_debugger_change(team, debugChangeCondition);
798 
799 	if (team->debug_info.breakpoint_manager != NULL) {
800 		bool isSyscall;
801 		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
802 		if (pc != NULL && !isSyscall)
803 			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
804 	}
805 
806 	finish_debugger_change(team);
807 
808 	return result;
809 }
810 
811 
812 static status_t
813 thread_hit_serious_debug_event(debug_debugger_message event,
814 	const void *message, int32 messageSize)
815 {
816 	// ensure that a debugger is installed for this team
817 	status_t error = ensure_debugger_installed();
818 	if (error != B_OK) {
819 		Thread *thread = thread_get_current_thread();
820 		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
821 			"thread: %" B_PRId32 " (%s): %s\n", thread->id, thread->name,
822 			strerror(error));
823 		return error;
824 	}
825 
826 	// enter the debug loop
827 	return thread_hit_debug_event(event, message, messageSize, true);
828 }
829 
830 
831 void
832 user_debug_pre_syscall(uint32 syscall, void *args)
833 {
834 	// check whether a debugger is installed
835 	Thread *thread = thread_get_current_thread();
836 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
837 	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
838 		return;
839 
840 	// check whether pre-syscall tracing is enabled for team or thread
841 	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
842 	if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
843 			&& !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
844 		return;
845 	}
846 
847 	// prepare the message
848 	debug_pre_syscall message;
849 	message.syscall = syscall;
850 
851 	// copy the syscall args
852 	if (syscall < (uint32)kSyscallCount) {
853 		if (kSyscallInfos[syscall].parameter_size > 0)
854 			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
855 	}
856 
857 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
858 		sizeof(message), true);
859 }
860 
861 
862 void
863 user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
864 	bigtime_t startTime)
865 {
866 	// check whether a debugger is installed
867 	Thread *thread = thread_get_current_thread();
868 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
869 	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
870 		return;
871 
872 	// check whether post-syscall tracing is enabled for team or thread
873 	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
874 	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
875 			&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
876 		return;
877 	}
878 
879 	// prepare the message
880 	debug_post_syscall message;
881 	message.start_time = startTime;
882 	message.end_time = system_time();
883 	message.return_value = returnValue;
884 	message.syscall = syscall;
885 
886 	// copy the syscall args
887 	if (syscall < (uint32)kSyscallCount) {
888 		if (kSyscallInfos[syscall].parameter_size > 0)
889 			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
890 	}
891 
892 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
893 		sizeof(message), true);
894 }
895 
896 
897 /**	\brief To be called when an unhandled processor exception (error/fault)
898  *		   occurred.
899  *	\param exception The debug_exception_type identifying the kind of fault.
900  *	\param signal The signal corresponding to the exception.
901  *	\return \c true, if the caller shall continue normally, i.e. usually send
902  *			a deadly signal. \c false, if the debugger insists on continuing
903  *			the program (e.g. because it has removed the cause of the
904  *			problem).
905  */
906 bool
907 user_debug_exception_occurred(debug_exception_type exception, int signal)
908 {
909 	// First check whether there's a signal handler installed for the signal.
910 	// If so, we don't want to install a debugger for the team. We always send
911 	// the signal instead. An already installed debugger will be notified if
912 	// it has requested notifications of signals.
913 	struct sigaction signalAction;
914 	if (sigaction(signal, NULL, &signalAction) == 0
915 		&& signalAction.sa_handler != SIG_DFL) {
916 		return true;
917 	}
918 
919 	// prepare the message
920 	debug_exception_occurred message;
921 	message.exception = exception;
922 	message.signal = signal;
923 
924 	status_t result = thread_hit_serious_debug_event(
925 		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
926 	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
927 }
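

// Hypothetical caller sketch (not from this file): an architecture fault
// handler would typically send the deadly signal only when this function
// returns true; deliver_fault_signal() is a made-up placeholder for whatever
// signal delivery path such a caller actually uses:
//
//	if (user_debug_exception_occurred(B_GENERAL_PROTECTION_FAULT, SIGSEGV))
//		deliver_fault_signal(thread, SIGSEGV);	// hypothetical helper
//	// otherwise just resume -- the debugger has dealt with the fault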
928 
929 
930 bool
931 user_debug_handle_signal(int signal, struct sigaction *handler, bool deadly)
932 {
933 	// check if a debugger is installed and is interested in signals
934 	Thread *thread = thread_get_current_thread();
935 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
936 	if (~teamDebugFlags
937 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
938 		return true;
939 	}
940 
941 	// prepare the message
942 	debug_signal_received message;
943 	message.signal = signal;
944 	message.handler = *handler;
945 	message.deadly = deadly;
946 
947 	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
948 		&message, sizeof(message), true);
949 	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
950 }
951 
952 
953 void
954 user_debug_stop_thread()
955 {
956 	// check whether this is actually an emulated single-step notification
957 	Thread* thread = thread_get_current_thread();
958 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
959 
960 	bool singleStepped = false;
961 	if ((atomic_and(&thread->debug_info.flags,
962 				~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
963 			& B_THREAD_DEBUG_NOTIFY_SINGLE_STEP) != 0) {
964 		singleStepped = true;
965 	}
966 
967 	threadDebugInfoLocker.Unlock();
968 
969 	if (singleStepped) {
970 		user_debug_single_stepped();
971 	} else {
972 		debug_thread_debugged message;
973 		thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED,
974 			&message, sizeof(message));
975 	}
976 }
977 
978 
979 void
980 user_debug_team_created(team_id teamID)
981 {
982 	// check if a debugger is installed and is interested in team creation
983 	// events
984 	Thread *thread = thread_get_current_thread();
985 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
986 	if (~teamDebugFlags
987 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
988 		return;
989 	}
990 
991 	// prepare the message
992 	debug_team_created message;
993 	message.new_team = teamID;
994 
995 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
996 		sizeof(message), true);
997 }
998 
999 
1000 void
1001 user_debug_team_deleted(team_id teamID, port_id debuggerPort)
1002 {
1003 	if (debuggerPort >= 0) {
1004 		TRACE(("user_debug_team_deleted(team: %" B_PRId32 ", debugger port: "
1005 			"%" B_PRId32 ")\n", teamID, debuggerPort));
1006 
1007 		debug_team_deleted message;
1008 		message.origin.thread = -1;
1009 		message.origin.team = teamID;
1010 		message.origin.nub_port = -1;
1011 		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
1012 			sizeof(message), B_RELATIVE_TIMEOUT, 0);
1013 	}
1014 }
1015 
1016 
1017 void
1018 user_debug_team_exec()
1019 {
1020 	// check if a debugger is installed and is interested in team creation
1021 	// events
1022 	Thread *thread = thread_get_current_thread();
1023 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1024 	if (~teamDebugFlags
1025 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
1026 		return;
1027 	}
1028 
1029 	// prepare the message
1030 	debug_team_exec message;
1031 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1032 		+ 1;
1033 
1034 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
1035 		sizeof(message), true);
1036 }
1037 
1038 
1039 /*!	Called by a new userland thread to update the debugging related flags of
1040 	\c Thread::flags before the thread first enters userland.
1041 	\param thread The calling thread.
1042 */
1043 void
1044 user_debug_update_new_thread_flags(Thread* thread)
1045 {
1046 	// lock it and update its flags
1047 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1048 
1049 	update_thread_user_debug_flag(thread);
1050 	update_thread_breakpoints_flag(thread);
1051 	update_thread_debugger_installed_flag(thread);
1052 }
1053 
1054 
1055 void
1056 user_debug_thread_created(thread_id threadID)
1057 {
1058 	// check if a debugger is installed and is interested in thread events
1059 	Thread *thread = thread_get_current_thread();
1060 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1061 	if (~teamDebugFlags
1062 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
1063 		return;
1064 	}
1065 
1066 	// prepare the message
1067 	debug_thread_created message;
1068 	message.new_thread = threadID;
1069 
1070 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message,
1071 		sizeof(message), true);
1072 }
1073 
1074 
1075 void
1076 user_debug_thread_deleted(team_id teamID, thread_id threadID)
1077 {
1078 	// Things are a bit complicated here, since this thread no longer belongs to
1079 	// the debugged team (but to the kernel). So we can't use debugger_write().
1080 
1081 	// get the team debug flags and debugger port
1082 	Team* team = Team::Get(teamID);
1083 	if (team == NULL)
1084 		return;
1085 	BReference<Team> teamReference(team, true);
1086 
1087 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
1088 
1089 	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
1090 	port_id debuggerPort = team->debug_info.debugger_port;
1091 	sem_id writeLock = team->debug_info.debugger_write_lock;
1092 
1093 	debugInfoLocker.Unlock();
1094 
1095 	// check if a debugger is installed and is interested in thread events
1096 	if (~teamDebugFlags
1097 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
1098 		return;
1099 	}
1100 
1101 	// acquire the debugger write lock
1102 	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
1103 	if (error != B_OK)
1104 		return;
1105 
1106 	// re-get the team debug info -- we need to check whether anything changed
1107 	debugInfoLocker.Lock();
1108 
1109 	teamDebugFlags = atomic_get(&team->debug_info.flags);
1110 	port_id newDebuggerPort = team->debug_info.debugger_port;
1111 
1112 	debugInfoLocker.Unlock();
1113 
1114 	// Send the message only if the debugger hasn't changed in the meantime or
1115 	// the team is about to be handed over.
1116 	if (newDebuggerPort == debuggerPort
1117 		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
1118 		debug_thread_deleted message;
1119 		message.origin.thread = threadID;
1120 		message.origin.team = teamID;
1121 		message.origin.nub_port = -1;
1122 
1123 		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
1124 			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
1125 	}
1126 
1127 	// release the debugger write lock
1128 	release_sem(writeLock);
1129 }
1130 
1131 
1132 /*!	Called for a thread that is about to die, cleaning up all user debug
1133 	facilities installed for the thread.
1134 	\param thread The current thread, the one that is going to die.
1135 */
1136 void
1137 user_debug_thread_exiting(Thread* thread)
1138 {
1139 	// thread is the current thread, so using team is safe
1140 	Team* team = thread->team;
1141 
1142 	InterruptsLocker interruptsLocker;
1143 
1144 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1145 
1146 	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
1147 	port_id debuggerPort = team->debug_info.debugger_port;
1148 
1149 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1150 
1151 	// check if a debugger is installed
1152 	if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
1153 		|| debuggerPort < 0) {
1154 		return;
1155 	}
1156 
1157 	// detach the profile info and mark the thread dying
1158 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1159 
1160 	thread_debug_info& threadDebugInfo = thread->debug_info;
1161 	if (threadDebugInfo.profile.samples == NULL)
1162 		return;
1163 
1164 	area_id sampleArea = threadDebugInfo.profile.sample_area;
1165 	int32 sampleCount = threadDebugInfo.profile.sample_count;
1166 	int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
1167 	int32 stackDepth = threadDebugInfo.profile.stack_depth;
1168 	bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
1169 	int32 imageEvent = threadDebugInfo.profile.image_event;
1170 	threadDebugInfo.profile.sample_area = -1;
1171 	threadDebugInfo.profile.samples = NULL;
1172 	threadDebugInfo.profile.buffer_full = false;
1173 
1174 	atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);
1175 
1176 	threadDebugInfoLocker.Unlock();
1177 	interruptsLocker.Unlock();
1178 
1179 	// notify the debugger
1180 	debug_profiler_update message;
1181 	message.origin.thread = thread->id;
1182 	message.origin.team = thread->team->id;
1183 	message.origin.nub_port = -1;	// asynchronous message
1184 	message.sample_count = sampleCount;
1185 	message.dropped_ticks = droppedTicks;
1186 	message.stack_depth = stackDepth;
1187 	message.variable_stack_depth = variableStackDepth;
1188 	message.image_event = imageEvent;
1189 	message.stopped = true;
1190 	debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
1191 		&message, sizeof(message), false);
1192 
1193 	if (sampleArea >= 0) {
1194 		area_info areaInfo;
1195 		if (get_area_info(sampleArea, &areaInfo) == B_OK) {
1196 			unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
1197 			delete_area(sampleArea);
1198 		}
1199 	}
1200 }
1201 
1202 
1203 void
1204 user_debug_image_created(const image_info *imageInfo)
1205 {
1206 	// check if a debugger is installed and is interested in image events
1207 	Thread *thread = thread_get_current_thread();
1208 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1209 	if (~teamDebugFlags
1210 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
1211 		return;
1212 	}
1213 
1214 	// prepare the message
1215 	debug_image_created message;
1216 	memcpy(&message.info, imageInfo, sizeof(image_info));
1217 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1218 		+ 1;
1219 
1220 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
1221 		sizeof(message), true);
1222 }
1223 
1224 
1225 void
1226 user_debug_image_deleted(const image_info *imageInfo)
1227 {
1228 	// check if a debugger is installed and is interested in image events
1229 	Thread *thread = thread_get_current_thread();
1230 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1231 	if (~teamDebugFlags
1232 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
1233 		return;
1234 	}
1235 
1236 	// prepare the message
1237 	debug_image_deleted message;
1238 	memcpy(&message.info, imageInfo, sizeof(image_info));
1239 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1240 		+ 1;
1241 
1242 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message,
1243 		sizeof(message), true);
1244 }
1245 
1246 
1247 void
1248 user_debug_breakpoint_hit(bool software)
1249 {
1250 	// prepare the message
1251 	debug_breakpoint_hit message;
1252 	arch_get_debug_cpu_state(&message.cpu_state);
1253 
1254 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
1255 		sizeof(message));
1256 }
1257 
1258 
1259 void
1260 user_debug_watchpoint_hit()
1261 {
1262 	// prepare the message
1263 	debug_watchpoint_hit message;
1264 	arch_get_debug_cpu_state(&message.cpu_state);
1265 
1266 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
1267 		sizeof(message));
1268 }
1269 
1270 
1271 void
1272 user_debug_single_stepped()
1273 {
1274 	// clear the single-step thread flag
1275 	Thread* thread = thread_get_current_thread();
1276 	atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_SINGLE_STEP);
1277 
1278 	// prepare the message
1279 	debug_single_step message;
1280 	arch_get_debug_cpu_state(&message.cpu_state);
1281 
1282 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
1283 		sizeof(message));
1284 }
1285 
1286 
1287 /*!	Schedules the profiling timer for the current thread.
1288 	The caller must hold the thread's debug info lock.
1289 	\param thread The current thread.
1290 	\param interval The time after which the timer should fire.
1291 */
1292 static void
1293 schedule_profiling_timer(Thread* thread, bigtime_t interval)
1294 {
1295 	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
1296 	thread->debug_info.profile.installed_timer = timer;
1297 	thread->debug_info.profile.timer_end = system_time() + interval;
1298 	add_timer(timer, &profiling_event, interval, B_ONE_SHOT_RELATIVE_TIMER);
1299 }
1300 
1301 
1302 /*!	Samples the current thread's instruction pointer/stack trace.
1303 	The caller must hold the current thread's debug info lock.
1304 	\param flushBuffer Return parameter: Set to \c true when the sampling
1305 		buffer must be flushed.
1306 */
1307 static bool
1308 profiling_do_sample(bool& flushBuffer)
1309 {
1310 	Thread* thread = thread_get_current_thread();
1311 	thread_debug_info& debugInfo = thread->debug_info;
1312 
1313 	if (debugInfo.profile.samples == NULL)
1314 		return false;
1315 
1316 	// Check whether the buffer is full or an image event occurred since the
1317 	// last sample was taken.
1318 	int32 maxSamples = debugInfo.profile.max_samples;
1319 	int32 sampleCount = debugInfo.profile.sample_count;
1320 	int32 stackDepth = debugInfo.profile.stack_depth;
1321 	int32 imageEvent = thread->team->debug_info.image_event;
1322 	if (debugInfo.profile.sample_count > 0) {
1323 		if (debugInfo.profile.last_image_event < imageEvent
1324 			&& debugInfo.profile.variable_stack_depth
1325 			&& sampleCount + 2 <= maxSamples) {
1326 			// an image event occurred, but we use variable stack depth and
1327 			// have enough room in the buffer to indicate an image event
1328 			addr_t* event = debugInfo.profile.samples + sampleCount;
1329 			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
1330 			event[1] = imageEvent;
1331 			sampleCount += 2;
1332 			debugInfo.profile.sample_count = sampleCount;
1333 			debugInfo.profile.last_image_event = imageEvent;
1334 		}
1335 
1336 		if (debugInfo.profile.last_image_event < imageEvent
1337 			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
1338 			if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
1339 				flushBuffer = true;
1340 				return true;
1341 			}
1342 
1343 			// We can't flush the buffer now, since we interrupted a kernel
1344 			// function. If the buffer is not full yet, we add the samples,
1345 			// otherwise we have to drop them.
1346 			if (maxSamples - sampleCount < stackDepth) {
1347 				debugInfo.profile.dropped_ticks++;
1348 				return true;
1349 			}
1350 		}
1351 	} else {
1352 		// first sample -- set the image event
1353 		debugInfo.profile.image_event = imageEvent;
1354 		debugInfo.profile.last_image_event = imageEvent;
1355 	}
1356 
1357 	// get the samples
1358 	addr_t* returnAddresses = debugInfo.profile.samples
1359 		+ debugInfo.profile.sample_count;
1360 	if (debugInfo.profile.variable_stack_depth) {
1361 		// variable sample count per hit
1362 		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
1363 			stackDepth - 1, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);
1364 
1365 		debugInfo.profile.sample_count += *returnAddresses + 1;
1366 	} else {
1367 		// fixed sample count per hit
1368 		if (stackDepth > 1) {
1369 			int32 count = arch_debug_get_stack_trace(returnAddresses,
1370 				stackDepth, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);
1371 
1372 			for (int32 i = count; i < stackDepth; i++)
1373 				returnAddresses[i] = 0;
1374 		} else
1375 			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);
1376 
1377 		debugInfo.profile.sample_count += stackDepth;
1378 	}
1379 
1380 	return true;
1381 }
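

// Buffer layout sketch (informational, derived from the code above): with
// variable stack depth each tick appends a count followed by that many return
// addresses, and an image event is recorded inline as a two-slot marker:
//
//	[ 3 ][ pc ][ ra1 ][ ra2 ]						tick with 3 addresses
//	[ B_DEBUG_PROFILE_IMAGE_EVENT ][ imageEvent ]	image event marker
//	[ 2 ][ pc ][ ra1 ]								next tick
//
// With fixed stack depth every tick occupies exactly stack_depth slots, padded
// with 0 entries when the captured stack trace is shorter.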
1382 
1383 
1384 static void
1385 profiling_buffer_full(void*)
1386 {
1387 	// It is undefined whether the function is called with interrupts enabled
1388 	// or disabled. We are allowed to enable interrupts, though. First make
1389 	// sure interrupts are disabled.
1390 	disable_interrupts();
1391 
1392 	Thread* thread = thread_get_current_thread();
1393 	thread_debug_info& debugInfo = thread->debug_info;
1394 
1395 	SpinLocker threadDebugInfoLocker(debugInfo.lock);
1396 
1397 	if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
1398 		int32 sampleCount = debugInfo.profile.sample_count;
1399 		int32 droppedTicks = debugInfo.profile.dropped_ticks;
1400 		int32 stackDepth = debugInfo.profile.stack_depth;
1401 		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
1402 		int32 imageEvent = debugInfo.profile.image_event;
1403 
1404 		// notify the debugger
1405 		debugInfo.profile.sample_count = 0;
1406 		debugInfo.profile.dropped_ticks = 0;
1407 
1408 		threadDebugInfoLocker.Unlock();
1409 		enable_interrupts();
1410 
1411 		// prepare the message
1412 		debug_profiler_update message;
1413 		message.sample_count = sampleCount;
1414 		message.dropped_ticks = droppedTicks;
1415 		message.stack_depth = stackDepth;
1416 		message.variable_stack_depth = variableStackDepth;
1417 		message.image_event = imageEvent;
1418 		message.stopped = false;
1419 
1420 		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
1421 			sizeof(message), false);
1422 
1423 		disable_interrupts();
1424 		threadDebugInfoLocker.Lock();
1425 
1426 		// do the sampling and reschedule timer if still profiling this thread
1427 		bool flushBuffer;
1428 		if (profiling_do_sample(flushBuffer)) {
1429 			debugInfo.profile.buffer_full = false;
1430 			schedule_profiling_timer(thread, debugInfo.profile.interval);
1431 		}
1432 	}
1433 
1434 	threadDebugInfoLocker.Unlock();
1435 	enable_interrupts();
1436 }
1437 
1438 
1439 /*!	Profiling timer event callback.
1440 	Called with interrupts disabled.
1441 */
1442 static int32
1443 profiling_event(timer* /*unused*/)
1444 {
1445 	Thread* thread = thread_get_current_thread();
1446 	thread_debug_info& debugInfo = thread->debug_info;
1447 
1448 	SpinLocker threadDebugInfoLocker(debugInfo.lock);
1449 
1450 	bool flushBuffer = false;
1451 	if (profiling_do_sample(flushBuffer)) {
1452 		if (flushBuffer) {
1453 			// The sample buffer needs to be flushed; we'll have to notify the
1454 			// debugger. We can't do that right here. Instead we set a post
1455 			// interrupt callback doing that for us, and don't reschedule the
1456 			// timer yet.
1457 			thread->post_interrupt_callback = profiling_buffer_full;
1458 			debugInfo.profile.installed_timer = NULL;
1459 			debugInfo.profile.buffer_full = true;
1460 		} else
1461 			schedule_profiling_timer(thread, debugInfo.profile.interval);
1462 	} else
1463 		debugInfo.profile.installed_timer = NULL;
1464 
1465 	return B_HANDLED_INTERRUPT;
1466 }
1467 
1468 
1469 /*!	Called by the scheduler when a debugged thread has been unscheduled.
1470 	The scheduler lock is being held.
1471 */
1472 void
1473 user_debug_thread_unscheduled(Thread* thread)
1474 {
1475 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1476 
1477 	// if running, cancel the profiling timer
1478 	struct timer* timer = thread->debug_info.profile.installed_timer;
1479 	if (timer != NULL) {
1480 		// track remaining time
1481 		bigtime_t left = thread->debug_info.profile.timer_end - system_time();
1482 		thread->debug_info.profile.interval_left = max_c(left, 0);
1483 		thread->debug_info.profile.installed_timer = NULL;
1484 
1485 		// cancel timer
1486 		threadDebugInfoLocker.Unlock();
1487 			// not necessary, but doesn't harm and reduces contention
1488 		cancel_timer(timer);
1489 			// since this is invoked on the same CPU, it cannot end up waiting
1490 			// for a timer hook that is already running
1491 	}
1492 }
1493 
1494 
1495 /*!	Called by the scheduler when a debugged thread has been scheduled.
1496 	The scheduler lock is being held.
1497 */
1498 void
1499 user_debug_thread_scheduled(Thread* thread)
1500 {
1501 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1502 
1503 	if (thread->debug_info.profile.samples != NULL
1504 		&& !thread->debug_info.profile.buffer_full) {
1505 		// install profiling timer
1506 		schedule_profiling_timer(thread,
1507 			thread->debug_info.profile.interval_left);
1508 	}
1509 }
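

// Worked example (assuming a profiling interval of 1000 us): if the thread is
// unscheduled 400 us after the timer was armed, user_debug_thread_unscheduled()
// stores interval_left = timer_end - system_time() = 600 us; on the next
// user_debug_thread_scheduled() the timer is re-armed with those remaining
// 600 us, so an interval only elapses while the thread is actually running.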
1510 
1511 
1512 /*!	\brief Called by the debug nub thread of a team to broadcast a message to
1513 		all threads of the team that are initialized for debugging (and
1514 		thus have a debug port).
1515 */
1516 static void
1517 broadcast_debugged_thread_message(Thread *nubThread, int32 code,
1518 	const void *message, int32 size)
1519 {
1520 	// iterate through the threads
1521 	thread_info threadInfo;
1522 	int32 cookie = 0;
1523 	while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
1524 			== B_OK) {
1525 		// get the thread and lock it
1526 		Thread* thread = Thread::GetAndLock(threadInfo.thread);
1527 		if (thread == NULL)
1528 			continue;
1529 
1530 		BReference<Thread> threadReference(thread, true);
1531 		ThreadLocker threadLocker(thread, true);
1532 
1533 		// get the thread's debug port
1534 		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1535 
1536 		port_id threadDebugPort = -1;
1537 		if (thread && thread != nubThread && thread->team == nubThread->team
1538 			&& (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
1539 			&& (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
1540 			threadDebugPort = thread->debug_info.debug_port;
1541 		}
1542 
1543 		threadDebugInfoLocker.Unlock();
1544 		threadLocker.Unlock();
1545 
1546 		// send the message to the thread
1547 		if (threadDebugPort >= 0) {
1548 			status_t error = kill_interruptable_write_port(threadDebugPort,
1549 				code, message, size);
1550 			if (error != B_OK) {
1551 				TRACE(("broadcast_debugged_thread_message(): Failed to send "
1552 					"message to thread %" B_PRId32 ": %" B_PRIx32 "\n",
1553 					thread->id, error));
1554 			}
1555 		}
1556 	}
1557 }
1558 
1559 
1560 static void
1561 nub_thread_cleanup(Thread *nubThread)
1562 {
1563 	TRACE(("nub_thread_cleanup(%" B_PRId32 "): debugger port: %" B_PRId32 "\n",
1564 		nubThread->id, nubThread->team->debug_info.debugger_port));
1565 
1566 	ConditionVariable debugChangeCondition;
1567 	debugChangeCondition.Init(nubThread->team, "debug change condition");
1568 	prepare_debugger_change(nubThread->team, debugChangeCondition);
1569 
1570 	team_debug_info teamDebugInfo;
1571 	bool destroyDebugInfo = false;
1572 
1573 	TeamLocker teamLocker(nubThread->team);
1574 		// required by update_threads_debugger_installed_flag()
1575 
1576 	cpu_status state = disable_interrupts();
1577 	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1578 
1579 	team_debug_info &info = nubThread->team->debug_info;
1580 	if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
1581 		&& info.nub_thread == nubThread->id) {
1582 		teamDebugInfo = info;
1583 		clear_team_debug_info(&info, false);
1584 		destroyDebugInfo = true;
1585 	}
1586 
1587 	// update the thread::flags fields
1588 	update_threads_debugger_installed_flag(nubThread->team);
1589 
1590 	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1591 	restore_interrupts(state);
1592 
1593 	teamLocker.Unlock();
1594 
1595 	if (destroyDebugInfo)
1596 		teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();
1597 
1598 	finish_debugger_change(nubThread->team);
1599 
1600 	if (destroyDebugInfo)
1601 		destroy_team_debug_info(&teamDebugInfo);
1602 
1603 	// notify all threads that the debugger is gone
1604 	broadcast_debugged_thread_message(nubThread,
1605 		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
1606 }
1607 
1608 
1609 /**	\brief Debug nub thread helper function that returns the debug port of
1610  *		   a thread of the same team.
1611  */
1612 static status_t
1613 debug_nub_thread_get_thread_debug_port(Thread *nubThread,
1614 	thread_id threadID, port_id &threadDebugPort)
1615 {
1616 	threadDebugPort = -1;
1617 
1618 	// get the thread
1619 	Thread* thread = Thread::GetAndLock(threadID);
1620 	if (thread == NULL)
1621 		return B_BAD_THREAD_ID;
1622 	BReference<Thread> threadReference(thread, true);
1623 	ThreadLocker threadLocker(thread, true);
1624 
1625 	// get the debug port
1626 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1627 
1628 	if (thread->team != nubThread->team)
1629 		return B_BAD_VALUE;
1630 	if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) == 0)
1631 		return B_BAD_THREAD_STATE;
1632 
1633 	threadDebugPort = thread->debug_info.debug_port;
1634 
1635 	threadDebugInfoLocker.Unlock();
1636 
1637 	if (threadDebugPort < 0)
1638 		return B_ERROR;
1639 
1640 	return B_OK;
1641 }
1642 
1643 
1644 static status_t
1645 debug_nub_thread(void *)
1646 {
1647 	Thread *nubThread = thread_get_current_thread();
1648 
1649 	// check if we're still the current nub thread and get our port
1650 	cpu_status state = disable_interrupts();
1651 	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1652 
1653 	if (nubThread->team->debug_info.nub_thread != nubThread->id) {
1654 		RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1655 		restore_interrupts(state);
1656 		return 0;
1657 	}
1658 
1659 	port_id port = nubThread->team->debug_info.nub_port;
1660 	sem_id writeLock = nubThread->team->debug_info.debugger_write_lock;
1661 	BreakpointManager* breakpointManager
1662 		= nubThread->team->debug_info.breakpoint_manager;
1663 
1664 	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1665 	restore_interrupts(state);
1666 
1667 	TRACE(("debug_nub_thread() thread: %" B_PRId32 ", team %" B_PRId32 ", nub "
1668 		"port: %" B_PRId32 "\n", nubThread->id, nubThread->team->id, port));
1669 
1670 	// notify all threads that a debugger has been installed
1671 	broadcast_debugged_thread_message(nubThread,
1672 		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
1673 
1674 	// command processing loop
1675 	while (true) {
1676 		int32 command;
1677 		debug_nub_message_data message;
1678 		ssize_t messageSize = read_port_etc(port, &command, &message,
1679 			sizeof(message), B_KILL_CAN_INTERRUPT, 0);
1680 
1681 		if (messageSize < 0) {
1682 			// The port is no longer valid or we were interrupted by a kill
1683 			// signal: If we are still listed in the team's debug info as nub
1684 			// thread, we need to update that.
1685 			nub_thread_cleanup(nubThread);
1686 
1687 			TRACE(("nub thread %" B_PRId32 ": terminating: %lx\n",
1688 				nubThread->id, messageSize));
1689 
1690 			return messageSize;
1691 		}
1692 
1693 		bool sendReply = false;
1694 		union {
1695 			debug_nub_read_memory_reply			read_memory;
1696 			debug_nub_write_memory_reply		write_memory;
1697 			debug_nub_get_cpu_state_reply		get_cpu_state;
1698 			debug_nub_set_breakpoint_reply		set_breakpoint;
1699 			debug_nub_set_watchpoint_reply		set_watchpoint;
1700 			debug_nub_get_signal_masks_reply	get_signal_masks;
1701 			debug_nub_get_signal_handler_reply	get_signal_handler;
1702 			debug_nub_start_profiler_reply		start_profiler;
1703 			debug_profiler_update				profiler_update;
1704 			debug_nub_write_core_file_reply		write_core_file;
1705 		} reply;
1706 		int32 replySize = 0;
1707 		port_id replyPort = -1;
1708 
1709 		// process the command
1710 		switch (command) {
1711 			case B_DEBUG_MESSAGE_READ_MEMORY:
1712 			{
1713 				// get the parameters
1714 				replyPort = message.read_memory.reply_port;
1715 				void *address = message.read_memory.address;
1716 				int32 size = message.read_memory.size;
1717 				status_t result = B_OK;
1718 
1719 				// check the parameters
1720 				if (!BreakpointManager::CanAccessAddress(address, false))
1721 					result = B_BAD_ADDRESS;
1722 				else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE)
1723 					result = B_BAD_VALUE;
1724 
1725 				// read the memory
1726 				size_t bytesRead = 0;
1727 				if (result == B_OK) {
1728 					result = breakpointManager->ReadMemory(address,
1729 						reply.read_memory.data, size, bytesRead);
1730 				}
1731 				reply.read_memory.error = result;
1732 
1733 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_READ_MEMORY: "
1734 					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
1735 					", result: %" B_PRIx32 ", read: %ld\n", nubThread->id,
1736 					replyPort, address, size, result, bytesRead));
1737 
1738 				// send only as much data as necessary
1739 				reply.read_memory.size = bytesRead;
1740 				replySize = reply.read_memory.data + bytesRead - (char*)&reply;
1741 				sendReply = true;
1742 				break;
1743 			}
1744 
1745 			case B_DEBUG_MESSAGE_WRITE_MEMORY:
1746 			{
1747 				// get the parameters
1748 				replyPort = message.write_memory.reply_port;
1749 				void *address = message.write_memory.address;
1750 				int32 size = message.write_memory.size;
1751 				const char *data = message.write_memory.data;
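				// number of data bytes actually contained in the received
				// message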
1752 				int32 realSize = (char*)&message + messageSize - data;
1753 				status_t result = B_OK;
1754 
1755 				// check the parameters
1756 				if (!BreakpointManager::CanAccessAddress(address, true))
1757 					result = B_BAD_ADDRESS;
1758 				else if (size <= 0 || size > realSize)
1759 					result = B_BAD_VALUE;
1760 
1761 				// write the memory
1762 				size_t bytesWritten = 0;
1763 				if (result == B_OK) {
1764 					result = breakpointManager->WriteMemory(address, data, size,
1765 						bytesWritten);
1766 				}
1767 				reply.write_memory.error = result;
1768 
1769 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_WRITE_MEMORY: "
1770 					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
1771 					", result: %" B_PRIx32 ", written: %ld\n", nubThread->id,
1772 					replyPort, address, size, result, bytesWritten));
1773 
1774 				reply.write_memory.size = bytesWritten;
1775 				sendReply = true;
1776 				replySize = sizeof(debug_nub_write_memory_reply);
1777 				break;
1778 			}
1779 
1780 			case B_DEBUG_MESSAGE_SET_TEAM_FLAGS:
1781 			{
1782 				// get the parameters
1783 				int32 flags = message.set_team_flags.flags
1784 					& B_TEAM_DEBUG_USER_FLAG_MASK;
1785 
1786 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_TEAM_FLAGS"
1787 					": flags: %" B_PRIx32 "\n", nubThread->id, flags));
1788 
1789 				Team *team = thread_get_current_thread()->team;
1790 
1791 				// set the flags
1792 				cpu_status state = disable_interrupts();
1793 				GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1794 
1795 				flags |= team->debug_info.flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK;
1796 				atomic_set(&team->debug_info.flags, flags);
1797 
1798 				RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1799 				restore_interrupts(state);
1800 
1801 				break;
1802 			}
1803 
1804 			case B_DEBUG_MESSAGE_SET_THREAD_FLAGS:
1805 			{
1806 				// get the parameters
1807 				thread_id threadID = message.set_thread_flags.thread;
1808 				int32 flags = message.set_thread_flags.flags
1809 					& B_THREAD_DEBUG_USER_FLAG_MASK;
1810 
1811 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_THREAD_FLAGS"
1812 					": thread: %" B_PRId32 ", flags: %" B_PRIx32 "\n",
1813 					nubThread->id, threadID, flags));
1814 
1815 				// set the flags
1816 				Thread* thread = Thread::GetAndLock(threadID);
1817 				if (thread == NULL)
1818 					break;
1819 				BReference<Thread> threadReference(thread, true);
1820 				ThreadLocker threadLocker(thread, true);
1821 
1822 				InterruptsSpinLocker threadDebugInfoLocker(
1823 					thread->debug_info.lock);
1824 
1825 				if (thread->team == thread_get_current_thread()->team) {
1826 					flags |= thread->debug_info.flags
1827 						& B_THREAD_DEBUG_KERNEL_FLAG_MASK;
1828 					atomic_set(&thread->debug_info.flags, flags);
1829 				}
1830 
1831 				break;
1832 			}
1833 
1834 			case B_DEBUG_MESSAGE_CONTINUE_THREAD:
1835 			{
1836 				// get the parameters
1837 				thread_id threadID;
1838 				uint32 handleEvent;
1839 				bool singleStep;
1840 
1841 				threadID = message.continue_thread.thread;
1842 				handleEvent = message.continue_thread.handle_event;
1843 				singleStep = message.continue_thread.single_step;
1844 
1845 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CONTINUE_THREAD"
1846 					": thread: %" B_PRId32 ", handle event: %" B_PRIu32 ", "
1847 					"single step: %d\n", nubThread->id, threadID, handleEvent,
1848 					singleStep));
1849 
1850 				// find the thread and get its debug port
1851 				port_id threadDebugPort = -1;
1852 				status_t result = debug_nub_thread_get_thread_debug_port(
1853 					nubThread, threadID, threadDebugPort);
1854 
1855 				// send a message to the debugged thread
1856 				if (result == B_OK) {
1857 					debugged_thread_continue commandMessage;
1858 					commandMessage.handle_event = handleEvent;
1859 					commandMessage.single_step = singleStep;
1860 
1861 					result = write_port(threadDebugPort,
1862 						B_DEBUGGED_THREAD_MESSAGE_CONTINUE,
1863 						&commandMessage, sizeof(commandMessage));
1864 				} else if (result == B_BAD_THREAD_STATE) {
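					// The thread is not stopped at a debug event. If it is
					// merely suspended, just resume it.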
1865 					Thread* thread = Thread::GetAndLock(threadID);
1866 					if (thread == NULL)
1867 						break;
1868 
1869 					BReference<Thread> threadReference(thread, true);
1870 					ThreadLocker threadLocker(thread, true);
1871 					if (thread->state == B_THREAD_SUSPENDED) {
1872 						threadLocker.Unlock();
1873 						resume_thread(threadID);
1874 						break;
1875 					}
1876 				}
1877 
1878 				break;
1879 			}
1880 
1881 			case B_DEBUG_MESSAGE_SET_CPU_STATE:
1882 			{
1883 				// get the parameters
1884 				thread_id threadID = message.set_cpu_state.thread;
1885 				const debug_cpu_state &cpuState
1886 					= message.set_cpu_state.cpu_state;
1887 
1888 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_CPU_STATE"
1889 					": thread: %" B_PRId32 "\n", nubThread->id, threadID));
1890 
1891 				// find the thread and get its debug port
1892 				port_id threadDebugPort = -1;
1893 				status_t result = debug_nub_thread_get_thread_debug_port(
1894 					nubThread, threadID, threadDebugPort);
1895 
1896 				// send a message to the debugged thread
1897 				if (result == B_OK) {
1898 					debugged_thread_set_cpu_state commandMessage;
1899 					memcpy(&commandMessage.cpu_state, &cpuState,
1900 						sizeof(debug_cpu_state));
1901 					write_port(threadDebugPort,
1902 						B_DEBUGGED_THREAD_SET_CPU_STATE,
1903 						&commandMessage, sizeof(commandMessage));
1904 				}
1905 
1906 				break;
1907 			}
1908 
1909 			case B_DEBUG_MESSAGE_GET_CPU_STATE:
1910 			{
1911 				// get the parameters
1912 				thread_id threadID = message.get_cpu_state.thread;
1913 				replyPort = message.get_cpu_state.reply_port;
1914 
1915 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_CPU_STATE"
1916 					": thread: %" B_PRId32 "\n", nubThread->id, threadID));
1917 
1918 				// find the thread and get its debug port
1919 				port_id threadDebugPort = -1;
1920 				status_t result = debug_nub_thread_get_thread_debug_port(
1921 					nubThread, threadID, threadDebugPort);
1922 
1923 				// send a message to the debugged thread
1924 				if (threadDebugPort >= 0) {
1925 					debugged_thread_get_cpu_state commandMessage;
1926 					commandMessage.reply_port = replyPort;
1927 					result = write_port(threadDebugPort,
1928 						B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage,
1929 						sizeof(commandMessage));
1930 				}
1931 
1932 				// send a reply to the debugger in case of error
1933 				if (result != B_OK) {
1934 					reply.get_cpu_state.error = result;
1935 					sendReply = true;
1936 					replySize = sizeof(reply.get_cpu_state);
1937 				}
1938 
1939 				break;
1940 			}
1941 
1942 			case B_DEBUG_MESSAGE_SET_BREAKPOINT:
1943 			{
1944 				// get the parameters
1945 				replyPort = message.set_breakpoint.reply_port;
1946 				void *address = message.set_breakpoint.address;
1947 
1948 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_BREAKPOINT"
1949 					": address: %p\n", nubThread->id, address));
1950 
1951 				// check the address
1952 				status_t result = B_OK;
1953 				if (address == NULL
1954 					|| !BreakpointManager::CanAccessAddress(address, false)) {
1955 					result = B_BAD_ADDRESS;
1956 				}
1957 
1958 				// set the breakpoint
1959 				if (result == B_OK)
1960 					result = breakpointManager->InstallBreakpoint(address);
1961 
1962 				if (result == B_OK)
1963 					update_threads_breakpoints_flag();
1964 
1965 				// prepare the reply
1966 				reply.set_breakpoint.error = result;
1967 				replySize = sizeof(reply.set_breakpoint);
1968 				sendReply = true;
1969 
1970 				break;
1971 			}
1972 
1973 			case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT:
1974 			{
1975 				// get the parameters
1976 				void *address = message.clear_breakpoint.address;
1977 
1978 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_BREAKPOINT"
1979 					": address: %p\n", nubThread->id, address));
1980 
1981 				// check the address
1982 				status_t result = B_OK;
1983 				if (address == NULL
1984 					|| !BreakpointManager::CanAccessAddress(address, false)) {
1985 					result = B_BAD_ADDRESS;
1986 				}
1987 
1988 				// clear the breakpoint
1989 				if (result == B_OK)
1990 					result = breakpointManager->UninstallBreakpoint(address);
1991 
1992 				if (result == B_OK)
1993 					update_threads_breakpoints_flag();
1994 
1995 				break;
1996 			}
1997 
1998 			case B_DEBUG_MESSAGE_SET_WATCHPOINT:
1999 			{
2000 				// get the parameters
2001 				replyPort = message.set_watchpoint.reply_port;
2002 				void *address = message.set_watchpoint.address;
2003 				uint32 type = message.set_watchpoint.type;
2004 				int32 length = message.set_watchpoint.length;
2005 
2006 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_WATCHPOINT"
2007 					": address: %p, type: %" B_PRIu32 ", length: %" B_PRId32 "\n",
2008 					nubThread->id, address, type, length));
2009 
2010 				// check the address and size
2011 				status_t result = B_OK;
2012 				if (address == NULL
2013 					|| !BreakpointManager::CanAccessAddress(address, false)) {
2014 					result = B_BAD_ADDRESS;
2015 				}
2016 				if (length < 0)
2017 					result = B_BAD_VALUE;
2018 
2019 				// set the watchpoint
2020 				if (result == B_OK) {
2021 					result = breakpointManager->InstallWatchpoint(address, type,
2022 						length);
2023 				}
2024 
2025 				if (result == B_OK)
2026 					update_threads_breakpoints_flag();
2027 
2028 				// prepare the reply
2029 				reply.set_watchpoint.error = result;
2030 				replySize = sizeof(reply.set_watchpoint);
2031 				sendReply = true;
2032 
2033 				break;
2034 			}
2035 
2036 			case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT:
2037 			{
2038 				// get the parameters
2039 				void *address = message.clear_watchpoint.address;
2040 
2041 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_WATCHPOINT"
2042 					": address: %p\n", nubThread->id, address));
2043 
2044 				// check the address
2045 				status_t result = B_OK;
2046 				if (address == NULL
2047 					|| !BreakpointManager::CanAccessAddress(address, false)) {
2048 					result = B_BAD_ADDRESS;
2049 				}
2050 
2051 				// clear the watchpoint
2052 				if (result == B_OK)
2053 					result = breakpointManager->UninstallWatchpoint(address);
2054 
2055 				if (result == B_OK)
2056 					update_threads_breakpoints_flag();
2057 
2058 				break;
2059 			}
2060 
2061 			case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS:
2062 			{
2063 				// get the parameters
2064 				thread_id threadID = message.set_signal_masks.thread;
2065 				uint64 ignore = message.set_signal_masks.ignore_mask;
2066 				uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask;
2067 				uint32 ignoreOp = message.set_signal_masks.ignore_op;
2068 				uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op;
2069 
2070 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_MASKS"
2071 					": thread: %" B_PRId32 ", ignore: %" B_PRIx64 " (op: %"
2072 					B_PRIu32 "), ignore once: %" B_PRIx64 " (op: %" B_PRIu32
2073 					")\n", nubThread->id, threadID, ignore, ignoreOp,
2074 					ignoreOnce, ignoreOnceOp));
2075 
2076 				// set the masks
2077 				Thread* thread = Thread::GetAndLock(threadID);
2078 				if (thread == NULL)
2079 					break;
2080 				BReference<Thread> threadReference(thread, true);
2081 				ThreadLocker threadLocker(thread, true);
2082 
2083 				InterruptsSpinLocker threadDebugInfoLocker(
2084 					thread->debug_info.lock);
2085 
2086 				if (thread->team == thread_get_current_thread()->team) {
2087 					thread_debug_info &threadDebugInfo = thread->debug_info;
2088 					// set ignore mask
2089 					switch (ignoreOp) {
2090 						case B_DEBUG_SIGNAL_MASK_AND:
2091 							threadDebugInfo.ignore_signals &= ignore;
2092 							break;
2093 						case B_DEBUG_SIGNAL_MASK_OR:
2094 							threadDebugInfo.ignore_signals |= ignore;
2095 							break;
2096 						case B_DEBUG_SIGNAL_MASK_SET:
2097 							threadDebugInfo.ignore_signals = ignore;
2098 							break;
2099 					}
2100 
2101 					// set ignore once mask
2102 					switch (ignoreOnceOp) {
2103 						case B_DEBUG_SIGNAL_MASK_AND:
2104 							threadDebugInfo.ignore_signals_once &= ignoreOnce;
2105 							break;
2106 						case B_DEBUG_SIGNAL_MASK_OR:
2107 							threadDebugInfo.ignore_signals_once |= ignoreOnce;
2108 							break;
2109 						case B_DEBUG_SIGNAL_MASK_SET:
2110 							threadDebugInfo.ignore_signals_once = ignoreOnce;
2111 							break;
2112 					}
2113 				}
2114 
2115 				break;
2116 			}
2117 
2118 			case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS:
2119 			{
2120 				// get the parameters
2121 				replyPort = message.get_signal_masks.reply_port;
2122 				thread_id threadID = message.get_signal_masks.thread;
2123 				status_t result = B_OK;
2124 
2125 				// get the masks
2126 				uint64 ignore = 0;
2127 				uint64 ignoreOnce = 0;
2128 
2129 				Thread* thread = Thread::GetAndLock(threadID);
2130 				if (thread != NULL) {
2131 					BReference<Thread> threadReference(thread, true);
2132 					ThreadLocker threadLocker(thread, true);
2133 
2134 					InterruptsSpinLocker threadDebugInfoLocker(
2135 						thread->debug_info.lock);
2136 
2137 					ignore = thread->debug_info.ignore_signals;
2138 					ignoreOnce = thread->debug_info.ignore_signals_once;
2139 				} else
2140 					result = B_BAD_THREAD_ID;
2141 
2142 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_MASKS"
2143 					": reply port: %" B_PRId32 ", thread: %" B_PRId32 ", "
2144 					"ignore: %" B_PRIx64 ", ignore once: %" B_PRIx64 ", result: "
2145 					"%" B_PRIx32 "\n", nubThread->id, replyPort, threadID,
2146 					ignore, ignoreOnce, result));
2147 
2148 				// prepare the message
2149 				reply.get_signal_masks.error = result;
2150 				reply.get_signal_masks.ignore_mask = ignore;
2151 				reply.get_signal_masks.ignore_once_mask = ignoreOnce;
2152 				replySize = sizeof(reply.get_signal_masks);
2153 				sendReply = true;
2154 				break;
2155 			}
2156 
2157 			case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER:
2158 			{
2159 				// get the parameters
2160 				int signal = message.set_signal_handler.signal;
2161 				struct sigaction &handler = message.set_signal_handler.handler;
2162 
2163 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER"
2164 					": signal: %d, handler: %p\n", nubThread->id, signal,
2165 					handler.sa_handler));
2166 
2167 				// set the handler
2168 				sigaction(signal, &handler, NULL);
2169 
2170 				break;
2171 			}
2172 
2173 			case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER:
2174 			{
2175 				// get the parameters
2176 				replyPort = message.get_signal_handler.reply_port;
2177 				int signal = message.get_signal_handler.signal;
2178 				status_t result = B_OK;
2179 
2180 				// get the handler
2181 				if (sigaction(signal, NULL, &reply.get_signal_handler.handler)
2182 						!= 0) {
2183 					result = errno;
2184 				}
2185 
2186 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER"
2187 					": reply port: %" B_PRId32 ", signal: %d, handler: %p\n",
2188 					nubThread->id, replyPort, signal,
2189 					reply.get_signal_handler.handler.sa_handler));
2190 
2191 				// prepare the message
2192 				reply.get_signal_handler.error = result;
2193 				replySize = sizeof(reply.get_signal_handler);
2194 				sendReply = true;
2195 				break;
2196 			}
2197 
2198 			case B_DEBUG_MESSAGE_PREPARE_HANDOVER:
2199 			{
2200 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_PREPARE_HANDOVER"
2201 					"\n", nubThread->id));
2202 
2203 				Team *team = nubThread->team;
2204 
2205 				// Acquire the debugger write lock. As soon as we have it and
2206 				// have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread
2207 				// will write anything to the debugger port anymore.
2208 				status_t result = acquire_sem_etc(writeLock, 1,
2209 					B_KILL_CAN_INTERRUPT, 0);
2210 				if (result == B_OK) {
2211 					// set the respective team debug flag
2212 					cpu_status state = disable_interrupts();
2213 					GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2214 
2215 					atomic_or(&team->debug_info.flags,
2216 						B_TEAM_DEBUG_DEBUGGER_HANDOVER);
2217 					BreakpointManager* breakpointManager
2218 						= team->debug_info.breakpoint_manager;
2219 
2220 					RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2221 					restore_interrupts(state);
2222 
2223 					// remove all installed breakpoints
2224 					breakpointManager->RemoveAllBreakpoints();
2225 
2226 					release_sem(writeLock);
2227 				} else {
2228 					// We probably got a SIGKILL. If so, we will terminate when
2229 					// reading the next message fails.
2230 				}
2231 
2232 				break;
2233 			}
2234 
2235 			case B_DEBUG_MESSAGE_HANDED_OVER:
2236 			{
2237 				// notify all threads that the debugger has changed
2238 				broadcast_debugged_thread_message(nubThread,
2239 					B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
2240 
2241 				break;
2242 			}
2243 
2244 			case B_DEBUG_START_PROFILER:
2245 			{
2246 				// get the parameters
2247 				thread_id threadID = message.start_profiler.thread;
2248 				replyPort = message.start_profiler.reply_port;
2249 				area_id sampleArea = message.start_profiler.sample_area;
2250 				int32 stackDepth = message.start_profiler.stack_depth;
2251 				bool variableStackDepth
2252 					= message.start_profiler.variable_stack_depth;
2253 				bigtime_t interval = max_c(message.start_profiler.interval,
2254 					B_DEBUG_MIN_PROFILE_INTERVAL);
2255 				status_t result = B_OK;
2256 
2257 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_START_PROFILER: "
2258 					"thread: %" B_PRId32 ", sample area: %" B_PRId32 "\n",
2259 					nubThread->id, threadID, sampleArea));
2260 
2261 				if (stackDepth < 1)
2262 					stackDepth = 1;
2263 				else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH)
2264 					stackDepth = B_DEBUG_STACK_TRACE_DEPTH;
2265 
2266 				// if the stack depth is variable, reserve an extra entry per hit
2267 				// (for the number of samples)
2268 				if (variableStackDepth)
2269 					stackDepth++;
2270 
2271 				// clone the sample area
2272 				area_info areaInfo;
2273 				if (result == B_OK)
2274 					result = get_area_info(sampleArea, &areaInfo);
2275 
2276 				area_id clonedSampleArea = -1;
2277 				void* samples = NULL;
2278 				if (result == B_OK) {
2279 					clonedSampleArea = clone_area("profiling samples", &samples,
2280 						B_ANY_KERNEL_ADDRESS,
2281 						B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
2282 						sampleArea);
2283 					if (clonedSampleArea >= 0) {
2284 						// we need the memory locked
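						// (the samples are written by the profiling timer
						// hook, which runs in interrupt context)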
2285 						result = lock_memory(samples, areaInfo.size,
2286 							B_READ_DEVICE);
2287 						if (result != B_OK) {
2288 							delete_area(clonedSampleArea);
2289 							clonedSampleArea = -1;
2290 						}
2291 					} else
2292 						result = clonedSampleArea;
2293 				}
2294 
2295 				// get the thread and set the profile info
2296 				int32 imageEvent = nubThread->team->debug_info.image_event;
2297 				if (result == B_OK) {
2298 					Thread* thread = Thread::GetAndLock(threadID);
2299 					BReference<Thread> threadReference(thread, true);
2300 					ThreadLocker threadLocker(thread, true);
2301 
2302 					if (thread != NULL && thread->team == nubThread->team) {
2303 						thread_debug_info &threadDebugInfo = thread->debug_info;
2304 
2305 						InterruptsSpinLocker threadDebugInfoLocker(
2306 							threadDebugInfo.lock);
2307 
2308 						if (threadDebugInfo.profile.samples == NULL) {
2309 							threadDebugInfo.profile.interval = interval;
2310 							threadDebugInfo.profile.sample_area
2311 								= clonedSampleArea;
2312 							threadDebugInfo.profile.samples = (addr_t*)samples;
2313 							threadDebugInfo.profile.max_samples
2314 								= areaInfo.size / sizeof(addr_t);
2315 							threadDebugInfo.profile.flush_threshold
2316 								= threadDebugInfo.profile.max_samples
2317 									* B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD
2318 									/ 100;
2319 							threadDebugInfo.profile.sample_count = 0;
2320 							threadDebugInfo.profile.dropped_ticks = 0;
2321 							threadDebugInfo.profile.stack_depth = stackDepth;
2322 							threadDebugInfo.profile.variable_stack_depth
2323 								= variableStackDepth;
2324 							threadDebugInfo.profile.buffer_full = false;
2325 							threadDebugInfo.profile.interval_left = interval;
2326 							threadDebugInfo.profile.installed_timer = NULL;
2327 							threadDebugInfo.profile.image_event = imageEvent;
2328 							threadDebugInfo.profile.last_image_event
2329 								= imageEvent;
2330 						} else
2331 							result = B_BAD_VALUE;
2332 					} else
2333 						result = B_BAD_THREAD_ID;
2334 				}
2335 
2336 				// on error unlock and delete the sample area
2337 				if (result != B_OK) {
2338 					if (clonedSampleArea >= 0) {
2339 						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
2340 						delete_area(clonedSampleArea);
2341 					}
2342 				}
2343 
2344 				// send a reply to the debugger
2345 				reply.start_profiler.error = result;
2346 				reply.start_profiler.interval = interval;
2347 				reply.start_profiler.image_event = imageEvent;
2348 				sendReply = true;
2349 				replySize = sizeof(reply.start_profiler);
2350 
2351 				break;
2352 			}
2353 
2354 			case B_DEBUG_STOP_PROFILER:
2355 			{
2356 				// get the parameters
2357 				thread_id threadID = message.stop_profiler.thread;
2358 				replyPort = message.stop_profiler.reply_port;
2359 				status_t result = B_OK;
2360 
2361 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_STOP_PROFILER: "
2362 					"thread: %" B_PRId32 "\n", nubThread->id, threadID));
2363 
2364 				area_id sampleArea = -1;
2365 				addr_t* samples = NULL;
2366 				int32 sampleCount = 0;
2367 				int32 stackDepth = 0;
2368 				bool variableStackDepth = false;
2369 				int32 imageEvent = 0;
2370 				int32 droppedTicks = 0;
2371 
2372 				// get the thread and detach the profile info
2373 				Thread* thread = Thread::GetAndLock(threadID);
2374 				BReference<Thread> threadReference(thread, true);
2375 				ThreadLocker threadLocker(thread, true);
2376 
2377 				if (thread && thread->team == nubThread->team) {
2378 					thread_debug_info &threadDebugInfo = thread->debug_info;
2379 
2380 					InterruptsSpinLocker threadDebugInfoLocker(
2381 						threadDebugInfo.lock);
2382 
2383 					if (threadDebugInfo.profile.samples != NULL) {
2384 						sampleArea = threadDebugInfo.profile.sample_area;
2385 						samples = threadDebugInfo.profile.samples;
2386 						sampleCount = threadDebugInfo.profile.sample_count;
2387 						droppedTicks = threadDebugInfo.profile.dropped_ticks;
2388 						stackDepth = threadDebugInfo.profile.stack_depth;
2389 						variableStackDepth
2390 							= threadDebugInfo.profile.variable_stack_depth;
2391 						imageEvent = threadDebugInfo.profile.image_event;
2392 						threadDebugInfo.profile.sample_area = -1;
2393 						threadDebugInfo.profile.samples = NULL;
2394 						threadDebugInfo.profile.buffer_full = false;
2395 						threadDebugInfo.profile.dropped_ticks = 0;
2396 					} else
2397 						result = B_BAD_VALUE;
2398 				} else
2399 					result = B_BAD_THREAD_ID;
2400 
2401 				threadLocker.Unlock();
2402 
2403 				// prepare the reply
2404 				if (result == B_OK) {
2405 					reply.profiler_update.origin.thread = threadID;
2406 					reply.profiler_update.image_event = imageEvent;
2407 					reply.profiler_update.stack_depth = stackDepth;
2408 					reply.profiler_update.variable_stack_depth
2409 						= variableStackDepth;
2410 					reply.profiler_update.sample_count = sampleCount;
2411 					reply.profiler_update.dropped_ticks = droppedTicks;
2412 					reply.profiler_update.stopped = true;
2413 				} else
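					// on error the origin.thread field transports the error
					// code to the debugger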
2414 					reply.profiler_update.origin.thread = result;
2415 
2416 				replySize = sizeof(debug_profiler_update);
2417 				sendReply = true;
2418 
2419 				if (sampleArea >= 0) {
2420 					area_info areaInfo;
2421 					if (get_area_info(sampleArea, &areaInfo) == B_OK) {
2422 						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
2423 						delete_area(sampleArea);
2424 					}
2425 				}
2426 
2427 				break;
2428 			}
2429 
2430 			case B_DEBUG_WRITE_CORE_FILE:
2431 			{
2432 				// get the parameters
2433 				replyPort = message.write_core_file.reply_port;
2434 				char* path = message.write_core_file.path;
2435 				path[sizeof(message.write_core_file.path) - 1] = '\0';
2436 
2437 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_WRITE_CORE_FILE"
2438 					": path: %s\n", nubThread->id, path));
2439 
2440 				// write the core file
2441 				status_t result = core_dump_write_core_file(path, false);
2442 
2443 				// prepare the reply
2444 				reply.write_core_file.error = result;
2445 				replySize = sizeof(reply.write_core_file);
2446 				sendReply = true;
2447 
2448 				break;
2449 			}
2450 		}
2451 
2452 		// send the reply, if necessary
2453 		if (sendReply) {
2454 			status_t error = kill_interruptable_write_port(replyPort, command,
2455 				&reply, replySize);
2456 
2457 			if (error != B_OK) {
2458 				// The debugger port no longer exists or we were interrupted
2459 				// by a kill signal. In either case we terminate.
2460 				TRACE(("nub thread %" B_PRId32 ": failed to send reply to port "
2461 					"%" B_PRId32 ": %s\n", nubThread->id, replyPort,
2462 					strerror(error)));
2463 
2464 				nub_thread_cleanup(nubThread);
2465 				return error;
2466 			}
2467 		}
2468 	}
2469 }
2470 
2471 
2472 /**	\brief Helper function for install_team_debugger() that sets up the team
2473 		   and thread debug infos.
2474 
2475 	The caller must hold the team's lock as well as the team debug info lock.
2476 
2477 	The function also clears the arch-specific team and thread debug infos
2478 	(including, among other things, previously set break-/watchpoints).
2479  */
2480 static void
2481 install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam,
2482 	port_id debuggerPort, port_id nubPort, thread_id nubThread,
2483 	sem_id debuggerPortWriteLock, thread_id causingThread)
2484 {
2485 	atomic_set(&team->debug_info.flags,
2486 		B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED);
2487 	team->debug_info.nub_port = nubPort;
2488 	team->debug_info.nub_thread = nubThread;
2489 	team->debug_info.debugger_team = debuggerTeam;
2490 	team->debug_info.debugger_port = debuggerPort;
2491 	team->debug_info.debugger_write_lock = debuggerPortWriteLock;
2492 	team->debug_info.causing_thread = causingThread;
2493 
2494 	arch_clear_team_debug_info(&team->debug_info.arch_info);
2495 
2496 	// set the user debug flags and signal masks of all threads to the default
2497 	for (Thread *thread = team->thread_list; thread;
2498 			thread = thread->team_next) {
2499 		SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
2500 
2501 		if (thread->id == nubThread) {
2502 			atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD);
2503 		} else {
2504 			int32 flags = thread->debug_info.flags
2505 				& ~B_THREAD_DEBUG_USER_FLAG_MASK;
2506 			atomic_set(&thread->debug_info.flags,
2507 				flags | B_THREAD_DEBUG_DEFAULT_FLAGS);
2508 			thread->debug_info.ignore_signals = 0;
2509 			thread->debug_info.ignore_signals_once = 0;
2510 
2511 			arch_clear_thread_debug_info(&thread->debug_info.arch_info);
2512 		}
2513 	}
2514 
2515 	// update the thread::flags fields
2516 	update_threads_debugger_installed_flag(team);
2517 }
2518 
2519 
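/**	\brief Installs \a debuggerPort's team as the debugger of team \a teamID.

	If \a useDefault is \c true, the default debugger port is used instead of
	\a debuggerPort. If a debugger is already installed, the function either
	succeeds without changes (\a dontReplace), performs a handover to the new
	debugger (if the old debugger prepared one), or fails. Otherwise the
	debugger write lock, the nub port, the breakpoint manager, and the nub
	thread are created and the team's debug info is initialized.

	\return The ID of the team's nub port on success, an error code otherwise.
 */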
2520 static port_id
2521 install_team_debugger(team_id teamID, port_id debuggerPort,
2522 	thread_id causingThread, bool useDefault, bool dontReplace)
2523 {
2524 	TRACE(("install_team_debugger(team: %" B_PRId32 ", port: %" B_PRId32 ", "
2525 		"default: %d, dontReplace: %d)\n", teamID, debuggerPort, useDefault,
2526 		dontReplace));
2527 
2528 	if (useDefault)
2529 		debuggerPort = atomic_get(&sDefaultDebuggerPort);
2530 
2531 	// get the debugger team
2532 	port_info debuggerPortInfo;
2533 	status_t error = get_port_info(debuggerPort, &debuggerPortInfo);
2534 	if (error != B_OK) {
2535 		TRACE(("install_team_debugger(): Failed to get debugger port info: "
2536 			"%" B_PRIx32 "\n", error));
2537 		return error;
2538 	}
2539 	team_id debuggerTeam = debuggerPortInfo.team;
2540 
2541 	// Check the debugger team: It must neither be the kernel team nor the
2542 	// debugged team.
2543 	if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) {
2544 		TRACE(("install_team_debugger(): Can't debug kernel or debugger team. "
2545 			"debugger: %" B_PRId32 ", debugged: %" B_PRId32 "\n", debuggerTeam,
2546 			teamID));
2547 		return B_NOT_ALLOWED;
2548 	}
2549 
2550 	// get the team
2551 	Team* team;
2552 	ConditionVariable debugChangeCondition;
2553 	debugChangeCondition.Init(NULL, "debug change condition");
2554 	error = prepare_debugger_change(teamID, debugChangeCondition, team);
2555 	if (error != B_OK)
2556 		return error;
2557 
2558 	// get the real team ID
2559 	teamID = team->id;
2560 
2561 	// check if a debugger is already installed
2562 
2563 	bool done = false;
2564 	port_id result = B_ERROR;
2565 	bool handOver = false;
2566 	port_id oldDebuggerPort = -1;
2567 	port_id nubPort = -1;
2568 
2569 	TeamLocker teamLocker(team);
2570 	cpu_status state = disable_interrupts();
2571 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2572 
2573 	int32 teamDebugFlags = team->debug_info.flags;
2574 
2575 	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2576 		// There's already a debugger installed.
2577 		if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) {
2578 			if (dontReplace) {
2579 				// We're fine with already having a debugger.
2580 				error = B_OK;
2581 				done = true;
2582 				result = team->debug_info.nub_port;
2583 			} else {
2584 				// a handover to another debugger is requested
2585 				// Set the handing-over flag -- we'll clear both flags after
2586 				// having sent the handed-over message to the new debugger.
2587 				atomic_or(&team->debug_info.flags,
2588 					B_TEAM_DEBUG_DEBUGGER_HANDING_OVER);
2589 
2590 				oldDebuggerPort = team->debug_info.debugger_port;
2591 				result = nubPort = team->debug_info.nub_port;
2592 				if (causingThread < 0)
2593 					causingThread = team->debug_info.causing_thread;
2594 
2595 				// set the new debugger
2596 				install_team_debugger_init_debug_infos(team, debuggerTeam,
2597 					debuggerPort, nubPort, team->debug_info.nub_thread,
2598 					team->debug_info.debugger_write_lock, causingThread);
2599 
2600 				handOver = true;
2601 				done = true;
2602 			}
2603 		} else {
2604 			// there's already a debugger installed
2605 			error = (dontReplace ? B_OK : B_BAD_VALUE);
2606 			done = true;
2607 			result = team->debug_info.nub_port;
2608 		}
2609 	} else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0
2610 		&& useDefault) {
2611 		// No debugger yet, disable_debugger() had been invoked, and we
2612 		// would install the default debugger. Just fail.
2613 		error = B_BAD_VALUE;
2614 	}
2615 
2616 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2617 	restore_interrupts(state);
2618 	teamLocker.Unlock();
2619 
2620 	if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) {
2621 		// The old debugger must just have died. Proceed as if there was no
2622 		// debugger installed. We may still be too early, in which case
2623 		// we'll fail, but this race condition should be unbelievably rare
2624 		// and relatively harmless.
2625 		handOver = false;
2626 		done = false;
2627 	}
2628 
2629 	if (handOver) {
2630 		// prepare the handed-over message
2631 		debug_handed_over notification;
2632 		notification.origin.thread = -1;
2633 		notification.origin.team = teamID;
2634 		notification.origin.nub_port = nubPort;
2635 		notification.debugger = debuggerTeam;
2636 		notification.debugger_port = debuggerPort;
2637 		notification.causing_thread = causingThread;
2638 
2639 		// notify the new debugger
2640 		error = write_port_etc(debuggerPort,
2641 			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
2642 			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
2643 		if (error != B_OK) {
2644 			dprintf("install_team_debugger(): Failed to send message to new "
2645 				"debugger: %s\n", strerror(error));
2646 		}
2647 
2648 		// clear the handed-over and handing-over flags
2649 		state = disable_interrupts();
2650 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2651 
2652 		atomic_and(&team->debug_info.flags,
2653 			~(B_TEAM_DEBUG_DEBUGGER_HANDOVER
2654 				| B_TEAM_DEBUG_DEBUGGER_HANDING_OVER));
2655 
2656 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2657 		restore_interrupts(state);
2658 
2659 		finish_debugger_change(team);
2660 
2661 		// notify the nub thread
2662 		kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER,
2663 			NULL, 0);
2664 
2665 		// notify the old debugger
2666 		error = write_port_etc(oldDebuggerPort,
2667 			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
2668 			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
2669 		if (error != B_OK) {
2670 			TRACE(("install_team_debugger(): Failed to send message to old "
2671 				"debugger: %s\n", strerror(error)));
2672 		}
2673 
2674 		TRACE(("install_team_debugger() done: handed over to debugger: team: "
2675 			"%" B_PRId32 ", port: %" B_PRId32 "\n", debuggerTeam,
2676 			debuggerPort));
2677 
2678 		return result;
2679 	}
2680 
2681 	if (done || error != B_OK) {
2682 		TRACE(("install_team_debugger() done1: %" B_PRId32 "\n",
2683 			(error == B_OK ? result : error)));
2684 		finish_debugger_change(team);
2685 		return (error == B_OK ? result : error);
2686 	}
2687 
2688 	// create the debugger write lock semaphore
2689 	char nameBuffer[B_OS_NAME_LENGTH];
2690 	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debugger port "
2691 		"write", teamID);
2692 	sem_id debuggerWriteLock = create_sem(1, nameBuffer);
2693 	if (debuggerWriteLock < 0)
2694 		error = debuggerWriteLock;
2695 
2696 	// create the nub port
2697 	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug", teamID);
2698 	if (error == B_OK) {
2699 		nubPort = create_port(1, nameBuffer);
2700 		if (nubPort < 0)
2701 			error = nubPort;
2702 		else
2703 			result = nubPort;
2704 	}
2705 
2706 	// make the debugger team the port owner; thus we know if the debugger is
2707 	// gone and can clean up
2708 	if (error == B_OK)
2709 		error = set_port_owner(nubPort, debuggerTeam);
2710 
2711 	// create the breakpoint manager
2712 	BreakpointManager* breakpointManager = NULL;
2713 	if (error == B_OK) {
2714 		breakpointManager = new(std::nothrow) BreakpointManager;
2715 		if (breakpointManager != NULL)
2716 			error = breakpointManager->Init();
2717 		else
2718 			error = B_NO_MEMORY;
2719 	}
2720 
2721 	// spawn the nub thread
2722 	thread_id nubThread = -1;
2723 	if (error == B_OK) {
2724 		snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug task",
2725 			teamID);
2726 		nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer,
2727 			B_NORMAL_PRIORITY, NULL, teamID);
2728 		if (nubThread < 0)
2729 			error = nubThread;
2730 	}
2731 
2732 	// now adjust the debug info accordingly
2733 	if (error == B_OK) {
2734 		TeamLocker teamLocker(team);
2735 		state = disable_interrupts();
2736 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2737 
2738 		team->debug_info.breakpoint_manager = breakpointManager;
2739 		install_team_debugger_init_debug_infos(team, debuggerTeam,
2740 			debuggerPort, nubPort, nubThread, debuggerWriteLock,
2741 			causingThread);
2742 
2743 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2744 		restore_interrupts(state);
2745 	}
2746 
2747 	finish_debugger_change(team);
2748 
2749 	// if everything went fine, resume the nub thread, otherwise clean up
2750 	if (error == B_OK) {
2751 		resume_thread(nubThread);
2752 	} else {
2753 		// delete port and terminate thread
2754 		if (nubPort >= 0) {
2755 			set_port_owner(nubPort, B_CURRENT_TEAM);
2756 			delete_port(nubPort);
2757 		}
2758 		if (nubThread >= 0) {
2759 			int32 result;
2760 			wait_for_thread(nubThread, &result);
2761 		}
2762 
2763 		delete breakpointManager;
2764 	}
2765 
2766 	TRACE(("install_team_debugger() done2: %" B_PRId32 "\n",
2767 		(error == B_OK ? result : error)));
2768 	return (error == B_OK ? result : error);
2769 }
2770 
2771 
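// Makes sure a debugger is installed for the current team. If none is
// installed yet, the default debugger is installed.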
2772 static status_t
2773 ensure_debugger_installed()
2774 {
2775 	port_id port = install_team_debugger(B_CURRENT_TEAM, -1,
2776 		thread_get_current_thread_id(), true, true);
2777 	return port >= 0 ? B_OK : port;
2778 }
2779 
2780 
2781 // #pragma mark -
2782 
2783 
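// Backend of the debugger() call: makes sure a debugger is installed for the
// current team and reports a "debugger call" event for the calling thread,
// passing on the given message.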
2784 void
2785 _user_debugger(const char *userMessage)
2786 {
2787 	// install the default debugger, if there is none yet
2788 	status_t error = ensure_debugger_installed();
2789 	if (error != B_OK) {
2790 		// time to commit suicide
2791 		char buffer[128];
2792 		ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer));
2793 		if (length >= 0) {
2794 			dprintf("_user_debugger(): Failed to install debugger. Message is: "
2795 				"`%s'\n", buffer);
2796 		} else {
2797 			dprintf("_user_debugger(): Failed to install debugger. Message is: "
2798 				"%p (%s)\n", userMessage, strerror(length));
2799 		}
2800 		_user_exit_team(1);
2801 	}
2802 
2803 	// prepare the message
2804 	debug_debugger_call message;
2805 	message.message = (void*)userMessage;
2806 
2807 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message,
2808 		sizeof(message), true);
2809 }
2810 
2811 
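// Sets or clears the team's B_TEAM_DEBUG_DEBUGGER_DISABLED flag, which
// prevents the default debugger from being installed for the team, and
// returns the previous "enabled" state (see the TODO below).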
2812 int
2813 _user_disable_debugger(int state)
2814 {
2815 	Team *team = thread_get_current_thread()->team;
2816 
2817 	TRACE(("_user_disable_debugger(%d): team: %" B_PRId32 "\n", state,
2818 		team->id));
2819 
2820 	cpu_status cpuState = disable_interrupts();
2821 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2822 
2823 	int32 oldFlags;
2824 	if (state) {
2825 		oldFlags = atomic_or(&team->debug_info.flags,
2826 			B_TEAM_DEBUG_DEBUGGER_DISABLED);
2827 	} else {
2828 		oldFlags = atomic_and(&team->debug_info.flags,
2829 			~B_TEAM_DEBUG_DEBUGGER_DISABLED);
2830 	}
2831 
2832 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2833 	restore_interrupts(cpuState);
2834 
2835 	// TODO: Check if the return value is really the old state.
2836 	return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED);
2837 }
2838 
2839 
2840 status_t
2841 _user_install_default_debugger(port_id debuggerPort)
2842 {
2843 	// Do not allow non-root processes to install a default debugger.
2844 	if (geteuid() != 0)
2845 		return B_PERMISSION_DENIED;
2846 
2847 	// if a port was supplied, check whether it is valid
2848 	if (debuggerPort >= 0) {
2849 		port_info portInfo;
2850 		status_t error = get_port_info(debuggerPort, &portInfo);
2851 		if (error != B_OK)
2852 			return error;
2853 
2854 		// the debugger team must not be the kernel team
2855 		if (portInfo.team == team_get_kernel_team_id())
2856 			return B_NOT_ALLOWED;
2857 	}
2858 
2859 	atomic_set(&sDefaultDebuggerPort, debuggerPort);
2860 
2861 	return B_OK;
2862 }
2863 
2864 
2865 port_id
2866 _user_install_team_debugger(team_id teamID, port_id debuggerPort)
2867 {
2868 	if (geteuid() != 0 && team_geteuid(teamID) != geteuid())
2869 		return B_PERMISSION_DENIED;
2870 
2871 	return install_team_debugger(teamID, debuggerPort, -1, false, false);
2872 }
2873 
2874 
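// Removes the debugger from the given team by deleting the nub port, which
// causes the nub thread to terminate and clean up the team's debug info.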
2875 status_t
2876 _user_remove_team_debugger(team_id teamID)
2877 {
2878 	Team* team;
2879 	ConditionVariable debugChangeCondition;
2880 	debugChangeCondition.Init(NULL, "debug change condition");
2881 	status_t error = prepare_debugger_change(teamID, debugChangeCondition,
2882 		team);
2883 	if (error != B_OK)
2884 		return error;
2885 
2886 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2887 
2888 	thread_id nubThread = -1;
2889 	port_id nubPort = -1;
2890 
2891 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2892 		// there's a debugger installed
2893 		nubThread = team->debug_info.nub_thread;
2894 		nubPort = team->debug_info.nub_port;
2895 	} else {
2896 		// no debugger installed
2897 		error = B_BAD_VALUE;
2898 	}
2899 
2900 	debugInfoLocker.Unlock();
2901 
2902 	// Delete the nub port -- this will cause the nub thread to terminate and
2903 	// remove the debugger.
2904 	if (nubPort >= 0)
2905 		delete_port(nubPort);
2906 
2907 	finish_debugger_change(team);
2908 
2909 	// wait for the nub thread
2910 	if (nubThread >= 0)
2911 		wait_for_thread(nubThread, NULL);
2912 
2913 	return error;
2914 }
2915 
2916 
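// Asks the given thread to enter the debugger: sets B_THREAD_DEBUG_STOP and
// sends it SIGNAL_DEBUG_THREAD, so it stops as soon as possible.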
2917 status_t
2918 _user_debug_thread(thread_id threadID)
2919 {
2920 	TRACE(("[%" B_PRId32 "] _user_debug_thread(%" B_PRId32 ")\n",
2921 		find_thread(NULL), threadID));
2922 
2923 	// get the thread
2924 	Thread* thread = Thread::GetAndLock(threadID);
2925 	if (thread == NULL)
2926 		return B_BAD_THREAD_ID;
2927 	BReference<Thread> threadReference(thread, true);
2928 	ThreadLocker threadLocker(thread, true);
2929 
2930 	// we can't debug the kernel team
2931 	if (thread->team == team_get_kernel_team())
2932 		return B_NOT_ALLOWED;
2933 
2934 	InterruptsLocker interruptsLocker;
2935 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
2936 
2937 	// If the thread is already dying, it's too late to debug it.
2938 	if ((thread->debug_info.flags & B_THREAD_DEBUG_DYING) != 0)
2939 		return B_BAD_THREAD_ID;
2940 
2941 	// don't debug the nub thread
2942 	if ((thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) != 0)
2943 		return B_NOT_ALLOWED;
2944 
2945 	// already marked stopped or being told to stop?
2946 	if ((thread->debug_info.flags
2947 			& (B_THREAD_DEBUG_STOPPED | B_THREAD_DEBUG_STOP)) != 0) {
2948 		return B_OK;
2949 	}
2950 
2951 	// set the flag that tells the thread to stop as soon as possible
2952 	atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP);
2953 
2954 	update_thread_user_debug_flag(thread);
2955 
2956 	// send the thread a SIGNAL_DEBUG_THREAD, so it is interrupted (or
2957 	// continued)
2958 	threadDebugInfoLocker.Unlock();
2959 	ReadSpinLocker teamLocker(thread->team_lock);
2960 	SpinLocker locker(thread->team->signal_lock);
2961 
2962 	send_signal_to_thread_locked(thread, SIGNAL_DEBUG_THREAD, NULL, 0);
2963 
2964 	return B_OK;
2965 }
2966 
2967 
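// Stops the calling thread by reporting a "thread debugged" event to the
// team's debugger.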
2968 void
2969 _user_wait_for_debugger(void)
2970 {
2971 	debug_thread_debugged message = {};
2972 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message,
2973 		sizeof(message), false);
2974 }
2975 
2976 
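// Sets a break- or watchpoint for the calling team directly via the arch
// support; fails if a debugger is already installed for the team.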
2977 status_t
2978 _user_set_debugger_breakpoint(void *address, uint32 type, int32 length,
2979 	bool watchpoint)
2980 {
2981 	// check the address and size
2982 	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
2983 		return B_BAD_ADDRESS;
2984 	if (watchpoint && length < 0)
2985 		return B_BAD_VALUE;
2986 
2987 	// check whether a debugger is installed already
2988 	team_debug_info teamDebugInfo;
2989 	get_team_debug_info(teamDebugInfo);
2990 	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2991 		return B_BAD_VALUE;
2992 
2993 	// We can't help it, here's a small but relatively harmless race condition,
2994 	// since a debugger could be installed in the meantime. The worst case is
2995 	// that we install a break/watchpoint the debugger doesn't know about.
2996 
2997 	// set the break/watchpoint
2998 	status_t result;
2999 	if (watchpoint)
3000 		result = arch_set_watchpoint(address, type, length);
3001 	else
3002 		result = arch_set_breakpoint(address);
3003 
3004 	if (result == B_OK)
3005 		update_threads_breakpoints_flag();
3006 
3007 	return result;
3008 }
3009 
3010 
3011 status_t
3012 _user_clear_debugger_breakpoint(void *address, bool watchpoint)
3013 {
3014 	// check the address
3015 	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
3016 		return B_BAD_ADDRESS;
3017 
3018 	// check whether a debugger is installed already
3019 	team_debug_info teamDebugInfo;
3020 	get_team_debug_info(teamDebugInfo);
3021 	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
3022 		return B_BAD_VALUE;
3023 
3024 	// We can't help it, here's a small but relatively harmless race condition,
3025 	// since a debugger could be installed in the meantime. The worst case is
3026 	// that we clear a break/watchpoint the debugger has just installed.
3027 
3028 	// clear the break/watchpoint
3029 	status_t result;
3030 	if (watchpoint)
3031 		result = arch_clear_watchpoint(address);
3032 	else
3033 		result = arch_clear_breakpoint(address);
3034 
3035 	if (result == B_OK)
3036 		update_threads_breakpoints_flag();
3037 
3038 	return result;
3039 }
3040