xref: /haiku/src/system/kernel/debug/user_debugger.cpp (revision b671e9bbdbd10268a042b4f4cc4317ccd03d105e)
1 /*
2  * Copyright 2005-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include <signal.h>
8 #include <stdlib.h>
9 #include <stdio.h>
10 #include <string.h>
11 
12 #include <algorithm>
13 
14 #include <arch/debug.h>
15 #include <arch/user_debugger.h>
16 #include <cpu.h>
17 #include <debugger.h>
18 #include <kernel.h>
19 #include <KernelExport.h>
20 #include <kscheduler.h>
21 #include <ksignal.h>
22 #include <ksyscalls.h>
23 #include <port.h>
24 #include <sem.h>
25 #include <team.h>
26 #include <thread.h>
27 #include <thread_types.h>
28 #include <user_debugger.h>
29 #include <vm.h>
30 #include <vm_types.h>
31 
32 #include <AutoDeleter.h>
33 #include <util/AutoLock.h>
34 
35 #include "BreakpointManager.h"
36 
37 
38 //#define TRACE_USER_DEBUGGER
39 #ifdef TRACE_USER_DEBUGGER
40 #	define TRACE(x) dprintf x
41 #else
42 #	define TRACE(x) ;
43 #endif
44 
45 
46 // TODO: Since the introduction of team_debug_info::debugger_changed_condition
47 // there's some potential for simplifications. E.g. clear_team_debug_info() and
48 // destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus
49 // arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()).
50 
51 
52 static port_id sDefaultDebuggerPort = -1;
53 	// accessed atomically
54 
55 static timer sProfilingTimers[B_MAX_CPU_COUNT];
56 	// a profiling timer for each CPU -- used when a profiled thread is running
57 	// on that CPU
58 
59 
60 static void schedule_profiling_timer(struct thread* thread,
61 	bigtime_t interval);
62 static int32 profiling_event(timer* unused);
63 static status_t ensure_debugger_installed();
64 static void get_team_debug_info(team_debug_info &teamDebugInfo);
65 
66 
67 static status_t
68 kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
69 	size_t bufferSize)
70 {
71 	return write_port_etc(port, code, buffer, bufferSize,
72 		B_KILL_CAN_INTERRUPT, 0);
73 }
74 
75 
76 static status_t
77 debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
78 	bool dontWait)
79 {
80 	TRACE(("debugger_write(): thread: %ld, team %ld, port: %ld, code: %lx, message: %p, "
81 		"size: %lu, dontWait: %d\n", thread_get_current_thread()->id,
82 		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
83 		dontWait));
84 
85 	status_t error = B_OK;
86 
87 	// get the team debug info
88 	team_debug_info teamDebugInfo;
89 	get_team_debug_info(teamDebugInfo);
90 	sem_id writeLock = teamDebugInfo.debugger_write_lock;
91 
92 	// get the write lock
93 	TRACE(("debugger_write(): acquiring write lock...\n"));
94 	error = acquire_sem_etc(writeLock, 1,
95 		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
96 	if (error != B_OK) {
97 		TRACE(("debugger_write() done1: %lx\n", error));
98 		return error;
99 	}
100 
101 	// re-get the team debug info
102 	get_team_debug_info(teamDebugInfo);
103 
104 	if (teamDebugInfo.debugger_port != port
105 		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
106 		// The debugger has changed in the meantime or we are about to be
107 		// handed over to a new debugger. In either case we don't send the
108 		// message.
109 		TRACE(("debugger_write(): %s\n",
110 			(teamDebugInfo.debugger_port != port ? "debugger port changed"
111 				: "handover flag set")));
112 	} else {
113 		TRACE(("debugger_write(): writing to port...\n"));
114 
115 		error = write_port_etc(port, code, buffer, bufferSize,
116 			dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
117 	}
118 
119 	// release the write lock
120 	release_sem(writeLock);
121 
122 	TRACE(("debugger_write() done: %lx\n", error));
123 
124 	return error;
125 }
126 
127 
128 /*!	Updates the thread::flags field according to what user debugger flags are
129 	set for the thread.
130 	Interrupts must be disabled and the thread lock must be held.
131 */
132 static void
133 update_thread_user_debug_flag(struct thread* thread)
134 {
135 	if (atomic_get(&thread->debug_info.flags)
136 			& (B_THREAD_DEBUG_STOP | B_THREAD_DEBUG_SINGLE_STEP)) {
137 		atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD);
138 	} else
139 		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD);
140 }
141 
142 
143 /*!	Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the
144 	given thread.
145 	Interrupts must be disabled and the team lock must be held.
146 */
147 static void
148 update_thread_breakpoints_flag(struct thread* thread)
149 {
150 	struct team* team = thread->team;
151 
152 	if (arch_has_breakpoints(&team->debug_info.arch_info))
153 		atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
154 	else
155 		atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
156 }
157 
158 
159 /*!	Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
160 	threads of the current team.
161 	The function acquires the team spinlock (and disables interrupts) itself.
162 */
163 static void
164 update_threads_breakpoints_flag()
165 {
166 	InterruptsSpinLocker _(gTeamSpinlock);
167 
168 	struct team* team = thread_get_current_thread()->team;
169 	struct thread* thread = team->thread_list;
170 
171 	if (arch_has_breakpoints(&team->debug_info.arch_info)) {
172 		for (; thread != NULL; thread = thread->team_next)
173 			atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
174 	} else {
175 		for (; thread != NULL; thread = thread->team_next)
176 			atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
177 	}
178 }
179 
180 
181 /*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of the
182 	given thread.
183 	Interrupts must be disabled and the team lock must be held.
184 */
185 static void
186 update_thread_debugger_installed_flag(struct thread* thread)
187 {
188 	struct team* team = thread->team;
189 
190 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
191 		atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
192 	else
193 		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
194 }
195 
196 
197 /*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all
198 	threads of the given team.
199 	Interrupts must be disabled and the team lock must be held.
200 */
201 static void
202 update_threads_debugger_installed_flag(struct team* team)
203 {
204 	struct thread* thread = team->thread_list;
205 
206 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
207 		for (; thread != NULL; thread = thread->team_next)
208 			atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
209 	} else {
210 		for (; thread != NULL; thread = thread->team_next)
211 			atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
212 	}
213 }
214 
215 
216 /**
217  *	For the first initialization the function must be called with \a initLock
218  *	set to \c true. If another thread might access the structure
219  *	concurrently, `lock' must be held when calling the function.
220  */
221 void
222 clear_team_debug_info(struct team_debug_info *info, bool initLock)
223 {
224 	if (info) {
225 		arch_clear_team_debug_info(&info->arch_info);
226 		atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
227 		info->debugger_team = -1;
228 		info->debugger_port = -1;
229 		info->nub_thread = -1;
230 		info->nub_port = -1;
231 		info->debugger_write_lock = -1;
232 		info->causing_thread = -1;
233 		info->image_event = 0;
234 		info->breakpoint_manager = NULL;
235 
236 		if (initLock) {
237 			B_INITIALIZE_SPINLOCK(&info->lock);
238 			info->debugger_changed_condition = NULL;
239 		}
240 	}
241 }
242 
243 /**
244  *  `lock' must not be held nor may interrupts be disabled.
245  *  \a info must not be a member of a team struct (or the team struct must no
246  *  longer be accessible, i.e. the team should already be removed).
247  *
248  *	In case the team is still accessible, the procedure is:
249  *	1. get `lock'
250  *	2. copy the team debug info on stack
251  *	3. call clear_team_debug_info() on the team debug info
252  *	4. release `lock'
253  *	5. call destroy_team_debug_info() on the copied team debug info
254  */
255 static void
256 destroy_team_debug_info(struct team_debug_info *info)
257 {
258 	if (info) {
259 		arch_destroy_team_debug_info(&info->arch_info);
260 
261 		// delete the breakpoint manager
262 		delete info->breakpoint_manager;
263 		info->breakpoint_manager = NULL;
264 
265 		// delete the debugger port write lock
266 		if (info->debugger_write_lock >= 0) {
267 			delete_sem(info->debugger_write_lock);
268 			info->debugger_write_lock = -1;
269 		}
270 
271 		// delete the nub port
272 		if (info->nub_port >= 0) {
273 			set_port_owner(info->nub_port, B_CURRENT_TEAM);
274 			delete_port(info->nub_port);
275 			info->nub_port = -1;
276 		}
277 
278 		// wait for the nub thread
279 		if (info->nub_thread >= 0) {
280 			if (info->nub_thread != thread_get_current_thread()->id) {
281 				int32 result;
282 				wait_for_thread(info->nub_thread, &result);
283 			}
284 
285 			info->nub_thread = -1;
286 		}
287 
288 		atomic_set(&info->flags, 0);
289 		info->debugger_team = -1;
290 		info->debugger_port = -1;
291 		info->causing_thread = -1;
292 		info->image_event = -1;
293 	}
294 }
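

// A minimal sketch of the teardown procedure documented above, kept in #if 0
// since it is illustrative only (the function name is hypothetical). It
// mirrors what nub_thread_cleanup() does further below: clear the info under
// `lock', then destroy the stack copy unlocked, since destroying may block.
#if 0
static void
example_tear_down_team_debug_info(struct team* team)
{
	team_debug_info copy;

	// 1. + 2.: get `lock' and copy the team debug info onto the stack
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	copy = team->debug_info;

	// 3.: clear the info in place; initLock == false leaves the spinlock and
	// the debugger_changed_condition pointer untouched
	clear_team_debug_info(&team->debug_info, false);

	// 4.: release `lock'
	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
	restore_interrupts(state);

	// 5.: destroy the copied info -- this may block (delete_sem(),
	// delete_port(), wait_for_thread()), which is why it happens unlocked
	destroy_team_debug_info(&copy);
}
#endif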
295 
296 
297 void
298 init_thread_debug_info(struct thread_debug_info *info)
299 {
300 	if (info) {
301 		arch_clear_thread_debug_info(&info->arch_info);
302 		info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
303 		info->debug_port = -1;
304 		info->ignore_signals = 0;
305 		info->ignore_signals_once = 0;
306 		info->profile.sample_area = -1;
307 		info->profile.samples = NULL;
308 		info->profile.buffer_full = false;
309 		info->profile.installed_timer = NULL;
310 	}
311 }
312 
313 
314 /*!	Invoked with thread lock being held.
315 */
316 void
317 clear_thread_debug_info(struct thread_debug_info *info, bool dying)
318 {
319 	if (info) {
320 		// cancel profiling timer
321 		if (info->profile.installed_timer != NULL) {
322 			cancel_timer(info->profile.installed_timer);
323 			info->profile.installed_timer = NULL;
324 		}
325 
326 		arch_clear_thread_debug_info(&info->arch_info);
327 		atomic_set(&info->flags,
328 			B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ? B_THREAD_DEBUG_DYING : 0));
329 		info->debug_port = -1;
330 		info->ignore_signals = 0;
331 		info->ignore_signals_once = 0;
332 		info->profile.sample_area = -1;
333 		info->profile.samples = NULL;
334 		info->profile.buffer_full = false;
335 	}
336 }
337 
338 
339 void
340 destroy_thread_debug_info(struct thread_debug_info *info)
341 {
342 	if (info) {
343 		area_id sampleArea = info->profile.sample_area;
344 		if (sampleArea >= 0) {
345 			area_info areaInfo;
346 			if (get_area_info(sampleArea, &areaInfo) == B_OK) {
347 				unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
348 				delete_area(sampleArea);
349 			}
350 		}
351 
352 		arch_destroy_thread_debug_info(&info->arch_info);
353 
354 		if (info->debug_port >= 0) {
355 			delete_port(info->debug_port);
356 			info->debug_port = -1;
357 		}
358 
359 		info->ignore_signals = 0;
360 		info->ignore_signals_once = 0;
361 
362 		atomic_set(&info->flags, 0);
363 	}
364 }
365 
366 
367 static status_t
368 prepare_debugger_change(team_id teamID, ConditionVariable& condition,
369 	struct team*& team)
370 {
371 	// We look up the team by ID, even in the case of the current team, so we
372 	// can be sure that the team is not already dying.
373 	if (teamID == B_CURRENT_TEAM)
374 		teamID = thread_get_current_thread()->team->id;
375 
376 	while (true) {
377 		// get the team
378 		InterruptsSpinLocker teamLocker(gTeamSpinlock);
379 
380 		team = team_get_team_struct_locked(teamID);
381 		if (team == NULL)
382 			return B_BAD_TEAM_ID;
383 
384 		// don't allow messing with the kernel team
385 		if (team == team_get_kernel_team())
386 			return B_NOT_ALLOWED;
387 
388 		// check whether the condition is already set
389 		SpinLocker threadLocker(gThreadSpinlock);
390 		SpinLocker debugInfoLocker(team->debug_info.lock);
391 
392 		if (team->debug_info.debugger_changed_condition == NULL) {
393 			// nobody there yet -- set our condition variable and be done
394 			team->debug_info.debugger_changed_condition = &condition;
395 			return B_OK;
396 		}
397 
398 		// we'll have to wait
399 		ConditionVariableEntry entry;
400 		team->debug_info.debugger_changed_condition->Add(&entry);
401 
402 		debugInfoLocker.Unlock();
403 		threadLocker.Unlock();
404 		teamLocker.Unlock();
405 
406 		entry.Wait();
407 	}
408 }
409 
410 
411 static void
412 prepare_debugger_change(struct team* team, ConditionVariable& condition)
413 {
414 	while (true) {
415 		// check whether the condition is already set
416 		InterruptsSpinLocker threadLocker(gThreadSpinlock);
417 		SpinLocker debugInfoLocker(team->debug_info.lock);
418 
419 		if (team->debug_info.debugger_changed_condition == NULL) {
420 			// nobody there yet -- set our condition variable and be done
421 			team->debug_info.debugger_changed_condition = &condition;
422 			return;
423 		}
424 
425 		// we'll have to wait
426 		ConditionVariableEntry entry;
427 		team->debug_info.debugger_changed_condition->Add(&entry);
428 
429 		debugInfoLocker.Unlock();
430 		threadLocker.Unlock();
431 
432 		entry.Wait();
433 	}
434 }
435 
436 
437 static void
438 finish_debugger_change(struct team* team)
439 {
440 	// unset our condition variable and notify all threads waiting on it
441 	InterruptsSpinLocker threadLocker(gThreadSpinlock);
442 	SpinLocker debugInfoLocker(team->debug_info.lock);
443 
444 	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
445 	team->debug_info.debugger_changed_condition = NULL;
446 
447 	condition->NotifyAll(true);
448 }
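

// A minimal usage sketch (illustrative only, kept in #if 0; the function name
// and body are hypothetical): code that must not race with a debugger being
// installed or removed brackets its work with prepare_debugger_change() and
// finish_debugger_change(). thread_hit_debug_event() and nub_thread_cleanup()
// below use exactly this pattern.
#if 0
static void
example_access_debug_info_without_debugger_change(struct team* team)
{
	ConditionVariable debugChangeCondition;
	prepare_debugger_change(team, debugChangeCondition);
		// from here on no other thread can change the team's debugger; late
		// arrivals block on our condition variable

	// ... inspect or modify team->debug_info (e.g. the breakpoint manager) ...

	finish_debugger_change(team);
		// unsets the condition variable and wakes up all waiters
}
#endif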
449 
450 
451 void
452 user_debug_prepare_for_exec()
453 {
454 	struct thread *thread = thread_get_current_thread();
455 	struct team *team = thread->team;
456 
457 	// If a debugger is installed for the team and the thread debug stuff
458 	// initialized, change the ownership of the debug port for the thread
459 	// to the kernel team, since exec_team() deletes all ports owned by this
460 	// team. We change the ownership back later.
461 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
462 		// get the port
463 		port_id debugPort = -1;
464 
465 		cpu_status state = disable_interrupts();
466 		GRAB_THREAD_LOCK();
467 
468 		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
469 			debugPort = thread->debug_info.debug_port;
470 
471 		RELEASE_THREAD_LOCK();
472 		restore_interrupts(state);
473 
474 		// set the new port ownership
475 		if (debugPort >= 0)
476 			set_port_owner(debugPort, team_get_kernel_team_id());
477 	}
478 }
479 
480 
481 void
482 user_debug_finish_after_exec()
483 {
484 	struct thread *thread = thread_get_current_thread();
485 	struct team *team = thread->team;
486 
487 	// If a debugger is installed for the team and the thread debug stuff
488 	// initialized for this thread, change the ownership of its debug port
489 	// back to this team.
490 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
491 		// get the port
492 		port_id debugPort = -1;
493 
494 		cpu_status state = disable_interrupts();
495 		GRAB_THREAD_LOCK();
496 
497 		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
498 			debugPort = thread->debug_info.debug_port;
499 
500 		RELEASE_THREAD_LOCK();
501 		restore_interrupts(state);
502 
503 		// set the new port ownership
504 		if (debugPort >= 0)
505 			set_port_owner(debugPort, team->id);
506 	}
507 }
508 
509 
510 void
511 init_user_debug()
512 {
513 	#ifdef ARCH_INIT_USER_DEBUG
514 		ARCH_INIT_USER_DEBUG();
515 	#endif
516 }
517 
518 
519 static void
520 get_team_debug_info(team_debug_info &teamDebugInfo)
521 {
522 	struct thread *thread = thread_get_current_thread();
523 
524 	cpu_status state = disable_interrupts();
525 	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
526 
527 	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));
528 
529 	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
530 	restore_interrupts(state);
531 }
532 
533 
534 static status_t
535 thread_hit_debug_event_internal(debug_debugger_message event,
536 	const void *message, int32 size, bool requireDebugger, bool &restart)
537 {
538 	restart = false;
539 	struct thread *thread = thread_get_current_thread();
540 
541 	TRACE(("thread_hit_debug_event(): thread: %ld, event: %lu, message: %p, "
542 		"size: %ld\n", thread->id, (uint32)event, message, size));
543 
544 	// check whether there's a debug port already
545 	bool setPort = !(atomic_get(&thread->debug_info.flags)
546 		& B_THREAD_DEBUG_INITIALIZED);
547 
548 	// create a port, if there is none yet
549 	port_id port = -1;
550 	if (setPort) {
551 		char nameBuffer[128];
552 		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %ld",
553 			thread->id);
554 
555 		port = create_port(1, nameBuffer);
556 		if (port < 0) {
557 			dprintf("thread_hit_debug_event(): Failed to create debug port: "
558 				"%s\n", strerror(port));
559 			return port;
560 		}
561 	}
562 
563 	// check the debug info structures once more: get the debugger port, set
564 	// the thread's debug port, and update the thread's debug flags
565 	port_id deletePort = port;
566 	port_id debuggerPort = -1;
567 	port_id nubPort = -1;
568 	status_t error = B_OK;
569 	cpu_status state = disable_interrupts();
570 	GRAB_THREAD_LOCK();
571 	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
572 
573 	uint32 threadFlags = thread->debug_info.flags;
574 	threadFlags &= ~B_THREAD_DEBUG_STOP;
575 	bool debuggerInstalled
576 		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
577 	if (thread->id == thread->team->debug_info.nub_thread) {
578 		// Ugh, we're the nub thread. We shouldn't be here.
579 		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %ld\n",
580 			thread->id));
581 
582 		error = B_ERROR;
583 
584 	} else if (debuggerInstalled || !requireDebugger) {
585 		if (debuggerInstalled) {
586 			debuggerPort = thread->team->debug_info.debugger_port;
587 			nubPort = thread->team->debug_info.nub_port;
588 		}
589 
590 		if (setPort) {
591 			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
592 				// someone created a port for us (the port we've created will
593 				// be deleted below)
594 				port = thread->debug_info.debug_port;
595 			} else {
596 				thread->debug_info.debug_port = port;
597 				deletePort = -1;	// keep the port
598 				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
599 			}
600 		} else {
601 			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
602 				port = thread->debug_info.debug_port;
603 			} else {
604 				// someone deleted our port
605 				error = B_ERROR;
606 			}
607 		}
608 	} else
609 		error = B_ERROR;
610 
611 	// update the flags
612 	if (error == B_OK)
613 		threadFlags |= B_THREAD_DEBUG_STOPPED;
614 	atomic_set(&thread->debug_info.flags, threadFlags);
615 
616 	update_thread_user_debug_flag(thread);
617 
618 	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
619 	RELEASE_THREAD_LOCK();
620 	restore_interrupts(state);
621 
622 	// delete the superfluous port
623 	if (deletePort >= 0)
624 		delete_port(deletePort);
625 
626 	if (error != B_OK) {
627 		TRACE(("thread_hit_debug_event() error: thread: %ld, error: %lx\n",
628 			thread->id, error));
629 		return error;
630 	}
631 
632 	// send a message to the debugger port
633 	if (debuggerInstalled) {
634 		// update the message's origin info first
635 		debug_origin *origin = (debug_origin *)message;
636 		origin->thread = thread->id;
637 		origin->team = thread->team->id;
638 		origin->nub_port = nubPort;
639 
640 		TRACE(("thread_hit_debug_event(): thread: %ld, sending message to "
641 			"debugger port %ld\n", thread->id, debuggerPort));
642 
643 		error = debugger_write(debuggerPort, event, message, size, false);
644 	}
645 
646 	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
647 	bool singleStep = false;
648 
649 	if (error == B_OK) {
650 		bool done = false;
651 		while (!done) {
652 			// read a command from the debug port
653 			int32 command;
654 			debugged_thread_message_data commandMessage;
655 			ssize_t commandMessageSize = read_port_etc(port, &command,
656 				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
657 				0);
658 
659 			if (commandMessageSize < 0) {
660 				error = commandMessageSize;
661 				TRACE(("thread_hit_debug_event(): thread: %ld, failed "
662 					"to receive message from port %ld: %lx\n",
663 					thread->id, port, error));
664 				break;
665 			}
666 
667 			switch (command) {
668 				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
669 					TRACE(("thread_hit_debug_event(): thread: %ld: "
670 						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
671 						thread->id));
672 					result = commandMessage.continue_thread.handle_event;
673 
674 					singleStep = commandMessage.continue_thread.single_step;
675 					done = true;
676 					break;
677 
678 				case B_DEBUGGED_THREAD_SET_CPU_STATE:
679 				{
680 					TRACE(("thread_hit_debug_event(): thread: %ld: "
681 						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
682 						thread->id));
683 					arch_set_debug_cpu_state(
684 						&commandMessage.set_cpu_state.cpu_state);
685 
686 					break;
687 				}
688 
689 				case B_DEBUGGED_THREAD_GET_CPU_STATE:
690 				{
691 					port_id replyPort = commandMessage.get_cpu_state.reply_port;
692 
693 					// prepare the message
694 					debug_nub_get_cpu_state_reply replyMessage;
695 					replyMessage.error = B_OK;
696 					replyMessage.message = event;
697 					arch_get_debug_cpu_state(&replyMessage.cpu_state);
698 
699 					// send it
700 					error = kill_interruptable_write_port(replyPort, event,
701 						&replyMessage, sizeof(replyMessage));
702 
703 					break;
704 				}
705 
706 				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
707 				{
708 					// Check whether the debugger really changed, i.e. is
709 					// different from the one we know.
710 					team_debug_info teamDebugInfo;
711 					get_team_debug_info(teamDebugInfo);
712 
713 					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
714 						if (!debuggerInstalled
715 							|| teamDebugInfo.debugger_port != debuggerPort) {
716 							// debugger was installed or has changed: restart
717 							// this function
718 							restart = true;
719 							done = true;
720 						}
721 					} else {
722 						if (debuggerInstalled) {
723 							// debugger is gone: continue the thread normally
724 							done = true;
725 						}
726 					}
727 
728 					break;
729 				}
730 			}
731 		}
732 	} else {
733 		TRACE(("thread_hit_debug_event(): thread: %ld, failed to send "
734 			"message to debugger port %ld: %lx\n", thread->id,
735 			debuggerPort, error));
736 	}
737 
738 	// update the thread debug info
739 	bool destroyThreadInfo = false;
740 	thread_debug_info threadDebugInfo;
741 
742 	state = disable_interrupts();
743 	GRAB_THREAD_LOCK();
744 
745 	// check whether the team is still being debugged
746 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
747 	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
748 		// update the single-step flag
749 		if (singleStep) {
750 			atomic_or(&thread->debug_info.flags,
751 				B_THREAD_DEBUG_SINGLE_STEP);
752 		} else {
753 			atomic_and(&thread->debug_info.flags,
754 				~B_THREAD_DEBUG_SINGLE_STEP);
755 		}
756 
757 		// unset the "stopped" state
758 		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);
759 
760 		update_thread_user_debug_flag(thread);
761 
762 	} else {
763 		// the debugger is gone: cleanup our info completely
764 		threadDebugInfo = thread->debug_info;
765 		clear_thread_debug_info(&thread->debug_info, false);
766 		destroyThreadInfo = true;
767 	}
768 
769 	RELEASE_THREAD_LOCK();
770 	restore_interrupts(state);
771 
772 	// enable/disable single stepping
773 	arch_update_thread_single_step();
774 
775 	if (destroyThreadInfo)
776 		destroy_thread_debug_info(&threadDebugInfo);
777 
778 	return (error == B_OK ? result : error);
779 }
780 
781 
782 static status_t
783 thread_hit_debug_event(debug_debugger_message event, const void *message,
784 	int32 size, bool requireDebugger)
785 {
786 	status_t result;
787 	bool restart;
788 	do {
789 		restart = false;
790 		result = thread_hit_debug_event_internal(event, message, size,
791 			requireDebugger, restart);
792 	} while (result >= 0 && restart);
793 
794 	// Prepare to continue -- we install a debugger change condition, so no-one
795 	// will change the debugger while we're playing with the breakpoint manager.
796 	// TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
797 	struct team* team = thread_get_current_thread()->team;
798 	ConditionVariable debugChangeCondition;
799 	prepare_debugger_change(team, debugChangeCondition);
800 
801 	if (team->debug_info.breakpoint_manager != NULL) {
802 		bool isSyscall;
803 		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
804 		if (pc != NULL && !isSyscall)
805 			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
806 	}
807 
808 	finish_debugger_change(team);
809 
810 	return result;
811 }
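

// Debugger-side counterpart, as a rough sketch (illustrative only, kept in
// #if 0): after receiving a stop event on its port, a debugger resumes the
// thread by writing B_DEBUG_MESSAGE_CONTINUE_THREAD to the nub port reported
// in the message's origin. The type names debug_debugger_message_data and
// debug_nub_continue_thread are assumed from <debugger.h>; the field names
// match those used by the nub code in this file.
#if 0
static void
example_debugger_continue(port_id debuggerPort)
{
	int32 code;
	debug_debugger_message_data message;
	if (read_port(debuggerPort, &code, &message, sizeof(message)) < 0)
		return;

	// resume the stopped thread, letting the kernel handle the event normally
	debug_nub_continue_thread continueMessage;
	continueMessage.thread = message.origin.thread;
	continueMessage.handle_event = B_THREAD_DEBUG_HANDLE_EVENT;
	continueMessage.single_step = false;

	write_port(message.origin.nub_port, B_DEBUG_MESSAGE_CONTINUE_THREAD,
		&continueMessage, sizeof(continueMessage));
}
#endif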
812 
813 
814 static status_t
815 thread_hit_serious_debug_event(debug_debugger_message event,
816 	const void *message, int32 messageSize)
817 {
818 	// ensure that a debugger is installed for this team
819 	status_t error = ensure_debugger_installed();
820 	if (error != B_OK) {
821 		struct thread *thread = thread_get_current_thread();
822 		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
823 			"thread: %ld: %s\n", thread->id, strerror(error));
824 		return error;
825 	}
826 
827 	// enter the debug loop
828 	return thread_hit_debug_event(event, message, messageSize, true);
829 }
830 
831 
832 void
833 user_debug_pre_syscall(uint32 syscall, void *args)
834 {
835 	// check whether a debugger is installed
836 	struct thread *thread = thread_get_current_thread();
837 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
838 	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
839 		return;
840 
841 	// check whether pre-syscall tracing is enabled for team or thread
842 	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
843 	if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
844 			&& !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
845 		return;
846 	}
847 
848 	// prepare the message
849 	debug_pre_syscall message;
850 	message.syscall = syscall;
851 
852 	// copy the syscall args
853 	if (syscall < (uint32)kSyscallCount) {
854 		if (kSyscallInfos[syscall].parameter_size > 0)
855 			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
856 	}
857 
858 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
859 		sizeof(message), true);
860 }
861 
862 
863 void
864 user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
865 	bigtime_t startTime)
866 {
867 	// check whether a debugger is installed
868 	struct thread *thread = thread_get_current_thread();
869 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
870 	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
871 		return;
872 
873 	// check whether post-syscall tracing is enabled for team or thread
874 	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
875 	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
876 			&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
877 		return;
878 	}
879 
880 	// prepare the message
881 	debug_post_syscall message;
882 	message.start_time = startTime;
883 	message.end_time = system_time();
884 	message.return_value = returnValue;
885 	message.syscall = syscall;
886 
887 	// copy the syscall args
888 	if (syscall < (uint32)kSyscallCount) {
889 		if (kSyscallInfos[syscall].parameter_size > 0)
890 			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
891 	}
892 
893 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
894 		sizeof(message), true);
895 }
896 
897 
898 /**	\brief To be called when an unhandled processor exception (error/fault)
899  *		   occurred.
900  *	\param exception The debug_exception_type value identifying the kind of fault.
901  *	\param signal The signal corresponding to the exception.
902  *	\return \c true if the caller shall continue normally, i.e. usually send
903  *			a deadly signal; \c false if the debugger insists on continuing
904  *			the program (e.g. because it has removed the cause of the
905  *			problem).
906  */
907 bool
908 user_debug_exception_occurred(debug_exception_type exception, int signal)
909 {
910 	// First check whether there's a signal handler installed for the signal.
911 	// If so, we don't want to install a debugger for the team. We always send
912 	// the signal instead. An already installed debugger will be notified if
913 	// it has requested notifications of signals.
914 	struct sigaction signalAction;
915 	if (sigaction(signal, NULL, &signalAction) == B_OK
916 		&& signalAction.sa_handler != SIG_DFL) {
917 		return true;
918 	}
919 
920 	// prepare the message
921 	debug_exception_occurred message;
922 	message.exception = exception;
923 	message.signal = signal;
924 
925 	status_t result = thread_hit_serious_debug_event(
926 		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
927 	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
928 }
929 
930 
931 bool
932 user_debug_handle_signal(int signal, struct sigaction *handler, bool deadly)
933 {
934 	// check whether a debugger is installed and is interested in signals
935 	struct thread *thread = thread_get_current_thread();
936 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
937 	if (~teamDebugFlags
938 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
939 		return true;
940 	}
941 
942 	// prepare the message
943 	debug_signal_received message;
944 	message.signal = signal;
945 	message.handler = *handler;
946 	message.deadly = deadly;
947 
948 	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
949 		&message, sizeof(message), true);
950 	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
951 }
952 
953 
954 void
955 user_debug_stop_thread()
956 {
957 	// prepare the message
958 	debug_thread_debugged message;
959 
960 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message,
961 		sizeof(message));
962 }
963 
964 
965 void
966 user_debug_team_created(team_id teamID)
967 {
968 	// check whether a debugger is installed and is interested in team creation
969 	// events
970 	struct thread *thread = thread_get_current_thread();
971 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
972 	if (~teamDebugFlags
973 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
974 		return;
975 	}
976 
977 	// prepare the message
978 	debug_team_created message;
979 	message.new_team = teamID;
980 
981 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
982 		sizeof(message), true);
983 }
984 
985 
986 void
987 user_debug_team_deleted(team_id teamID, port_id debuggerPort)
988 {
989 	if (debuggerPort >= 0) {
990 		TRACE(("user_debug_team_deleted(team: %ld, debugger port: %ld)\n",
991 			teamID, debuggerPort));
992 
993 		debug_team_deleted message;
994 		message.origin.thread = -1;
995 		message.origin.team = teamID;
996 		message.origin.nub_port = -1;
997 		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
998 			sizeof(message), B_RELATIVE_TIMEOUT, 0);
999 	}
1000 }
1001 
1002 
1003 void
1004 user_debug_team_exec()
1005 {
1006 	// check whether a debugger is installed and is interested in team creation
1007 	// events
1008 	struct thread *thread = thread_get_current_thread();
1009 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1010 	if (~teamDebugFlags
1011 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
1012 		return;
1013 	}
1014 
1015 	// prepare the message
1016 	debug_team_exec message;
1017 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1018 		+ 1;
1019 
1020 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
1021 		sizeof(message), true);
1022 }
1023 
1024 
1025 void
1026 user_debug_update_new_thread_flags(thread_id threadID)
1027 {
1028 	// Update thread::flags of the thread.
1029 
1030 	InterruptsLocker interruptsLocker;
1031 
1032 	SpinLocker teamLocker(gTeamSpinlock);
1033 	SpinLocker threadLocker(gThreadSpinlock);
1034 
1035 	struct thread *thread = thread_get_thread_struct_locked(threadID);
1036 	if (!thread)
1037 		return;
1038 
1039 	update_thread_user_debug_flag(thread);
1040 	update_thread_breakpoints_flag(thread);
1041 	update_thread_debugger_installed_flag(thread);
1042 }
1043 
1044 
1045 void
1046 user_debug_thread_created(thread_id threadID)
1047 {
1048 	// check whether a debugger is installed and is interested in thread events
1049 	struct thread *thread = thread_get_current_thread();
1050 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1051 	if (~teamDebugFlags
1052 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
1053 		return;
1054 	}
1055 
1056 	// prepare the message
1057 	debug_thread_created message;
1058 	message.new_thread = threadID;
1059 
1060 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message,
1061 		sizeof(message), true);
1062 }
1063 
1064 
1065 void
1066 user_debug_thread_deleted(team_id teamID, thread_id threadID)
1067 {
1068 	// Things are a bit complicated here, since this thread no longer belongs to
1069 	// the debugged team (but to the kernel). So we can't use debugger_write().
1070 
1071 	// get the team debug flags and debugger port
1072 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
1073 
1074 	struct team *team = team_get_team_struct_locked(teamID);
1075 	if (team == NULL)
1076 		return;
1077 
1078 	SpinLocker debugInfoLocker(team->debug_info.lock);
1079 
1080 	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
1081 	port_id debuggerPort = team->debug_info.debugger_port;
1082 	sem_id writeLock = team->debug_info.debugger_write_lock;
1083 
1084 	debugInfoLocker.Unlock();
1085 	teamLocker.Unlock();
1086 
1087 	// check whether a debugger is installed and is interested in thread events
1088 	if (~teamDebugFlags
1089 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
1090 		return;
1091 	}
1092 
1093 	// acquire the debugger write lock
1094 	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
1095 	if (error != B_OK)
1096 		return;
1097 
1098 	// re-get the team debug info -- we need to check whether anything changed
1099 	teamLocker.Lock();
1100 
1101 	team = team_get_team_struct_locked(teamID);
1102 	if (team == NULL)
1103 		return;
1104 
1105 	debugInfoLocker.Lock();
1106 
1107 	teamDebugFlags = atomic_get(&team->debug_info.flags);
1108 	port_id newDebuggerPort = team->debug_info.debugger_port;
1109 
1110 	debugInfoLocker.Unlock();
1111 	teamLocker.Unlock();
1112 
1113 	// Send the message only if the debugger hasn't changed in the meantime or
1114 	// the team is about to be handed over.
1115 	if (newDebuggerPort == debuggerPort
1116 		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
1117 		debug_thread_deleted message;
1118 		message.origin.thread = threadID;
1119 		message.origin.team = teamID;
1120 		message.origin.nub_port = -1;
1121 
1122 		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
1123 			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
1124 	}
1125 
1126 	// release the debugger write lock
1127 	release_sem(writeLock);
1128 }
1129 
1130 
1131 void
1132 user_debug_thread_exiting(struct thread* thread)
1133 {
1134 	InterruptsLocker interruptsLocker;
1135 	SpinLocker teamLocker(gTeamSpinlock);
1136 
1137 	struct team* team = thread->team;
1138 
1139 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1140 
1141 	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
1142 	port_id debuggerPort = team->debug_info.debugger_port;
1143 
1144 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1145 
1146 	teamLocker.Unlock();
1147 
1148 	// check whether a debugger is installed
1149 	if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
1150 		|| debuggerPort < 0) {
1151 		return;
1152 	}
1153 
1154 	// detach the profile info and mark the thread dying
1155 	SpinLocker threadLocker(gThreadSpinlock);
1156 
1157 	thread_debug_info& threadDebugInfo = thread->debug_info;
1158 	if (threadDebugInfo.profile.samples == NULL)
1159 		return;
1160 
1161 	area_id sampleArea = threadDebugInfo.profile.sample_area;
1162 	int32 sampleCount = threadDebugInfo.profile.sample_count;
1163 	int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
1164 	int32 stackDepth = threadDebugInfo.profile.stack_depth;
1165 	bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
1166 	int32 imageEvent = threadDebugInfo.profile.image_event;
1167 	threadDebugInfo.profile.sample_area = -1;
1168 	threadDebugInfo.profile.samples = NULL;
1169 	threadDebugInfo.profile.buffer_full = false;
1170 
1171 	atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);
1172 
1173 	threadLocker.Unlock();
1174 	interruptsLocker.Unlock();
1175 
1176 	// notify the debugger
1177 	debug_profiler_update message;
1178 	message.origin.thread = thread->id;
1179 	message.origin.team = thread->team->id;
1180 	message.origin.nub_port = -1;	// asynchronous message
1181 	message.sample_count = sampleCount;
1182 	message.dropped_ticks = droppedTicks;
1183 	message.stack_depth = stackDepth;
1184 	message.variable_stack_depth = variableStackDepth;
1185 	message.image_event = imageEvent;
1186 	message.stopped = true;
1187 	debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
1188 		&message, sizeof(message), false);
1189 
1190 	if (sampleArea >= 0) {
1191 		area_info areaInfo;
1192 		if (get_area_info(sampleArea, &areaInfo) == B_OK) {
1193 			unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
1194 			delete_area(sampleArea);
1195 		}
1196 	}
1197 }
1198 
1199 
1200 void
1201 user_debug_image_created(const image_info *imageInfo)
1202 {
1203 	// check whether a debugger is installed and is interested in image events
1204 	struct thread *thread = thread_get_current_thread();
1205 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1206 	if (~teamDebugFlags
1207 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
1208 		return;
1209 	}
1210 
1211 	// prepare the message
1212 	debug_image_created message;
1213 	memcpy(&message.info, imageInfo, sizeof(image_info));
1214 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1215 		+ 1;
1216 
1217 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
1218 		sizeof(message), true);
1219 }
1220 
1221 
1222 void
1223 user_debug_image_deleted(const image_info *imageInfo)
1224 {
1225 	// check whether a debugger is installed and is interested in image events
1226 	struct thread *thread = thread_get_current_thread();
1227 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1228 	if (~teamDebugFlags
1229 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
1230 		return;
1231 	}
1232 
1233 	// prepare the message
1234 	debug_image_deleted message;
1235 	memcpy(&message.info, imageInfo, sizeof(image_info));
1236 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1237 		+ 1;
1238 
1239 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message,
1240 		sizeof(message), true);
1241 }
1242 
1243 
1244 void
1245 user_debug_breakpoint_hit(bool software)
1246 {
1247 	// prepare the message
1248 	debug_breakpoint_hit message;
1249 	arch_get_debug_cpu_state(&message.cpu_state);
1250 
1251 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
1252 		sizeof(message));
1253 }
1254 
1255 
1256 void
1257 user_debug_watchpoint_hit()
1258 {
1259 	// prepare the message
1260 	debug_watchpoint_hit message;
1261 	arch_get_debug_cpu_state(&message.cpu_state);
1262 
1263 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
1264 		sizeof(message));
1265 }
1266 
1267 
1268 void
1269 user_debug_single_stepped()
1270 {
1271 	// prepare the message
1272 	debug_single_step message;
1273 	arch_get_debug_cpu_state(&message.cpu_state);
1274 
1275 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
1276 		sizeof(message));
1277 }
1278 
1279 
1280 static void
1281 schedule_profiling_timer(struct thread* thread, bigtime_t interval)
1282 {
1283 	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
1284 	thread->debug_info.profile.installed_timer = timer;
1285 	thread->debug_info.profile.timer_end = system_time() + interval;
1286 	add_timer(timer, &profiling_event, interval,
1287 		B_ONE_SHOT_RELATIVE_TIMER | B_TIMER_ACQUIRE_THREAD_LOCK);
1288 }
1289 
1290 
1291 static bool
1292 profiling_do_sample(bool& flushBuffer)
1293 {
1294 	struct thread* thread = thread_get_current_thread();
1295 	thread_debug_info& debugInfo = thread->debug_info;
1296 
1297 	if (debugInfo.profile.samples == NULL)
1298 		return false;
1299 
1300 	// Check whether the buffer is full or an image event occurred since the
1301 	// last sample was taken.
1302 	int32 maxSamples = debugInfo.profile.max_samples;
1303 	int32 sampleCount = debugInfo.profile.sample_count;
1304 	int32 stackDepth = debugInfo.profile.stack_depth;
1305 	int32 imageEvent = thread->team->debug_info.image_event;
1306 	if (debugInfo.profile.sample_count > 0) {
1307 		if (debugInfo.profile.last_image_event < imageEvent
1308 			&& debugInfo.profile.variable_stack_depth
1309 			&& sampleCount + 2 <= maxSamples) {
1310 			// an image event occurred, but we use variable stack depth and
1311 			// have enough room in the buffer to indicate an image event
1312 			addr_t* event = debugInfo.profile.samples + sampleCount;
1313 			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
1314 			event[1] = imageEvent;
1315 			sampleCount += 2;
1316 			debugInfo.profile.sample_count = sampleCount;
1317 			debugInfo.profile.last_image_event = imageEvent;
1318 		}
1319 
1320 		if (debugInfo.profile.last_image_event < imageEvent
1321 			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
1322 			if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
1323 				flushBuffer = true;
1324 				return true;
1325 			}
1326 
1327 			// We can't flush the buffer now, since we interrupted a kernel
1328 			// function. If the buffer is not full yet, we add the samples,
1329 			// otherwise we have to drop them.
1330 			if (maxSamples - sampleCount < stackDepth) {
1331 				debugInfo.profile.dropped_ticks++;
1332 				return true;
1333 			}
1334 		}
1335 	} else {
1336 		// first sample -- set the image event
1337 		debugInfo.profile.image_event = imageEvent;
1338 		debugInfo.profile.last_image_event = imageEvent;
1339 	}
1340 
1341 	// get the samples
1342 	addr_t* returnAddresses = debugInfo.profile.samples
1343 		+ debugInfo.profile.sample_count;
1344 	if (debugInfo.profile.variable_stack_depth) {
1345 		// variable sample count per hit
1346 		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
1347 			stackDepth - 1, 1, 0, false);
1348 
1349 		debugInfo.profile.sample_count += *returnAddresses + 1;
1350 	} else {
1351 		// fixed sample count per hit
1352 		if (stackDepth > 1) {
1353 			int32 count = arch_debug_get_stack_trace(returnAddresses,
1354 				stackDepth, 1, 0, false);
1355 
1356 			for (int32 i = count; i < stackDepth; i++)
1357 				returnAddresses[i] = 0;
1358 		} else
1359 			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);
1360 
1361 		debugInfo.profile.sample_count += stackDepth;
1362 	}
1363 
1364 	return true;
1365 }
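

// Worked example for the flush check in profiling_do_sample() above
// (hypothetical numbers): with flush_threshold = 950, stack_depth = 5 and
// sample_count = 948, the room left below the threshold (950 - 948 = 2) is
// smaller than one full hit (5 slots), so the buffer is scheduled for
// flushing -- unless the sample interrupted kernel code, in which case the
// hit is still stored if max_samples allows it, or counted as a dropped tick.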
1366 
1367 
1368 static void
1369 profiling_buffer_full(void*)
1370 {
1371 	struct thread* thread = thread_get_current_thread();
1372 	thread_debug_info& debugInfo = thread->debug_info;
1373 
1374 	GRAB_THREAD_LOCK();
1375 
1376 	if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
1377 		int32 sampleCount = debugInfo.profile.sample_count;
1378 		int32 droppedTicks = debugInfo.profile.dropped_ticks;
1379 		int32 stackDepth = debugInfo.profile.stack_depth;
1380 		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
1381 		int32 imageEvent = debugInfo.profile.image_event;
1382 
1383 		// notify the debugger
1384 		debugInfo.profile.sample_count = 0;
1385 		debugInfo.profile.dropped_ticks = 0;
1386 
1387 		RELEASE_THREAD_LOCK();
1388 		enable_interrupts();
1389 
1390 		// prepare the message
1391 		debug_profiler_update message;
1392 		message.sample_count = sampleCount;
1393 		message.dropped_ticks = droppedTicks;
1394 		message.stack_depth = stackDepth;
1395 		message.variable_stack_depth = variableStackDepth;
1396 		message.image_event = imageEvent;
1397 		message.stopped = false;
1398 
1399 		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
1400 			sizeof(message), false);
1401 
1402 		disable_interrupts();
1403 		GRAB_THREAD_LOCK();
1404 
1405 		// do the sampling and reschedule timer, if still profiling this thread
1406 		bool flushBuffer;
1407 		if (profiling_do_sample(flushBuffer)) {
1408 			debugInfo.profile.buffer_full = false;
1409 			schedule_profiling_timer(thread, debugInfo.profile.interval);
1410 		}
1411 	}
1412 
1413 	RELEASE_THREAD_LOCK();
1414 }
1415 
1416 
1417 /*!	The thread spinlock is being held.
1418 */
1419 static int32
1420 profiling_event(timer* /*unused*/)
1421 {
1422 	struct thread* thread = thread_get_current_thread();
1423 	thread_debug_info& debugInfo = thread->debug_info;
1424 
1425 	bool flushBuffer = false;
1426 	if (profiling_do_sample(flushBuffer)) {
1427 		if (flushBuffer) {
1428 			// The sample buffer needs to be flushed; we'll have to notify the
1429 			// debugger. We can't do that right here. Instead we set a post
1430 			// interrupt callback doing that for us, and don't reschedule the
1431 			// timer yet.
1432 			thread->post_interrupt_callback = profiling_buffer_full;
1433 			debugInfo.profile.installed_timer = NULL;
1434 			debugInfo.profile.buffer_full = true;
1435 		} else
1436 			schedule_profiling_timer(thread, debugInfo.profile.interval);
1437 	} else
1438 		debugInfo.profile.installed_timer = NULL;
1439 
1440 	return B_HANDLED_INTERRUPT;
1441 }
1442 
1443 
1444 void
1445 user_debug_thread_unscheduled(struct thread* thread)
1446 {
1447 	// if running, cancel the profiling timer
1448 	struct timer* timer = thread->debug_info.profile.installed_timer;
1449 	if (timer != NULL) {
1450 		// track remaining time
1451 		bigtime_t left = thread->debug_info.profile.timer_end - system_time();
1452 		thread->debug_info.profile.interval_left = max_c(left, 0);
1453 		thread->debug_info.profile.installed_timer = NULL;
1454 
1455 		// cancel timer
1456 		cancel_timer(timer);
1457 	}
1458 }
1459 
1460 
1461 void
1462 user_debug_thread_scheduled(struct thread* thread)
1463 {
1464 	if (thread->debug_info.profile.samples != NULL
1465 		&& !thread->debug_info.profile.buffer_full) {
1466 		// install profiling timer
1467 		schedule_profiling_timer(thread,
1468 			thread->debug_info.profile.interval_left);
1469 	}
1470 }
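

// Example of the interval bookkeeping above (hypothetical numbers): if a
// profiled thread is unscheduled 300 microseconds before its profiling timer
// would fire, interval_left is set to 300; when the thread is scheduled again,
// the timer is re-armed with that remainder rather than a full interval, so
// the sampling rate is preserved across context switches.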
1471 
1472 
1473 /*!	\brief Called by the debug nub thread of a team to broadcast a message to
1474 		all threads of the team that are initialized for debugging (and
1475 		thus have a debug port).
1476 */
1477 static void
1478 broadcast_debugged_thread_message(struct thread *nubThread, int32 code,
1479 	const void *message, int32 size)
1480 {
1481 	// iterate through the threads
1482 	thread_info threadInfo;
1483 	int32 cookie = 0;
1484 	while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
1485 			== B_OK) {
1486 		// find the thread and get its debug port
1487 		cpu_status state = disable_interrupts();
1488 		GRAB_THREAD_LOCK();
1489 
1490 		port_id threadDebugPort = -1;
1491 		thread_id threadID = -1;
1492 		struct thread *thread
1493 			= thread_get_thread_struct_locked(threadInfo.thread);
1494 		if (thread && thread != nubThread && thread->team == nubThread->team
1495 			&& (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
1496 			&& (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
1497 			threadDebugPort = thread->debug_info.debug_port;
1498 			threadID = thread->id;
1499 		}
1500 
1501 		RELEASE_THREAD_LOCK();
1502 		restore_interrupts(state);
1503 
1504 		// send the message to the thread
1505 		if (threadDebugPort >= 0) {
1506 			status_t error = kill_interruptable_write_port(threadDebugPort,
1507 				code, message, size);
1508 			if (error != B_OK) {
1509 				TRACE(("broadcast_debugged_thread_message(): Failed to send "
1510 					"message to thread %ld: %lx\n", threadID, error));
1511 			}
1512 		}
1513 	}
1514 }
1515 
1516 
1517 static void
1518 nub_thread_cleanup(struct thread *nubThread)
1519 {
1520 	TRACE(("nub_thread_cleanup(%ld): debugger port: %ld\n", nubThread->id,
1521 		nubThread->team->debug_info.debugger_port));
1522 
1523 	ConditionVariable debugChangeCondition;
1524 	prepare_debugger_change(nubThread->team, debugChangeCondition);
1525 
1526 	team_debug_info teamDebugInfo;
1527 	bool destroyDebugInfo = false;
1528 
1529 	cpu_status state = disable_interrupts();
1530 	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1531 
1532 	team_debug_info &info = nubThread->team->debug_info;
1533 	if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
1534 		&& info.nub_thread == nubThread->id) {
1535 		teamDebugInfo = info;
1536 		clear_team_debug_info(&info, false);
1537 		destroyDebugInfo = true;
1538 	}
1539 
1540 	// update the thread::flags fields
1541 	update_threads_debugger_installed_flag(nubThread->team);
1542 
1543 	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1544 	restore_interrupts(state);
1545 
1546 	if (destroyDebugInfo)
1547 		teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();
1548 
1549 	finish_debugger_change(nubThread->team);
1550 
1551 	if (destroyDebugInfo)
1552 		destroy_team_debug_info(&teamDebugInfo);
1553 
1554 	// notify all threads that the debugger is gone
1555 	broadcast_debugged_thread_message(nubThread,
1556 		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
1557 }
1558 
1559 
1560 /**	\brief Debug nub thread helper function that returns the debug port of
1561  *		   a thread of the same team.
1562  */
1563 static status_t
1564 debug_nub_thread_get_thread_debug_port(struct thread *nubThread,
1565 	thread_id threadID, port_id &threadDebugPort)
1566 {
1567 	status_t result = B_OK;
1568 	threadDebugPort = -1;
1569 
1570 	cpu_status state = disable_interrupts();
1571 	GRAB_THREAD_LOCK();
1572 
1573 	struct thread *thread = thread_get_thread_struct_locked(threadID);
1574 	if (thread) {
1575 		if (thread->team != nubThread->team)
1576 			result = B_BAD_VALUE;
1577 		else if (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED)
1578 			threadDebugPort = thread->debug_info.debug_port;
1579 		else
1580 			result = B_BAD_THREAD_STATE;
1581 	} else
1582 		result = B_BAD_THREAD_ID;
1583 
1584 	RELEASE_THREAD_LOCK();
1585 	restore_interrupts(state);
1586 
1587 	if (result == B_OK && threadDebugPort < 0)
1588 		result = B_ERROR;
1589 
1590 	return result;
1591 }
1592 
1593 
1594 static status_t
1595 debug_nub_thread(void *)
1596 {
1597 	struct thread *nubThread = thread_get_current_thread();
1598 
1599 	// check whether we're still the current nub thread and get our port
1600 	cpu_status state = disable_interrupts();
1601 
1602 	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1603 
1604 	if (nubThread->team->debug_info.nub_thread != nubThread->id) {
1605 		RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1606 		restore_interrupts(state);
1607 		return 0;
1608 	}
1609 
1610 	port_id port = nubThread->team->debug_info.nub_port;
1611 	sem_id writeLock = nubThread->team->debug_info.debugger_write_lock;
1612 	BreakpointManager* breakpointManager
1613 		= nubThread->team->debug_info.breakpoint_manager;
1614 
1615 	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1616 	restore_interrupts(state);
1617 
1618 	TRACE(("debug_nub_thread() thread: %ld, team %ld, nub port: %ld\n",
1619 		nubThread->id, nubThread->team->id, port));
1620 
1621 	// notify all threads that a debugger has been installed
1622 	broadcast_debugged_thread_message(nubThread,
1623 		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
1624 
1625 	// command processing loop
1626 	while (true) {
1627 		int32 command;
1628 		debug_nub_message_data message;
1629 		ssize_t messageSize = read_port_etc(port, &command, &message,
1630 			sizeof(message), B_KILL_CAN_INTERRUPT, 0);
1631 
1632 		if (messageSize < 0) {
1633 			// The port is no longer valid or we were interrupted by a kill
1634 			// signal: If we are still listed in the team's debug info as nub
1635 			// thread, we need to update that.
1636 			nub_thread_cleanup(nubThread);
1637 
1638 			TRACE(("nub thread %ld: terminating: %lx\n", nubThread->id,
1639 				messageSize));
1640 
1641 			return messageSize;
1642 		}
1643 
1644 		bool sendReply = false;
1645 		union {
1646 			debug_nub_read_memory_reply			read_memory;
1647 			debug_nub_write_memory_reply		write_memory;
1648 			debug_nub_get_cpu_state_reply		get_cpu_state;
1649 			debug_nub_set_breakpoint_reply		set_breakpoint;
1650 			debug_nub_set_watchpoint_reply		set_watchpoint;
1651 			debug_nub_get_signal_masks_reply	get_signal_masks;
1652 			debug_nub_get_signal_handler_reply	get_signal_handler;
1653 			debug_nub_start_profiler_reply		start_profiler;
1654 			debug_profiler_update				profiler_update;
1655 		} reply;
1656 		int32 replySize = 0;
1657 		port_id replyPort = -1;
1658 
1659 		// process the command
1660 		switch (command) {
1661 			case B_DEBUG_MESSAGE_READ_MEMORY:
1662 			{
1663 				// get the parameters
1664 				replyPort = message.read_memory.reply_port;
1665 				void *address = message.read_memory.address;
1666 				int32 size = message.read_memory.size;
1667 				status_t result = B_OK;
1668 
1669 				// check the parameters
1670 				if (!BreakpointManager::CanAccessAddress(address, false))
1671 					result = B_BAD_ADDRESS;
1672 				else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE)
1673 					result = B_BAD_VALUE;
1674 
1675 				// read the memory
1676 				size_t bytesRead = 0;
1677 				if (result == B_OK) {
1678 					result = breakpointManager->ReadMemory(address,
1679 						reply.read_memory.data, size, bytesRead);
1680 				}
1681 				reply.read_memory.error = result;
1682 
1683 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_READ_MEMORY: "
1684 					"reply port: %ld, address: %p, size: %ld, result: %lx, "
1685 					"read: %ld\n", nubThread->id, replyPort, address, size,
1686 					result, bytesRead));
1687 
1688 				// send only as much data as necessary
1689 				reply.read_memory.size = bytesRead;
1690 				replySize = reply.read_memory.data + bytesRead - (char*)&reply;
1691 				sendReply = true;
1692 				break;
1693 			}
1694 
1695 			case B_DEBUG_MESSAGE_WRITE_MEMORY:
1696 			{
1697 				// get the parameters
1698 				replyPort = message.write_memory.reply_port;
1699 				void *address = message.write_memory.address;
1700 				int32 size = message.write_memory.size;
1701 				const char *data = message.write_memory.data;
1702 				int32 realSize = (char*)&message + messageSize - data;
1703 				status_t result = B_OK;
1704 
1705 				// check the parameters
1706 				if (!BreakpointManager::CanAccessAddress(address, true))
1707 					result = B_BAD_ADDRESS;
1708 				else if (size <= 0 || size > realSize)
1709 					result = B_BAD_VALUE;
1710 
1711 				// write the memory
1712 				size_t bytesWritten = 0;
1713 				if (result == B_OK) {
1714 					result = breakpointManager->WriteMemory(address, data, size,
1715 						bytesWritten);
1716 				}
1717 				reply.write_memory.error = result;
1718 
1719 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_WRITE_MEMORY: "
1720 					"reply port: %ld, address: %p, size: %ld, result: %lx, "
1721 					"written: %ld\n", nubThread->id, replyPort, address, size,
1722 					result, bytesWritten));
1723 
1724 				reply.write_memory.size = bytesWritten;
1725 				sendReply = true;
1726 				replySize = sizeof(debug_nub_write_memory_reply);
1727 				break;
1728 			}
1729 
1730 			case B_DEBUG_MESSAGE_SET_TEAM_FLAGS:
1731 			{
1732 				// get the parameters
1733 				int32 flags = message.set_team_flags.flags
1734 					& B_TEAM_DEBUG_USER_FLAG_MASK;
1735 
1736 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_TEAM_FLAGS: "
1737 					"flags: %lx\n", nubThread->id, flags));
1738 
1739 				struct team *team = thread_get_current_thread()->team;
1740 
1741 				// set the flags
1742 				cpu_status state = disable_interrupts();
1743 				GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1744 
1745 				flags |= team->debug_info.flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK;
1746 				atomic_set(&team->debug_info.flags, flags);
1747 
1748 				RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1749 				restore_interrupts(state);
1750 
1751 				break;
1752 			}
1753 
1754 			case B_DEBUG_MESSAGE_SET_THREAD_FLAGS:
1755 			{
1756 				// get the parameters
1757 				thread_id threadID = message.set_thread_flags.thread;
1758 				int32 flags = message.set_thread_flags.flags
1759 					& B_THREAD_DEBUG_USER_FLAG_MASK;
1760 
1761 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_THREAD_FLAGS: "
1762 					"thread: %ld, flags: %lx\n", nubThread->id, threadID,
1763 					flags));
1764 
1765 				// set the flags
1766 				cpu_status state = disable_interrupts();
1767 				GRAB_THREAD_LOCK();
1768 
1769 				struct thread *thread
1770 					= thread_get_thread_struct_locked(threadID);
1771 				if (thread
1772 					&& thread->team == thread_get_current_thread()->team) {
1773 					flags |= thread->debug_info.flags
1774 						& B_THREAD_DEBUG_KERNEL_FLAG_MASK;
1775 					atomic_set(&thread->debug_info.flags, flags);
1776 				}
1777 
1778 				RELEASE_THREAD_LOCK();
1779 				restore_interrupts(state);
1780 
1781 				break;
1782 			}
1783 
1784 			case B_DEBUG_MESSAGE_CONTINUE_THREAD:
1785 			{
1786 				// get the parameters
1787 				thread_id threadID;
1788 				uint32 handleEvent;
1789 				bool singleStep;
1790 
1791 				threadID = message.continue_thread.thread;
1792 				handleEvent = message.continue_thread.handle_event;
1793 				singleStep = message.continue_thread.single_step;
1794 
1795 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_CONTINUE_THREAD: "
1796 					"thread: %ld, handle event: %lu, single step: %d\n",
1797 					nubThread->id, threadID, handleEvent, singleStep));
1798 
1799 				// find the thread and get its debug port
1800 				port_id threadDebugPort = -1;
1801 				status_t result = debug_nub_thread_get_thread_debug_port(
1802 					nubThread, threadID, threadDebugPort);
1803 
1804 				// send a message to the debugged thread
1805 				if (result == B_OK) {
1806 					debugged_thread_continue commandMessage;
1807 					commandMessage.handle_event = handleEvent;
1808 					commandMessage.single_step = singleStep;
1809 
1810 					result = write_port(threadDebugPort,
1811 						B_DEBUGGED_THREAD_MESSAGE_CONTINUE,
1812 						&commandMessage, sizeof(commandMessage));
1813 				}
1814 
1815 				break;
1816 			}
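			// Illustrative sketch (not part of this implementation): after a
			// debug event has stopped `thread`, a debugger lets it run again
			// roughly like this (`nubPort` and `thread` are hypothetical, the
			// struct and constants come from <debugger.h>):
			//
			//	debug_nub_continue_thread message;
			//	message.thread = thread;
			//	message.handle_event = B_THREAD_DEBUG_HANDLE_EVENT;
			//		// or B_THREAD_DEBUG_IGNORE_EVENT
			//	message.single_step = false;
			//	write_port(nubPort, B_DEBUG_MESSAGE_CONTINUE_THREAD, &message,
			//		sizeof(message));
			//
			// No reply is sent for this request; errors are only visible
			// indirectly (e.g. the thread not resuming).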
1817 
1818 			case B_DEBUG_MESSAGE_SET_CPU_STATE:
1819 			{
1820 				// get the parameters
1821 				thread_id threadID = message.set_cpu_state.thread;
1822 				const debug_cpu_state &cpuState
1823 					= message.set_cpu_state.cpu_state;
1824 
1825 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_CPU_STATE: "
1826 					"thread: %ld\n", nubThread->id, threadID));
1827 
1828 				// find the thread and get its debug port
1829 				port_id threadDebugPort = -1;
1830 				status_t result = debug_nub_thread_get_thread_debug_port(
1831 					nubThread, threadID, threadDebugPort);
1832 
1833 				// send a message to the debugged thread
1834 				if (result == B_OK) {
1835 					debugged_thread_set_cpu_state commandMessage;
1836 					memcpy(&commandMessage.cpu_state, &cpuState,
1837 						sizeof(debug_cpu_state));
1838 					write_port(threadDebugPort,
1839 						B_DEBUGGED_THREAD_SET_CPU_STATE,
1840 						&commandMessage, sizeof(commandMessage));
1841 				}
1842 
1843 				break;
1844 			}
1845 
1846 			case B_DEBUG_MESSAGE_GET_CPU_STATE:
1847 			{
1848 				// get the parameters
1849 				thread_id threadID = message.get_cpu_state.thread;
1850 				replyPort = message.get_cpu_state.reply_port;
1851 
1852 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_GET_CPU_STATE: "
1853 					"thread: %ld\n", nubThread->id, threadID));
1854 
1855 				// find the thread and get its debug port
1856 				port_id threadDebugPort = -1;
1857 				status_t result = debug_nub_thread_get_thread_debug_port(
1858 					nubThread, threadID, threadDebugPort);
1859 
1860 				// send a message to the debugged thread
1861 				if (threadDebugPort >= 0) {
1862 					debugged_thread_get_cpu_state commandMessage;
1863 					commandMessage.reply_port = replyPort;
1864 					result = write_port(threadDebugPort,
1865 						B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage,
1866 						sizeof(commandMessage));
1867 				}
1868 
1869 				// send a reply to the debugger in case of error
1870 				if (result != B_OK) {
1871 					reply.get_cpu_state.error = result;
1872 					sendReply = true;
1873 					replySize = sizeof(reply.get_cpu_state);
1874 				}
1875 
1876 				break;
1877 			}
1878 
1879 			case B_DEBUG_MESSAGE_SET_BREAKPOINT:
1880 			{
1881 				// get the parameters
1882 				replyPort = message.set_breakpoint.reply_port;
1883 				void *address = message.set_breakpoint.address;
1884 
1885 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_BREAKPOINT: "
1886 					"address: %p\n", nubThread->id, address));
1887 
1888 				// check the address
1889 				status_t result = B_OK;
1890 				if (address == NULL
1891 					|| !BreakpointManager::CanAccessAddress(address, false)) {
1892 					result = B_BAD_ADDRESS;
1893 				}
1894 
1895 				// set the breakpoint
1896 				if (result == B_OK)
1897 					result = breakpointManager->InstallBreakpoint(address);
1898 
1899 				if (result == B_OK)
1900 					update_threads_breakpoints_flag();
1901 
1902 				// prepare the reply
1903 				reply.set_breakpoint.error = result;
1904 				replySize = sizeof(reply.set_breakpoint);
1905 				sendReply = true;
1906 
1907 				break;
1908 			}
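			// Illustrative sketch (not part of this implementation): the
			// debugger-side counterpart of this case (`nubPort`, `replyPort`
			// and `address` are hypothetical):
			//
			//	debug_nub_set_breakpoint message;
			//	message.reply_port = replyPort;
			//	message.address = address;
			//	write_port(nubPort, B_DEBUG_MESSAGE_SET_BREAKPOINT, &message,
			//		sizeof(message));
			//
			//	debug_nub_set_breakpoint_reply reply;
			//	int32 code;
			//	read_port(replyPort, &code, &reply, sizeof(reply));
			//	// reply.error tells whether the breakpoint was installed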
1909 
1910 			case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT:
1911 			{
1912 				// get the parameters
1913 				void *address = message.clear_breakpoint.address;
1914 
1915 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_CLEAR_BREAKPOINT: "
1916 					"address: %p\n", nubThread->id, address));
1917 
1918 				// check the address
1919 				status_t result = B_OK;
1920 				if (address == NULL
1921 					|| !BreakpointManager::CanAccessAddress(address, false)) {
1922 					result = B_BAD_ADDRESS;
1923 				}
1924 
1925 				// clear the breakpoint
1926 				if (result == B_OK)
1927 					result = breakpointManager->UninstallBreakpoint(address);
1928 
1929 				if (result == B_OK)
1930 					update_threads_breakpoints_flag();
1931 
1932 				break;
1933 			}
1934 
1935 			case B_DEBUG_MESSAGE_SET_WATCHPOINT:
1936 			{
1937 				// get the parameters
1938 				replyPort = message.set_watchpoint.reply_port;
1939 				void *address = message.set_watchpoint.address;
1940 				uint32 type = message.set_watchpoint.type;
1941 				int32 length = message.set_watchpoint.length;
1942 
1943 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_WATCHPOINT: "
1944 					"address: %p, type: %lu, length: %ld\n", nubThread->id,
1945 					address, type, length));
1946 
1947 				// check the address and size
1948 				status_t result = B_OK;
1949 				if (address == NULL
1950 					|| !BreakpointManager::CanAccessAddress(address, false)) {
1951 					result = B_BAD_ADDRESS;
1952 				} else if (length < 0)
1954 					result = B_BAD_VALUE;
1955 
1956 				// set the watchpoint
1957 				if (result == B_OK) {
1958 					result = breakpointManager->InstallWatchpoint(address, type,
1959 						length);
1960 				}
1961 
1962 				if (result == B_OK)
1963 					update_threads_breakpoints_flag();
1964 
1965 				// prepare the reply
1966 				reply.set_watchpoint.error = result;
1967 				replySize = sizeof(reply.set_watchpoint);
1968 				sendReply = true;
1969 
1970 				break;
1971 			}
1972 
1973 			case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT:
1974 			{
1975 				// get the parameters
1976 				void *address = message.clear_watchpoint.address;
1977 
1978 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_CLEAR_WATCHPOINT: "
1979 					"address: %p\n", nubThread->id, address));
1980 
1981 				// check the address
1982 				status_t result = B_OK;
1983 				if (address == NULL
1984 					|| !BreakpointManager::CanAccessAddress(address, false)) {
1985 					result = B_BAD_ADDRESS;
1986 				}
1987 
1988 				// clear the watchpoint
1989 				if (result == B_OK)
1990 					result = breakpointManager->UninstallWatchpoint(address);
1991 
1992 				if (result == B_OK)
1993 					update_threads_breakpoints_flag();
1994 
1995 				break;
1996 			}
1997 
1998 			case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS:
1999 			{
2000 				// get the parameters
2001 				thread_id threadID = message.set_signal_masks.thread;
2002 				uint64 ignore = message.set_signal_masks.ignore_mask;
2003 				uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask;
2004 				uint32 ignoreOp = message.set_signal_masks.ignore_op;
2005 				uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op;
2006 
2007 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_SIGNAL_MASKS: "
2008 					"thread: %ld, ignore: %llx (op: %lu), ignore once: %llx "
2009 					"(op: %lu)\n", nubThread->id, threadID, ignore,
2010 					ignoreOp, ignoreOnce, ignoreOnceOp));
2011 
2012 				// set the masks
2013 				cpu_status state = disable_interrupts();
2014 				GRAB_THREAD_LOCK();
2015 
2016 				struct thread *thread
2017 					= thread_get_thread_struct_locked(threadID);
2018 				if (thread
2019 					&& thread->team == thread_get_current_thread()->team) {
2020 					thread_debug_info &threadDebugInfo = thread->debug_info;
2021 					// set ignore mask
2022 					switch (ignoreOp) {
2023 						case B_DEBUG_SIGNAL_MASK_AND:
2024 							threadDebugInfo.ignore_signals &= ignore;
2025 							break;
2026 						case B_DEBUG_SIGNAL_MASK_OR:
2027 							threadDebugInfo.ignore_signals |= ignore;
2028 							break;
2029 						case B_DEBUG_SIGNAL_MASK_SET:
2030 							threadDebugInfo.ignore_signals = ignore;
2031 							break;
2032 					}
2033 
2034 					// set ignore once mask
2035 					switch (ignoreOnceOp) {
2036 						case B_DEBUG_SIGNAL_MASK_AND:
2037 							threadDebugInfo.ignore_signals_once &= ignoreOnce;
2038 							break;
2039 						case B_DEBUG_SIGNAL_MASK_OR:
2040 							threadDebugInfo.ignore_signals_once |= ignoreOnce;
2041 							break;
2042 						case B_DEBUG_SIGNAL_MASK_SET:
2043 							threadDebugInfo.ignore_signals_once = ignoreOnce;
2044 							break;
2045 					}
2046 				}
2047 
2048 				RELEASE_THREAD_LOCK();
2049 				restore_interrupts(state);
2050 
2051 				break;
2052 			}
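			// The three mask ops above are plain bitwise updates on the
			// thread's 64 bit ignore masks. A small worked example (values
			// are hypothetical):
			//
			//	uint64 mask = 0x05;
			//	// B_DEBUG_SIGNAL_MASK_AND with 0x06:  mask &= 0x06  ->  0x04
			//	// B_DEBUG_SIGNAL_MASK_OR  with 0x06:  mask |= 0x06  ->  0x07
			//	// B_DEBUG_SIGNAL_MASK_SET with 0x06:  mask  = 0x06  ->  0x06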
2053 
2054 			case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS:
2055 			{
2056 				// get the parameters
2057 				replyPort = message.get_signal_masks.reply_port;
2058 				thread_id threadID = message.get_signal_masks.thread;
2059 				status_t result = B_OK;
2060 
2061 				// get the masks
2062 				uint64 ignore = 0;
2063 				uint64 ignoreOnce = 0;
2064 
2065 				cpu_status state = disable_interrupts();
2066 				GRAB_THREAD_LOCK();
2067 
2068 				struct thread *thread
2069 					= thread_get_thread_struct_locked(threadID);
2070 				if (thread) {
2071 					ignore = thread->debug_info.ignore_signals;
2072 					ignoreOnce = thread->debug_info.ignore_signals_once;
2073 				} else
2074 					result = B_BAD_THREAD_ID;
2075 
2076 				RELEASE_THREAD_LOCK();
2077 				restore_interrupts(state);
2078 
2079 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_GET_SIGNAL_MASKS: "
2080 					"reply port: %ld, thread: %ld, ignore: %llx, "
2081 					"ignore once: %llx, result: %lx\n", nubThread->id,
2082 					replyPort, threadID, ignore, ignoreOnce, result));
2083 
2084 				// prepare the message
2085 				reply.get_signal_masks.error = result;
2086 				reply.get_signal_masks.ignore_mask = ignore;
2087 				reply.get_signal_masks.ignore_once_mask = ignoreOnce;
2088 				replySize = sizeof(reply.get_signal_masks);
2089 				sendReply = true;
2090 				break;
2091 			}
2092 
2093 			case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER:
2094 			{
2095 				// get the parameters
2096 				thread_id threadID = message.set_signal_handler.thread;
2097 				int signal = message.set_signal_handler.signal;
2098 				struct sigaction &handler = message.set_signal_handler.handler;
2099 
2100 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER: "
2101 					"thread: %ld, signal: %d, handler: %p\n", nubThread->id,
2102 					threadID, signal, handler.sa_handler));
2103 
2104 				// check whether the thread exists and is ours
2105 				cpu_status state = disable_interrupts();
2106 				GRAB_THREAD_LOCK();
2107 
2108 				struct thread *thread
2109 					= thread_get_thread_struct_locked(threadID);
2110 				if (thread
2111 					&& thread->team != thread_get_current_thread()->team) {
2112 					thread = NULL;
2113 				}
2114 
2115 				RELEASE_THREAD_LOCK();
2116 				restore_interrupts(state);
2117 
2118 				// set the handler
2119 				if (thread)
2120 					sigaction_etc(threadID, signal, &handler, NULL);
2121 
2122 				break;
2123 			}
2124 
2125 			case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER:
2126 			{
2127 				// get the parameters
2128 				replyPort = message.get_signal_handler.reply_port;
2129 				thread_id threadID = message.get_signal_handler.thread;
2130 				int signal = message.get_signal_handler.signal;
2131 				status_t result = B_OK;
2132 
2133 				// check whether the thread exists and is ours
2134 				cpu_status state = disable_interrupts();
2135 				GRAB_THREAD_LOCK();
2136 
2137 				struct thread *thread
2138 					= thread_get_thread_struct_locked(threadID);
2139 				if (thread) {
2140 					if (thread->team != thread_get_current_thread()->team)
2141 						result = B_BAD_VALUE;
2142 				} else
2143 					result = B_BAD_THREAD_ID;
2144 
2145 				RELEASE_THREAD_LOCK();
2146 				restore_interrupts(state);
2147 
2148 				// get the handler
2149 				if (result == B_OK) {
2150 					result = sigaction_etc(threadID, signal, NULL,
2151 						&reply.get_signal_handler.handler);
2152 				}
2153 
2154 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER: "
2155 					"reply port: %ld, thread: %ld, signal: %d, "
2156 					"handler: %p\n", nubThread->id, replyPort,
2157 					threadID, signal,
2158 					reply.get_signal_handler.handler.sa_handler));
2159 
2160 				// prepare the message
2161 				reply.get_signal_handler.error = result;
2162 				replySize = sizeof(reply.get_signal_handler);
2163 				sendReply = true;
2164 				break;
2165 			}
2166 
2167 			case B_DEBUG_MESSAGE_PREPARE_HANDOVER:
2168 			{
2169 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_PREPARE_HANDOVER\n",
2170 					nubThread->id));
2171 
2172 				struct team *team = nubThread->team;
2173 
2174 				// Acquire the debugger write lock. As soon as we have it and
2175 				// have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread
2176 				// will write anything to the debugger port anymore.
2177 				status_t result = acquire_sem_etc(writeLock, 1,
2178 					B_KILL_CAN_INTERRUPT, 0);
2179 				if (result == B_OK) {
2180 					// set the respective team debug flag
2181 					cpu_status state = disable_interrupts();
2182 					GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2183 
2184 					atomic_or(&team->debug_info.flags,
2185 						B_TEAM_DEBUG_DEBUGGER_HANDOVER);
2186 					BreakpointManager* breakpointManager
2187 						= team->debug_info.breakpoint_manager;
2188 
2189 					RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2190 					restore_interrupts(state);
2191 
2192 					// remove all installed breakpoints
2193 					breakpointManager->RemoveAllBreakpoints();
2194 
2195 					release_sem(writeLock);
2196 				} else {
2197 					// We probably got a SIGKILL. If so, we will terminate when
2198 					// reading the next message fails.
2199 				}
2200 
2201 				break;
2202 			}
2203 
2204 			case B_DEBUG_MESSAGE_HANDED_OVER:
2205 			{
2206 				// notify all threads that the debugger has changed
2207 				broadcast_debugged_thread_message(nubThread,
2208 					B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
2209 
2210 				break;
2211 			}
2212 
2213 			case B_DEBUG_START_PROFILER:
2214 			{
2215 				// get the parameters
2216 				thread_id threadID = message.start_profiler.thread;
2217 				replyPort = message.start_profiler.reply_port;
2218 				area_id sampleArea = message.start_profiler.sample_area;
2219 				int32 stackDepth = message.start_profiler.stack_depth;
2220 				bool variableStackDepth
2221 					= message.start_profiler.variable_stack_depth;
2222 				bigtime_t interval = max_c(message.start_profiler.interval,
2223 					B_DEBUG_MIN_PROFILE_INTERVAL);
2224 				status_t result = B_OK;
2225 
2226 				TRACE(("nub thread %ld: B_DEBUG_START_PROFILER: "
2227 					"thread: %ld, sample area: %ld\n", nubThread->id, threadID,
2228 					sampleArea));
2229 
2230 				if (stackDepth < 1)
2231 					stackDepth = 1;
2232 				else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH)
2233 					stackDepth = B_DEBUG_STACK_TRACE_DEPTH;
2234 
2235 				// if the stack depth is variable, reserve an extra entry per
2236 				// hit (for the number of samples)
2237 				if (variableStackDepth)
2238 					stackDepth++;
2239 
2240 				// clone the sample area
2241 				area_info areaInfo;
2242 				if (result == B_OK)
2243 					result = get_area_info(sampleArea, &areaInfo);
2244 
2245 				area_id clonedSampleArea = -1;
2246 				void* samples = NULL;
2247 				if (result == B_OK) {
2248 					clonedSampleArea = clone_area("profiling samples", &samples,
2249 						B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA,
2250 						sampleArea);
2251 					if (clonedSampleArea >= 0) {
2252 						// we need the memory locked
2253 						result = lock_memory(samples, areaInfo.size,
2254 							B_READ_DEVICE);
2255 						if (result != B_OK) {
2256 							delete_area(clonedSampleArea);
2257 							clonedSampleArea = -1;
2258 						}
2259 					} else
2260 						result = clonedSampleArea;
2261 				}
2262 
2263 				// get the thread and set the profile info
2264 				int32 imageEvent = nubThread->team->debug_info.image_event;
2265 				if (result == B_OK) {
2266 					cpu_status state = disable_interrupts();
2267 					GRAB_THREAD_LOCK();
2268 
2269 					struct thread *thread
2270 						= thread_get_thread_struct_locked(threadID);
2271 					if (thread && thread->team == nubThread->team) {
2272 						thread_debug_info &threadDebugInfo = thread->debug_info;
2273 						if (threadDebugInfo.profile.samples == NULL) {
2274 							threadDebugInfo.profile.interval = interval;
2275 							threadDebugInfo.profile.sample_area
2276 								= clonedSampleArea;
2277 							threadDebugInfo.profile.samples = (addr_t*)samples;
2278 							threadDebugInfo.profile.max_samples
2279 								= areaInfo.size / sizeof(addr_t);
2280 							threadDebugInfo.profile.flush_threshold
2281 								= threadDebugInfo.profile.max_samples
2282 									* B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD
2283 									/ 100;
2284 							threadDebugInfo.profile.sample_count = 0;
2285 							threadDebugInfo.profile.dropped_ticks = 0;
2286 							threadDebugInfo.profile.stack_depth = stackDepth;
2287 							threadDebugInfo.profile.variable_stack_depth
2288 								= variableStackDepth;
2289 							threadDebugInfo.profile.buffer_full = false;
2290 							threadDebugInfo.profile.interval_left = interval;
2291 							threadDebugInfo.profile.installed_timer = NULL;
2292 							threadDebugInfo.profile.image_event = imageEvent;
2293 							threadDebugInfo.profile.last_image_event
2294 								= imageEvent;
2295 						} else
2296 							result = B_BAD_VALUE;
2297 					} else
2298 						result = B_BAD_THREAD_ID;
2299 
2300 					RELEASE_THREAD_LOCK();
2301 					restore_interrupts(state);
2302 				}
2303 
2304 				// on error unlock and delete the sample area
2305 				if (result != B_OK) {
2306 					if (clonedSampleArea >= 0) {
2307 						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
2308 						delete_area(clonedSampleArea);
2309 					}
2310 				}
2311 
2312 				// send a reply to the debugger
2313 				reply.start_profiler.error = result;
2314 				reply.start_profiler.interval = interval;
2315 				reply.start_profiler.image_event = imageEvent;
2316 				sendReply = true;
2317 				replySize = sizeof(reply.start_profiler);
2318 
2319 				break;
2320 			}
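			// Worked example for the buffer bookkeeping above (all numbers
			// hypothetical): with a 64 KiB sample area and a 4 byte addr_t,
			// max_samples = 65536 / 4 = 16384. If
			// B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD were 70, flush_threshold
			// would be 16384 * 70 / 100 = 11468, i.e. the buffer would be
			// considered ready to be flushed to the debugger once about 11468
			// samples have accumulated rather than only when it is full.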
2321 
2322 			case B_DEBUG_STOP_PROFILER:
2323 			{
2324 				// get the parameters
2325 				thread_id threadID = message.stop_profiler.thread;
2326 				replyPort = message.stop_profiler.reply_port;
2327 				status_t result = B_OK;
2328 
2329 				TRACE(("nub thread %ld: B_DEBUG_STOP_PROFILER: "
2330 					"thread: %ld\n", nubThread->id, threadID));
2331 
2332 				area_id sampleArea = -1;
2333 				addr_t* samples = NULL;
2334 				int32 sampleCount = 0;
2335 				int32 stackDepth = 0;
2336 				bool variableStackDepth = false;
2337 				int32 imageEvent = 0;
2338 				int32 droppedTicks = 0;
2339 
2340 				// get the thread and detach the profile info
2341 				cpu_status state = disable_interrupts();
2342 				GRAB_THREAD_LOCK();
2343 
2344 				struct thread *thread
2345 					= thread_get_thread_struct_locked(threadID);
2346 				if (thread && thread->team == nubThread->team) {
2347 					thread_debug_info &threadDebugInfo = thread->debug_info;
2348 					if (threadDebugInfo.profile.samples != NULL) {
2349 						sampleArea = threadDebugInfo.profile.sample_area;
2350 						samples = threadDebugInfo.profile.samples;
2351 						sampleCount = threadDebugInfo.profile.sample_count;
2352 						droppedTicks = threadDebugInfo.profile.dropped_ticks;
2353 						stackDepth = threadDebugInfo.profile.stack_depth;
2354 						variableStackDepth
2355 							= threadDebugInfo.profile.variable_stack_depth;
2356 						imageEvent = threadDebugInfo.profile.image_event;
2357 						threadDebugInfo.profile.sample_area = -1;
2358 						threadDebugInfo.profile.samples = NULL;
2359 						threadDebugInfo.profile.buffer_full = false;
2360 						threadDebugInfo.profile.dropped_ticks = 0;
2361 					} else
2362 						result = B_BAD_VALUE;
2363 				} else
2364 					result = B_BAD_THREAD_ID;
2365 
2366 				RELEASE_THREAD_LOCK();
2367 				restore_interrupts(state);
2368 
2369 				// prepare the reply
2370 				if (result == B_OK) {
2371 					reply.profiler_update.origin.thread = threadID;
2372 					reply.profiler_update.image_event = imageEvent;
2373 					reply.profiler_update.stack_depth = stackDepth;
2374 					reply.profiler_update.variable_stack_depth
2375 						= variableStackDepth;
2376 					reply.profiler_update.sample_count = sampleCount;
2377 					reply.profiler_update.dropped_ticks = droppedTicks;
2378 					reply.profiler_update.stopped = true;
2379 				} else
2380 					reply.profiler_update.origin.thread = result;
2381 
2382 				replySize = sizeof(debug_profiler_update);
2383 				sendReply = true;
2384 
2385 				if (sampleArea >= 0) {
2386 					area_info areaInfo;
2387 					if (get_area_info(sampleArea, &areaInfo) == B_OK) {
2388 						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
2389 						delete_area(sampleArea);
2390 					}
2391 				}
2392 			}
2393 		}
2394 
2395 		// send the reply, if necessary
2396 		if (sendReply) {
2397 			status_t error = kill_interruptable_write_port(replyPort, command,
2398 				&reply, replySize);
2399 
2400 			if (error != B_OK) {
2401 				// Either the debugger port no longer exists or we were
2402 				// interrupted by a kill signal. In either case we terminate.
2403 				TRACE(("nub thread %ld: failed to send reply to port %ld: %s\n",
2404 					nubThread->id, replyPort, strerror(error)));
2405 
2406 				nub_thread_cleanup(nubThread);
2407 				return error;
2408 			}
2409 		}
2410 	}
2411 }
2412 
2413 
2414 /**	\brief Helper function for install_team_debugger() that sets up the team
2415 		   and thread debug infos.
2416 
2417 	Interrupts must be disabled and the team debug info lock of the team to be
2418 	debugged must be held. The function will release the lock, but leave
2419 	interrupts disabled.
2420 
2421 	The function also clears the arch-specific team and thread debug infos
2422 	(among other things discarding any previously set break- and watchpoints).
2423  */
2424 static void
2425 install_team_debugger_init_debug_infos(struct team *team, team_id debuggerTeam,
2426 	port_id debuggerPort, port_id nubPort, thread_id nubThread,
2427 	sem_id debuggerPortWriteLock, thread_id causingThread)
2428 {
2429 	atomic_set(&team->debug_info.flags,
2430 		B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED);
2431 	team->debug_info.nub_port = nubPort;
2432 	team->debug_info.nub_thread = nubThread;
2433 	team->debug_info.debugger_team = debuggerTeam;
2434 	team->debug_info.debugger_port = debuggerPort;
2435 	team->debug_info.debugger_write_lock = debuggerPortWriteLock;
2436 	team->debug_info.causing_thread = causingThread;
2437 
2438 	arch_clear_team_debug_info(&team->debug_info.arch_info);
2439 
2440 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2441 
2442 	// set the user debug flags and signal masks of all threads to the default
2443 	GRAB_THREAD_LOCK();
2444 
2445 	for (struct thread *thread = team->thread_list;
2446 		 thread;
2447 		 thread = thread->team_next) {
2448 		if (thread->id == nubThread) {
2449 			atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD);
2450 		} else {
2451 			int32 flags = thread->debug_info.flags
2452 				& ~B_THREAD_DEBUG_USER_FLAG_MASK;
2453 			atomic_set(&thread->debug_info.flags,
2454 				flags | B_THREAD_DEBUG_DEFAULT_FLAGS);
2455 			thread->debug_info.ignore_signals = 0;
2456 			thread->debug_info.ignore_signals_once = 0;
2457 
2458 			arch_clear_thread_debug_info(&thread->debug_info.arch_info);
2459 		}
2460 	}
2461 
2462 	RELEASE_THREAD_LOCK();
2463 
2464 	// update the thread::flags fields
2465 	update_threads_debugger_installed_flag(team);
2466 }
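// Illustrative sketch (not part of the implementation) of the locking
// protocol install_team_debugger_init_debug_infos() expects from its caller;
// this mirrors the call site in install_team_debugger() below:
//
//	cpu_status state = disable_interrupts();
//	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
//
//	install_team_debugger_init_debug_infos(team, debuggerTeam, debuggerPort,
//		nubPort, nubThread, debuggerWriteLock, causingThread);
//		// releases the team debug info lock, interrupts remain disabled
//
//	restore_interrupts(state);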
2467 
2468 
2469 static port_id
2470 install_team_debugger(team_id teamID, port_id debuggerPort,
2471 	thread_id causingThread, bool useDefault, bool dontReplace)
2472 {
2473 	TRACE(("install_team_debugger(team: %ld, port: %ld, default: %d, "
2474 		"dontReplace: %d)\n", teamID, debuggerPort, useDefault, dontReplace));
2475 
2476 	if (useDefault)
2477 		debuggerPort = atomic_get(&sDefaultDebuggerPort);
2478 
2479 	// get the debugger team
2480 	port_info debuggerPortInfo;
2481 	status_t error = get_port_info(debuggerPort, &debuggerPortInfo);
2482 	if (error != B_OK) {
2483 		TRACE(("install_team_debugger(): Failed to get debugger port info: "
2484 			"%lx\n", error));
2485 		return error;
2486 	}
2487 	team_id debuggerTeam = debuggerPortInfo.team;
2488 
2489 	// Check the debugger team: It must neither be the kernel team nor the
2490 	// debugged team.
2491 	if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) {
2492 		TRACE(("install_team_debugger(): Can't debug kernel or debugger team. "
2493 			"debugger: %ld, debugged: %ld\n", debuggerTeam, teamID));
2494 		return B_NOT_ALLOWED;
2495 	}
2496 
2497 	// get the team
2498 	struct team* team;
2499 	ConditionVariable debugChangeCondition;
2500 	error = prepare_debugger_change(teamID, debugChangeCondition, team);
2501 	if (error != B_OK)
2502 		return error;
2503 
2504 	// get the real team ID
2505 	teamID = team->id;
2506 
2507 	// check whether a debugger is already installed
2508 
2509 	bool done = false;
2510 	port_id result = B_ERROR;
2511 	bool handOver = false;
2512 	bool releaseDebugInfoLock = true;
2513 	port_id oldDebuggerPort = -1;
2514 	port_id nubPort = -1;
2515 
2516 	cpu_status state = disable_interrupts();
2517 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2518 
2519 	int32 teamDebugFlags = team->debug_info.flags;
2520 
2521 	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2522 		// There's already a debugger installed.
2523 		if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) {
2524 			if (dontReplace) {
2525 				// We're fine with already having a debugger.
2526 				error = B_OK;
2527 				done = true;
2528 				result = team->debug_info.nub_port;
2529 			} else {
2530 				// A handover to another debugger is requested. Set the
2531 				// handing-over flag -- we'll clear both flags after having
2532 				// sent the handed-over message to the new debugger.
2533 				atomic_or(&team->debug_info.flags,
2534 					B_TEAM_DEBUG_DEBUGGER_HANDING_OVER);
2535 
2536 				oldDebuggerPort = team->debug_info.debugger_port;
2537 				result = nubPort = team->debug_info.nub_port;
2538 				if (causingThread < 0)
2539 					causingThread = team->debug_info.causing_thread;
2540 
2541 				// set the new debugger
2542 				install_team_debugger_init_debug_infos(team, debuggerTeam,
2543 					debuggerPort, nubPort, team->debug_info.nub_thread,
2544 					team->debug_info.debugger_write_lock, causingThread);
2545 
2546 				releaseDebugInfoLock = false;
2547 				handOver = true;
2548 				done = true;
2549 
2550 				// finally set the new port owner
2551 				if (set_port_owner(nubPort, debuggerTeam) != B_OK) {
2552 					// The old debugger must just have died. Just proceed as
2553 					// if there was no debugger installed. We may still be too
2554 					// early, in which case we'll fail, but this race condition
2555 					// should be unbelievably rare and relatively harmless.
2556 					handOver = false;
2557 					done = false;
2558 				}
2559 			}
2560 		} else {
2561 			// there's already a debugger installed and no handover is requested
2562 			error = (dontReplace ? B_OK : B_BAD_VALUE);
2563 			done = true;
2564 			result = team->debug_info.nub_port;
2565 		}
2566 	} else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0
2567 		&& useDefault) {
2568 		// No debugger is installed yet, disable_debugger() has been invoked,
2569 		// and we're asked to install the default debugger. Just fail.
2570 		error = B_BAD_VALUE;
2571 	}
2572 
2573 	// in case of a handover the lock has already been released
2574 	if (releaseDebugInfoLock)
2575 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2576 
2577 	restore_interrupts(state);
2578 
2579 	if (handOver) {
2580 		// prepare the handed-over message
2581 		debug_handed_over notification;
2582 		notification.origin.thread = -1;
2583 		notification.origin.team = teamID;
2584 		notification.origin.nub_port = nubPort;
2585 		notification.debugger = debuggerTeam;
2586 		notification.debugger_port = debuggerPort;
2587 		notification.causing_thread = causingThread;
2588 
2589 		// notify the new debugger
2590 		error = write_port_etc(debuggerPort,
2591 			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
2592 			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
2593 		if (error != B_OK) {
2594 			dprintf("install_team_debugger(): Failed to send message to new "
2595 				"debugger: %s\n", strerror(error));
2596 		}
2597 
2598 		// clear the handover and handing-over flags
2599 		state = disable_interrupts();
2600 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2601 
2602 		atomic_and(&team->debug_info.flags,
2603 			~(B_TEAM_DEBUG_DEBUGGER_HANDOVER
2604 				| B_TEAM_DEBUG_DEBUGGER_HANDING_OVER));
2605 
2606 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2607 		restore_interrupts(state);
2608 
2609 		finish_debugger_change(team);
2610 
2611 		// notify the nub thread
2612 		kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER,
2613 			NULL, 0);
2614 
2615 		// notify the old debugger
2616 		error = write_port_etc(oldDebuggerPort,
2617 			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
2618 			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
2619 		if (error != B_OK) {
2620 			TRACE(("install_team_debugger(): Failed to send message to old "
2621 				"debugger: %s\n", strerror(error)));
2622 		}
2623 
2624 		TRACE(("install_team_debugger() done: handed over to debugger: team: "
2625 			"%ld, port: %ld\n", debuggerTeam, debuggerPort));
2626 
2627 		return result;
2628 	}
2629 
2630 	if (done || error != B_OK) {
2631 		TRACE(("install_team_debugger() done1: %ld\n",
2632 			(error == B_OK ? result : error)));
2633 		finish_debugger_change(team);
2634 		return (error == B_OK ? result : error);
2635 	}
2636 
2637 	// create the debugger write lock semaphore
2638 	char nameBuffer[B_OS_NAME_LENGTH];
2639 	snprintf(nameBuffer, sizeof(nameBuffer), "team %ld debugger port write",
2640 		teamID);
2641 	sem_id debuggerWriteLock = create_sem(1, nameBuffer);
2642 	if (debuggerWriteLock < 0)
2643 		error = debuggerWriteLock;
2644 
2645 	// create the nub port
2646 	snprintf(nameBuffer, sizeof(nameBuffer), "team %ld debug", teamID);
2647 	if (error == B_OK) {
2648 		nubPort = create_port(1, nameBuffer);
2649 		if (nubPort < 0)
2650 			error = nubPort;
2651 		else
2652 			result = nubPort;
2653 	}
2654 
2655 	// make the debugger team the port owner; thus we'll know when the debugger
2656 	// is gone and can clean up
2657 	if (error == B_OK)
2658 		error = set_port_owner(nubPort, debuggerTeam);
2659 
2660 	// create the breakpoint manager
2661 	BreakpointManager* breakpointManager = NULL;
2662 	if (error == B_OK) {
2663 		breakpointManager = new(std::nothrow) BreakpointManager;
2664 		if (breakpointManager != NULL)
2665 			error = breakpointManager->Init();
2666 		else
2667 			error = B_NO_MEMORY;
2668 	}
2669 
2670 	// spawn the nub thread
2671 	thread_id nubThread = -1;
2672 	if (error == B_OK) {
2673 		snprintf(nameBuffer, sizeof(nameBuffer), "team %ld debug task", teamID);
2674 		nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer,
2675 			B_NORMAL_PRIORITY, NULL, teamID, -1);
2676 		if (nubThread < 0)
2677 			error = nubThread;
2678 	}
2679 
2680 	// now adjust the debug info accordingly
2681 	if (error == B_OK) {
2682 		state = disable_interrupts();
2683 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2684 
2685 		team->debug_info.breakpoint_manager = breakpointManager;
2686 		install_team_debugger_init_debug_infos(team, debuggerTeam,
2687 			debuggerPort, nubPort, nubThread, debuggerWriteLock,
2688 			causingThread);
2689 
2690 		restore_interrupts(state);
2691 	}
2692 
2693 	finish_debugger_change(team);
2694 
2695 	// if everything went fine, resume the nub thread, otherwise clean up
2696 	if (error == B_OK) {
2697 		resume_thread(nubThread);
2698 	} else {
2699 		// delete port and terminate thread
2700 		if (nubPort >= 0) {
2701 			set_port_owner(nubPort, B_CURRENT_TEAM);
2702 			delete_port(nubPort);
2703 		}
2704 		if (nubThread >= 0) {
2705 			int32 result;
2706 			wait_for_thread(nubThread, &result);
2707 		}
2708 
2709 		delete breakpointManager;
2710 	}
2711 
2712 	TRACE(("install_team_debugger() done2: %ld\n",
2713 		(error == B_OK ? result : error)));
2714 	return (error == B_OK ? result : error);
2715 }
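// Illustrative sketch (not part of the implementation): seen from a debugger,
// the function above is reached through the public userland API declared in
// <debugger.h>, roughly (names are hypothetical):
//
//	port_id debuggerPort = create_port(10, "my debugger port");
//	port_id nubPort = install_team_debugger(teamID, debuggerPort);
//	if (nubPort < 0)
//		;	// error, e.g. B_NOT_ALLOWED or a debugger is already installed
//
// Debug events for the team then arrive on debuggerPort, while requests (cf.
// the nub thread's message loop above) are written to nubPort.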
2716 
2717 
2718 static status_t
2719 ensure_debugger_installed()
2720 {
2721 	port_id port = install_team_debugger(B_CURRENT_TEAM, -1,
2722 		thread_get_current_thread_id(), true, true);
2723 	return port >= 0 ? B_OK : port;
2724 }
2725 
2726 
2727 // #pragma mark -
2728 
2729 
2730 void
2731 _user_debugger(const char *userMessage)
2732 {
2733 	// install the default debugger, if there is none yet
2734 	status_t error = ensure_debugger_installed();
2735 	if (error != B_OK) {
2736 		// time to commit suicide
2737 		char buffer[128];
2738 		ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer));
2739 		if (length >= 0) {
2740 			dprintf("_user_debugger(): Failed to install debugger. Message is: "
2741 				"`%s'\n", buffer);
2742 		} else {
2743 			dprintf("_user_debugger(): Failed to install debugger. Message is: "
2744 				"%p (%s)\n", userMessage, strerror(length));
2745 		}
2746 		_user_exit_team(1);
2747 	}
2748 
2749 	// prepare the message
2750 	debug_debugger_call message;
2751 	message.message = (void*)userMessage;
2752 
2753 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message,
2754 		sizeof(message), true);
2755 }
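// Illustrative sketch (not part of the implementation): the syscall above
// backs the public debugger() call, e.g.:
//
//	if (somethingIsTerriblyWrong)
//		debugger("data structure corrupted");
//
// If no debugger could be installed, the team exits (see above); otherwise
// the calling thread stops and the debugger receives a
// B_DEBUGGER_MESSAGE_DEBUGGER_CALL message carrying a pointer to the string.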
2756 
2757 
2758 int
2759 _user_disable_debugger(int state)
2760 {
2761 	struct team *team = thread_get_current_thread()->team;
2762 
2763 	TRACE(("_user_disable_debugger(%d): team: %ld\n", state, team->id));
2764 
2765 	cpu_status cpuState = disable_interrupts();
2766 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2767 
2768 	int32 oldFlags;
2769 	if (state) {
2770 		oldFlags = atomic_or(&team->debug_info.flags,
2771 			B_TEAM_DEBUG_DEBUGGER_DISABLED);
2772 	} else {
2773 		oldFlags = atomic_and(&team->debug_info.flags,
2774 			~B_TEAM_DEBUG_DEBUGGER_DISABLED);
2775 	}
2776 
2777 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2778 	restore_interrupts(cpuState);
2779 
2780 	// TODO: Check whether the return value is really the old state.
2781 	return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED);
2782 }
2783 
2784 
2785 status_t
2786 _user_install_default_debugger(port_id debuggerPort)
2787 {
2788 	// if supplied, check whether the port is valid
2789 	if (debuggerPort >= 0) {
2790 		port_info portInfo;
2791 		status_t error = get_port_info(debuggerPort, &portInfo);
2792 		if (error != B_OK)
2793 			return error;
2794 
2795 		// the debugger team must not be the kernel team
2796 		if (portInfo.team == team_get_kernel_team_id())
2797 			return B_NOT_ALLOWED;
2798 	}
2799 
2800 	atomic_set(&sDefaultDebuggerPort, debuggerPort);
2801 
2802 	return B_OK;
2803 }
2804 
2805 
2806 port_id
2807 _user_install_team_debugger(team_id teamID, port_id debuggerPort)
2808 {
2809 	return install_team_debugger(teamID, debuggerPort, -1, false, false);
2810 }
2811 
2812 
2813 status_t
2814 _user_remove_team_debugger(team_id teamID)
2815 {
2816 	struct team* team;
2817 	ConditionVariable debugChangeCondition;
2818 	status_t error = prepare_debugger_change(teamID, debugChangeCondition,
2819 		team);
2820 	if (error != B_OK)
2821 		return error;
2822 
2823 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2824 
2825 	thread_id nubThread = -1;
2826 	port_id nubPort = -1;
2827 
2828 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2829 		// there's a debugger installed
2830 		nubThread = team->debug_info.nub_thread;
2831 		nubPort = team->debug_info.nub_port;
2832 	} else {
2833 		// no debugger installed
2834 		error = B_BAD_VALUE;
2835 	}
2836 
2837 	debugInfoLocker.Unlock();
2838 
2839 	// Delete the nub port -- this will cause the nub thread to terminate and
2840 	// remove the debugger.
2841 	if (nubPort >= 0)
2842 		delete_port(nubPort);
2843 
2844 	finish_debugger_change(team);
2845 
2846 	// wait for the nub thread
2847 	if (nubThread >= 0)
2848 		wait_for_thread(nubThread, NULL);
2849 
2850 	return error;
2851 }
2852 
2853 
2854 status_t
2855 _user_debug_thread(thread_id threadID)
2856 {
2857 	TRACE(("[%ld] _user_debug_thread(%ld)\n", find_thread(NULL), threadID));
2858 
2859 	// tell the thread to stop as soon as possible
2860 	status_t error = B_OK;
2861 	cpu_status state = disable_interrupts();
2862 	GRAB_THREAD_LOCK();
2863 
2864 	struct thread *thread = thread_get_thread_struct_locked(threadID);
2865 	if (!thread) {
2866 		// thread doesn't exist any longer
2867 		error = B_BAD_THREAD_ID;
2868 	} else if (thread->team == team_get_kernel_team()) {
2869 		// we can't debug the kernel team
2870 		error = B_NOT_ALLOWED;
2871 	} else if (thread->debug_info.flags & B_THREAD_DEBUG_DYING) {
2872 		// the thread is already dying -- too late to debug it
2873 		error = B_BAD_THREAD_ID;
2874 	} else if (thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) {
2875 		// don't debug the nub thread
2876 		error = B_NOT_ALLOWED;
2877 	} else if (!(thread->debug_info.flags & B_THREAD_DEBUG_STOPPED)) {
2878 		// set the flag that tells the thread to stop as soon as possible
2879 		atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP);
2880 
2881 		update_thread_user_debug_flag(thread);
2882 
2883 		switch (thread->state) {
2884 			case B_THREAD_SUSPENDED:
2885 				// thread suspended: wake it up
2886 				scheduler_enqueue_in_run_queue(thread);
2887 				break;
2888 
2889 			default:
2890 				// thread may be waiting: interrupt it
2891 				thread_interrupt(thread, false);
2892 					// TODO: If the thread is already in the kernel and e.g.
2893 					// about to acquire a semaphore (before
2894 					// thread_prepare_to_block()), we won't interrupt it.
2895 					// Maybe we should rather send a signal (SIGTRAP).
2896 				break;
2897 		}
2898 	}
2899 
2900 	RELEASE_THREAD_LOCK();
2901 	restore_interrupts(state);
2902 
2903 	return error;
2904 }
2905 
2906 
2907 void
2908 _user_wait_for_debugger(void)
2909 {
2910 	debug_thread_debugged message;
2911 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message,
2912 		sizeof(message), false);
2913 }
2914 
2915 
2916 status_t
2917 _user_set_debugger_breakpoint(void *address, uint32 type, int32 length,
2918 	bool watchpoint)
2919 {
2920 	// check the address and size
2921 	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
2922 		return B_BAD_ADDRESS;
2923 	if (watchpoint && length < 0)
2924 		return B_BAD_VALUE;
2925 
2926 	// check whether a debugger is installed already
2927 	team_debug_info teamDebugInfo;
2928 	get_team_debug_info(teamDebugInfo);
2929 	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2930 		return B_BAD_VALUE;
2931 
2932 	// We can't help it: there's a small but relatively harmless race condition
2933 	// here, since a debugger could be installed in the meantime. The worst case
2934 	// is that we install a break/watchpoint the debugger doesn't know about.
2935 
2936 	// set the break/watchpoint
2937 	status_t result;
2938 	if (watchpoint)
2939 		result = arch_set_watchpoint(address, type, length);
2940 	else
2941 		result = arch_set_breakpoint(address);
2942 
2943 	if (result == B_OK)
2944 		update_threads_breakpoints_flag();
2945 
2946 	return result;
2947 }
2948 
2949 
2950 status_t
2951 _user_clear_debugger_breakpoint(void *address, bool watchpoint)
2952 {
2953 	// check the address
2954 	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
2955 		return B_BAD_ADDRESS;
2956 
2957 	// check whether a debugger is installed already
2958 	team_debug_info teamDebugInfo;
2959 	get_team_debug_info(teamDebugInfo);
2960 	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2961 		return B_BAD_VALUE;
2962 
2963 	// We can't help it: there's a small but relatively harmless race condition
2964 	// here, since a debugger could be installed in the meantime. The worst case
2965 	// is that we clear a break/watchpoint the debugger has just installed.
2966 
2967 	// clear the break/watchpoint
2968 	status_t result;
2969 	if (watchpoint)
2970 		result = arch_clear_watchpoint(address);
2971 	else
2972 		result = arch_clear_breakpoint(address);
2973 
2974 	if (result == B_OK)
2975 		update_threads_breakpoints_flag();
2976 
2977 	return result;
2978 }
2979