xref: /haiku/src/system/kernel/debug/user_debugger.cpp (revision 9a6a20d4689307142a7ed26a1437ba47e244e73f)
1 /*
2  * Copyright 2005-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2015, Rene Gollent, rene@gollent.com.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 
8 #include <errno.h>
9 #include <signal.h>
10 #include <stdlib.h>
11 #include <stdio.h>
12 #include <string.h>
13 
14 #include <algorithm>
15 
16 #include <arch/debug.h>
17 #include <arch/user_debugger.h>
18 #include <core_dump.h>
19 #include <cpu.h>
20 #include <debugger.h>
21 #include <kernel.h>
22 #include <KernelExport.h>
23 #include <kscheduler.h>
24 #include <ksignal.h>
25 #include <ksyscalls.h>
26 #include <port.h>
27 #include <sem.h>
28 #include <team.h>
29 #include <thread.h>
30 #include <thread_types.h>
31 #include <user_debugger.h>
32 #include <vm/vm.h>
33 #include <vm/vm_types.h>
34 
35 #include <AutoDeleter.h>
36 #include <util/AutoLock.h>
37 #include <util/ThreadAutoLock.h>
38 
39 #include "BreakpointManager.h"
40 
41 
42 //#define TRACE_USER_DEBUGGER
43 #ifdef TRACE_USER_DEBUGGER
44 #	define TRACE(x) dprintf x
45 #else
46 #	define TRACE(x) ;
47 #endif
48 
49 
50 // TODO: Since the introduction of team_debug_info::debugger_changed_condition
51 // there's some potential for simplifications. E.g. clear_team_debug_info() and
52 // destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus
53 // arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()).
54 
55 
56 static port_id sDefaultDebuggerPort = -1;
57 	// accessed atomically
58 
59 static timer sProfilingTimers[SMP_MAX_CPUS];
60 	// a profiling timer for each CPU -- used when a profiled thread is running
61 	// on that CPU
62 
63 
64 static void schedule_profiling_timer(Thread* thread, bigtime_t interval);
65 static int32 profiling_event(timer* unused);
66 static void profiling_flush(void*);
67 
68 static status_t ensure_debugger_installed();
69 static void get_team_debug_info(team_debug_info &teamDebugInfo);
70 
71 
72 static inline status_t
73 kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
74 	size_t bufferSize)
75 {
76 	return write_port_etc(port, code, buffer, bufferSize, B_KILL_CAN_INTERRUPT,
77 		0);
78 }
79 
80 
81 static status_t
82 debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
83 	bool dontWait)
84 {
85 	TRACE(("debugger_write(): thread: %" B_PRId32 ", team %" B_PRId32 ", "
86 		"port: %" B_PRId32 ", code: %" B_PRIx32 ", message: %p, size: %lu, "
87 		"dontWait: %d\n", thread_get_current_thread()->id,
88 		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
89 		dontWait));
90 
91 	status_t error = B_OK;
92 
93 	// get the team debug info
94 	team_debug_info teamDebugInfo;
95 	get_team_debug_info(teamDebugInfo);
96 	sem_id writeLock = teamDebugInfo.debugger_write_lock;
97 
98 	// get the write lock
99 	TRACE(("debugger_write(): acquiring write lock...\n"));
100 	error = acquire_sem_etc(writeLock, 1,
101 		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
102 	if (error != B_OK) {
103 		TRACE(("debugger_write() done1: %" B_PRIx32 "\n", error));
104 		return error;
105 	}
106 
107 	// re-get the team debug info
108 	get_team_debug_info(teamDebugInfo);
109 
110 	if (teamDebugInfo.debugger_port != port
111 		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
112 		// The debugger has changed in the meantime or we are about to be
113 		// handed over to a new debugger. In either case we don't send the
114 		// message.
115 		TRACE(("debugger_write(): %s\n",
116 			(teamDebugInfo.debugger_port != port ? "debugger port changed"
117 				: "handover flag set")));
118 	} else {
119 		TRACE(("debugger_write(): writing to port...\n"));
120 
121 		error = write_port_etc(port, code, buffer, bufferSize,
122 			dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
123 	}
124 
125 	// release the write lock
126 	release_sem(writeLock);
127 
128 	TRACE(("debugger_write() done: %" B_PRIx32 "\n", error));
129 
130 	return error;
131 }
132 
133 
134 /*!	Updates the thread::flags field according to what user debugger flags are
135 	set for the thread.
136 	Interrupts must be disabled and the thread's debug info lock must be held.
137 */
138 static void
139 update_thread_user_debug_flag(Thread* thread)
140 {
141 	if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0)
142 		atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD);
143 	else
144 		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD);
145 }
146 
147 
148 /*!	Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the
149 	given thread.
150 	Interrupts must be disabled and the thread debug info lock must be held.
151 */
152 static void
153 update_thread_breakpoints_flag(Thread* thread)
154 {
155 	Team* team = thread->team;
156 
157 	if (arch_has_breakpoints(&team->debug_info.arch_info))
158 		atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
159 	else
160 		atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
161 }
162 
163 
164 /*!	Updates the Thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
165 	threads of the current team.
166 */
167 static void
168 update_threads_breakpoints_flag()
169 {
170 	Team* team = thread_get_current_thread()->team;
171 
172 	TeamLocker teamLocker(team);
173 
174 	Thread* thread = team->thread_list;
175 
176 	if (arch_has_breakpoints(&team->debug_info.arch_info)) {
177 		for (; thread != NULL; thread = thread->team_next)
178 			atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
179 	} else {
180 		for (; thread != NULL; thread = thread->team_next)
181 			atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
182 	}
183 }
184 
185 
186 /*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of the
187 	given thread, which must be the current thread.
188 */
189 static void
190 update_thread_debugger_installed_flag(Thread* thread)
191 {
192 	Team* team = thread->team;
193 
194 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
195 		atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
196 	else
197 		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
198 }
199 
200 
201 /*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all
202 	threads of the given team.
203 	The team's lock must be held.
204 */
205 static void
206 update_threads_debugger_installed_flag(Team* team)
207 {
208 	Thread* thread = team->thread_list;
209 
210 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
211 		for (; thread != NULL; thread = thread->team_next)
212 			atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
213 	} else {
214 		for (; thread != NULL; thread = thread->team_next)
215 			atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
216 	}
217 }
218 
219 
220 /**
221  *	For the first initialization the function must be called with \a initLock
222  *	set to \c true. If another thread might access the structure at the same
223  *	time, `lock' must be held when calling the function.
224  */
225 void
226 clear_team_debug_info(struct team_debug_info *info, bool initLock)
227 {
228 	if (info) {
229 		arch_clear_team_debug_info(&info->arch_info);
230 		atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
231 		info->debugger_team = -1;
232 		info->debugger_port = -1;
233 		info->nub_thread = -1;
234 		info->nub_port = -1;
235 		info->debugger_write_lock = -1;
236 		info->causing_thread = -1;
237 		info->image_event = 0;
238 		info->breakpoint_manager = NULL;
239 
240 		if (initLock) {
241 			B_INITIALIZE_SPINLOCK(&info->lock);
242 			info->debugger_changed_condition = NULL;
243 		}
244 	}
245 }
246 
247 /**
248  *  `lock' must not be held nor may interrupts be disabled.
249  *  \a info must not be a member of a team struct (or the team struct must no
250  *  longer be accessible, i.e. the team should already be removed).
251  *
252  *	In case the team is still accessible, the procedure is:
253  *	1. get `lock'
254  *	2. copy the team debug info on stack
255  *	3. call clear_team_debug_info() on the team debug info
256  *	4. release `lock'
257  *	5. call destroy_team_debug_info() on the copied team debug info
258  */
259 static void
260 destroy_team_debug_info(struct team_debug_info *info)
261 {
262 	if (info) {
263 		arch_destroy_team_debug_info(&info->arch_info);
264 
265 		// delete the breakpoint manager
266 		delete info->breakpoint_manager;
267 		info->breakpoint_manager = NULL;
268 
269 		// delete the debugger port write lock
270 		if (info->debugger_write_lock >= 0) {
271 			delete_sem(info->debugger_write_lock);
272 			info->debugger_write_lock = -1;
273 		}
274 
275 		// delete the nub port
276 		if (info->nub_port >= 0) {
277 			set_port_owner(info->nub_port, B_CURRENT_TEAM);
278 			delete_port(info->nub_port);
279 			info->nub_port = -1;
280 		}
281 
282 		// wait for the nub thread
283 		if (info->nub_thread >= 0) {
284 			if (info->nub_thread != thread_get_current_thread()->id) {
285 				int32 result;
286 				wait_for_thread(info->nub_thread, &result);
287 			}
288 
289 			info->nub_thread = -1;
290 		}
291 
292 		atomic_set(&info->flags, 0);
293 		info->debugger_team = -1;
294 		info->debugger_port = -1;
295 		info->causing_thread = -1;
296 		info->image_event = -1;
297 	}
298 }
299 
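// Illustrative sketch (not part of the original source): how a caller could
// follow the five-step teardown procedure documented above while the team
// struct is still accessible -- this mirrors what nub_thread_cleanup() does
// further below. `team' is assumed to be a valid Team*.
//
//	team_debug_info copiedInfo;
//	cpu_status state = disable_interrupts();
//	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);		// 1. get `lock'
//	copiedInfo = team->debug_info;						// 2. copy on the stack
//	clear_team_debug_info(&team->debug_info, false);	// 3. clear in place
//	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);		// 4. release `lock'
//	restore_interrupts(state);
//	destroy_team_debug_info(&copiedInfo);				// 5. destroy the copy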
300 
301 void
302 init_thread_debug_info(struct thread_debug_info *info)
303 {
304 	if (info) {
305 		B_INITIALIZE_SPINLOCK(&info->lock);
306 		arch_clear_thread_debug_info(&info->arch_info);
307 		info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
308 		info->debug_port = -1;
309 		info->ignore_signals = 0;
310 		info->ignore_signals_once = 0;
311 		info->profile.sample_area = -1;
312 		info->profile.interval = 0;
313 		info->profile.samples = NULL;
314 		info->profile.flush_needed = false;
315 		info->profile.installed_timer = NULL;
316 	}
317 }
318 
319 
320 /*!	Clears the debug info for the current thread.
321 	Invoked with thread debug info lock being held.
322 */
323 void
324 clear_thread_debug_info(struct thread_debug_info *info, bool dying)
325 {
326 	if (info) {
327 		// cancel profiling timer
328 		if (info->profile.installed_timer != NULL) {
329 			cancel_timer(info->profile.installed_timer);
330 			info->profile.installed_timer->hook = NULL;
331 			info->profile.installed_timer = NULL;
332 		}
333 
334 		arch_clear_thread_debug_info(&info->arch_info);
335 		atomic_set(&info->flags,
336 			B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ? B_THREAD_DEBUG_DYING : 0));
337 		info->debug_port = -1;
338 		info->ignore_signals = 0;
339 		info->ignore_signals_once = 0;
340 		info->profile.sample_area = -1;
341 		info->profile.interval = 0;
342 		info->profile.samples = NULL;
343 		info->profile.flush_needed = false;
344 	}
345 }
346 
347 
348 void
349 destroy_thread_debug_info(struct thread_debug_info *info)
350 {
351 	if (info) {
352 		area_id sampleArea = info->profile.sample_area;
353 		if (sampleArea >= 0) {
354 			area_info areaInfo;
355 			if (get_area_info(sampleArea, &areaInfo) == B_OK) {
356 				unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
357 				delete_area(sampleArea);
358 			}
359 		}
360 
361 		arch_destroy_thread_debug_info(&info->arch_info);
362 
363 		if (info->debug_port >= 0) {
364 			delete_port(info->debug_port);
365 			info->debug_port = -1;
366 		}
367 
368 		info->ignore_signals = 0;
369 		info->ignore_signals_once = 0;
370 
371 		atomic_set(&info->flags, 0);
372 	}
373 }
374 
375 
376 static status_t
377 prepare_debugger_change(team_id teamID, ConditionVariable& condition,
378 	Team*& team)
379 {
380 	// We look up the team by ID, even in the case of the current team, so we
381 	// can be sure that the team is not already dying.
382 	if (teamID == B_CURRENT_TEAM)
383 		teamID = thread_get_current_thread()->team->id;
384 
385 	while (true) {
386 		// get the team
387 		team = Team::GetAndLock(teamID);
388 		if (team == NULL)
389 			return B_BAD_TEAM_ID;
390 		BReference<Team> teamReference(team, true);
391 		TeamLocker teamLocker(team, true);
392 
393 		// don't allow messing with the kernel team
394 		if (team == team_get_kernel_team())
395 			return B_NOT_ALLOWED;
396 
397 		// check whether the condition is already set
398 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
399 
400 		if (team->debug_info.debugger_changed_condition == NULL) {
401 			// nobody there yet -- set our condition variable and be done
402 			team->debug_info.debugger_changed_condition = &condition;
403 			return B_OK;
404 		}
405 
406 		// we'll have to wait
407 		ConditionVariableEntry entry;
408 		team->debug_info.debugger_changed_condition->Add(&entry);
409 
410 		debugInfoLocker.Unlock();
411 		teamLocker.Unlock();
412 
413 		entry.Wait();
414 	}
415 }
416 
417 
418 static void
419 prepare_debugger_change(Team* team, ConditionVariable& condition)
420 {
421 	while (true) {
422 		// check whether the condition is already set
423 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
424 
425 		if (team->debug_info.debugger_changed_condition == NULL) {
426 			// nobody there yet -- set our condition variable and be done
427 			team->debug_info.debugger_changed_condition = &condition;
428 			return;
429 		}
430 
431 		// we'll have to wait
432 		ConditionVariableEntry entry;
433 		team->debug_info.debugger_changed_condition->Add(&entry);
434 
435 		debugInfoLocker.Unlock();
436 
437 		entry.Wait();
438 	}
439 }
440 
441 
442 static void
443 finish_debugger_change(Team* team)
444 {
445 	// unset our condition variable and notify all threads waiting on it
446 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
447 
448 	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
449 	team->debug_info.debugger_changed_condition = NULL;
450 
451 	condition->NotifyAll();
452 }
453 
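// Illustrative sketch (not part of the original source): the usage pattern for
// prepare_debugger_change()/finish_debugger_change(), as employed e.g. by
// thread_hit_debug_event() and nub_thread_cleanup() below. While the condition
// variable is installed, no other thread can change the team's debugger.
//
//	ConditionVariable debugChangeCondition;
//	debugChangeCondition.Init(team, "debug change condition");
//	prepare_debugger_change(team, debugChangeCondition);
//
//	// ... inspect or modify team->debug_info here ...
//
//	finish_debugger_change(team);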
454 
455 void
456 user_debug_prepare_for_exec()
457 {
458 	Thread *thread = thread_get_current_thread();
459 	Team *team = thread->team;
460 
461 	// If a debugger is installed for the team and the thread debug stuff
462 	// initialized, change the ownership of the debug port for the thread
463 	// to the kernel team, since exec_team() deletes all ports owned by this
464 	// team. We change the ownership back later.
465 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
466 		// get the port
467 		port_id debugPort = -1;
468 
469 		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
470 
471 		if ((thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0)
472 			debugPort = thread->debug_info.debug_port;
473 
474 		threadDebugInfoLocker.Unlock();
475 
476 		// set the new port ownership
477 		if (debugPort >= 0)
478 			set_port_owner(debugPort, team_get_kernel_team_id());
479 	}
480 }
481 
482 
483 void
484 user_debug_finish_after_exec()
485 {
486 	Thread *thread = thread_get_current_thread();
487 	Team *team = thread->team;
488 
489 	// If a debugger is installed for the team and the thread debug stuff
490 	// initialized for this thread, change the ownership of its debug port
491 	// back to this team.
492 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
493 		// get the port
494 		port_id debugPort = -1;
495 
496 		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
497 
498 		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
499 			debugPort = thread->debug_info.debug_port;
500 
501 		threadDebugInfoLocker.Unlock();
502 
503 		// set the new port ownership
504 		if (debugPort >= 0)
505 			set_port_owner(debugPort, team->id);
506 	}
507 }
508 
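// Illustrative sketch (not part of the original source): the call sequence the
// two hooks above expect around exec. The actual caller lives in the team
// code; this is only a simplified outline.
//
//	user_debug_prepare_for_exec();
//		// the thread's debug port is now owned by the kernel team and thus
//		// survives the deletion of all ports owned by this team
//	// ... delete the team's ports, load the new image ...
//	user_debug_finish_after_exec();
//		// debug port ownership has been returned to the team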
509 
510 void
511 init_user_debug()
512 {
513 	#ifdef ARCH_INIT_USER_DEBUG
514 		ARCH_INIT_USER_DEBUG();
515 	#endif
516 }
517 
518 
519 static void
520 get_team_debug_info(team_debug_info &teamDebugInfo)
521 {
522 	Thread *thread = thread_get_current_thread();
523 
524 	cpu_status state = disable_interrupts();
525 	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
526 
527 	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));
528 
529 	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
530 	restore_interrupts(state);
531 }
532 
533 
534 static status_t
535 thread_hit_debug_event_internal(debug_debugger_message event,
536 	const void *message, int32 size, bool requireDebugger, bool &restart)
537 {
538 	restart = false;
539 	Thread *thread = thread_get_current_thread();
540 
541 	TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", event: %" B_PRIu32
542 		", message: %p, size: %" B_PRId32 "\n", thread->id, (uint32)event,
543 		message, size));
544 
545 	// check, if there's a debug port already
546 	bool setPort = !(atomic_get(&thread->debug_info.flags)
547 		& B_THREAD_DEBUG_INITIALIZED);
548 
549 	// create a port, if there is none yet
550 	port_id port = -1;
551 	if (setPort) {
552 		char nameBuffer[128];
553 		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %" B_PRId32,
554 			thread->id);
555 
556 		port = create_port(1, nameBuffer);
557 		if (port < 0) {
558 			dprintf("thread_hit_debug_event(): Failed to create debug port: "
559 				"%s\n", strerror(port));
560 			return port;
561 		}
562 	}
563 
564 	// check the debug info structures once more: get the debugger port, set
565 	// the thread's debug port, and update the thread's debug flags
566 	port_id deletePort = port;
567 	port_id debuggerPort = -1;
568 	port_id nubPort = -1;
569 	status_t error = B_OK;
570 	cpu_status state = disable_interrupts();
571 	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
572 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
573 
574 	uint32 threadFlags = thread->debug_info.flags;
575 	threadFlags &= ~B_THREAD_DEBUG_STOP;
576 	bool debuggerInstalled
577 		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
578 	if (thread->id == thread->team->debug_info.nub_thread) {
579 		// Ugh, we're the nub thread. We shouldn't be here.
580 		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %" B_PRId32
581 			"\n", thread->id));
582 
583 		error = B_ERROR;
584 	} else if (debuggerInstalled || !requireDebugger) {
585 		if (debuggerInstalled) {
586 			debuggerPort = thread->team->debug_info.debugger_port;
587 			nubPort = thread->team->debug_info.nub_port;
588 		}
589 
590 		if (setPort) {
591 			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
592 				// someone created a port for us (the port we've created will
593 				// be deleted below)
594 				port = thread->debug_info.debug_port;
595 			} else {
596 				thread->debug_info.debug_port = port;
597 				deletePort = -1;	// keep the port
598 				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
599 			}
600 		} else {
601 			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
602 				port = thread->debug_info.debug_port;
603 			} else {
604 				// someone deleted our port
605 				error = B_ERROR;
606 			}
607 		}
608 	} else
609 		error = B_ERROR;
610 
611 	// update the flags
612 	if (error == B_OK)
613 		threadFlags |= B_THREAD_DEBUG_STOPPED;
614 	atomic_set(&thread->debug_info.flags, threadFlags);
615 
616 	update_thread_user_debug_flag(thread);
617 
618 	threadDebugInfoLocker.Unlock();
619 	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
620 	restore_interrupts(state);
621 
622 	// delete the superfluous port
623 	if (deletePort >= 0)
624 		delete_port(deletePort);
625 
626 	if (error != B_OK) {
627 		TRACE(("thread_hit_debug_event() error: thread: %" B_PRId32 ", error: "
628 			"%" B_PRIx32 "\n", thread->id, error));
629 		return error;
630 	}
631 
632 	// send a message to the debugger port
633 	if (debuggerInstalled) {
634 		// update the message's origin info first
635 		debug_origin *origin = (debug_origin *)message;
636 		origin->thread = thread->id;
637 		origin->team = thread->team->id;
638 		origin->nub_port = nubPort;
639 
640 		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", sending "
641 			"message to debugger port %" B_PRId32 "\n", thread->id,
642 			debuggerPort));
643 
644 		error = debugger_write(debuggerPort, event, message, size, false);
645 	}
646 
647 	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
648 	bool singleStep = false;
649 
650 	if (error == B_OK) {
651 		bool done = false;
652 		while (!done) {
653 			// read a command from the debug port
654 			int32 command;
655 			debugged_thread_message_data commandMessage;
656 			ssize_t commandMessageSize = read_port_etc(port, &command,
657 				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
658 				0);
659 
660 			if (commandMessageSize < 0) {
661 				error = commandMessageSize;
662 				TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed "
663 					"to receive message from port %" B_PRId32 ": %" B_PRIx32 "\n",
664 					thread->id, port, error));
665 				break;
666 			}
667 
668 			switch (command) {
669 				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
670 					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
671 						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
672 						thread->id));
673 					result = commandMessage.continue_thread.handle_event;
674 
675 					singleStep = commandMessage.continue_thread.single_step;
676 					done = true;
677 					break;
678 
679 				case B_DEBUGGED_THREAD_SET_CPU_STATE:
680 				{
681 					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
682 						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
683 						thread->id));
684 					arch_set_debug_cpu_state(
685 						&commandMessage.set_cpu_state.cpu_state);
686 
687 					break;
688 				}
689 
690 				case B_DEBUGGED_THREAD_GET_CPU_STATE:
691 				{
692 					port_id replyPort = commandMessage.get_cpu_state.reply_port;
693 
694 					// prepare the message
695 					debug_nub_get_cpu_state_reply replyMessage;
696 					replyMessage.error = B_OK;
697 					replyMessage.message = event;
698 					arch_get_debug_cpu_state(&replyMessage.cpu_state);
699 
700 					// send it
701 					error = kill_interruptable_write_port(replyPort, event,
702 						&replyMessage, sizeof(replyMessage));
703 
704 					break;
705 				}
706 
707 				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
708 				{
709 					// Check, if the debugger really changed, i.e. is different
710 					// than the one we know.
711 					team_debug_info teamDebugInfo;
712 					get_team_debug_info(teamDebugInfo);
713 
714 					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
715 						if (!debuggerInstalled
716 							|| teamDebugInfo.debugger_port != debuggerPort) {
717 							// debugger was installed or has changed: restart
718 							// this function
719 							restart = true;
720 							done = true;
721 						}
722 					} else {
723 						if (debuggerInstalled) {
724 							// debugger is gone: continue the thread normally
725 							done = true;
726 						}
727 					}
728 
729 					break;
730 				}
731 			}
732 		}
733 	} else {
734 		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed to send "
735 			"message to debugger port %" B_PRId32 ": %" B_PRIx32 "\n",
736 			thread->id, debuggerPort, error));
737 	}
738 
739 	// update the thread debug info
740 	bool destroyThreadInfo = false;
741 	thread_debug_info threadDebugInfo;
742 
743 	state = disable_interrupts();
744 	threadDebugInfoLocker.Lock();
745 
746 	// check, if the team is still being debugged
747 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
748 	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
749 		// update the single-step flag
750 		if (singleStep) {
751 			atomic_or(&thread->debug_info.flags,
752 				B_THREAD_DEBUG_SINGLE_STEP);
753 			atomic_or(&thread->flags, THREAD_FLAGS_SINGLE_STEP);
754 		} else {
755 			atomic_and(&thread->debug_info.flags,
756 				~(int32)B_THREAD_DEBUG_SINGLE_STEP);
757 		}
758 
759 		// unset the "stopped" state
760 		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);
761 
762 		update_thread_user_debug_flag(thread);
763 	} else {
764 		// the debugger is gone: cleanup our info completely
765 		threadDebugInfo = thread->debug_info;
766 		clear_thread_debug_info(&thread->debug_info, false);
767 		destroyThreadInfo = true;
768 	}
769 
770 	threadDebugInfoLocker.Unlock();
771 	restore_interrupts(state);
772 
773 	// enable/disable single stepping
774 	arch_update_thread_single_step();
775 
776 	if (destroyThreadInfo)
777 		destroy_thread_debug_info(&threadDebugInfo);
778 
779 	return (error == B_OK ? result : error);
780 }
781 
782 
783 static status_t
784 thread_hit_debug_event(debug_debugger_message event, const void *message,
785 	int32 size, bool requireDebugger)
786 {
787 	status_t result;
788 	bool restart;
789 	do {
790 		restart = false;
791 		result = thread_hit_debug_event_internal(event, message, size,
792 			requireDebugger, restart);
793 	} while (result >= 0 && restart);
794 
795 	// Prepare to continue -- we install a debugger change condition, so no one
796 	// will change the debugger while we're playing with the breakpoint manager.
797 	// TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
798 	Team* team = thread_get_current_thread()->team;
799 	ConditionVariable debugChangeCondition;
800 	debugChangeCondition.Init(team, "debug change condition");
801 	prepare_debugger_change(team, debugChangeCondition);
802 
803 	if (team->debug_info.breakpoint_manager != NULL) {
804 		bool isSyscall;
805 		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
806 		if (pc != NULL && !isSyscall)
807 			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
808 	}
809 
810 	finish_debugger_change(team);
811 
812 	return result;
813 }
814 
815 
816 static status_t
817 thread_hit_serious_debug_event(debug_debugger_message event,
818 	const void *message, int32 messageSize)
819 {
820 	// ensure that a debugger is installed for this team
821 	status_t error = ensure_debugger_installed();
822 	if (error != B_OK) {
823 		Thread *thread = thread_get_current_thread();
824 		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
825 			"thread: %" B_PRId32 " (%s): %s\n", thread->id, thread->name,
826 			strerror(error));
827 		return error;
828 	}
829 
830 	// enter the debug loop
831 	return thread_hit_debug_event(event, message, messageSize, true);
832 }
833 
834 
835 void
836 user_debug_pre_syscall(uint32 syscall, void *args)
837 {
838 	// check whether a debugger is installed
839 	Thread *thread = thread_get_current_thread();
840 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
841 	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
842 		return;
843 
844 	// check whether pre-syscall tracing is enabled for team or thread
845 	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
846 	if ((teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
847 			|| (threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
848 		// prepare the message
849 		debug_pre_syscall message;
850 		message.syscall = syscall;
851 
852 		// copy the syscall args
853 		if (syscall < (uint32)kSyscallCount) {
854 			if (kSyscallInfos[syscall].parameter_size > 0)
855 				memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
856 		}
857 
858 		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
859 			sizeof(message), true);
860 	}
861 
862 	if ((teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
863 			|| (threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
864 		// The syscall_start_time storage is shared with the profiler's interval.
865 		if (thread->debug_info.profile.samples == NULL)
866 			thread->debug_info.profile.syscall_start_time = system_time();
867 	}
868 }
869 
870 
871 void
872 user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue)
873 {
874 	// check whether a debugger is installed
875 	Thread *thread = thread_get_current_thread();
876 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
877 	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
878 		return;
879 
880 	// check if we need to flush the profiling buffer
881 	if (thread->debug_info.profile.flush_needed)
882 		profiling_flush(NULL);
883 
884 	// check whether post-syscall tracing is enabled for team or thread
885 	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
886 	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
887 			&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
888 		return;
889 	}
890 
891 	bigtime_t startTime = 0;
892 	if (thread->debug_info.profile.samples == NULL) {
893 		startTime = thread->debug_info.profile.syscall_start_time;
894 		thread->debug_info.profile.syscall_start_time = 0;
895 	}
896 
897 	// prepare the message
898 	debug_post_syscall message;
899 	message.start_time = startTime;
900 	message.end_time = system_time();
901 	message.return_value = returnValue;
902 	message.syscall = syscall;
903 
904 	// copy the syscall args
905 	if (syscall < (uint32)kSyscallCount) {
906 		if (kSyscallInfos[syscall].parameter_size > 0)
907 			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
908 	}
909 
910 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
911 		sizeof(message), true);
912 }
913 
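// Illustrative sketch (not part of the original source): on the debugger side,
// a received debug_post_syscall message allows computing the syscall duration.
// Note that start_time is 0 while profiling is active (see above), so that
// case has to be treated as "duration unknown".
//
//	bigtime_t duration = 0;
//	if (message.start_time != 0)
//		duration = message.end_time - message.start_time;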
914 
915 /**	\brief To be called when an unhandled processor exception (error/fault)
916  *		   occurred.
917  *	\param exception The debug_exception_type value identifying the kind of fault.
918  *	\param signal The signal corresponding to the exception.
919  *	\return \c true, if the caller shall continue normally, i.e. usually send
920  *			a deadly signal. \c false, if the debugger insists on continuing
921  *			the program (e.g. because it has removed the cause of the
922  *			problem).
923  */
924 bool
925 user_debug_exception_occurred(debug_exception_type exception, int signal)
926 {
927 	// First check whether there's a signal handler installed for the signal.
928 	// If so, we don't want to install a debugger for the team. We always send
929 	// the signal instead. An already installed debugger will be notified, if
930 	// the signal instead. An already installed debugger will be notified if
931 	// it has requested notifications of the signal.
932 	if (sigaction(signal, NULL, &signalAction) == 0
933 		&& signalAction.sa_handler != SIG_DFL) {
934 		return true;
935 	}
936 
937 	// prepare the message
938 	debug_exception_occurred message;
939 	message.exception = exception;
940 	message.signal = signal;
941 
942 	status_t result = thread_hit_serious_debug_event(
943 		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
944 	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
945 }
946 
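// Illustrative sketch (not part of the original source): how an architecture's
// fault handler might use the return value, following the contract documented
// above. The concrete exception and signal values are placeholders.
//
//	debug_exception_type exception = /* e.g. a general protection fault */;
//	int signal = SIGSEGV;
//	if (user_debug_exception_occurred(exception, signal)) {
//		// the debugger did not object: proceed normally, i.e. usually
//		// deliver the (deadly) signal to the team
//	} else {
//		// the debugger resolved the problem: let the thread continue
//	}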
947 
948 bool
949 user_debug_handle_signal(int signal, struct sigaction *handler, siginfo_t *info,
950 	bool deadly)
951 {
952 	// check, if a debugger is installed and is interested in signals
953 	Thread *thread = thread_get_current_thread();
954 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
955 	if (~teamDebugFlags
956 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
957 		return true;
958 	}
959 
960 	// prepare the message
961 	debug_signal_received message;
962 	message.signal = signal;
963 	message.handler = *handler;
964 	message.info = *info;
965 	message.deadly = deadly;
966 
967 	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
968 		&message, sizeof(message), true);
969 	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
970 }
971 
972 
973 void
974 user_debug_stop_thread()
975 {
976 	// check whether this is actually an emulated single-step notification
977 	Thread* thread = thread_get_current_thread();
978 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
979 
980 	bool singleStepped = false;
981 	if ((atomic_and(&thread->debug_info.flags,
982 				~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
983 			& B_THREAD_DEBUG_NOTIFY_SINGLE_STEP) != 0) {
984 		singleStepped = true;
985 	}
986 
987 	threadDebugInfoLocker.Unlock();
988 
989 	if (singleStepped) {
990 		user_debug_single_stepped();
991 	} else {
992 		debug_thread_debugged message;
993 		thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED,
994 			&message, sizeof(message));
995 	}
996 }
997 
998 
999 void
1000 user_debug_team_created(team_id teamID)
1001 {
1002 	// check, if a debugger is installed and is interested in team creation
1003 	// events
1004 	Thread *thread = thread_get_current_thread();
1005 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1006 	if (~teamDebugFlags
1007 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
1008 		return;
1009 	}
1010 
1011 	// prepare the message
1012 	debug_team_created message;
1013 	message.new_team = teamID;
1014 
1015 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
1016 		sizeof(message), true);
1017 }
1018 
1019 
1020 void
1021 user_debug_team_deleted(team_id teamID, port_id debuggerPort, status_t status, int signal,
1022 	team_usage_info* usageInfo)
1023 {
1024 	if (debuggerPort >= 0) {
1025 		TRACE(("user_debug_team_deleted(team: %" B_PRId32 ", debugger port: "
1026 			"%" B_PRId32 ")\n", teamID, debuggerPort));
1027 
1028 		debug_team_deleted message;
1029 		message.origin.thread = -1;
1030 		message.origin.team = teamID;
1031 		message.origin.nub_port = -1;
1032 		message.status = status;
1033 		message.signal = signal;
1034 		message.usage = *usageInfo;
1035 		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
1036 			sizeof(message), B_RELATIVE_TIMEOUT, 0);
1037 	}
1038 }
1039 
1040 
1041 void
1042 user_debug_team_exec()
1043 {
1044 	// check, if a debugger is installed and is interested in team creation
1045 	// events
1046 	Thread *thread = thread_get_current_thread();
1047 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1048 	if (~teamDebugFlags
1049 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
1050 		return;
1051 	}
1052 
1053 	// prepare the message
1054 	debug_team_exec message;
1055 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1056 		+ 1;
1057 
1058 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
1059 		sizeof(message), true);
1060 }
1061 
1062 
1063 /*!	Called by a new userland thread to update the debugging related flags of
1064 	\c Thread::flags before the thread first enters userland.
1065 	\param thread The calling thread.
1066 */
1067 void
1068 user_debug_update_new_thread_flags(Thread* thread)
1069 {
1070 	// lock the thread's debug info and update its flags
1071 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1072 
1073 	update_thread_user_debug_flag(thread);
1074 	update_thread_breakpoints_flag(thread);
1075 	update_thread_debugger_installed_flag(thread);
1076 }
1077 
1078 
1079 void
1080 user_debug_thread_created(thread_id threadID)
1081 {
1082 	// check, if a debugger is installed and is interested in thread events
1083 	Thread *thread = thread_get_current_thread();
1084 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1085 	if (~teamDebugFlags
1086 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
1087 		return;
1088 	}
1089 
1090 	// prepare the message
1091 	debug_thread_created message;
1092 	message.new_thread = threadID;
1093 
1094 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message,
1095 		sizeof(message), true);
1096 }
1097 
1098 
1099 void
1100 user_debug_thread_deleted(team_id teamID, thread_id threadID, status_t status)
1101 {
1102 	// Things are a bit complicated here, since this thread no longer belongs to
1103 	// the debugged team (but to the kernel). So we can't use debugger_write().
1104 
1105 	// get the team debug flags and debugger port
1106 	Team* team = Team::Get(teamID);
1107 	if (team == NULL)
1108 		return;
1109 	BReference<Team> teamReference(team, true);
1110 
1111 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
1112 
1113 	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
1114 	port_id debuggerPort = team->debug_info.debugger_port;
1115 	sem_id writeLock = team->debug_info.debugger_write_lock;
1116 
1117 	debugInfoLocker.Unlock();
1118 
1119 	// check, if a debugger is installed and is interested in thread events
1120 	if (~teamDebugFlags
1121 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
1122 		return;
1123 	}
1124 
1125 	// acquire the debugger write lock
1126 	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
1127 	if (error != B_OK)
1128 		return;
1129 
1130 	// re-get the team debug info -- we need to check whether anything changed
1131 	debugInfoLocker.Lock();
1132 
1133 	teamDebugFlags = atomic_get(&team->debug_info.flags);
1134 	port_id newDebuggerPort = team->debug_info.debugger_port;
1135 
1136 	debugInfoLocker.Unlock();
1137 
1138 	// Send the message only if the debugger hasn't changed in the meantime or
1139 	// the team is about to be handed over.
1140 	if (newDebuggerPort == debuggerPort
1141 		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
1142 		debug_thread_deleted message;
1143 		message.origin.thread = threadID;
1144 		message.origin.team = teamID;
1145 		message.origin.nub_port = -1;
1146 		message.status = status;
1147 
1148 		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
1149 			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
1150 	}
1151 
1152 	// release the debugger write lock
1153 	release_sem(writeLock);
1154 }
1155 
1156 
1157 /*!	Called for a thread that is about to die, cleaning up all user debug
1158 	facilities installed for the thread.
1159 	\param thread The current thread, the one that is going to die.
1160 */
1161 void
1162 user_debug_thread_exiting(Thread* thread)
1163 {
1164 	// thread is the current thread, so using team is safe
1165 	Team* team = thread->team;
1166 
1167 	InterruptsLocker interruptsLocker;
1168 
1169 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1170 
1171 	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
1172 	port_id debuggerPort = team->debug_info.debugger_port;
1173 
1174 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1175 
1176 	// check, if a debugger is installed
1177 	if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
1178 		|| debuggerPort < 0) {
1179 		return;
1180 	}
1181 
1182 	// detach the profile info and mark the thread dying
1183 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1184 
1185 	thread_debug_info& threadDebugInfo = thread->debug_info;
1186 	if (threadDebugInfo.profile.samples == NULL)
1187 		return;
1188 
1189 	area_id sampleArea = threadDebugInfo.profile.sample_area;
1190 	int32 sampleCount = threadDebugInfo.profile.sample_count;
1191 	int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
1192 	int32 stackDepth = threadDebugInfo.profile.stack_depth;
1193 	bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
1194 	int32 imageEvent = threadDebugInfo.profile.image_event;
1195 	threadDebugInfo.profile.sample_area = -1;
1196 	threadDebugInfo.profile.samples = NULL;
1197 	threadDebugInfo.profile.flush_needed = false;
1198 	bigtime_t lastCPUTime;
	{
1199 		SpinLocker threadTimeLocker(thread->time_lock);
1200 		lastCPUTime = thread->CPUTime(false);
1201 	}
1202 
1203 	atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);
1204 
1205 	threadDebugInfoLocker.Unlock();
1206 	interruptsLocker.Unlock();
1207 
1208 	// notify the debugger
1209 	debug_profiler_update message;
1210 	message.origin.thread = thread->id;
1211 	message.origin.team = thread->team->id;
1212 	message.origin.nub_port = -1;	// asynchronous message
1213 	message.sample_count = sampleCount;
1214 	message.dropped_ticks = droppedTicks;
1215 	message.stack_depth = stackDepth;
1216 	message.variable_stack_depth = variableStackDepth;
1217 	message.image_event = imageEvent;
1218 	message.stopped = true;
1219 	message.last_cpu_time = lastCPUTime;
1220 	debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
1221 		&message, sizeof(message), false);
1222 
1223 	if (sampleArea >= 0) {
1224 		area_info areaInfo;
1225 		if (get_area_info(sampleArea, &areaInfo) == B_OK) {
1226 			unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
1227 			delete_area(sampleArea);
1228 		}
1229 	}
1230 }
1231 
1232 
1233 void
1234 user_debug_image_created(const image_info *imageInfo)
1235 {
1236 	// check, if a debugger is installed and is interested in image events
1237 	Thread *thread = thread_get_current_thread();
1238 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1239 	if (~teamDebugFlags
1240 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
1241 		return;
1242 	}
1243 
1244 	// prepare the message
1245 	debug_image_created message;
1246 	memcpy(&message.info, imageInfo, sizeof(image_info));
1247 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1248 		+ 1;
1249 
1250 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
1251 		sizeof(message), true);
1252 }
1253 
1254 
1255 void
1256 user_debug_image_deleted(const image_info *imageInfo)
1257 {
1258 	// check, if a debugger is installed and is interested in image events
1259 	Thread *thread = thread_get_current_thread();
1260 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1261 	if (~teamDebugFlags
1262 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
1263 		return;
1264 	}
1265 
1266 	// prepare the message
1267 	debug_image_deleted message;
1268 	memcpy(&message.info, imageInfo, sizeof(image_info));
1269 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1270 		+ 1;
1271 
1272 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message,
1273 		sizeof(message), true);
1274 }
1275 
1276 
1277 void
1278 user_debug_breakpoint_hit(bool software)
1279 {
1280 	// prepare the message
1281 	debug_breakpoint_hit message;
1282 	arch_get_debug_cpu_state(&message.cpu_state);
1283 
1284 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
1285 		sizeof(message));
1286 }
1287 
1288 
1289 void
1290 user_debug_watchpoint_hit()
1291 {
1292 	// prepare the message
1293 	debug_watchpoint_hit message;
1294 	arch_get_debug_cpu_state(&message.cpu_state);
1295 
1296 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
1297 		sizeof(message));
1298 }
1299 
1300 
1301 void
1302 user_debug_single_stepped()
1303 {
1304 	// clear the single-step thread flag
1305 	Thread* thread = thread_get_current_thread();
1306 	atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_SINGLE_STEP);
1307 
1308 	// prepare the message
1309 	debug_single_step message;
1310 	arch_get_debug_cpu_state(&message.cpu_state);
1311 
1312 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
1313 		sizeof(message));
1314 }
1315 
1316 
1317 /*!	Schedules the profiling timer for the current thread.
1318 	The caller must hold the thread's debug info lock.
1319 	\param thread The current thread.
1320 	\param interval The time after which the timer should fire.
1321 */
1322 static void
1323 schedule_profiling_timer(Thread* thread, bigtime_t interval)
1324 {
1325 	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
1326 	// Use the "hook" field to sanity-check that this timer is not scheduled.
1327 	ASSERT(timer->hook == NULL);
1328 	thread->debug_info.profile.installed_timer = timer;
1329 	thread->debug_info.profile.timer_end = system_time() + interval;
1330 	add_timer(timer, &profiling_event, interval, B_ONE_SHOT_RELATIVE_TIMER);
1331 }
1332 
1333 
1334 /*!	Returns the time remaining for the current profiling timer.
1335 	The caller must hold the thread's debug info lock.
1336 	\param thread The current thread.
1337 */
1338 static bigtime_t
1339 profiling_timer_left(Thread* thread)
1340 {
1341 	return thread->debug_info.profile.timer_end - system_time();
1342 }
1343 
1344 
1345 /*!	Samples the current thread's instruction pointer/stack trace.
1346 	The caller must hold the current thread's debug info lock.
1347 	\returns Whether the profiling timer should be rescheduled.
1348 */
1349 static bool
1350 profiling_do_sample()
1351 {
1352 	Thread* thread = thread_get_current_thread();
1353 	thread_debug_info& debugInfo = thread->debug_info;
1354 
1355 	if (debugInfo.profile.samples == NULL)
1356 		return false;
1357 
1358 	// Check, whether the buffer is full or an image event occurred since the
1359 	// last sample was taken.
1360 	int32 maxSamples = debugInfo.profile.max_samples;
1361 	int32 sampleCount = debugInfo.profile.sample_count;
1362 	int32 stackDepth = debugInfo.profile.stack_depth;
1363 	int32 imageEvent = thread->team->debug_info.image_event;
1364 	if (debugInfo.profile.sample_count > 0) {
1365 		if (debugInfo.profile.last_image_event < imageEvent
1366 			&& debugInfo.profile.variable_stack_depth
1367 			&& sampleCount + 2 <= maxSamples) {
1368 			// an image event occurred, but we use variable stack depth and
1369 			// have enough room in the buffer to indicate an image event
1370 			addr_t* event = debugInfo.profile.samples + sampleCount;
1371 			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
1372 			event[1] = imageEvent;
1373 			sampleCount += 2;
1374 			debugInfo.profile.sample_count = sampleCount;
1375 			debugInfo.profile.last_image_event = imageEvent;
1376 		}
1377 
1378 		if (debugInfo.profile.last_image_event < imageEvent
1379 				|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
1380 			debugInfo.profile.flush_needed = true;
1381 
1382 			// If the buffer is not full yet, we add the samples,
1383 			// otherwise we have to drop them.
1384 			if (maxSamples - sampleCount < stackDepth) {
1385 				debugInfo.profile.dropped_ticks++;
1386 				return true;
1387 			}
1388 		}
1389 	} else {
1390 		// first sample -- set the image event
1391 		debugInfo.profile.image_event = imageEvent;
1392 		debugInfo.profile.last_image_event = imageEvent;
1393 	}
1394 
1395 	// get the samples
1396 	uint32 flags = STACK_TRACE_USER;
1397 	int32 skipIFrames = 0;
1398 	if (debugInfo.profile.profile_kernel) {
1399 		flags |= STACK_TRACE_KERNEL;
1400 		skipIFrames = 1;
1401 	}
1402 
1403 	addr_t* returnAddresses = debugInfo.profile.samples
1404 		+ debugInfo.profile.sample_count;
1405 	if (debugInfo.profile.variable_stack_depth) {
1406 		// variable sample count per hit
1407 		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
1408 			stackDepth - 1, skipIFrames, 0, flags);
1409 
1410 		debugInfo.profile.sample_count += *returnAddresses + 1;
1411 	} else {
1412 		// fixed sample count per hit
1413 		if (stackDepth > 1 || !debugInfo.profile.profile_kernel) {
1414 			int32 count = arch_debug_get_stack_trace(returnAddresses,
1415 				stackDepth, skipIFrames, 0, flags);
1416 
1417 			for (int32 i = count; i < stackDepth; i++)
1418 				returnAddresses[i] = 0;
1419 		} else
1420 			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);
1421 
1422 		debugInfo.profile.sample_count += stackDepth;
1423 	}
1424 
1425 	return true;
1426 }
1427 
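// Illustrative sketch (not part of the original source): how a consumer could
// walk a variable-stack-depth sample buffer filled by the function above. Each
// tick is stored as a frame count followed by that many return addresses; an
// image event is stored as the pair { B_DEBUG_PROFILE_IMAGE_EVENT, event }.
// `samples' and `sampleCount' are assumed to come from a profiler update.
//
//	for (int32 i = 0; i < sampleCount;) {
//		addr_t header = samples[i++];
//		if (header == B_DEBUG_PROFILE_IMAGE_EVENT) {
//			int32 imageEvent = (int32)samples[i++];
//			// ... refresh the image list for imageEvent ...
//		} else {
//			// `header' return addresses of one sampled stack follow
//			i += (int32)header;
//		}
//	}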
1428 
1429 static void
1430 profiling_flush(void*)
1431 {
1432 	// This function may be called as a post_interrupt_callback. When it is,
1433 	// it is undefined whether the function is called with interrupts enabled
1434 	// or disabled. (When called elsewhere, interrupts will always be enabled.)
1435 	// We are allowed to enable interrupts, though. First make sure interrupts
1436 	// are disabled.
1437 	disable_interrupts();
1438 
1439 	Thread* thread = thread_get_current_thread();
1440 	thread_debug_info& debugInfo = thread->debug_info;
1441 
1442 	SpinLocker threadDebugInfoLocker(debugInfo.lock);
1443 
1444 	if (debugInfo.profile.samples != NULL && debugInfo.profile.flush_needed) {
1445 		int32 sampleCount = debugInfo.profile.sample_count;
1446 		int32 droppedTicks = debugInfo.profile.dropped_ticks;
1447 		int32 stackDepth = debugInfo.profile.stack_depth;
1448 		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
1449 		int32 imageEvent = debugInfo.profile.image_event;
1450 
1451 		// prevent the timer from running until after we flush
1452 		bigtime_t interval = debugInfo.profile.interval;
1453 		if (debugInfo.profile.installed_timer != NULL) {
1454 			interval = max_c(profiling_timer_left(thread), 0);
1455 			cancel_timer(debugInfo.profile.installed_timer);
1456 			debugInfo.profile.installed_timer->hook = NULL;
1457 			debugInfo.profile.installed_timer = NULL;
1458 		}
1459 		debugInfo.profile.interval_left = -1;
1460 
1461 		// notify the debugger
1462 		debugInfo.profile.sample_count = 0;
1463 		debugInfo.profile.dropped_ticks = 0;
1464 		debugInfo.profile.flush_needed = false;
1465 
1466 		threadDebugInfoLocker.Unlock();
1467 		enable_interrupts();
1468 
1469 		// prepare the message
1470 		debug_profiler_update message;
1471 		message.sample_count = sampleCount;
1472 		message.dropped_ticks = droppedTicks;
1473 		message.stack_depth = stackDepth;
1474 		message.variable_stack_depth = variableStackDepth;
1475 		message.image_event = imageEvent;
1476 		message.stopped = false;
1477 
1478 		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
1479 			sizeof(message), false);
1480 
1481 		disable_interrupts();
1482 		threadDebugInfoLocker.Lock();
1483 		if (debugInfo.profile.samples != NULL)
1484 			schedule_profiling_timer(thread, interval);
1485 	}
1486 
1487 	threadDebugInfoLocker.Unlock();
1488 	enable_interrupts();
1489 }
1490 
1491 
1492 /*!	Profiling timer event callback.
1493 	Called with interrupts disabled.
1494 */
1495 static int32
1496 profiling_event(timer* /*unused*/)
1497 {
1498 	Thread* thread = thread_get_current_thread();
1499 	thread_debug_info& debugInfo = thread->debug_info;
1500 
1501 	SpinLocker threadDebugInfoLocker(debugInfo.lock);
1502 	debugInfo.profile.installed_timer->hook = NULL;
1503 	debugInfo.profile.installed_timer = NULL;
1504 
1505 	if (profiling_do_sample()) {
1506 		// Check if the sample buffer needs to be flushed. We can't do it here,
1507 		// since we're in an interrupt handler, and we can't set the callback
1508 		// if we interrupted a kernel function, since the callback will pause
1509 		// this thread. (The post_syscall hook will do the flush in that case.)
1510 		if (debugInfo.profile.flush_needed
1511 				&& !IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
1512 			thread->post_interrupt_callback = profiling_flush;
1513 
1514 			// We don't reschedule the timer here because profiling_flush() will
1515 			// lead to the thread being descheduled until we are told to continue.
1516 			// The timer will be rescheduled after the flush concludes.
1517 			debugInfo.profile.interval_left = -1;
1518 		} else
1519 			schedule_profiling_timer(thread, debugInfo.profile.interval);
1520 	}
1521 
1522 	return B_HANDLED_INTERRUPT;
1523 }
1524 
1525 
1526 /*!	Called by the scheduler when a debugged thread has been unscheduled.
1527 	The scheduler lock is being held.
1528 */
1529 void
1530 user_debug_thread_unscheduled(Thread* thread)
1531 {
1532 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1533 
1534 	// if running, cancel the profiling timer
1535 	struct timer* timer = thread->debug_info.profile.installed_timer;
1536 	if (timer != NULL) {
1537 		// track remaining time
1538 		bigtime_t left = profiling_timer_left(thread);
1539 		thread->debug_info.profile.interval_left = max_c(left, 0);
1540 		thread->debug_info.profile.installed_timer->hook = NULL;
1541 		thread->debug_info.profile.installed_timer = NULL;
1542 
1543 		// cancel timer
1544 		threadDebugInfoLocker.Unlock();
1545 			// not necessary, but doesn't harm and reduces contention
1546 		cancel_timer(timer);
1547 			// since invoked on the same CPU, this will not possibly wait for
1548 			// an already called timer hook
1549 	}
1550 }
1551 
1552 
1553 /*!	Called by the scheduler when a debugged thread has been scheduled.
1554 	The scheduler lock is being held.
1555 */
1556 void
1557 user_debug_thread_scheduled(Thread* thread)
1558 {
1559 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1560 
1561 	if (thread->debug_info.profile.samples != NULL
1562 			&& thread->debug_info.profile.interval_left >= 0) {
1563 		// install profiling timer
1564 		schedule_profiling_timer(thread,
1565 			thread->debug_info.profile.interval_left);
1566 	}
1567 }
1568 
1569 
1570 /*!	\brief Called by the debug nub thread of a team to broadcast a message to
1571 		all threads of the team that are initialized for debugging (and
1572 		thus have a debug port).
1573 */
1574 static void
1575 broadcast_debugged_thread_message(Thread *nubThread, int32 code,
1576 	const void *message, int32 size)
1577 {
1578 	// iterate through the threads
1579 	thread_info threadInfo;
1580 	int32 cookie = 0;
1581 	while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
1582 			== B_OK) {
1583 		// get the thread and lock it
1584 		Thread* thread = Thread::GetAndLock(threadInfo.thread);
1585 		if (thread == NULL)
1586 			continue;
1587 
1588 		BReference<Thread> threadReference(thread, true);
1589 		ThreadLocker threadLocker(thread, true);
1590 
1591 		// get the thread's debug port
1592 		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1593 
1594 		port_id threadDebugPort = -1;
1595 		if (thread && thread != nubThread && thread->team == nubThread->team
1596 			&& (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
1597 			&& (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
1598 			threadDebugPort = thread->debug_info.debug_port;
1599 		}
1600 
1601 		threadDebugInfoLocker.Unlock();
1602 		threadLocker.Unlock();
1603 
1604 		// send the message to the thread
1605 		if (threadDebugPort >= 0) {
1606 			status_t error = kill_interruptable_write_port(threadDebugPort,
1607 				code, message, size);
1608 			if (error != B_OK) {
1609 				TRACE(("broadcast_debugged_thread_message(): Failed to send "
1610 					"message to thread %" B_PRId32 ": %" B_PRIx32 "\n",
1611 					thread->id, error));
1612 			}
1613 		}
1614 	}
1615 }
1616 
1617 
1618 static void
1619 nub_thread_cleanup(Thread *nubThread)
1620 {
1621 	TRACE(("nub_thread_cleanup(%" B_PRId32 "): debugger port: %" B_PRId32 "\n",
1622 		nubThread->id, nubThread->team->debug_info.debugger_port));
1623 
1624 	ConditionVariable debugChangeCondition;
1625 	debugChangeCondition.Init(nubThread->team, "debug change condition");
1626 	prepare_debugger_change(nubThread->team, debugChangeCondition);
1627 
1628 	team_debug_info teamDebugInfo;
1629 	bool destroyDebugInfo = false;
1630 
1631 	TeamLocker teamLocker(nubThread->team);
1632 		// required by update_threads_debugger_installed_flag()
1633 
1634 	cpu_status state = disable_interrupts();
1635 	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1636 
1637 	team_debug_info &info = nubThread->team->debug_info;
1638 	if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
1639 			&& info.nub_thread == nubThread->id) {
1640 		teamDebugInfo = info;
1641 		clear_team_debug_info(&info, false);
1642 		destroyDebugInfo = true;
1643 	}
1644 
1645 	// update the thread::flags fields
1646 	update_threads_debugger_installed_flag(nubThread->team);
1647 
1648 	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1649 	restore_interrupts(state);
1650 
1651 	teamLocker.Unlock();
1652 
1653 	if (destroyDebugInfo)
1654 		teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();
1655 
1656 	finish_debugger_change(nubThread->team);
1657 
1658 	if (destroyDebugInfo)
1659 		destroy_team_debug_info(&teamDebugInfo);
1660 
1661 	// notify all threads that the debugger is gone
1662 	broadcast_debugged_thread_message(nubThread,
1663 		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
1664 }
1665 
1666 
1667 /**	\brief Debug nub thread helper function that returns the debug port of
1668  *		   a thread of the same team.
1669  */
1670 static status_t
1671 debug_nub_thread_get_thread_debug_port(Thread *nubThread,
1672 	thread_id threadID, port_id &threadDebugPort)
1673 {
1674 	threadDebugPort = -1;
1675 
1676 	// get the thread
1677 	Thread* thread = Thread::GetAndLock(threadID);
1678 	if (thread == NULL)
1679 		return B_BAD_THREAD_ID;
1680 	BReference<Thread> threadReference(thread, true);
1681 	ThreadLocker threadLocker(thread, true);
1682 
1683 	// get the debug port
1684 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1685 
1686 	if (thread->team != nubThread->team)
1687 		return B_BAD_VALUE;
1688 	if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) == 0)
1689 		return B_BAD_THREAD_STATE;
1690 
1691 	threadDebugPort = thread->debug_info.debug_port;
1692 
1693 	threadDebugInfoLocker.Unlock();
1694 
1695 	if (threadDebugPort < 0)
1696 		return B_ERROR;
1697 
1698 	return B_OK;
1699 }
1700 
1701 
1702 static status_t
1703 debug_nub_thread(void *)
1704 {
1705 	Thread *nubThread = thread_get_current_thread();
1706 
1707 	// check whether we're still the current nub thread and get our port
1708 	cpu_status state = disable_interrupts();
1709 	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1710 
1711 	if (nubThread->team->debug_info.nub_thread != nubThread->id) {
1712 		RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1713 		restore_interrupts(state);
1714 		return 0;
1715 	}
1716 
1717 	port_id port = nubThread->team->debug_info.nub_port;
1718 	sem_id writeLock = nubThread->team->debug_info.debugger_write_lock;
1719 	BreakpointManager* breakpointManager
1720 		= nubThread->team->debug_info.breakpoint_manager;
1721 
1722 	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1723 	restore_interrupts(state);
1724 
1725 	TRACE(("debug_nub_thread() thread: %" B_PRId32 ", team %" B_PRId32 ", nub "
1726 		"port: %" B_PRId32 "\n", nubThread->id, nubThread->team->id, port));
1727 
1728 	// notify all threads that a debugger has been installed
1729 	broadcast_debugged_thread_message(nubThread,
1730 		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
1731 
1732 	// command processing loop
1733 	while (true) {
1734 		int32 command;
1735 		debug_nub_message_data message;
1736 		ssize_t messageSize = read_port_etc(port, &command, &message,
1737 			sizeof(message), B_KILL_CAN_INTERRUPT, 0);
1738 
1739 		if (messageSize < 0) {
1740 			// The port is no longer valid or we were interrupted by a kill
1741 			// signal. If we are still listed in the team's debug info as the
1742 			// nub thread, we need to update that.
1743 			nub_thread_cleanup(nubThread);
1744 
1745 			TRACE(("nub thread %" B_PRId32 ": terminating: %lx\n",
1746 				nubThread->id, messageSize));
1747 
1748 			return messageSize;
1749 		}
1750 
1751 		bool sendReply = false;
1752 		union {
1753 			debug_nub_read_memory_reply			read_memory;
1754 			debug_nub_write_memory_reply		write_memory;
1755 			debug_nub_get_cpu_state_reply		get_cpu_state;
1756 			debug_nub_set_breakpoint_reply		set_breakpoint;
1757 			debug_nub_set_watchpoint_reply		set_watchpoint;
1758 			debug_nub_get_signal_masks_reply	get_signal_masks;
1759 			debug_nub_get_signal_handler_reply	get_signal_handler;
1760 			debug_nub_start_profiler_reply		start_profiler;
1761 			debug_profiler_update				profiler_update;
1762 			debug_nub_write_core_file_reply		write_core_file;
1763 		} reply;
1764 		int32 replySize = 0;
1765 		port_id replyPort = -1;
1766 
1767 		// process the command
1768 		switch (command) {
1769 			case B_DEBUG_MESSAGE_READ_MEMORY:
1770 			{
1771 				// get the parameters
1772 				replyPort = message.read_memory.reply_port;
1773 				void *address = message.read_memory.address;
1774 				int32 size = message.read_memory.size;
1775 				status_t result = B_OK;
1776 
1777 				// check the parameters
1778 				if (!BreakpointManager::CanAccessAddress(address, false))
1779 					result = B_BAD_ADDRESS;
1780 				else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE)
1781 					result = B_BAD_VALUE;
1782 
1783 				// read the memory
1784 				size_t bytesRead = 0;
1785 				if (result == B_OK) {
1786 					result = breakpointManager->ReadMemory(address,
1787 						reply.read_memory.data, size, bytesRead);
1788 				}
1789 				reply.read_memory.error = result;
1790 
1791 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_READ_MEMORY: "
1792 					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
1793 					", result: %" B_PRIx32 ", read: %ld\n", nubThread->id,
1794 					replyPort, address, size, result, bytesRead));
1795 
1796 				// send only as much data as necessary
1797 				reply.read_memory.size = bytesRead;
1798 				replySize = reply.read_memory.data + bytesRead - (char*)&reply;
1799 				sendReply = true;
1800 				break;
1801 			}
1802 
1803 			case B_DEBUG_MESSAGE_WRITE_MEMORY:
1804 			{
1805 				// get the parameters
1806 				replyPort = message.write_memory.reply_port;
1807 				void *address = message.write_memory.address;
1808 				int32 size = message.write_memory.size;
1809 				const char *data = message.write_memory.data;
1810 				int32 realSize = (char*)&message + messageSize - data;
1811 				status_t result = B_OK;
1812 
1813 				// check the parameters
1814 				if (!BreakpointManager::CanAccessAddress(address, true))
1815 					result = B_BAD_ADDRESS;
1816 				else if (size <= 0 || size > realSize)
1817 					result = B_BAD_VALUE;
1818 
1819 				// write the memory
1820 				size_t bytesWritten = 0;
1821 				if (result == B_OK) {
1822 					result = breakpointManager->WriteMemory(address, data, size,
1823 						bytesWritten);
1824 				}
1825 				reply.write_memory.error = result;
1826 
1827 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_WRITE_MEMORY: "
1828 					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
1829 					", result: %" B_PRIx32 ", written: %ld\n", nubThread->id,
1830 					replyPort, address, size, result, bytesWritten));
1831 
1832 				reply.write_memory.size = bytesWritten;
1833 				sendReply = true;
1834 				replySize = sizeof(debug_nub_write_memory_reply);
1835 				break;
1836 			}
1837 
1838 			case B_DEBUG_MESSAGE_SET_TEAM_FLAGS:
1839 			{
1840 				// get the parameters
1841 				int32 flags = message.set_team_flags.flags
1842 					& B_TEAM_DEBUG_USER_FLAG_MASK;
1843 
1844 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_TEAM_FLAGS"
1845 					": flags: %" B_PRIx32 "\n", nubThread->id, flags));
1846 
1847 				Team *team = thread_get_current_thread()->team;
1848 
1849 				// set the flags
1850 				cpu_status state = disable_interrupts();
1851 				GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1852 
1853 				flags |= team->debug_info.flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK;
1854 				atomic_set(&team->debug_info.flags, flags);
1855 
1856 				RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1857 				restore_interrupts(state);
1858 
1859 				break;
1860 			}
1861 
1862 			case B_DEBUG_MESSAGE_SET_THREAD_FLAGS:
1863 			{
1864 				// get the parameters
1865 				thread_id threadID = message.set_thread_flags.thread;
1866 				int32 flags = message.set_thread_flags.flags
1867 					& B_THREAD_DEBUG_USER_FLAG_MASK;
1868 
1869 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_THREAD_FLAGS"
1870 					": thread: %" B_PRId32 ", flags: %" B_PRIx32 "\n",
1871 					nubThread->id, threadID, flags));
1872 
1873 				// set the flags
1874 				Thread* thread = Thread::GetAndLock(threadID);
1875 				if (thread == NULL)
1876 					break;
1877 				BReference<Thread> threadReference(thread, true);
1878 				ThreadLocker threadLocker(thread, true);
1879 
1880 				InterruptsSpinLocker threadDebugInfoLocker(
1881 					thread->debug_info.lock);
1882 
1883 				if (thread->team == thread_get_current_thread()->team) {
1884 					flags |= thread->debug_info.flags
1885 						& B_THREAD_DEBUG_KERNEL_FLAG_MASK;
1886 					atomic_set(&thread->debug_info.flags, flags);
1887 				}
1888 
1889 				break;
1890 			}
1891 
1892 			case B_DEBUG_MESSAGE_CONTINUE_THREAD:
1893 			{
1894 				// get the parameters
1895 				thread_id threadID;
1896 				uint32 handleEvent;
1897 				bool singleStep;
1898 
1899 				threadID = message.continue_thread.thread;
1900 				handleEvent = message.continue_thread.handle_event;
1901 				singleStep = message.continue_thread.single_step;
1902 
1903 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CONTINUE_THREAD"
1904 					": thread: %" B_PRId32 ", handle event: %" B_PRIu32 ", "
1905 					"single step: %d\n", nubThread->id, threadID, handleEvent,
1906 					singleStep));
1907 
1908 				// find the thread and get its debug port
1909 				port_id threadDebugPort = -1;
1910 				status_t result = debug_nub_thread_get_thread_debug_port(
1911 					nubThread, threadID, threadDebugPort);
1912 
1913 				// send a message to the debugged thread
1914 				if (result == B_OK) {
1915 					debugged_thread_continue commandMessage;
1916 					commandMessage.handle_event = handleEvent;
1917 					commandMessage.single_step = singleStep;
1918 
1919 					result = write_port(threadDebugPort,
1920 						B_DEBUGGED_THREAD_MESSAGE_CONTINUE,
1921 						&commandMessage, sizeof(commandMessage));
1922 				} else if (result == B_BAD_THREAD_STATE) {
1923 					Thread* thread = Thread::GetAndLock(threadID);
1924 					if (thread == NULL)
1925 						break;
1926 
1927 					BReference<Thread> threadReference(thread, true);
1928 					ThreadLocker threadLocker(thread, true);
1929 					if (thread->state == B_THREAD_SUSPENDED) {
1930 						threadLocker.Unlock();
1931 						resume_thread(threadID);
1932 						break;
1933 					}
1934 				}
1935 
1936 				break;
1937 			}
1938 
1939 			case B_DEBUG_MESSAGE_SET_CPU_STATE:
1940 			{
1941 				// get the parameters
1942 				thread_id threadID = message.set_cpu_state.thread;
1943 				const debug_cpu_state &cpuState
1944 					= message.set_cpu_state.cpu_state;
1945 
1946 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_CPU_STATE"
1947 					": thread: %" B_PRId32 "\n", nubThread->id, threadID));
1948 
1949 				// find the thread and get its debug port
1950 				port_id threadDebugPort = -1;
1951 				status_t result = debug_nub_thread_get_thread_debug_port(
1952 					nubThread, threadID, threadDebugPort);
1953 
1954 				// send a message to the debugged thread
1955 				if (result == B_OK) {
1956 					debugged_thread_set_cpu_state commandMessage;
1957 					memcpy(&commandMessage.cpu_state, &cpuState,
1958 						sizeof(debug_cpu_state));
1959 					write_port(threadDebugPort,
1960 						B_DEBUGGED_THREAD_SET_CPU_STATE,
1961 						&commandMessage, sizeof(commandMessage));
1962 				}
1963 
1964 				break;
1965 			}
1966 
1967 			case B_DEBUG_MESSAGE_GET_CPU_STATE:
1968 			{
1969 				// get the parameters
1970 				thread_id threadID = message.get_cpu_state.thread;
1971 				replyPort = message.get_cpu_state.reply_port;
1972 
1973 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_CPU_STATE"
1974 					": thread: %" B_PRId32 "\n", nubThread->id, threadID));
1975 
1976 				// find the thread and get its debug port
1977 				port_id threadDebugPort = -1;
1978 				status_t result = debug_nub_thread_get_thread_debug_port(
1979 					nubThread, threadID, threadDebugPort);
1980 
1981 				// send a message to the debugged thread
1982 				if (threadDebugPort >= 0) {
1983 					debugged_thread_get_cpu_state commandMessage;
1984 					commandMessage.reply_port = replyPort;
1985 					result = write_port(threadDebugPort,
1986 						B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage,
1987 						sizeof(commandMessage));
1988 				}
1989 
1990 				// send a reply to the debugger in case of error
1991 				if (result != B_OK) {
1992 					reply.get_cpu_state.error = result;
1993 					sendReply = true;
1994 					replySize = sizeof(reply.get_cpu_state);
1995 				}
1996 
1997 				break;
1998 			}
1999 
2000 			case B_DEBUG_MESSAGE_SET_BREAKPOINT:
2001 			{
2002 				// get the parameters
2003 				replyPort = message.set_breakpoint.reply_port;
2004 				void *address = message.set_breakpoint.address;
2005 
2006 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_BREAKPOINT"
2007 					": address: %p\n", nubThread->id, address));
2008 
2009 				// check the address
2010 				status_t result = B_OK;
2011 				if (address == NULL
2012 					|| !BreakpointManager::CanAccessAddress(address, false)) {
2013 					result = B_BAD_ADDRESS;
2014 				}
2015 
2016 				// set the breakpoint
2017 				if (result == B_OK)
2018 					result = breakpointManager->InstallBreakpoint(address);
2019 
2020 				if (result == B_OK)
2021 					update_threads_breakpoints_flag();
2022 
2023 				// prepare the reply
2024 				reply.set_breakpoint.error = result;
2025 				replySize = sizeof(reply.set_breakpoint);
2026 				sendReply = true;
2027 
2028 				break;
2029 			}
2030 
2031 			case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT:
2032 			{
2033 				// get the parameters
2034 				void *address = message.clear_breakpoint.address;
2035 
2036 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_BREAKPOINT"
2037 					": address: %p\n", nubThread->id, address));
2038 
2039 				// check the address
2040 				status_t result = B_OK;
2041 				if (address == NULL
2042 					|| !BreakpointManager::CanAccessAddress(address, false)) {
2043 					result = B_BAD_ADDRESS;
2044 				}
2045 
2046 				// clear the breakpoint
2047 				if (result == B_OK)
2048 					result = breakpointManager->UninstallBreakpoint(address);
2049 
2050 				if (result == B_OK)
2051 					update_threads_breakpoints_flag();
2052 
2053 				break;
2054 			}
2055 
2056 			case B_DEBUG_MESSAGE_SET_WATCHPOINT:
2057 			{
2058 				// get the parameters
2059 				replyPort = message.set_watchpoint.reply_port;
2060 				void *address = message.set_watchpoint.address;
2061 				uint32 type = message.set_watchpoint.type;
2062 				int32 length = message.set_watchpoint.length;
2063 
2064 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_WATCHPOINT"
2065 					": address: %p, type: %" B_PRIu32 ", length: %" B_PRId32 "\n",
2066 					nubThread->id, address, type, length));
2067 
2068 				// check the address and size
2069 				status_t result = B_OK;
2070 				if (address == NULL
2071 					|| !BreakpointManager::CanAccessAddress(address, false)) {
2072 					result = B_BAD_ADDRESS;
2073 				}
2074 				if (length < 0)
2075 					result = B_BAD_VALUE;
2076 
2077 				// set the watchpoint
2078 				if (result == B_OK) {
2079 					result = breakpointManager->InstallWatchpoint(address, type,
2080 						length);
2081 				}
2082 
2083 				if (result == B_OK)
2084 					update_threads_breakpoints_flag();
2085 
2086 				// prepare the reply
2087 				reply.set_watchpoint.error = result;
2088 				replySize = sizeof(reply.set_watchpoint);
2089 				sendReply = true;
2090 
2091 				break;
2092 			}
2093 
2094 			case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT:
2095 			{
2096 				// get the parameters
2097 				void *address = message.clear_watchpoint.address;
2098 
2099 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_WATCHPOINT"
2100 					": address: %p\n", nubThread->id, address));
2101 
2102 				// check the address
2103 				status_t result = B_OK;
2104 				if (address == NULL
2105 					|| !BreakpointManager::CanAccessAddress(address, false)) {
2106 					result = B_BAD_ADDRESS;
2107 				}
2108 
2109 				// clear the watchpoint
2110 				if (result == B_OK)
2111 					result = breakpointManager->UninstallWatchpoint(address);
2112 
2113 				if (result == B_OK)
2114 					update_threads_breakpoints_flag();
2115 
2116 				break;
2117 			}
2118 
2119 			case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS:
2120 			{
2121 				// get the parameters
2122 				thread_id threadID = message.set_signal_masks.thread;
2123 				uint64 ignore = message.set_signal_masks.ignore_mask;
2124 				uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask;
2125 				uint32 ignoreOp = message.set_signal_masks.ignore_op;
2126 				uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op;
2127 
2128 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_MASKS"
2129 					": thread: %" B_PRId32 ", ignore: %" B_PRIx64 " (op: %"
2130 					B_PRIu32 "), ignore once: %" B_PRIx64 " (op: %" B_PRIu32
2131 					")\n", nubThread->id, threadID, ignore, ignoreOp,
2132 					ignoreOnce, ignoreOnceOp));
2133 
2134 				// set the masks
2135 				Thread* thread = Thread::GetAndLock(threadID);
2136 				if (thread == NULL)
2137 					break;
2138 				BReference<Thread> threadReference(thread, true);
2139 				ThreadLocker threadLocker(thread, true);
2140 
2141 				InterruptsSpinLocker threadDebugInfoLocker(
2142 					thread->debug_info.lock);
2143 
2144 				if (thread->team == thread_get_current_thread()->team) {
2145 					thread_debug_info &threadDebugInfo = thread->debug_info;
2146 					// set ignore mask
2147 					switch (ignoreOp) {
2148 						case B_DEBUG_SIGNAL_MASK_AND:
2149 							threadDebugInfo.ignore_signals &= ignore;
2150 							break;
2151 						case B_DEBUG_SIGNAL_MASK_OR:
2152 							threadDebugInfo.ignore_signals |= ignore;
2153 							break;
2154 						case B_DEBUG_SIGNAL_MASK_SET:
2155 							threadDebugInfo.ignore_signals = ignore;
2156 							break;
2157 					}
2158 
2159 					// set ignore once mask
2160 					switch (ignoreOnceOp) {
2161 						case B_DEBUG_SIGNAL_MASK_AND:
2162 							threadDebugInfo.ignore_signals_once &= ignoreOnce;
2163 							break;
2164 						case B_DEBUG_SIGNAL_MASK_OR:
2165 							threadDebugInfo.ignore_signals_once |= ignoreOnce;
2166 							break;
2167 						case B_DEBUG_SIGNAL_MASK_SET:
2168 							threadDebugInfo.ignore_signals_once = ignoreOnce;
2169 							break;
2170 					}
2171 				}
2172 
2173 				break;
2174 			}
2175 
2176 			case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS:
2177 			{
2178 				// get the parameters
2179 				replyPort = message.get_signal_masks.reply_port;
2180 				thread_id threadID = message.get_signal_masks.thread;
2181 				status_t result = B_OK;
2182 
2183 				// get the masks
2184 				uint64 ignore = 0;
2185 				uint64 ignoreOnce = 0;
2186 
2187 				Thread* thread = Thread::GetAndLock(threadID);
2188 				if (thread != NULL) {
2189 					BReference<Thread> threadReference(thread, true);
2190 					ThreadLocker threadLocker(thread, true);
2191 
2192 					InterruptsSpinLocker threadDebugInfoLocker(
2193 						thread->debug_info.lock);
2194 
2195 					ignore = thread->debug_info.ignore_signals;
2196 					ignoreOnce = thread->debug_info.ignore_signals_once;
2197 				} else
2198 					result = B_BAD_THREAD_ID;
2199 
2200 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_MASKS"
2201 					": reply port: %" B_PRId32 ", thread: %" B_PRId32 ", "
2202 					"ignore: %" B_PRIx64 ", ignore once: %" B_PRIx64 ", result: "
2203 					"%" B_PRIx32 "\n", nubThread->id, replyPort, threadID,
2204 					ignore, ignoreOnce, result));
2205 
2206 				// prepare the message
2207 				reply.get_signal_masks.error = result;
2208 				reply.get_signal_masks.ignore_mask = ignore;
2209 				reply.get_signal_masks.ignore_once_mask = ignoreOnce;
2210 				replySize = sizeof(reply.get_signal_masks);
2211 				sendReply = true;
2212 				break;
2213 			}
2214 
2215 			case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER:
2216 			{
2217 				// get the parameters
2218 				int signal = message.set_signal_handler.signal;
2219 				struct sigaction &handler = message.set_signal_handler.handler;
2220 
2221 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER"
2222 					": signal: %d, handler: %p\n", nubThread->id, signal,
2223 					handler.sa_handler));
2224 
2225 				// set the handler
2226 				sigaction(signal, &handler, NULL);
2227 
2228 				break;
2229 			}
2230 
2231 			case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER:
2232 			{
2233 				// get the parameters
2234 				replyPort = message.get_signal_handler.reply_port;
2235 				int signal = message.get_signal_handler.signal;
2236 				status_t result = B_OK;
2237 
2238 				// get the handler
2239 				if (sigaction(signal, NULL, &reply.get_signal_handler.handler)
2240 						!= 0) {
2241 					result = errno;
2242 				}
2243 
2244 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER"
2245 					": reply port: %" B_PRId32 ", signal: %d, handler: %p\n",
2246 					nubThread->id, replyPort, signal,
2247 					reply.get_signal_handler.handler.sa_handler));
2248 
2249 				// prepare the message
2250 				reply.get_signal_handler.error = result;
2251 				replySize = sizeof(reply.get_signal_handler);
2252 				sendReply = true;
2253 				break;
2254 			}
2255 
2256 			case B_DEBUG_MESSAGE_PREPARE_HANDOVER:
2257 			{
2258 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_PREPARE_HANDOVER"
2259 					"\n", nubThread->id));
2260 
2261 				Team *team = nubThread->team;
2262 
2263 				// Acquire the debugger write lock. As soon as we have it and
2264 				// have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread
2265 				// will write anything to the debugger port anymore.
2266 				status_t result = acquire_sem_etc(writeLock, 1,
2267 					B_KILL_CAN_INTERRUPT, 0);
2268 				if (result == B_OK) {
2269 					// set the respective team debug flag
2270 					cpu_status state = disable_interrupts();
2271 					GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2272 
2273 					atomic_or(&team->debug_info.flags,
2274 						B_TEAM_DEBUG_DEBUGGER_HANDOVER);
2275 					BreakpointManager* breakpointManager
2276 						= team->debug_info.breakpoint_manager;
2277 
2278 					RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2279 					restore_interrupts(state);
2280 
2281 					// remove all installed breakpoints
2282 					breakpointManager->RemoveAllBreakpoints();
2283 
2284 					release_sem(writeLock);
2285 				} else {
2286 					// We probably got a SIGKILL. If so, we will terminate when
2287 					// reading the next message fails.
2288 				}
2289 
2290 				break;
2291 			}
2292 
2293 			case B_DEBUG_MESSAGE_HANDED_OVER:
2294 			{
2295 				// notify all threads that the debugger has changed
2296 				broadcast_debugged_thread_message(nubThread,
2297 					B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
2298 
2299 				break;
2300 			}
2301 
2302 			case B_DEBUG_START_PROFILER:
2303 			{
2304 				// get the parameters
2305 				thread_id threadID = message.start_profiler.thread;
2306 				replyPort = message.start_profiler.reply_port;
2307 				area_id sampleArea = message.start_profiler.sample_area;
2308 				int32 stackDepth = message.start_profiler.stack_depth;
2309 				bool variableStackDepth
2310 					= message.start_profiler.variable_stack_depth;
2311 				bool profileKernel = message.start_profiler.profile_kernel;
2312 				bigtime_t interval = max_c(message.start_profiler.interval,
2313 					B_DEBUG_MIN_PROFILE_INTERVAL);
2314 				status_t result = B_OK;
2315 
2316 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_START_PROFILER: "
2317 					"thread: %" B_PRId32 ", sample area: %" B_PRId32 "\n",
2318 					nubThread->id, threadID, sampleArea));
2319 
2320 				if (stackDepth < 1)
2321 					stackDepth = 1;
2322 				else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH)
2323 					stackDepth = B_DEBUG_STACK_TRACE_DEPTH;
2324 
2325 				// provision for an extra entry per hit (for the number of
2326 				// samples), if variable stack depth
2327 				if (variableStackDepth)
2328 					stackDepth++;
2329 
2330 				// clone the sample area
2331 				area_info areaInfo;
2332 				if (result == B_OK)
2333 					result = get_area_info(sampleArea, &areaInfo);
2334 
2335 				area_id clonedSampleArea = -1;
2336 				void* samples = NULL;
2337 				if (result == B_OK) {
2338 					clonedSampleArea = clone_area("profiling samples", &samples,
2339 						B_ANY_KERNEL_ADDRESS,
2340 						B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
2341 						sampleArea);
2342 					if (clonedSampleArea >= 0) {
2343 						// we need the memory locked
2344 						result = lock_memory(samples, areaInfo.size,
2345 							B_READ_DEVICE);
2346 						if (result != B_OK) {
2347 							delete_area(clonedSampleArea);
2348 							clonedSampleArea = -1;
2349 						}
2350 					} else
2351 						result = clonedSampleArea;
2352 				}
2353 
2354 				// get the thread and set the profile info
2355 				int32 imageEvent = nubThread->team->debug_info.image_event;
2356 				if (result == B_OK) {
2357 					Thread* thread = Thread::GetAndLock(threadID);
2358 					BReference<Thread> threadReference(thread, true);
2359 					ThreadLocker threadLocker(thread, true);
2360 
2361 					if (thread != NULL && thread->team == nubThread->team) {
2362 						thread_debug_info &threadDebugInfo = thread->debug_info;
2363 
2364 						InterruptsSpinLocker threadDebugInfoLocker(
2365 							threadDebugInfo.lock);
2366 
2367 						if (threadDebugInfo.profile.samples == NULL) {
2368 							threadDebugInfo.profile.interval = interval;
2369 							threadDebugInfo.profile.sample_area
2370 								= clonedSampleArea;
2371 							threadDebugInfo.profile.samples = (addr_t*)samples;
2372 							threadDebugInfo.profile.max_samples
2373 								= areaInfo.size / sizeof(addr_t);
2374 							threadDebugInfo.profile.flush_threshold
2375 								= threadDebugInfo.profile.max_samples
2376 									* B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD
2377 									/ 100;
2378 							threadDebugInfo.profile.sample_count = 0;
2379 							threadDebugInfo.profile.dropped_ticks = 0;
2380 							threadDebugInfo.profile.stack_depth = stackDepth;
2381 							threadDebugInfo.profile.variable_stack_depth
2382 								= variableStackDepth;
2383 							threadDebugInfo.profile.profile_kernel = profileKernel;
2384 							threadDebugInfo.profile.flush_needed = false;
2385 							threadDebugInfo.profile.interval_left = interval;
2386 							threadDebugInfo.profile.installed_timer = NULL;
2387 							threadDebugInfo.profile.image_event = imageEvent;
2388 							threadDebugInfo.profile.last_image_event
2389 								= imageEvent;
2390 						} else
2391 							result = B_BAD_VALUE;
2392 					} else
2393 						result = B_BAD_THREAD_ID;
2394 				}
2395 
2396 				// on error unlock and delete the sample area
2397 				if (result != B_OK) {
2398 					if (clonedSampleArea >= 0) {
2399 						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
2400 						delete_area(clonedSampleArea);
2401 					}
2402 				}
2403 
2404 				// send a reply to the debugger
2405 				reply.start_profiler.error = result;
2406 				reply.start_profiler.interval = interval;
2407 				reply.start_profiler.image_event = imageEvent;
2408 				sendReply = true;
2409 				replySize = sizeof(reply.start_profiler);
2410 
2411 				break;
2412 			}
2413 
2414 			case B_DEBUG_STOP_PROFILER:
2415 			{
2416 				// get the parameters
2417 				thread_id threadID = message.stop_profiler.thread;
2418 				replyPort = message.stop_profiler.reply_port;
2419 				status_t result = B_OK;
2420 
2421 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_STOP_PROFILER: "
2422 					"thread: %" B_PRId32 "\n", nubThread->id, threadID));
2423 
2424 				area_id sampleArea = -1;
2425 				addr_t* samples = NULL;
2426 				int32 sampleCount = 0;
2427 				int32 stackDepth = 0;
2428 				bool variableStackDepth = false;
2429 				int32 imageEvent = 0;
2430 				int32 droppedTicks = 0;
2431 				bigtime_t lastCPUTime = 0;
2432 
2433 				// get the thread and detach the profile info
2434 				Thread* thread = Thread::GetAndLock(threadID);
2435 				BReference<Thread> threadReference(thread, true);
2436 				ThreadLocker threadLocker(thread, true);
2437 
2438 				if (thread && thread->team == nubThread->team) {
2439 					thread_debug_info &threadDebugInfo = thread->debug_info;
2440 
2441 					InterruptsSpinLocker threadDebugInfoLocker(
2442 						threadDebugInfo.lock);
2443 
2444 					if (threadDebugInfo.profile.samples != NULL) {
2445 						sampleArea = threadDebugInfo.profile.sample_area;
2446 						samples = threadDebugInfo.profile.samples;
2447 						sampleCount = threadDebugInfo.profile.sample_count;
2448 						droppedTicks = threadDebugInfo.profile.dropped_ticks;
2449 						stackDepth = threadDebugInfo.profile.stack_depth;
2450 						variableStackDepth
2451 							= threadDebugInfo.profile.variable_stack_depth;
2452 						imageEvent = threadDebugInfo.profile.image_event;
2453 						threadDebugInfo.profile.sample_area = -1;
2454 						threadDebugInfo.profile.samples = NULL;
2455 						threadDebugInfo.profile.flush_needed = false;
2456 						threadDebugInfo.profile.dropped_ticks = 0;
2457 						{
2458 							SpinLocker threadTimeLocker(thread->time_lock);
2459 							lastCPUTime = thread->CPUTime(false);
2460 						}
2461 					} else
2462 						result = B_BAD_VALUE;
2463 				} else
2464 					result = B_BAD_THREAD_ID;
2465 
2466 				threadLocker.Unlock();
2467 
2468 				// prepare the reply
2469 				if (result == B_OK) {
2470 					reply.profiler_update.origin.thread = threadID;
2471 					reply.profiler_update.image_event = imageEvent;
2472 					reply.profiler_update.stack_depth = stackDepth;
2473 					reply.profiler_update.variable_stack_depth
2474 						= variableStackDepth;
2475 					reply.profiler_update.sample_count = sampleCount;
2476 					reply.profiler_update.dropped_ticks = droppedTicks;
2477 					reply.profiler_update.stopped = true;
2478 					reply.profiler_update.last_cpu_time = lastCPUTime;
2479 				} else
2480 					reply.profiler_update.origin.thread = result;
2481 
2482 				replySize = sizeof(debug_profiler_update);
2483 				sendReply = true;
2484 
2485 				if (sampleArea >= 0) {
2486 					area_info areaInfo;
2487 					if (get_area_info(sampleArea, &areaInfo) == B_OK) {
2488 						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
2489 						delete_area(sampleArea);
2490 					}
2491 				}
2492 
2493 				break;
2494 			}
2495 
2496 			case B_DEBUG_WRITE_CORE_FILE:
2497 			{
2498 				// get the parameters
2499 				replyPort = message.write_core_file.reply_port;
2500 				char* path = message.write_core_file.path;
2501 				path[sizeof(message.write_core_file.path) - 1] = '\0';
2502 
2503 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_WRITE_CORE_FILE"
2504 					": path: %s\n", nubThread->id, path));
2505 
2506 				// write the core file
2507 				status_t result = core_dump_write_core_file(path, false);
2508 
2509 				// prepare the reply
2510 				reply.write_core_file.error = result;
2511 				replySize = sizeof(reply.write_core_file);
2512 				sendReply = true;
2513 
2514 				break;
2515 			}
2516 		}
2517 
2518 		// send the reply, if necessary
2519 		if (sendReply) {
2520 			status_t error = kill_interruptable_write_port(replyPort, command,
2521 				&reply, replySize);
2522 
2523 			if (error != B_OK) {
2524 				// The debugger port either no longer exists or we were
2525 				// interrupted by a kill signal. In either case we terminate.
2526 				TRACE(("nub thread %" B_PRId32 ": failed to send reply to port "
2527 					"%" B_PRId32 ": %s\n", nubThread->id, replyPort,
2528 					strerror(error)));
2529 
2530 				nub_thread_cleanup(nubThread);
2531 				return error;
2532 			}
2533 		}
2534 	}
2535 }
2536 
2537 
2538 /**	\brief Helper function for install_team_debugger() that sets up the team
2539 		   and thread debug infos.
2540 
2541 	The caller must hold the team's lock as well as the team debug info lock.
2542 
2543 	The function also clears the arch-specific team and thread debug infos
2544 	(including, among other things, previously set break- and watchpoints).
2545  */
2546 static void
2547 install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam,
2548 	port_id debuggerPort, port_id nubPort, thread_id nubThread,
2549 	sem_id debuggerPortWriteLock, thread_id causingThread)
2550 {
2551 	atomic_set(&team->debug_info.flags,
2552 		B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED);
2553 	team->debug_info.nub_port = nubPort;
2554 	team->debug_info.nub_thread = nubThread;
2555 	team->debug_info.debugger_team = debuggerTeam;
2556 	team->debug_info.debugger_port = debuggerPort;
2557 	team->debug_info.debugger_write_lock = debuggerPortWriteLock;
2558 	team->debug_info.causing_thread = causingThread;
2559 
2560 	arch_clear_team_debug_info(&team->debug_info.arch_info);
2561 
2562 	// set the user debug flags and signal masks of all threads to the default
2563 	for (Thread *thread = team->thread_list; thread;
2564 			thread = thread->team_next) {
2565 		SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
2566 
2567 		if (thread->id == nubThread) {
2568 			atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD);
2569 		} else {
2570 			int32 flags = thread->debug_info.flags
2571 				& ~B_THREAD_DEBUG_USER_FLAG_MASK;
2572 			atomic_set(&thread->debug_info.flags,
2573 				flags | B_THREAD_DEBUG_DEFAULT_FLAGS);
2574 			thread->debug_info.ignore_signals = 0;
2575 			thread->debug_info.ignore_signals_once = 0;
2576 
2577 			arch_clear_thread_debug_info(&thread->debug_info.arch_info);
2578 		}
2579 	}
2580 
2581 	// update the thread::flags fields
2582 	update_threads_debugger_installed_flag(team);
2583 }
2584 
2585 
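/**	\brief Installs a debugger listening on \a debuggerPort for the given team,
		   or hands the team over to it.

	If a debugger is already installed and has prepared a handover
	(B_TEAM_DEBUG_DEBUGGER_HANDOVER), the existing nub thread, nub port, and
	debugger write lock are reused and both the old and the new debugger are
	sent a B_DEBUGGER_MESSAGE_HANDED_OVER notification. Otherwise a debugger
	write lock, a nub port, a breakpoint manager, and a nub thread running
	debug_nub_thread() are created for the team.

	\return The ID of the team's nub port, or an error code.
 */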
2586 static port_id
2587 install_team_debugger(team_id teamID, port_id debuggerPort,
2588 	thread_id causingThread, bool useDefault, bool dontReplace)
2589 {
2590 	TRACE(("install_team_debugger(team: %" B_PRId32 ", port: %" B_PRId32 ", "
2591 		"default: %d, dontReplace: %d)\n", teamID, debuggerPort, useDefault,
2592 		dontReplace));
2593 
2594 	if (useDefault)
2595 		debuggerPort = atomic_get(&sDefaultDebuggerPort);
2596 
2597 	// get the debugger team
2598 	port_info debuggerPortInfo;
2599 	status_t error = get_port_info(debuggerPort, &debuggerPortInfo);
2600 	if (error != B_OK) {
2601 		TRACE(("install_team_debugger(): Failed to get debugger port info: "
2602 			"%" B_PRIx32 "\n", error));
2603 		return error;
2604 	}
2605 	team_id debuggerTeam = debuggerPortInfo.team;
2606 
2607 	// Check the debugger team: It must neither be the kernel team nor the
2608 	// debugged team.
2609 	if (teamID == B_CURRENT_TEAM)
2610 		teamID = team_get_current_team_id();
2611 	if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) {
2612 		TRACE(("install_team_debugger(): Can't debug kernel or debugger team. "
2613 			"debugger: %" B_PRId32 ", debugged: %" B_PRId32 "\n", debuggerTeam,
2614 			teamID));
2615 		return B_NOT_ALLOWED;
2616 	}
2617 
2618 	// get the team
2619 	Team* team;
2620 	ConditionVariable debugChangeCondition;
2621 	debugChangeCondition.Init(NULL, "debug change condition");
2622 	error = prepare_debugger_change(teamID, debugChangeCondition, team);
2623 	if (error != B_OK)
2624 		return error;
2625 
2626 	// check whether a debugger is already installed
2627 
2628 	bool done = false;
2629 	port_id result = B_ERROR;
2630 	bool handOver = false;
2631 	port_id oldDebuggerPort = -1;
2632 	port_id nubPort = -1;
2633 
2634 	TeamLocker teamLocker(team);
2635 	cpu_status state = disable_interrupts();
2636 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2637 
2638 	int32 teamDebugFlags = team->debug_info.flags;
2639 
2640 	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2641 		// There's already a debugger installed.
2642 		if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) {
2643 			if (dontReplace) {
2644 				// We're fine with already having a debugger.
2645 				error = B_OK;
2646 				done = true;
2647 				result = team->debug_info.nub_port;
2648 			} else {
2649 				// A handover to another debugger is requested: set the
2650 				// handing-over flag -- we'll clear both flags after having
2651 				// sent the handed-over message to the new debugger.
2652 				atomic_or(&team->debug_info.flags,
2653 					B_TEAM_DEBUG_DEBUGGER_HANDING_OVER);
2654 
2655 				oldDebuggerPort = team->debug_info.debugger_port;
2656 				result = nubPort = team->debug_info.nub_port;
2657 				if (causingThread < 0)
2658 					causingThread = team->debug_info.causing_thread;
2659 
2660 				// set the new debugger
2661 				install_team_debugger_init_debug_infos(team, debuggerTeam,
2662 					debuggerPort, nubPort, team->debug_info.nub_thread,
2663 					team->debug_info.debugger_write_lock, causingThread);
2664 
2665 				handOver = true;
2666 				done = true;
2667 			}
2668 		} else {
2669 			// there's already a debugger installed and no handover was requested
2670 			error = (dontReplace ? B_OK : B_BAD_VALUE);
2671 			done = true;
2672 			result = team->debug_info.nub_port;
2673 		}
2674 	} else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0
2675 		&& useDefault) {
2676 		// No debugger is installed yet, disable_debugger() has been invoked,
2677 		// and we were asked to install the default debugger. Just fail.
2678 		error = B_BAD_VALUE;
2679 	}
2680 
2681 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2682 	restore_interrupts(state);
2683 	teamLocker.Unlock();
2684 
2685 	if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) {
2686 		// The old debugger must have just died. Proceed as if there
2687 		// were no debugger installed. We may still be too early, in
2688 		// which case we'll fail, but this race condition should be
2689 		// unbelievably rare and relatively harmless.
2690 		handOver = false;
2691 		done = false;
2692 	}
2693 
2694 	if (handOver) {
2695 		// prepare the handed-over message
2696 		debug_handed_over notification;
2697 		notification.origin.thread = -1;
2698 		notification.origin.team = teamID;
2699 		notification.origin.nub_port = nubPort;
2700 		notification.debugger = debuggerTeam;
2701 		notification.debugger_port = debuggerPort;
2702 		notification.causing_thread = causingThread;
2703 
2704 		// notify the new debugger
2705 		error = write_port_etc(debuggerPort,
2706 			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
2707 			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
2708 		if (error != B_OK) {
2709 			dprintf("install_team_debugger(): Failed to send message to new "
2710 				"debugger: %s\n", strerror(error));
2711 		}
2712 
2713 		// clear the handed-over and handing-over flags
2714 		state = disable_interrupts();
2715 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2716 
2717 		atomic_and(&team->debug_info.flags,
2718 			~(B_TEAM_DEBUG_DEBUGGER_HANDOVER
2719 				| B_TEAM_DEBUG_DEBUGGER_HANDING_OVER));
2720 
2721 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2722 		restore_interrupts(state);
2723 
2724 		finish_debugger_change(team);
2725 
2726 		// notify the nub thread
2727 		kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER,
2728 			NULL, 0);
2729 
2730 		// notify the old debugger
2731 		error = write_port_etc(oldDebuggerPort,
2732 			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
2733 			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
2734 		if (error != B_OK) {
2735 			TRACE(("install_team_debugger(): Failed to send message to old "
2736 				"debugger: %s\n", strerror(error)));
2737 		}
2738 
2739 		TRACE(("install_team_debugger() done: handed over to debugger: team: "
2740 			"%" B_PRId32 ", port: %" B_PRId32 "\n", debuggerTeam,
2741 			debuggerPort));
2742 
2743 		return result;
2744 	}
2745 
2746 	if (done || error != B_OK) {
2747 		TRACE(("install_team_debugger() done1: %" B_PRId32 "\n",
2748 			(error == B_OK ? result : error)));
2749 		finish_debugger_change(team);
2750 		return (error == B_OK ? result : error);
2751 	}
2752 
2753 	// create the debugger write lock semaphore
2754 	char nameBuffer[B_OS_NAME_LENGTH];
2755 	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debugger port "
2756 		"write", teamID);
2757 	sem_id debuggerWriteLock = create_sem(1, nameBuffer);
2758 	if (debuggerWriteLock < 0)
2759 		error = debuggerWriteLock;
2760 
2761 	// create the nub port
2762 	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug", teamID);
2763 	if (error == B_OK) {
2764 		nubPort = create_port(1, nameBuffer);
2765 		if (nubPort < 0)
2766 			error = nubPort;
2767 		else
2768 			result = nubPort;
2769 	}
2770 
2771 	// make the debugger team the port owner; that way we know when the
2772 	// debugger is gone and can clean up
2773 	if (error == B_OK)
2774 		error = set_port_owner(nubPort, debuggerTeam);
2775 
2776 	// create the breakpoint manager
2777 	BreakpointManager* breakpointManager = NULL;
2778 	if (error == B_OK) {
2779 		breakpointManager = new(std::nothrow) BreakpointManager;
2780 		if (breakpointManager != NULL)
2781 			error = breakpointManager->Init();
2782 		else
2783 			error = B_NO_MEMORY;
2784 	}
2785 
2786 	// spawn the nub thread
2787 	thread_id nubThread = -1;
2788 	if (error == B_OK) {
2789 		snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug task",
2790 			teamID);
2791 		nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer,
2792 			B_NORMAL_PRIORITY, NULL, teamID);
2793 		if (nubThread < 0)
2794 			error = nubThread;
2795 	}
2796 
2797 	// now adjust the debug info accordingly
2798 	if (error == B_OK) {
2799 		TeamLocker teamLocker(team);
2800 		state = disable_interrupts();
2801 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2802 
2803 		team->debug_info.breakpoint_manager = breakpointManager;
2804 		install_team_debugger_init_debug_infos(team, debuggerTeam,
2805 			debuggerPort, nubPort, nubThread, debuggerWriteLock,
2806 			causingThread);
2807 
2808 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2809 		restore_interrupts(state);
2810 	}
2811 
2812 	finish_debugger_change(team);
2813 
2814 	// if everything went fine, resume the nub thread, otherwise clean up
2815 	if (error == B_OK) {
2816 		resume_thread(nubThread);
2817 	} else {
2818 		// delete port and terminate thread
2819 		if (nubPort >= 0) {
2820 			set_port_owner(nubPort, B_CURRENT_TEAM);
2821 			delete_port(nubPort);
2822 		}
2823 		if (nubThread >= 0) {
2824 			int32 result;
2825 			wait_for_thread(nubThread, &result);
2826 		}
2827 
2828 		delete breakpointManager;
2829 	}
2830 
2831 	TRACE(("install_team_debugger() done2: %" B_PRId32 "\n",
2832 		(error == B_OK ? result : error)));
2833 	return (error == B_OK ? result : error);
2834 }
2835 
2836 
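/**	\brief Makes sure a debugger is installed for the current team, installing
		   the default debugger if none is installed yet.

	\return \c B_OK if a debugger is (now) installed, an error code otherwise.
 */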
2837 static status_t
2838 ensure_debugger_installed()
2839 {
2840 	port_id port = install_team_debugger(B_CURRENT_TEAM, -1,
2841 		thread_get_current_thread_id(), true, true);
2842 	return port >= 0 ? B_OK : port;
2843 }
2844 
2845 
2846 // #pragma mark -
2847 
2848 
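/**	\brief Syscall backing the userland debugger() call.

	Makes sure a debugger is installed for the calling team -- terminating the
	team, if that fails -- and then lets the calling thread hit a
	B_DEBUGGER_MESSAGE_DEBUGGER_CALL debug event carrying the given message.
 */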
2849 void
2850 _user_debugger(const char *userMessage)
2851 {
2852 	// install the default debugger if there is none yet
2853 	status_t error = ensure_debugger_installed();
2854 	if (error != B_OK) {
2855 		// time to commit suicide
2856 		char buffer[128];
2857 		ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer));
2858 		if (length >= 0) {
2859 			dprintf("_user_debugger(): Failed to install debugger. Message is: "
2860 				"`%s'\n", buffer);
2861 		} else {
2862 			dprintf("_user_debugger(): Failed to install debugger. Message is: "
2863 				"%p (%s)\n", userMessage, strerror(length));
2864 		}
2865 		_user_exit_team(1);
2866 	}
2867 
2868 	// prepare the message
2869 	debug_debugger_call message;
2870 	message.message = (void*)userMessage;
2871 
2872 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message,
2873 		sizeof(message), true);
2874 }
2875 
2876 
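/**	\brief Syscall backing disable_debugger(): sets or clears the team's
		   B_TEAM_DEBUG_DEBUGGER_DISABLED flag.

	\return Nonzero if the debugger was not disabled before the call, 0
		otherwise (but see the TODO below regarding the return value).
 */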
2877 int
2878 _user_disable_debugger(int state)
2879 {
2880 	Team *team = thread_get_current_thread()->team;
2881 
2882 	TRACE(("_user_disable_debugger(%d): team: %" B_PRId32 "\n", state,
2883 		team->id));
2884 
2885 	cpu_status cpuState = disable_interrupts();
2886 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2887 
2888 	int32 oldFlags;
2889 	if (state) {
2890 		oldFlags = atomic_or(&team->debug_info.flags,
2891 			B_TEAM_DEBUG_DEBUGGER_DISABLED);
2892 	} else {
2893 		oldFlags = atomic_and(&team->debug_info.flags,
2894 			~B_TEAM_DEBUG_DEBUGGER_DISABLED);
2895 	}
2896 
2897 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2898 	restore_interrupts(cpuState);
2899 
2900 	// TODO: Check whether the return value is really the old state.
2901 	return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED);
2902 }
2903 
2904 
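/**	\brief Syscall that sets the system's default debugger port.

	Only allowed for the root user. The port -- if a valid one is supplied --
	must not belong to the kernel team. A negative \a debuggerPort simply
	unsets the previously set default debugger port.
 */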
2905 status_t
2906 _user_install_default_debugger(port_id debuggerPort)
2907 {
2908 	// Do not allow non-root processes to install a default debugger.
2909 	if (geteuid() != 0)
2910 		return B_PERMISSION_DENIED;
2911 
2912 	// if supplied, check whether the port is valid
2913 	if (debuggerPort >= 0) {
2914 		port_info portInfo;
2915 		status_t error = get_port_info(debuggerPort, &portInfo);
2916 		if (error != B_OK)
2917 			return error;
2918 
2919 		// the debugger team must not be the kernel team
2920 		if (portInfo.team == team_get_kernel_team_id())
2921 			return B_NOT_ALLOWED;
2922 	}
2923 
2924 	atomic_set(&sDefaultDebuggerPort, debuggerPort);
2925 
2926 	return B_OK;
2927 }
2928 
2929 
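/**	\brief Syscall backing the userland install_team_debugger() call.

	Requires the caller to be root or to have the same effective user ID as
	the target team.

	A hypothetical userland sketch of how a debugger attaches to a team
	(illustrative only; attach_to_team() is not an existing API and the port
	capacity is arbitrary):

	\code
	#include <debugger.h>
	#include <OS.h>

	static status_t
	attach_to_team(team_id team, port_id& _debuggerPort, port_id& _nubPort)
	{
		// create the port on which debug events for the team will arrive
		port_id debuggerPort = create_port(100, "debugger messages");
		if (debuggerPort < 0)
			return debuggerPort;

		// install ourselves as the team's debugger; on success the ID of
		// the team's nub port is returned
		port_id nubPort = install_team_debugger(team, debuggerPort);
		if (nubPort < 0) {
			delete_port(debuggerPort);
			return nubPort;
		}

		_debuggerPort = debuggerPort;
		_nubPort = nubPort;
		return B_OK;
	}
	\endcode
 */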
2930 port_id
2931 _user_install_team_debugger(team_id teamID, port_id debuggerPort)
2932 {
2933 	if (geteuid() != 0 && team_geteuid(teamID) != geteuid())
2934 		return B_PERMISSION_DENIED;
2935 
2936 	return install_team_debugger(teamID, debuggerPort, -1, false, false);
2937 }
2938 
2939 
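/**	\brief Syscall backing remove_team_debugger(): uninstalls the team's
		   debugger.

	Deletes the team's nub port, which causes the nub thread to terminate and
	clean up the team debug info, and waits for the nub thread to exit.

	\return \c B_OK if a debugger was installed, an error code otherwise.
 */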
2940 status_t
2941 _user_remove_team_debugger(team_id teamID)
2942 {
2943 	Team* team;
2944 	ConditionVariable debugChangeCondition;
2945 	debugChangeCondition.Init(NULL, "debug change condition");
2946 	status_t error = prepare_debugger_change(teamID, debugChangeCondition,
2947 		team);
2948 	if (error != B_OK)
2949 		return error;
2950 
2951 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2952 
2953 	thread_id nubThread = -1;
2954 	port_id nubPort = -1;
2955 
2956 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2957 		// there's a debugger installed
2958 		nubThread = team->debug_info.nub_thread;
2959 		nubPort = team->debug_info.nub_port;
2960 	} else {
2961 		// no debugger installed
2962 		error = B_BAD_VALUE;
2963 	}
2964 
2965 	debugInfoLocker.Unlock();
2966 
2967 	// Delete the nub port -- this will cause the nub thread to terminate and
2968 	// remove the debugger.
2969 	if (nubPort >= 0)
2970 		delete_port(nubPort);
2971 
2972 	finish_debugger_change(team);
2973 
2974 	// wait for the nub thread
2975 	if (nubThread >= 0)
2976 		wait_for_thread(nubThread, NULL);
2977 
2978 	return error;
2979 }
2980 
2981 
2982 status_t
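/**	\brief Syscall backing debug_thread(): asks the given thread to stop for
		   debugging.

	Sets the thread's B_THREAD_DEBUG_STOP flag and sends it a
	SIGNAL_DEBUG_THREAD signal, so that it enters the debugger as soon as
	possible. Threads of the kernel team, nub threads, and dying threads
	cannot be debugged this way; threads already stopped (or already told to
	stop) are left alone.
 */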
2983 _user_debug_thread(thread_id threadID)
2984 {
2985 	TRACE(("[%" B_PRId32 "] _user_debug_thread(%" B_PRId32 ")\n",
2986 		find_thread(NULL), threadID));
2987 
2988 	// get the thread
2989 	Thread* thread = Thread::GetAndLock(threadID);
2990 	if (thread == NULL)
2991 		return B_BAD_THREAD_ID;
2992 	BReference<Thread> threadReference(thread, true);
2993 	ThreadLocker threadLocker(thread, true);
2994 
2995 	// we can't debug the kernel team
2996 	if (thread->team == team_get_kernel_team())
2997 		return B_NOT_ALLOWED;
2998 
2999 	InterruptsLocker interruptsLocker;
3000 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
3001 
3002 	// If the thread is already dying, it's too late to debug it.
3003 	if ((thread->debug_info.flags & B_THREAD_DEBUG_DYING) != 0)
3004 		return B_BAD_THREAD_ID;
3005 
3006 	// don't debug the nub thread
3007 	if ((thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) != 0)
3008 		return B_NOT_ALLOWED;
3009 
3010 	// already marked stopped or being told to stop?
3011 	if ((thread->debug_info.flags
3012 			& (B_THREAD_DEBUG_STOPPED | B_THREAD_DEBUG_STOP)) != 0) {
3013 		return B_OK;
3014 	}
3015 
3016 	// set the flag that tells the thread to stop as soon as possible
3017 	atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP);
3018 
3019 	update_thread_user_debug_flag(thread);
3020 
3021 	// send the thread a SIGNAL_DEBUG_THREAD, so it is interrupted (or
3022 	// continued)
3023 	threadDebugInfoLocker.Unlock();
3024 	ReadSpinLocker teamLocker(thread->team_lock);
3025 	SpinLocker locker(thread->team->signal_lock);
3026 
3027 	send_signal_to_thread_locked(thread, SIGNAL_DEBUG_THREAD, NULL, 0);
3028 
3029 	return B_OK;
3030 }
3031 
3032 
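/**	\brief Syscall backing wait_for_debugger(): lets the calling thread hit a
		   B_DEBUGGER_MESSAGE_THREAD_DEBUGGED debug event.
 */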
3033 void
3034 _user_wait_for_debugger(void)
3035 {
3036 	debug_thread_debugged message = {};
3037 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message,
3038 		sizeof(message), false);
3039 }
3040 
3041 
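/**	\brief Syscall that lets a team set a breakpoint or watchpoint on itself
		   without having a debugger installed.

	Fails with \c B_BAD_VALUE if a debugger is already installed for the team;
	in that case break- and watchpoints have to be managed through the
	debugger's nub port instead.
 */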
3042 status_t
3043 _user_set_debugger_breakpoint(void *address, uint32 type, int32 length,
3044 	bool watchpoint)
3045 {
3046 	// check the address and size
3047 	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
3048 		return B_BAD_ADDRESS;
3049 	if (watchpoint && length < 0)
3050 		return B_BAD_VALUE;
3051 
3052 	// check whether a debugger is installed already
3053 	team_debug_info teamDebugInfo;
3054 	get_team_debug_info(teamDebugInfo);
3055 	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
3056 		return B_BAD_VALUE;
3057 
3058 	// We can't help it: there's a small but relatively harmless race condition
3059 	// here, since a debugger could be installed in the meantime. The worst case
3060 	// is that we install a break/watchpoint the debugger doesn't know about.
3061 
3062 	// set the break/watchpoint
3063 	status_t result;
3064 	if (watchpoint)
3065 		result = arch_set_watchpoint(address, type, length);
3066 	else
3067 		result = arch_set_breakpoint(address);
3068 
3069 	if (result == B_OK)
3070 		update_threads_breakpoints_flag();
3071 
3072 	return result;
3073 }
3074 
3075 
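/**	\brief Syscall counterpart of _user_set_debugger_breakpoint(): clears a
		   breakpoint or watchpoint previously set by the team itself.

	Like _user_set_debugger_breakpoint() it fails with \c B_BAD_VALUE if a
	debugger is installed for the team.
 */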
3076 status_t
3077 _user_clear_debugger_breakpoint(void *address, bool watchpoint)
3078 {
3079 	// check the address
3080 	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
3081 		return B_BAD_ADDRESS;
3082 
3083 	// check whether a debugger is installed already
3084 	team_debug_info teamDebugInfo;
3085 	get_team_debug_info(teamDebugInfo);
3086 	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
3087 		return B_BAD_VALUE;
3088 
3089 	// We can't help it: there's a small but relatively harmless race condition
3090 	// here, since a debugger could be installed in the meantime. The worst case
3091 	// is that we clear a break/watchpoint the debugger has just installed.
3092 
3093 	// clear the break/watchpoint
3094 	status_t result;
3095 	if (watchpoint)
3096 		result = arch_clear_watchpoint(address);
3097 	else
3098 		result = arch_clear_breakpoint(address);
3099 
3100 	if (result == B_OK)
3101 		update_threads_breakpoints_flag();
3102 
3103 	return result;
3104 }
3105