xref: /haiku/src/system/kernel/debug/user_debugger.cpp (revision d157bf8522d5dc449602bec43f10ecdedc9943cd)
1 /*
2  * Copyright 2005-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 #include <signal.h>
7 #include <stdlib.h>
8 #include <stdio.h>
9 #include <string.h>
10 
11 #include <algorithm>
12 
13 #include <arch/debug.h>
14 #include <arch/user_debugger.h>
15 #include <cpu.h>
16 #include <debugger.h>
17 #include <kernel.h>
18 #include <KernelExport.h>
19 #include <kscheduler.h>
20 #include <ksignal.h>
21 #include <ksyscalls.h>
22 #include <port.h>
23 #include <sem.h>
24 #include <team.h>
25 #include <thread.h>
26 #include <thread_types.h>
27 #include <user_debugger.h>
28 #include <vm.h>
29 #include <vm_types.h>
30 
31 #include <AutoDeleter.h>
32 #include <util/AutoLock.h>
33 
34 #include "BreakpointManager.h"
35 
36 
37 //#define TRACE_USER_DEBUGGER
38 #ifdef TRACE_USER_DEBUGGER
39 #	define TRACE(x) dprintf x
40 #else
41 #	define TRACE(x) ;
42 #endif
43 
44 
45 // TODO: Since the introduction of team_debug_info::debugger_changed_condition
46 // there's some potential for simplifications. E.g. clear_team_debug_info() and
47 // destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus
48 // arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()).
49 
50 
51 static port_id sDefaultDebuggerPort = -1;
52 	// accessed atomically
53 
54 static timer sProfilingTimers[B_MAX_CPU_COUNT];
55 	// a profiling timer for each CPU -- used when a profiled thread is running
56 	// on that CPU
57 
58 
59 static void schedule_profiling_timer(struct thread* thread,
60 	bigtime_t interval);
61 static int32 profiling_event(timer* unused);
62 static status_t ensure_debugger_installed();
63 static void get_team_debug_info(team_debug_info &teamDebugInfo);
64 
65 
66 static status_t
67 kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
68 	size_t bufferSize)
69 {
70 	return write_port_etc(port, code, buffer, bufferSize,
71 		B_KILL_CAN_INTERRUPT, 0);
72 }
73 
74 
/*!	Sends a debugger message to the given debugger \a port on behalf of the
	current thread's team, serialized via the team's debugger write lock.
	The message is dropped (without error) if, after acquiring the lock, the
	team's debugger port has changed or a debugger handover is in progress.
	\param dontWait If \c true, neither acquiring the lock nor the port write
		blocks; otherwise both may only be interrupted by a kill signal.
	\return \c B_OK on success, another error code otherwise.
*/
static status_t
debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
	bool dontWait)
{
	TRACE(("debugger_write(): thread: %ld, team %ld, port: %ld, code: %lx, message: %p, "
		"size: %lu, dontWait: %d\n", thread_get_current_thread()->id,
		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
		dontWait));

	status_t error = B_OK;

	// get the team debug info
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	sem_id writeLock = teamDebugInfo.debugger_write_lock;

	// get the write lock
	TRACE(("debugger_write(): acquiring write lock...\n"));
	error = acquire_sem_etc(writeLock, 1,
		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK) {
		TRACE(("debugger_write() done1: %lx\n", error));
		return error;
	}

	// re-get the team debug info -- the debugger may have changed while we
	// were blocked on the lock
	get_team_debug_info(teamDebugInfo);

	if (teamDebugInfo.debugger_port != port
		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
		// The debugger has changed in the meantime or we are about to be
		// handed over to a new debugger. In either case we don't send the
		// message.
		TRACE(("debugger_write(): %s\n",
			(teamDebugInfo.debugger_port != port ? "debugger port changed"
				: "handover flag set")));
	} else {
		TRACE(("debugger_write(): writing to port...\n"));

		error = write_port_etc(port, code, buffer, bufferSize,
			dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	}

	// release the write lock
	release_sem(writeLock);

	TRACE(("debugger_write() done: %lx\n", error));

	return error;
}
125 
126 
127 /*!	Updates the thread::flags field according to what user debugger flags are
128 	set for the thread.
129 	Interrupts must be disabled and the thread lock must be held.
130 */
131 static void
132 update_thread_user_debug_flag(struct thread* thread)
133 {
134 	if (atomic_get(&thread->debug_info.flags)
135 			& (B_THREAD_DEBUG_STOP | B_THREAD_DEBUG_SINGLE_STEP)) {
136 		atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD);
137 	} else
138 		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD);
139 }
140 
141 
142 /*!	Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the
143 	given thread.
144 	Interrupts must be disabled and the team lock must be held.
145 */
146 static void
147 update_thread_breakpoints_flag(struct thread* thread)
148 {
149 	struct team* team = thread->team;
150 
151 	if (arch_has_breakpoints(&team->debug_info.arch_info))
152 		atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
153 	else
154 		atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
155 }
156 
157 
/*!	Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
	threads of the current team.
	NOTE(review): unlike the sibling helpers, this function acquires the team
	spinlock (with interrupts disabled) itself, so the caller must NOT already
	hold it. The previous comment claiming the caller had to hold the lock was
	stale.
*/
static void
update_threads_breakpoints_flag()
{
	InterruptsSpinLocker _(gTeamSpinlock);

	struct team* team = thread_get_current_thread()->team;
	struct thread* thread = team->thread_list;

	if (arch_has_breakpoints(&team->debug_info.arch_info)) {
		for (; thread != NULL; thread = thread->team_next)
			atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
	} else {
		for (; thread != NULL; thread = thread->team_next)
			atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
	}
}
178 
179 
180 /*!	Updates the thread::flags B_TEAM_DEBUG_DEBUGGER_INSTALLED bit of the
181 	given thread.
182 	Interrupts must be disabled and the team lock must be held.
183 */
184 static void
185 update_thread_debugger_installed_flag(struct thread* thread)
186 {
187 	struct team* team = thread->team;
188 
189 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
190 		atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
191 	else
192 		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
193 }
194 
195 
196 /*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all
197 	threads of the given team.
198 	Interrupts must be disabled and the team lock must be held.
199 */
200 static void
201 update_threads_debugger_installed_flag(struct team* team)
202 {
203 	struct thread* thread = team->thread_list;
204 
205 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
206 		for (; thread != NULL; thread = thread->team_next)
207 			atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
208 	} else {
209 		for (; thread != NULL; thread = thread->team_next)
210 			atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
211 	}
212 }
213 
214 
215 /**
216  *	For the first initialization the function must be called with \a initLock
217  *	set to \c true. If it would be possible that another thread accesses the
218  *	structure at the same time, `lock' must be held when calling the function.
219  */
220 void
221 clear_team_debug_info(struct team_debug_info *info, bool initLock)
222 {
223 	if (info) {
224 		arch_clear_team_debug_info(&info->arch_info);
225 		atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
226 		info->debugger_team = -1;
227 		info->debugger_port = -1;
228 		info->nub_thread = -1;
229 		info->nub_port = -1;
230 		info->debugger_write_lock = -1;
231 		info->causing_thread = -1;
232 		info->image_event = 0;
233 		info->breakpoint_manager = NULL;
234 
235 		if (initLock) {
236 			B_INITIALIZE_SPINLOCK(&info->lock);
237 			info->debugger_changed_condition = NULL;
238 		}
239 	}
240 }
241 
242 /**
243  *  `lock' must not be held nor may interrupts be disabled.
244  *  \a info must not be a member of a team struct (or the team struct must no
245  *  longer be accessible, i.e. the team should already be removed).
246  *
247  *	In case the team is still accessible, the procedure is:
248  *	1. get `lock'
249  *	2. copy the team debug info on stack
250  *	3. call clear_team_debug_info() on the team debug info
251  *	4. release `lock'
252  *	5. call destroy_team_debug_info() on the copied team debug info
253  */
254 static void
255 destroy_team_debug_info(struct team_debug_info *info)
256 {
257 	if (info) {
258 		arch_destroy_team_debug_info(&info->arch_info);
259 
260 		// delete the breakpoint manager
261 		delete info->breakpoint_manager ;
262 		info->breakpoint_manager = NULL;
263 
264 		// delete the debugger port write lock
265 		if (info->debugger_write_lock >= 0) {
266 			delete_sem(info->debugger_write_lock);
267 			info->debugger_write_lock = -1;
268 		}
269 
270 		// delete the nub port
271 		if (info->nub_port >= 0) {
272 			set_port_owner(info->nub_port, B_CURRENT_TEAM);
273 			delete_port(info->nub_port);
274 			info->nub_port = -1;
275 		}
276 
277 		// wait for the nub thread
278 		if (info->nub_thread >= 0) {
279 			if (info->nub_thread != thread_get_current_thread()->id) {
280 				int32 result;
281 				wait_for_thread(info->nub_thread, &result);
282 			}
283 
284 			info->nub_thread = -1;
285 		}
286 
287 		atomic_set(&info->flags, 0);
288 		info->debugger_team = -1;
289 		info->debugger_port = -1;
290 		info->causing_thread = -1;
291 		info->image_event = -1;
292 	}
293 }
294 
295 
296 void
297 init_thread_debug_info(struct thread_debug_info *info)
298 {
299 	if (info) {
300 		arch_clear_thread_debug_info(&info->arch_info);
301 		info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
302 		info->debug_port = -1;
303 		info->ignore_signals = 0;
304 		info->ignore_signals_once = 0;
305 		info->profile.sample_area = -1;
306 		info->profile.samples = NULL;
307 		info->profile.buffer_full = false;
308 		info->profile.installed_timer = NULL;
309 	}
310 }
311 
312 
313 /*!	Invoked with thread lock being held.
314 */
315 void
316 clear_thread_debug_info(struct thread_debug_info *info, bool dying)
317 {
318 	if (info) {
319 		// cancel profiling timer
320 		if (info->profile.installed_timer != NULL) {
321 			cancel_timer(info->profile.installed_timer);
322 			info->profile.installed_timer = NULL;
323 		}
324 
325 		arch_clear_thread_debug_info(&info->arch_info);
326 		atomic_set(&info->flags,
327 			B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ? B_THREAD_DEBUG_DYING : 0));
328 		info->debug_port = -1;
329 		info->ignore_signals = 0;
330 		info->ignore_signals_once = 0;
331 		info->profile.sample_area = -1;
332 		info->profile.samples = NULL;
333 		info->profile.buffer_full = false;
334 	}
335 }
336 
337 
338 void
339 destroy_thread_debug_info(struct thread_debug_info *info)
340 {
341 	if (info) {
342 		area_id sampleArea = info->profile.sample_area;
343 		if (sampleArea >= 0) {
344 			area_info areaInfo;
345 			if (get_area_info(sampleArea, &areaInfo) == B_OK) {
346 				unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
347 				delete_area(sampleArea);
348 			}
349 		}
350 
351 		arch_destroy_thread_debug_info(&info->arch_info);
352 
353 		if (info->debug_port >= 0) {
354 			delete_port(info->debug_port);
355 			info->debug_port = -1;
356 		}
357 
358 		info->ignore_signals = 0;
359 		info->ignore_signals_once = 0;
360 
361 		atomic_set(&info->flags, 0);
362 	}
363 }
364 
365 
/*!	Acquires the "debugger change" right for the team with ID \a teamID:
	blocks until no other thread has a debugger change in progress, then
	installs \a condition as the team's debugger_changed_condition.
	On success (\c B_OK) \a team is set to the team structure; the caller must
	later call finish_debugger_change() to release the right.
	\return \c B_BAD_TEAM_ID if the team doesn't exist (anymore),
		\c B_NOT_ALLOWED for the kernel team, \c B_OK otherwise.
*/
static status_t
prepare_debugger_change(team_id teamID, ConditionVariable& condition,
	struct team*& team)
{
	// We look up the team by ID, even in case of the current team, so we can be
	// sure, that the team is not already dying.
	if (teamID == B_CURRENT_TEAM)
		teamID = thread_get_current_thread()->team->id;

	while (true) {
		// get the team
		InterruptsSpinLocker teamLocker(gTeamSpinlock);

		team = team_get_team_struct_locked(teamID);
		if (team == NULL)
			return B_BAD_TEAM_ID;

		// don't allow messing with the kernel team
		if (team == team_get_kernel_team())
			return B_NOT_ALLOWED;

		// check whether the condition is already set
		SpinLocker threadLocker(gThreadSpinlock);
		SpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return B_OK;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();
		threadLocker.Unlock();
		teamLocker.Unlock();

		entry.Wait();

		// someone else finished their change -- retry from the top, since the
		// team may have died in the meantime
	}
}
408 
409 
/*!	Variant of prepare_debugger_change() for an already known (and guaranteed
	accessible) \a team; cannot fail.
	Blocks until no other debugger change is in progress, then installs
	\a condition as the team's debugger_changed_condition. Must be paired with
	finish_debugger_change().
*/
static void
prepare_debugger_change(struct team* team, ConditionVariable& condition)
{
	while (true) {
		// check whether the condition is already set
		InterruptsSpinLocker threadLocker(gThreadSpinlock);
		SpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();
		threadLocker.Unlock();

		entry.Wait();
	}
}
434 
435 
/*!	Releases the "debugger change" right acquired via a preceding
	prepare_debugger_change() call and wakes up all threads waiting for it.
*/
static void
finish_debugger_change(struct team* team)
{
	// unset our condition variable and notify all threads waiting on it
	InterruptsSpinLocker threadLocker(gThreadSpinlock);
	SpinLocker debugInfoLocker(team->debug_info.lock);

	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
	team->debug_info.debugger_changed_condition = NULL;

	condition->NotifyAll(true);
}
448 
449 
/*!	Invoked before exec_team() deletes the team's ports: temporarily moves the
	current thread's debug port (if any) to the kernel team so it survives the
	exec cleanup. user_debug_finish_after_exec() moves it back.
*/
void
user_debug_prepare_for_exec()
{
	struct thread *thread = thread_get_current_thread();
	struct team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized, change the ownership of the debug port for the thread
	// to the kernel team, since exec_team() deletes all ports owned by this
	// team. We change the ownership back later.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		cpu_status state = disable_interrupts();
		GRAB_THREAD_LOCK();

		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
			debugPort = thread->debug_info.debug_port;

		RELEASE_THREAD_LOCK();
		restore_interrupts(state);

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team_get_kernel_team_id());
	}
}
478 
479 
/*!	Counterpart to user_debug_prepare_for_exec(): after a successful exec,
	moves the current thread's debug port ownership back from the kernel team
	to this team.
*/
void
user_debug_finish_after_exec()
{
	struct thread *thread = thread_get_current_thread();
	struct team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized for this thread, change the ownership of its debug port
	// back to this team.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		cpu_status state = disable_interrupts();
		GRAB_THREAD_LOCK();

		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
			debugPort = thread->debug_info.debug_port;

		RELEASE_THREAD_LOCK();
		restore_interrupts(state);

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team->id);
	}
}
507 
508 
/*!	One-time initialization of the user debugging support; delegates to the
	architecture-specific hook, if the architecture defines one.
*/
void
init_user_debug()
{
	#ifdef ARCH_INIT_USER_DEBUG
		ARCH_INIT_USER_DEBUG();
	#endif
}
516 
517 
/*!	Copies the current thread's team debug info into \a teamDebugInfo, taking
	the debug info lock (with interrupts disabled) so the snapshot is
	consistent.
*/
static void
get_team_debug_info(team_debug_info &teamDebugInfo)
{
	struct thread *thread = thread_get_current_thread();

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);

	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));

	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);
}
531 
532 
/*!	Performs one round of the debug-stop protocol for the current thread:
	makes sure the thread has a debug port, notifies the installed debugger
	about \a event (if there is one/it is required), then reads and processes
	commands from the thread's debug port until the debugger tells the thread
	to continue.
	\param event The debugger message code describing why the thread stopped.
	\param message The message buffer; its debug_origin header is filled in
		here before sending.
	\param size Size of \a message.
	\param requireDebugger If \c true, fail when no debugger is installed.
	\param restart Set to \c true if the debugger changed while the thread was
		stopped and the caller should invoke this function again.
	\return A B_THREAD_DEBUG_* action (>= 0) on success, an error code
		otherwise.
*/
static status_t
thread_hit_debug_event_internal(debug_debugger_message event,
	const void *message, int32 size, bool requireDebugger, bool &restart)
{
	restart = false;
	struct thread *thread = thread_get_current_thread();

	TRACE(("thread_hit_debug_event(): thread: %ld, event: %lu, message: %p, "
		"size: %ld\n", thread->id, (uint32)event, message, size));

	// check, if there's a debug port already
	bool setPort = !(atomic_get(&thread->debug_info.flags)
		& B_THREAD_DEBUG_INITIALIZED);

	// create a port, if there is none yet
	port_id port = -1;
	if (setPort) {
		char nameBuffer[128];
		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %ld",
			thread->id);

		port = create_port(1, nameBuffer);
		if (port < 0) {
			dprintf("thread_hit_debug_event(): Failed to create debug port: "
				"%s\n", strerror(port));
			return port;
		}
	}

	// check the debug info structures once more: get the debugger port, set
	// the thread's debug port, and update the thread's debug flags
	port_id deletePort = port;
	port_id debuggerPort = -1;
	port_id nubPort = -1;
	status_t error = B_OK;
	cpu_status state = disable_interrupts();
	GRAB_THREAD_LOCK();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);

	uint32 threadFlags = thread->debug_info.flags;
	threadFlags &= ~B_THREAD_DEBUG_STOP;
	bool debuggerInstalled
		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	if (thread->id == thread->team->debug_info.nub_thread) {
		// Ugh, we're the nub thread. We shouldn't be here.
		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %ld\n",
			thread->id));

		error = B_ERROR;

	} else if (debuggerInstalled || !requireDebugger) {
		if (debuggerInstalled) {
			debuggerPort = thread->team->debug_info.debugger_port;
			nubPort = thread->team->debug_info.nub_port;
		}

		if (setPort) {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				// someone created a port for us (the port we've created will
				// be deleted below)
				port = thread->debug_info.debug_port;
			} else {
				thread->debug_info.debug_port = port;
				deletePort = -1;	// keep the port
				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
			}
		} else {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				port = thread->debug_info.debug_port;
			} else {
				// someone deleted our port
				error = B_ERROR;
			}
		}
	} else
		error = B_ERROR;

	// update the flags
	if (error == B_OK)
		threadFlags |= B_THREAD_DEBUG_STOPPED;
	atomic_set(&thread->debug_info.flags, threadFlags);

	update_thread_user_debug_flag(thread);

	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	// delete the superfluous port
	if (deletePort >= 0)
		delete_port(deletePort);

	if (error != B_OK) {
		TRACE(("thread_hit_debug_event() error: thread: %ld, error: %lx\n",
			thread->id, error));
		return error;
	}

	// send a message to the debugger port
	if (debuggerInstalled) {
		// update the message's origin info first
		debug_origin *origin = (debug_origin *)message;
		origin->thread = thread->id;
		origin->team = thread->team->id;
		origin->nub_port = nubPort;

		TRACE(("thread_hit_debug_event(): thread: %ld, sending message to "
			"debugger port %ld\n", thread->id, debuggerPort));

		error = debugger_write(debuggerPort, event, message, size, false);
	}

	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
	bool singleStep = false;

	if (error == B_OK) {
		// the debug message loop: blocks on the thread's debug port until the
		// debugger (via the nub thread) tells us to continue
		bool done = false;
		while (!done) {
			// read a command from the debug port
			int32 command;
			debugged_thread_message_data commandMessage;
			ssize_t commandMessageSize = read_port_etc(port, &command,
				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
				0);

			if (commandMessageSize < 0) {
				error = commandMessageSize;
				TRACE(("thread_hit_debug_event(): thread: %ld, failed "
					"to receive message from port %ld: %lx\n",
					thread->id, port, error));
				break;
			}

			switch (command) {
				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
					TRACE(("thread_hit_debug_event(): thread: %ld: "
						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
						thread->id));
					result = commandMessage.continue_thread.handle_event;

					singleStep = commandMessage.continue_thread.single_step;
					done = true;
					break;

				case B_DEBUGGED_THREAD_SET_CPU_STATE:
				{
					TRACE(("thread_hit_debug_event(): thread: %ld: "
						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
						thread->id));
					arch_set_debug_cpu_state(
						&commandMessage.set_cpu_state.cpu_state);

					break;
				}

				case B_DEBUGGED_THREAD_GET_CPU_STATE:
				{
					port_id replyPort = commandMessage.get_cpu_state.reply_port;

					// prepare the message
					debug_nub_get_cpu_state_reply replyMessage;
					replyMessage.error = B_OK;
					replyMessage.message = event;
					arch_get_debug_cpu_state(&replyMessage.cpu_state);

					// send it
					error = kill_interruptable_write_port(replyPort, event,
						&replyMessage, sizeof(replyMessage));

					break;
				}

				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
				{
					// Check, if the debugger really changed, i.e. is different
					// than the one we know.
					team_debug_info teamDebugInfo;
					get_team_debug_info(teamDebugInfo);

					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
						if (!debuggerInstalled
							|| teamDebugInfo.debugger_port != debuggerPort) {
							// debugger was installed or has changed: restart
							// this function
							restart = true;
							done = true;
						}
					} else {
						if (debuggerInstalled) {
							// debugger is gone: continue the thread normally
							done = true;
						}
					}

					break;
				}
			}
		}
	} else {
		TRACE(("thread_hit_debug_event(): thread: %ld, failed to send "
			"message to debugger port %ld: %lx\n", thread->id,
			debuggerPort, error));
	}

	// update the thread debug info
	bool destroyThreadInfo = false;
	thread_debug_info threadDebugInfo;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	// check, if the team is still being debugged
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// update the single-step flag
		if (singleStep) {
			atomic_or(&thread->debug_info.flags,
				B_THREAD_DEBUG_SINGLE_STEP);
		} else {
			atomic_and(&thread->debug_info.flags,
				~B_THREAD_DEBUG_SINGLE_STEP);
		}

		// unset the "stopped" state
		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);

		update_thread_user_debug_flag(thread);

	} else {
		// the debugger is gone: cleanup our info completely
		threadDebugInfo = thread->debug_info;
		clear_thread_debug_info(&thread->debug_info, false);
		destroyThreadInfo = true;
	}

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	// enable/disable single stepping
	arch_update_thread_single_step();

	if (destroyThreadInfo)
		destroy_thread_debug_info(&threadDebugInfo);

	return (error == B_OK ? result : error);
}
779 
780 
/*!	Lets the current thread enter the debug loop for \a event, retrying
	thread_hit_debug_event_internal() as long as the debugger changed while
	the thread was stopped. Afterwards gives the team's breakpoint manager a
	chance to prepare continuing (guarded against concurrent debugger
	changes).
	\return The B_THREAD_DEBUG_* action chosen by the debugger, or an error
		code.
*/
static status_t
thread_hit_debug_event(debug_debugger_message event, const void *message,
	int32 size, bool requireDebugger)
{
	status_t result;
	bool restart;
	do {
		restart = false;
		result = thread_hit_debug_event_internal(event, message, size,
			requireDebugger, restart);
	} while (result >= 0 && restart);

	// Prepare to continue -- we install a debugger change condition, so no-one
	// will change the debugger while we're playing with the breakpoint manager.
	// TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
	struct team* team = thread_get_current_thread()->team;
	ConditionVariable debugChangeCondition;
	prepare_debugger_change(team, debugChangeCondition);

	if (team->debug_info.breakpoint_manager != NULL) {
		bool isSyscall;
		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
		if (pc != NULL && !isSyscall)
			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
	}

	finish_debugger_change(team);

	return result;
}
811 
812 
813 static status_t
814 thread_hit_serious_debug_event(debug_debugger_message event,
815 	const void *message, int32 messageSize)
816 {
817 	// ensure that a debugger is installed for this team
818 	status_t error = ensure_debugger_installed();
819 	if (error != B_OK) {
820 		struct thread *thread = thread_get_current_thread();
821 		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
822 			"thread: %ld: %s\n", thread->id, strerror(error));
823 		return true;
824 	}
825 
826 	// enter the debug loop
827 	return thread_hit_debug_event(event, message, messageSize, true);
828 }
829 
830 
831 void
832 user_debug_pre_syscall(uint32 syscall, void *args)
833 {
834 	// check whether a debugger is installed
835 	struct thread *thread = thread_get_current_thread();
836 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
837 	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
838 		return;
839 
840 	// check whether pre-syscall tracing is enabled for team or thread
841 	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
842 	if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
843 			&& !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
844 		return;
845 	}
846 
847 	// prepare the message
848 	debug_pre_syscall message;
849 	message.syscall = syscall;
850 
851 	// copy the syscall args
852 	if (syscall < (uint32)kSyscallCount) {
853 		if (kSyscallInfos[syscall].parameter_size > 0)
854 			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
855 	}
856 
857 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
858 		sizeof(message), true);
859 }
860 
861 
862 void
863 user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
864 	bigtime_t startTime)
865 {
866 	// check whether a debugger is installed
867 	struct thread *thread = thread_get_current_thread();
868 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
869 	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
870 		return;
871 
872 	// check whether post-syscall tracing is enabled for team or thread
873 	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
874 	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
875 			&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
876 		return;
877 	}
878 
879 	// prepare the message
880 	debug_post_syscall message;
881 	message.start_time = startTime;
882 	message.end_time = system_time();
883 	message.return_value = returnValue;
884 	message.syscall = syscall;
885 
886 	// copy the syscall args
887 	if (syscall < (uint32)kSyscallCount) {
888 		if (kSyscallInfos[syscall].parameter_size > 0)
889 			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
890 	}
891 
892 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
893 		sizeof(message), true);
894 }
895 
896 
/**	\brief To be called when an unhandled processor exception (error/fault)
 *		   occurred.
 *	\param exception The debug_why_stopped value identifying the kind of fault.
 *	\param signal The signal corresponding to the exception.
 *	\return \c true, if the caller shall continue normally, i.e. usually send
 *			a deadly signal. \c false, if the debugger insists to continue the
 *			program (e.g. because it has removed the cause of the problem).
 */
bool
user_debug_exception_occurred(debug_exception_type exception, int signal)
{
	// First check whether there's a signal handler installed for the signal.
	// If so, we don't want to install a debugger for the team. We always send
	// the signal instead. An already installed debugger will be notified, if
	// it has requested notifications of signal.
	struct sigaction signalAction;
	if (sigaction(signal, NULL, &signalAction) == B_OK
		&& signalAction.sa_handler != SIG_DFL) {
		return true;
	}

	// prepare the message
	debug_exception_occurred message;
	message.exception = exception;
	message.signal = signal;

	status_t result = thread_hit_serious_debug_event(
		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}
928 
929 
930 bool
931 user_debug_handle_signal(int signal, struct sigaction *handler, bool deadly)
932 {
933 	// check, if a debugger is installed and is interested in signals
934 	struct thread *thread = thread_get_current_thread();
935 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
936 	if (~teamDebugFlags
937 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
938 		return true;
939 	}
940 
941 	// prepare the message
942 	debug_signal_received message;
943 	message.signal = signal;
944 	message.handler = *handler;
945 	message.deadly = deadly;
946 
947 	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
948 		&message, sizeof(message), true);
949 	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
950 }
951 
952 
/*!	Stops the current thread in the debugger (installing one first, if
	necessary) by raising a B_DEBUGGER_MESSAGE_THREAD_DEBUGGED event.
*/
void
user_debug_stop_thread()
{
	// prepare the message
	debug_thread_debugged message;

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message,
		sizeof(message));
}
962 
963 
964 void
965 user_debug_team_created(team_id teamID)
966 {
967 	// check, if a debugger is installed and is interested in team creation
968 	// events
969 	struct thread *thread = thread_get_current_thread();
970 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
971 	if (~teamDebugFlags
972 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
973 		return;
974 	}
975 
976 	// prepare the message
977 	debug_team_created message;
978 	message.new_team = teamID;
979 
980 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
981 		sizeof(message), true);
982 }
983 
984 
985 void
986 user_debug_team_deleted(team_id teamID, port_id debuggerPort)
987 {
988 	if (debuggerPort >= 0) {
989 		TRACE(("user_debug_team_deleted(team: %ld, debugger port: %ld)\n",
990 			teamID, debuggerPort));
991 
992 		debug_team_deleted message;
993 		message.origin.thread = -1;
994 		message.origin.team = teamID;
995 		message.origin.nub_port = -1;
996 		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
997 			sizeof(message), B_RELATIVE_TIMEOUT, 0);
998 	}
999 }
1000 
1001 
/*!	Invoked when the current team successfully exec*()ed; bumps the team's
	image event counter and notifies an installed debugger that is interested
	in team creation events (exec is reported under the same
	B_TEAM_DEBUG_TEAM_CREATION flag).
*/
void
user_debug_team_exec()
{
	// check, if a debugger is installed and is interested in team creation
	// events
	struct thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_exec message;
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;
		// atomic_add() returns the pre-increment value; report the new one

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
		sizeof(message), true);
}
1022 
1023 
1024 void
1025 user_debug_update_new_thread_flags(thread_id threadID)
1026 {
1027 	// Update thread::flags of the thread.
1028 
1029 	InterruptsLocker interruptsLocker;
1030 
1031 	SpinLocker teamLocker(gTeamSpinlock);
1032 	SpinLocker threadLocker(gThreadSpinlock);
1033 
1034 	struct thread *thread = thread_get_thread_struct_locked(threadID);
1035 	if (!thread)
1036 		return;
1037 
1038 	update_thread_user_debug_flag(thread);
1039 	update_thread_breakpoints_flag(thread);
1040 	update_thread_debugger_installed_flag(thread);
1041 }
1042 
1043 
1044 void
1045 user_debug_thread_created(thread_id threadID)
1046 {
1047 	// check, if a debugger is installed and is interested in thread events
1048 	struct thread *thread = thread_get_current_thread();
1049 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1050 	if (~teamDebugFlags
1051 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
1052 		return;
1053 	}
1054 
1055 	// prepare the message
1056 	debug_thread_created message;
1057 	message.new_thread = threadID;
1058 
1059 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message,
1060 		sizeof(message), true);
1061 }
1062 
1063 
/*!	Notifies the team's debugger that one of its threads has been deleted.
	The calling thread no longer belongs to the debugged team at this point,
	so debugger_write() cannot be used; the debugger write lock and port are
	handled manually instead.
	\param teamID The team the deleted thread belonged to.
	\param threadID The deleted thread.
*/
void
user_debug_thread_deleted(team_id teamID, thread_id threadID)
{
	// Things are a bit complicated here, since this thread no longer belongs to
	// the debugged team (but to the kernel). So we can't use debugger_write().

	// get the team debug flags and debugger port
	InterruptsSpinLocker teamLocker(gTeamSpinlock);

	struct team *team = team_get_team_struct_locked(teamID);
	if (team == NULL)
		return;

	SpinLocker debugInfoLocker(team->debug_info.lock);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;
	sem_id writeLock = team->debug_info.debugger_write_lock;

	// drop the locks before any potentially blocking operation
	debugInfoLocker.Unlock();
	teamLocker.Unlock();

	// check, if a debugger is installed and is interested in thread events
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// acquire the debugger write lock
	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK)
		return;

	// re-get the team debug info -- we need to check whether anything changed
	teamLocker.Lock();

	// NOTE(review): if the team has died in the meantime we return while
	// still holding the debugger write lock semaphore -- presumably the
	// semaphore is deleted together with the team's debug info; confirm.
	team = team_get_team_struct_locked(teamID);
	if (team == NULL)
		return;

	debugInfoLocker.Lock();

	teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id newDebuggerPort = team->debug_info.debugger_port;

	debugInfoLocker.Unlock();
	teamLocker.Unlock();

	// Send the message only if the debugger hasn't changed in the meantime or
	// the team is about to be handed over.
	// NOTE(review): the condition actually tests that the handover flag is
	// NOT set; confirm this matches the intent stated in the comment above.
	if (newDebuggerPort == debuggerPort
		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
		debug_thread_deleted message;
		message.origin.thread = threadID;
		message.origin.team = teamID;
		message.origin.nub_port = -1;
			// asynchronous message -- no nub port

		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
	}

	// release the debugger write lock
	release_sem(writeLock);
}
1128 
1129 
/*!	Called for a thread that is about to die, while it still belongs to its
	team. If a debugger is installed and the thread was being profiled, the
	profiling info is detached from the thread, the collected samples are
	flushed to the debugger (with \c stopped set), and the sample area is
	unlocked and deleted.
*/
void
user_debug_thread_exiting(struct thread* thread)
{
	InterruptsLocker interruptsLocker;
	SpinLocker teamLocker(gTeamSpinlock);

	struct team* team = thread->team;

	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	teamLocker.Unlock();

	// check, if a debugger is installed
	if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
		|| debuggerPort < 0) {
		return;
	}

	// detach the profile info and mark the thread dying
	SpinLocker threadLocker(gThreadSpinlock);

	thread_debug_info& threadDebugInfo = thread->debug_info;
	if (threadDebugInfo.profile.samples == NULL)
		return;
		// not being profiled; the lockers release in their destructors

	// snapshot the profiling state, then clear it so no further samples are
	// taken for this thread
	area_id sampleArea = threadDebugInfo.profile.sample_area;
	int32 sampleCount = threadDebugInfo.profile.sample_count;
	int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
	int32 stackDepth = threadDebugInfo.profile.stack_depth;
	bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
	int32 imageEvent = threadDebugInfo.profile.image_event;
	threadDebugInfo.profile.sample_area = -1;
	threadDebugInfo.profile.samples = NULL;
	threadDebugInfo.profile.buffer_full = false;

	atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);

	// drop the locks before the potentially blocking debugger_write()
	threadLocker.Unlock();
	interruptsLocker.Unlock();

	// notify the debugger
	debug_profiler_update message;
	message.origin.thread = thread->id;
	message.origin.team = thread->team->id;
	message.origin.nub_port = -1;	// asynchronous message
	message.sample_count = sampleCount;
	message.dropped_ticks = droppedTicks;
	message.stack_depth = stackDepth;
	message.variable_stack_depth = variableStackDepth;
	message.image_event = imageEvent;
	message.stopped = true;
	debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
		&message, sizeof(message), false);

	// unwire and delete the sample buffer area, if there was one
	if (sampleArea >= 0) {
		area_info areaInfo;
		if (get_area_info(sampleArea, &areaInfo) == B_OK) {
			unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
			delete_area(sampleArea);
		}
	}
}
1197 
1198 
1199 void
1200 user_debug_image_created(const image_info *imageInfo)
1201 {
1202 	// check, if a debugger is installed and is interested in image events
1203 	struct thread *thread = thread_get_current_thread();
1204 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1205 	if (~teamDebugFlags
1206 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
1207 		return;
1208 	}
1209 
1210 	// prepare the message
1211 	debug_image_created message;
1212 	memcpy(&message.info, imageInfo, sizeof(image_info));
1213 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1214 		+ 1;
1215 
1216 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
1217 		sizeof(message), true);
1218 }
1219 
1220 
1221 void
1222 user_debug_image_deleted(const image_info *imageInfo)
1223 {
1224 	// check, if a debugger is installed and is interested in image events
1225 	struct thread *thread = thread_get_current_thread();
1226 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1227 	if (~teamDebugFlags
1228 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
1229 		return;
1230 	}
1231 
1232 	// prepare the message
1233 	debug_image_deleted message;
1234 	memcpy(&message.info, imageInfo, sizeof(image_info));
1235 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1236 		+ 1;
1237 
1238 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
1239 		sizeof(message), true);
1240 }
1241 
1242 
1243 void
1244 user_debug_breakpoint_hit(bool software)
1245 {
1246 	// prepare the message
1247 	debug_breakpoint_hit message;
1248 	arch_get_debug_cpu_state(&message.cpu_state);
1249 
1250 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
1251 		sizeof(message));
1252 }
1253 
1254 
1255 void
1256 user_debug_watchpoint_hit()
1257 {
1258 	// prepare the message
1259 	debug_watchpoint_hit message;
1260 	arch_get_debug_cpu_state(&message.cpu_state);
1261 
1262 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
1263 		sizeof(message));
1264 }
1265 
1266 
1267 void
1268 user_debug_single_stepped()
1269 {
1270 	// prepare the message
1271 	debug_single_step message;
1272 	arch_get_debug_cpu_state(&message.cpu_state);
1273 
1274 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
1275 		sizeof(message));
1276 }
1277 
1278 
1279 static void
1280 schedule_profiling_timer(struct thread* thread, bigtime_t interval)
1281 {
1282 	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
1283 	thread->debug_info.profile.installed_timer = timer;
1284 	thread->debug_info.profile.timer_end = system_time() + interval;
1285 	add_timer(timer, &profiling_event, interval,
1286 		B_ONE_SHOT_RELATIVE_TIMER | B_TIMER_ACQUIRE_THREAD_LOCK);
1287 }
1288 
1289 
/*!	Takes one profiling sample for the current thread and appends it to the
	thread's sample buffer.
	Called from the profiling timer hook and from profiling_buffer_full()
	with the thread lock held.
	\param flushBuffer Output: set to \c true when the buffer should be
		flushed to the debugger now (buffer nearly full or a pending image
		event must be reported) and flushing is currently possible (i.e. no
		kernel function was interrupted).
	\return \c false, if profiling is not (or no longer) active for this
		thread, \c true otherwise.
*/
static bool
profiling_do_sample(bool& flushBuffer)
{
	struct thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	// no sample buffer -- profiling has been stopped
	if (debugInfo.profile.samples == NULL)
		return false;

	// Check, whether the buffer is full or an image event occurred since the
	// last sample was taken.
	int32 maxSamples = debugInfo.profile.max_samples;
	int32 sampleCount = debugInfo.profile.sample_count;
	int32 stackDepth = debugInfo.profile.stack_depth;
	int32 imageEvent = thread->team->debug_info.image_event;
	if (debugInfo.profile.sample_count > 0) {
		if (debugInfo.profile.last_image_event < imageEvent
			&& debugInfo.profile.variable_stack_depth
			&& sampleCount + 2 <= maxSamples) {
			// an image event occurred, but we use variable stack depth and
			// have enough room in the buffer to indicate an image event
			addr_t* event = debugInfo.profile.samples + sampleCount;
			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
			event[1] = imageEvent;
			sampleCount += 2;
			debugInfo.profile.sample_count = sampleCount;
			debugInfo.profile.last_image_event = imageEvent;
		}

		// flush if an image event is still pending (couldn't be recorded
		// in-band above) or the buffer has reached its flush threshold
		if (debugInfo.profile.last_image_event < imageEvent
			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
			if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
				flushBuffer = true;
				return true;
			}

			// We can't flush the buffer now, since we interrupted a kernel
			// function. If the buffer is not full yet, we add the samples,
			// otherwise we have to drop them.
			if (maxSamples - sampleCount < stackDepth) {
				debugInfo.profile.dropped_ticks++;
				return true;
			}
		}
	} else {
		// first sample -- set the image event
		debugInfo.profile.image_event = imageEvent;
		debugInfo.profile.last_image_event = imageEvent;
	}

	// get the samples
	addr_t* returnAddresses = debugInfo.profile.samples
		+ debugInfo.profile.sample_count;
	if (debugInfo.profile.variable_stack_depth) {
		// variable sample count per hit
		// first slot holds the number of return addresses that follow
		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
			stackDepth - 1, 1, 0, false);

		debugInfo.profile.sample_count += *returnAddresses + 1;
	} else {
		// fixed sample count per hit
		if (stackDepth > 1) {
			int32 count = arch_debug_get_stack_trace(returnAddresses,
				stackDepth, 1, 0, false);

			// zero-pad, so each hit occupies exactly stackDepth slots
			for (int32 i = count; i < stackDepth; i++)
				returnAddresses[i] = 0;
		} else
			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);

		debugInfo.profile.sample_count += stackDepth;
	}

	return true;
}
1365 
1366 
/*!	Post interrupt callback installed by profiling_event() when the sample
	buffer needs to be flushed to the debugger.
	Sends a B_DEBUGGER_MESSAGE_PROFILER_UPDATE to the debugger, then --
	if profiling is still active for this thread -- takes the next sample
	and re-arms the profiling timer.
	Expects to run with interrupts disabled; they are temporarily re-enabled
	around the (potentially blocking) message delivery.
*/
static void
profiling_buffer_full(void*)
{
	struct thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	GRAB_THREAD_LOCK();

	if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
		// snapshot the buffer state for the message
		int32 sampleCount = debugInfo.profile.sample_count;
		int32 droppedTicks = debugInfo.profile.dropped_ticks;
		int32 stackDepth = debugInfo.profile.stack_depth;
		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
		int32 imageEvent = debugInfo.profile.image_event;

		// notify the debugger
		debugInfo.profile.sample_count = 0;
		debugInfo.profile.dropped_ticks = 0;

		// drop the lock and re-enable interrupts for the blocking write
		RELEASE_THREAD_LOCK();
		enable_interrupts();

		// prepare the message
		debug_profiler_update message;
		message.sample_count = sampleCount;
		message.dropped_ticks = droppedTicks;
		message.stack_depth = stackDepth;
		message.variable_stack_depth = variableStackDepth;
		message.image_event = imageEvent;
		message.stopped = false;

		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
			sizeof(message), false);

		disable_interrupts();
		GRAB_THREAD_LOCK();

		// do the sampling and reschedule timer, if still profiling this thread
		bool flushBuffer;
		if (profiling_do_sample(flushBuffer)) {
			debugInfo.profile.buffer_full = false;
			schedule_profiling_timer(thread, debugInfo.profile.interval);
		}
	}

	RELEASE_THREAD_LOCK();
}
1414 
1415 
1416 /*!	The thread spinlock is being held.
1417 */
1418 static int32
1419 profiling_event(timer* /*unused*/)
1420 {
1421 	struct thread* thread = thread_get_current_thread();
1422 	thread_debug_info& debugInfo = thread->debug_info;
1423 
1424 	bool flushBuffer = false;
1425 	if (profiling_do_sample(flushBuffer)) {
1426 		if (flushBuffer) {
1427 			// The sample buffer needs to be flushed; we'll have to notify the
1428 			// debugger. We can't do that right here. Instead we set a post
1429 			// interrupt callback doing that for us, and don't reschedule the
1430 			// timer yet.
1431 			thread->post_interrupt_callback = profiling_buffer_full;
1432 			debugInfo.profile.installed_timer = NULL;
1433 			debugInfo.profile.buffer_full = true;
1434 		} else
1435 			schedule_profiling_timer(thread, debugInfo.profile.interval);
1436 	} else
1437 		debugInfo.profile.installed_timer = NULL;
1438 
1439 	return B_HANDLED_INTERRUPT;
1440 }
1441 
1442 
1443 void
1444 user_debug_thread_unscheduled(struct thread* thread)
1445 {
1446 	// if running, cancel the profiling timer
1447 	struct timer* timer = thread->debug_info.profile.installed_timer;
1448 	if (timer != NULL) {
1449 		// track remaining time
1450 		bigtime_t left = thread->debug_info.profile.timer_end - system_time();
1451 		thread->debug_info.profile.interval_left = max_c(left, 0);
1452 		thread->debug_info.profile.installed_timer = NULL;
1453 
1454 		// cancel timer
1455 		cancel_timer(timer);
1456 	}
1457 }
1458 
1459 
1460 void
1461 user_debug_thread_scheduled(struct thread* thread)
1462 {
1463 	if (thread->debug_info.profile.samples != NULL
1464 		&& !thread->debug_info.profile.buffer_full) {
1465 		// install profiling timer
1466 		schedule_profiling_timer(thread,
1467 			thread->debug_info.profile.interval_left);
1468 	}
1469 }
1470 
1471 
1472 /*!	\brief Called by the debug nub thread of a team to broadcast a message to
1473 		all threads of the team that are initialized for debugging (and
1474 		thus have a debug port).
1475 */
1476 static void
1477 broadcast_debugged_thread_message(struct thread *nubThread, int32 code,
1478 	const void *message, int32 size)
1479 {
1480 	// iterate through the threads
1481 	thread_info threadInfo;
1482 	int32 cookie = 0;
1483 	while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
1484 			== B_OK) {
1485 		// find the thread and get its debug port
1486 		cpu_status state = disable_interrupts();
1487 		GRAB_THREAD_LOCK();
1488 
1489 		port_id threadDebugPort = -1;
1490 		thread_id threadID = -1;
1491 		struct thread *thread
1492 			= thread_get_thread_struct_locked(threadInfo.thread);
1493 		if (thread && thread != nubThread && thread->team == nubThread->team
1494 			&& (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
1495 			&& (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
1496 			threadDebugPort = thread->debug_info.debug_port;
1497 			threadID = thread->id;
1498 		}
1499 
1500 		RELEASE_THREAD_LOCK();
1501 		restore_interrupts(state);
1502 
1503 		// send the message to the thread
1504 		if (threadDebugPort >= 0) {
1505 			status_t error = kill_interruptable_write_port(threadDebugPort,
1506 				code, message, size);
1507 			if (error != B_OK) {
1508 				TRACE(("broadcast_debugged_thread_message(): Failed to send "
1509 					"message to thread %ld: %lx\n", threadID, error));
1510 			}
1511 		}
1512 	}
1513 }
1514 
1515 
/*!	Performs the nub thread's part of uninstalling the team's debugger:
	if the calling nub thread is still registered as the team's nub thread,
	the team debug info is cleared (under the debugger-change protocol), all
	breakpoints are removed, the detached debug info is destroyed, and all
	debugged threads are told that the debugger has changed.
*/
static void
nub_thread_cleanup(struct thread *nubThread)
{
	TRACE(("nub_thread_cleanup(%ld): debugger port: %ld\n", nubThread->id,
		nubThread->team->debug_info.debugger_port));

	// serialize with other debugger changes on this team
	ConditionVariable debugChangeCondition;
	prepare_debugger_change(nubThread->team, debugChangeCondition);

	team_debug_info teamDebugInfo;
	bool destroyDebugInfo = false;

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	// Only tear down if we are still the registered nub thread -- a new
	// debugger may have been installed in the meantime.
	team_debug_info &info = nubThread->team->debug_info;
	if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
		&& info.nub_thread == nubThread->id) {
		// detach a copy for destruction outside the lock
		teamDebugInfo = info;
		clear_team_debug_info(&info, false);
		destroyDebugInfo = true;
	}

	// update the thread::flags fields
	update_threads_debugger_installed_flag(nubThread->team);

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	if (destroyDebugInfo)
		teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();

	finish_debugger_change(nubThread->team);

	if (destroyDebugInfo)
		destroy_team_debug_info(&teamDebugInfo);

	// notify all threads that the debugger is gone
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
}
1557 
1558 
1559 /**	\brief Debug nub thread helper function that returns the debug port of
1560  *		   a thread of the same team.
1561  */
1562 static status_t
1563 debug_nub_thread_get_thread_debug_port(struct thread *nubThread,
1564 	thread_id threadID, port_id &threadDebugPort)
1565 {
1566 	status_t result = B_OK;
1567 	threadDebugPort = -1;
1568 
1569 	cpu_status state = disable_interrupts();
1570 	GRAB_THREAD_LOCK();
1571 
1572 	struct thread *thread = thread_get_thread_struct_locked(threadID);
1573 	if (thread) {
1574 		if (thread->team != nubThread->team)
1575 			result = B_BAD_VALUE;
1576 		else if (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED)
1577 			threadDebugPort = thread->debug_info.debug_port;
1578 		else
1579 			result = B_BAD_THREAD_STATE;
1580 	} else
1581 		result = B_BAD_THREAD_ID;
1582 
1583 	RELEASE_THREAD_LOCK();
1584 	restore_interrupts(state);
1585 
1586 	if (result == B_OK && threadDebugPort < 0)
1587 		result = B_ERROR;
1588 
1589 	return result;
1590 }
1591 
1592 
1593 static status_t
1594 debug_nub_thread(void *)
1595 {
1596 	struct thread *nubThread = thread_get_current_thread();
1597 
1598 	// check, if we're still the current nub thread and get our port
1599 	cpu_status state = disable_interrupts();
1600 
1601 	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1602 
1603 	if (nubThread->team->debug_info.nub_thread != nubThread->id) {
1604 		RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1605 		restore_interrupts(state);
1606 		return 0;
1607 	}
1608 
1609 	port_id port = nubThread->team->debug_info.nub_port;
1610 	sem_id writeLock = nubThread->team->debug_info.debugger_write_lock;
1611 	BreakpointManager* breakpointManager
1612 		= nubThread->team->debug_info.breakpoint_manager;
1613 
1614 	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1615 	restore_interrupts(state);
1616 
1617 	TRACE(("debug_nub_thread() thread: %ld, team %ld, nub port: %ld\n",
1618 		nubThread->id, nubThread->team->id, port));
1619 
1620 	// notify all threads that a debugger has been installed
1621 	broadcast_debugged_thread_message(nubThread,
1622 		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
1623 
1624 	// command processing loop
1625 	while (true) {
1626 		int32 command;
1627 		debug_nub_message_data message;
1628 		ssize_t messageSize = read_port_etc(port, &command, &message,
1629 			sizeof(message), B_KILL_CAN_INTERRUPT, 0);
1630 
1631 		if (messageSize < 0) {
1632 			// The port is no longer valid or we were interrupted by a kill
1633 			// signal: If we are still listed in the team's debug info as nub
1634 			// thread, we need to update that.
1635 			nub_thread_cleanup(nubThread);
1636 
1637 			TRACE(("nub thread %ld: terminating: %lx\n", nubThread->id,
1638 				messageSize));
1639 
1640 			return messageSize;
1641 		}
1642 
1643 		bool sendReply = false;
1644 		union {
1645 			debug_nub_read_memory_reply			read_memory;
1646 			debug_nub_write_memory_reply		write_memory;
1647 			debug_nub_get_cpu_state_reply		get_cpu_state;
1648 			debug_nub_set_breakpoint_reply		set_breakpoint;
1649 			debug_nub_set_watchpoint_reply		set_watchpoint;
1650 			debug_nub_get_signal_masks_reply	get_signal_masks;
1651 			debug_nub_get_signal_handler_reply	get_signal_handler;
1652 			debug_nub_start_profiler_reply		start_profiler;
1653 			debug_profiler_update				profiler_update;
1654 		} reply;
1655 		int32 replySize = 0;
1656 		port_id replyPort = -1;
1657 
1658 		// process the command
1659 		switch (command) {
1660 			case B_DEBUG_MESSAGE_READ_MEMORY:
1661 			{
1662 				// get the parameters
1663 				replyPort = message.read_memory.reply_port;
1664 				void *address = message.read_memory.address;
1665 				int32 size = message.read_memory.size;
1666 				status_t result = B_OK;
1667 
1668 				// check the parameters
1669 				if (!BreakpointManager::CanAccessAddress(address, false))
1670 					result = B_BAD_ADDRESS;
1671 				else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE)
1672 					result = B_BAD_VALUE;
1673 
1674 				// read the memory
1675 				size_t bytesRead = 0;
1676 				if (result == B_OK) {
1677 					result = breakpointManager->ReadMemory(address,
1678 						reply.read_memory.data, size, bytesRead);
1679 				}
1680 				reply.read_memory.error = result;
1681 
1682 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_READ_MEMORY: "
1683 					"reply port: %ld, address: %p, size: %ld, result: %lx, "
1684 					"read: %ld\n", nubThread->id, replyPort, address, size,
1685 					result, bytesRead));
1686 
1687 				// send only as much data as necessary
1688 				reply.read_memory.size = bytesRead;
1689 				replySize = reply.read_memory.data + bytesRead - (char*)&reply;
1690 				sendReply = true;
1691 				break;
1692 			}
1693 
1694 			case B_DEBUG_MESSAGE_WRITE_MEMORY:
1695 			{
1696 				// get the parameters
1697 				replyPort = message.write_memory.reply_port;
1698 				void *address = message.write_memory.address;
1699 				int32 size = message.write_memory.size;
1700 				const char *data = message.write_memory.data;
1701 				int32 realSize = (char*)&message + messageSize - data;
1702 				status_t result = B_OK;
1703 
1704 				// check the parameters
1705 				if (!BreakpointManager::CanAccessAddress(address, true))
1706 					result = B_BAD_ADDRESS;
1707 				else if (size <= 0 || size > realSize)
1708 					result = B_BAD_VALUE;
1709 
1710 				// write the memory
1711 				size_t bytesWritten = 0;
1712 				if (result == B_OK) {
1713 					result = breakpointManager->WriteMemory(address, data, size,
1714 						bytesWritten);
1715 				}
1716 				reply.write_memory.error = result;
1717 
1718 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_WRITE_MEMORY: "
1719 					"reply port: %ld, address: %p, size: %ld, result: %lx, "
1720 					"written: %ld\n", nubThread->id, replyPort, address, size,
1721 					result, bytesWritten));
1722 
1723 				reply.write_memory.size = bytesWritten;
1724 				sendReply = true;
1725 				replySize = sizeof(debug_nub_write_memory_reply);
1726 				break;
1727 			}
1728 
1729 			case B_DEBUG_MESSAGE_SET_TEAM_FLAGS:
1730 			{
1731 				// get the parameters
1732 				int32 flags = message.set_team_flags.flags
1733 					& B_TEAM_DEBUG_USER_FLAG_MASK;
1734 
1735 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_TEAM_FLAGS: "
1736 					"flags: %lx\n", nubThread->id, flags));
1737 
1738 				struct team *team = thread_get_current_thread()->team;
1739 
1740 				// set the flags
1741 				cpu_status state = disable_interrupts();
1742 				GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1743 
1744 				flags |= team->debug_info.flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK;
1745 				atomic_set(&team->debug_info.flags, flags);
1746 
1747 				RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1748 				restore_interrupts(state);
1749 
1750 				break;
1751 			}
1752 
1753 			case B_DEBUG_MESSAGE_SET_THREAD_FLAGS:
1754 			{
1755 				// get the parameters
1756 				thread_id threadID = message.set_thread_flags.thread;
1757 				int32 flags = message.set_thread_flags.flags
1758 					& B_THREAD_DEBUG_USER_FLAG_MASK;
1759 
1760 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_THREAD_FLAGS: "
1761 					"thread: %ld, flags: %lx\n", nubThread->id, threadID,
1762 					flags));
1763 
1764 				// set the flags
1765 				cpu_status state = disable_interrupts();
1766 				GRAB_THREAD_LOCK();
1767 
1768 				struct thread *thread
1769 					= thread_get_thread_struct_locked(threadID);
1770 				if (thread
1771 					&& thread->team == thread_get_current_thread()->team) {
1772 					flags |= thread->debug_info.flags
1773 						& B_THREAD_DEBUG_KERNEL_FLAG_MASK;
1774 					atomic_set(&thread->debug_info.flags, flags);
1775 				}
1776 
1777 				RELEASE_THREAD_LOCK();
1778 				restore_interrupts(state);
1779 
1780 				break;
1781 			}
1782 
1783 			case B_DEBUG_MESSAGE_CONTINUE_THREAD:
1784 			{
1785 				// get the parameters
1786 				thread_id threadID;
1787 				uint32 handleEvent;
1788 				bool singleStep;
1789 
1790 				threadID = message.continue_thread.thread;
1791 				handleEvent = message.continue_thread.handle_event;
1792 				singleStep = message.continue_thread.single_step;
1793 
1794 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_CONTINUE_THREAD: "
1795 					"thread: %ld, handle event: %lu, single step: %d\n",
1796 					nubThread->id, threadID, handleEvent, singleStep));
1797 
1798 				// find the thread and get its debug port
1799 				port_id threadDebugPort = -1;
1800 				status_t result = debug_nub_thread_get_thread_debug_port(
1801 					nubThread, threadID, threadDebugPort);
1802 
1803 				// send a message to the debugged thread
1804 				if (result == B_OK) {
1805 					debugged_thread_continue commandMessage;
1806 					commandMessage.handle_event = handleEvent;
1807 					commandMessage.single_step = singleStep;
1808 
1809 					result = write_port(threadDebugPort,
1810 						B_DEBUGGED_THREAD_MESSAGE_CONTINUE,
1811 						&commandMessage, sizeof(commandMessage));
1812 				}
1813 
1814 				break;
1815 			}
1816 
1817 			case B_DEBUG_MESSAGE_SET_CPU_STATE:
1818 			{
1819 				// get the parameters
1820 				thread_id threadID = message.set_cpu_state.thread;
1821 				const debug_cpu_state &cpuState
1822 					= message.set_cpu_state.cpu_state;
1823 
1824 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_CPU_STATE: "
1825 					"thread: %ld\n", nubThread->id, threadID));
1826 
1827 				// find the thread and get its debug port
1828 				port_id threadDebugPort = -1;
1829 				status_t result = debug_nub_thread_get_thread_debug_port(
1830 					nubThread, threadID, threadDebugPort);
1831 
1832 				// send a message to the debugged thread
1833 				if (result == B_OK) {
1834 					debugged_thread_set_cpu_state commandMessage;
1835 					memcpy(&commandMessage.cpu_state, &cpuState,
1836 						sizeof(debug_cpu_state));
1837 					write_port(threadDebugPort,
1838 						B_DEBUGGED_THREAD_SET_CPU_STATE,
1839 						&commandMessage, sizeof(commandMessage));
1840 				}
1841 
1842 				break;
1843 			}
1844 
1845 			case B_DEBUG_MESSAGE_GET_CPU_STATE:
1846 			{
1847 				// get the parameters
1848 				thread_id threadID = message.get_cpu_state.thread;
1849 				replyPort = message.get_cpu_state.reply_port;
1850 
1851 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_GET_CPU_STATE: "
1852 					"thread: %ld\n", nubThread->id, threadID));
1853 
1854 				// find the thread and get its debug port
1855 				port_id threadDebugPort = -1;
1856 				status_t result = debug_nub_thread_get_thread_debug_port(
1857 					nubThread, threadID, threadDebugPort);
1858 
1859 				// send a message to the debugged thread
1860 				if (threadDebugPort >= 0) {
1861 					debugged_thread_get_cpu_state commandMessage;
1862 					commandMessage.reply_port = replyPort;
1863 					result = write_port(threadDebugPort,
1864 						B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage,
1865 						sizeof(commandMessage));
1866 				}
1867 
1868 				// send a reply to the debugger in case of error
1869 				if (result != B_OK) {
1870 					reply.get_cpu_state.error = result;
1871 					sendReply = true;
1872 					replySize = sizeof(reply.get_cpu_state);
1873 				}
1874 
1875 				break;
1876 			}
1877 
1878 			case B_DEBUG_MESSAGE_SET_BREAKPOINT:
1879 			{
1880 				// get the parameters
1881 				replyPort = message.set_breakpoint.reply_port;
1882 				void *address = message.set_breakpoint.address;
1883 
1884 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_BREAKPOINT: "
1885 					"address: %p\n", nubThread->id, address));
1886 
1887 				// check the address
1888 				status_t result = B_OK;
1889 				if (address == NULL
1890 					|| !BreakpointManager::CanAccessAddress(address, false)) {
1891 					result = B_BAD_ADDRESS;
1892 				}
1893 
1894 				// set the breakpoint
1895 				if (result == B_OK)
1896 					result = breakpointManager->InstallBreakpoint(address);
1897 
1898 				if (result == B_OK)
1899 					update_threads_breakpoints_flag();
1900 
1901 				// prepare the reply
1902 				reply.set_breakpoint.error = result;
1903 				replySize = sizeof(reply.set_breakpoint);
1904 				sendReply = true;
1905 
1906 				break;
1907 			}
1908 
1909 			case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT:
1910 			{
1911 				// get the parameters
1912 				void *address = message.clear_breakpoint.address;
1913 
1914 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_CLEAR_BREAKPOINT: "
1915 					"address: %p\n", nubThread->id, address));
1916 
1917 				// check the address
1918 				status_t result = B_OK;
1919 				if (address == NULL
1920 					|| !BreakpointManager::CanAccessAddress(address, false)) {
1921 					result = B_BAD_ADDRESS;
1922 				}
1923 
1924 				// clear the breakpoint
1925 				if (result == B_OK)
1926 					result = breakpointManager->UninstallBreakpoint(address);
1927 
1928 				if (result == B_OK)
1929 					update_threads_breakpoints_flag();
1930 
1931 				break;
1932 			}
1933 
1934 			case B_DEBUG_MESSAGE_SET_WATCHPOINT:
1935 			{
1936 				// get the parameters
1937 				replyPort = message.set_watchpoint.reply_port;
1938 				void *address = message.set_watchpoint.address;
1939 				uint32 type = message.set_watchpoint.type;
1940 				int32 length = message.set_watchpoint.length;
1941 
1942 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_WATCHPOINT: "
1943 					"address: %p, type: %lu, length: %ld\n", nubThread->id,
1944 					address, type, length));
1945 
1946 				// check the address and size
1947 				status_t result = B_OK;
1948 				if (address == NULL
1949 					|| !BreakpointManager::CanAccessAddress(address, false)) {
1950 					result = B_BAD_ADDRESS;
1951 				}
1952 				if (length < 0)
1953 					result = B_BAD_VALUE;
1954 
1955 				// set the watchpoint
1956 				if (result == B_OK) {
1957 					result = breakpointManager->InstallWatchpoint(address, type,
1958 						length);
1959 				}
1960 
1961 				if (result == B_OK)
1962 					update_threads_breakpoints_flag();
1963 
1964 				// prepare the reply
1965 				reply.set_watchpoint.error = result;
1966 				replySize = sizeof(reply.set_watchpoint);
1967 				sendReply = true;
1968 
1969 				break;
1970 			}
1971 
1972 			case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT:
1973 			{
1974 				// get the parameters
1975 				void *address = message.clear_watchpoint.address;
1976 
1977 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_CLEAR_WATCHPOINT: "
1978 					"address: %p\n", nubThread->id, address));
1979 
1980 				// check the address
1981 				status_t result = B_OK;
1982 				if (address == NULL
1983 					|| !BreakpointManager::CanAccessAddress(address, false)) {
1984 					result = B_BAD_ADDRESS;
1985 				}
1986 
1987 				// clear the watchpoint
1988 				if (result == B_OK)
1989 					result = breakpointManager->UninstallWatchpoint(address);
1990 
1991 				if (result == B_OK)
1992 					update_threads_breakpoints_flag();
1993 
1994 				break;
1995 			}
1996 
1997 			case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS:
1998 			{
1999 				// get the parameters
2000 				thread_id threadID = message.set_signal_masks.thread;
2001 				uint64 ignore = message.set_signal_masks.ignore_mask;
2002 				uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask;
2003 				uint32 ignoreOp = message.set_signal_masks.ignore_op;
2004 				uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op;
2005 
2006 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_SIGNAL_MASKS: "
2007 					"thread: %ld, ignore: %llx (op: %lu), ignore once: %llx "
2008 					"(op: %lu)\n", nubThread->id, threadID, ignore,
2009 						ignoreOp, ignoreOnce, ignoreOnceOp));
2010 
2011 				// set the masks
2012 				cpu_status state = disable_interrupts();
2013 				GRAB_THREAD_LOCK();
2014 
2015 				struct thread *thread
2016 					= thread_get_thread_struct_locked(threadID);
2017 				if (thread
2018 					&& thread->team == thread_get_current_thread()->team) {
2019 					thread_debug_info &threadDebugInfo = thread->debug_info;
2020 					// set ignore mask
2021 					switch (ignoreOp) {
2022 						case B_DEBUG_SIGNAL_MASK_AND:
2023 							threadDebugInfo.ignore_signals &= ignore;
2024 							break;
2025 						case B_DEBUG_SIGNAL_MASK_OR:
2026 							threadDebugInfo.ignore_signals |= ignore;
2027 							break;
2028 						case B_DEBUG_SIGNAL_MASK_SET:
2029 							threadDebugInfo.ignore_signals = ignore;
2030 							break;
2031 					}
2032 
2033 					// set ignore once mask
2034 					switch (ignoreOnceOp) {
2035 						case B_DEBUG_SIGNAL_MASK_AND:
2036 							threadDebugInfo.ignore_signals_once &= ignoreOnce;
2037 							break;
2038 						case B_DEBUG_SIGNAL_MASK_OR:
2039 							threadDebugInfo.ignore_signals_once |= ignoreOnce;
2040 							break;
2041 						case B_DEBUG_SIGNAL_MASK_SET:
2042 							threadDebugInfo.ignore_signals_once = ignoreOnce;
2043 							break;
2044 					}
2045 				}
2046 
2047 				RELEASE_THREAD_LOCK();
2048 				restore_interrupts(state);
2049 
2050 				break;
2051 			}
2052 
2053 			case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS:
2054 			{
2055 				// get the parameters
2056 				replyPort = message.get_signal_masks.reply_port;
2057 				thread_id threadID = message.get_signal_masks.thread;
2058 				status_t result = B_OK;
2059 
2060 				// get the masks
2061 				uint64 ignore = 0;
2062 				uint64 ignoreOnce = 0;
2063 
2064 				cpu_status state = disable_interrupts();
2065 				GRAB_THREAD_LOCK();
2066 
2067 				struct thread *thread
2068 					= thread_get_thread_struct_locked(threadID);
2069 				if (thread) {
2070 					ignore = thread->debug_info.ignore_signals;
2071 					ignoreOnce = thread->debug_info.ignore_signals_once;
2072 				} else
2073 					result = B_BAD_THREAD_ID;
2074 
2075 				RELEASE_THREAD_LOCK();
2076 				restore_interrupts(state);
2077 
2078 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_GET_SIGNAL_MASKS: "
2079 					"reply port: %ld, thread: %ld, ignore: %llx, "
2080 					"ignore once: %llx, result: %lx\n", nubThread->id,
2081 					replyPort, threadID, ignore, ignoreOnce, result));
2082 
2083 				// prepare the message
2084 				reply.get_signal_masks.error = result;
2085 				reply.get_signal_masks.ignore_mask = ignore;
2086 				reply.get_signal_masks.ignore_once_mask = ignoreOnce;
2087 				replySize = sizeof(reply.get_signal_masks);
2088 				sendReply = true;
2089 				break;
2090 			}
2091 
2092 			case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER:
2093 			{
2094 				// get the parameters
2095 				thread_id threadID = message.set_signal_handler.thread;
2096 				int signal = message.set_signal_handler.signal;
2097 				struct sigaction &handler = message.set_signal_handler.handler;
2098 
2099 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER: "
2100 					"thread: %ld, signal: %d, handler: %p\n", nubThread->id,
2101 					threadID, signal, handler.sa_handler));
2102 
2103 				// check, if the thread exists and is ours
2104 				cpu_status state = disable_interrupts();
2105 				GRAB_THREAD_LOCK();
2106 
2107 				struct thread *thread
2108 					= thread_get_thread_struct_locked(threadID);
2109 				if (thread
2110 					&& thread->team != thread_get_current_thread()->team) {
2111 					thread = NULL;
2112 				}
2113 
2114 				RELEASE_THREAD_LOCK();
2115 				restore_interrupts(state);
2116 
2117 				// set the handler
2118 				if (thread)
2119 					sigaction_etc(threadID, signal, &handler, NULL);
2120 
2121 				break;
2122 			}
2123 
2124 			case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER:
2125 			{
2126 				// get the parameters
2127 				replyPort = message.get_signal_handler.reply_port;
2128 				thread_id threadID = message.get_signal_handler.thread;
2129 				int signal = message.get_signal_handler.signal;
2130 				status_t result = B_OK;
2131 
2132 				// check, if the thread exists and is ours
2133 				cpu_status state = disable_interrupts();
2134 				GRAB_THREAD_LOCK();
2135 
2136 				struct thread *thread
2137 					= thread_get_thread_struct_locked(threadID);
2138 				if (thread) {
2139 					if (thread->team != thread_get_current_thread()->team)
2140 						result = B_BAD_VALUE;
2141 				} else
2142 					result = B_BAD_THREAD_ID;
2143 
2144 				RELEASE_THREAD_LOCK();
2145 				restore_interrupts(state);
2146 
2147 				// get the handler
2148 				if (result == B_OK) {
2149 					result = sigaction_etc(threadID, signal, NULL,
2150 						&reply.get_signal_handler.handler);
2151 				}
2152 
2153 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER: "
2154 					"reply port: %ld, thread: %ld, signal: %d, "
2155 					"handler: %p\n", nubThread->id, replyPort,
2156 					threadID, signal,
2157 					reply.get_signal_handler.handler.sa_handler));
2158 
2159 				// prepare the message
2160 				reply.get_signal_handler.error = result;
2161 				replySize = sizeof(reply.get_signal_handler);
2162 				sendReply = true;
2163 				break;
2164 			}
2165 
2166 			case B_DEBUG_MESSAGE_PREPARE_HANDOVER:
2167 			{
2168 				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_PREPARE_HANDOVER\n",
2169 					nubThread->id));
2170 
2171 				struct team *team = nubThread->team;
2172 
2173 				// Acquire the debugger write lock. As soon as we have it and
2174 				// have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread
2175 				// will write anything to the debugger port anymore.
2176 				status_t result = acquire_sem_etc(writeLock, 1,
2177 					B_KILL_CAN_INTERRUPT, 0);
2178 				if (result == B_OK) {
2179 					// set the respective team debug flag
2180 					cpu_status state = disable_interrupts();
2181 					GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2182 
2183 					atomic_or(&team->debug_info.flags,
2184 						B_TEAM_DEBUG_DEBUGGER_HANDOVER);
2185 					BreakpointManager* breakpointManager
2186 						= team->debug_info.breakpoint_manager;
2187 
2188 					RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2189 					restore_interrupts(state);
2190 
2191 					// remove all installed breakpoints
2192 					breakpointManager->RemoveAllBreakpoints();
2193 
2194 					release_sem(writeLock);
2195 				} else {
2196 					// We probably got a SIGKILL. If so, we will terminate when
2197 					// reading the next message fails.
2198 				}
2199 
2200 				break;
2201 			}
2202 
2203 			case B_DEBUG_MESSAGE_HANDED_OVER:
2204 			{
2205 				// notify all threads that the debugger has changed
2206 				broadcast_debugged_thread_message(nubThread,
2207 					B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
2208 
2209 				break;
2210 			}
2211 
2212 			case B_DEBUG_START_PROFILER:
2213 			{
2214 				// get the parameters
2215 				thread_id threadID = message.start_profiler.thread;
2216 				replyPort = message.start_profiler.reply_port;
2217 				area_id sampleArea = message.start_profiler.sample_area;
2218 				int32 stackDepth = message.start_profiler.stack_depth;
2219 				bool variableStackDepth
2220 					= message.start_profiler.variable_stack_depth;
2221 				bigtime_t interval = max_c(message.start_profiler.interval,
2222 					B_DEBUG_MIN_PROFILE_INTERVAL);
2223 				status_t result = B_OK;
2224 
2225 				TRACE(("nub thread %ld: B_DEBUG_START_PROFILER: "
2226 					"thread: %ld, sample area: %ld\n", nubThread->id, threadID,
2227 					sampleArea));
2228 
2229 				if (stackDepth < 1)
2230 					stackDepth = 1;
2231 				else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH)
2232 					stackDepth = B_DEBUG_STACK_TRACE_DEPTH;
2233 
2234 				// provision for an extra entry per hit (for the number of
2235 				// samples), if variable stack depth
2236 				if (variableStackDepth)
2237 					stackDepth++;
2238 
2239 				// clone the sample area
2240 				area_info areaInfo;
2241 				if (result == B_OK)
2242 					result = get_area_info(sampleArea, &areaInfo);
2243 
2244 				area_id clonedSampleArea = -1;
2245 				void* samples = NULL;
2246 				if (result == B_OK) {
2247 					clonedSampleArea = clone_area("profiling samples", &samples,
2248 						B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA,
2249 						sampleArea);
2250 					if (clonedSampleArea >= 0) {
2251 						// we need the memory locked
2252 						result = lock_memory(samples, areaInfo.size,
2253 							B_READ_DEVICE);
2254 						if (result != B_OK) {
2255 							delete_area(clonedSampleArea);
2256 							clonedSampleArea = -1;
2257 						}
2258 					} else
2259 						result = clonedSampleArea;
2260 				}
2261 
2262 				// get the thread and set the profile info
2263 				int32 imageEvent = nubThread->team->debug_info.image_event;
2264 				if (result == B_OK) {
2265 					cpu_status state = disable_interrupts();
2266 					GRAB_THREAD_LOCK();
2267 
2268 					struct thread *thread
2269 						= thread_get_thread_struct_locked(threadID);
2270 					if (thread && thread->team == nubThread->team) {
2271 						thread_debug_info &threadDebugInfo = thread->debug_info;
2272 						if (threadDebugInfo.profile.samples == NULL) {
2273 							threadDebugInfo.profile.interval = interval;
2274 							threadDebugInfo.profile.sample_area
2275 								= clonedSampleArea;
2276 							threadDebugInfo.profile.samples = (addr_t*)samples;
2277 							threadDebugInfo.profile.max_samples
2278 								= areaInfo.size / sizeof(addr_t);
2279 							threadDebugInfo.profile.flush_threshold
2280 								= threadDebugInfo.profile.max_samples
2281 									* B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD
2282 									/ 100;
2283 							threadDebugInfo.profile.sample_count = 0;
2284 							threadDebugInfo.profile.dropped_ticks = 0;
2285 							threadDebugInfo.profile.stack_depth = stackDepth;
2286 							threadDebugInfo.profile.variable_stack_depth
2287 								= variableStackDepth;
2288 							threadDebugInfo.profile.buffer_full = false;
2289 							threadDebugInfo.profile.interval_left = interval;
2290 							threadDebugInfo.profile.installed_timer = NULL;
2291 							threadDebugInfo.profile.image_event = imageEvent;
2292 							threadDebugInfo.profile.last_image_event
2293 								= imageEvent;
2294 						} else
2295 							result = B_BAD_VALUE;
2296 					} else
2297 						result = B_BAD_THREAD_ID;
2298 
2299 					RELEASE_THREAD_LOCK();
2300 					restore_interrupts(state);
2301 				}
2302 
2303 				// on error unlock and delete the sample area
2304 				if (result != B_OK) {
2305 					if (clonedSampleArea >= 0) {
2306 						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
2307 						delete_area(clonedSampleArea);
2308 					}
2309 				}
2310 
2311 				// send a reply to the debugger
2312 				reply.start_profiler.error = result;
2313 				reply.start_profiler.interval = interval;
2314 				reply.start_profiler.image_event = imageEvent;
2315 				sendReply = true;
2316 				replySize = sizeof(reply.start_profiler);
2317 
2318 				break;
2319 			}
2320 
2321 			case B_DEBUG_STOP_PROFILER:
2322 			{
2323 				// get the parameters
2324 				thread_id threadID = message.stop_profiler.thread;
2325 				replyPort = message.stop_profiler.reply_port;
2326 				status_t result = B_OK;
2327 
2328 				TRACE(("nub thread %ld: B_DEBUG_STOP_PROFILER: "
2329 					"thread: %ld\n", nubThread->id, threadID));
2330 
2331 				area_id sampleArea = -1;
2332 				addr_t* samples = NULL;
2333 				int32 sampleCount = 0;
2334 				int32 stackDepth = 0;
2335 				bool variableStackDepth = false;
2336 				int32 imageEvent = 0;
2337 				int32 droppedTicks = 0;
2338 
2339 				// get the thread and detach the profile info
2340 				cpu_status state = disable_interrupts();
2341 				GRAB_THREAD_LOCK();
2342 
2343 				struct thread *thread
2344 					= thread_get_thread_struct_locked(threadID);
2345 				if (thread && thread->team == nubThread->team) {
2346 					thread_debug_info &threadDebugInfo = thread->debug_info;
2347 					if (threadDebugInfo.profile.samples != NULL) {
2348 						sampleArea = threadDebugInfo.profile.sample_area;
2349 						samples = threadDebugInfo.profile.samples;
2350 						sampleCount = threadDebugInfo.profile.sample_count;
2351 						droppedTicks = threadDebugInfo.profile.dropped_ticks;
2352 						stackDepth = threadDebugInfo.profile.stack_depth;
2353 						variableStackDepth
2354 							= threadDebugInfo.profile.variable_stack_depth;
2355 						imageEvent = threadDebugInfo.profile.image_event;
2356 						threadDebugInfo.profile.sample_area = -1;
2357 						threadDebugInfo.profile.samples = NULL;
2358 						threadDebugInfo.profile.buffer_full = false;
2359 						threadDebugInfo.profile.dropped_ticks = 0;
2360 					} else
2361 						result = B_BAD_VALUE;
2362 				} else
2363 					result = B_BAD_THREAD_ID;
2364 
2365 				RELEASE_THREAD_LOCK();
2366 				restore_interrupts(state);
2367 
2368 				// prepare the reply
2369 				if (result == B_OK) {
2370 					reply.profiler_update.origin.thread = threadID;
2371 					reply.profiler_update.image_event = imageEvent;
2372 					reply.profiler_update.stack_depth = stackDepth;
2373 					reply.profiler_update.variable_stack_depth
2374 						= variableStackDepth;
2375 					reply.profiler_update.sample_count = sampleCount;
2376 					reply.profiler_update.dropped_ticks = droppedTicks;
2377 					reply.profiler_update.stopped = true;
2378 				} else
2379 					reply.profiler_update.origin.thread = result;
2380 
2381 				replySize = sizeof(debug_profiler_update);
2382 				sendReply = true;
2383 
2384 				if (sampleArea >= 0) {
2385 					area_info areaInfo;
2386 					if (get_area_info(sampleArea, &areaInfo) == B_OK) {
2387 						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
2388 						delete_area(sampleArea);
2389 					}
2390 				}
2391 			}
2392 		}
2393 
2394 		// send the reply, if necessary
2395 		if (sendReply) {
2396 			status_t error = kill_interruptable_write_port(replyPort, command,
2397 				&reply, replySize);
2398 
2399 			if (error != B_OK) {
2400 				// The debugger port is either not longer existing or we got
2401 				// interrupted by a kill signal. In either case we terminate.
2402 				TRACE(("nub thread %ld: failed to send reply to port %ld: %s\n",
2403 					nubThread->id, replyPort, strerror(error)));
2404 
2405 				nub_thread_cleanup(nubThread);
2406 				return error;
2407 			}
2408 		}
2409 	}
2410 }
2411 
2412 
2413 /**	\brief Helper function for install_team_debugger(), that sets up the team
2414 		   and thread debug infos.
2415 
2416 	Interrupts must be disabled and the team debug info lock of the team to be
2417 	debugged must be held. The function will release the lock, but leave
2418 	interrupts disabled.
2419 
2420 	The function also clears the arch specific team and thread debug infos
2421 	(including among other things formerly set break/watchpoints).
2422  */
static void
install_team_debugger_init_debug_infos(struct team *team, team_id debuggerTeam,
	port_id debuggerPort, port_id nubPort, thread_id nubThread,
	sem_id debuggerPortWriteLock, thread_id causingThread)
{
	// publish the new debugger in the team debug info
	atomic_set(&team->debug_info.flags,
		B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	team->debug_info.nub_port = nubPort;
	team->debug_info.nub_thread = nubThread;
	team->debug_info.debugger_team = debuggerTeam;
	team->debug_info.debugger_port = debuggerPort;
	team->debug_info.debugger_write_lock = debuggerPortWriteLock;
	team->debug_info.causing_thread = causingThread;

	// clear arch specific team debug state (e.g. formerly set
	// break/watchpoints)
	arch_clear_team_debug_info(&team->debug_info.arch_info);

	// As documented above: we release the team debug info lock here, but
	// leave interrupts disabled for the caller.
	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	// set the user debug flags and signal masks of all threads to the default
	GRAB_THREAD_LOCK();

	for (struct thread *thread = team->thread_list;
		 thread;
		 thread = thread->team_next) {
		if (thread->id == nubThread) {
			// the nub thread is special-cased -- it only gets the nub flag
			atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD);
		} else {
			// reset the user-settable flag bits to the defaults, keeping the
			// remaining (non-user) flag bits untouched
			int32 flags = thread->debug_info.flags
				& ~B_THREAD_DEBUG_USER_FLAG_MASK;
			atomic_set(&thread->debug_info.flags,
				flags | B_THREAD_DEBUG_DEFAULT_FLAGS);
			thread->debug_info.ignore_signals = 0;
			thread->debug_info.ignore_signals_once = 0;

			// clear arch specific per-thread debug state as well
			arch_clear_thread_debug_info(&thread->debug_info.arch_info);
		}
	}

	RELEASE_THREAD_LOCK();

	// update the thread::flags fields
	update_threads_debugger_installed_flag(team);
}
2466 
2467 
/**	\brief Installs the team owning \a debuggerPort as the debugger of team
		   \a teamID.

	If \a useDefault is \c true, \a debuggerPort is ignored and the default
	debugger port (sDefaultDebuggerPort) is used instead. If a debugger is
	already installed, the outcome depends on \a dontReplace and on whether a
	debugger handover has been prepared (B_TEAM_DEBUG_DEBUGGER_HANDOVER set).

	\return The team's nub port on success, an error code otherwise.
*/
static port_id
install_team_debugger(team_id teamID, port_id debuggerPort,
	thread_id causingThread, bool useDefault, bool dontReplace)
{
	TRACE(("install_team_debugger(team: %ld, port: %ld, default: %d, "
		"dontReplace: %d)\n", teamID, debuggerPort, useDefault, dontReplace));

	if (useDefault)
		debuggerPort = atomic_get(&sDefaultDebuggerPort);

	// get the debugger team
	port_info debuggerPortInfo;
	status_t error = get_port_info(debuggerPort, &debuggerPortInfo);
	if (error != B_OK) {
		TRACE(("install_team_debugger(): Failed to get debugger port info: "
			"%lx\n", error));
		return error;
	}
	team_id debuggerTeam = debuggerPortInfo.team;

	// Check the debugger team: It must neither be the kernel team nor the
	// debugged team.
	if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) {
		TRACE(("install_team_debugger(): Can't debug kernel or debugger team. "
			"debugger: %ld, debugged: %ld\n", debuggerTeam, teamID));
		return B_NOT_ALLOWED;
	}

	// get the team
	struct team* team;
	ConditionVariable debugChangeCondition;
	error = prepare_debugger_change(teamID, debugChangeCondition, team);
	if (error != B_OK)
		return error;

	// get the real team ID
	teamID = team->id;

	// check, if a debugger is already installed

	bool done = false;
	port_id result = B_ERROR;
	bool handOver = false;
	bool releaseDebugInfoLock = true;
	port_id oldDebuggerPort = -1;
	port_id nubPort = -1;

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = team->debug_info.flags;

	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// There's already a debugger installed.
		if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) {
			if (dontReplace) {
				// We're fine with already having a debugger.
				error = B_OK;
				done = true;
				result = team->debug_info.nub_port;
			} else {
				// a handover to another debugger is requested
				// Set the handing-over flag -- we'll clear both flags after
				// having sent the handed-over message to the new debugger.
				atomic_or(&team->debug_info.flags,
					B_TEAM_DEBUG_DEBUGGER_HANDING_OVER);

				oldDebuggerPort = team->debug_info.debugger_port;
				result = nubPort = team->debug_info.nub_port;
				if (causingThread < 0)
					causingThread = team->debug_info.causing_thread;

				// Set the new debugger. Note: this releases the team debug
				// info lock (but leaves interrupts disabled), hence
				// releaseDebugInfoLock is cleared below.
				install_team_debugger_init_debug_infos(team, debuggerTeam,
					debuggerPort, nubPort, team->debug_info.nub_thread,
					team->debug_info.debugger_write_lock, causingThread);

				releaseDebugInfoLock = false;
				handOver = true;
				done = true;

				// finally set the new port owner
				if (set_port_owner(nubPort, debuggerTeam) != B_OK) {
					// The old debugger must just have died. Just proceed as
					// if there was no debugger installed. We may still be too
					// early, in which case we'll fail, but this race condition
					// should be unbelievably rare and relatively harmless.
					handOver = false;
					done = false;
				}
			}
		} else {
			// there's already a debugger installed
			error = (dontReplace ? B_OK : B_BAD_VALUE);
			done = true;
			result = team->debug_info.nub_port;
		}
	} else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0
		&& useDefault) {
		// No debugger yet, disable_debugger() had been invoked, and we
		// would install the default debugger. Just fail.
		error = B_BAD_VALUE;
	}

	// in case of a handover the lock has already been released
	if (releaseDebugInfoLock)
		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	restore_interrupts(state);

	if (handOver) {
		// prepare the handed-over message
		debug_handed_over notification;
		notification.origin.thread = -1;
		notification.origin.team = teamID;
		notification.origin.nub_port = nubPort;
		notification.debugger = debuggerTeam;
		notification.debugger_port = debuggerPort;
		notification.causing_thread = causingThread;

		// notify the new debugger
		error = write_port_etc(debuggerPort,
			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
		if (error != B_OK) {
			dprintf("install_team_debugger(): Failed to send message to new "
				"debugger: %s\n", strerror(error));
		}

		// clear the handed-over and handing-over flags
		state = disable_interrupts();
		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		atomic_and(&team->debug_info.flags,
			~(B_TEAM_DEBUG_DEBUGGER_HANDOVER
				| B_TEAM_DEBUG_DEBUGGER_HANDING_OVER));

		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
		restore_interrupts(state);

		finish_debugger_change(team);

		// notify the nub thread
		kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER,
			NULL, 0);

		// notify the old debugger
		error = write_port_etc(oldDebuggerPort,
			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
		if (error != B_OK) {
			TRACE(("install_team_debugger(): Failed to send message to old "
				"debugger: %s\n", strerror(error)));
		}

		TRACE(("install_team_debugger() done: handed over to debugger: team: "
			"%ld, port: %ld\n", debuggerTeam, debuggerPort));

		return result;
	}

	if (done || error != B_OK) {
		TRACE(("install_team_debugger() done1: %ld\n",
			(error == B_OK ? result : error)));
		finish_debugger_change(team);
		return (error == B_OK ? result : error);
	}

	// nothing is installed yet -- create all required resources; subsequent
	// steps are skipped via the error variable once one of them fails

	// create the debugger write lock semaphore
	char nameBuffer[B_OS_NAME_LENGTH];
	snprintf(nameBuffer, sizeof(nameBuffer), "team %ld debugger port write",
		teamID);
	sem_id debuggerWriteLock = create_sem(1, nameBuffer);
	if (debuggerWriteLock < 0)
		error = debuggerWriteLock;

	// create the nub port
	snprintf(nameBuffer, sizeof(nameBuffer), "team %ld debug", teamID);
	if (error == B_OK) {
		nubPort = create_port(1, nameBuffer);
		if (nubPort < 0)
			error = nubPort;
		else
			result = nubPort;
	}

	// make the debugger team the port owner; thus we know, if the debugger is
	// gone and can cleanup
	if (error == B_OK)
		error = set_port_owner(nubPort, debuggerTeam);

	// create the breakpoint manager
	BreakpointManager* breakpointManager = NULL;
	if (error == B_OK) {
		breakpointManager = new(std::nothrow) BreakpointManager;
		if (breakpointManager != NULL)
			error = breakpointManager->Init();
		else
			error = B_NO_MEMORY;
	}

	// spawn the nub thread (suspended; resumed below only on success)
	thread_id nubThread = -1;
	if (error == B_OK) {
		snprintf(nameBuffer, sizeof(nameBuffer), "team %ld debug task", teamID);
		nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer,
			B_NORMAL_PRIORITY, NULL, teamID, -1);
		if (nubThread < 0)
			error = nubThread;
	}

	// now adjust the debug info accordingly
	if (error == B_OK) {
		state = disable_interrupts();
		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		team->debug_info.breakpoint_manager = breakpointManager;
		// this releases the team debug info lock again
		install_team_debugger_init_debug_infos(team, debuggerTeam,
			debuggerPort, nubPort, nubThread, debuggerWriteLock,
			causingThread);

		restore_interrupts(state);
	}

	finish_debugger_change(team);

	// if everything went fine, resume the nub thread, otherwise clean up
	if (error == B_OK) {
		resume_thread(nubThread);
	} else {
		// delete port and terminate thread
		if (nubPort >= 0) {
			set_port_owner(nubPort, B_CURRENT_TEAM);
			delete_port(nubPort);
		}
		if (nubThread >= 0) {
			int32 result;
			wait_for_thread(nubThread, &result);
		}

		delete breakpointManager;
	}

	TRACE(("install_team_debugger() done2: %ld\n",
		(error == B_OK ? result : error)));
	return (error == B_OK ? result : error);
}
2715 
2716 
2717 static status_t
2718 ensure_debugger_installed()
2719 {
2720 	port_id port = install_team_debugger(B_CURRENT_TEAM, -1,
2721 		thread_get_current_thread_id(), true, true);
2722 	return port >= 0 ? B_OK : port;
2723 }
2724 
2725 
2726 // #pragma mark -
2727 
2728 
2729 void
2730 _user_debugger(const char *userMessage)
2731 {
2732 	// install the default debugger, if there is none yet
2733 	status_t error = ensure_debugger_installed();
2734 	if (error != B_OK) {
2735 		// time to commit suicide
2736 		char buffer[128];
2737 		ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer));
2738 		if (length >= 0) {
2739 			dprintf("_user_debugger(): Failed to install debugger. Message is: "
2740 				"`%s'\n", buffer);
2741 		} else {
2742 			dprintf("_user_debugger(): Failed to install debugger. Message is: "
2743 				"%p (%s)\n", userMessage, strerror(length));
2744 		}
2745 		_user_exit_team(1);
2746 	}
2747 
2748 	// prepare the message
2749 	debug_debugger_call message;
2750 	message.message = (void*)userMessage;
2751 
2752 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message,
2753 		sizeof(message), true);
2754 }
2755 
2756 
2757 int
2758 _user_disable_debugger(int state)
2759 {
2760 	struct team *team = thread_get_current_thread()->team;
2761 
2762 	TRACE(("_user_disable_debugger(%d): team: %ld\n", state, team->id));
2763 
2764 	cpu_status cpuState = disable_interrupts();
2765 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2766 
2767 	int32 oldFlags;
2768 	if (state) {
2769 		oldFlags = atomic_or(&team->debug_info.flags,
2770 			B_TEAM_DEBUG_DEBUGGER_DISABLED);
2771 	} else {
2772 		oldFlags = atomic_and(&team->debug_info.flags,
2773 			~B_TEAM_DEBUG_DEBUGGER_DISABLED);
2774 	}
2775 
2776 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2777 	restore_interrupts(cpuState);
2778 
2779 	// TODO: Check, if the return value is really the old state.
2780 	return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED);
2781 }
2782 
2783 
2784 status_t
2785 _user_install_default_debugger(port_id debuggerPort)
2786 {
2787 	// if supplied, check whether the port is a valid port
2788 	if (debuggerPort >= 0) {
2789 		port_info portInfo;
2790 		status_t error = get_port_info(debuggerPort, &portInfo);
2791 		if (error != B_OK)
2792 			return error;
2793 
2794 		// the debugger team must not be the kernel team
2795 		if (portInfo.team == team_get_kernel_team_id())
2796 			return B_NOT_ALLOWED;
2797 	}
2798 
2799 	atomic_set(&sDefaultDebuggerPort, debuggerPort);
2800 
2801 	return B_OK;
2802 }
2803 
2804 
2805 port_id
2806 _user_install_team_debugger(team_id teamID, port_id debuggerPort)
2807 {
2808 	return install_team_debugger(teamID, debuggerPort, -1, false, false);
2809 }
2810 
2811 
/*!	\brief Syscall: removes the debugger installed for team \a teamID.

	Deleting the nub port causes the nub thread to terminate and remove the
	debugger (cf. the inline comment below); this function merely triggers
	that and waits for the nub thread to exit.

	\return \c B_OK on success, \c B_BAD_VALUE if no debugger was installed,
		or an error from prepare_debugger_change().
*/
status_t
_user_remove_team_debugger(team_id teamID)
{
	struct team* team;
	ConditionVariable debugChangeCondition;
	status_t error = prepare_debugger_change(teamID, debugChangeCondition,
		team);
	if (error != B_OK)
		return error;

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	thread_id nubThread = -1;
	port_id nubPort = -1;

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// there's a debugger installed
		nubThread = team->debug_info.nub_thread;
		nubPort = team->debug_info.nub_port;
	} else {
		// no debugger installed
		error = B_BAD_VALUE;
	}

	debugInfoLocker.Unlock();

	// Delete the nub port -- this will cause the nub thread to terminate and
	// remove the debugger.
	if (nubPort >= 0)
		delete_port(nubPort);

	finish_debugger_change(team);

	// wait for the nub thread
	if (nubThread >= 0)
		wait_for_thread(nubThread, NULL);

	return error;
}
2851 
2852 
/*!	\brief Syscall: asks thread \a threadID to enter the debugger as soon as
		possible.

	Sets B_THREAD_DEBUG_STOP on the thread and wakes/interrupts it so it
	notices the flag. Fails for nonexistent, dying, or nub threads and for
	threads of the kernel team.

	\return \c B_OK on success, \c B_BAD_THREAD_ID or \c B_NOT_ALLOWED
		otherwise.
*/
status_t
_user_debug_thread(thread_id threadID)
{
	TRACE(("[%ld] _user_debug_thread(%ld)\n", find_thread(NULL), threadID));

	// tell the thread to stop as soon as possible
	status_t error = B_OK;
	cpu_status state = disable_interrupts();
	GRAB_THREAD_LOCK();

	struct thread *thread = thread_get_thread_struct_locked(threadID);
	if (!thread) {
		// thread doesn't exist any longer
		error = B_BAD_THREAD_ID;
	} else if (thread->team == team_get_kernel_team()) {
		// we can't debug the kernel team
		error = B_NOT_ALLOWED;
	} else if (thread->debug_info.flags & B_THREAD_DEBUG_DYING) {
		// the thread is already dying -- too late to debug it
		error = B_BAD_THREAD_ID;
	} else if (thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) {
		// don't debug the nub thread
		error = B_NOT_ALLOWED;
	} else if (!(thread->debug_info.flags & B_THREAD_DEBUG_STOPPED)) {
		// The thread isn't already stopped for debugging:
		// set the flag that tells the thread to stop as soon as possible
		atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP);

		update_thread_user_debug_flag(thread);

		// make sure the thread actually gets to run and notices the flag
		switch (thread->state) {
			case B_THREAD_SUSPENDED:
				// thread suspended: wake it up
				scheduler_enqueue_in_run_queue(thread);
				break;

			default:
				// thread may be waiting: interrupt it
				thread_interrupt(thread, false);
					// TODO: If the thread is already in the kernel and e.g.
					// about to acquire a semaphore (before
					// thread_prepare_to_block()), we won't interrupt it.
					// Maybe we should rather send a signal (SIGTRAP).
				break;
		}
	}

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return error;
}
2904 
2905 
2906 status_t
2907 _user_get_thread_cpu_state(thread_id threadID,
2908 	struct debug_cpu_state *userCPUState)
2909 {
2910 	TRACE(("[%ld] _user_get_thread_cpu_state(%ld, %p)\n", find_thread(NULL),
2911 		threadID, userCPUState));
2912 
2913 	if (userCPUState == NULL || !IS_USER_ADDRESS(userCPUState))
2914 		return B_BAD_ADDRESS;
2915 
2916 	InterruptsSpinLocker locker(gThreadSpinlock);
2917 
2918 	// get and check the thread
2919 	struct thread *thread = thread_get_thread_struct_locked(threadID);
2920 	if (thread == NULL) {
2921 		// thread doesn't exist any longer
2922 		return B_BAD_THREAD_ID;
2923 	} else if (thread->team == team_get_kernel_team()) {
2924 		// we can't debug the kernel team
2925 		return B_NOT_ALLOWED;
2926 	} else if (thread->debug_info.flags & B_THREAD_DEBUG_DYING) {
2927 		// the thread is already dying
2928 		return B_BAD_THREAD_ID;
2929 	} else if (thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) {
2930 		// don't play with the nub thread
2931 		return B_NOT_ALLOWED;
2932 	} else if (thread->state == B_THREAD_RUNNING) {
2933 		// thread is running -- no way to get its CPU state
2934 		return B_BAD_THREAD_STATE;
2935 	}
2936 
2937 	// get the CPU state
2938 	debug_cpu_state cpuState;
2939 	status_t error = arch_get_thread_debug_cpu_state(thread, &cpuState);
2940 	if (error != B_OK)
2941 		return error;
2942 
2943 	locker.Unlock();
2944 
2945 	return user_memcpy(userCPUState, &cpuState, sizeof(cpuState));
2946 }
2947 
2948 
2949 void
2950 _user_wait_for_debugger(void)
2951 {
2952 	debug_thread_debugged message;
2953 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message,
2954 		sizeof(message), false);
2955 }
2956 
2957 
2958 status_t
2959 _user_set_debugger_breakpoint(void *address, uint32 type, int32 length,
2960 	bool watchpoint)
2961 {
2962 	// check the address and size
2963 	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
2964 		return B_BAD_ADDRESS;
2965 	if (watchpoint && length < 0)
2966 		return B_BAD_VALUE;
2967 
2968 	// check whether a debugger is installed already
2969 	team_debug_info teamDebugInfo;
2970 	get_team_debug_info(teamDebugInfo);
2971 	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2972 		return B_BAD_VALUE;
2973 
2974 	// We can't help it, here's a small but relatively harmless race condition,
2975 	// since a debugger could be installed in the meantime. The worst case is
2976 	// that we install a break/watchpoint the debugger doesn't know about.
2977 
2978 	// set the break/watchpoint
2979 	status_t result;
2980 	if (watchpoint)
2981 		result = arch_set_watchpoint(address, type, length);
2982 	else
2983 		result = arch_set_breakpoint(address);
2984 
2985 	if (result == B_OK)
2986 		update_threads_breakpoints_flag();
2987 
2988 	return result;
2989 }
2990 
2991 
2992 status_t
2993 _user_clear_debugger_breakpoint(void *address, bool watchpoint)
2994 {
2995 	// check the address
2996 	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
2997 		return B_BAD_ADDRESS;
2998 
2999 	// check whether a debugger is installed already
3000 	team_debug_info teamDebugInfo;
3001 	get_team_debug_info(teamDebugInfo);
3002 	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
3003 		return B_BAD_VALUE;
3004 
3005 	// We can't help it, here's a small but relatively harmless race condition,
3006 	// since a debugger could be installed in the meantime. The worst case is
3007 	// that we clear a break/watchpoint the debugger has just installed.
3008 
3009 	// clear the break/watchpoint
3010 	status_t result;
3011 	if (watchpoint)
3012 		result = arch_clear_watchpoint(address);
3013 	else
3014 		result = arch_clear_breakpoint(address);
3015 
3016 	if (result == B_OK)
3017 		update_threads_breakpoints_flag();
3018 
3019 	return result;
3020 }
3021