xref: /haiku/src/system/kernel/debug/user_debugger.cpp (revision f2df0cfe93a902842f6f4629ff614f5b3f9bf687)
1 /*
2  * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2015, Rene Gollent, rene@gollent.com.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 
8 #include <errno.h>
9 #include <signal.h>
10 #include <stdlib.h>
11 #include <stdio.h>
12 #include <string.h>
13 
14 #include <algorithm>
15 
16 #include <arch/debug.h>
17 #include <arch/user_debugger.h>
18 #include <cpu.h>
19 #include <debugger.h>
20 #include <kernel.h>
21 #include <KernelExport.h>
22 #include <kscheduler.h>
23 #include <ksignal.h>
24 #include <ksyscalls.h>
25 #include <port.h>
26 #include <sem.h>
27 #include <team.h>
28 #include <thread.h>
29 #include <thread_types.h>
30 #include <user_debugger.h>
31 #include <vm/vm.h>
32 #include <vm/vm_types.h>
33 
34 #include <AutoDeleter.h>
35 #include <util/AutoLock.h>
36 
37 #include "BreakpointManager.h"
38 
39 
40 //#define TRACE_USER_DEBUGGER
41 #ifdef TRACE_USER_DEBUGGER
42 #	define TRACE(x) dprintf x
43 #else
44 #	define TRACE(x) ;
45 #endif
46 
47 
48 // TODO: Since the introduction of team_debug_info::debugger_changed_condition
49 // there's some potential for simplifications. E.g. clear_team_debug_info() and
50 // destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus
51 // arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()).
52 
53 
54 static port_id sDefaultDebuggerPort = -1;
55 	// accessed atomically
56 
57 static timer sProfilingTimers[SMP_MAX_CPUS];
58 	// a profiling timer for each CPU -- used when a profiled thread is running
59 	// on that CPU
60 
61 
62 static void schedule_profiling_timer(Thread* thread, bigtime_t interval);
63 static int32 profiling_event(timer* unused);
64 static status_t ensure_debugger_installed();
65 static void get_team_debug_info(team_debug_info &teamDebugInfo);
66 
67 
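/*!	Convenience wrapper around write_port_etc(): blocks without a timeout, but
	lets a pending kill signal interrupt the write.
*/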
68 static inline status_t
69 kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
70 	size_t bufferSize)
71 {
72 	return write_port_etc(port, code, buffer, bufferSize, B_KILL_CAN_INTERRUPT,
73 		0);
74 }
75 
76 
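/*!	Writes a debugger message to the given debugger \a port on behalf of the
	current team. The team's debugger write lock is acquired and the team debug
	info re-read first, so the message is dropped if the debugger has changed
	or a handover is pending in the meantime.
*/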
77 static status_t
78 debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
79 	bool dontWait)
80 {
81 	TRACE(("debugger_write(): thread: %" B_PRId32 ", team %" B_PRId32 ", "
82 		"port: %" B_PRId32 ", code: %" B_PRIx32 ", message: %p, size: %lu, "
83 		"dontWait: %d\n", thread_get_current_thread()->id,
84 		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
85 		dontWait));
86 
87 	status_t error = B_OK;
88 
89 	// get the team debug info
90 	team_debug_info teamDebugInfo;
91 	get_team_debug_info(teamDebugInfo);
92 	sem_id writeLock = teamDebugInfo.debugger_write_lock;
93 
94 	// get the write lock
95 	TRACE(("debugger_write(): acquiring write lock...\n"));
96 	error = acquire_sem_etc(writeLock, 1,
97 		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
98 	if (error != B_OK) {
99 		TRACE(("debugger_write() done1: %" B_PRIx32 "\n", error));
100 		return error;
101 	}
102 
103 	// re-get the team debug info
104 	get_team_debug_info(teamDebugInfo);
105 
106 	if (teamDebugInfo.debugger_port != port
107 		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
108 		// The debugger has changed in the meantime or we are about to be
109 		// handed over to a new debugger. In either case we don't send the
110 		// message.
111 		TRACE(("debugger_write(): %s\n",
112 			(teamDebugInfo.debugger_port != port ? "debugger port changed"
113 				: "handover flag set")));
114 	} else {
115 		TRACE(("debugger_write(): writing to port...\n"));
116 
117 		error = write_port_etc(port, code, buffer, bufferSize,
118 			dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
119 	}
120 
121 	// release the write lock
122 	release_sem(writeLock);
123 
124 	TRACE(("debugger_write() done: %" B_PRIx32 "\n", error));
125 
126 	return error;
127 }
128 
129 
130 /*!	Updates the thread::flags field according to what user debugger flags are
131 	set for the thread.
132 	Interrupts must be disabled and the thread's debug info lock must be held.
133 */
134 static void
135 update_thread_user_debug_flag(Thread* thread)
136 {
137 	if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0)
138 		atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD);
139 	else
140 		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD);
141 }
142 
143 
144 /*!	Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the
145 	given thread.
146 	Interrupts must be disabled and the thread debug info lock must be held.
147 */
148 static void
149 update_thread_breakpoints_flag(Thread* thread)
150 {
151 	Team* team = thread->team;
152 
153 	if (arch_has_breakpoints(&team->debug_info.arch_info))
154 		atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
155 	else
156 		atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
157 }
158 
159 
160 /*!	Updates the Thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
161 	threads of the current team.
162 */
163 static void
164 update_threads_breakpoints_flag()
165 {
166 	Team* team = thread_get_current_thread()->team;
167 
168 	TeamLocker teamLocker(team);
169 
170 	Thread* thread = team->thread_list;
171 
172 	if (arch_has_breakpoints(&team->debug_info.arch_info)) {
173 		for (; thread != NULL; thread = thread->team_next)
174 			atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
175 	} else {
176 		for (; thread != NULL; thread = thread->team_next)
177 			atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
178 	}
179 }
180 
181 
182 /*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of the
183 	given thread, which must be the current thread.
184 */
185 static void
186 update_thread_debugger_installed_flag(Thread* thread)
187 {
188 	Team* team = thread->team;
189 
190 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
191 		atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
192 	else
193 		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
194 }
195 
196 
197 /*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all
198 	threads of the given team.
199 	The team's lock must be held.
200 */
201 static void
202 update_threads_debugger_installed_flag(Team* team)
203 {
204 	Thread* thread = team->thread_list;
205 
206 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
207 		for (; thread != NULL; thread = thread->team_next)
208 			atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
209 	} else {
210 		for (; thread != NULL; thread = thread->team_next)
211 			atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
212 	}
213 }
214 
215 
216 /**
217  *	For the first initialization the function must be called with \a initLock
218  *	set to \c true. If another thread might access the structure at the
219  *	same time, `lock' must be held when calling the function.
220  */
221 void
222 clear_team_debug_info(struct team_debug_info *info, bool initLock)
223 {
224 	if (info) {
225 		arch_clear_team_debug_info(&info->arch_info);
226 		atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
227 		info->debugger_team = -1;
228 		info->debugger_port = -1;
229 		info->nub_thread = -1;
230 		info->nub_port = -1;
231 		info->debugger_write_lock = -1;
232 		info->causing_thread = -1;
233 		info->image_event = 0;
234 		info->breakpoint_manager = NULL;
235 
236 		if (initLock) {
237 			B_INITIALIZE_SPINLOCK(&info->lock);
238 			info->debugger_changed_condition = NULL;
239 		}
240 	}
241 }
242 
243 /**
244  *  `lock' must not be held nor may interrupts be disabled.
245  *  \a info must not be a member of a team struct (or the team struct must no
246  *  longer be accessible, i.e. the team should already be removed).
247  *
248  *	In case the team is still accessible, the procedure is:
249  *	1. get `lock'
250  *	2. copy the team debug info on stack
251  *	3. call clear_team_debug_info() on the team debug info
252  *	4. release `lock'
253  *	5. call destroy_team_debug_info() on the copied team debug info
254  */
255 static void
256 destroy_team_debug_info(struct team_debug_info *info)
257 {
258 	if (info) {
259 		arch_destroy_team_debug_info(&info->arch_info);
260 
261 		// delete the breakpoint manager
262 		delete info->breakpoint_manager;
263 		info->breakpoint_manager = NULL;
264 
265 		// delete the debugger port write lock
266 		if (info->debugger_write_lock >= 0) {
267 			delete_sem(info->debugger_write_lock);
268 			info->debugger_write_lock = -1;
269 		}
270 
271 		// delete the nub port
272 		if (info->nub_port >= 0) {
273 			set_port_owner(info->nub_port, B_CURRENT_TEAM);
274 			delete_port(info->nub_port);
275 			info->nub_port = -1;
276 		}
277 
278 		// wait for the nub thread
279 		if (info->nub_thread >= 0) {
280 			if (info->nub_thread != thread_get_current_thread()->id) {
281 				int32 result;
282 				wait_for_thread(info->nub_thread, &result);
283 			}
284 
285 			info->nub_thread = -1;
286 		}
287 
288 		atomic_set(&info->flags, 0);
289 		info->debugger_team = -1;
290 		info->debugger_port = -1;
291 		info->causing_thread = -1;
292 		info->image_event = -1;
293 	}
294 }
295 
296 
297 void
298 init_thread_debug_info(struct thread_debug_info *info)
299 {
300 	if (info) {
301 		B_INITIALIZE_SPINLOCK(&info->lock);
302 		arch_clear_thread_debug_info(&info->arch_info);
303 		info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
304 		info->debug_port = -1;
305 		info->ignore_signals = 0;
306 		info->ignore_signals_once = 0;
307 		info->profile.sample_area = -1;
308 		info->profile.samples = NULL;
309 		info->profile.buffer_full = false;
310 		info->profile.installed_timer = NULL;
311 	}
312 }
313 
314 
315 /*!	Clears the debug info for the current thread.
316 	Invoked with thread debug info lock being held.
317 */
318 void
319 clear_thread_debug_info(struct thread_debug_info *info, bool dying)
320 {
321 	if (info) {
322 		// cancel profiling timer
323 		if (info->profile.installed_timer != NULL) {
324 			cancel_timer(info->profile.installed_timer);
325 			info->profile.installed_timer = NULL;
326 		}
327 
328 		arch_clear_thread_debug_info(&info->arch_info);
329 		atomic_set(&info->flags,
330 			B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ? B_THREAD_DEBUG_DYING : 0));
331 		info->debug_port = -1;
332 		info->ignore_signals = 0;
333 		info->ignore_signals_once = 0;
334 		info->profile.sample_area = -1;
335 		info->profile.samples = NULL;
336 		info->profile.buffer_full = false;
337 	}
338 }
339 
340 
341 void
342 destroy_thread_debug_info(struct thread_debug_info *info)
343 {
344 	if (info) {
345 		area_id sampleArea = info->profile.sample_area;
346 		if (sampleArea >= 0) {
347 			area_info areaInfo;
348 			if (get_area_info(sampleArea, &areaInfo) == B_OK) {
349 				unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
350 				delete_area(sampleArea);
351 			}
352 		}
353 
354 		arch_destroy_thread_debug_info(&info->arch_info);
355 
356 		if (info->debug_port >= 0) {
357 			delete_port(info->debug_port);
358 			info->debug_port = -1;
359 		}
360 
361 		info->ignore_signals = 0;
362 		info->ignore_signals_once = 0;
363 
364 		atomic_set(&info->flags, 0);
365 	}
366 }
367 
368 
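/*!	Acquires the exclusive right to change the debugger of the team with ID
	\a teamID: waits while another thread has installed a
	debugger_changed_condition and then installs \a condition itself. On
	success the team is returned in \a team. finish_debugger_change() must be
	called to release the right again.
*/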
369 static status_t
370 prepare_debugger_change(team_id teamID, ConditionVariable& condition,
371 	Team*& team)
372 {
373 	// We look up the team by ID, even in the case of the current team, so we
374 	// can be sure that the team is not already dying.
375 	if (teamID == B_CURRENT_TEAM)
376 		teamID = thread_get_current_thread()->team->id;
377 
378 	while (true) {
379 		// get the team
380 		team = Team::GetAndLock(teamID);
381 		if (team == NULL)
382 			return B_BAD_TEAM_ID;
383 		BReference<Team> teamReference(team, true);
384 		TeamLocker teamLocker(team, true);
385 
386 		// don't allow messing with the kernel team
387 		if (team == team_get_kernel_team())
388 			return B_NOT_ALLOWED;
389 
390 		// check whether the condition is already set
391 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
392 
393 		if (team->debug_info.debugger_changed_condition == NULL) {
394 			// nobody there yet -- set our condition variable and be done
395 			team->debug_info.debugger_changed_condition = &condition;
396 			return B_OK;
397 		}
398 
399 		// we'll have to wait
400 		ConditionVariableEntry entry;
401 		team->debug_info.debugger_changed_condition->Add(&entry);
402 
403 		debugInfoLocker.Unlock();
404 		teamLocker.Unlock();
405 
406 		entry.Wait();
407 	}
408 }
409 
410 
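/*!	Same as the variant above, but for an already known \a team.
*/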
411 static void
412 prepare_debugger_change(Team* team, ConditionVariable& condition)
413 {
414 	while (true) {
415 		// check whether the condition is already set
416 		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
417 
418 		if (team->debug_info.debugger_changed_condition == NULL) {
419 			// nobody there yet -- set our condition variable and be done
420 			team->debug_info.debugger_changed_condition = &condition;
421 			return;
422 		}
423 
424 		// we'll have to wait
425 		ConditionVariableEntry entry;
426 		team->debug_info.debugger_changed_condition->Add(&entry);
427 
428 		debugInfoLocker.Unlock();
429 
430 		entry.Wait();
431 	}
432 }
433 
434 
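/*!	Releases the exclusive debugger-change right acquired via
	prepare_debugger_change() and wakes up all threads waiting for it.
*/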
435 static void
436 finish_debugger_change(Team* team)
437 {
438 	// unset our condition variable and notify all threads waiting on it
439 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
440 
441 	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
442 	team->debug_info.debugger_changed_condition = NULL;
443 
444 	condition->NotifyAll();
445 }
446 
447 
448 void
449 user_debug_prepare_for_exec()
450 {
451 	Thread *thread = thread_get_current_thread();
452 	Team *team = thread->team;
453 
454 	// If a debugger is installed for the team and the thread debug stuff
455 	// initialized, change the ownership of the debug port for the thread
456 	// to the kernel team, since exec_team() deletes all ports owned by this
457 	// team. We change the ownership back later.
458 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
459 		// get the port
460 		port_id debugPort = -1;
461 
462 		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
463 
464 		if ((thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0)
465 			debugPort = thread->debug_info.debug_port;
466 
467 		threadDebugInfoLocker.Unlock();
468 
469 		// set the new port ownership
470 		if (debugPort >= 0)
471 			set_port_owner(debugPort, team_get_kernel_team_id());
472 	}
473 }
474 
475 
476 void
477 user_debug_finish_after_exec()
478 {
479 	Thread *thread = thread_get_current_thread();
480 	Team *team = thread->team;
481 
482 	// If a debugger is installed for the team and the thread debug stuff
483 	// initialized for this thread, change the ownership of its debug port
484 	// back to this team.
485 	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
486 		// get the port
487 		port_id debugPort = -1;
488 
489 		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
490 
491 		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
492 			debugPort = thread->debug_info.debug_port;
493 
494 		threadDebugInfoLocker.Unlock();
495 
496 		// set the new port ownership
497 		if (debugPort >= 0)
498 			set_port_owner(debugPort, team->id);
499 	}
500 }
501 
502 
503 void
504 init_user_debug()
505 {
506 	#ifdef ARCH_INIT_USER_DEBUG
507 		ARCH_INIT_USER_DEBUG();
508 	#endif
509 }
510 
511 
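/*!	Copies the current team's debug info into \a teamDebugInfo, with interrupts
	disabled and the team debug info lock held during the copy.
*/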
512 static void
513 get_team_debug_info(team_debug_info &teamDebugInfo)
514 {
515 	Thread *thread = thread_get_current_thread();
516 
517 	cpu_status state = disable_interrupts();
518 	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
519 
520 	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));
521 
522 	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
523 	restore_interrupts(state);
524 }
525 
526 
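/*!	Makes the current thread stop for the debugger: makes sure the thread has a
	debug port, sends \a message (of type \a event) to the team's debugger port
	and then processes the commands arriving on the debug port until the
	debugger lets the thread continue.
	\param requireDebugger If \c true, fail when no debugger is installed.
	\param restart Return parameter: set to \c true when the debugger changed
		while waiting and the caller should invoke the function again.
	\return The debugger's handle-event decision or an error code.
*/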
527 static status_t
528 thread_hit_debug_event_internal(debug_debugger_message event,
529 	const void *message, int32 size, bool requireDebugger, bool &restart)
530 {
531 	restart = false;
532 	Thread *thread = thread_get_current_thread();
533 
534 	TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", event: %" B_PRIu32
535 		", message: %p, size: %" B_PRId32 "\n", thread->id, (uint32)event,
536 		message, size));
537 
538 	// check if there's a debug port already
539 	bool setPort = !(atomic_get(&thread->debug_info.flags)
540 		& B_THREAD_DEBUG_INITIALIZED);
541 
542 	// create a port if there is none yet
543 	port_id port = -1;
544 	if (setPort) {
545 		char nameBuffer[128];
546 		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %" B_PRId32,
547 			thread->id);
548 
549 		port = create_port(1, nameBuffer);
550 		if (port < 0) {
551 			dprintf("thread_hit_debug_event(): Failed to create debug port: "
552 				"%s\n", strerror(port));
553 			return port;
554 		}
555 	}
556 
557 	// check the debug info structures once more: get the debugger port, set
558 	// the thread's debug port, and update the thread's debug flags
559 	port_id deletePort = port;
560 	port_id debuggerPort = -1;
561 	port_id nubPort = -1;
562 	status_t error = B_OK;
563 	cpu_status state = disable_interrupts();
564 	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
565 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
566 
567 	uint32 threadFlags = thread->debug_info.flags;
568 	threadFlags &= ~B_THREAD_DEBUG_STOP;
569 	bool debuggerInstalled
570 		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
571 	if (thread->id == thread->team->debug_info.nub_thread) {
572 		// Ugh, we're the nub thread. We shouldn't be here.
573 		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %" B_PRId32
574 			"\n", thread->id));
575 
576 		error = B_ERROR;
577 	} else if (debuggerInstalled || !requireDebugger) {
578 		if (debuggerInstalled) {
579 			debuggerPort = thread->team->debug_info.debugger_port;
580 			nubPort = thread->team->debug_info.nub_port;
581 		}
582 
583 		if (setPort) {
584 			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
585 				// someone created a port for us (the port we've created will
586 				// be deleted below)
587 				port = thread->debug_info.debug_port;
588 			} else {
589 				thread->debug_info.debug_port = port;
590 				deletePort = -1;	// keep the port
591 				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
592 			}
593 		} else {
594 			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
595 				port = thread->debug_info.debug_port;
596 			} else {
597 				// someone deleted our port
598 				error = B_ERROR;
599 			}
600 		}
601 	} else
602 		error = B_ERROR;
603 
604 	// update the flags
605 	if (error == B_OK)
606 		threadFlags |= B_THREAD_DEBUG_STOPPED;
607 	atomic_set(&thread->debug_info.flags, threadFlags);
608 
609 	update_thread_user_debug_flag(thread);
610 
611 	threadDebugInfoLocker.Unlock();
612 	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
613 	restore_interrupts(state);
614 
615 	// delete the superfluous port
616 	if (deletePort >= 0)
617 		delete_port(deletePort);
618 
619 	if (error != B_OK) {
620 		TRACE(("thread_hit_debug_event() error: thread: %" B_PRId32 ", error: "
621 			"%" B_PRIx32 "\n", thread->id, error));
622 		return error;
623 	}
624 
625 	// send a message to the debugger port
626 	if (debuggerInstalled) {
627 		// update the message's origin info first
628 		debug_origin *origin = (debug_origin *)message;
629 		origin->thread = thread->id;
630 		origin->team = thread->team->id;
631 		origin->nub_port = nubPort;
632 
633 		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", sending "
634 			"message to debugger port %" B_PRId32 "\n", thread->id,
635 			debuggerPort));
636 
637 		error = debugger_write(debuggerPort, event, message, size, false);
638 	}
639 
640 	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
641 	bool singleStep = false;
642 
643 	if (error == B_OK) {
644 		bool done = false;
645 		while (!done) {
646 			// read a command from the debug port
647 			int32 command;
648 			debugged_thread_message_data commandMessage;
649 			ssize_t commandMessageSize = read_port_etc(port, &command,
650 				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
651 				0);
652 
653 			if (commandMessageSize < 0) {
654 				error = commandMessageSize;
655 				TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed "
656 					"to receive message from port %" B_PRId32 ": %" B_PRIx32 "\n",
657 					thread->id, port, error));
658 				break;
659 			}
660 
661 			switch (command) {
662 				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
663 					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
664 						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
665 						thread->id));
666 					result = commandMessage.continue_thread.handle_event;
667 
668 					singleStep = commandMessage.continue_thread.single_step;
669 					done = true;
670 					break;
671 
672 				case B_DEBUGGED_THREAD_SET_CPU_STATE:
673 				{
674 					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
675 						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
676 						thread->id));
677 					arch_set_debug_cpu_state(
678 						&commandMessage.set_cpu_state.cpu_state);
679 
680 					break;
681 				}
682 
683 				case B_DEBUGGED_THREAD_GET_CPU_STATE:
684 				{
685 					port_id replyPort = commandMessage.get_cpu_state.reply_port;
686 
687 					// prepare the message
688 					debug_nub_get_cpu_state_reply replyMessage;
689 					replyMessage.error = B_OK;
690 					replyMessage.message = event;
691 					arch_get_debug_cpu_state(&replyMessage.cpu_state);
692 
693 					// send it
694 					error = kill_interruptable_write_port(replyPort, event,
695 						&replyMessage, sizeof(replyMessage));
696 
697 					break;
698 				}
699 
700 				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
701 				{
702 					// Check whether the debugger really changed, i.e. is
703 					// different from the one we know.
704 					team_debug_info teamDebugInfo;
705 					get_team_debug_info(teamDebugInfo);
706 
707 					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
708 						if (!debuggerInstalled
709 							|| teamDebugInfo.debugger_port != debuggerPort) {
710 							// debugger was installed or has changed: restart
711 							// this function
712 							restart = true;
713 							done = true;
714 						}
715 					} else {
716 						if (debuggerInstalled) {
717 							// debugger is gone: continue the thread normally
718 							done = true;
719 						}
720 					}
721 
722 					break;
723 				}
724 			}
725 		}
726 	} else {
727 		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed to send "
728 			"message to debugger port %" B_PRId32 ": %" B_PRIx32 "\n",
729 			thread->id, debuggerPort, error));
730 	}
731 
732 	// update the thread debug info
733 	bool destroyThreadInfo = false;
734 	thread_debug_info threadDebugInfo;
735 
736 	state = disable_interrupts();
737 	threadDebugInfoLocker.Lock();
738 
739 	// check if the team is still being debugged
740 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
741 	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
742 		// update the single-step flag
743 		if (singleStep) {
744 			atomic_or(&thread->debug_info.flags,
745 				B_THREAD_DEBUG_SINGLE_STEP);
746 			atomic_or(&thread->flags, THREAD_FLAGS_SINGLE_STEP);
747 		} else {
748 			atomic_and(&thread->debug_info.flags,
749 				~(int32)B_THREAD_DEBUG_SINGLE_STEP);
750 		}
751 
752 		// unset the "stopped" state
753 		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);
754 
755 		update_thread_user_debug_flag(thread);
756 
757 	} else {
758 		// the debugger is gone: cleanup our info completely
759 		threadDebugInfo = thread->debug_info;
760 		clear_thread_debug_info(&thread->debug_info, false);
761 		destroyThreadInfo = true;
762 	}
763 
764 	threadDebugInfoLocker.Unlock();
765 	restore_interrupts(state);
766 
767 	// enable/disable single stepping
768 	arch_update_thread_single_step();
769 
770 	if (destroyThreadInfo)
771 		destroy_thread_debug_info(&threadDebugInfo);
772 
773 	return (error == B_OK ? result : error);
774 }
775 
776 
777 static status_t
778 thread_hit_debug_event(debug_debugger_message event, const void *message,
779 	int32 size, bool requireDebugger)
780 {
781 	status_t result;
782 	bool restart;
783 	do {
784 		restart = false;
785 		result = thread_hit_debug_event_internal(event, message, size,
786 			requireDebugger, restart);
787 	} while (result >= 0 && restart);
788 
789 	// Prepare to continue -- we install a debugger change condition, so no one
790 	// will change the debugger while we're playing with the breakpoint manager.
791 	// TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
792 	Team* team = thread_get_current_thread()->team;
793 	ConditionVariable debugChangeCondition;
794 	prepare_debugger_change(team, debugChangeCondition);
795 
796 	if (team->debug_info.breakpoint_manager != NULL) {
797 		bool isSyscall;
798 		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
799 		if (pc != NULL && !isSyscall)
800 			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
801 	}
802 
803 	finish_debugger_change(team);
804 
805 	return result;
806 }
807 
808 
809 static status_t
810 thread_hit_serious_debug_event(debug_debugger_message event,
811 	const void *message, int32 messageSize)
812 {
813 	// ensure that a debugger is installed for this team
814 	status_t error = ensure_debugger_installed();
815 	if (error != B_OK) {
816 		Thread *thread = thread_get_current_thread();
817 		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
818 			"thread: %" B_PRId32 ": %s\n", thread->id, strerror(error));
819 		return error;
820 	}
821 
822 	// enter the debug loop
823 	return thread_hit_debug_event(event, message, messageSize, true);
824 }
825 
826 
827 void
828 user_debug_pre_syscall(uint32 syscall, void *args)
829 {
830 	// check whether a debugger is installed
831 	Thread *thread = thread_get_current_thread();
832 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
833 	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
834 		return;
835 
836 	// check whether pre-syscall tracing is enabled for team or thread
837 	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
838 	if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
839 			&& !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
840 		return;
841 	}
842 
843 	// prepare the message
844 	debug_pre_syscall message;
845 	message.syscall = syscall;
846 
847 	// copy the syscall args
848 	if (syscall < (uint32)kSyscallCount) {
849 		if (kSyscallInfos[syscall].parameter_size > 0)
850 			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
851 	}
852 
853 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
854 		sizeof(message), true);
855 }
856 
857 
858 void
859 user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
860 	bigtime_t startTime)
861 {
862 	// check whether a debugger is installed
863 	Thread *thread = thread_get_current_thread();
864 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
865 	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
866 		return;
867 
868 	// check whether post-syscall tracing is enabled for team or thread
869 	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
870 	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
871 			&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
872 		return;
873 	}
874 
875 	// prepare the message
876 	debug_post_syscall message;
877 	message.start_time = startTime;
878 	message.end_time = system_time();
879 	message.return_value = returnValue;
880 	message.syscall = syscall;
881 
882 	// copy the syscall args
883 	if (syscall < (uint32)kSyscallCount) {
884 		if (kSyscallInfos[syscall].parameter_size > 0)
885 			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
886 	}
887 
888 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
889 		sizeof(message), true);
890 }
891 
892 
893 /**	\brief To be called when an unhandled processor exception (error/fault)
894  *		   occurred.
895  *	\param exception The debug_exception_type value identifying the kind of
896  *		   fault.
897  *	\param signal The signal corresponding to the exception.
898  *	\return \c true if the caller shall continue normally, i.e. usually send
899  *			a deadly signal; \c false if the debugger insists on continuing
900  *			the program (e.g. because it has removed the cause of the problem).
901  */
902 bool
903 user_debug_exception_occurred(debug_exception_type exception, int signal)
904 {
905 	// First check whether there's a signal handler installed for the signal.
906 	// If so, we don't want to install a debugger for the team. We always send
907 	// the signal instead. An already installed debugger will be notified if
908 	// it has requested signal notifications.
909 	struct sigaction signalAction;
910 	if (sigaction(signal, NULL, &signalAction) == 0
911 		&& signalAction.sa_handler != SIG_DFL) {
912 		return true;
913 	}
914 
915 	// prepare the message
916 	debug_exception_occurred message;
917 	message.exception = exception;
918 	message.signal = signal;
919 
920 	status_t result = thread_hit_serious_debug_event(
921 		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
922 	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
923 }
924 
925 
926 bool
927 user_debug_handle_signal(int signal, struct sigaction *handler, bool deadly)
928 {
929 	// check if a debugger is installed and is interested in signals
930 	Thread *thread = thread_get_current_thread();
931 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
932 	if (~teamDebugFlags
933 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
934 		return true;
935 	}
936 
937 	// prepare the message
938 	debug_signal_received message;
939 	message.signal = signal;
940 	message.handler = *handler;
941 	message.deadly = deadly;
942 
943 	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
944 		&message, sizeof(message), true);
945 	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
946 }
947 
948 
949 void
950 user_debug_stop_thread()
951 {
952 	// check whether this is actually an emulated single-step notification
953 	Thread* thread = thread_get_current_thread();
954 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
955 
956 	bool singleStepped = false;
957 	if ((atomic_and(&thread->debug_info.flags,
958 				~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
959 			& B_THREAD_DEBUG_NOTIFY_SINGLE_STEP) != 0) {
960 		singleStepped = true;
961 	}
962 
963 	threadDebugInfoLocker.Unlock();
964 
965 	if (singleStepped) {
966 		user_debug_single_stepped();
967 	} else {
968 		debug_thread_debugged message;
969 		thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED,
970 			&message, sizeof(message));
971 	}
972 }
973 
974 
975 void
976 user_debug_team_created(team_id teamID)
977 {
978 	// check if a debugger is installed and is interested in team creation
979 	// events
980 	Thread *thread = thread_get_current_thread();
981 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
982 	if (~teamDebugFlags
983 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
984 		return;
985 	}
986 
987 	// prepare the message
988 	debug_team_created message;
989 	message.new_team = teamID;
990 
991 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
992 		sizeof(message), true);
993 }
994 
995 
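/*!	Called when a debugged team has been deleted. Notifies the debugger
	directly via \a debuggerPort (rather than through debugger_write()), since
	the team and its debug info no longer exist.
*/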
996 void
997 user_debug_team_deleted(team_id teamID, port_id debuggerPort)
998 {
999 	if (debuggerPort >= 0) {
1000 		TRACE(("user_debug_team_deleted(team: %" B_PRId32 ", debugger port: "
1001 			"%" B_PRId32 ")\n", teamID, debuggerPort));
1002 
1003 		debug_team_deleted message;
1004 		message.origin.thread = -1;
1005 		message.origin.team = teamID;
1006 		message.origin.nub_port = -1;
1007 		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
1008 			sizeof(message), B_RELATIVE_TIMEOUT, 0);
1009 	}
1010 }
1011 
1012 
1013 void
1014 user_debug_team_exec()
1015 {
1016 	// check if a debugger is installed and is interested in team creation
1017 	// events
1018 	Thread *thread = thread_get_current_thread();
1019 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1020 	if (~teamDebugFlags
1021 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
1022 		return;
1023 	}
1024 
1025 	// prepare the message
1026 	debug_team_exec message;
1027 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1028 		+ 1;
1029 
1030 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
1031 		sizeof(message), true);
1032 }
1033 
1034 
1035 /*!	Called by a new userland thread to update the debugging related flags of
1036 	\c Thread::flags before the thread first enters userland.
1037 	\param thread The calling thread.
1038 */
1039 void
1040 user_debug_update_new_thread_flags(Thread* thread)
1041 {
1042 	// lock it and update its flags
1043 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1044 
1045 	update_thread_user_debug_flag(thread);
1046 	update_thread_breakpoints_flag(thread);
1047 	update_thread_debugger_installed_flag(thread);
1048 }
1049 
1050 
1051 void
1052 user_debug_thread_created(thread_id threadID)
1053 {
1054 	// check if a debugger is installed and is interested in thread events
1055 	Thread *thread = thread_get_current_thread();
1056 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1057 	if (~teamDebugFlags
1058 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
1059 		return;
1060 	}
1061 
1062 	// prepare the message
1063 	debug_thread_created message;
1064 	message.new_thread = threadID;
1065 
1066 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message,
1067 		sizeof(message), true);
1068 }
1069 
1070 
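/*!	Called when a thread of a debugged team has been deleted. Sends a
	B_DEBUGGER_MESSAGE_THREAD_DELETED message to the team's debugger, acquiring
	the debugger write lock manually, since the dying thread no longer belongs
	to the debugged team (cf. the comment below).
*/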
1071 void
1072 user_debug_thread_deleted(team_id teamID, thread_id threadID)
1073 {
1074 	// Things are a bit complicated here, since this thread no longer belongs to
1075 	// the debugged team (but to the kernel). So we can't use debugger_write().
1076 
1077 	// get the team debug flags and debugger port
1078 	Team* team = Team::Get(teamID);
1079 	if (team == NULL)
1080 		return;
1081 	BReference<Team> teamReference(team, true);
1082 
1083 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
1084 
1085 	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
1086 	port_id debuggerPort = team->debug_info.debugger_port;
1087 	sem_id writeLock = team->debug_info.debugger_write_lock;
1088 
1089 	debugInfoLocker.Unlock();
1090 
1091 	// check if a debugger is installed and is interested in thread events
1092 	if (~teamDebugFlags
1093 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
1094 		return;
1095 	}
1096 
1097 	// acquire the debugger write lock
1098 	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
1099 	if (error != B_OK)
1100 		return;
1101 
1102 	// re-get the team debug info -- we need to check whether anything changed
1103 	debugInfoLocker.Lock();
1104 
1105 	teamDebugFlags = atomic_get(&team->debug_info.flags);
1106 	port_id newDebuggerPort = team->debug_info.debugger_port;
1107 
1108 	debugInfoLocker.Unlock();
1109 
1110 	// Send the message only if the debugger hasn't changed in the meantime or
1111 	// no handover to a new debugger is pending.
1112 	if (newDebuggerPort == debuggerPort
1113 		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
1114 		debug_thread_deleted message;
1115 		message.origin.thread = threadID;
1116 		message.origin.team = teamID;
1117 		message.origin.nub_port = -1;
1118 
1119 		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
1120 			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
1121 	}
1122 
1123 	// release the debugger write lock
1124 	release_sem(writeLock);
1125 }
1126 
1127 
1128 /*!	Called for a thread that is about to die, cleaning up all user debug
1129 	facilities installed for the thread.
1130 	\param thread The current thread, the one that is going to die.
1131 */
1132 void
1133 user_debug_thread_exiting(Thread* thread)
1134 {
1135 	// thread is the current thread, so using team is safe
1136 	Team* team = thread->team;
1137 
1138 	InterruptsLocker interruptsLocker;
1139 
1140 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1141 
1142 	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
1143 	port_id debuggerPort = team->debug_info.debugger_port;
1144 
1145 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1146 
1147 	// check if a debugger is installed
1148 	if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
1149 		|| debuggerPort < 0) {
1150 		return;
1151 	}
1152 
1153 	// detach the profile info and mark the thread dying
1154 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1155 
1156 	thread_debug_info& threadDebugInfo = thread->debug_info;
1157 	if (threadDebugInfo.profile.samples == NULL)
1158 		return;
1159 
1160 	area_id sampleArea = threadDebugInfo.profile.sample_area;
1161 	int32 sampleCount = threadDebugInfo.profile.sample_count;
1162 	int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
1163 	int32 stackDepth = threadDebugInfo.profile.stack_depth;
1164 	bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
1165 	int32 imageEvent = threadDebugInfo.profile.image_event;
1166 	threadDebugInfo.profile.sample_area = -1;
1167 	threadDebugInfo.profile.samples = NULL;
1168 	threadDebugInfo.profile.buffer_full = false;
1169 
1170 	atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);
1171 
1172 	threadDebugInfoLocker.Unlock();
1173 	interruptsLocker.Unlock();
1174 
1175 	// notify the debugger
1176 	debug_profiler_update message;
1177 	message.origin.thread = thread->id;
1178 	message.origin.team = thread->team->id;
1179 	message.origin.nub_port = -1;	// asynchronous message
1180 	message.sample_count = sampleCount;
1181 	message.dropped_ticks = droppedTicks;
1182 	message.stack_depth = stackDepth;
1183 	message.variable_stack_depth = variableStackDepth;
1184 	message.image_event = imageEvent;
1185 	message.stopped = true;
1186 	debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
1187 		&message, sizeof(message), false);
1188 
1189 	if (sampleArea >= 0) {
1190 		area_info areaInfo;
1191 		if (get_area_info(sampleArea, &areaInfo) == B_OK) {
1192 			unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
1193 			delete_area(sampleArea);
1194 		}
1195 	}
1196 }
1197 
1198 
1199 void
1200 user_debug_image_created(const image_info *imageInfo)
1201 {
1202 	// check if a debugger is installed and is interested in image events
1203 	Thread *thread = thread_get_current_thread();
1204 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1205 	if (~teamDebugFlags
1206 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
1207 		return;
1208 	}
1209 
1210 	// prepare the message
1211 	debug_image_created message;
1212 	memcpy(&message.info, imageInfo, sizeof(image_info));
1213 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1214 		+ 1;
1215 
1216 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
1217 		sizeof(message), true);
1218 }
1219 
1220 
1221 void
1222 user_debug_image_deleted(const image_info *imageInfo)
1223 {
1224 	// check if a debugger is installed and is interested in image events
1225 	Thread *thread = thread_get_current_thread();
1226 	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
1227 	if (~teamDebugFlags
1228 		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
1229 		return;
1230 	}
1231 
1232 	// prepare the message
1233 	debug_image_deleted message;
1234 	memcpy(&message.info, imageInfo, sizeof(image_info));
1235 	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
1236 		+ 1;
1237 
1238 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message,
1239 		sizeof(message), true);
1240 }
1241 
1242 
1243 void
1244 user_debug_breakpoint_hit(bool software)
1245 {
1246 	// prepare the message
1247 	debug_breakpoint_hit message;
1248 	arch_get_debug_cpu_state(&message.cpu_state);
1249 
1250 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
1251 		sizeof(message));
1252 }
1253 
1254 
1255 void
1256 user_debug_watchpoint_hit()
1257 {
1258 	// prepare the message
1259 	debug_watchpoint_hit message;
1260 	arch_get_debug_cpu_state(&message.cpu_state);
1261 
1262 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
1263 		sizeof(message));
1264 }
1265 
1266 
1267 void
1268 user_debug_single_stepped()
1269 {
1270 	// clear the single-step thread flag
1271 	Thread* thread = thread_get_current_thread();
1272 	atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_SINGLE_STEP);
1273 
1274 	// prepare the message
1275 	debug_single_step message;
1276 	arch_get_debug_cpu_state(&message.cpu_state);
1277 
1278 	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
1279 		sizeof(message));
1280 }
1281 
1282 
1283 /*!	Schedules the profiling timer for the current thread.
1284 	The caller must hold the thread's debug info lock.
1285 	\param thread The current thread.
1286 	\param interval The time after which the timer should fire.
1287 */
1288 static void
1289 schedule_profiling_timer(Thread* thread, bigtime_t interval)
1290 {
1291 	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
1292 	thread->debug_info.profile.installed_timer = timer;
1293 	thread->debug_info.profile.timer_end = system_time() + interval;
1294 	add_timer(timer, &profiling_event, interval, B_ONE_SHOT_RELATIVE_TIMER);
1295 }
1296 
1297 
1298 /*!	Samples the current thread's instruction pointer/stack trace.
1299 	The caller must hold the current thread's debug info lock.
1300 	\param flushBuffer Return parameter: Set to \c true when the sampling
1301 		buffer must be flushed.
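	\return \c true if the thread is still being profiled (i.e. a sample buffer
		is installed), \c false otherwise.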
1302 */
1303 static bool
1304 profiling_do_sample(bool& flushBuffer)
1305 {
1306 	Thread* thread = thread_get_current_thread();
1307 	thread_debug_info& debugInfo = thread->debug_info;
1308 
1309 	if (debugInfo.profile.samples == NULL)
1310 		return false;
1311 
1312 	// Check whether the buffer is full or an image event occurred since the
1313 	// last sample was taken.
1314 	int32 maxSamples = debugInfo.profile.max_samples;
1315 	int32 sampleCount = debugInfo.profile.sample_count;
1316 	int32 stackDepth = debugInfo.profile.stack_depth;
1317 	int32 imageEvent = thread->team->debug_info.image_event;
1318 	if (debugInfo.profile.sample_count > 0) {
1319 		if (debugInfo.profile.last_image_event < imageEvent
1320 			&& debugInfo.profile.variable_stack_depth
1321 			&& sampleCount + 2 <= maxSamples) {
1322 			// an image event occurred, but we use variable stack depth and
1323 			// have enough room in the buffer to indicate an image event
1324 			addr_t* event = debugInfo.profile.samples + sampleCount;
1325 			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
1326 			event[1] = imageEvent;
1327 			sampleCount += 2;
1328 			debugInfo.profile.sample_count = sampleCount;
1329 			debugInfo.profile.last_image_event = imageEvent;
1330 		}
1331 
1332 		if (debugInfo.profile.last_image_event < imageEvent
1333 			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
1334 			if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
1335 				flushBuffer = true;
1336 				return true;
1337 			}
1338 
1339 			// We can't flush the buffer now, since we interrupted a kernel
1340 			// function. If the buffer is not full yet, we add the samples,
1341 			// otherwise we have to drop them.
1342 			if (maxSamples - sampleCount < stackDepth) {
1343 				debugInfo.profile.dropped_ticks++;
1344 				return true;
1345 			}
1346 		}
1347 	} else {
1348 		// first sample -- set the image event
1349 		debugInfo.profile.image_event = imageEvent;
1350 		debugInfo.profile.last_image_event = imageEvent;
1351 	}
1352 
1353 	// get the samples
1354 	addr_t* returnAddresses = debugInfo.profile.samples
1355 		+ debugInfo.profile.sample_count;
1356 	if (debugInfo.profile.variable_stack_depth) {
1357 		// variable sample count per hit
1358 		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
1359 			stackDepth - 1, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);
1360 
1361 		debugInfo.profile.sample_count += *returnAddresses + 1;
1362 	} else {
1363 		// fixed sample count per hit
1364 		if (stackDepth > 1) {
1365 			int32 count = arch_debug_get_stack_trace(returnAddresses,
1366 				stackDepth, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);
1367 
1368 			for (int32 i = count; i < stackDepth; i++)
1369 				returnAddresses[i] = 0;
1370 		} else
1371 			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);
1372 
1373 		debugInfo.profile.sample_count += stackDepth;
1374 	}
1375 
1376 	return true;
1377 }
1378 
1379 
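/*!	Post-interrupt callback installed by profiling_event() when the sample
	buffer needs to be flushed: notifies the debugger with a
	B_DEBUGGER_MESSAGE_PROFILER_UPDATE message and, if the thread is still
	being profiled, resumes sampling and reschedules the profiling timer.
*/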
1380 static void
1381 profiling_buffer_full(void*)
1382 {
1383 	// It is undefined whether the function is called with interrupts enabled
1384 	// or disabled. We are allowed to enable interrupts, though. First make
1385 	// sure interrupts are disabled.
1386 	disable_interrupts();
1387 
1388 	Thread* thread = thread_get_current_thread();
1389 	thread_debug_info& debugInfo = thread->debug_info;
1390 
1391 	SpinLocker threadDebugInfoLocker(debugInfo.lock);
1392 
1393 	if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
1394 		int32 sampleCount = debugInfo.profile.sample_count;
1395 		int32 droppedTicks = debugInfo.profile.dropped_ticks;
1396 		int32 stackDepth = debugInfo.profile.stack_depth;
1397 		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
1398 		int32 imageEvent = debugInfo.profile.image_event;
1399 
1400 		// notify the debugger
1401 		debugInfo.profile.sample_count = 0;
1402 		debugInfo.profile.dropped_ticks = 0;
1403 
1404 		threadDebugInfoLocker.Unlock();
1405 		enable_interrupts();
1406 
1407 		// prepare the message
1408 		debug_profiler_update message;
1409 		message.sample_count = sampleCount;
1410 		message.dropped_ticks = droppedTicks;
1411 		message.stack_depth = stackDepth;
1412 		message.variable_stack_depth = variableStackDepth;
1413 		message.image_event = imageEvent;
1414 		message.stopped = false;
1415 
1416 		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
1417 			sizeof(message), false);
1418 
1419 		disable_interrupts();
1420 		threadDebugInfoLocker.Lock();
1421 
1422 		// do the sampling and reschedule timer, if still profiling this thread
1423 		bool flushBuffer;
1424 		if (profiling_do_sample(flushBuffer)) {
1425 			debugInfo.profile.buffer_full = false;
1426 			schedule_profiling_timer(thread, debugInfo.profile.interval);
1427 		}
1428 	}
1429 
1430 	threadDebugInfoLocker.Unlock();
1431 	enable_interrupts();
1432 }
1433 
1434 
1435 /*!	Profiling timer event callback.
1436 	Called with interrupts disabled.
1437 */
1438 static int32
1439 profiling_event(timer* /*unused*/)
1440 {
1441 	Thread* thread = thread_get_current_thread();
1442 	thread_debug_info& debugInfo = thread->debug_info;
1443 
1444 	SpinLocker threadDebugInfoLocker(debugInfo.lock);
1445 
1446 	bool flushBuffer = false;
1447 	if (profiling_do_sample(flushBuffer)) {
1448 		if (flushBuffer) {
1449 			// The sample buffer needs to be flushed; we'll have to notify the
1450 			// debugger. We can't do that right here. Instead we set a post
1451 			// interrupt callback doing that for us, and don't reschedule the
1452 			// timer yet.
1453 			thread->post_interrupt_callback = profiling_buffer_full;
1454 			debugInfo.profile.installed_timer = NULL;
1455 			debugInfo.profile.buffer_full = true;
1456 		} else
1457 			schedule_profiling_timer(thread, debugInfo.profile.interval);
1458 	} else
1459 		debugInfo.profile.installed_timer = NULL;
1460 
1461 	return B_HANDLED_INTERRUPT;
1462 }
1463 
1464 
1465 /*!	Called by the scheduler when a debugged thread has been unscheduled.
1466 	The scheduler lock is being held.
1467 */
1468 void
1469 user_debug_thread_unscheduled(Thread* thread)
1470 {
1471 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1472 
1473 	// if running, cancel the profiling timer
1474 	struct timer* timer = thread->debug_info.profile.installed_timer;
1475 	if (timer != NULL) {
1476 		// track remaining time
1477 		bigtime_t left = thread->debug_info.profile.timer_end - system_time();
1478 		thread->debug_info.profile.interval_left = max_c(left, 0);
1479 		thread->debug_info.profile.installed_timer = NULL;
1480 
1481 		// cancel timer
1482 		threadDebugInfoLocker.Unlock();
1483 			// not necessary, but doesn't harm and reduces contention
1484 		cancel_timer(timer);
1485 			// since invoked on the same CPU, this will not possibly wait for
1486 			// an already called timer hook
1487 	}
1488 }
1489 
1490 
1491 /*!	Called by the scheduler when a debugged thread has been scheduled.
1492 	The scheduler lock is being held.
1493 */
1494 void
1495 user_debug_thread_scheduled(Thread* thread)
1496 {
1497 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1498 
1499 	if (thread->debug_info.profile.samples != NULL
1500 		&& !thread->debug_info.profile.buffer_full) {
1501 		// install profiling timer
1502 		schedule_profiling_timer(thread,
1503 			thread->debug_info.profile.interval_left);
1504 	}
1505 }
1506 
1507 
1508 /*!	\brief Called by the debug nub thread of a team to broadcast a message to
1509 		all threads of the team that are initialized for debugging (and
1510 		thus have a debug port).
1511 */
1512 static void
1513 broadcast_debugged_thread_message(Thread *nubThread, int32 code,
1514 	const void *message, int32 size)
1515 {
1516 	// iterate through the threads
1517 	thread_info threadInfo;
1518 	int32 cookie = 0;
1519 	while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
1520 			== B_OK) {
1521 		// get the thread and lock it
1522 		Thread* thread = Thread::GetAndLock(threadInfo.thread);
1523 		if (thread == NULL)
1524 			continue;
1525 
1526 		BReference<Thread> threadReference(thread, true);
1527 		ThreadLocker threadLocker(thread, true);
1528 
1529 		// get the thread's debug port
1530 		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1531 
1532 		port_id threadDebugPort = -1;
1533 		if (thread && thread != nubThread && thread->team == nubThread->team
1534 			&& (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
1535 			&& (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
1536 			threadDebugPort = thread->debug_info.debug_port;
1537 		}
1538 
1539 		threadDebugInfoLocker.Unlock();
1540 		threadLocker.Unlock();
1541 
1542 		// send the message to the thread
1543 		if (threadDebugPort >= 0) {
1544 			status_t error = kill_interruptable_write_port(threadDebugPort,
1545 				code, message, size);
1546 			if (error != B_OK) {
1547 				TRACE(("broadcast_debugged_thread_message(): Failed to send "
1548 					"message to thread %" B_PRId32 ": %" B_PRIx32 "\n",
1549 					thread->id, error));
1550 			}
1551 		}
1552 	}
1553 }
1554 
1555 
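/*!	Performed by the nub thread when its port went away or it was interrupted:
	if this thread is still registered as the team's nub thread, it detaches
	the debugger from the team, removes all breakpoints, destroys the team
	debug info and tells all debugged threads that the debugger has changed.
*/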
1556 static void
1557 nub_thread_cleanup(Thread *nubThread)
1558 {
1559 	TRACE(("nub_thread_cleanup(%" B_PRId32 "): debugger port: %" B_PRId32 "\n",
1560 		nubThread->id, nubThread->team->debug_info.debugger_port));
1561 
1562 	ConditionVariable debugChangeCondition;
1563 	prepare_debugger_change(nubThread->team, debugChangeCondition);
1564 
1565 	team_debug_info teamDebugInfo;
1566 	bool destroyDebugInfo = false;
1567 
1568 	TeamLocker teamLocker(nubThread->team);
1569 		// required by update_threads_debugger_installed_flag()
1570 
1571 	cpu_status state = disable_interrupts();
1572 	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1573 
1574 	team_debug_info &info = nubThread->team->debug_info;
1575 	if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
1576 		&& info.nub_thread == nubThread->id) {
1577 		teamDebugInfo = info;
1578 		clear_team_debug_info(&info, false);
1579 		destroyDebugInfo = true;
1580 	}
1581 
1582 	// update the thread::flags fields
1583 	update_threads_debugger_installed_flag(nubThread->team);
1584 
1585 	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1586 	restore_interrupts(state);
1587 
1588 	teamLocker.Unlock();
1589 
1590 	if (destroyDebugInfo)
1591 		teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();
1592 
1593 	finish_debugger_change(nubThread->team);
1594 
1595 	if (destroyDebugInfo)
1596 		destroy_team_debug_info(&teamDebugInfo);
1597 
1598 	// notify all threads that the debugger is gone
1599 	broadcast_debugged_thread_message(nubThread,
1600 		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
1601 }
1602 
1603 
1604 /**	\brief Debug nub thread helper function that returns the debug port of
1605  *		   a thread of the same team.
1606  */
1607 static status_t
1608 debug_nub_thread_get_thread_debug_port(Thread *nubThread,
1609 	thread_id threadID, port_id &threadDebugPort)
1610 {
1611 	threadDebugPort = -1;
1612 
1613 	// get the thread
1614 	Thread* thread = Thread::GetAndLock(threadID);
1615 	if (thread == NULL)
1616 		return B_BAD_THREAD_ID;
1617 	BReference<Thread> threadReference(thread, true);
1618 	ThreadLocker threadLocker(thread, true);
1619 
1620 	// get the debug port
1621 	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
1622 
1623 	if (thread->team != nubThread->team)
1624 		return B_BAD_VALUE;
1625 	if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) == 0)
1626 		return B_BAD_THREAD_STATE;
1627 
1628 	threadDebugPort = thread->debug_info.debug_port;
1629 
1630 	threadDebugInfoLocker.Unlock();
1631 
1632 	if (threadDebugPort < 0)
1633 		return B_ERROR;
1634 
1635 	return B_OK;
1636 }
1637 
1638 
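/*!	Entry function of a team's debug nub thread: processes the debug messages
	arriving on the team's nub port until the port goes away or the thread is
	interrupted by a kill signal, then cleans up after itself.
*/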
1639 static status_t
1640 debug_nub_thread(void *)
1641 {
1642 	Thread *nubThread = thread_get_current_thread();
1643 
1644 	// check if we're still the current nub thread and get our port
1645 	cpu_status state = disable_interrupts();
1646 	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1647 
1648 	if (nubThread->team->debug_info.nub_thread != nubThread->id) {
1649 		RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1650 		restore_interrupts(state);
1651 		return 0;
1652 	}
1653 
1654 	port_id port = nubThread->team->debug_info.nub_port;
1655 	sem_id writeLock = nubThread->team->debug_info.debugger_write_lock;
1656 	BreakpointManager* breakpointManager
1657 		= nubThread->team->debug_info.breakpoint_manager;
1658 
1659 	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
1660 	restore_interrupts(state);
1661 
1662 	TRACE(("debug_nub_thread() thread: %" B_PRId32 ", team %" B_PRId32 ", nub "
1663 		"port: %" B_PRId32 "\n", nubThread->id, nubThread->team->id, port));
1664 
1665 	// notify all threads that a debugger has been installed
1666 	broadcast_debugged_thread_message(nubThread,
1667 		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
1668 
1669 	// command processing loop
1670 	while (true) {
1671 		int32 command;
1672 		debug_nub_message_data message;
1673 		ssize_t messageSize = read_port_etc(port, &command, &message,
1674 			sizeof(message), B_KILL_CAN_INTERRUPT, 0);
1675 
1676 		if (messageSize < 0) {
1677 			// The port is no longer valid or we were interrupted by a kill
1678 			// signal: If we are still listed in the team's debug info as nub
1679 			// thread, we need to update that.
1680 			nub_thread_cleanup(nubThread);
1681 
1682 			TRACE(("nub thread %" B_PRId32 ": terminating: %lx\n",
1683 				nubThread->id, messageSize));
1684 
1685 			return messageSize;
1686 		}
1687 
1688 		bool sendReply = false;
1689 		union {
1690 			debug_nub_read_memory_reply			read_memory;
1691 			debug_nub_write_memory_reply		write_memory;
1692 			debug_nub_get_cpu_state_reply		get_cpu_state;
1693 			debug_nub_set_breakpoint_reply		set_breakpoint;
1694 			debug_nub_set_watchpoint_reply		set_watchpoint;
1695 			debug_nub_get_signal_masks_reply	get_signal_masks;
1696 			debug_nub_get_signal_handler_reply	get_signal_handler;
1697 			debug_nub_start_profiler_reply		start_profiler;
1698 			debug_profiler_update				profiler_update;
1699 		} reply;
1700 		int32 replySize = 0;
1701 		port_id replyPort = -1;
1702 
1703 		// process the command
1704 		switch (command) {
1705 			case B_DEBUG_MESSAGE_READ_MEMORY:
1706 			{
1707 				// get the parameters
1708 				replyPort = message.read_memory.reply_port;
1709 				void *address = message.read_memory.address;
1710 				int32 size = message.read_memory.size;
1711 				status_t result = B_OK;
1712 
1713 				// check the parameters
1714 				if (!BreakpointManager::CanAccessAddress(address, false))
1715 					result = B_BAD_ADDRESS;
1716 				else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE)
1717 					result = B_BAD_VALUE;
1718 
1719 				// read the memory
1720 				size_t bytesRead = 0;
1721 				if (result == B_OK) {
1722 					result = breakpointManager->ReadMemory(address,
1723 						reply.read_memory.data, size, bytesRead);
1724 				}
1725 				reply.read_memory.error = result;
1726 
1727 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_READ_MEMORY: "
1728 					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
1729 					", result: %" B_PRIx32 ", read: %ld\n", nubThread->id,
1730 					replyPort, address, size, result, bytesRead));
1731 
1732 				// send only as much data as necessary
1733 				reply.read_memory.size = bytesRead;
1734 				replySize = reply.read_memory.data + bytesRead - (char*)&reply;
1735 				sendReply = true;
1736 				break;
1737 			}
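			// A rough debugger-side counterpart of this command, assuming the
			// debugger already knows the team's nub port; `nubPort`,
			// `replyPort` and `address` are placeholder names, the structures
			// and constants are the ones handled above (from <debugger.h>):
			//
			//	debug_nub_read_memory message;
			//	message.reply_port = replyPort;
			//	message.address = address;
			//	message.size = 256;
			//	write_port(nubPort, B_DEBUG_MESSAGE_READ_MEMORY, &message,
			//		sizeof(message));
			//
			//	debug_nub_read_memory_reply reply;
			//	int32 code;
			//	read_port(replyPort, &code, &reply, sizeof(reply));
			//	// on success reply.error is B_OK and reply.data holds
			//	// reply.size bytes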
1738 
1739 			case B_DEBUG_MESSAGE_WRITE_MEMORY:
1740 			{
1741 				// get the parameters
1742 				replyPort = message.write_memory.reply_port;
1743 				void *address = message.write_memory.address;
1744 				int32 size = message.write_memory.size;
1745 				const char *data = message.write_memory.data;
1746 				int32 realSize = (char*)&message + messageSize - data;
1747 				status_t result = B_OK;
1748 
1749 				// check the parameters
1750 				if (!BreakpointManager::CanAccessAddress(address, true))
1751 					result = B_BAD_ADDRESS;
1752 				else if (size <= 0 || size > realSize)
1753 					result = B_BAD_VALUE;
1754 
1755 				// write the memory
1756 				size_t bytesWritten = 0;
1757 				if (result == B_OK) {
1758 					result = breakpointManager->WriteMemory(address, data, size,
1759 						bytesWritten);
1760 				}
1761 				reply.write_memory.error = result;
1762 
1763 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_WRITE_MEMORY: "
1764 					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
1765 					", result: %" B_PRIx32 ", written: %ld\n", nubThread->id,
1766 					replyPort, address, size, result, bytesWritten));
1767 
1768 				reply.write_memory.size = bytesWritten;
1769 				sendReply = true;
1770 				replySize = sizeof(debug_nub_write_memory_reply);
1771 				break;
1772 			}
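			// A sketch of the matching debugger-side request; since the
			// payload travels inline in `data`, only the used part of the
			// message needs to be written to the port (placeholder names:
			// nubPort, replyPort, address, bytes, byteCount):
			//
			//	debug_nub_write_memory message;
			//	message.reply_port = replyPort;
			//	message.address = address;
			//	message.size = byteCount;
			//	memcpy(message.data, bytes, byteCount);
			//	write_port(nubPort, B_DEBUG_MESSAGE_WRITE_MEMORY, &message,
			//		message.data + byteCount - (char*)&message);
			//
			//	debug_nub_write_memory_reply reply;
			//	int32 code;
			//	read_port(replyPort, &code, &reply, sizeof(reply));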
1773 
1774 			case B_DEBUG_MESSAGE_SET_TEAM_FLAGS:
1775 			{
1776 				// get the parameters
1777 				int32 flags = message.set_team_flags.flags
1778 					& B_TEAM_DEBUG_USER_FLAG_MASK;
1779 
1780 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_TEAM_FLAGS"
1781 					": flags: %" B_PRIx32 "\n", nubThread->id, flags));
1782 
1783 				Team *team = thread_get_current_thread()->team;
1784 
1785 				// set the flags
1786 				cpu_status state = disable_interrupts();
1787 				GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1788 
1789 				flags |= team->debug_info.flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK;
1790 				atomic_set(&team->debug_info.flags, flags);
1791 
1792 				RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1793 				restore_interrupts(state);
1794 
1795 				break;
1796 			}
1797 
1798 			case B_DEBUG_MESSAGE_SET_THREAD_FLAGS:
1799 			{
1800 				// get the parameters
1801 				thread_id threadID = message.set_thread_flags.thread;
1802 				int32 flags = message.set_thread_flags.flags
1803 					& B_THREAD_DEBUG_USER_FLAG_MASK;
1804 
1805 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_THREAD_FLAGS"
1806 					": thread: %" B_PRId32 ", flags: %" B_PRIx32 "\n",
1807 					nubThread->id, threadID, flags));
1808 
1809 				// set the flags
1810 				Thread* thread = Thread::GetAndLock(threadID);
1811 				if (thread == NULL)
1812 					break;
1813 				BReference<Thread> threadReference(thread, true);
1814 				ThreadLocker threadLocker(thread, true);
1815 
1816 				InterruptsSpinLocker threadDebugInfoLocker(
1817 					thread->debug_info.lock);
1818 
1819 				if (thread->team == thread_get_current_thread()->team) {
1820 					flags |= thread->debug_info.flags
1821 						& B_THREAD_DEBUG_KERNEL_FLAG_MASK;
1822 					atomic_set(&thread->debug_info.flags, flags);
1823 				}
1824 
1825 				break;
1826 			}
1827 
1828 			case B_DEBUG_MESSAGE_CONTINUE_THREAD:
1829 			{
1830 				// get the parameters
1831 				thread_id threadID;
1832 				uint32 handleEvent;
1833 				bool singleStep;
1834 
1835 				threadID = message.continue_thread.thread;
1836 				handleEvent = message.continue_thread.handle_event;
1837 				singleStep = message.continue_thread.single_step;
1838 
1839 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CONTINUE_THREAD"
1840 					": thread: %" B_PRId32 ", handle event: %" B_PRIu32 ", "
1841 					"single step: %d\n", nubThread->id, threadID, handleEvent,
1842 					singleStep));
1843 
1844 				// find the thread and get its debug port
1845 				port_id threadDebugPort = -1;
1846 				status_t result = debug_nub_thread_get_thread_debug_port(
1847 					nubThread, threadID, threadDebugPort);
1848 
1849 				// send a message to the debugged thread
1850 				if (result == B_OK) {
1851 					debugged_thread_continue commandMessage;
1852 					commandMessage.handle_event = handleEvent;
1853 					commandMessage.single_step = singleStep;
1854 
1855 					result = write_port(threadDebugPort,
1856 						B_DEBUGGED_THREAD_MESSAGE_CONTINUE,
1857 						&commandMessage, sizeof(commandMessage));
1858 				} else if (result == B_BAD_THREAD_STATE) {
1859 					Thread* thread = Thread::GetAndLock(threadID);
1860 					if (thread == NULL)
1861 						break;
1862 
1863 					BReference<Thread> threadReference(thread, true);
1864 					ThreadLocker threadLocker(thread, true);
1865 					if (thread->state == B_THREAD_SUSPENDED) {
1866 						threadLocker.Unlock();
1867 						resume_thread(threadID);
1868 						break;
1869 					}
1870 				}
1871 
1872 				break;
1873 			}
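			// A sketch of how a debugger might resume a thread it has been
			// notified about (placeholder names: nubPort, stoppedThread;
			// B_THREAD_DEBUG_HANDLE_EVENT is assumed to come from
			// <debugger.h>):
			//
			//	debug_nub_continue_thread continueMessage;
			//	continueMessage.thread = stoppedThread;
			//	continueMessage.handle_event = B_THREAD_DEBUG_HANDLE_EVENT;
			//	continueMessage.single_step = false;
			//	write_port(nubPort, B_DEBUG_MESSAGE_CONTINUE_THREAD,
			//		&continueMessage, sizeof(continueMessage));
			//
			// No reply is sent; the command either reaches the stopped thread
			// via its debug port or, if the thread is merely suspended, simply
			// resumes it (see above).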
1874 
1875 			case B_DEBUG_MESSAGE_SET_CPU_STATE:
1876 			{
1877 				// get the parameters
1878 				thread_id threadID = message.set_cpu_state.thread;
1879 				const debug_cpu_state &cpuState
1880 					= message.set_cpu_state.cpu_state;
1881 
1882 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_CPU_STATE"
1883 					": thread: %" B_PRId32 "\n", nubThread->id, threadID));
1884 
1885 				// find the thread and get its debug port
1886 				port_id threadDebugPort = -1;
1887 				status_t result = debug_nub_thread_get_thread_debug_port(
1888 					nubThread, threadID, threadDebugPort);
1889 
1890 				// send a message to the debugged thread
1891 				if (result == B_OK) {
1892 					debugged_thread_set_cpu_state commandMessage;
1893 					memcpy(&commandMessage.cpu_state, &cpuState,
1894 						sizeof(debug_cpu_state));
1895 					write_port(threadDebugPort,
1896 						B_DEBUGGED_THREAD_SET_CPU_STATE,
1897 						&commandMessage, sizeof(commandMessage));
1898 				}
1899 
1900 				break;
1901 			}
1902 
1903 			case B_DEBUG_MESSAGE_GET_CPU_STATE:
1904 			{
1905 				// get the parameters
1906 				thread_id threadID = message.get_cpu_state.thread;
1907 				replyPort = message.get_cpu_state.reply_port;
1908 
1909 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_CPU_STATE"
1910 					": thread: %" B_PRId32 "\n", nubThread->id, threadID));
1911 
1912 				// find the thread and get its debug port
1913 				port_id threadDebugPort = -1;
1914 				status_t result = debug_nub_thread_get_thread_debug_port(
1915 					nubThread, threadID, threadDebugPort);
1916 
1917 				// send a message to the debugged thread
1918 				if (threadDebugPort >= 0) {
1919 					debugged_thread_get_cpu_state commandMessage;
1920 					commandMessage.reply_port = replyPort;
1921 					result = write_port(threadDebugPort,
1922 						B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage,
1923 						sizeof(commandMessage));
1924 				}
1925 
1926 				// send a reply to the debugger in case of error
1927 				if (result != B_OK) {
1928 					reply.get_cpu_state.error = result;
1929 					sendReply = true;
1930 					replySize = sizeof(reply.get_cpu_state);
1931 				}
1932 
1933 				break;
1934 			}
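			// Debugger-side sketch; note that the reply is written by the
			// debugged thread itself (the nub only forwards the request), so
			// it arrives on the given reply port like any other reply
			// (placeholder names: nubPort, replyPort, stoppedThread):
			//
			//	debug_nub_get_cpu_state message;
			//	message.reply_port = replyPort;
			//	message.thread = stoppedThread;
			//	write_port(nubPort, B_DEBUG_MESSAGE_GET_CPU_STATE, &message,
			//		sizeof(message));
			//
			//	debug_nub_get_cpu_state_reply reply;
			//	int32 code;
			//	read_port(replyPort, &code, &reply, sizeof(reply));
			//	// the reply carries the error code and, on success, the
			//	// thread's architecture-specific debug_cpu_state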
1935 
1936 			case B_DEBUG_MESSAGE_SET_BREAKPOINT:
1937 			{
1938 				// get the parameters
1939 				replyPort = message.set_breakpoint.reply_port;
1940 				void *address = message.set_breakpoint.address;
1941 
1942 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_BREAKPOINT"
1943 					": address: %p\n", nubThread->id, address));
1944 
1945 				// check the address
1946 				status_t result = B_OK;
1947 				if (address == NULL
1948 					|| !BreakpointManager::CanAccessAddress(address, false)) {
1949 					result = B_BAD_ADDRESS;
1950 				}
1951 
1952 				// set the breakpoint
1953 				if (result == B_OK)
1954 					result = breakpointManager->InstallBreakpoint(address);
1955 
1956 				if (result == B_OK)
1957 					update_threads_breakpoints_flag();
1958 
1959 				// prepare the reply
1960 				reply.set_breakpoint.error = result;
1961 				replySize = sizeof(reply.set_breakpoint);
1962 				sendReply = true;
1963 
1964 				break;
1965 			}
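			// Debugger-side sketch of installing a breakpoint (placeholder
			// names: nubPort, replyPort, breakpointAddress); clearing one is
			// analogous with B_DEBUG_MESSAGE_CLEAR_BREAKPOINT, which sends no
			// reply:
			//
			//	debug_nub_set_breakpoint message;
			//	message.reply_port = replyPort;
			//	message.address = breakpointAddress;
			//	write_port(nubPort, B_DEBUG_MESSAGE_SET_BREAKPOINT, &message,
			//		sizeof(message));
			//
			//	debug_nub_set_breakpoint_reply reply;
			//	int32 code;
			//	read_port(replyPort, &code, &reply, sizeof(reply));
			//	status_t error = reply.error;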
1966 
1967 			case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT:
1968 			{
1969 				// get the parameters
1970 				void *address = message.clear_breakpoint.address;
1971 
1972 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_BREAKPOINT"
1973 					": address: %p\n", nubThread->id, address));
1974 
1975 				// check the address
1976 				status_t result = B_OK;
1977 				if (address == NULL
1978 					|| !BreakpointManager::CanAccessAddress(address, false)) {
1979 					result = B_BAD_ADDRESS;
1980 				}
1981 
1982 				// clear the breakpoint
1983 				if (result == B_OK)
1984 					result = breakpointManager->UninstallBreakpoint(address);
1985 
1986 				if (result == B_OK)
1987 					update_threads_breakpoints_flag();
1988 
1989 				break;
1990 			}
1991 
1992 			case B_DEBUG_MESSAGE_SET_WATCHPOINT:
1993 			{
1994 				// get the parameters
1995 				replyPort = message.set_watchpoint.reply_port;
1996 				void *address = message.set_watchpoint.address;
1997 				uint32 type = message.set_watchpoint.type;
1998 				int32 length = message.set_watchpoint.length;
1999 
2000 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_WATCHPOINT"
2001 					": address: %p, type: %" B_PRIu32 ", length: %" B_PRId32 "\n",
2002 					nubThread->id, address, type, length));
2003 
2004 				// check the address and size
2005 				status_t result = B_OK;
2006 				if (address == NULL
2007 					|| !BreakpointManager::CanAccessAddress(address, false)) {
2008 					result = B_BAD_ADDRESS;
2009 				}
2010 				if (length < 0)
2011 					result = B_BAD_VALUE;
2012 
2013 				// set the watchpoint
2014 				if (result == B_OK) {
2015 					result = breakpointManager->InstallWatchpoint(address, type,
2016 						length);
2017 				}
2018 
2019 				if (result == B_OK)
2020 					update_threads_breakpoints_flag();
2021 
2022 				// prepare the reply
2023 				reply.set_watchpoint.error = result;
2024 				replySize = sizeof(reply.set_watchpoint);
2025 				sendReply = true;
2026 
2027 				break;
2028 			}
2029 
2030 			case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT:
2031 			{
2032 				// get the parameters
2033 				void *address = message.clear_watchpoint.address;
2034 
2035 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_WATCHPOINT"
2036 					": address: %p\n", nubThread->id, address));
2037 
2038 				// check the address
2039 				status_t result = B_OK;
2040 				if (address == NULL
2041 					|| !BreakpointManager::CanAccessAddress(address, false)) {
2042 					result = B_BAD_ADDRESS;
2043 				}
2044 
2045 				// clear the watchpoint
2046 				if (result == B_OK)
2047 					result = breakpointManager->UninstallWatchpoint(address);
2048 
2049 				if (result == B_OK)
2050 					update_threads_breakpoints_flag();
2051 
2052 				break;
2053 			}
2054 
2055 			case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS:
2056 			{
2057 				// get the parameters
2058 				thread_id threadID = message.set_signal_masks.thread;
2059 				uint64 ignore = message.set_signal_masks.ignore_mask;
2060 				uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask;
2061 				uint32 ignoreOp = message.set_signal_masks.ignore_op;
2062 				uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op;
2063 
2064 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_MASKS"
2065 					": thread: %" B_PRId32 ", ignore: %" B_PRIx64 " (op: %"
2066 					B_PRIu32 "), ignore once: %" B_PRIx64 " (op: %" B_PRIu32
2067 					")\n", nubThread->id, threadID, ignore, ignoreOp,
2068 					ignoreOnce, ignoreOnceOp));
2069 
2070 				// set the masks
2071 				Thread* thread = Thread::GetAndLock(threadID);
2072 				if (thread == NULL)
2073 					break;
2074 				BReference<Thread> threadReference(thread, true);
2075 				ThreadLocker threadLocker(thread, true);
2076 
2077 				InterruptsSpinLocker threadDebugInfoLocker(
2078 					thread->debug_info.lock);
2079 
2080 				if (thread->team == thread_get_current_thread()->team) {
2081 					thread_debug_info &threadDebugInfo = thread->debug_info;
2082 					// set ignore mask
2083 					switch (ignoreOp) {
2084 						case B_DEBUG_SIGNAL_MASK_AND:
2085 							threadDebugInfo.ignore_signals &= ignore;
2086 							break;
2087 						case B_DEBUG_SIGNAL_MASK_OR:
2088 							threadDebugInfo.ignore_signals |= ignore;
2089 							break;
2090 						case B_DEBUG_SIGNAL_MASK_SET:
2091 							threadDebugInfo.ignore_signals = ignore;
2092 							break;
2093 					}
2094 
2095 					// set ignore once mask
2096 					switch (ignoreOnceOp) {
2097 						case B_DEBUG_SIGNAL_MASK_AND:
2098 							threadDebugInfo.ignore_signals_once &= ignoreOnce;
2099 							break;
2100 						case B_DEBUG_SIGNAL_MASK_OR:
2101 							threadDebugInfo.ignore_signals_once |= ignoreOnce;
2102 							break;
2103 						case B_DEBUG_SIGNAL_MASK_SET:
2104 							threadDebugInfo.ignore_signals_once = ignoreOnce;
2105 							break;
2106 					}
2107 				}
2108 
2109 				break;
2110 			}
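			// Debugger-side sketch, OR-ing SIGINT into the thread's one-shot
			// ignore mask while leaving the permanent mask untouched
			// (placeholder names: nubPort, stoppedThread; the mask bit for
			// signal N is assumed to follow the kernel's usual
			// 1 << (N - 1) convention):
			//
			//	debug_nub_set_signal_masks message;
			//	message.thread = stoppedThread;
			//	message.ignore_mask = 0;
			//	message.ignore_op = B_DEBUG_SIGNAL_MASK_OR;
			//	message.ignore_once_mask = 1ULL << (SIGINT - 1);
			//	message.ignore_once_op = B_DEBUG_SIGNAL_MASK_OR;
			//	write_port(nubPort, B_DEBUG_MESSAGE_SET_SIGNAL_MASKS, &message,
			//		sizeof(message));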
2111 
2112 			case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS:
2113 			{
2114 				// get the parameters
2115 				replyPort = message.get_signal_masks.reply_port;
2116 				thread_id threadID = message.get_signal_masks.thread;
2117 				status_t result = B_OK;
2118 
2119 				// get the masks
2120 				uint64 ignore = 0;
2121 				uint64 ignoreOnce = 0;
2122 
2123 				Thread* thread = Thread::GetAndLock(threadID);
2124 				if (thread != NULL) {
2125 					BReference<Thread> threadReference(thread, true);
2126 					ThreadLocker threadLocker(thread, true);
2127 
2128 					InterruptsSpinLocker threadDebugInfoLocker(
2129 						thread->debug_info.lock);
2130 
2131 					ignore = thread->debug_info.ignore_signals;
2132 					ignoreOnce = thread->debug_info.ignore_signals_once;
2133 				} else
2134 					result = B_BAD_THREAD_ID;
2135 
2136 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_MASKS"
2137 					": reply port: %" B_PRId32 ", thread: %" B_PRId32 ", "
2138 					"ignore: %" B_PRIx64 ", ignore once: %" B_PRIx64 ", result: "
2139 					"%" B_PRIx32 "\n", nubThread->id, replyPort, threadID,
2140 					ignore, ignoreOnce, result));
2141 
2142 				// prepare the message
2143 				reply.get_signal_masks.error = result;
2144 				reply.get_signal_masks.ignore_mask = ignore;
2145 				reply.get_signal_masks.ignore_once_mask = ignoreOnce;
2146 				replySize = sizeof(reply.get_signal_masks);
2147 				sendReply = true;
2148 				break;
2149 			}
2150 
2151 			case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER:
2152 			{
2153 				// get the parameters
2154 				int signal = message.set_signal_handler.signal;
2155 				struct sigaction &handler = message.set_signal_handler.handler;
2156 
2157 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER"
2158 					": signal: %d, handler: %p\n", nubThread->id, signal,
2159 					handler.sa_handler));
2160 
2161 				// set the handler
2162 				sigaction(signal, &handler, NULL);
2163 
2164 				break;
2165 			}
2166 
2167 			case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER:
2168 			{
2169 				// get the parameters
2170 				replyPort = message.get_signal_handler.reply_port;
2171 				int signal = message.get_signal_handler.signal;
2172 				status_t result = B_OK;
2173 
2174 				// get the handler
2175 				if (sigaction(signal, NULL, &reply.get_signal_handler.handler)
2176 						!= 0) {
2177 					result = errno;
2178 				}
2179 
2180 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER"
2181 					": reply port: %" B_PRId32 ", signal: %d, handler: %p\n",
2182 					nubThread->id, replyPort, signal,
2183 					reply.get_signal_handler.handler.sa_handler));
2184 
2185 				// prepare the message
2186 				reply.get_signal_handler.error = result;
2187 				replySize = sizeof(reply.get_signal_handler);
2188 				sendReply = true;
2189 				break;
2190 			}
2191 
2192 			case B_DEBUG_MESSAGE_PREPARE_HANDOVER:
2193 			{
2194 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_PREPARE_HANDOVER"
2195 					"\n", nubThread->id));
2196 
2197 				Team *team = nubThread->team;
2198 
2199 				// Acquire the debugger write lock. As soon as we have it and
2200 				// have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread
2201 				// will write anything to the debugger port anymore.
2202 				status_t result = acquire_sem_etc(writeLock, 1,
2203 					B_KILL_CAN_INTERRUPT, 0);
2204 				if (result == B_OK) {
2205 					// set the respective team debug flag
2206 					cpu_status state = disable_interrupts();
2207 					GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2208 
2209 					atomic_or(&team->debug_info.flags,
2210 						B_TEAM_DEBUG_DEBUGGER_HANDOVER);
2211 					BreakpointManager* breakpointManager
2212 						= team->debug_info.breakpoint_manager;
2213 
2214 					RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2215 					restore_interrupts(state);
2216 
2217 					// remove all installed breakpoints
2218 					breakpointManager->RemoveAllBreakpoints();
2219 
2220 					release_sem(writeLock);
2221 				} else {
2222 					// We probably got a SIGKILL. If so, we will terminate when
2223 					// reading the next message fails.
2224 				}
2225 
2226 				break;
2227 			}
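			// User-space sketch of a handover: the old debugger asks the nub
			// to prepare, the new debugger then installs itself, and both
			// debugger ports receive a B_DEBUGGER_MESSAGE_HANDED_OVER
			// notification (placeholder names: nubPort, teamID,
			// newDebuggerPort; install_team_debugger() here refers to the
			// public <debugger.h> call):
			//
			//	// old debugger
			//	write_port(nubPort, B_DEBUG_MESSAGE_PREPARE_HANDOVER, NULL, 0);
			//
			//	// new debugger
			//	port_id newNubPort = install_team_debugger(teamID,
			//		newDebuggerPort);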
2228 
2229 			case B_DEBUG_MESSAGE_HANDED_OVER:
2230 			{
2231 				// notify all threads that the debugger has changed
2232 				broadcast_debugged_thread_message(nubThread,
2233 					B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
2234 
2235 				break;
2236 			}
2237 
2238 			case B_DEBUG_START_PROFILER:
2239 			{
2240 				// get the parameters
2241 				thread_id threadID = message.start_profiler.thread;
2242 				replyPort = message.start_profiler.reply_port;
2243 				area_id sampleArea = message.start_profiler.sample_area;
2244 				int32 stackDepth = message.start_profiler.stack_depth;
2245 				bool variableStackDepth
2246 					= message.start_profiler.variable_stack_depth;
2247 				bigtime_t interval = max_c(message.start_profiler.interval,
2248 					B_DEBUG_MIN_PROFILE_INTERVAL);
2249 				status_t result = B_OK;
2250 
2251 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_START_PROFILER: "
2252 					"thread: %" B_PRId32 ", sample area: %" B_PRId32 "\n",
2253 					nubThread->id, threadID, sampleArea));
2254 
2255 				if (stackDepth < 1)
2256 					stackDepth = 1;
2257 				else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH)
2258 					stackDepth = B_DEBUG_STACK_TRACE_DEPTH;
2259 
2260 				// provision for an extra entry per hit (for the number of
2261 				// samples), if variable stack depth
2262 				if (variableStackDepth)
2263 					stackDepth++;
2264 
2265 				// clone the sample area
2266 				area_info areaInfo;
2267 				if (result == B_OK)
2268 					result = get_area_info(sampleArea, &areaInfo);
2269 
2270 				area_id clonedSampleArea = -1;
2271 				void* samples = NULL;
2272 				if (result == B_OK) {
2273 					clonedSampleArea = clone_area("profiling samples", &samples,
2274 						B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA,
2275 						sampleArea);
2276 					if (clonedSampleArea >= 0) {
2277 						// we need the memory locked
2278 						result = lock_memory(samples, areaInfo.size,
2279 							B_READ_DEVICE);
2280 						if (result != B_OK) {
2281 							delete_area(clonedSampleArea);
2282 							clonedSampleArea = -1;
2283 						}
2284 					} else
2285 						result = clonedSampleArea;
2286 				}
2287 
2288 				// get the thread and set the profile info
2289 				int32 imageEvent = nubThread->team->debug_info.image_event;
2290 				if (result == B_OK) {
2291 					Thread* thread = Thread::GetAndLock(threadID);
2292 					BReference<Thread> threadReference(thread, true);
2293 					ThreadLocker threadLocker(thread, true);
2294 
2295 					if (thread != NULL && thread->team == nubThread->team) {
2296 						thread_debug_info &threadDebugInfo = thread->debug_info;
2297 
2298 						InterruptsSpinLocker threadDebugInfoLocker(
2299 							threadDebugInfo.lock);
2300 
2301 						if (threadDebugInfo.profile.samples == NULL) {
2302 							threadDebugInfo.profile.interval = interval;
2303 							threadDebugInfo.profile.sample_area
2304 								= clonedSampleArea;
2305 							threadDebugInfo.profile.samples = (addr_t*)samples;
2306 							threadDebugInfo.profile.max_samples
2307 								= areaInfo.size / sizeof(addr_t);
2308 							threadDebugInfo.profile.flush_threshold
2309 								= threadDebugInfo.profile.max_samples
2310 									* B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD
2311 									/ 100;
2312 							threadDebugInfo.profile.sample_count = 0;
2313 							threadDebugInfo.profile.dropped_ticks = 0;
2314 							threadDebugInfo.profile.stack_depth = stackDepth;
2315 							threadDebugInfo.profile.variable_stack_depth
2316 								= variableStackDepth;
2317 							threadDebugInfo.profile.buffer_full = false;
2318 							threadDebugInfo.profile.interval_left = interval;
2319 							threadDebugInfo.profile.installed_timer = NULL;
2320 							threadDebugInfo.profile.image_event = imageEvent;
2321 							threadDebugInfo.profile.last_image_event
2322 								= imageEvent;
2323 						} else
2324 							result = B_BAD_VALUE;
2325 					} else
2326 						result = B_BAD_THREAD_ID;
2327 				}
2328 
2329 				// on error unlock and delete the sample area
2330 				if (result != B_OK) {
2331 					if (clonedSampleArea >= 0) {
2332 						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
2333 						delete_area(clonedSampleArea);
2334 					}
2335 				}
2336 
2337 				// send a reply to the debugger
2338 				reply.start_profiler.error = result;
2339 				reply.start_profiler.interval = interval;
2340 				reply.start_profiler.image_event = imageEvent;
2341 				sendReply = true;
2342 				replySize = sizeof(reply.start_profiler);
2343 
2344 				break;
2345 			}
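			// Debugger-side sketch of starting the profiler: the sample
			// buffer is an area created by the debugger and cloned/locked by
			// the nub above (placeholder names: nubPort, replyPort,
			// profiledThread):
			//
			//	void* samples;
			//	area_id sampleArea = create_area("profiling samples", &samples,
			//		B_ANY_ADDRESS, 64 * 1024, B_NO_LOCK,
			//		B_READ_AREA | B_WRITE_AREA);
			//
			//	debug_nub_start_profiler message;
			//	message.reply_port = replyPort;
			//	message.thread = profiledThread;
			//	message.interval = 1000;		// sample every 1000 us
			//	message.sample_area = sampleArea;
			//	message.stack_depth = 5;
			//	message.variable_stack_depth = false;
			//	write_port(nubPort, B_DEBUG_START_PROFILER, &message,
			//		sizeof(message));
			//
			//	debug_nub_start_profiler_reply reply;
			//	int32 code;
			//	read_port(replyPort, &code, &reply, sizeof(reply));
			//	// reply.error is the result, reply.interval the interval
			//	// actually used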
2346 
2347 			case B_DEBUG_STOP_PROFILER:
2348 			{
2349 				// get the parameters
2350 				thread_id threadID = message.stop_profiler.thread;
2351 				replyPort = message.stop_profiler.reply_port;
2352 				status_t result = B_OK;
2353 
2354 				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_STOP_PROFILER: "
2355 					"thread: %" B_PRId32 "\n", nubThread->id, threadID));
2356 
2357 				area_id sampleArea = -1;
2358 				addr_t* samples = NULL;
2359 				int32 sampleCount = 0;
2360 				int32 stackDepth = 0;
2361 				bool variableStackDepth = false;
2362 				int32 imageEvent = 0;
2363 				int32 droppedTicks = 0;
2364 
2365 				// get the thread and detach the profile info
2366 				Thread* thread = Thread::GetAndLock(threadID);
2367 				BReference<Thread> threadReference(thread, true);
2368 				ThreadLocker threadLocker(thread, true);
2369 
2370 				if (thread && thread->team == nubThread->team) {
2371 					thread_debug_info &threadDebugInfo = thread->debug_info;
2372 
2373 					InterruptsSpinLocker threadDebugInfoLocker(
2374 						threadDebugInfo.lock);
2375 
2376 					if (threadDebugInfo.profile.samples != NULL) {
2377 						sampleArea = threadDebugInfo.profile.sample_area;
2378 						samples = threadDebugInfo.profile.samples;
2379 						sampleCount = threadDebugInfo.profile.sample_count;
2380 						droppedTicks = threadDebugInfo.profile.dropped_ticks;
2381 						stackDepth = threadDebugInfo.profile.stack_depth;
2382 						variableStackDepth
2383 							= threadDebugInfo.profile.variable_stack_depth;
2384 						imageEvent = threadDebugInfo.profile.image_event;
2385 						threadDebugInfo.profile.sample_area = -1;
2386 						threadDebugInfo.profile.samples = NULL;
2387 						threadDebugInfo.profile.buffer_full = false;
2388 						threadDebugInfo.profile.dropped_ticks = 0;
2389 					} else
2390 						result = B_BAD_VALUE;
2391 				} else
2392 					result = B_BAD_THREAD_ID;
2393 
2394 				threadLocker.Unlock();
2395 
2396 				// prepare the reply
2397 				if (result == B_OK) {
2398 					reply.profiler_update.origin.thread = threadID;
2399 					reply.profiler_update.image_event = imageEvent;
2400 					reply.profiler_update.stack_depth = stackDepth;
2401 					reply.profiler_update.variable_stack_depth
2402 						= variableStackDepth;
2403 					reply.profiler_update.sample_count = sampleCount;
2404 					reply.profiler_update.dropped_ticks = droppedTicks;
2405 					reply.profiler_update.stopped = true;
2406 				} else
2407 					reply.profiler_update.origin.thread = result;
2408 
2409 				replySize = sizeof(debug_profiler_update);
2410 				sendReply = true;
2411 
2412 				if (sampleArea >= 0) {
2413 					area_info areaInfo;
2414 					if (get_area_info(sampleArea, &areaInfo) == B_OK) {
2415 						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
2416 						delete_area(sampleArea);
2417 					}
2418 				}
2419 			}
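			// Debugger-side sketch of stopping the profiler; the reply is a
			// debug_profiler_update with `stopped` set to true (placeholder
			// names: nubPort, replyPort, profiledThread):
			//
			//	debug_nub_stop_profiler message;
			//	message.reply_port = replyPort;
			//	message.thread = profiledThread;
			//	write_port(nubPort, B_DEBUG_STOP_PROFILER, &message,
			//		sizeof(message));
			//
			//	debug_profiler_update reply;
			//	int32 code;
			//	read_port(replyPort, &code, &reply, sizeof(reply));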
2420 		}
2421 
2422 		// send the reply, if necessary
2423 		if (sendReply) {
2424 			status_t error = kill_interruptable_write_port(replyPort, command,
2425 				&reply, replySize);
2426 
2427 			if (error != B_OK) {
2428 				// The debugger port either no longer exists or we got
2429 				// interrupted by a kill signal. In either case we terminate.
2430 				TRACE(("nub thread %" B_PRId32 ": failed to send reply to port "
2431 					"%" B_PRId32 ": %s\n", nubThread->id, replyPort,
2432 					strerror(error)));
2433 
2434 				nub_thread_cleanup(nubThread);
2435 				return error;
2436 			}
2437 		}
2438 	}
2439 }
2440 
2441 
2442 /**	\brief Helper function for install_team_debugger() that sets up the team
2443 		   and thread debug infos.
2444 
2445 	The caller must hold the team's lock as well as the team debug info lock.
2446 
2447 	The function also clears the arch-specific team and thread debug infos
2448 	(including, among other things, previously set break/watchpoints).
2449  */
2450 static void
2451 install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam,
2452 	port_id debuggerPort, port_id nubPort, thread_id nubThread,
2453 	sem_id debuggerPortWriteLock, thread_id causingThread)
2454 {
2455 	atomic_set(&team->debug_info.flags,
2456 		B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED);
2457 	team->debug_info.nub_port = nubPort;
2458 	team->debug_info.nub_thread = nubThread;
2459 	team->debug_info.debugger_team = debuggerTeam;
2460 	team->debug_info.debugger_port = debuggerPort;
2461 	team->debug_info.debugger_write_lock = debuggerPortWriteLock;
2462 	team->debug_info.causing_thread = causingThread;
2463 
2464 	arch_clear_team_debug_info(&team->debug_info.arch_info);
2465 
2466 	// set the user debug flags and signal masks of all threads to the default
2467 	for (Thread *thread = team->thread_list; thread;
2468 			thread = thread->team_next) {
2469 		SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
2470 
2471 		if (thread->id == nubThread) {
2472 			atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD);
2473 		} else {
2474 			int32 flags = thread->debug_info.flags
2475 				& ~B_THREAD_DEBUG_USER_FLAG_MASK;
2476 			atomic_set(&thread->debug_info.flags,
2477 				flags | B_THREAD_DEBUG_DEFAULT_FLAGS);
2478 			thread->debug_info.ignore_signals = 0;
2479 			thread->debug_info.ignore_signals_once = 0;
2480 
2481 			arch_clear_thread_debug_info(&thread->debug_info.arch_info);
2482 		}
2483 	}
2484 
2485 	// update the thread::flags fields
2486 	update_threads_debugger_installed_flag(team);
2487 }
2488 
2489 
2490 static port_id
2491 install_team_debugger(team_id teamID, port_id debuggerPort,
2492 	thread_id causingThread, bool useDefault, bool dontReplace)
2493 {
2494 	TRACE(("install_team_debugger(team: %" B_PRId32 ", port: %" B_PRId32 ", "
2495 		"default: %d, dontReplace: %d)\n", teamID, debuggerPort, useDefault,
2496 		dontReplace));
2497 
2498 	if (useDefault)
2499 		debuggerPort = atomic_get(&sDefaultDebuggerPort);
2500 
2501 	// get the debugger team
2502 	port_info debuggerPortInfo;
2503 	status_t error = get_port_info(debuggerPort, &debuggerPortInfo);
2504 	if (error != B_OK) {
2505 		TRACE(("install_team_debugger(): Failed to get debugger port info: "
2506 			"%" B_PRIx32 "\n", error));
2507 		return error;
2508 	}
2509 	team_id debuggerTeam = debuggerPortInfo.team;
2510 
2511 	// Check the debugger team: It must neither be the kernel team nor the
2512 	// debugged team.
2513 	if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) {
2514 		TRACE(("install_team_debugger(): Can't debug kernel or debugger team. "
2515 			"debugger: %" B_PRId32 ", debugged: %" B_PRId32 "\n", debuggerTeam,
2516 			teamID));
2517 		return B_NOT_ALLOWED;
2518 	}
2519 
2520 	// get the team
2521 	Team* team;
2522 	ConditionVariable debugChangeCondition;
2523 	error = prepare_debugger_change(teamID, debugChangeCondition, team);
2524 	if (error != B_OK)
2525 		return error;
2526 
2527 	// get the real team ID
2528 	teamID = team->id;
2529 
2530 	// check if a debugger is already installed
2531 
2532 	bool done = false;
2533 	port_id result = B_ERROR;
2534 	bool handOver = false;
2535 	port_id oldDebuggerPort = -1;
2536 	port_id nubPort = -1;
2537 
2538 	TeamLocker teamLocker(team);
2539 	cpu_status state = disable_interrupts();
2540 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2541 
2542 	int32 teamDebugFlags = team->debug_info.flags;
2543 
2544 	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2545 		// There's already a debugger installed.
2546 		if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) {
2547 			if (dontReplace) {
2548 				// We're fine with already having a debugger.
2549 				error = B_OK;
2550 				done = true;
2551 				result = team->debug_info.nub_port;
2552 			} else {
2553 				// A handover to another debugger is requested.
2554 				// Set the handing-over flag -- we'll clear both flags after
2555 				// having sent the handed-over message to the new debugger.
2556 				atomic_or(&team->debug_info.flags,
2557 					B_TEAM_DEBUG_DEBUGGER_HANDING_OVER);
2558 
2559 				oldDebuggerPort = team->debug_info.debugger_port;
2560 				result = nubPort = team->debug_info.nub_port;
2561 				if (causingThread < 0)
2562 					causingThread = team->debug_info.causing_thread;
2563 
2564 				// set the new debugger
2565 				install_team_debugger_init_debug_infos(team, debuggerTeam,
2566 					debuggerPort, nubPort, team->debug_info.nub_thread,
2567 					team->debug_info.debugger_write_lock, causingThread);
2568 
2569 				handOver = true;
2570 				done = true;
2571 			}
2572 		} else {
2573 			// there's already a debugger installed
2574 			error = (dontReplace ? B_OK : B_BAD_VALUE);
2575 			done = true;
2576 			result = team->debug_info.nub_port;
2577 		}
2578 	} else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0
2579 		&& useDefault) {
2580 		// No debugger yet, disable_debugger() had been invoked, and we
2581 		// would install the default debugger. Just fail.
2582 		error = B_BAD_VALUE;
2583 	}
2584 
2585 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2586 	restore_interrupts(state);
2587 	teamLocker.Unlock();
2588 
2589 	if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) {
2590 		// The old debugger must have just died. Proceed as
2591 		// if there were no debugger installed. We may still be too
2592 		// early, in which case we'll fail, but this race condition
2593 		// should be unbelievably rare and relatively harmless.
2594 		handOver = false;
2595 		done = false;
2596 	}
2597 
2598 	if (handOver) {
2599 		// prepare the handed-over message
2600 		debug_handed_over notification;
2601 		notification.origin.thread = -1;
2602 		notification.origin.team = teamID;
2603 		notification.origin.nub_port = nubPort;
2604 		notification.debugger = debuggerTeam;
2605 		notification.debugger_port = debuggerPort;
2606 		notification.causing_thread = causingThread;
2607 
2608 		// notify the new debugger
2609 		error = write_port_etc(debuggerPort,
2610 			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
2611 			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
2612 		if (error != B_OK) {
2613 			dprintf("install_team_debugger(): Failed to send message to new "
2614 				"debugger: %s\n", strerror(error));
2615 		}
2616 
2617 		// clear the handed-over and handing-over flags
2618 		state = disable_interrupts();
2619 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2620 
2621 		atomic_and(&team->debug_info.flags,
2622 			~(B_TEAM_DEBUG_DEBUGGER_HANDOVER
2623 				| B_TEAM_DEBUG_DEBUGGER_HANDING_OVER));
2624 
2625 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2626 		restore_interrupts(state);
2627 
2628 		finish_debugger_change(team);
2629 
2630 		// notify the nub thread
2631 		kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER,
2632 			NULL, 0);
2633 
2634 		// notify the old debugger
2635 		error = write_port_etc(oldDebuggerPort,
2636 			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
2637 			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
2638 		if (error != B_OK) {
2639 			TRACE(("install_team_debugger(): Failed to send message to old "
2640 				"debugger: %s\n", strerror(error)));
2641 		}
2642 
2643 		TRACE(("install_team_debugger() done: handed over to debugger: team: "
2644 			"%" B_PRId32 ", port: %" B_PRId32 "\n", debuggerTeam,
2645 			debuggerPort));
2646 
2647 		return result;
2648 	}
2649 
2650 	if (done || error != B_OK) {
2651 		TRACE(("install_team_debugger() done1: %" B_PRId32 "\n",
2652 			(error == B_OK ? result : error)));
2653 		finish_debugger_change(team);
2654 		return (error == B_OK ? result : error);
2655 	}
2656 
2657 	// create the debugger write lock semaphore
2658 	char nameBuffer[B_OS_NAME_LENGTH];
2659 	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debugger port "
2660 		"write", teamID);
2661 	sem_id debuggerWriteLock = create_sem(1, nameBuffer);
2662 	if (debuggerWriteLock < 0)
2663 		error = debuggerWriteLock;
2664 
2665 	// create the nub port
2666 	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug", teamID);
2667 	if (error == B_OK) {
2668 		nubPort = create_port(1, nameBuffer);
2669 		if (nubPort < 0)
2670 			error = nubPort;
2671 		else
2672 			result = nubPort;
2673 	}
2674 
2675 	// make the debugger team the port owner; thus we know when the debugger is
2676 	// gone and can clean up
2677 	if (error == B_OK)
2678 		error = set_port_owner(nubPort, debuggerTeam);
2679 
2680 	// create the breakpoint manager
2681 	BreakpointManager* breakpointManager = NULL;
2682 	if (error == B_OK) {
2683 		breakpointManager = new(std::nothrow) BreakpointManager;
2684 		if (breakpointManager != NULL)
2685 			error = breakpointManager->Init();
2686 		else
2687 			error = B_NO_MEMORY;
2688 	}
2689 
2690 	// spawn the nub thread
2691 	thread_id nubThread = -1;
2692 	if (error == B_OK) {
2693 		snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug task",
2694 			teamID);
2695 		nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer,
2696 			B_NORMAL_PRIORITY, NULL, teamID);
2697 		if (nubThread < 0)
2698 			error = nubThread;
2699 	}
2700 
2701 	// now adjust the debug info accordingly
2702 	if (error == B_OK) {
2703 		TeamLocker teamLocker(team);
2704 		state = disable_interrupts();
2705 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2706 
2707 		team->debug_info.breakpoint_manager = breakpointManager;
2708 		install_team_debugger_init_debug_infos(team, debuggerTeam,
2709 			debuggerPort, nubPort, nubThread, debuggerWriteLock,
2710 			causingThread);
2711 
2712 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2713 		restore_interrupts(state);
2714 	}
2715 
2716 	finish_debugger_change(team);
2717 
2718 	// if everything went fine, resume the nub thread, otherwise clean up
2719 	if (error == B_OK) {
2720 		resume_thread(nubThread);
2721 	} else {
2722 		// delete port and terminate thread
2723 		if (nubPort >= 0) {
2724 			set_port_owner(nubPort, B_CURRENT_TEAM);
2725 			delete_port(nubPort);
2726 		}
2727 		if (nubThread >= 0) {
2728 			int32 result;
2729 			wait_for_thread(nubThread, &result);
2730 		}
2731 
2732 		delete breakpointManager;
2733 	}
2734 
2735 	TRACE(("install_team_debugger() done2: %" B_PRId32 "\n",
2736 		(error == B_OK ? result : error)));
2737 	return (error == B_OK ? result : error);
2738 }
2739 
2740 
2741 static status_t
2742 ensure_debugger_installed()
2743 {
2744 	port_id port = install_team_debugger(B_CURRENT_TEAM, -1,
2745 		thread_get_current_thread_id(), true, true);
2746 	return port >= 0 ? B_OK : port;
2747 }
2748 
2749 
2750 // #pragma mark -
2751 
2752 
2753 void
2754 _user_debugger(const char *userMessage)
2755 {
2756 	// install the default debugger, if there is none yet
2757 	status_t error = ensure_debugger_installed();
2758 	if (error != B_OK) {
2759 		// time to commit suicide
2760 		char buffer[128];
2761 		ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer));
2762 		if (length >= 0) {
2763 			dprintf("_user_debugger(): Failed to install debugger. Message is: "
2764 				"`%s'\n", buffer);
2765 		} else {
2766 			dprintf("_user_debugger(): Failed to install debugger. Message is: "
2767 				"%p (%s)\n", userMessage, strerror(length));
2768 		}
2769 		_user_exit_team(1);
2770 	}
2771 
2772 	// prepare the message
2773 	debug_debugger_call message;
2774 	message.message = (void*)userMessage;
2775 
2776 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message,
2777 		sizeof(message), true);
2778 }
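// The user-space entry point for this syscall is the debugger() call from
// <debugger.h>; a rough usage sketch:
//
//	if (invariantBroken)
//		debugger("Foo::Bar(): invariant broken");
//
// If the team has no debugger yet, the default debugger (normally the
// debug_server) is installed first; the calling thread is then stopped and a
// B_DEBUGGER_MESSAGE_DEBUGGER_CALL message is delivered to the debugger port.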
2779 
2780 
2781 int
2782 _user_disable_debugger(int state)
2783 {
2784 	Team *team = thread_get_current_thread()->team;
2785 
2786 	TRACE(("_user_disable_debugger(%d): team: %" B_PRId32 "\n", state,
2787 		team->id));
2788 
2789 	cpu_status cpuState = disable_interrupts();
2790 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2791 
2792 	int32 oldFlags;
2793 	if (state) {
2794 		oldFlags = atomic_or(&team->debug_info.flags,
2795 			B_TEAM_DEBUG_DEBUGGER_DISABLED);
2796 	} else {
2797 		oldFlags = atomic_and(&team->debug_info.flags,
2798 			~B_TEAM_DEBUG_DEBUGGER_DISABLED);
2799 	}
2800 
2801 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2802 	restore_interrupts(cpuState);
2803 
2804 	// TODO: Check whether the return value is really the old state.
2805 	return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED);
2806 }
2807 
2808 
2809 status_t
2810 _user_install_default_debugger(port_id debuggerPort)
2811 {
2812 	// if supplied, check whether the port is a valid port
2813 	if (debuggerPort >= 0) {
2814 		port_info portInfo;
2815 		status_t error = get_port_info(debuggerPort, &portInfo);
2816 		if (error != B_OK)
2817 			return error;
2818 
2819 		// the debugger team must not be the kernel team
2820 		if (portInfo.team == team_get_kernel_team_id())
2821 			return B_NOT_ALLOWED;
2822 	}
2823 
2824 	atomic_set(&sDefaultDebuggerPort, debuggerPort);
2825 
2826 	return B_OK;
2827 }
2828 
2829 
2830 port_id
2831 _user_install_team_debugger(team_id teamID, port_id debuggerPort)
2832 {
2833 	return install_team_debugger(teamID, debuggerPort, -1, false, false);
2834 }
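// Rough user-space sketch of attaching a debugger; install_team_debugger()
// here is the public <debugger.h> call that ends up in this syscall
// (`targetTeam` is a placeholder):
//
//	port_id debuggerPort = create_port(100, "debugger port");
//	port_id nubPort = install_team_debugger(targetTeam, debuggerPort);
//	if (nubPort < 0)
//		;	// error
//
//	// From now on debugger messages (debug_debugger_message_data) arrive on
//	// debuggerPort, and nub commands are written to nubPort -- see
//	// debug_nub_thread() above.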
2835 
2836 
2837 status_t
2838 _user_remove_team_debugger(team_id teamID)
2839 {
2840 	Team* team;
2841 	ConditionVariable debugChangeCondition;
2842 	status_t error = prepare_debugger_change(teamID, debugChangeCondition,
2843 		team);
2844 	if (error != B_OK)
2845 		return error;
2846 
2847 	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);
2848 
2849 	thread_id nubThread = -1;
2850 	port_id nubPort = -1;
2851 
2852 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2853 		// there's a debugger installed
2854 		nubThread = team->debug_info.nub_thread;
2855 		nubPort = team->debug_info.nub_port;
2856 	} else {
2857 		// no debugger installed
2858 		error = B_BAD_VALUE;
2859 	}
2860 
2861 	debugInfoLocker.Unlock();
2862 
2863 	// Delete the nub port -- this will cause the nub thread to terminate and
2864 	// remove the debugger.
2865 	if (nubPort >= 0)
2866 		delete_port(nubPort);
2867 
2868 	finish_debugger_change(team);
2869 
2870 	// wait for the nub thread
2871 	if (nubThread >= 0)
2872 		wait_for_thread(nubThread, NULL);
2873 
2874 	return error;
2875 }
2876 
2877 
2878 status_t
2879 _user_debug_thread(thread_id threadID)
2880 {
2881 	TRACE(("[%" B_PRId32 "] _user_debug_thread(%" B_PRId32 ")\n",
2882 		find_thread(NULL), threadID));
2883 
2884 	// get the thread
2885 	Thread* thread = Thread::GetAndLock(threadID);
2886 	if (thread == NULL)
2887 		return B_BAD_THREAD_ID;
2888 	BReference<Thread> threadReference(thread, true);
2889 	ThreadLocker threadLocker(thread, true);
2890 
2891 	// we can't debug the kernel team
2892 	if (thread->team == team_get_kernel_team())
2893 		return B_NOT_ALLOWED;
2894 
2895 	InterruptsLocker interruptsLocker;
2896 	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
2897 
2898 	// If the thread is already dying, it's too late to debug it.
2899 	if ((thread->debug_info.flags & B_THREAD_DEBUG_DYING) != 0)
2900 		return B_BAD_THREAD_ID;
2901 
2902 	// don't debug the nub thread
2903 	if ((thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) != 0)
2904 		return B_NOT_ALLOWED;
2905 
2906 	// already marked stopped?
2907 	if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0)
2908 		return B_OK;
2909 
2910 	// set the flag that tells the thread to stop as soon as possible
2911 	atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP);
2912 
2913 	update_thread_user_debug_flag(thread);
2914 
2915 	// resume/interrupt the thread, if necessary
2916 	threadDebugInfoLocker.Unlock();
2917 	SpinLocker schedulerLocker(thread->scheduler_lock);
2918 
2919 	switch (thread->state) {
2920 		case B_THREAD_SUSPENDED:
2921 			// thread suspended: wake it up
2922 			scheduler_enqueue_in_run_queue(thread);
2923 			break;
2924 
2925 		default:
2926 			// thread may be waiting: interrupt it
2927 			thread_interrupt(thread, false);
2928 				// TODO: If the thread is already in the kernel and e.g.
2929 				// about to acquire a semaphore (before
2930 				// thread_prepare_to_block()), we won't interrupt it.
2931 				// Maybe we should rather send a signal (SIGTRAP).
2932 			schedulerLocker.Unlock();
2933 
2934 			schedulerLocker.SetTo(thread_get_current_thread()->scheduler_lock,
2935 				false);
2936 			scheduler_reschedule_if_necessary_locked();
2937 			break;
2938 	}
2939 
2940 	return B_OK;
2941 }
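// A sketch of typical use via the public debug_thread() call from
// <debugger.h> (placeholder name: target):
//
//	status_t error = debug_thread(target);
//	// Once the thread actually stops, the debugger receives a message for
//	// it (origin.thread == target) on its debugger port.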
2942 
2943 
2944 void
2945 _user_wait_for_debugger(void)
2946 {
2947 	debug_thread_debugged message;
2948 	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message,
2949 		sizeof(message), false);
2950 }
2951 
2952 
2953 status_t
2954 _user_set_debugger_breakpoint(void *address, uint32 type, int32 length,
2955 	bool watchpoint)
2956 {
2957 	// check the address and size
2958 	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
2959 		return B_BAD_ADDRESS;
2960 	if (watchpoint && length < 0)
2961 		return B_BAD_VALUE;
2962 
2963 	// check whether a debugger is installed already
2964 	team_debug_info teamDebugInfo;
2965 	get_team_debug_info(teamDebugInfo);
2966 	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2967 		return B_BAD_VALUE;
2968 
2969 	// We can't help it: there's a small but relatively harmless race condition
2970 	// here, since a debugger could be installed in the meantime. The worst case
2971 	// is that we install a break/watchpoint the debugger doesn't know about.
2972 
2973 	// set the break/watchpoint
2974 	status_t result;
2975 	if (watchpoint)
2976 		result = arch_set_watchpoint(address, type, length);
2977 	else
2978 		result = arch_set_breakpoint(address);
2979 
2980 	if (result == B_OK)
2981 		update_threads_breakpoints_flag();
2982 
2983 	return result;
2984 }
2985 
2986 
2987 status_t
2988 _user_clear_debugger_breakpoint(void *address, bool watchpoint)
2989 {
2990 	// check the address
2991 	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
2992 		return B_BAD_ADDRESS;
2993 
2994 	// check whether a debugger is installed already
2995 	team_debug_info teamDebugInfo;
2996 	get_team_debug_info(teamDebugInfo);
2997 	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2998 		return B_BAD_VALUE;
2999 
3000 	// We can't help it: there's a small but relatively harmless race condition
3001 	// here, since a debugger could be installed in the meantime. The worst case
3002 	// is that we clear a break/watchpoint the debugger has just installed.
3003 
3004 	// clear the break/watchpoint
3005 	status_t result;
3006 	if (watchpoint)
3007 		result = arch_clear_watchpoint(address);
3008 	else
3009 		result = arch_clear_breakpoint(address);
3010 
3011 	if (result == B_OK)
3012 		update_threads_breakpoints_flag();
3013 
3014 	return result;
3015 }
3016