/*
 * Copyright 2005-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2015, Rene Gollent, rene@gollent.com.
 * Distributed under the terms of the MIT License.
 */


#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include <algorithm>

#include <arch/debug.h>
#include <arch/user_debugger.h>
#include <core_dump.h>
#include <cpu.h>
#include <debugger.h>
#include <kernel.h>
#include <KernelExport.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <ksyscalls.h>
#include <port.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <thread_types.h>
#include <user_debugger.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include <AutoDeleter.h>
#include <util/AutoLock.h>

#include "BreakpointManager.h"


//#define TRACE_USER_DEBUGGER
#ifdef TRACE_USER_DEBUGGER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


// TODO: Since the introduction of team_debug_info::debugger_changed_condition
// there's some potential for simplifications. E.g. clear_team_debug_info() and
// destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus
// arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()).


static port_id sDefaultDebuggerPort = -1;
	// accessed atomically

static timer sProfilingTimers[SMP_MAX_CPUS];
	// a profiling timer for each CPU -- used when a profiled thread is running
	// on that CPU


static void schedule_profiling_timer(Thread* thread, bigtime_t interval);
static int32 profiling_event(timer* unused);
static status_t ensure_debugger_installed();
static void get_team_debug_info(team_debug_info &teamDebugInfo);


static inline status_t
kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
	size_t bufferSize)
{
	return write_port_etc(port, code, buffer, bufferSize, B_KILL_CAN_INTERRUPT,
		0);
}


static status_t
debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
	bool dontWait)
{
	TRACE(("debugger_write(): thread: %" B_PRId32 ", team %" B_PRId32 ", "
		"port: %" B_PRId32 ", code: %" B_PRIx32 ", message: %p, size: %lu, "
		"dontWait: %d\n", thread_get_current_thread()->id,
		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
		dontWait));

	status_t error = B_OK;

	// get the team debug info
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	sem_id writeLock = teamDebugInfo.debugger_write_lock;

	// get the write lock
	TRACE(("debugger_write(): acquiring write lock...\n"));
	error = acquire_sem_etc(writeLock, 1,
		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK) {
		TRACE(("debugger_write() done1: %" B_PRIx32 "\n", error));
		return error;
	}

	// re-get the team debug info
	get_team_debug_info(teamDebugInfo);

	if (teamDebugInfo.debugger_port != port
		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
		// The debugger has changed in the meantime or we are about to be
		// handed over to a new debugger. In either case we don't send the
		// message.
		TRACE(("debugger_write(): %s\n",
"debugger port changed" 114 : "handover flag set"))); 115 } else { 116 TRACE(("debugger_write(): writing to port...\n")); 117 118 error = write_port_etc(port, code, buffer, bufferSize, 119 dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0); 120 } 121 122 // release the write lock 123 release_sem(writeLock); 124 125 TRACE(("debugger_write() done: %" B_PRIx32 "\n", error)); 126 127 return error; 128 } 129 130 131 /*! Updates the thread::flags field according to what user debugger flags are 132 set for the thread. 133 Interrupts must be disabled and the thread's debug info lock must be held. 134 */ 135 static void 136 update_thread_user_debug_flag(Thread* thread) 137 { 138 if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0) 139 atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD); 140 else 141 atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD); 142 } 143 144 145 /*! Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the 146 given thread. 147 Interrupts must be disabled and the thread debug info lock must be held. 148 */ 149 static void 150 update_thread_breakpoints_flag(Thread* thread) 151 { 152 Team* team = thread->team; 153 154 if (arch_has_breakpoints(&team->debug_info.arch_info)) 155 atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED); 156 else 157 atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED); 158 } 159 160 161 /*! Updates the Thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all 162 threads of the current team. 163 */ 164 static void 165 update_threads_breakpoints_flag() 166 { 167 Team* team = thread_get_current_thread()->team; 168 169 TeamLocker teamLocker(team); 170 171 Thread* thread = team->thread_list; 172 173 if (arch_has_breakpoints(&team->debug_info.arch_info)) { 174 for (; thread != NULL; thread = thread->team_next) 175 atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED); 176 } else { 177 for (; thread != NULL; thread = thread->team_next) 178 atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED); 179 } 180 } 181 182 183 /*! Updates the thread::flags B_TEAM_DEBUG_DEBUGGER_INSTALLED bit of the 184 given thread, which must be the current thread. 185 */ 186 static void 187 update_thread_debugger_installed_flag(Thread* thread) 188 { 189 Team* team = thread->team; 190 191 if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) 192 atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED); 193 else 194 atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED); 195 } 196 197 198 /*! Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all 199 threads of the given team. 200 The team's lock must be held. 201 */ 202 static void 203 update_threads_debugger_installed_flag(Team* team) 204 { 205 Thread* thread = team->thread_list; 206 207 if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) { 208 for (; thread != NULL; thread = thread->team_next) 209 atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED); 210 } else { 211 for (; thread != NULL; thread = thread->team_next) 212 atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED); 213 } 214 } 215 216 217 /** 218 * For the first initialization the function must be called with \a initLock 219 * set to \c true. If it would be possible that another thread accesses the 220 * structure at the same time, `lock' must be held when calling the function. 
 */
void
clear_team_debug_info(struct team_debug_info *info, bool initLock)
{
	if (info) {
		arch_clear_team_debug_info(&info->arch_info);
		atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->nub_thread = -1;
		info->nub_port = -1;
		info->debugger_write_lock = -1;
		info->causing_thread = -1;
		info->image_event = 0;
		info->breakpoint_manager = NULL;

		if (initLock) {
			B_INITIALIZE_SPINLOCK(&info->lock);
			info->debugger_changed_condition = NULL;
		}
	}
}

/**
 * `lock' must not be held nor may interrupts be disabled.
 * \a info must not be a member of a team struct (or the team struct must no
 * longer be accessible, i.e. the team should already be removed).
 *
 * In case the team is still accessible, the procedure is:
 * 1. get `lock'
 * 2. copy the team debug info on stack
 * 3. call clear_team_debug_info() on the team debug info
 * 4. release `lock'
 * 5. call destroy_team_debug_info() on the copied team debug info
 */
static void
destroy_team_debug_info(struct team_debug_info *info)
{
	if (info) {
		arch_destroy_team_debug_info(&info->arch_info);

		// delete the breakpoint manager
		delete info->breakpoint_manager;
		info->breakpoint_manager = NULL;

		// delete the debugger port write lock
		if (info->debugger_write_lock >= 0) {
			delete_sem(info->debugger_write_lock);
			info->debugger_write_lock = -1;
		}

		// delete the nub port
		if (info->nub_port >= 0) {
			set_port_owner(info->nub_port, B_CURRENT_TEAM);
			delete_port(info->nub_port);
			info->nub_port = -1;
		}

		// wait for the nub thread
		if (info->nub_thread >= 0) {
			if (info->nub_thread != thread_get_current_thread()->id) {
				int32 result;
				wait_for_thread(info->nub_thread, &result);
			}

			info->nub_thread = -1;
		}

		atomic_set(&info->flags, 0);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->causing_thread = -1;
		info->image_event = -1;
	}
}

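// A minimal sketch (not literal code from this file) of the teardown
// procedure described above destroy_team_debug_info(), for the case that the
// team is still accessible -- nub_thread_cleanup() below follows this pattern:
//
//	cpu_status state = disable_interrupts();
//	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
//	team_debug_info copy = team->debug_info;
//		// copy the info on the stack
//	clear_team_debug_info(&team->debug_info, false);
//	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
//	restore_interrupts(state);
//	destroy_team_debug_info(&copy);
//		// destroy the copy with no locks held
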
void
init_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		B_INITIALIZE_SPINLOCK(&info->lock);
		arch_clear_thread_debug_info(&info->arch_info);
		info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
		info->profile.installed_timer = NULL;
	}
}


/*!	Clears the debug info for the current thread.
	Invoked with thread debug info lock being held.
*/
void
clear_thread_debug_info(struct thread_debug_info *info, bool dying)
{
	if (info) {
		// cancel profiling timer
		if (info->profile.installed_timer != NULL) {
			cancel_timer(info->profile.installed_timer);
			info->profile.installed_timer = NULL;
		}

		arch_clear_thread_debug_info(&info->arch_info);
		atomic_set(&info->flags,
			B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ? B_THREAD_DEBUG_DYING : 0));
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
	}
}


void
destroy_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		area_id sampleArea = info->profile.sample_area;
		if (sampleArea >= 0) {
			area_info areaInfo;
			if (get_area_info(sampleArea, &areaInfo) == B_OK) {
				unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
				delete_area(sampleArea);
			}
		}

		arch_destroy_thread_debug_info(&info->arch_info);

		if (info->debug_port >= 0) {
			delete_port(info->debug_port);
			info->debug_port = -1;
		}

		info->ignore_signals = 0;
		info->ignore_signals_once = 0;

		atomic_set(&info->flags, 0);
	}
}


static status_t
prepare_debugger_change(team_id teamID, ConditionVariable& condition,
	Team*& team)
{
	// We look up the team by ID, even in the case of the current team, so we
	// can be sure that the team is not already dying.
	if (teamID == B_CURRENT_TEAM)
		teamID = thread_get_current_thread()->team->id;

	while (true) {
		// get the team
		team = Team::GetAndLock(teamID);
		if (team == NULL)
			return B_BAD_TEAM_ID;
		BReference<Team> teamReference(team, true);
		TeamLocker teamLocker(team, true);

		// don't allow messing with the kernel team
		if (team == team_get_kernel_team())
			return B_NOT_ALLOWED;

		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return B_OK;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();
		teamLocker.Unlock();

		entry.Wait();
	}
}


static void
prepare_debugger_change(Team* team, ConditionVariable& condition)
{
	while (true) {
		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();

		entry.Wait();
	}
}


static void
finish_debugger_change(Team* team)
{
	// unset our condition variable and notify all threads waiting on it
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
	team->debug_info.debugger_changed_condition = NULL;

	condition->NotifyAll();
}

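// Note: Changes of a team's debugger (installation, handover, removal) are
// bracketed by prepare_debugger_change()/finish_debugger_change(). The
// condition variable stored in team_debug_info::debugger_changed_condition
// serializes concurrent changers; stopped threads are told about the change
// via a B_DEBUGGED_THREAD_DEBUGGER_CHANGED broadcast (cf. debug_nub_thread()
// and nub_thread_cleanup()) and re-check their situation in
// thread_hit_debug_event_internal().
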
void
user_debug_prepare_for_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized, change the ownership of the debug port for the thread
	// to the kernel team, since exec_team() deletes all ports owned by this
	// team. We change the ownership back later.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if ((thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team_get_kernel_team_id());
	}
}


void
user_debug_finish_after_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized for this thread, change the ownership of its debug port
	// back to this team.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team->id);
	}
}


void
init_user_debug()
{
#ifdef ARCH_INIT_USER_DEBUG
	ARCH_INIT_USER_DEBUG();
#endif
}


static void
get_team_debug_info(team_debug_info &teamDebugInfo)
{
	Thread *thread = thread_get_current_thread();

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);

	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));

	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);
}


static status_t
thread_hit_debug_event_internal(debug_debugger_message event,
	const void *message, int32 size, bool requireDebugger, bool &restart)
{
	restart = false;
	Thread *thread = thread_get_current_thread();

	TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", event: %" B_PRIu32
		", message: %p, size: %" B_PRId32 "\n", thread->id, (uint32)event,
		message, size));

	// check, if there's a debug port already
	bool setPort = !(atomic_get(&thread->debug_info.flags)
		& B_THREAD_DEBUG_INITIALIZED);

	// create a port, if there is none yet
	port_id port = -1;
	if (setPort) {
		char nameBuffer[128];
		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %" B_PRId32,
			thread->id);

		port = create_port(1, nameBuffer);
		if (port < 0) {
			dprintf("thread_hit_debug_event(): Failed to create debug port: "
				"%s\n", strerror(port));
			return port;
		}
	}

	// check the debug info structures once more: get the debugger port, set
	// the thread's debug port, and update the thread's debug flags
	port_id deletePort = port;
	port_id debuggerPort = -1;
	port_id nubPort = -1;
	status_t error = B_OK;
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	uint32 threadFlags = thread->debug_info.flags;
	threadFlags &= ~B_THREAD_DEBUG_STOP;
	bool debuggerInstalled
		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	if (thread->id == thread->team->debug_info.nub_thread) {
		// Ugh, we're the nub thread. We shouldn't be here.
		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %" B_PRId32
			"\n", thread->id));

		error = B_ERROR;
	} else if (debuggerInstalled || !requireDebugger) {
		if (debuggerInstalled) {
			debuggerPort = thread->team->debug_info.debugger_port;
			nubPort = thread->team->debug_info.nub_port;
		}

		if (setPort) {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				// someone created a port for us (the port we've created will
				// be deleted below)
				port = thread->debug_info.debug_port;
			} else {
				thread->debug_info.debug_port = port;
				deletePort = -1;	// keep the port
				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
			}
		} else {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				port = thread->debug_info.debug_port;
			} else {
				// someone deleted our port
				error = B_ERROR;
			}
		}
	} else
		error = B_ERROR;

	// update the flags
	if (error == B_OK)
		threadFlags |= B_THREAD_DEBUG_STOPPED;
	atomic_set(&thread->debug_info.flags, threadFlags);

	update_thread_user_debug_flag(thread);

	threadDebugInfoLocker.Unlock();
	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);

	// delete the superfluous port
	if (deletePort >= 0)
		delete_port(deletePort);

	if (error != B_OK) {
		TRACE(("thread_hit_debug_event() error: thread: %" B_PRId32 ", error: "
			"%" B_PRIx32 "\n", thread->id, error));
		return error;
	}

	// send a message to the debugger port
	if (debuggerInstalled) {
		// update the message's origin info first
		debug_origin *origin = (debug_origin *)message;
		origin->thread = thread->id;
		origin->team = thread->team->id;
		origin->nub_port = nubPort;

		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", sending "
			"message to debugger port %" B_PRId32 "\n", thread->id,
			debuggerPort));

		error = debugger_write(debuggerPort, event, message, size, false);
	}

	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
	bool singleStep = false;

	if (error == B_OK) {
		bool done = false;
		while (!done) {
			// read a command from the debug port
			int32 command;
			debugged_thread_message_data commandMessage;
			ssize_t commandMessageSize = read_port_etc(port, &command,
				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
				0);

			if (commandMessageSize < 0) {
				error = commandMessageSize;
				TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed "
					"to receive message from port %" B_PRId32 ": %" B_PRIx32 "\n",
					thread->id, port, error));
				break;
			}

			switch (command) {
				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
						thread->id));
					result = commandMessage.continue_thread.handle_event;

					singleStep = commandMessage.continue_thread.single_step;
					done = true;
					break;

				case B_DEBUGGED_THREAD_SET_CPU_STATE:
				{
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
						thread->id));
					arch_set_debug_cpu_state(
						&commandMessage.set_cpu_state.cpu_state);

					break;
				}

				case B_DEBUGGED_THREAD_GET_CPU_STATE:
				{
					port_id replyPort = commandMessage.get_cpu_state.reply_port;

					// prepare the message
					debug_nub_get_cpu_state_reply replyMessage;
					replyMessage.error = B_OK;
					replyMessage.message = event;
					arch_get_debug_cpu_state(&replyMessage.cpu_state);

					// send it
					error = kill_interruptable_write_port(replyPort, event,
						&replyMessage, sizeof(replyMessage));

					break;
				}

				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
				{
					// Check, if the debugger really changed, i.e. is different
					// from the one we know.
					team_debug_info teamDebugInfo;
					get_team_debug_info(teamDebugInfo);

					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
						if (!debuggerInstalled
							|| teamDebugInfo.debugger_port != debuggerPort) {
							// debugger was installed or has changed: restart
							// this function
							restart = true;
							done = true;
						}
					} else {
						if (debuggerInstalled) {
							// debugger is gone: continue the thread normally
							done = true;
						}
					}

					break;
				}
			}
		}
	} else {
		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed to send "
			"message to debugger port %" B_PRId32 ": %" B_PRIx32 "\n",
			thread->id, debuggerPort, error));
	}

	// update the thread debug info
	bool destroyThreadInfo = false;
	thread_debug_info threadDebugInfo;

	state = disable_interrupts();
	threadDebugInfoLocker.Lock();

	// check, if the team is still being debugged
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// update the single-step flag
		if (singleStep) {
			atomic_or(&thread->debug_info.flags,
				B_THREAD_DEBUG_SINGLE_STEP);
			atomic_or(&thread->flags, THREAD_FLAGS_SINGLE_STEP);
		} else {
			atomic_and(&thread->debug_info.flags,
				~(int32)B_THREAD_DEBUG_SINGLE_STEP);
		}

		// unset the "stopped" state
		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);

		update_thread_user_debug_flag(thread);

	} else {
		// the debugger is gone: cleanup our info completely
		threadDebugInfo = thread->debug_info;
		clear_thread_debug_info(&thread->debug_info, false);
		destroyThreadInfo = true;
	}

	threadDebugInfoLocker.Unlock();
	restore_interrupts(state);

	// enable/disable single stepping
	arch_update_thread_single_step();

	if (destroyThreadInfo)
		destroy_thread_debug_info(&threadDebugInfo);

	return (error == B_OK ? result : error);
}


static status_t
thread_hit_debug_event(debug_debugger_message event, const void *message,
	int32 size, bool requireDebugger)
{
	status_t result;
	bool restart;
	do {
		restart = false;
		result = thread_hit_debug_event_internal(event, message, size,
			requireDebugger, restart);
	} while (result >= 0 && restart);

	// Prepare to continue -- we install a debugger change condition, so no one
	// will change the debugger while we're playing with the breakpoint manager.
	// TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
	Team* team = thread_get_current_thread()->team;
	ConditionVariable debugChangeCondition;
	prepare_debugger_change(team, debugChangeCondition);

	if (team->debug_info.breakpoint_manager != NULL) {
		bool isSyscall;
		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
		if (pc != NULL && !isSyscall)
			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
	}

	finish_debugger_change(team);

	return result;
}


static status_t
thread_hit_serious_debug_event(debug_debugger_message event,
	const void *message, int32 messageSize)
{
	// ensure that a debugger is installed for this team
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		Thread *thread = thread_get_current_thread();
		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
			"thread: %" B_PRId32 ": %s\n", thread->id, strerror(error));
		return error;
	}

	// enter the debug loop
	return thread_hit_debug_event(event, message, messageSize, true);
}


void
user_debug_pre_syscall(uint32 syscall, void *args)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether pre-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_pre_syscall message;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
		sizeof(message), true);
}


void
user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
	bigtime_t startTime)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether post-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_post_syscall message;
	message.start_time = startTime;
	message.end_time = system_time();
	message.return_value = returnValue;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
		sizeof(message), true);
}


/**	\brief To be called when an unhandled processor exception (error/fault)
 *		occurred.
 *	\param exception The debug_why_stopped value identifying the kind of fault.
 *	\param signal The signal corresponding to the exception.
 *	\return \c true, if the caller shall continue normally, i.e. usually send
 *		a deadly signal. \c false, if the debugger insists on continuing the
 *		program (e.g. because it has removed the cause of the problem).
 */
bool
user_debug_exception_occurred(debug_exception_type exception, int signal)
{
	// First check whether there's a signal handler installed for the signal.
	// If so, we don't want to install a debugger for the team. We always send
	// the signal instead. An already installed debugger will be notified, if
	// it has requested notifications of signals.
	struct sigaction signalAction;
	if (sigaction(signal, NULL, &signalAction) == 0
		&& signalAction.sa_handler != SIG_DFL) {
		return true;
	}

	// prepare the message
	debug_exception_occurred message;
	message.exception = exception;
	message.signal = signal;

	status_t result = thread_hit_serious_debug_event(
		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


bool
user_debug_handle_signal(int signal, struct sigaction *handler, bool deadly)
{
	// check, if a debugger is installed and is interested in signals
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
		return true;
	}

	// prepare the message
	debug_signal_received message;
	message.signal = signal;
	message.handler = *handler;
	message.deadly = deadly;

	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
		&message, sizeof(message), true);
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


void
user_debug_stop_thread()
{
	// check whether this is actually an emulated single-step notification
	Thread* thread = thread_get_current_thread();
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	bool singleStepped = false;
	if ((atomic_and(&thread->debug_info.flags,
			~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
			& B_THREAD_DEBUG_NOTIFY_SINGLE_STEP) != 0) {
		singleStepped = true;
	}

	threadDebugInfoLocker.Unlock();

	if (singleStepped) {
		user_debug_single_stepped();
	} else {
		debug_thread_debugged message;
		thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED,
			&message, sizeof(message));
	}
}


void
user_debug_team_created(team_id teamID)
{
	// check, if a debugger is installed and is interested in team creation
	// events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_created message;
	message.new_team = teamID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_team_deleted(team_id teamID, port_id debuggerPort)
{
	if (debuggerPort >= 0) {
		TRACE(("user_debug_team_deleted(team: %" B_PRId32 ", debugger port: "
			"%" B_PRId32 ")\n", teamID, debuggerPort));

		debug_team_deleted message;
		message.origin.thread = -1;
		message.origin.team = teamID;
		message.origin.nub_port = -1;
		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
			sizeof(message), B_RELATIVE_TIMEOUT, 0);
	}
}


void
user_debug_team_exec()
{
	// check, if a debugger is installed and is interested in team creation
	// events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_exec message;
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
		sizeof(message), true);
}


/*!	Called by a new userland thread to update the debugging related flags of
	\c Thread::flags before the thread first enters userland.
	\param thread The calling thread.
*/
void
user_debug_update_new_thread_flags(Thread* thread)
{
	// lock it and update its flags
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	update_thread_user_debug_flag(thread);
	update_thread_breakpoints_flag(thread);
	update_thread_debugger_installed_flag(thread);
}


void
user_debug_thread_created(thread_id threadID)
{
	// check, if a debugger is installed and is interested in thread events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// prepare the message
	debug_thread_created message;
	message.new_thread = threadID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_thread_deleted(team_id teamID, thread_id threadID)
{
	// Things are a bit complicated here, since this thread no longer belongs to
	// the debugged team (but to the kernel). So we can't use debugger_write().

	// get the team debug flags and debugger port
	Team* team = Team::Get(teamID);
	if (team == NULL)
		return;
	BReference<Team> teamReference(team, true);

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;
	sem_id writeLock = team->debug_info.debugger_write_lock;

	debugInfoLocker.Unlock();

	// check, if a debugger is installed and is interested in thread events
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// acquire the debugger write lock
	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK)
		return;

	// re-get the team debug info -- we need to check whether anything changed
	debugInfoLocker.Lock();

	teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id newDebuggerPort = team->debug_info.debugger_port;

	debugInfoLocker.Unlock();

	// Send the message only if the debugger hasn't changed in the meantime or
	// the team is about to be handed over.
	if (newDebuggerPort == debuggerPort
		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
		debug_thread_deleted message;
		message.origin.thread = threadID;
		message.origin.team = teamID;
		message.origin.nub_port = -1;

		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
	}

	// release the debugger write lock
	release_sem(writeLock);
}


/*!	Called for a thread that is about to die, cleaning up all user debug
	facilities installed for the thread.
	\param thread The current thread, the one that is going to die.
*/
void
user_debug_thread_exiting(Thread* thread)
{
	// thread is the current thread, so using team is safe
	Team* team = thread->team;

	InterruptsLocker interruptsLocker;

	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	// check, if a debugger is installed
	if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
		|| debuggerPort < 0) {
		return;
	}

	// detach the profile info and mark the thread dying
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	thread_debug_info& threadDebugInfo = thread->debug_info;
	if (threadDebugInfo.profile.samples == NULL)
		return;

	area_id sampleArea = threadDebugInfo.profile.sample_area;
	int32 sampleCount = threadDebugInfo.profile.sample_count;
	int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
	int32 stackDepth = threadDebugInfo.profile.stack_depth;
	bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
	int32 imageEvent = threadDebugInfo.profile.image_event;
	threadDebugInfo.profile.sample_area = -1;
	threadDebugInfo.profile.samples = NULL;
	threadDebugInfo.profile.buffer_full = false;

	atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);

	threadDebugInfoLocker.Unlock();
	interruptsLocker.Unlock();

	// notify the debugger
	debug_profiler_update message;
	message.origin.thread = thread->id;
	message.origin.team = thread->team->id;
	message.origin.nub_port = -1;	// asynchronous message
	message.sample_count = sampleCount;
	message.dropped_ticks = droppedTicks;
	message.stack_depth = stackDepth;
	message.variable_stack_depth = variableStackDepth;
	message.image_event = imageEvent;
	message.stopped = true;
	debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
		&message, sizeof(message), false);

	if (sampleArea >= 0) {
		area_info areaInfo;
		if (get_area_info(sampleArea, &areaInfo) == B_OK) {
			unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
			delete_area(sampleArea);
		}
	}
}


void
user_debug_image_created(const image_info *imageInfo)
{
	// check, if a debugger is installed and is interested in image events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_created message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_image_deleted(const image_info *imageInfo)
{
	// check, if a debugger is installed and is interested in image events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_deleted message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message,
		sizeof(message), true);
}


void
user_debug_breakpoint_hit(bool software)
{
	// prepare the message
	debug_breakpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
		sizeof(message));
}


void
user_debug_watchpoint_hit()
{
	// prepare the message
	debug_watchpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
		sizeof(message));
}


void
user_debug_single_stepped()
{
	// clear the single-step thread flag
	Thread* thread = thread_get_current_thread();
	atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_SINGLE_STEP);

	// prepare the message
	debug_single_step message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
		sizeof(message));
}


/*!	Schedules the profiling timer for the current thread.
	The caller must hold the thread's debug info lock.
	\param thread The current thread.
	\param interval The time after which the timer should fire.
*/
static void
schedule_profiling_timer(Thread* thread, bigtime_t interval)
{
	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
	thread->debug_info.profile.installed_timer = timer;
	thread->debug_info.profile.timer_end = system_time() + interval;
	add_timer(timer, &profiling_event, interval, B_ONE_SHOT_RELATIVE_TIMER);
}

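// Note: profile.timer_end records when the timer scheduled here is due;
// user_debug_thread_unscheduled() uses it to compute the remaining
// interval_left, which user_debug_thread_scheduled() passes back in when the
// thread gets a CPU again.
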
/*!	Samples the current thread's instruction pointer/stack trace.
	The caller must hold the current thread's debug info lock.
	\param flushBuffer Return parameter: Set to \c true when the sampling
		buffer must be flushed.
*/
static bool
profiling_do_sample(bool& flushBuffer)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	if (debugInfo.profile.samples == NULL)
		return false;

	// Check, whether the buffer is full or an image event occurred since the
	// last sample was taken.
	int32 maxSamples = debugInfo.profile.max_samples;
	int32 sampleCount = debugInfo.profile.sample_count;
	int32 stackDepth = debugInfo.profile.stack_depth;
	int32 imageEvent = thread->team->debug_info.image_event;
	if (debugInfo.profile.sample_count > 0) {
		if (debugInfo.profile.last_image_event < imageEvent
			&& debugInfo.profile.variable_stack_depth
			&& sampleCount + 2 <= maxSamples) {
			// an image event occurred, but we use variable stack depth and
			// have enough room in the buffer to indicate an image event
			addr_t* event = debugInfo.profile.samples + sampleCount;
			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
			event[1] = imageEvent;
			sampleCount += 2;
			debugInfo.profile.sample_count = sampleCount;
			debugInfo.profile.last_image_event = imageEvent;
		}

		if (debugInfo.profile.last_image_event < imageEvent
			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
			if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
				flushBuffer = true;
				return true;
			}

			// We can't flush the buffer now, since we interrupted a kernel
			// function. If the buffer is not full yet, we add the samples,
			// otherwise we have to drop them.
			if (maxSamples - sampleCount < stackDepth) {
				debugInfo.profile.dropped_ticks++;
				return true;
			}
		}
	} else {
		// first sample -- set the image event
		debugInfo.profile.image_event = imageEvent;
		debugInfo.profile.last_image_event = imageEvent;
	}

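	// Buffer layout note: With variable stack depth each hit is stored as a
	// counted group (the first slot holds the number of return addresses that
	// follow), and image events take two slots (B_DEBUG_PROFILE_IMAGE_EVENT,
	// event counter). With fixed stack depth every hit occupies exactly
	// stackDepth slots, zero-padded if the stack trace is shorter.
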
	// get the samples
	addr_t* returnAddresses = debugInfo.profile.samples
		+ debugInfo.profile.sample_count;
	if (debugInfo.profile.variable_stack_depth) {
		// variable sample count per hit
		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
			stackDepth - 1, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

		debugInfo.profile.sample_count += *returnAddresses + 1;
	} else {
		// fixed sample count per hit
		if (stackDepth > 1) {
			int32 count = arch_debug_get_stack_trace(returnAddresses,
				stackDepth, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

			for (int32 i = count; i < stackDepth; i++)
				returnAddresses[i] = 0;
		} else
			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);

		debugInfo.profile.sample_count += stackDepth;
	}

	return true;
}


static void
profiling_buffer_full(void*)
{
	// It is undefined whether the function is called with interrupts enabled
	// or disabled. We are allowed to enable interrupts, though. First make
	// sure interrupts are disabled.
	disable_interrupts();

	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
		int32 sampleCount = debugInfo.profile.sample_count;
		int32 droppedTicks = debugInfo.profile.dropped_ticks;
		int32 stackDepth = debugInfo.profile.stack_depth;
		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
		int32 imageEvent = debugInfo.profile.image_event;

		// notify the debugger
		debugInfo.profile.sample_count = 0;
		debugInfo.profile.dropped_ticks = 0;

		threadDebugInfoLocker.Unlock();
		enable_interrupts();

		// prepare the message
		debug_profiler_update message;
		message.sample_count = sampleCount;
		message.dropped_ticks = droppedTicks;
		message.stack_depth = stackDepth;
		message.variable_stack_depth = variableStackDepth;
		message.image_event = imageEvent;
		message.stopped = false;

		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
			sizeof(message), false);

		disable_interrupts();
		threadDebugInfoLocker.Lock();

		// do the sampling and reschedule timer, if still profiling this thread
		bool flushBuffer;
		if (profiling_do_sample(flushBuffer)) {
			debugInfo.profile.buffer_full = false;
			schedule_profiling_timer(thread, debugInfo.profile.interval);
		}
	}

	threadDebugInfoLocker.Unlock();
	enable_interrupts();
}


/*!	Profiling timer event callback.
	Called with interrupts disabled.
*/
static int32
profiling_event(timer* /*unused*/)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	bool flushBuffer = false;
	if (profiling_do_sample(flushBuffer)) {
		if (flushBuffer) {
			// The sample buffer needs to be flushed; we'll have to notify the
			// debugger. We can't do that right here. Instead we set a post
			// interrupt callback doing that for us, and don't reschedule the
			// timer yet.
			thread->post_interrupt_callback = profiling_buffer_full;
			debugInfo.profile.installed_timer = NULL;
			debugInfo.profile.buffer_full = true;
		} else
			schedule_profiling_timer(thread, debugInfo.profile.interval);
	} else
		debugInfo.profile.installed_timer = NULL;

	return B_HANDLED_INTERRUPT;
}


/*!	Called by the scheduler when a debugged thread has been unscheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_unscheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// if running, cancel the profiling timer
	struct timer* timer = thread->debug_info.profile.installed_timer;
	if (timer != NULL) {
		// track remaining time
		bigtime_t left = thread->debug_info.profile.timer_end - system_time();
		thread->debug_info.profile.interval_left = max_c(left, 0);
		thread->debug_info.profile.installed_timer = NULL;

		// cancel timer
		threadDebugInfoLocker.Unlock();
			// not necessary, but doesn't harm and reduces contention
		cancel_timer(timer);
			// since invoked on the same CPU, this will not possibly wait for
			// an already called timer hook
	}
}

/*!	Called by the scheduler when a debugged thread has been scheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_scheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	if (thread->debug_info.profile.samples != NULL
		&& !thread->debug_info.profile.buffer_full) {
		// install profiling timer
		schedule_profiling_timer(thread,
			thread->debug_info.profile.interval_left);
	}
}


/*!	\brief Called by the debug nub thread of a team to broadcast a message to
		all threads of the team that are initialized for debugging (and
		thus have a debug port).
*/
static void
broadcast_debugged_thread_message(Thread *nubThread, int32 code,
	const void *message, int32 size)
{
	// iterate through the threads
	thread_info threadInfo;
	int32 cookie = 0;
	while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
			== B_OK) {
		// get the thread and lock it
		Thread* thread = Thread::GetAndLock(threadInfo.thread);
		if (thread == NULL)
			continue;

		BReference<Thread> threadReference(thread, true);
		ThreadLocker threadLocker(thread, true);

		// get the thread's debug port
		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		port_id threadDebugPort = -1;
		if (thread && thread != nubThread && thread->team == nubThread->team
			&& (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
			&& (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
			threadDebugPort = thread->debug_info.debug_port;
		}

		threadDebugInfoLocker.Unlock();
		threadLocker.Unlock();

		// send the message to the thread
		if (threadDebugPort >= 0) {
			status_t error = kill_interruptable_write_port(threadDebugPort,
				code, message, size);
			if (error != B_OK) {
				TRACE(("broadcast_debugged_thread_message(): Failed to send "
					"message to thread %" B_PRId32 ": %" B_PRIx32 "\n",
					thread->id, error));
			}
		}
	}
}


static void
nub_thread_cleanup(Thread *nubThread)
{
	TRACE(("nub_thread_cleanup(%" B_PRId32 "): debugger port: %" B_PRId32 "\n",
		nubThread->id, nubThread->team->debug_info.debugger_port));

	ConditionVariable debugChangeCondition;
	prepare_debugger_change(nubThread->team, debugChangeCondition);

	team_debug_info teamDebugInfo;
	bool destroyDebugInfo = false;

	TeamLocker teamLocker(nubThread->team);
		// required by update_threads_debugger_installed_flag()

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	team_debug_info &info = nubThread->team->debug_info;
	if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
		&& info.nub_thread == nubThread->id) {
		teamDebugInfo = info;
		clear_team_debug_info(&info, false);
		destroyDebugInfo = true;
	}

	// update the thread::flags fields
	update_threads_debugger_installed_flag(nubThread->team);

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	teamLocker.Unlock();

	if (destroyDebugInfo)
		teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();

	finish_debugger_change(nubThread->team);

	if (destroyDebugInfo)
		destroy_team_debug_info(&teamDebugInfo);

	// notify all threads that the debugger is gone
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
}


/**	\brief Debug nub thread helper function that returns the debug port of
 *		a thread of the same team.
 */
static status_t
debug_nub_thread_get_thread_debug_port(Thread *nubThread,
	thread_id threadID, port_id &threadDebugPort)
{
	threadDebugPort = -1;

	// get the thread
	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// get the debug port
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	if (thread->team != nubThread->team)
		return B_BAD_VALUE;
	if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) == 0)
		return B_BAD_THREAD_STATE;

	threadDebugPort = thread->debug_info.debug_port;

	threadDebugInfoLocker.Unlock();

	if (threadDebugPort < 0)
		return B_ERROR;

	return B_OK;
}

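// Note: A B_BAD_THREAD_STATE result means the thread belongs to this team but
// is not stopped at its debug port; the B_DEBUG_MESSAGE_CONTINUE_THREAD
// handler below uses this to resume threads that are merely suspended.
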
static status_t
debug_nub_thread(void *)
{
	Thread *nubThread = thread_get_current_thread();

	// check, if we're still the current nub thread and get our port
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	if (nubThread->team->debug_info.nub_thread != nubThread->id) {
		RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
		restore_interrupts(state);
		return 0;
	}

	port_id port = nubThread->team->debug_info.nub_port;
	sem_id writeLock = nubThread->team->debug_info.debugger_write_lock;
	BreakpointManager* breakpointManager
		= nubThread->team->debug_info.breakpoint_manager;

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	TRACE(("debug_nub_thread() thread: %" B_PRId32 ", team %" B_PRId32 ", nub "
		"port: %" B_PRId32 "\n", nubThread->id, nubThread->team->id, port));

	// notify all threads that a debugger has been installed
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);

	// command processing loop
	while (true) {
		int32 command;
		debug_nub_message_data message;
		ssize_t messageSize = read_port_etc(port, &command, &message,
			sizeof(message), B_KILL_CAN_INTERRUPT, 0);

		if (messageSize < 0) {
			// The port is no longer valid or we were interrupted by a kill
			// signal: If we are still listed in the team's debug info as nub
			// thread, we need to update that.
			nub_thread_cleanup(nubThread);

			TRACE(("nub thread %" B_PRId32 ": terminating: %lx\n",
				nubThread->id, messageSize));

			return messageSize;
		}

		bool sendReply = false;
		union {
			debug_nub_read_memory_reply read_memory;
			debug_nub_write_memory_reply write_memory;
			debug_nub_get_cpu_state_reply get_cpu_state;
			debug_nub_set_breakpoint_reply set_breakpoint;
			debug_nub_set_watchpoint_reply set_watchpoint;
			debug_nub_get_signal_masks_reply get_signal_masks;
			debug_nub_get_signal_handler_reply get_signal_handler;
			debug_nub_start_profiler_reply start_profiler;
			debug_profiler_update profiler_update;
			debug_nub_write_core_file_reply write_core_file;
		} reply;
		int32 replySize = 0;
		port_id replyPort = -1;

		// process the command
		switch (command) {
			case B_DEBUG_MESSAGE_READ_MEMORY:
			{
				// get the parameters
				replyPort = message.read_memory.reply_port;
				void *address = message.read_memory.address;
				int32 size = message.read_memory.size;
				status_t result = B_OK;

				// check the parameters
				if (!BreakpointManager::CanAccessAddress(address, false))
					result = B_BAD_ADDRESS;
				else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE)
					result = B_BAD_VALUE;

				// read the memory
				size_t bytesRead = 0;
				if (result == B_OK) {
					result = breakpointManager->ReadMemory(address,
						reply.read_memory.data, size, bytesRead);
				}
				reply.read_memory.error = result;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_READ_MEMORY: "
					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
					", result: %" B_PRIx32 ", read: %ld\n", nubThread->id,
					replyPort, address, size, result, bytesRead));

				// send only as much data as necessary
				reply.read_memory.size = bytesRead;
				replySize = reply.read_memory.data + bytesRead - (char*)&reply;
				sendReply = true;
				break;
			}

			case B_DEBUG_MESSAGE_WRITE_MEMORY:
			{
				// get the parameters
				replyPort = message.write_memory.reply_port;
				void *address = message.write_memory.address;
				int32 size = message.write_memory.size;
				const char *data = message.write_memory.data;
				int32 realSize = (char*)&message + messageSize - data;
				status_t result = B_OK;

				// check the parameters
				if (!BreakpointManager::CanAccessAddress(address, true))
					result = B_BAD_ADDRESS;
				else if (size <= 0 || size > realSize)
					result = B_BAD_VALUE;

				// write the memory
				size_t bytesWritten = 0;
				if (result == B_OK) {
					result = breakpointManager->WriteMemory(address, data, size,
						bytesWritten);
				}
				reply.write_memory.error = result;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_WRITE_MEMORY: "
					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
					", result: %" B_PRIx32 ", written: %ld\n", nubThread->id,
					replyPort, address, size, result, bytesWritten));

				reply.write_memory.size = bytesWritten;
				sendReply = true;
				replySize = sizeof(debug_nub_write_memory_reply);
				break;
			}

			case B_DEBUG_MESSAGE_SET_TEAM_FLAGS:
			{
				// get the parameters
				int32 flags = message.set_team_flags.flags
					& B_TEAM_DEBUG_USER_FLAG_MASK;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_TEAM_FLAGS"
					": flags: %" B_PRIx32 "\n", nubThread->id, flags));

				Team *team = thread_get_current_thread()->team;

				// set the flags
				cpu_status state = disable_interrupts();
				GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

				flags |= team->debug_info.flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK;
				atomic_set(&team->debug_info.flags, flags);

				RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
				restore_interrupts(state);

				break;
			}

			case B_DEBUG_MESSAGE_SET_THREAD_FLAGS:
			{
				// get the parameters
				thread_id threadID = message.set_thread_flags.thread;
				int32 flags = message.set_thread_flags.flags
					& B_THREAD_DEBUG_USER_FLAG_MASK;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_THREAD_FLAGS"
					": thread: %" B_PRId32 ", flags: %" B_PRIx32 "\n",
					nubThread->id, threadID, flags));

				// set the flags
				Thread* thread = Thread::GetAndLock(threadID);
				if (thread == NULL)
					break;
				BReference<Thread> threadReference(thread, true);
				ThreadLocker threadLocker(thread, true);

				InterruptsSpinLocker threadDebugInfoLocker(
					thread->debug_info.lock);

				if (thread->team == thread_get_current_thread()->team) {
					flags |= thread->debug_info.flags
						& B_THREAD_DEBUG_KERNEL_FLAG_MASK;
					atomic_set(&thread->debug_info.flags, flags);
				}

				break;
			}

			case B_DEBUG_MESSAGE_CONTINUE_THREAD:
			{
				// get the parameters
				thread_id threadID;
				uint32 handleEvent;
				bool singleStep;

				threadID = message.continue_thread.thread;
				handleEvent = message.continue_thread.handle_event;
				singleStep = message.continue_thread.single_step;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CONTINUE_THREAD"
					": thread: %" B_PRId32 ", handle event: %" B_PRIu32 ", "
					"single step: %d\n", nubThread->id, threadID, handleEvent,
					singleStep));

				// find the thread and get its debug port
				port_id threadDebugPort = -1;
				status_t result = debug_nub_thread_get_thread_debug_port(
					nubThread, threadID, threadDebugPort);

				// send a message to the debugged thread
				if (result == B_OK) {
					debugged_thread_continue commandMessage;
					commandMessage.handle_event = handleEvent;
					commandMessage.single_step = singleStep;

					result = write_port(threadDebugPort,
						B_DEBUGGED_THREAD_MESSAGE_CONTINUE,
						&commandMessage, sizeof(commandMessage));
				} else if (result == B_BAD_THREAD_STATE) {
					Thread* thread = Thread::GetAndLock(threadID);
					if (thread == NULL)
						break;

					BReference<Thread> threadReference(thread, true);
					ThreadLocker threadLocker(thread, true);
					if (thread->state == B_THREAD_SUSPENDED) {
						threadLocker.Unlock();
						resume_thread(threadID);
						break;
					}
				}

				break;
			}

			case B_DEBUG_MESSAGE_SET_CPU_STATE:
			{
				// get the parameters
				thread_id threadID = message.set_cpu_state.thread;
				const debug_cpu_state &cpuState
					= message.set_cpu_state.cpu_state;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_CPU_STATE"
					": thread: %" B_PRId32 "\n", nubThread->id, threadID));

				// find the thread and get its debug port
				port_id threadDebugPort = -1;
				status_t result = debug_nub_thread_get_thread_debug_port(
					nubThread, threadID, threadDebugPort);

				// send a message to the debugged thread
				if (result == B_OK) {
					debugged_thread_set_cpu_state commandMessage;
					memcpy(&commandMessage.cpu_state, &cpuState,
						sizeof(debug_cpu_state));
					write_port(threadDebugPort,
						B_DEBUGGED_THREAD_SET_CPU_STATE,
						&commandMessage, sizeof(commandMessage));
				}

				break;
			}

			case B_DEBUG_MESSAGE_GET_CPU_STATE:
			{
				// get the parameters
				thread_id threadID = message.get_cpu_state.thread;
				replyPort = message.get_cpu_state.reply_port;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_CPU_STATE"
					": thread: %" B_PRId32 "\n", nubThread->id, threadID));

				// find the thread and get its debug port
				port_id threadDebugPort = -1;
				status_t result = debug_nub_thread_get_thread_debug_port(
					nubThread, threadID, threadDebugPort);

				// send a message to the debugged thread
				if (threadDebugPort >= 0) {
					debugged_thread_get_cpu_state commandMessage;
					commandMessage.reply_port = replyPort;
					result = write_port(threadDebugPort,
						B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage,
						sizeof(commandMessage));
				}

				// send a reply to the debugger in case of error
				if (result != B_OK) {
					reply.get_cpu_state.error = result;
					sendReply = true;
					replySize = sizeof(reply.get_cpu_state);
				}

				break;
			}

			case B_DEBUG_MESSAGE_SET_BREAKPOINT:
			{
				// get the parameters
				replyPort = message.set_breakpoint.reply_port;
				void *address = message.set_breakpoint.address;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_BREAKPOINT"
					": address: %p\n", nubThread->id, address));

				// check the address
				status_t result = B_OK;
				if (address == NULL
					|| !BreakpointManager::CanAccessAddress(address, false)) {
					result = B_BAD_ADDRESS;
				}

				// set the breakpoint
				if (result == B_OK)
					result = breakpointManager->InstallBreakpoint(address);

				if (result == B_OK)
					update_threads_breakpoints_flag();

				// prepare the reply
				reply.set_breakpoint.error = result;
				replySize = sizeof(reply.set_breakpoint);
				sendReply = true;

				break;
			}

			case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT:
			{
				// get the parameters
				void *address = message.clear_breakpoint.address;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_BREAKPOINT"
					": address: %p\n", nubThread->id, address));

				// check the address
				status_t result = B_OK;
				if (address == NULL
					|| !BreakpointManager::CanAccessAddress(address, false)) {
					result = B_BAD_ADDRESS;
				}

				// clear the breakpoint
				if (result == B_OK)
					result = breakpointManager->UninstallBreakpoint(address);

				if (result == B_OK)
					update_threads_breakpoints_flag();

				break;
			}

			case B_DEBUG_MESSAGE_SET_WATCHPOINT:
			{
				// get the parameters
				replyPort = message.set_watchpoint.reply_port;
				void *address = message.set_watchpoint.address;
				uint32 type = message.set_watchpoint.type;
				int32 length = message.set_watchpoint.length;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_WATCHPOINT"
					": address: %p, type: %" B_PRIu32 ", length: %" B_PRId32 "\n",
					nubThread->id, address, type, length));

				// check the address and size
				status_t result = B_OK;
				if (address == NULL
					|| !BreakpointManager::CanAccessAddress(address, false)) {
					result = B_BAD_ADDRESS;
				}
				if (length < 0)
					result = B_BAD_VALUE;

				// set the watchpoint
				if (result == B_OK) {
					result = breakpointManager->InstallWatchpoint(address, type,
						length);
				}

				if (result == B_OK)
					update_threads_breakpoints_flag();

				// prepare the reply
				reply.set_watchpoint.error = result;
				replySize = sizeof(reply.set_watchpoint);
				sendReply = true;

				break;
			}

			case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT:
			{
				// get the parameters
				void *address = message.clear_watchpoint.address;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_WATCHPOINT"
					": address: %p\n", nubThread->id, address));

				// check the address
				status_t result = B_OK;
				if (address == NULL
					|| !BreakpointManager::CanAccessAddress(address, false)) {
					result = B_BAD_ADDRESS;
				}

				// clear the watchpoint
				if (result == B_OK)
					result = breakpointManager->UninstallWatchpoint(address);

				if (result == B_OK)
					update_threads_breakpoints_flag();

				break;
			}

			case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS:
			{
				// get the parameters
				thread_id threadID = message.set_signal_masks.thread;
				uint64 ignore = message.set_signal_masks.ignore_mask;
				uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask;
				uint32 ignoreOp = message.set_signal_masks.ignore_op;
				uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_MASKS"
					": thread: %" B_PRId32 ", ignore: %" B_PRIx64 " (op: %"
					B_PRIu32 "), ignore once: %" B_PRIx64 " (op: %" B_PRIu32
					")\n", nubThread->id, threadID, ignore, ignoreOp,
					ignoreOnce, ignoreOnceOp));

				// set the masks
				Thread* thread = Thread::GetAndLock(threadID);
				if (thread == NULL)
					break;
				BReference<Thread> threadReference(thread, true);
				ThreadLocker threadLocker(thread, true);

				InterruptsSpinLocker threadDebugInfoLocker(
					thread->debug_info.lock);

				if (thread->team == thread_get_current_thread()->team) {
					thread_debug_info &threadDebugInfo = thread->debug_info;
					// set ignore mask
					switch (ignoreOp) {
						case B_DEBUG_SIGNAL_MASK_AND:
							threadDebugInfo.ignore_signals &= ignore;
							break;
						case B_DEBUG_SIGNAL_MASK_OR:
							threadDebugInfo.ignore_signals |= ignore;
							break;
						case B_DEBUG_SIGNAL_MASK_SET:
							threadDebugInfo.ignore_signals = ignore;
							break;
					}

					// set ignore once mask
					switch (ignoreOnceOp) {
						case B_DEBUG_SIGNAL_MASK_AND:
							threadDebugInfo.ignore_signals_once &= ignoreOnce;
							break;
						case B_DEBUG_SIGNAL_MASK_OR:
							threadDebugInfo.ignore_signals_once |= ignoreOnce;
							break;
						case B_DEBUG_SIGNAL_MASK_SET:
							threadDebugInfo.ignore_signals_once = ignoreOnce;
							break;
					}
				}

				break;
			}

			case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS:
			{
				// get the parameters
				replyPort = message.get_signal_masks.reply_port;
				thread_id threadID = message.get_signal_masks.thread;
				status_t result = B_OK;

				// get the masks
				uint64 ignore = 0;
				uint64 ignoreOnce = 0;

				Thread* thread = Thread::GetAndLock(threadID);
				if (thread != NULL) {
					BReference<Thread> threadReference(thread, true);
					ThreadLocker threadLocker(thread, true);

					InterruptsSpinLocker threadDebugInfoLocker(
						thread->debug_info.lock);

					ignore = thread->debug_info.ignore_signals;
					ignoreOnce = thread->debug_info.ignore_signals_once;
				} else
					result = B_BAD_THREAD_ID;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_MASKS"
					": reply port: %" B_PRId32 ", thread: %" B_PRId32 ", "
					"ignore: %" B_PRIx64 ", ignore once: %" B_PRIx64 ", result: "
					"%" B_PRIx32 "\n", nubThread->id, replyPort, threadID,
					ignore, ignoreOnce, result));

				// prepare the message
				reply.get_signal_masks.error = result;
				reply.get_signal_masks.ignore_mask = ignore;
				reply.get_signal_masks.ignore_once_mask = ignoreOnce;
				replySize = sizeof(reply.get_signal_masks);
				sendReply = true;
				break;
			}

			case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER:
			{
				// get the parameters
				int signal = message.set_signal_handler.signal;
				struct sigaction &handler = message.set_signal_handler.handler;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER"
					": signal: %d, handler: %p\n", nubThread->id, signal,
					handler.sa_handler));

				// set the handler
				sigaction(signal, &handler, NULL);

				break;
			}

			case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER:
			{
				// get the parameters
				replyPort = message.get_signal_handler.reply_port;
				int signal = message.get_signal_handler.signal;
				status_t result = B_OK;

				// get the handler
				if (sigaction(signal, NULL, &reply.get_signal_handler.handler)
						!= 0) {
					result = errno;
				}

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER"
					": reply port: %" B_PRId32 ", signal: %d, handler: %p\n",
					nubThread->id, replyPort, signal,
					reply.get_signal_handler.handler.sa_handler));

				// prepare the message
				reply.get_signal_handler.error = result;
				replySize = sizeof(reply.get_signal_handler);
				sendReply = true;
				break;
			}
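
			// Handover sketch: the old debugger sends
			// B_DEBUG_MESSAGE_PREPARE_HANDOVER, the new debugger then calls
			// install_team_debugger() with its own port, and the nub is
			// finally told via B_DEBUG_MESSAGE_HANDED_OVER to notify all
			// debugged threads of the change.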
			case B_DEBUG_MESSAGE_PREPARE_HANDOVER:
			{
				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_PREPARE_HANDOVER"
					"\n", nubThread->id));

				Team *team = nubThread->team;

				// Acquire the debugger write lock. As soon as we have it and
				// have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread
				// will write anything to the debugger port anymore.
				status_t result = acquire_sem_etc(writeLock, 1,
					B_KILL_CAN_INTERRUPT, 0);
				if (result == B_OK) {
					// set the respective team debug flag
					cpu_status state = disable_interrupts();
					GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

					atomic_or(&team->debug_info.flags,
						B_TEAM_DEBUG_DEBUGGER_HANDOVER);
					BreakpointManager* breakpointManager
						= team->debug_info.breakpoint_manager;

					RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
					restore_interrupts(state);

					// remove all installed breakpoints
					breakpointManager->RemoveAllBreakpoints();

					release_sem(writeLock);
				} else {
					// We probably got a SIGKILL. If so, we will terminate when
					// reading the next message fails.
				}

				break;
			}

			case B_DEBUG_MESSAGE_HANDED_OVER:
			{
				// notify all threads that the debugger has changed
				broadcast_debugged_thread_message(nubThread,
					B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);

				break;
			}

			case B_DEBUG_START_PROFILER:
			{
				// get the parameters
				thread_id threadID = message.start_profiler.thread;
				replyPort = message.start_profiler.reply_port;
				area_id sampleArea = message.start_profiler.sample_area;
				int32 stackDepth = message.start_profiler.stack_depth;
				bool variableStackDepth
					= message.start_profiler.variable_stack_depth;
				bigtime_t interval = max_c(message.start_profiler.interval,
					B_DEBUG_MIN_PROFILE_INTERVAL);
				status_t result = B_OK;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_START_PROFILER: "
					"thread: %" B_PRId32 ", sample area: %" B_PRId32 "\n",
					nubThread->id, threadID, sampleArea));

				if (stackDepth < 1)
					stackDepth = 1;
				else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH)
					stackDepth = B_DEBUG_STACK_TRACE_DEPTH;

				// provision for an extra entry per hit (for the number of
				// samples), if variable stack depth
				if (variableStackDepth)
					stackDepth++;

				// clone the sample area
				area_info areaInfo;
				if (result == B_OK)
					result = get_area_info(sampleArea, &areaInfo);

				area_id clonedSampleArea = -1;
				void* samples = NULL;
				if (result == B_OK) {
					clonedSampleArea = clone_area("profiling samples", &samples,
						B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA,
						sampleArea);
					if (clonedSampleArea >= 0) {
						// we need the memory locked
						result = lock_memory(samples, areaInfo.size,
							B_READ_DEVICE);
						if (result != B_OK) {
							delete_area(clonedSampleArea);
							clonedSampleArea = -1;
						}
					} else
						result = clonedSampleArea;
				}

				// get the thread and set the profile info
				int32 imageEvent = nubThread->team->debug_info.image_event;
				if (result == B_OK) {
					Thread* thread = Thread::GetAndLock(threadID);
					BReference<Thread> threadReference(thread, true);
					ThreadLocker threadLocker(thread, true);

					if (thread != NULL && thread->team == nubThread->team) {
						thread_debug_info &threadDebugInfo = thread->debug_info;

						InterruptsSpinLocker threadDebugInfoLocker(
							threadDebugInfo.lock);

						if (threadDebugInfo.profile.samples == NULL) {
							threadDebugInfo.profile.interval = interval;
							threadDebugInfo.profile.sample_area
								= clonedSampleArea;
							threadDebugInfo.profile.samples = (addr_t*)samples;
							threadDebugInfo.profile.max_samples
								= areaInfo.size / sizeof(addr_t);
							threadDebugInfo.profile.flush_threshold
								= threadDebugInfo.profile.max_samples
									* B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD
									/ 100;
							threadDebugInfo.profile.sample_count = 0;
							threadDebugInfo.profile.dropped_ticks = 0;
							threadDebugInfo.profile.stack_depth = stackDepth;
							threadDebugInfo.profile.variable_stack_depth
								= variableStackDepth;
							threadDebugInfo.profile.buffer_full = false;
							threadDebugInfo.profile.interval_left = interval;
							threadDebugInfo.profile.installed_timer = NULL;
							threadDebugInfo.profile.image_event = imageEvent;
							threadDebugInfo.profile.last_image_event
								= imageEvent;
						} else
							result = B_BAD_VALUE;
					} else
						result = B_BAD_THREAD_ID;
				}

				// on error unlock and delete the sample area
				if (result != B_OK) {
					if (clonedSampleArea >= 0) {
						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
						delete_area(clonedSampleArea);
					}
				}

				// send a reply to the debugger
				reply.start_profiler.error = result;
				reply.start_profiler.interval = interval;
				reply.start_profiler.image_event = imageEvent;
				sendReply = true;
				replySize = sizeof(reply.start_profiler);

				break;
			}

			case B_DEBUG_STOP_PROFILER:
			{
				// get the parameters
				thread_id threadID = message.stop_profiler.thread;
				replyPort = message.stop_profiler.reply_port;
				status_t result = B_OK;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_STOP_PROFILER: "
					"thread: %" B_PRId32 "\n", nubThread->id, threadID));

				area_id sampleArea = -1;
				addr_t* samples = NULL;
				int32 sampleCount = 0;
				int32 stackDepth = 0;
				bool variableStackDepth = false;
				int32 imageEvent = 0;
				int32 droppedTicks = 0;

				// get the thread and detach the profile info
				Thread* thread = Thread::GetAndLock(threadID);
				BReference<Thread> threadReference(thread, true);
				ThreadLocker threadLocker(thread, true);

				if (thread && thread->team == nubThread->team) {
					thread_debug_info &threadDebugInfo = thread->debug_info;

					InterruptsSpinLocker threadDebugInfoLocker(
						threadDebugInfo.lock);

					if (threadDebugInfo.profile.samples != NULL) {
						sampleArea = threadDebugInfo.profile.sample_area;
						samples = threadDebugInfo.profile.samples;
						sampleCount = threadDebugInfo.profile.sample_count;
						droppedTicks = threadDebugInfo.profile.dropped_ticks;
						stackDepth = threadDebugInfo.profile.stack_depth;
						variableStackDepth
							= threadDebugInfo.profile.variable_stack_depth;
						imageEvent = threadDebugInfo.profile.image_event;
						threadDebugInfo.profile.sample_area = -1;
						threadDebugInfo.profile.samples = NULL;
						threadDebugInfo.profile.buffer_full = false;
						threadDebugInfo.profile.dropped_ticks = 0;
					} else
						result = B_BAD_VALUE;
				} else
					result = B_BAD_THREAD_ID;

				threadLocker.Unlock();

				// prepare the reply
				if (result == B_OK) {
					reply.profiler_update.origin.thread = threadID;
					reply.profiler_update.image_event = imageEvent;
					reply.profiler_update.stack_depth = stackDepth;
					reply.profiler_update.variable_stack_depth
						= variableStackDepth;
					reply.profiler_update.sample_count = sampleCount;
					reply.profiler_update.dropped_ticks = droppedTicks;
					reply.profiler_update.stopped = true;
				} else
					reply.profiler_update.origin.thread = result;

				replySize = sizeof(debug_profiler_update);
				sendReply = true;

				if (sampleArea >= 0) {
					area_info areaInfo;
					if (get_area_info(sampleArea, &areaInfo) == B_OK) {
						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
						delete_area(sampleArea);
					}
				}

				break;
			}

			case B_DEBUG_WRITE_CORE_FILE:
			{
				// get the parameters
				replyPort = message.write_core_file.reply_port;
				char* path = message.write_core_file.path;
				path[sizeof(message.write_core_file.path) - 1] = '\0';

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_WRITE_CORE_FILE"
					": path: %s\n", nubThread->id, path));

				// write the core file
				status_t result = core_dump_write_core_file(path, false);

				// prepare the reply
				reply.write_core_file.error = result;
				replySize = sizeof(reply.write_core_file);
				sendReply = true;

				break;
			}
		}

		// send the reply, if necessary
		if (sendReply) {
			status_t error = kill_interruptable_write_port(replyPort, command,
				&reply, replySize);

			if (error != B_OK) {
				// The debugger port either no longer exists or we got
				// interrupted by a kill signal. In either case we terminate.
				TRACE(("nub thread %" B_PRId32 ": failed to send reply to port "
					"%" B_PRId32 ": %s\n", nubThread->id, replyPort,
					strerror(error)));

				nub_thread_cleanup(nubThread);
				return error;
			}
		}
	}
}


/**	\brief Helper function for install_team_debugger() that sets up the team
	and thread debug infos.

	The caller must hold the team's lock as well as the team debug info lock.

	The function also clears the arch-specific team and thread debug infos
	(including, among other things, formerly set break/watchpoints).
*/
static void
install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam,
	port_id debuggerPort, port_id nubPort, thread_id nubThread,
	sem_id debuggerPortWriteLock, thread_id causingThread)
{
	atomic_set(&team->debug_info.flags,
		B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	team->debug_info.nub_port = nubPort;
	team->debug_info.nub_thread = nubThread;
	team->debug_info.debugger_team = debuggerTeam;
	team->debug_info.debugger_port = debuggerPort;
	team->debug_info.debugger_write_lock = debuggerPortWriteLock;
	team->debug_info.causing_thread = causingThread;

	arch_clear_team_debug_info(&team->debug_info.arch_info);

	// set the user debug flags and signal masks of all threads to the default
	for (Thread *thread = team->thread_list; thread;
			thread = thread->team_next) {
		SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if (thread->id == nubThread) {
			atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD);
		} else {
			int32 flags = thread->debug_info.flags
				& ~B_THREAD_DEBUG_USER_FLAG_MASK;
			atomic_set(&thread->debug_info.flags,
				flags | B_THREAD_DEBUG_DEFAULT_FLAGS);
			thread->debug_info.ignore_signals = 0;
			thread->debug_info.ignore_signals_once = 0;

			arch_clear_thread_debug_info(&thread->debug_info.arch_info);
		}
	}

	// update the thread::flags fields
	update_threads_debugger_installed_flag(team);
}

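
// Typical userland usage (sketch only, using the public debugging API
// declared in <debugger.h>; the names outside this file are assumptions):
//
//	port_id debuggerPort = create_port(10, "my debugger port");
//	port_id nubPort = install_team_debugger(teamID, debuggerPort);
//	if (nubPort >= 0) {
//		debug_debugger_message_data message;
//		int32 code;
//		read_port(debuggerPort, &code, &message, sizeof(message));
//		// ... handle the debug event, then let the team continue ...
//	}
//
// The userland install_team_debugger() call ends up in
// _user_install_team_debugger() further below, which forwards to the kernel
// install_team_debugger() that follows here.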
static port_id
install_team_debugger(team_id teamID, port_id debuggerPort,
	thread_id causingThread, bool useDefault, bool dontReplace)
{
	TRACE(("install_team_debugger(team: %" B_PRId32 ", port: %" B_PRId32 ", "
		"default: %d, dontReplace: %d)\n", teamID, debuggerPort, useDefault,
		dontReplace));

	if (useDefault)
		debuggerPort = atomic_get(&sDefaultDebuggerPort);

	// get the debugger team
	port_info debuggerPortInfo;
	status_t error = get_port_info(debuggerPort, &debuggerPortInfo);
	if (error != B_OK) {
		TRACE(("install_team_debugger(): Failed to get debugger port info: "
			"%" B_PRIx32 "\n", error));
		return error;
	}
	team_id debuggerTeam = debuggerPortInfo.team;

	// Check the debugger team: It must neither be the kernel team nor the
	// debugged team.
	if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) {
		TRACE(("install_team_debugger(): Can't debug kernel or debugger team. "
			"debugger: %" B_PRId32 ", debugged: %" B_PRId32 "\n", debuggerTeam,
			teamID));
		return B_NOT_ALLOWED;
	}

	// get the team
	Team* team;
	ConditionVariable debugChangeCondition;
	error = prepare_debugger_change(teamID, debugChangeCondition, team);
	if (error != B_OK)
		return error;

	// get the real team ID
	teamID = team->id;

	// check whether a debugger is already installed

	bool done = false;
	port_id result = B_ERROR;
	bool handOver = false;
	port_id oldDebuggerPort = -1;
	port_id nubPort = -1;

	TeamLocker teamLocker(team);
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = team->debug_info.flags;

	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// There's already a debugger installed.
		if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) {
			if (dontReplace) {
				// We're fine with already having a debugger.
				error = B_OK;
				done = true;
				result = team->debug_info.nub_port;
			} else {
				// A handover to another debugger is requested.
				// Set the handing-over flag -- we'll clear both flags after
				// having sent the handed-over message to the new debugger.
				atomic_or(&team->debug_info.flags,
					B_TEAM_DEBUG_DEBUGGER_HANDING_OVER);

				oldDebuggerPort = team->debug_info.debugger_port;
				result = nubPort = team->debug_info.nub_port;
				if (causingThread < 0)
					causingThread = team->debug_info.causing_thread;

				// set the new debugger
				install_team_debugger_init_debug_infos(team, debuggerTeam,
					debuggerPort, nubPort, team->debug_info.nub_thread,
					team->debug_info.debugger_write_lock, causingThread);

				handOver = true;
				done = true;
			}
		} else {
			// there's already a debugger installed
			error = (dontReplace ? B_OK : B_BAD_VALUE);
			done = true;
			result = team->debug_info.nub_port;
		}
	} else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0
		&& useDefault) {
		// No debugger yet, disable_debugger() had been invoked, and we would
		// install the default debugger. Just fail.
		error = B_BAD_VALUE;
	}

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
	restore_interrupts(state);
	teamLocker.Unlock();

	if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) {
		// The old debugger must have just died. Proceed as if there was no
		// debugger installed. We may still be too early, in which case we'll
		// fail, but this race condition should be unbelievably rare and
		// relatively harmless.
		handOver = false;
		done = false;
	}

	if (handOver) {
		// prepare the handed-over message
		debug_handed_over notification;
		notification.origin.thread = -1;
		notification.origin.team = teamID;
		notification.origin.nub_port = nubPort;
		notification.debugger = debuggerTeam;
		notification.debugger_port = debuggerPort;
		notification.causing_thread = causingThread;

		// notify the new debugger
		error = write_port_etc(debuggerPort,
			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
		if (error != B_OK) {
			dprintf("install_team_debugger(): Failed to send message to new "
				"debugger: %s\n", strerror(error));
		}

		// clear the handed-over and handing-over flags
		state = disable_interrupts();
		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		atomic_and(&team->debug_info.flags,
			~(B_TEAM_DEBUG_DEBUGGER_HANDOVER
				| B_TEAM_DEBUG_DEBUGGER_HANDING_OVER));

		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
		restore_interrupts(state);

		finish_debugger_change(team);

		// notify the nub thread
		kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER,
			NULL, 0);

		// notify the old debugger
		error = write_port_etc(oldDebuggerPort,
			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
		if (error != B_OK) {
			TRACE(("install_team_debugger(): Failed to send message to old "
				"debugger: %s\n", strerror(error)));
		}

		TRACE(("install_team_debugger() done: handed over to debugger: team: "
			"%" B_PRId32 ", port: %" B_PRId32 "\n", debuggerTeam,
			debuggerPort));

		return result;
	}

	if (done || error != B_OK) {
		TRACE(("install_team_debugger() done1: %" B_PRId32 "\n",
			(error == B_OK ? result : error)));
		finish_debugger_change(team);
		return (error == B_OK ? result : error);
	}

	// create the debugger write lock semaphore
	char nameBuffer[B_OS_NAME_LENGTH];
	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debugger port "
		"write", teamID);
	sem_id debuggerWriteLock = create_sem(1, nameBuffer);
	if (debuggerWriteLock < 0)
		error = debuggerWriteLock;

	// create the nub port
	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug", teamID);
	if (error == B_OK) {
		nubPort = create_port(1, nameBuffer);
		if (nubPort < 0)
			error = nubPort;
		else
			result = nubPort;
	}

	// Make the debugger team the port owner; this way we know when the
	// debugger is gone and can clean up.
	if (error == B_OK)
		error = set_port_owner(nubPort, debuggerTeam);

	// create the breakpoint manager
	BreakpointManager* breakpointManager = NULL;
	if (error == B_OK) {
		breakpointManager = new(std::nothrow) BreakpointManager;
		if (breakpointManager != NULL)
			error = breakpointManager->Init();
		else
			error = B_NO_MEMORY;
	}

	// spawn the nub thread
	thread_id nubThread = -1;
	if (error == B_OK) {
		snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug task",
			teamID);
		nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer,
			B_NORMAL_PRIORITY, NULL, teamID);
		if (nubThread < 0)
			error = nubThread;
	}

	// now adjust the debug info accordingly
	if (error == B_OK) {
		TeamLocker teamLocker(team);
		state = disable_interrupts();
		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		team->debug_info.breakpoint_manager = breakpointManager;
		install_team_debugger_init_debug_infos(team, debuggerTeam,
			debuggerPort, nubPort, nubThread, debuggerWriteLock,
			causingThread);

		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
		restore_interrupts(state);
	}

	finish_debugger_change(team);

	// if everything went fine, resume the nub thread, otherwise clean up
	if (error == B_OK) {
		resume_thread(nubThread);
	} else {
		// delete port and terminate thread
		if (nubPort >= 0) {
			set_port_owner(nubPort, B_CURRENT_TEAM);
			delete_port(nubPort);
		}
		if (nubThread >= 0) {
			int32 result;
			wait_for_thread(nubThread, &result);
		}

		delete breakpointManager;
	}

	TRACE(("install_team_debugger() done2: %" B_PRId32 "\n",
		(error == B_OK ? result : error)));
	return (error == B_OK ? result : error);
}


static status_t
ensure_debugger_installed()
{
	port_id port = install_team_debugger(B_CURRENT_TEAM, -1,
		thread_get_current_thread_id(), true, true);
	return port >= 0 ? B_OK : port;
}


// #pragma mark -
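
// The functions below implement the userland debugging syscalls. A call to
// the userland debugger() function in an application, for instance, ends up
// in _user_debugger(), which makes sure a debugger is installed and then
// reports a B_DEBUGGER_MESSAGE_DEBUGGER_CALL event for the calling thread.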


void
_user_debugger(const char *userMessage)
{
	// install the default debugger, if there is none yet
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		// time to commit suicide
		char buffer[128];
		ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer));
		if (length >= 0) {
			dprintf("_user_debugger(): Failed to install debugger. Message is: "
				"`%s'\n", buffer);
		} else {
			dprintf("_user_debugger(): Failed to install debugger. Message is: "
				"%p (%s)\n", userMessage, strerror(length));
		}
		_user_exit_team(1);
	}

	// prepare the message
	debug_debugger_call message;
	message.message = (void*)userMessage;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message,
		sizeof(message), true);
}


int
_user_disable_debugger(int state)
{
	Team *team = thread_get_current_thread()->team;

	TRACE(("_user_disable_debugger(%d): team: %" B_PRId32 "\n", state,
		team->id));

	cpu_status cpuState = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 oldFlags;
	if (state) {
		oldFlags = atomic_or(&team->debug_info.flags,
			B_TEAM_DEBUG_DEBUGGER_DISABLED);
	} else {
		oldFlags = atomic_and(&team->debug_info.flags,
			~B_TEAM_DEBUG_DEBUGGER_DISABLED);
	}

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
	restore_interrupts(cpuState);

	// TODO: Check whether the return value is really the old state.
	return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED);
}
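

// Note: In a regular Haiku system the default debugger port is typically
// registered by the debug_server, which presents the crash dialog and saves
// debug reports when an undebugged team hits a debug event.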
status_t
_user_install_default_debugger(port_id debuggerPort)
{
	// if supplied, check whether the port is a valid port
	if (debuggerPort >= 0) {
		port_info portInfo;
		status_t error = get_port_info(debuggerPort, &portInfo);
		if (error != B_OK)
			return error;

		// the debugger team must not be the kernel team
		if (portInfo.team == team_get_kernel_team_id())
			return B_NOT_ALLOWED;
	}

	atomic_set(&sDefaultDebuggerPort, debuggerPort);

	return B_OK;
}


port_id
_user_install_team_debugger(team_id teamID, port_id debuggerPort)
{
	return install_team_debugger(teamID, debuggerPort, -1, false, false);
}


status_t
_user_remove_team_debugger(team_id teamID)
{
	Team* team;
	ConditionVariable debugChangeCondition;
	status_t error = prepare_debugger_change(teamID, debugChangeCondition,
		team);
	if (error != B_OK)
		return error;

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	thread_id nubThread = -1;
	port_id nubPort = -1;

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// there's a debugger installed
		nubThread = team->debug_info.nub_thread;
		nubPort = team->debug_info.nub_port;
	} else {
		// no debugger installed
		error = B_BAD_VALUE;
	}

	debugInfoLocker.Unlock();

	// Delete the nub port -- this will cause the nub thread to terminate and
	// remove the debugger.
	if (nubPort >= 0)
		delete_port(nubPort);

	finish_debugger_change(team);

	// wait for the nub thread
	if (nubThread >= 0)
		wait_for_thread(nubThread, NULL);

	return error;
}


status_t
_user_debug_thread(thread_id threadID)
{
	TRACE(("[%" B_PRId32 "] _user_debug_thread(%" B_PRId32 ")\n",
		find_thread(NULL), threadID));

	// get the thread
	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// we can't debug the kernel team
	if (thread->team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	InterruptsLocker interruptsLocker;
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// If the thread is already dying, it's too late to debug it.
	if ((thread->debug_info.flags & B_THREAD_DEBUG_DYING) != 0)
		return B_BAD_THREAD_ID;

	// don't debug the nub thread
	if ((thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) != 0)
		return B_NOT_ALLOWED;

	// already marked stopped or being told to stop?
	if ((thread->debug_info.flags
			& (B_THREAD_DEBUG_STOPPED | B_THREAD_DEBUG_STOP)) != 0) {
		return B_OK;
	}

	// set the flag that tells the thread to stop as soon as possible
	atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP);

	update_thread_user_debug_flag(thread);

	// send the thread a SIGNAL_DEBUG_THREAD, so it is interrupted (or
	// continued)
	threadDebugInfoLocker.Unlock();
	ReadSpinLocker teamLocker(thread->team_lock);
	SpinLocker locker(thread->team->signal_lock);

	send_signal_to_thread_locked(thread, SIGNAL_DEBUG_THREAD, NULL, 0);

	return B_OK;
}


void
_user_wait_for_debugger(void)
{
	debug_thread_debugged message;
	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message,
		sizeof(message), false);
}
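

// The two syscalls below let a team manage architecture break- and
// watchpoints for itself while no debugger is installed; once a debugger is
// attached, break- and watchpoints go through its nub thread instead.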
status_t
_user_set_debugger_breakpoint(void *address, uint32 type, int32 length,
	bool watchpoint)
{
	// check the address and size
	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
		return B_BAD_ADDRESS;
	if (watchpoint && length < 0)
		return B_BAD_VALUE;

	// check whether a debugger is installed already
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		return B_BAD_VALUE;

	// We can't help it: there's a small but relatively harmless race condition
	// here, since a debugger could be installed in the meantime. The worst
	// case is that we install a break/watchpoint the debugger doesn't know
	// about.

	// set the break/watchpoint
	status_t result;
	if (watchpoint)
		result = arch_set_watchpoint(address, type, length);
	else
		result = arch_set_breakpoint(address);

	if (result == B_OK)
		update_threads_breakpoints_flag();

	return result;
}


status_t
_user_clear_debugger_breakpoint(void *address, bool watchpoint)
{
	// check the address
	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
		return B_BAD_ADDRESS;

	// check whether a debugger is installed already
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		return B_BAD_VALUE;

	// We can't help it: there's a small but relatively harmless race condition
	// here, since a debugger could be installed in the meantime. The worst
	// case is that we clear a break/watchpoint the debugger has just
	// installed.

	// clear the break/watchpoint
	status_t result;
	if (watchpoint)
		result = arch_clear_watchpoint(address);
	else
		result = arch_clear_breakpoint(address);

	if (result == B_OK)
		update_threads_breakpoints_flag();

	return result;
}