/*
 * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include <algorithm>

#include <arch/debug.h>
#include <arch/user_debugger.h>
#include <cpu.h>
#include <debugger.h>
#include <kernel.h>
#include <KernelExport.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <ksyscalls.h>
#include <port.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <thread_types.h>
#include <user_debugger.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include <AutoDeleter.h>
#include <util/AutoLock.h>

#include "BreakpointManager.h"


//#define TRACE_USER_DEBUGGER
#ifdef TRACE_USER_DEBUGGER
#   define TRACE(x) dprintf x
#else
#   define TRACE(x) ;
#endif


// TODO: Since the introduction of team_debug_info::debugger_changed_condition
// there's some potential for simplifications. E.g. clear_team_debug_info() and
// destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus
// arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()).


static port_id sDefaultDebuggerPort = -1;
    // accessed atomically

static timer sProfilingTimers[SMP_MAX_CPUS];
    // a profiling timer for each CPU -- used when a profiled thread is running
    // on that CPU


static void schedule_profiling_timer(Thread* thread, bigtime_t interval);
static int32 profiling_event(timer* unused);
static status_t ensure_debugger_installed();
static void get_team_debug_info(team_debug_info &teamDebugInfo);


static inline status_t
kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
    size_t bufferSize)
{
    return write_port_etc(port, code, buffer, bufferSize, B_KILL_CAN_INTERRUPT,
        0);
}


static status_t
debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
    bool dontWait)
{
    TRACE(("debugger_write(): thread: %" B_PRId32 ", team %" B_PRId32 ", "
        "port: %" B_PRId32 ", code: %" B_PRIx32 ", message: %p, size: %lu, "
        "dontWait: %d\n", thread_get_current_thread()->id,
        thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
        dontWait));

    status_t error = B_OK;

    // get the team debug info
    team_debug_info teamDebugInfo;
    get_team_debug_info(teamDebugInfo);
    sem_id writeLock = teamDebugInfo.debugger_write_lock;

    // get the write lock
    TRACE(("debugger_write(): acquiring write lock...\n"));
    error = acquire_sem_etc(writeLock, 1,
        dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
    if (error != B_OK) {
        TRACE(("debugger_write() done1: %" B_PRIx32 "\n", error));
        return error;
    }

    // re-get the team debug info
    get_team_debug_info(teamDebugInfo);

    if (teamDebugInfo.debugger_port != port
        || (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
        // The debugger has changed in the meantime or we are about to be
        // handed over to a new debugger. In either case we don't send the
        // message.
        TRACE(("debugger_write(): %s\n",
            (teamDebugInfo.debugger_port != port ? "debugger port changed"
                : "handover flag set")));
    } else {
        TRACE(("debugger_write(): writing to port...\n"));

        error = write_port_etc(port, code, buffer, bufferSize,
            dontWait ?
                (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
    }

    // release the write lock
    release_sem(writeLock);

    TRACE(("debugger_write() done: %" B_PRIx32 "\n", error));

    return error;
}


/*! Updates the thread::flags field according to what user debugger flags are
    set for the thread.
    Interrupts must be disabled and the thread's debug info lock must be held.
*/
static void
update_thread_user_debug_flag(Thread* thread)
{
    if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0)
        atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD);
    else
        atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD);
}


/*! Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the
    given thread.
    Interrupts must be disabled and the thread debug info lock must be held.
*/
static void
update_thread_breakpoints_flag(Thread* thread)
{
    Team* team = thread->team;

    if (arch_has_breakpoints(&team->debug_info.arch_info))
        atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
    else
        atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
}


/*! Updates the Thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
    threads of the current team.
*/
static void
update_threads_breakpoints_flag()
{
    Team* team = thread_get_current_thread()->team;

    TeamLocker teamLocker(team);

    Thread* thread = team->thread_list;

    if (arch_has_breakpoints(&team->debug_info.arch_info)) {
        for (; thread != NULL; thread = thread->team_next)
            atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
    } else {
        for (; thread != NULL; thread = thread->team_next)
            atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
    }
}


/*! Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of the
    given thread, which must be the current thread.
*/
static void
update_thread_debugger_installed_flag(Thread* thread)
{
    Team* team = thread->team;

    if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
        atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
    else
        atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
}


/*! Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all
    threads of the given team.
    The team's lock must be held.
*/
static void
update_threads_debugger_installed_flag(Team* team)
{
    Thread* thread = team->thread_list;

    if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
        for (; thread != NULL; thread = thread->team_next)
            atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
    } else {
        for (; thread != NULL; thread = thread->team_next)
            atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
    }
}


/**
 * For the first initialization the function must be called with \a initLock
 * set to \c true. If it would be possible that another thread accesses the
 * structure at the same time, `lock' must be held when calling the function.
 */
void
clear_team_debug_info(struct team_debug_info *info, bool initLock)
{
    if (info) {
        arch_clear_team_debug_info(&info->arch_info);
        atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
        info->debugger_team = -1;
        info->debugger_port = -1;
        info->nub_thread = -1;
        info->nub_port = -1;
        info->debugger_write_lock = -1;
        info->causing_thread = -1;
        info->image_event = 0;
        info->breakpoint_manager = NULL;

        if (initLock) {
            B_INITIALIZE_SPINLOCK(&info->lock);
            info->debugger_changed_condition = NULL;
        }
    }
}

/**
 * `lock' must not be held nor may interrupts be disabled.
 * \a info must not be a member of a team struct (or the team struct must no
 * longer be accessible, i.e. the team should already be removed).
 *
 * In case the team is still accessible, the procedure is:
 * 1. get `lock'
 * 2. copy the team debug info on stack
 * 3. call clear_team_debug_info() on the team debug info
 * 4. release `lock'
 * 5. call destroy_team_debug_info() on the copied team debug info
 */
static void
destroy_team_debug_info(struct team_debug_info *info)
{
    if (info) {
        arch_destroy_team_debug_info(&info->arch_info);

        // delete the breakpoint manager
        delete info->breakpoint_manager;
        info->breakpoint_manager = NULL;

        // delete the debugger port write lock
        if (info->debugger_write_lock >= 0) {
            delete_sem(info->debugger_write_lock);
            info->debugger_write_lock = -1;
        }

        // delete the nub port
        if (info->nub_port >= 0) {
            set_port_owner(info->nub_port, B_CURRENT_TEAM);
            delete_port(info->nub_port);
            info->nub_port = -1;
        }

        // wait for the nub thread
        if (info->nub_thread >= 0) {
            if (info->nub_thread != thread_get_current_thread()->id) {
                int32 result;
                wait_for_thread(info->nub_thread, &result);
            }

            info->nub_thread = -1;
        }

        atomic_set(&info->flags, 0);
        info->debugger_team = -1;
        info->debugger_port = -1;
        info->causing_thread = -1;
        info->image_event = -1;
    }
}


void
init_thread_debug_info(struct thread_debug_info *info)
{
    if (info) {
        B_INITIALIZE_SPINLOCK(&info->lock);
        arch_clear_thread_debug_info(&info->arch_info);
        info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
        info->debug_port = -1;
        info->ignore_signals = 0;
        info->ignore_signals_once = 0;
        info->profile.sample_area = -1;
        info->profile.samples = NULL;
        info->profile.buffer_full = false;
        info->profile.installed_timer = NULL;
    }
}


/*! Clears the debug info for the current thread.
    Invoked with thread debug info lock being held.
*/
void
clear_thread_debug_info(struct thread_debug_info *info, bool dying)
{
    if (info) {
        // cancel profiling timer
        if (info->profile.installed_timer != NULL) {
            cancel_timer(info->profile.installed_timer);
            info->profile.installed_timer = NULL;
        }

        arch_clear_thread_debug_info(&info->arch_info);
        atomic_set(&info->flags,
            B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ?
                B_THREAD_DEBUG_DYING : 0));
        info->debug_port = -1;
        info->ignore_signals = 0;
        info->ignore_signals_once = 0;
        info->profile.sample_area = -1;
        info->profile.samples = NULL;
        info->profile.buffer_full = false;
    }
}


void
destroy_thread_debug_info(struct thread_debug_info *info)
{
    if (info) {
        area_id sampleArea = info->profile.sample_area;
        if (sampleArea >= 0) {
            area_info areaInfo;
            if (get_area_info(sampleArea, &areaInfo) == B_OK) {
                unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
                delete_area(sampleArea);
            }
        }

        arch_destroy_thread_debug_info(&info->arch_info);

        if (info->debug_port >= 0) {
            delete_port(info->debug_port);
            info->debug_port = -1;
        }

        info->ignore_signals = 0;
        info->ignore_signals_once = 0;

        atomic_set(&info->flags, 0);
    }
}


static status_t
prepare_debugger_change(team_id teamID, ConditionVariable& condition,
    Team*& team)
{
    // We look up the team by ID, even in case of the current team, so we can be
    // sure, that the team is not already dying.
    if (teamID == B_CURRENT_TEAM)
        teamID = thread_get_current_thread()->team->id;

    while (true) {
        // get the team
        team = Team::GetAndLock(teamID);
        if (team == NULL)
            return B_BAD_TEAM_ID;
        BReference<Team> teamReference(team, true);
        TeamLocker teamLocker(team, true);

        // don't allow messing with the kernel team
        if (team == team_get_kernel_team())
            return B_NOT_ALLOWED;

        // check whether the condition is already set
        InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

        if (team->debug_info.debugger_changed_condition == NULL) {
            // nobody there yet -- set our condition variable and be done
            team->debug_info.debugger_changed_condition = &condition;
            return B_OK;
        }

        // we'll have to wait
        ConditionVariableEntry entry;
        team->debug_info.debugger_changed_condition->Add(&entry);

        debugInfoLocker.Unlock();
        teamLocker.Unlock();

        entry.Wait();
    }
}


static void
prepare_debugger_change(Team* team, ConditionVariable& condition)
{
    while (true) {
        // check whether the condition is already set
        InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

        if (team->debug_info.debugger_changed_condition == NULL) {
            // nobody there yet -- set our condition variable and be done
            team->debug_info.debugger_changed_condition = &condition;
            return;
        }

        // we'll have to wait
        ConditionVariableEntry entry;
        team->debug_info.debugger_changed_condition->Add(&entry);

        debugInfoLocker.Unlock();

        entry.Wait();
    }
}


static void
finish_debugger_change(Team* team)
{
    // unset our condition variable and notify all threads waiting on it
    InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

    ConditionVariable* condition = team->debug_info.debugger_changed_condition;
    team->debug_info.debugger_changed_condition = NULL;

    condition->NotifyAll();
}


void
user_debug_prepare_for_exec()
{
    Thread *thread = thread_get_current_thread();
    Team *team = thread->team;

    // If a debugger is installed for the team and the thread debug stuff
    // initialized, change the ownership of the debug port for the thread
    // to the kernel team, since exec_team() deletes all ports owned by this
    // team.
    // We change the ownership back later.
    if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
        // get the port
        port_id debugPort = -1;

        InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

        if ((thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0)
            debugPort = thread->debug_info.debug_port;

        threadDebugInfoLocker.Unlock();

        // set the new port ownership
        if (debugPort >= 0)
            set_port_owner(debugPort, team_get_kernel_team_id());
    }
}


void
user_debug_finish_after_exec()
{
    Thread *thread = thread_get_current_thread();
    Team *team = thread->team;

    // If a debugger is installed for the team and the thread debug stuff
    // initialized for this thread, change the ownership of its debug port
    // back to this team.
    if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
        // get the port
        port_id debugPort = -1;

        InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

        if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
            debugPort = thread->debug_info.debug_port;

        threadDebugInfoLocker.Unlock();

        // set the new port ownership
        if (debugPort >= 0)
            set_port_owner(debugPort, team->id);
    }
}


void
init_user_debug()
{
#ifdef ARCH_INIT_USER_DEBUG
    ARCH_INIT_USER_DEBUG();
#endif
}


static void
get_team_debug_info(team_debug_info &teamDebugInfo)
{
    Thread *thread = thread_get_current_thread();

    cpu_status state = disable_interrupts();
    GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);

    memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));

    RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
    restore_interrupts(state);
}


static status_t
thread_hit_debug_event_internal(debug_debugger_message event,
    const void *message, int32 size, bool requireDebugger, bool &restart)
{
    restart = false;
    Thread *thread = thread_get_current_thread();

    TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", event: %" B_PRIu32
        ", message: %p, size: %" B_PRId32 "\n", thread->id, (uint32)event,
        message, size));

    // check, if there's a debug port already
    bool setPort = !(atomic_get(&thread->debug_info.flags)
        & B_THREAD_DEBUG_INITIALIZED);

    // create a port, if there is none yet
    port_id port = -1;
    if (setPort) {
        char nameBuffer[128];
        snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %" B_PRId32,
            thread->id);

        port = create_port(1, nameBuffer);
        if (port < 0) {
            dprintf("thread_hit_debug_event(): Failed to create debug port: "
                "%s\n", strerror(port));
            return port;
        }
    }

    // check the debug info structures once more: get the debugger port, set
    // the thread's debug port, and update the thread's debug flags
    port_id deletePort = port;
    port_id debuggerPort = -1;
    port_id nubPort = -1;
    status_t error = B_OK;
    cpu_status state = disable_interrupts();
    GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
    SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

    uint32 threadFlags = thread->debug_info.flags;
    threadFlags &= ~B_THREAD_DEBUG_STOP;
    bool debuggerInstalled
        = (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
    if (thread->id == thread->team->debug_info.nub_thread) {
        // Ugh, we're the nub
        // thread. We shouldn't be here.
        TRACE(("thread_hit_debug_event(): Misdirected nub thread: %" B_PRId32
            "\n", thread->id));

        error = B_ERROR;
    } else if (debuggerInstalled || !requireDebugger) {
        if (debuggerInstalled) {
            debuggerPort = thread->team->debug_info.debugger_port;
            nubPort = thread->team->debug_info.nub_port;
        }

        if (setPort) {
            if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
                // someone created a port for us (the port we've created will
                // be deleted below)
                port = thread->debug_info.debug_port;
            } else {
                thread->debug_info.debug_port = port;
                deletePort = -1;    // keep the port
                threadFlags |= B_THREAD_DEBUG_INITIALIZED;
            }
        } else {
            if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
                port = thread->debug_info.debug_port;
            } else {
                // someone deleted our port
                error = B_ERROR;
            }
        }
    } else
        error = B_ERROR;

    // update the flags
    if (error == B_OK)
        threadFlags |= B_THREAD_DEBUG_STOPPED;
    atomic_set(&thread->debug_info.flags, threadFlags);

    update_thread_user_debug_flag(thread);

    threadDebugInfoLocker.Unlock();
    RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
    restore_interrupts(state);

    // delete the superfluous port
    if (deletePort >= 0)
        delete_port(deletePort);

    if (error != B_OK) {
        TRACE(("thread_hit_debug_event() error: thread: %" B_PRId32 ", error: "
            "%" B_PRIx32 "\n", thread->id, error));
        return error;
    }

    // send a message to the debugger port
    if (debuggerInstalled) {
        // update the message's origin info first
        debug_origin *origin = (debug_origin *)message;
        origin->thread = thread->id;
        origin->team = thread->team->id;
        origin->nub_port = nubPort;

        TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", sending "
            "message to debugger port %" B_PRId32 "\n", thread->id,
            debuggerPort));

        error = debugger_write(debuggerPort, event, message, size, false);
    }

    status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
    bool singleStep = false;

    if (error == B_OK) {
        bool done = false;
        while (!done) {
            // read a command from the debug port
            int32 command;
            debugged_thread_message_data commandMessage;
            ssize_t commandMessageSize = read_port_etc(port, &command,
                &commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
                0);

            if (commandMessageSize < 0) {
                error = commandMessageSize;
                TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed "
                    "to receive message from port %" B_PRId32 ": %" B_PRIx32 "\n",
                    thread->id, port, error));
                break;
            }

            switch (command) {
                case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
                    TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
                        "B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
                        thread->id));
                    result = commandMessage.continue_thread.handle_event;

                    singleStep = commandMessage.continue_thread.single_step;
                    done = true;
                    break;

                case B_DEBUGGED_THREAD_SET_CPU_STATE:
                {
                    TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
                        "B_DEBUGGED_THREAD_SET_CPU_STATE\n",
                        thread->id));
                    arch_set_debug_cpu_state(
                        &commandMessage.set_cpu_state.cpu_state);

                    break;
                }

                case B_DEBUGGED_THREAD_GET_CPU_STATE:
                {
                    port_id replyPort = commandMessage.get_cpu_state.reply_port;

                    // prepare the message
                    debug_nub_get_cpu_state_reply replyMessage;
                    replyMessage.error = B_OK;
                    replyMessage.message = event;
                    arch_get_debug_cpu_state(&replyMessage.cpu_state);

                    // send it
                    error = kill_interruptable_write_port(replyPort, event,
                        &replyMessage, sizeof(replyMessage));

                    break;
                }

                case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
                {
                    // Check, if the debugger really changed, i.e. is different
                    // than the one we know.
                    team_debug_info teamDebugInfo;
                    get_team_debug_info(teamDebugInfo);

                    if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
                        if (!debuggerInstalled
                            || teamDebugInfo.debugger_port != debuggerPort) {
                            // debugger was installed or has changed: restart
                            // this function
                            restart = true;
                            done = true;
                        }
                    } else {
                        if (debuggerInstalled) {
                            // debugger is gone: continue the thread normally
                            done = true;
                        }
                    }

                    break;
                }
            }
        }
    } else {
        TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed to send "
            "message to debugger port %" B_PRId32 ": %" B_PRIx32 "\n",
            thread->id, debuggerPort, error));
    }

    // update the thread debug info
    bool destroyThreadInfo = false;
    thread_debug_info threadDebugInfo;

    state = disable_interrupts();
    threadDebugInfoLocker.Lock();

    // check, if the team is still being debugged
    int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
    if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
        // update the single-step flag
        if (singleStep) {
            atomic_or(&thread->debug_info.flags,
                B_THREAD_DEBUG_SINGLE_STEP);
            atomic_or(&thread->flags, THREAD_FLAGS_SINGLE_STEP);
        } else {
            atomic_and(&thread->debug_info.flags,
                ~(int32)B_THREAD_DEBUG_SINGLE_STEP);
        }

        // unset the "stopped" state
        atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);

        update_thread_user_debug_flag(thread);

    } else {
        // the debugger is gone: cleanup our info completely
        threadDebugInfo = thread->debug_info;
        clear_thread_debug_info(&thread->debug_info, false);
        destroyThreadInfo = true;
    }

    threadDebugInfoLocker.Unlock();
    restore_interrupts(state);

    // enable/disable single stepping
    arch_update_thread_single_step();

    if (destroyThreadInfo)
        destroy_thread_debug_info(&threadDebugInfo);

    return (error == B_OK ? result : error);
}


static status_t
thread_hit_debug_event(debug_debugger_message event, const void *message,
    int32 size, bool requireDebugger)
{
    status_t result;
    bool restart;
    do {
        restart = false;
        result = thread_hit_debug_event_internal(event, message, size,
            requireDebugger, restart);
    } while (result >= 0 && restart);

    // Prepare to continue -- we install a debugger change condition, so no one
    // will change the debugger while we're playing with the breakpoint manager.
    // TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
    Team* team = thread_get_current_thread()->team;
    ConditionVariable debugChangeCondition;
    prepare_debugger_change(team, debugChangeCondition);

    if (team->debug_info.breakpoint_manager != NULL) {
        bool isSyscall;
        void* pc = arch_debug_get_interrupt_pc(&isSyscall);
        if (pc != NULL && !isSyscall)
            team->debug_info.breakpoint_manager->PrepareToContinue(pc);
    }

    finish_debugger_change(team);

    return result;
}


static status_t
thread_hit_serious_debug_event(debug_debugger_message event,
    const void *message, int32 messageSize)
{
    // ensure that a debugger is installed for this team
    status_t error = ensure_debugger_installed();
    if (error != B_OK) {
        Thread *thread = thread_get_current_thread();
        dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
            "thread: %" B_PRId32 ": %s\n", thread->id, strerror(error));
        return error;
    }

    // enter the debug loop
    return thread_hit_debug_event(event, message, messageSize, true);
}


void
user_debug_pre_syscall(uint32 syscall, void *args)
{
    // check whether a debugger is installed
    Thread *thread = thread_get_current_thread();
    int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
    if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
        return;

    // check whether pre-syscall tracing is enabled for team or thread
    int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
    if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
        && !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
        return;
    }

    // prepare the message
    debug_pre_syscall message;
    message.syscall = syscall;

    // copy the syscall args
    if (syscall < (uint32)kSyscallCount) {
        if (kSyscallInfos[syscall].parameter_size > 0)
            memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
    }

    thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
        sizeof(message), true);
}


void
user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
    bigtime_t startTime)
{
    // check whether a debugger is installed
    Thread *thread = thread_get_current_thread();
    int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
    if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
        return;

    // check whether post-syscall tracing is enabled for team or thread
    int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
    if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
        && !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
        return;
    }

    // prepare the message
    debug_post_syscall message;
    message.start_time = startTime;
    message.end_time = system_time();
    message.return_value = returnValue;
    message.syscall = syscall;

    // copy the syscall args
    if (syscall < (uint32)kSyscallCount) {
        if (kSyscallInfos[syscall].parameter_size > 0)
            memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
    }

    thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
        sizeof(message), true);
}


/** \brief To be called when an unhandled processor exception (error/fault)
 *      occurred.
 *  \param exception The debug_why_stopped value identifying the kind of fault.
 *  \param signal The signal corresponding to the exception.
 *  \return \c true, if the caller shall continue normally, i.e.
 *      usually send a deadly signal. \c false, if the debugger insists to
 *      continue the program (e.g. because it has removed the cause of the
 *      problem).
 */
bool
user_debug_exception_occurred(debug_exception_type exception, int signal)
{
    // First check whether there's a signal handler installed for the signal.
    // If so, we don't want to install a debugger for the team. We always send
    // the signal instead. An already installed debugger will be notified, if
    // it has requested notifications of signals.
    struct sigaction signalAction;
    if (sigaction(signal, NULL, &signalAction) == 0
        && signalAction.sa_handler != SIG_DFL) {
        return true;
    }

    // prepare the message
    debug_exception_occurred message;
    message.exception = exception;
    message.signal = signal;

    status_t result = thread_hit_serious_debug_event(
        B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
    return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


bool
user_debug_handle_signal(int signal, struct sigaction *handler, bool deadly)
{
    // check, if a debugger is installed and is interested in signals
    Thread *thread = thread_get_current_thread();
    int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
    if (~teamDebugFlags
        & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
        return true;
    }

    // prepare the message
    debug_signal_received message;
    message.signal = signal;
    message.handler = *handler;
    message.deadly = deadly;

    status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
        &message, sizeof(message), true);
    return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


void
user_debug_stop_thread()
{
    // check whether this is actually an emulated single-step notification
    Thread* thread = thread_get_current_thread();
    InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

    bool singleStepped = false;
    if ((atomic_and(&thread->debug_info.flags,
                ~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
            & B_THREAD_DEBUG_NOTIFY_SINGLE_STEP) != 0) {
        singleStepped = true;
    }

    threadDebugInfoLocker.Unlock();

    if (singleStepped) {
        user_debug_single_stepped();
    } else {
        debug_thread_debugged message;
        thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED,
            &message, sizeof(message));
    }
}


void
user_debug_team_created(team_id teamID)
{
    // check, if a debugger is installed and is interested in team creation
    // events
    Thread *thread = thread_get_current_thread();
    int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
    if (~teamDebugFlags
        & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
        return;
    }

    // prepare the message
    debug_team_created message;
    message.new_team = teamID;

    thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
        sizeof(message), true);
}


void
user_debug_team_deleted(team_id teamID, port_id debuggerPort)
{
    if (debuggerPort >= 0) {
        TRACE(("user_debug_team_deleted(team: %" B_PRId32 ", debugger port: "
            "%" B_PRId32 ")\n", teamID, debuggerPort));

        debug_team_deleted message;
        message.origin.thread = -1;
        message.origin.team = teamID;
        message.origin.nub_port = -1;
        write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
            sizeof(message), B_RELATIVE_TIMEOUT, 0);
    }
}


void
user_debug_team_exec()
{
    // check, if a debugger is installed and is interested in team creation
    // events
    Thread *thread = thread_get_current_thread();
    int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
    if (~teamDebugFlags
        & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
        return;
    }

    // prepare the message
    debug_team_exec message;
    message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
        + 1;

    thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
        sizeof(message), true);
}


/*! Called by a new userland thread to update the debugging related flags of
    \c Thread::flags before the thread first enters userland.
    \param thread The calling thread.
*/
void
user_debug_update_new_thread_flags(Thread* thread)
{
    // lock it and update its flags
    InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

    update_thread_user_debug_flag(thread);
    update_thread_breakpoints_flag(thread);
    update_thread_debugger_installed_flag(thread);
}


void
user_debug_thread_created(thread_id threadID)
{
    // check, if a debugger is installed and is interested in thread events
    Thread *thread = thread_get_current_thread();
    int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
    if (~teamDebugFlags
        & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
        return;
    }

    // prepare the message
    debug_thread_created message;
    message.new_thread = threadID;

    thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message,
        sizeof(message), true);
}


void
user_debug_thread_deleted(team_id teamID, thread_id threadID)
{
    // Things are a bit complicated here, since this thread no longer belongs to
    // the debugged team (but to the kernel). So we can't use debugger_write().

    // get the team debug flags and debugger port
    Team* team = Team::Get(teamID);
    if (team == NULL)
        return;
    BReference<Team> teamReference(team, true);

    InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

    int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
    port_id debuggerPort = team->debug_info.debugger_port;
    sem_id writeLock = team->debug_info.debugger_write_lock;

    debugInfoLocker.Unlock();

    // check, if a debugger is installed and is interested in thread events
    if (~teamDebugFlags
        & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
        return;
    }

    // acquire the debugger write lock
    status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
    if (error != B_OK)
        return;

    // re-get the team debug info -- we need to check whether anything changed
    debugInfoLocker.Lock();

    teamDebugFlags = atomic_get(&team->debug_info.flags);
    port_id newDebuggerPort = team->debug_info.debugger_port;

    debugInfoLocker.Unlock();

    // Send the message only if the debugger hasn't changed in the meantime or
    // the team is about to be handed over.
    if (newDebuggerPort == debuggerPort
        || (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
        debug_thread_deleted message;
        message.origin.thread = threadID;
        message.origin.team = teamID;
        message.origin.nub_port = -1;

        write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
            &message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
    }

    // release the debugger write lock
    release_sem(writeLock);
}


/*! Called for a thread that is about to die, cleaning up all user debug
    facilities installed for the thread.
    \param thread The current thread, the one that is going to die.
*/
void
user_debug_thread_exiting(Thread* thread)
{
    // thread is the current thread, so using team is safe
    Team* team = thread->team;

    InterruptsLocker interruptsLocker;

    GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

    int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
    port_id debuggerPort = team->debug_info.debugger_port;

    RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

    // check, if a debugger is installed
    if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
        || debuggerPort < 0) {
        return;
    }

    // detach the profile info and mark the thread dying
    SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

    thread_debug_info& threadDebugInfo = thread->debug_info;
    if (threadDebugInfo.profile.samples == NULL)
        return;

    area_id sampleArea = threadDebugInfo.profile.sample_area;
    int32 sampleCount = threadDebugInfo.profile.sample_count;
    int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
    int32 stackDepth = threadDebugInfo.profile.stack_depth;
    bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
    int32 imageEvent = threadDebugInfo.profile.image_event;
    threadDebugInfo.profile.sample_area = -1;
    threadDebugInfo.profile.samples = NULL;
    threadDebugInfo.profile.buffer_full = false;

    atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);

    threadDebugInfoLocker.Unlock();
    interruptsLocker.Unlock();

    // notify the debugger
    debug_profiler_update message;
    message.origin.thread = thread->id;
    message.origin.team = thread->team->id;
    message.origin.nub_port = -1;   // asynchronous message
    message.sample_count = sampleCount;
    message.dropped_ticks = droppedTicks;
    message.stack_depth = stackDepth;
    message.variable_stack_depth = variableStackDepth;
    message.image_event = imageEvent;
    message.stopped = true;
    debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
        &message, sizeof(message), false);

    if (sampleArea >= 0) {
        area_info areaInfo;
        if (get_area_info(sampleArea, &areaInfo) == B_OK) {
            unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
            delete_area(sampleArea);
        }
    }
}


void
user_debug_image_created(const image_info *imageInfo)
{
    // check, if a debugger is installed and is interested in image events
    Thread *thread = thread_get_current_thread();
    int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
    if (~teamDebugFlags
        & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
        return;
    }

    // prepare the message
    debug_image_created message;
    memcpy(&message.info, imageInfo, sizeof(image_info));
    message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
        + 1;

    thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
        sizeof(message), true);
}


void
user_debug_image_deleted(const image_info *imageInfo)
{
    // check, if a debugger is installed and is interested in image events
    Thread *thread = thread_get_current_thread();
    int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
    if (~teamDebugFlags
        & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
        return;
    }

    // prepare the message
    debug_image_deleted message;
    memcpy(&message.info, imageInfo, sizeof(image_info));
    message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
        + 1;

    thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message,
        sizeof(message), true);
}


void
user_debug_breakpoint_hit(bool software)
{
    // prepare the message
    debug_breakpoint_hit message;
    arch_get_debug_cpu_state(&message.cpu_state);

    thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
        sizeof(message));
}


void
user_debug_watchpoint_hit()
{
    // prepare the message
    debug_watchpoint_hit message;
    arch_get_debug_cpu_state(&message.cpu_state);

    thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
        sizeof(message));
}


void
user_debug_single_stepped()
{
    // clear the single-step thread flag
    Thread* thread = thread_get_current_thread();
    atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_SINGLE_STEP);

    // prepare the message
    debug_single_step message;
    arch_get_debug_cpu_state(&message.cpu_state);

    thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
        sizeof(message));
}


/*! Schedules the profiling timer for the current thread.
    The caller must hold the thread's debug info lock.
    \param thread The current thread.
    \param interval The time after which the timer should fire.
*/
static void
schedule_profiling_timer(Thread* thread, bigtime_t interval)
{
    struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
    thread->debug_info.profile.installed_timer = timer;
    thread->debug_info.profile.timer_end = system_time() + interval;
    add_timer(timer, &profiling_event, interval, B_ONE_SHOT_RELATIVE_TIMER);
}


/*! Samples the current thread's instruction pointer/stack trace.
    The caller must hold the current thread's debug info lock.
    \param flushBuffer Return parameter: Set to \c true when the sampling
        buffer must be flushed.
*/
static bool
profiling_do_sample(bool& flushBuffer)
{
    Thread* thread = thread_get_current_thread();
    thread_debug_info& debugInfo = thread->debug_info;

    if (debugInfo.profile.samples == NULL)
        return false;

    // Check, whether the buffer is full or an image event occurred since the
    // last sample was taken.
    int32 maxSamples = debugInfo.profile.max_samples;
    int32 sampleCount = debugInfo.profile.sample_count;
    int32 stackDepth = debugInfo.profile.stack_depth;
    int32 imageEvent = thread->team->debug_info.image_event;
    if (debugInfo.profile.sample_count > 0) {
        if (debugInfo.profile.last_image_event < imageEvent
            && debugInfo.profile.variable_stack_depth
            && sampleCount + 2 <= maxSamples) {
            // an image event occurred, but we use variable stack depth and
            // have enough room in the buffer to indicate an image event
            addr_t* event = debugInfo.profile.samples + sampleCount;
            event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
            event[1] = imageEvent;
            sampleCount += 2;
            debugInfo.profile.sample_count = sampleCount;
            debugInfo.profile.last_image_event = imageEvent;
        }

        if (debugInfo.profile.last_image_event < imageEvent
            || debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
            if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
                flushBuffer = true;
                return true;
            }

            // We can't flush the buffer now, since we interrupted a kernel
            // function. If the buffer is not full yet, we add the samples,
            // otherwise we have to drop them.
            if (maxSamples - sampleCount < stackDepth) {
                debugInfo.profile.dropped_ticks++;
                return true;
            }
        }
    } else {
        // first sample -- set the image event
        debugInfo.profile.image_event = imageEvent;
        debugInfo.profile.last_image_event = imageEvent;
    }

    // get the samples
    addr_t* returnAddresses = debugInfo.profile.samples
        + debugInfo.profile.sample_count;
    if (debugInfo.profile.variable_stack_depth) {
        // variable sample count per hit
        *returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
            stackDepth - 1, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

        debugInfo.profile.sample_count += *returnAddresses + 1;
    } else {
        // fixed sample count per hit
        if (stackDepth > 1) {
            int32 count = arch_debug_get_stack_trace(returnAddresses,
                stackDepth, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

            for (int32 i = count; i < stackDepth; i++)
                returnAddresses[i] = 0;
        } else
            *returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);

        debugInfo.profile.sample_count += stackDepth;
    }

    return true;
}


static void
profiling_buffer_full(void*)
{
    // It is undefined whether the function is called with interrupts enabled
    // or disabled. We are allowed to enable interrupts, though. First make
    // sure interrupts are disabled.
    disable_interrupts();

    Thread* thread = thread_get_current_thread();
    thread_debug_info& debugInfo = thread->debug_info;

    SpinLocker threadDebugInfoLocker(debugInfo.lock);

    if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
        int32 sampleCount = debugInfo.profile.sample_count;
        int32 droppedTicks = debugInfo.profile.dropped_ticks;
        int32 stackDepth = debugInfo.profile.stack_depth;
        bool variableStackDepth = debugInfo.profile.variable_stack_depth;
        int32 imageEvent = debugInfo.profile.image_event;

        // notify the debugger
        debugInfo.profile.sample_count = 0;
        debugInfo.profile.dropped_ticks = 0;

        threadDebugInfoLocker.Unlock();
        enable_interrupts();

        // prepare the message
        debug_profiler_update message;
        message.sample_count = sampleCount;
        message.dropped_ticks = droppedTicks;
        message.stack_depth = stackDepth;
        message.variable_stack_depth = variableStackDepth;
        message.image_event = imageEvent;
        message.stopped = false;

        thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
            sizeof(message), false);

        disable_interrupts();
        threadDebugInfoLocker.Lock();

        // do the sampling and reschedule timer, if still profiling this thread
        bool flushBuffer;
        if (profiling_do_sample(flushBuffer)) {
            debugInfo.profile.buffer_full = false;
            schedule_profiling_timer(thread, debugInfo.profile.interval);
        }
    }

    threadDebugInfoLocker.Unlock();
    enable_interrupts();
}


/*! Profiling timer event callback.
    Called with interrupts disabled.
*/
static int32
profiling_event(timer* /*unused*/)
{
    Thread* thread = thread_get_current_thread();
    thread_debug_info& debugInfo = thread->debug_info;

    SpinLocker threadDebugInfoLocker(debugInfo.lock);

    bool flushBuffer = false;
    if (profiling_do_sample(flushBuffer)) {
        if (flushBuffer) {
            // The sample buffer needs to be flushed; we'll have to notify the
            // debugger. We can't do that right here. Instead we set a post
            // interrupt callback doing that for us, and don't reschedule the
            // timer yet.
            thread->post_interrupt_callback = profiling_buffer_full;
            debugInfo.profile.installed_timer = NULL;
            debugInfo.profile.buffer_full = true;
        } else
            schedule_profiling_timer(thread, debugInfo.profile.interval);
    } else
        debugInfo.profile.installed_timer = NULL;

    return B_HANDLED_INTERRUPT;
}


/*! Called by the scheduler when a debugged thread has been unscheduled.
    The scheduler lock is being held.
*/
void
user_debug_thread_unscheduled(Thread* thread)
{
    SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

    // if running, cancel the profiling timer
    struct timer* timer = thread->debug_info.profile.installed_timer;
    if (timer != NULL) {
        // track remaining time
        bigtime_t left = thread->debug_info.profile.timer_end - system_time();
        thread->debug_info.profile.interval_left = max_c(left, 0);
        thread->debug_info.profile.installed_timer = NULL;

        // cancel timer
        threadDebugInfoLocker.Unlock();
            // not necessary, but doesn't harm and reduces contention
        cancel_timer(timer);
            // since invoked on the same CPU, this will not possibly wait for
            // an already called timer hook
    }
}


/*!
    Called by the scheduler when a debugged thread has been scheduled.
    The scheduler lock is being held.
*/
void
user_debug_thread_scheduled(Thread* thread)
{
    SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

    if (thread->debug_info.profile.samples != NULL
        && !thread->debug_info.profile.buffer_full) {
        // install profiling timer
        schedule_profiling_timer(thread,
            thread->debug_info.profile.interval_left);
    }
}


/*! \brief Called by the debug nub thread of a team to broadcast a message to
    all threads of the team that are initialized for debugging (and
    thus have a debug port).
*/
static void
broadcast_debugged_thread_message(Thread *nubThread, int32 code,
    const void *message, int32 size)
{
    // iterate through the threads
    thread_info threadInfo;
    int32 cookie = 0;
    while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
            == B_OK) {
        // get the thread and lock it
        Thread* thread = Thread::GetAndLock(threadInfo.thread);
        if (thread == NULL)
            continue;

        BReference<Thread> threadReference(thread, true);
        ThreadLocker threadLocker(thread, true);

        // get the thread's debug port
        InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

        port_id threadDebugPort = -1;
        if (thread && thread != nubThread && thread->team == nubThread->team
            && (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
            && (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
            threadDebugPort = thread->debug_info.debug_port;
        }

        threadDebugInfoLocker.Unlock();
        threadLocker.Unlock();

        // send the message to the thread
        if (threadDebugPort >= 0) {
            status_t error = kill_interruptable_write_port(threadDebugPort,
                code, message, size);
            if (error != B_OK) {
                TRACE(("broadcast_debugged_thread_message(): Failed to send "
                    "message to thread %" B_PRId32 ": %" B_PRIx32 "\n",
                    thread->id, error));
            }
        }
    }
}


static void
nub_thread_cleanup(Thread *nubThread)
{
    TRACE(("nub_thread_cleanup(%" B_PRId32 "): debugger port: %" B_PRId32 "\n",
        nubThread->id, nubThread->team->debug_info.debugger_port));

    ConditionVariable debugChangeCondition;
    prepare_debugger_change(nubThread->team, debugChangeCondition);

    team_debug_info teamDebugInfo;
    bool destroyDebugInfo = false;

    TeamLocker teamLocker(nubThread->team);
        // required by update_threads_debugger_installed_flag()

    cpu_status state = disable_interrupts();
    GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

    team_debug_info &info = nubThread->team->debug_info;
    if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
        && info.nub_thread == nubThread->id) {
        teamDebugInfo = info;
        clear_team_debug_info(&info, false);
        destroyDebugInfo = true;
    }

    // update the thread::flags fields
    update_threads_debugger_installed_flag(nubThread->team);

    RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
    restore_interrupts(state);

    teamLocker.Unlock();

    if (destroyDebugInfo)
        teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();

    finish_debugger_change(nubThread->team);

    if (destroyDebugInfo)
        destroy_team_debug_info(&teamDebugInfo);

    // notify all threads that the debugger is gone
    broadcast_debugged_thread_message(nubThread,
        B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
}


/** \brief Debug nub thread helper function that returns the debug port of
 *  a thread of the same team.
 */
static status_t
debug_nub_thread_get_thread_debug_port(Thread *nubThread,
    thread_id threadID, port_id &threadDebugPort)
{
    threadDebugPort = -1;

    // get the thread
    Thread* thread = Thread::GetAndLock(threadID);
    if (thread == NULL)
        return B_BAD_THREAD_ID;
    BReference<Thread> threadReference(thread, true);
    ThreadLocker threadLocker(thread, true);

    // get the debug port
    InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

    if (thread->team != nubThread->team)
        return B_BAD_VALUE;
    if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) == 0)
        return B_BAD_THREAD_STATE;

    threadDebugPort = thread->debug_info.debug_port;

    threadDebugInfoLocker.Unlock();

    if (threadDebugPort < 0)
        return B_ERROR;

    return B_OK;
}


static status_t
debug_nub_thread(void *)
{
    Thread *nubThread = thread_get_current_thread();

    // check, if we're still the current nub thread and get our port
    cpu_status state = disable_interrupts();
    GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

    if (nubThread->team->debug_info.nub_thread != nubThread->id) {
        RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
        restore_interrupts(state);
        return 0;
    }

    port_id port = nubThread->team->debug_info.nub_port;
    sem_id writeLock = nubThread->team->debug_info.debugger_write_lock;
    BreakpointManager* breakpointManager
        = nubThread->team->debug_info.breakpoint_manager;

    RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
    restore_interrupts(state);

    TRACE(("debug_nub_thread() thread: %" B_PRId32 ", team %" B_PRId32 ", nub "
        "port: %" B_PRId32 "\n", nubThread->id, nubThread->team->id, port));

    // notify all threads that a debugger has been installed
    broadcast_debugged_thread_message(nubThread,
        B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);

    // command processing loop
    while (true) {
        int32 command;
        debug_nub_message_data message;
        ssize_t messageSize = read_port_etc(port, &command, &message,
            sizeof(message), B_KILL_CAN_INTERRUPT, 0);

        if (messageSize < 0) {
            // The port is no longer valid or we were interrupted by a kill
            // signal: If we are still listed in the team's debug info as nub
            // thread, we need to update that.
            nub_thread_cleanup(nubThread);

            TRACE(("nub thread %" B_PRId32 ": terminating: %lx\n",
                nubThread->id, messageSize));

            return messageSize;
        }

        bool sendReply = false;
        union {
            debug_nub_read_memory_reply read_memory;
            debug_nub_write_memory_reply write_memory;
            debug_nub_get_cpu_state_reply get_cpu_state;
            debug_nub_set_breakpoint_reply set_breakpoint;
            debug_nub_set_watchpoint_reply set_watchpoint;
            debug_nub_get_signal_masks_reply get_signal_masks;
            debug_nub_get_signal_handler_reply get_signal_handler;
            debug_nub_start_profiler_reply start_profiler;
            debug_profiler_update profiler_update;
        } reply;
        int32 replySize = 0;
        port_id replyPort = -1;

        // process the command
        switch (command) {
            case B_DEBUG_MESSAGE_READ_MEMORY:
            {
                // get the parameters
                replyPort = message.read_memory.reply_port;
                void *address = message.read_memory.address;
                int32 size = message.read_memory.size;
                status_t result = B_OK;

                // check the parameters
                if (!BreakpointManager::CanAccessAddress(address, false))
                    result = B_BAD_ADDRESS;
                else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE)
                    result = B_BAD_VALUE;

                // read the memory
                size_t bytesRead = 0;
                if (result == B_OK) {
                    result = breakpointManager->ReadMemory(address,
                        reply.read_memory.data, size, bytesRead);
                }
                reply.read_memory.error = result;

                TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_READ_MEMORY: "
                    "reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
                    ", result: %" B_PRIx32 ", read: %ld\n", nubThread->id,
                    replyPort, address, size, result, bytesRead));

                // send only as much data as necessary
                reply.read_memory.size = bytesRead;
                replySize = reply.read_memory.data + bytesRead - (char*)&reply;
                sendReply = true;
                break;
            }

            case B_DEBUG_MESSAGE_WRITE_MEMORY:
            {
                // get the parameters
                replyPort = message.write_memory.reply_port;
                void *address = message.write_memory.address;
                int32 size = message.write_memory.size;
                const char *data = message.write_memory.data;
                int32 realSize = (char*)&message + messageSize - data;
                status_t result = B_OK;

                // check the parameters
                if (!BreakpointManager::CanAccessAddress(address, true))
                    result = B_BAD_ADDRESS;
                else if (size <= 0 || size > realSize)
                    result = B_BAD_VALUE;

                // write the memory
                size_t bytesWritten = 0;
                if (result == B_OK) {
                    result = breakpointManager->WriteMemory(address, data, size,
                        bytesWritten);
                }
                reply.write_memory.error = result;

                TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_WRITE_MEMORY: "
                    "reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
                    ", result: %" B_PRIx32 ", written: %ld\n", nubThread->id,
                    replyPort, address, size, result, bytesWritten));

                reply.write_memory.size = bytesWritten;
                sendReply = true;
                replySize = sizeof(debug_nub_write_memory_reply);
                break;
            }

            case B_DEBUG_MESSAGE_SET_TEAM_FLAGS:
            {
                // get the parameters
                int32 flags = message.set_team_flags.flags
                    & B_TEAM_DEBUG_USER_FLAG_MASK;

                TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_TEAM_FLAGS"
                    ": flags: %" B_PRIx32 "\n", nubThread->id, flags));

                Team *team = thread_get_current_thread()->team;

                // set the flags
                cpu_status state =
                    disable_interrupts();
                GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

                flags |= team->debug_info.flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK;
                atomic_set(&team->debug_info.flags, flags);

                RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
                restore_interrupts(state);

                break;
            }

            case B_DEBUG_MESSAGE_SET_THREAD_FLAGS:
            {
                // get the parameters
                thread_id threadID = message.set_thread_flags.thread;
                int32 flags = message.set_thread_flags.flags
                    & B_THREAD_DEBUG_USER_FLAG_MASK;

                TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_THREAD_FLAGS"
                    ": thread: %" B_PRId32 ", flags: %" B_PRIx32 "\n",
                    nubThread->id, threadID, flags));

                // set the flags
                Thread* thread = Thread::GetAndLock(threadID);
                if (thread == NULL)
                    break;
                BReference<Thread> threadReference(thread, true);
                ThreadLocker threadLocker(thread, true);

                InterruptsSpinLocker threadDebugInfoLocker(
                    thread->debug_info.lock);

                if (thread->team == thread_get_current_thread()->team) {
                    flags |= thread->debug_info.flags
                        & B_THREAD_DEBUG_KERNEL_FLAG_MASK;
                    atomic_set(&thread->debug_info.flags, flags);
                }

                break;
            }

            case B_DEBUG_MESSAGE_CONTINUE_THREAD:
            {
                // get the parameters
                thread_id threadID;
                uint32 handleEvent;
                bool singleStep;

                threadID = message.continue_thread.thread;
                handleEvent = message.continue_thread.handle_event;
                singleStep = message.continue_thread.single_step;

                TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CONTINUE_THREAD"
                    ": thread: %" B_PRId32 ", handle event: %" B_PRIu32 ", "
                    "single step: %d\n", nubThread->id, threadID, handleEvent,
                    singleStep));

                // find the thread and get its debug port
                port_id threadDebugPort = -1;
                status_t result = debug_nub_thread_get_thread_debug_port(
                    nubThread, threadID, threadDebugPort);

                // send a message to the debugged thread
                if (result == B_OK) {
                    debugged_thread_continue commandMessage;
                    commandMessage.handle_event = handleEvent;
                    commandMessage.single_step = singleStep;

                    result = write_port(threadDebugPort,
                        B_DEBUGGED_THREAD_MESSAGE_CONTINUE,
                        &commandMessage, sizeof(commandMessage));
                }

                break;
            }

            case B_DEBUG_MESSAGE_SET_CPU_STATE:
            {
                // get the parameters
                thread_id threadID = message.set_cpu_state.thread;
                const debug_cpu_state &cpuState
                    = message.set_cpu_state.cpu_state;

                TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_CPU_STATE"
                    ": thread: %" B_PRId32 "\n", nubThread->id, threadID));

                // find the thread and get its debug port
                port_id threadDebugPort = -1;
                status_t result = debug_nub_thread_get_thread_debug_port(
                    nubThread, threadID, threadDebugPort);

                // send a message to the debugged thread
                if (result == B_OK) {
                    debugged_thread_set_cpu_state commandMessage;
                    memcpy(&commandMessage.cpu_state, &cpuState,
                        sizeof(debug_cpu_state));
                    write_port(threadDebugPort,
                        B_DEBUGGED_THREAD_SET_CPU_STATE,
                        &commandMessage, sizeof(commandMessage));
                }

                break;
            }

            case B_DEBUG_MESSAGE_GET_CPU_STATE:
            {
                // get the parameters
                thread_id threadID = message.get_cpu_state.thread;
                replyPort = message.get_cpu_state.reply_port;

                TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_CPU_STATE"
                    ": thread: %" B_PRId32 "\n", nubThread->id, threadID));

                // find the thread and get its debug port
                port_id threadDebugPort = -1;
                status_t result = debug_nub_thread_get_thread_debug_port(
                    nubThread, threadID, threadDebugPort);

                // send a message to the debugged thread
                if (threadDebugPort >= 0) {
                    debugged_thread_get_cpu_state commandMessage;
                    commandMessage.reply_port = replyPort;
                    result = write_port(threadDebugPort,
                        B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage,
                        sizeof(commandMessage));
                }

                // send a reply to the debugger in case of error
                if (result != B_OK) {
                    reply.get_cpu_state.error = result;
                    sendReply = true;
                    replySize = sizeof(reply.get_cpu_state);
                }

                break;
            }

            case B_DEBUG_MESSAGE_SET_BREAKPOINT:
            {
                // get the parameters
                replyPort = message.set_breakpoint.reply_port;
                void *address = message.set_breakpoint.address;

                TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_BREAKPOINT"
                    ": address: %p\n", nubThread->id, address));

                // check the address
                status_t result = B_OK;
                if (address == NULL
                    || !BreakpointManager::CanAccessAddress(address, false)) {
                    result = B_BAD_ADDRESS;
                }

                // set the breakpoint
                if (result == B_OK)
                    result = breakpointManager->InstallBreakpoint(address);

                if (result == B_OK)
                    update_threads_breakpoints_flag();

                // prepare the reply
                reply.set_breakpoint.error = result;
                replySize = sizeof(reply.set_breakpoint);
                sendReply = true;

                break;
            }

            case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT:
            {
                // get the parameters
                void *address = message.clear_breakpoint.address;

                TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_BREAKPOINT"
                    ": address: %p\n", nubThread->id, address));

                // check the address
                status_t result = B_OK;
                if (address == NULL
                    || !BreakpointManager::CanAccessAddress(address, false)) {
                    result = B_BAD_ADDRESS;
                }

                // clear the breakpoint
                if (result == B_OK)
                    result = breakpointManager->UninstallBreakpoint(address);

                if (result == B_OK)
                    update_threads_breakpoints_flag();

                break;
            }

            case B_DEBUG_MESSAGE_SET_WATCHPOINT:
            {
                // get the parameters
                replyPort = message.set_watchpoint.reply_port;
                void *address = message.set_watchpoint.address;
                uint32 type = message.set_watchpoint.type;
                int32 length = message.set_watchpoint.length;

                TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_WATCHPOINT"
                    ": address: %p, type: %" B_PRIu32 ", length: %" B_PRId32 "\n",
                    nubThread->id, address, type, length));

                // check the address and size
                status_t result = B_OK;
                if (address == NULL
                    || !BreakpointManager::CanAccessAddress(address, false)) {
                    result = B_BAD_ADDRESS;
                }
                if (length < 0)
                    result = B_BAD_VALUE;

                // set the watchpoint
                if (result == B_OK) {
                    result = breakpointManager->InstallWatchpoint(address, type,
                        length);
                }

                if (result == B_OK)
                    update_threads_breakpoints_flag();

                // prepare the reply
                reply.set_watchpoint.error = result;
                replySize = sizeof(reply.set_watchpoint);
                sendReply = true;

                break;
            }

            case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT:
            {
                // get the parameters
                void *address = message.clear_watchpoint.address;

                TRACE(("nub thread %"
		case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT:
		{
			// get the parameters
			void *address = message.clear_watchpoint.address;

			TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_WATCHPOINT"
				": address: %p\n", nubThread->id, address));

			// check the address
			status_t result = B_OK;
			if (address == NULL
				|| !BreakpointManager::CanAccessAddress(address, false)) {
				result = B_BAD_ADDRESS;
			}

			// clear the watchpoint
			if (result == B_OK)
				result = breakpointManager->UninstallWatchpoint(address);

			if (result == B_OK)
				update_threads_breakpoints_flag();

			break;
		}

		case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS:
		{
			// get the parameters
			thread_id threadID = message.set_signal_masks.thread;
			uint64 ignore = message.set_signal_masks.ignore_mask;
			uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask;
			uint32 ignoreOp = message.set_signal_masks.ignore_op;
			uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op;

			TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_MASKS"
				": thread: %" B_PRId32 ", ignore: %" B_PRIx64 " (op: %"
				B_PRIu32 "), ignore once: %" B_PRIx64 " (op: %" B_PRIu32
				")\n", nubThread->id, threadID, ignore, ignoreOp,
				ignoreOnce, ignoreOnceOp));

			// set the masks
			Thread* thread = Thread::GetAndLock(threadID);
			if (thread == NULL)
				break;
			BReference<Thread> threadReference(thread, true);
			ThreadLocker threadLocker(thread, true);

			InterruptsSpinLocker threadDebugInfoLocker(
				thread->debug_info.lock);

			if (thread->team == thread_get_current_thread()->team) {
				thread_debug_info &threadDebugInfo = thread->debug_info;
				// set ignore mask
				switch (ignoreOp) {
					case B_DEBUG_SIGNAL_MASK_AND:
						threadDebugInfo.ignore_signals &= ignore;
						break;
					case B_DEBUG_SIGNAL_MASK_OR:
						threadDebugInfo.ignore_signals |= ignore;
						break;
					case B_DEBUG_SIGNAL_MASK_SET:
						threadDebugInfo.ignore_signals = ignore;
						break;
				}

				// set ignore once mask
				switch (ignoreOnceOp) {
					case B_DEBUG_SIGNAL_MASK_AND:
						threadDebugInfo.ignore_signals_once &= ignoreOnce;
						break;
					case B_DEBUG_SIGNAL_MASK_OR:
						threadDebugInfo.ignore_signals_once |= ignoreOnce;
						break;
					case B_DEBUG_SIGNAL_MASK_SET:
						threadDebugInfo.ignore_signals_once = ignoreOnce;
						break;
				}
			}

			break;
		}

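		// Note: while setting the masks above silently ignores an unknown or
		// foreign thread, reading them below always yields a reply -- either
		// the thread's current masks or B_BAD_THREAD_ID.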
		case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS:
		{
			// get the parameters
			replyPort = message.get_signal_masks.reply_port;
			thread_id threadID = message.get_signal_masks.thread;
			status_t result = B_OK;

			// get the masks
			uint64 ignore = 0;
			uint64 ignoreOnce = 0;

			Thread* thread = Thread::GetAndLock(threadID);
			if (thread != NULL) {
				BReference<Thread> threadReference(thread, true);
				ThreadLocker threadLocker(thread, true);

				InterruptsSpinLocker threadDebugInfoLocker(
					thread->debug_info.lock);

				ignore = thread->debug_info.ignore_signals;
				ignoreOnce = thread->debug_info.ignore_signals_once;
			} else
				result = B_BAD_THREAD_ID;

			TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_MASKS"
				": reply port: %" B_PRId32 ", thread: %" B_PRId32 ", "
				"ignore: %" B_PRIx64 ", ignore once: %" B_PRIx64 ", result: "
				"%" B_PRIx32 "\n", nubThread->id, replyPort, threadID,
				ignore, ignoreOnce, result));

			// prepare the message
			reply.get_signal_masks.error = result;
			reply.get_signal_masks.ignore_mask = ignore;
			reply.get_signal_masks.ignore_once_mask = ignoreOnce;
			replySize = sizeof(reply.get_signal_masks);
			sendReply = true;
			break;
		}

		case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER:
		{
			// get the parameters
			int signal = message.set_signal_handler.signal;
			struct sigaction &handler = message.set_signal_handler.handler;

			TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER"
				": signal: %d, handler: %p\n", nubThread->id, signal,
				handler.sa_handler));

			// set the handler
			sigaction(signal, &handler, NULL);

			break;
		}

		case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER:
		{
			// get the parameters
			replyPort = message.get_signal_handler.reply_port;
			int signal = message.get_signal_handler.signal;
			status_t result = B_OK;

			// get the handler
			if (sigaction(signal, NULL, &reply.get_signal_handler.handler)
					!= 0) {
				result = errno;
			}

			TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER"
				": reply port: %" B_PRId32 ", signal: %d, handler: %p\n",
				nubThread->id, replyPort, signal,
				reply.get_signal_handler.handler.sa_handler));

			// prepare the message
			reply.get_signal_handler.error = result;
			replySize = sizeof(reply.get_signal_handler);
			sendReply = true;
			break;
		}

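		// Debugger handover: the old debugger first sends
		// B_DEBUG_MESSAGE_PREPARE_HANDOVER (below), which stops further
		// writes to its port and removes all installed breakpoints. The new
		// debugger then installs itself via install_team_debugger(), which
		// reuses the existing nub and eventually sends us
		// B_DEBUG_MESSAGE_HANDED_OVER so that the debugged threads can be
		// notified of the change.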
		case B_DEBUG_MESSAGE_PREPARE_HANDOVER:
		{
			TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_PREPARE_HANDOVER"
				"\n", nubThread->id));

			Team *team = nubThread->team;

			// Acquire the debugger write lock. As soon as we have it and
			// have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread
			// will write anything to the debugger port anymore.
			status_t result = acquire_sem_etc(writeLock, 1,
				B_KILL_CAN_INTERRUPT, 0);
			if (result == B_OK) {
				// set the respective team debug flag
				cpu_status state = disable_interrupts();
				GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

				atomic_or(&team->debug_info.flags,
					B_TEAM_DEBUG_DEBUGGER_HANDOVER);
				BreakpointManager* breakpointManager
					= team->debug_info.breakpoint_manager;

				RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
				restore_interrupts(state);

				// remove all installed breakpoints
				breakpointManager->RemoveAllBreakpoints();

				release_sem(writeLock);
			} else {
				// We probably got a SIGKILL. If so, we will terminate when
				// reading the next message fails.
			}

			break;
		}

		case B_DEBUG_MESSAGE_HANDED_OVER:
		{
			// notify all threads that the debugger has changed
			broadcast_debugged_thread_message(nubThread,
				B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);

			break;
		}

		case B_DEBUG_START_PROFILER:
		{
			// get the parameters
			thread_id threadID = message.start_profiler.thread;
			replyPort = message.start_profiler.reply_port;
			area_id sampleArea = message.start_profiler.sample_area;
			int32 stackDepth = message.start_profiler.stack_depth;
			bool variableStackDepth
				= message.start_profiler.variable_stack_depth;
			bigtime_t interval = max_c(message.start_profiler.interval,
				B_DEBUG_MIN_PROFILE_INTERVAL);
			status_t result = B_OK;

			TRACE(("nub thread %" B_PRId32 ": B_DEBUG_START_PROFILER: "
				"thread: %" B_PRId32 ", sample area: %" B_PRId32 "\n",
				nubThread->id, threadID, sampleArea));

			if (stackDepth < 1)
				stackDepth = 1;
			else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH)
				stackDepth = B_DEBUG_STACK_TRACE_DEPTH;

			// provision for an extra entry per hit (for the number of
			// samples), if variable stack depth
			if (variableStackDepth)
				stackDepth++;

			// clone the sample area
			area_info areaInfo;
			if (result == B_OK)
				result = get_area_info(sampleArea, &areaInfo);

			area_id clonedSampleArea = -1;
			void* samples = NULL;
			if (result == B_OK) {
				clonedSampleArea = clone_area("profiling samples", &samples,
					B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA,
					sampleArea);
				if (clonedSampleArea >= 0) {
					// we need the memory locked
					result = lock_memory(samples, areaInfo.size,
						B_READ_DEVICE);
					if (result != B_OK) {
						delete_area(clonedSampleArea);
						clonedSampleArea = -1;
					}
				} else
					result = clonedSampleArea;
			}

			// get the thread and set the profile info
			int32 imageEvent = nubThread->team->debug_info.image_event;
			if (result == B_OK) {
				Thread* thread = Thread::GetAndLock(threadID);
				BReference<Thread> threadReference(thread, true);
				ThreadLocker threadLocker(thread, true);

				if (thread != NULL && thread->team == nubThread->team) {
					thread_debug_info &threadDebugInfo = thread->debug_info;

					InterruptsSpinLocker threadDebugInfoLocker(
						threadDebugInfo.lock);

					if (threadDebugInfo.profile.samples == NULL) {
						threadDebugInfo.profile.interval = interval;
						threadDebugInfo.profile.sample_area
							= clonedSampleArea;
						threadDebugInfo.profile.samples = (addr_t*)samples;
						threadDebugInfo.profile.max_samples
							= areaInfo.size / sizeof(addr_t);
						threadDebugInfo.profile.flush_threshold
							= threadDebugInfo.profile.max_samples
								* B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD
								/ 100;
						threadDebugInfo.profile.sample_count = 0;
						threadDebugInfo.profile.dropped_ticks = 0;
						threadDebugInfo.profile.stack_depth = stackDepth;
						threadDebugInfo.profile.variable_stack_depth
							= variableStackDepth;
						threadDebugInfo.profile.buffer_full = false;
						threadDebugInfo.profile.interval_left = interval;
						threadDebugInfo.profile.installed_timer = NULL;
						threadDebugInfo.profile.image_event = imageEvent;
						threadDebugInfo.profile.last_image_event
							= imageEvent;
					} else
						result = B_BAD_VALUE;
				} else
					result = B_BAD_THREAD_ID;
			}

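			// On success, ownership of the cloned and locked sample area has
			// passed to the thread's profile info; it is unlocked and deleted
			// again when profiling is stopped (see B_DEBUG_STOP_PROFILER
			// below).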
			// on error unlock and delete the sample area
			if (result != B_OK) {
				if (clonedSampleArea >= 0) {
					unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
					delete_area(clonedSampleArea);
				}
			}

			// send a reply to the debugger
			reply.start_profiler.error = result;
			reply.start_profiler.interval = interval;
			reply.start_profiler.image_event = imageEvent;
			sendReply = true;
			replySize = sizeof(reply.start_profiler);

			break;
		}

		case B_DEBUG_STOP_PROFILER:
		{
			// get the parameters
			thread_id threadID = message.stop_profiler.thread;
			replyPort = message.stop_profiler.reply_port;
			status_t result = B_OK;

			TRACE(("nub thread %" B_PRId32 ": B_DEBUG_STOP_PROFILER: "
				"thread: %" B_PRId32 "\n", nubThread->id, threadID));

			area_id sampleArea = -1;
			addr_t* samples = NULL;
			int32 sampleCount = 0;
			int32 stackDepth = 0;
			bool variableStackDepth = false;
			int32 imageEvent = 0;
			int32 droppedTicks = 0;

			// get the thread and detach the profile info
			Thread* thread = Thread::GetAndLock(threadID);
			BReference<Thread> threadReference(thread, true);
			ThreadLocker threadLocker(thread, true);

			if (thread && thread->team == nubThread->team) {
				thread_debug_info &threadDebugInfo = thread->debug_info;

				InterruptsSpinLocker threadDebugInfoLocker(
					threadDebugInfo.lock);

				if (threadDebugInfo.profile.samples != NULL) {
					sampleArea = threadDebugInfo.profile.sample_area;
					samples = threadDebugInfo.profile.samples;
					sampleCount = threadDebugInfo.profile.sample_count;
					droppedTicks = threadDebugInfo.profile.dropped_ticks;
					stackDepth = threadDebugInfo.profile.stack_depth;
					variableStackDepth
						= threadDebugInfo.profile.variable_stack_depth;
					imageEvent = threadDebugInfo.profile.image_event;
					threadDebugInfo.profile.sample_area = -1;
					threadDebugInfo.profile.samples = NULL;
					threadDebugInfo.profile.buffer_full = false;
					threadDebugInfo.profile.dropped_ticks = 0;
				} else
					result = B_BAD_VALUE;
			} else
				result = B_BAD_THREAD_ID;

			threadLocker.Unlock();

			// prepare the reply
			if (result == B_OK) {
				reply.profiler_update.origin.thread = threadID;
				reply.profiler_update.image_event = imageEvent;
				reply.profiler_update.stack_depth = stackDepth;
				reply.profiler_update.variable_stack_depth
					= variableStackDepth;
				reply.profiler_update.sample_count = sampleCount;
				reply.profiler_update.dropped_ticks = droppedTicks;
				reply.profiler_update.stopped = true;
			} else
				reply.profiler_update.origin.thread = result;

			replySize = sizeof(debug_profiler_update);
			sendReply = true;

			if (sampleArea >= 0) {
				area_info areaInfo;
				if (get_area_info(sampleArea, &areaInfo) == B_OK) {
					unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
					delete_area(sampleArea);
				}
			}
		}
	}

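	// Common reply path: every request that set sendReply above is answered
	// on its reply port, reusing the original command code as the message
	// code.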
	// send the reply, if necessary
	if (sendReply) {
		status_t error = kill_interruptable_write_port(replyPort, command,
			&reply, replySize);

		if (error != B_OK) {
			// The debugger port no longer exists or we got interrupted by
			// a kill signal. In either case we terminate.
			TRACE(("nub thread %" B_PRId32 ": failed to send reply to port "
				"%" B_PRId32 ": %s\n", nubThread->id, replyPort,
				strerror(error)));

			nub_thread_cleanup(nubThread);
			return error;
		}
	}
	}
}


/** \brief Helper function for install_team_debugger() that sets up the team
	and thread debug infos.

	The caller must hold the team's lock as well as the team debug info lock.

	The function also clears the arch specific team and thread debug infos
	(including, among other things, formerly set break/watchpoints).
*/
static void
install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam,
	port_id debuggerPort, port_id nubPort, thread_id nubThread,
	sem_id debuggerPortWriteLock, thread_id causingThread)
{
	atomic_set(&team->debug_info.flags,
		B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	team->debug_info.nub_port = nubPort;
	team->debug_info.nub_thread = nubThread;
	team->debug_info.debugger_team = debuggerTeam;
	team->debug_info.debugger_port = debuggerPort;
	team->debug_info.debugger_write_lock = debuggerPortWriteLock;
	team->debug_info.causing_thread = causingThread;

	arch_clear_team_debug_info(&team->debug_info.arch_info);

	// set the user debug flags and signal masks of all threads to the default
	for (Thread *thread = team->thread_list; thread;
			thread = thread->team_next) {
		SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if (thread->id == nubThread) {
			atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD);
		} else {
			int32 flags = thread->debug_info.flags
				& ~B_THREAD_DEBUG_USER_FLAG_MASK;
			atomic_set(&thread->debug_info.flags,
				flags | B_THREAD_DEBUG_DEFAULT_FLAGS);
			thread->debug_info.ignore_signals = 0;
			thread->debug_info.ignore_signals_once = 0;

			arch_clear_thread_debug_info(&thread->debug_info.arch_info);
		}
	}

	// update the thread::flags fields
	update_threads_debugger_installed_flag(team);
}


static port_id
install_team_debugger(team_id teamID, port_id debuggerPort,
	thread_id causingThread, bool useDefault, bool dontReplace)
{
	TRACE(("install_team_debugger(team: %" B_PRId32 ", port: %" B_PRId32 ", "
		"default: %d, dontReplace: %d)\n", teamID, debuggerPort, useDefault,
		dontReplace));

	if (useDefault)
		debuggerPort = atomic_get(&sDefaultDebuggerPort);

	// get the debugger team
	port_info debuggerPortInfo;
	status_t error = get_port_info(debuggerPort, &debuggerPortInfo);
	if (error != B_OK) {
		TRACE(("install_team_debugger(): Failed to get debugger port info: "
			"%" B_PRIx32 "\n", error));
		return error;
	}
	team_id debuggerTeam = debuggerPortInfo.team;

	// Check the debugger team: It must neither be the kernel team nor the
	// debugged team.
	if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) {
		TRACE(("install_team_debugger(): Can't debug kernel or debugger team. "
			"debugger: %" B_PRId32 ", debugged: %" B_PRId32 "\n", debuggerTeam,
			teamID));
		return B_NOT_ALLOWED;
	}

	// get the team
	Team* team;
	ConditionVariable debugChangeCondition;
	error = prepare_debugger_change(teamID, debugChangeCondition, team);
	if (error != B_OK)
		return error;

	// get the real team ID
	teamID = team->id;

	// check whether a debugger is already installed

	bool done = false;
	port_id result = B_ERROR;
	bool handOver = false;
	port_id oldDebuggerPort = -1;
	port_id nubPort = -1;

	TeamLocker teamLocker(team);
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = team->debug_info.flags;

	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// There's already a debugger installed.
		if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) {
			if (dontReplace) {
				// We're fine with already having a debugger.
				error = B_OK;
				done = true;
				result = team->debug_info.nub_port;
			} else {
				// a handover to another debugger is requested
				// Set the handing-over flag -- we'll clear both flags after
				// having sent the handed-over message to the new debugger.
				atomic_or(&team->debug_info.flags,
					B_TEAM_DEBUG_DEBUGGER_HANDING_OVER);

				oldDebuggerPort = team->debug_info.debugger_port;
				result = nubPort = team->debug_info.nub_port;
				if (causingThread < 0)
					causingThread = team->debug_info.causing_thread;

				// set the new debugger
				install_team_debugger_init_debug_infos(team, debuggerTeam,
					debuggerPort, nubPort, team->debug_info.nub_thread,
					team->debug_info.debugger_write_lock, causingThread);

				handOver = true;
				done = true;
			}
		} else {
			// there's already a debugger installed
			error = (dontReplace ? B_OK : B_BAD_VALUE);
			done = true;
			result = team->debug_info.nub_port;
		}
	} else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0
		&& useDefault) {
		// No debugger yet, disable_debugger() had been invoked, and we
		// would install the default debugger. Just fail.
		error = B_BAD_VALUE;
	}

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
	restore_interrupts(state);
	teamLocker.Unlock();

	if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) {
		// The old debugger must just have died. Just proceed as
		// if there was no debugger installed. We may still be too
		// early, in which case we'll fail, but this race condition
		// should be unbelievably rare and relatively harmless.
		handOver = false;
		done = false;
	}

	if (handOver) {
		// prepare the handed-over message
		debug_handed_over notification;
		notification.origin.thread = -1;
		notification.origin.team = teamID;
		notification.origin.nub_port = nubPort;
		notification.debugger = debuggerTeam;
		notification.debugger_port = debuggerPort;
		notification.causing_thread = causingThread;

		// notify the new debugger
		error = write_port_etc(debuggerPort,
			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
		if (error != B_OK) {
			dprintf("install_team_debugger(): Failed to send message to new "
				"debugger: %s\n", strerror(error));
		}

		// clear the handed-over and handing-over flags
		state = disable_interrupts();
		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		atomic_and(&team->debug_info.flags,
			~(B_TEAM_DEBUG_DEBUGGER_HANDOVER
				| B_TEAM_DEBUG_DEBUGGER_HANDING_OVER));

		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
		restore_interrupts(state);

		finish_debugger_change(team);

		// notify the nub thread
		kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER,
			NULL, 0);

		// notify the old debugger
		error = write_port_etc(oldDebuggerPort,
			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
		if (error != B_OK) {
			TRACE(("install_team_debugger(): Failed to send message to old "
				"debugger: %s\n", strerror(error)));
		}

		TRACE(("install_team_debugger() done: handed over to debugger: team: "
			"%" B_PRId32 ", port: %" B_PRId32 "\n", debuggerTeam,
			debuggerPort));

		return result;
	}

	if (done || error != B_OK) {
		TRACE(("install_team_debugger() done1: %" B_PRId32 "\n",
			(error == B_OK ? result : error)));
		finish_debugger_change(team);
		return (error == B_OK ? result : error);
	}

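	// No debugger installed yet: set up the nub infrastructure (debugger
	// write lock, nub port, breakpoint manager, nub thread) before
	// publishing it in the team's debug info.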
	// create the debugger write lock semaphore
	char nameBuffer[B_OS_NAME_LENGTH];
	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debugger port "
		"write", teamID);
	sem_id debuggerWriteLock = create_sem(1, nameBuffer);
	if (debuggerWriteLock < 0)
		error = debuggerWriteLock;

	// create the nub port
	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug", teamID);
	if (error == B_OK) {
		nubPort = create_port(1, nameBuffer);
		if (nubPort < 0)
			error = nubPort;
		else
			result = nubPort;
	}

	// make the debugger team the port owner; thus we know if the debugger is
	// gone and can clean up
	if (error == B_OK)
		error = set_port_owner(nubPort, debuggerTeam);

	// create the breakpoint manager
	BreakpointManager* breakpointManager = NULL;
	if (error == B_OK) {
		breakpointManager = new(std::nothrow) BreakpointManager;
		if (breakpointManager != NULL)
			error = breakpointManager->Init();
		else
			error = B_NO_MEMORY;
	}

	// spawn the nub thread
	thread_id nubThread = -1;
	if (error == B_OK) {
		snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug task",
			teamID);
		nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer,
			B_NORMAL_PRIORITY, NULL, teamID);
		if (nubThread < 0)
			error = nubThread;
	}

	// now adjust the debug info accordingly
	if (error == B_OK) {
		TeamLocker teamLocker(team);
		state = disable_interrupts();
		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		team->debug_info.breakpoint_manager = breakpointManager;
		install_team_debugger_init_debug_infos(team, debuggerTeam,
			debuggerPort, nubPort, nubThread, debuggerWriteLock,
			causingThread);

		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
		restore_interrupts(state);
	}

	finish_debugger_change(team);

	// if everything went fine, resume the nub thread, otherwise clean up
	if (error == B_OK) {
		resume_thread(nubThread);
	} else {
		// delete port and terminate thread
		if (nubPort >= 0) {
			set_port_owner(nubPort, B_CURRENT_TEAM);
			delete_port(nubPort);
		}
		if (nubThread >= 0) {
			int32 result;
			wait_for_thread(nubThread, &result);
		}

		delete breakpointManager;
	}

	TRACE(("install_team_debugger() done2: %" B_PRId32 "\n",
		(error == B_OK ? result : error)));
	return (error == B_OK ? result : error);
}


static status_t
ensure_debugger_installed()
{
	port_id port = install_team_debugger(B_CURRENT_TEAM, -1,
		thread_get_current_thread_id(), true, true);
	return port >= 0 ? B_OK : port;
}


// #pragma mark -


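/*!	Typically reached through the userland debugger() call: reports a
	debugger call event for the calling thread, installing the default
	debugger first, if none is installed yet. For example, userland code like
		debugger("unexpected state");
	ends up here with \a userMessage pointing to the message string.
*/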
void
_user_debugger(const char *userMessage)
{
	// install the default debugger, if there is none yet
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		// time to commit suicide
		char buffer[128];
		ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer));
		if (length >= 0) {
			dprintf("_user_debugger(): Failed to install debugger. Message is: "
				"`%s'\n", buffer);
		} else {
			dprintf("_user_debugger(): Failed to install debugger. Message is: "
				"%p (%s)\n", userMessage, strerror(length));
		}
		_user_exit_team(1);
	}

	// prepare the message
	debug_debugger_call message;
	message.message = (void*)userMessage;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message,
		sizeof(message), true);
}


int
_user_disable_debugger(int state)
{
	Team *team = thread_get_current_thread()->team;

	TRACE(("_user_disable_debugger(%d): team: %" B_PRId32 "\n", state,
		team->id));

	cpu_status cpuState = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 oldFlags;
	if (state) {
		oldFlags = atomic_or(&team->debug_info.flags,
			B_TEAM_DEBUG_DEBUGGER_DISABLED);
	} else {
		oldFlags = atomic_and(&team->debug_info.flags,
			~B_TEAM_DEBUG_DEBUGGER_DISABLED);
	}

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
	restore_interrupts(cpuState);

	// TODO: Check whether the return value is really the old state.
	return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED);
}


status_t
_user_install_default_debugger(port_id debuggerPort)
{
	// if supplied, check whether the port is a valid port
	if (debuggerPort >= 0) {
		port_info portInfo;
		status_t error = get_port_info(debuggerPort, &portInfo);
		if (error != B_OK)
			return error;

		// the debugger team must not be the kernel team
		if (portInfo.team == team_get_kernel_team_id())
			return B_NOT_ALLOWED;
	}

	atomic_set(&sDefaultDebuggerPort, debuggerPort);

	return B_OK;
}


port_id
_user_install_team_debugger(team_id teamID, port_id debuggerPort)
{
	return install_team_debugger(teamID, debuggerPort, -1, false, false);
}


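/*!	Uninstalls the debugger from team \a teamID. The actual teardown is done
	by the nub thread itself once its port has been deleted below; we merely
	wait for it to finish.
*/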
status_t
_user_remove_team_debugger(team_id teamID)
{
	Team* team;
	ConditionVariable debugChangeCondition;
	status_t error = prepare_debugger_change(teamID, debugChangeCondition,
		team);
	if (error != B_OK)
		return error;

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	thread_id nubThread = -1;
	port_id nubPort = -1;

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// there's a debugger installed
		nubThread = team->debug_info.nub_thread;
		nubPort = team->debug_info.nub_port;
	} else {
		// no debugger installed
		error = B_BAD_VALUE;
	}

	debugInfoLocker.Unlock();

	// Delete the nub port -- this will cause the nub thread to terminate and
	// remove the debugger.
	if (nubPort >= 0)
		delete_port(nubPort);

	finish_debugger_change(team);

	// wait for the nub thread
	if (nubThread >= 0)
		wait_for_thread(nubThread, NULL);

	return error;
}


status_t
_user_debug_thread(thread_id threadID)
{
	TRACE(("[%" B_PRId32 "] _user_debug_thread(%" B_PRId32 ")\n",
		find_thread(NULL), threadID));

	// get the thread
	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// we can't debug the kernel team
	if (thread->team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	InterruptsLocker interruptsLocker;
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// If the thread is already dying, it's too late to debug it.
	if ((thread->debug_info.flags & B_THREAD_DEBUG_DYING) != 0)
		return B_BAD_THREAD_ID;

	// don't debug the nub thread
	if ((thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) != 0)
		return B_NOT_ALLOWED;

	// already marked stopped?
	if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0)
		return B_OK;

	// set the flag that tells the thread to stop as soon as possible
	atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP);

	update_thread_user_debug_flag(thread);

	// resume/interrupt the thread, if necessary
	threadDebugInfoLocker.Unlock();
	SpinLocker schedulerLocker(thread->scheduler_lock);

	switch (thread->state) {
		case B_THREAD_SUSPENDED:
			// thread suspended: wake it up
			scheduler_enqueue_in_run_queue(thread);
			break;

		default:
			// thread may be waiting: interrupt it
			thread_interrupt(thread, false);
				// TODO: If the thread is already in the kernel and e.g.
				// about to acquire a semaphore (before
				// thread_prepare_to_block()), we won't interrupt it.
				// Maybe we should rather send a signal (SIGTRAP).
			schedulerLocker.Unlock();

			schedulerLocker.SetTo(thread_get_current_thread()->scheduler_lock,
				false);
			scheduler_reschedule_if_necessary_locked();
			break;
	}

	return B_OK;
}


void
_user_wait_for_debugger(void)
{
	debug_thread_debugged message;
	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message,
		sizeof(message), false);
}


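/*!	Lets a team set a break- or watchpoint on itself while no debugger is
	installed. Once a debugger is installed, break- and watchpoints have to
	be managed through the debug nub (B_DEBUG_MESSAGE_SET_BREAKPOINT etc.)
	instead.
*/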
status_t
_user_set_debugger_breakpoint(void *address, uint32 type, int32 length,
	bool watchpoint)
{
	// check the address and size
	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
		return B_BAD_ADDRESS;
	if (watchpoint && length < 0)
		return B_BAD_VALUE;

	// check whether a debugger is installed already
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		return B_BAD_VALUE;

	// We can't help it: there's a small but relatively harmless race condition
	// here, since a debugger could be installed in the meantime. The worst
	// case is that we install a break/watchpoint the debugger doesn't know
	// about.

	// set the break/watchpoint
	status_t result;
	if (watchpoint)
		result = arch_set_watchpoint(address, type, length);
	else
		result = arch_set_breakpoint(address);

	if (result == B_OK)
		update_threads_breakpoints_flag();

	return result;
}


status_t
_user_clear_debugger_breakpoint(void *address, bool watchpoint)
{
	// check the address
	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
		return B_BAD_ADDRESS;

	// check whether a debugger is installed already
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		return B_BAD_VALUE;

	// We can't help it: there's a small but relatively harmless race condition
	// here, since a debugger could be installed in the meantime. The worst
	// case is that we clear a break/watchpoint the debugger has just
	// installed.

	// clear the break/watchpoint
	status_t result;
	if (watchpoint)
		result = arch_clear_watchpoint(address);
	else
		result = arch_clear_breakpoint(address);

	if (result == B_OK)
		update_threads_breakpoints_flag();

	return result;
}