/*
 * Copyright 2005-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2015, Rene Gollent, rene@gollent.com.
 * Distributed under the terms of the MIT License.
 */


#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include <algorithm>

#include <arch/debug.h>
#include <arch/user_debugger.h>
#include <core_dump.h>
#include <cpu.h>
#include <debugger.h>
#include <kernel.h>
#include <KernelExport.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <ksyscalls.h>
#include <port.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <thread_types.h>
#include <user_debugger.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include <AutoDeleter.h>
#include <util/AutoLock.h>

#include "BreakpointManager.h"


//#define TRACE_USER_DEBUGGER
#ifdef TRACE_USER_DEBUGGER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


// TODO: Since the introduction of team_debug_info::debugger_changed_condition
// there's some potential for simplifications. E.g. clear_team_debug_info() and
// destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus
// arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()).


static port_id sDefaultDebuggerPort = -1;
	// accessed atomically

static timer sProfilingTimers[SMP_MAX_CPUS];
	// a profiling timer for each CPU -- used when a profiled thread is running
	// on that CPU


static void schedule_profiling_timer(Thread* thread, bigtime_t interval);
static int32 profiling_event(timer* unused);
static status_t ensure_debugger_installed();
static void get_team_debug_info(team_debug_info &teamDebugInfo);


static inline status_t
kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
	size_t bufferSize)
{
	return write_port_etc(port, code, buffer, bufferSize, B_KILL_CAN_INTERRUPT,
		0);
}


static status_t
debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
	bool dontWait)
{
	TRACE(("debugger_write(): thread: %" B_PRId32 ", team %" B_PRId32 ", "
		"port: %" B_PRId32 ", code: %" B_PRIx32 ", message: %p, size: %lu, "
		"dontWait: %d\n", thread_get_current_thread()->id,
		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
		dontWait));

	status_t error = B_OK;

	// get the team debug info
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	sem_id writeLock = teamDebugInfo.debugger_write_lock;

	// get the write lock
	TRACE(("debugger_write(): acquiring write lock...\n"));
	error = acquire_sem_etc(writeLock, 1,
		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK) {
		TRACE(("debugger_write() done1: %" B_PRIx32 "\n", error));
		return error;
	}

	// re-get the team debug info
	get_team_debug_info(teamDebugInfo);

	if (teamDebugInfo.debugger_port != port
		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
		// The debugger has changed in the meantime or we are about to be
		// handed over to a new debugger. In either case we don't send the
		// message.
		TRACE(("debugger_write(): %s\n",
			(teamDebugInfo.debugger_port != port ?
"debugger port changed" 114 : "handover flag set"))); 115 } else { 116 TRACE(("debugger_write(): writing to port...\n")); 117 118 error = write_port_etc(port, code, buffer, bufferSize, 119 dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0); 120 } 121 122 // release the write lock 123 release_sem(writeLock); 124 125 TRACE(("debugger_write() done: %" B_PRIx32 "\n", error)); 126 127 return error; 128 } 129 130 131 /*! Updates the thread::flags field according to what user debugger flags are 132 set for the thread. 133 Interrupts must be disabled and the thread's debug info lock must be held. 134 */ 135 static void 136 update_thread_user_debug_flag(Thread* thread) 137 { 138 if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0) 139 atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD); 140 else 141 atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD); 142 } 143 144 145 /*! Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the 146 given thread. 147 Interrupts must be disabled and the thread debug info lock must be held. 148 */ 149 static void 150 update_thread_breakpoints_flag(Thread* thread) 151 { 152 Team* team = thread->team; 153 154 if (arch_has_breakpoints(&team->debug_info.arch_info)) 155 atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED); 156 else 157 atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED); 158 } 159 160 161 /*! Updates the Thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all 162 threads of the current team. 163 */ 164 static void 165 update_threads_breakpoints_flag() 166 { 167 Team* team = thread_get_current_thread()->team; 168 169 TeamLocker teamLocker(team); 170 171 Thread* thread = team->thread_list; 172 173 if (arch_has_breakpoints(&team->debug_info.arch_info)) { 174 for (; thread != NULL; thread = thread->team_next) 175 atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED); 176 } else { 177 for (; thread != NULL; thread = thread->team_next) 178 atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED); 179 } 180 } 181 182 183 /*! Updates the thread::flags B_TEAM_DEBUG_DEBUGGER_INSTALLED bit of the 184 given thread, which must be the current thread. 185 */ 186 static void 187 update_thread_debugger_installed_flag(Thread* thread) 188 { 189 Team* team = thread->team; 190 191 if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) 192 atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED); 193 else 194 atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED); 195 } 196 197 198 /*! Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all 199 threads of the given team. 200 The team's lock must be held. 201 */ 202 static void 203 update_threads_debugger_installed_flag(Team* team) 204 { 205 Thread* thread = team->thread_list; 206 207 if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) { 208 for (; thread != NULL; thread = thread->team_next) 209 atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED); 210 } else { 211 for (; thread != NULL; thread = thread->team_next) 212 atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED); 213 } 214 } 215 216 217 /** 218 * For the first initialization the function must be called with \a initLock 219 * set to \c true. If it would be possible that another thread accesses the 220 * structure at the same time, `lock' must be held when calling the function. 
 */
void
clear_team_debug_info(struct team_debug_info *info, bool initLock)
{
	if (info) {
		arch_clear_team_debug_info(&info->arch_info);
		atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->nub_thread = -1;
		info->nub_port = -1;
		info->debugger_write_lock = -1;
		info->causing_thread = -1;
		info->image_event = 0;
		info->breakpoint_manager = NULL;

		if (initLock) {
			B_INITIALIZE_SPINLOCK(&info->lock);
			info->debugger_changed_condition = NULL;
		}
	}
}

/**
 * `lock' must not be held nor may interrupts be disabled.
 * \a info must not be a member of a team struct (or the team struct must no
 * longer be accessible, i.e. the team should already be removed).
 *
 * In case the team is still accessible, the procedure is:
 * 1. get `lock'
 * 2. copy the team debug info on stack
 * 3. call clear_team_debug_info() on the team debug info
 * 4. release `lock'
 * 5. call destroy_team_debug_info() on the copied team debug info
 */
static void
destroy_team_debug_info(struct team_debug_info *info)
{
	if (info) {
		arch_destroy_team_debug_info(&info->arch_info);

		// delete the breakpoint manager
		delete info->breakpoint_manager;
		info->breakpoint_manager = NULL;

		// delete the debugger port write lock
		if (info->debugger_write_lock >= 0) {
			delete_sem(info->debugger_write_lock);
			info->debugger_write_lock = -1;
		}

		// delete the nub port
		if (info->nub_port >= 0) {
			set_port_owner(info->nub_port, B_CURRENT_TEAM);
			delete_port(info->nub_port);
			info->nub_port = -1;
		}

		// wait for the nub thread
		if (info->nub_thread >= 0) {
			if (info->nub_thread != thread_get_current_thread()->id) {
				int32 result;
				wait_for_thread(info->nub_thread, &result);
			}

			info->nub_thread = -1;
		}

		atomic_set(&info->flags, 0);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->causing_thread = -1;
		info->image_event = -1;
	}
}


void
init_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		B_INITIALIZE_SPINLOCK(&info->lock);
		arch_clear_thread_debug_info(&info->arch_info);
		info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
		info->profile.installed_timer = NULL;
	}
}


/*!	Clears the debug info for the current thread.
	Invoked with thread debug info lock being held.
*/
void
clear_thread_debug_info(struct thread_debug_info *info, bool dying)
{
	if (info) {
		// cancel profiling timer
		if (info->profile.installed_timer != NULL) {
			cancel_timer(info->profile.installed_timer);
			info->profile.installed_timer = NULL;
		}

		arch_clear_thread_debug_info(&info->arch_info);
		atomic_set(&info->flags,
			B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ?
				B_THREAD_DEBUG_DYING : 0));
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
	}
}


void
destroy_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		area_id sampleArea = info->profile.sample_area;
		if (sampleArea >= 0) {
			area_info areaInfo;
			if (get_area_info(sampleArea, &areaInfo) == B_OK) {
				unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
				delete_area(sampleArea);
			}
		}

		arch_destroy_thread_debug_info(&info->arch_info);

		if (info->debug_port >= 0) {
			delete_port(info->debug_port);
			info->debug_port = -1;
		}

		info->ignore_signals = 0;
		info->ignore_signals_once = 0;

		atomic_set(&info->flags, 0);
	}
}


static status_t
prepare_debugger_change(team_id teamID, ConditionVariable& condition,
	Team*& team)
{
	// We look up the team by ID, even in case of the current team, so we can
	// be sure that the team is not already dying.
	if (teamID == B_CURRENT_TEAM)
		teamID = thread_get_current_thread()->team->id;

	while (true) {
		// get the team
		team = Team::GetAndLock(teamID);
		if (team == NULL)
			return B_BAD_TEAM_ID;
		BReference<Team> teamReference(team, true);
		TeamLocker teamLocker(team, true);

		// don't allow messing with the kernel team
		if (team == team_get_kernel_team())
			return B_NOT_ALLOWED;

		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return B_OK;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();
		teamLocker.Unlock();

		entry.Wait();
	}
}


static void
prepare_debugger_change(Team* team, ConditionVariable& condition)
{
	while (true) {
		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();

		entry.Wait();
	}
}


static void
finish_debugger_change(Team* team)
{
	// unset our condition variable and notify all threads waiting on it
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
	team->debug_info.debugger_changed_condition = NULL;

	condition->NotifyAll();
}


void
user_debug_prepare_for_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized, change the ownership of the debug port for the thread
	// to the kernel team, since exec_team() deletes all ports owned by this
	// team.
	// We change the ownership back later.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if ((thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team_get_kernel_team_id());
	}
}


void
user_debug_finish_after_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized for this thread, change the ownership of its debug port
	// back to this team.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team->id);
	}
}


void
init_user_debug()
{
#ifdef ARCH_INIT_USER_DEBUG
	ARCH_INIT_USER_DEBUG();
#endif
}


static void
get_team_debug_info(team_debug_info &teamDebugInfo)
{
	Thread *thread = thread_get_current_thread();

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);

	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));

	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);
}


static status_t
thread_hit_debug_event_internal(debug_debugger_message event,
	const void *message, int32 size, bool requireDebugger, bool &restart)
{
	restart = false;
	Thread *thread = thread_get_current_thread();

	TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", event: %" B_PRIu32
		", message: %p, size: %" B_PRId32 "\n", thread->id, (uint32)event,
		message, size));

	// check, if there's a debug port already
	bool setPort = !(atomic_get(&thread->debug_info.flags)
		& B_THREAD_DEBUG_INITIALIZED);

	// create a port, if there is none yet
	port_id port = -1;
	if (setPort) {
		char nameBuffer[128];
		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %" B_PRId32,
			thread->id);

		port = create_port(1, nameBuffer);
		if (port < 0) {
			dprintf("thread_hit_debug_event(): Failed to create debug port: "
				"%s\n", strerror(port));
			return port;
		}
	}

	// check the debug info structures once more: get the debugger port, set
	// the thread's debug port, and update the thread's debug flags
	port_id deletePort = port;
	port_id debuggerPort = -1;
	port_id nubPort = -1;
	status_t error = B_OK;
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	uint32 threadFlags = thread->debug_info.flags;
	threadFlags &= ~B_THREAD_DEBUG_STOP;
	bool debuggerInstalled
		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	if (thread->id == thread->team->debug_info.nub_thread) {
		// Ugh, we're the nub
		// thread. We shouldn't be here.
		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %" B_PRId32
			"\n", thread->id));

		error = B_ERROR;
	} else if (debuggerInstalled || !requireDebugger) {
		if (debuggerInstalled) {
			debuggerPort = thread->team->debug_info.debugger_port;
			nubPort = thread->team->debug_info.nub_port;
		}

		if (setPort) {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				// someone created a port for us (the port we've created will
				// be deleted below)
				port = thread->debug_info.debug_port;
			} else {
				thread->debug_info.debug_port = port;
				deletePort = -1;	// keep the port
				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
			}
		} else {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				port = thread->debug_info.debug_port;
			} else {
				// someone deleted our port
				error = B_ERROR;
			}
		}
	} else
		error = B_ERROR;

	// update the flags
	if (error == B_OK)
		threadFlags |= B_THREAD_DEBUG_STOPPED;
	atomic_set(&thread->debug_info.flags, threadFlags);

	update_thread_user_debug_flag(thread);

	threadDebugInfoLocker.Unlock();
	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);

	// delete the superfluous port
	if (deletePort >= 0)
		delete_port(deletePort);

	if (error != B_OK) {
		TRACE(("thread_hit_debug_event() error: thread: %" B_PRId32 ", error: "
			"%" B_PRIx32 "\n", thread->id, error));
		return error;
	}

	// send a message to the debugger port
	if (debuggerInstalled) {
		// update the message's origin info first
		debug_origin *origin = (debug_origin *)message;
		origin->thread = thread->id;
		origin->team = thread->team->id;
		origin->nub_port = nubPort;

		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", sending "
			"message to debugger port %" B_PRId32 "\n", thread->id,
			debuggerPort));

		error = debugger_write(debuggerPort, event, message, size, false);
	}

	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
	bool singleStep = false;

	if (error == B_OK) {
		bool done = false;
		while (!done) {
			// read a command from the debug port
			int32 command;
			debugged_thread_message_data commandMessage;
			ssize_t commandMessageSize = read_port_etc(port, &command,
				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
				0);

			if (commandMessageSize < 0) {
				error = commandMessageSize;
				TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed "
					"to receive message from port %" B_PRId32 ": %" B_PRIx32 "\n",
					thread->id, port, error));
				break;
			}

			switch (command) {
				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
						thread->id));
					result = commandMessage.continue_thread.handle_event;

					singleStep = commandMessage.continue_thread.single_step;
					done = true;
					break;

				case B_DEBUGGED_THREAD_SET_CPU_STATE:
				{
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
						thread->id));
					arch_set_debug_cpu_state(
						&commandMessage.set_cpu_state.cpu_state);

					break;
				}

				case B_DEBUGGED_THREAD_GET_CPU_STATE:
				{
					port_id replyPort = commandMessage.get_cpu_state.reply_port;

					// prepare the message
					debug_nub_get_cpu_state_reply replyMessage;
					replyMessage.error = B_OK;
					replyMessage.message = event;
					arch_get_debug_cpu_state(&replyMessage.cpu_state);

					// send it
					error = kill_interruptable_write_port(replyPort, event,
						&replyMessage, sizeof(replyMessage));

					break;
				}

				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
				{
					// Check, if the debugger really changed, i.e. is different
					// than the one we know.
					team_debug_info teamDebugInfo;
					get_team_debug_info(teamDebugInfo);

					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
						if (!debuggerInstalled
							|| teamDebugInfo.debugger_port != debuggerPort) {
							// debugger was installed or has changed: restart
							// this function
							restart = true;
							done = true;
						}
					} else {
						if (debuggerInstalled) {
							// debugger is gone: continue the thread normally
							done = true;
						}
					}

					break;
				}
			}
		}
	} else {
		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed to send "
			"message to debugger port %" B_PRId32 ": %" B_PRIx32 "\n",
			thread->id, debuggerPort, error));
	}

	// update the thread debug info
	bool destroyThreadInfo = false;
	thread_debug_info threadDebugInfo;

	state = disable_interrupts();
	threadDebugInfoLocker.Lock();

	// check, if the team is still being debugged
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// update the single-step flag
		if (singleStep) {
			atomic_or(&thread->debug_info.flags,
				B_THREAD_DEBUG_SINGLE_STEP);
			atomic_or(&thread->flags, THREAD_FLAGS_SINGLE_STEP);
		} else {
			atomic_and(&thread->debug_info.flags,
				~(int32)B_THREAD_DEBUG_SINGLE_STEP);
		}

		// unset the "stopped" state
		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);

		update_thread_user_debug_flag(thread);

	} else {
		// the debugger is gone: cleanup our info completely
		threadDebugInfo = thread->debug_info;
		clear_thread_debug_info(&thread->debug_info, false);
		destroyThreadInfo = true;
	}

	threadDebugInfoLocker.Unlock();
	restore_interrupts(state);

	// enable/disable single stepping
	arch_update_thread_single_step();

	if (destroyThreadInfo)
		destroy_thread_debug_info(&threadDebugInfo);

	return (error == B_OK ? result : error);
}


static status_t
thread_hit_debug_event(debug_debugger_message event, const void *message,
	int32 size, bool requireDebugger)
{
	status_t result;
	bool restart;
	do {
		restart = false;
		result = thread_hit_debug_event_internal(event, message, size,
			requireDebugger, restart);
	} while (result >= 0 && restart);

	// Prepare to continue -- we install a debugger change condition, so no one
	// will change the debugger while we're playing with the breakpoint manager.
	// TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
	Team* team = thread_get_current_thread()->team;
	ConditionVariable debugChangeCondition;
	debugChangeCondition.Init(team, "debug change condition");
	prepare_debugger_change(team, debugChangeCondition);

	if (team->debug_info.breakpoint_manager != NULL) {
		bool isSyscall;
		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
		if (pc != NULL && !isSyscall)
			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
	}

	finish_debugger_change(team);

	return result;
}


static status_t
thread_hit_serious_debug_event(debug_debugger_message event,
	const void *message, int32 messageSize)
{
	// ensure that a debugger is installed for this team
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		Thread *thread = thread_get_current_thread();
		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
			"thread: %" B_PRId32 ": %s\n", thread->id, strerror(error));
		return error;
	}

	// enter the debug loop
	return thread_hit_debug_event(event, message, messageSize, true);
}


void
user_debug_pre_syscall(uint32 syscall, void *args)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether pre-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_pre_syscall message;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
		sizeof(message), true);
}


void
user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
	bigtime_t startTime)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether post-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_post_syscall message;
	message.start_time = startTime;
	message.end_time = system_time();
	message.return_value = returnValue;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
		sizeof(message), true);
}


/** \brief To be called when an unhandled processor exception (error/fault)
 *		   occurred.
 * \param exception The debug_why_stopped value identifying the kind of fault.
 * \param signal The signal corresponding to the exception.
 * \return \c true, if the caller shall continue normally, i.e. usually send
 *		   a deadly signal. \c false, if the debugger insists on continuing the
 *		   program (e.g. because it has removed the cause of the problem).
 */
bool
user_debug_exception_occurred(debug_exception_type exception, int signal)
{
	// First check whether there's a signal handler installed for the signal.
	// If so, we don't want to install a debugger for the team. We always send
	// the signal instead. An already installed debugger will be notified, if
	// it has requested notifications of signals.
	struct sigaction signalAction;
	if (sigaction(signal, NULL, &signalAction) == 0
		&& signalAction.sa_handler != SIG_DFL) {
		return true;
	}

	// prepare the message
	debug_exception_occurred message;
	message.exception = exception;
	message.signal = signal;

	status_t result = thread_hit_serious_debug_event(
		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


bool
user_debug_handle_signal(int signal, struct sigaction *handler, bool deadly)
{
	// check, if a debugger is installed and is interested in signals
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
		return true;
	}

	// prepare the message
	debug_signal_received message;
	message.signal = signal;
	message.handler = *handler;
	message.deadly = deadly;

	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
		&message, sizeof(message), true);
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


void
user_debug_stop_thread()
{
	// check whether this is actually an emulated single-step notification
	Thread* thread = thread_get_current_thread();
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	bool singleStepped = false;
	if ((atomic_and(&thread->debug_info.flags,
			~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
			& B_THREAD_DEBUG_NOTIFY_SINGLE_STEP) != 0) {
		singleStepped = true;
	}

	threadDebugInfoLocker.Unlock();

	if (singleStepped) {
		user_debug_single_stepped();
	} else {
		debug_thread_debugged message;
		thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED,
			&message, sizeof(message));
	}
}


void
user_debug_team_created(team_id teamID)
{
	// check, if a debugger is installed and is interested in team creation
	// events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_created message;
	message.new_team = teamID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_team_deleted(team_id teamID, port_id debuggerPort)
{
	if (debuggerPort >= 0) {
		TRACE(("user_debug_team_deleted(team: %" B_PRId32 ", debugger port: "
			"%" B_PRId32 ")\n", teamID, debuggerPort));

		debug_team_deleted message;
		message.origin.thread = -1;
		message.origin.team = teamID;
		message.origin.nub_port = -1;
		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
			sizeof(message), B_RELATIVE_TIMEOUT, 0);
	}
}


void
user_debug_team_exec()
{
	// check, if a debugger is installed and is interested in team creation
	// events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_exec message;
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
		sizeof(message), true);
}


/*!	Called by a new userland thread to update the debugging related flags of
	\c Thread::flags before the thread first enters userland.
	\param thread The calling thread.
*/
void
user_debug_update_new_thread_flags(Thread* thread)
{
	// lock it and update its flags
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	update_thread_user_debug_flag(thread);
	update_thread_breakpoints_flag(thread);
	update_thread_debugger_installed_flag(thread);
}


void
user_debug_thread_created(thread_id threadID)
{
	// check, if a debugger is installed and is interested in thread events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// prepare the message
	debug_thread_created message;
	message.new_thread = threadID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_thread_deleted(team_id teamID, thread_id threadID)
{
	// Things are a bit complicated here, since this thread no longer belongs to
	// the debugged team (but to the kernel). So we can't use debugger_write().

	// get the team debug flags and debugger port
	Team* team = Team::Get(teamID);
	if (team == NULL)
		return;
	BReference<Team> teamReference(team, true);

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;
	sem_id writeLock = team->debug_info.debugger_write_lock;

	debugInfoLocker.Unlock();

	// check, if a debugger is installed and is interested in thread events
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// acquire the debugger write lock
	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK)
		return;

	// re-get the team debug info -- we need to check whether anything changed
	debugInfoLocker.Lock();

	teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id newDebuggerPort = team->debug_info.debugger_port;

	debugInfoLocker.Unlock();

	// Send the message only if the debugger hasn't changed in the meantime or
	// the team is about to be handed over.
	if (newDebuggerPort == debuggerPort
		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
		debug_thread_deleted message;
		message.origin.thread = threadID;
		message.origin.team = teamID;
		message.origin.nub_port = -1;

		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
	}

	// release the debugger write lock
	release_sem(writeLock);
}


/*!	Called for a thread that is about to die, cleaning up all user debug
	facilities installed for the thread.
	\param thread The current thread, the one that is going to die.
*/
void
user_debug_thread_exiting(Thread* thread)
{
	// thread is the current thread, so using team is safe
	Team* team = thread->team;

	InterruptsLocker interruptsLocker;

	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	// check, if a debugger is installed
	if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
		|| debuggerPort < 0) {
		return;
	}

	// detach the profile info and mark the thread dying
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	thread_debug_info& threadDebugInfo = thread->debug_info;
	if (threadDebugInfo.profile.samples == NULL)
		return;

	area_id sampleArea = threadDebugInfo.profile.sample_area;
	int32 sampleCount = threadDebugInfo.profile.sample_count;
	int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
	int32 stackDepth = threadDebugInfo.profile.stack_depth;
	bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
	int32 imageEvent = threadDebugInfo.profile.image_event;
	threadDebugInfo.profile.sample_area = -1;
	threadDebugInfo.profile.samples = NULL;
	threadDebugInfo.profile.buffer_full = false;

	atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);

	threadDebugInfoLocker.Unlock();
	interruptsLocker.Unlock();

	// notify the debugger
	debug_profiler_update message;
	message.origin.thread = thread->id;
	message.origin.team = thread->team->id;
	message.origin.nub_port = -1;	// asynchronous message
	message.sample_count = sampleCount;
	message.dropped_ticks = droppedTicks;
	message.stack_depth = stackDepth;
	message.variable_stack_depth = variableStackDepth;
	message.image_event = imageEvent;
	message.stopped = true;
	debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
		&message, sizeof(message), false);

	if (sampleArea >= 0) {
		area_info areaInfo;
		if (get_area_info(sampleArea, &areaInfo) == B_OK) {
			unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
			delete_area(sampleArea);
		}
	}
}


void
user_debug_image_created(const image_info *imageInfo)
{
	// check, if a debugger is installed and is interested in image events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_created message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_image_deleted(const image_info *imageInfo)
{
	// check, if a debugger is installed and is interested in image events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_deleted message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message,
		sizeof(message), true);
}


void
user_debug_breakpoint_hit(bool software)
{
	// prepare the message
	debug_breakpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
		sizeof(message));
}


void
user_debug_watchpoint_hit()
{
	// prepare the message
	debug_watchpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
		sizeof(message));
}


void
user_debug_single_stepped()
{
	// clear the single-step thread flag
	Thread* thread = thread_get_current_thread();
	atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_SINGLE_STEP);

	// prepare the message
	debug_single_step message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
		sizeof(message));
}


/*!	Schedules the profiling timer for the current thread.
	The caller must hold the thread's debug info lock.
	\param thread The current thread.
	\param interval The time after which the timer should fire.
*/
static void
schedule_profiling_timer(Thread* thread, bigtime_t interval)
{
	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
	thread->debug_info.profile.installed_timer = timer;
	thread->debug_info.profile.timer_end = system_time() + interval;
	add_timer(timer, &profiling_event, interval, B_ONE_SHOT_RELATIVE_TIMER);
}


/*!	Samples the current thread's instruction pointer/stack trace.
	The caller must hold the current thread's debug info lock.
	\param flushBuffer Return parameter: Set to \c true when the sampling
		buffer must be flushed.
*/
static bool
profiling_do_sample(bool& flushBuffer)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	if (debugInfo.profile.samples == NULL)
		return false;

	// Check, whether the buffer is full or an image event occurred since the
	// last sample was taken.
	int32 maxSamples = debugInfo.profile.max_samples;
	int32 sampleCount = debugInfo.profile.sample_count;
	int32 stackDepth = debugInfo.profile.stack_depth;
	int32 imageEvent = thread->team->debug_info.image_event;
	if (debugInfo.profile.sample_count > 0) {
		if (debugInfo.profile.last_image_event < imageEvent
			&& debugInfo.profile.variable_stack_depth
			&& sampleCount + 2 <= maxSamples) {
			// an image event occurred, but we use variable stack depth and
			// have enough room in the buffer to indicate an image event
			addr_t* event = debugInfo.profile.samples + sampleCount;
			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
			event[1] = imageEvent;
			sampleCount += 2;
			debugInfo.profile.sample_count = sampleCount;
			debugInfo.profile.last_image_event = imageEvent;
		}

		if (debugInfo.profile.last_image_event < imageEvent
			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
			if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
				flushBuffer = true;
				return true;
			}

			// We can't flush the buffer now, since we interrupted a kernel
			// function. If the buffer is not full yet, we add the samples,
			// otherwise we have to drop them.
			if (maxSamples - sampleCount < stackDepth) {
				debugInfo.profile.dropped_ticks++;
				return true;
			}
		}
	} else {
		// first sample -- set the image event
		debugInfo.profile.image_event = imageEvent;
		debugInfo.profile.last_image_event = imageEvent;
	}

	// get the samples
	addr_t* returnAddresses = debugInfo.profile.samples
		+ debugInfo.profile.sample_count;
	if (debugInfo.profile.variable_stack_depth) {
		// variable sample count per hit
		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
			stackDepth - 1, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

		debugInfo.profile.sample_count += *returnAddresses + 1;
	} else {
		// fixed sample count per hit
		if (stackDepth > 1) {
			int32 count = arch_debug_get_stack_trace(returnAddresses,
				stackDepth, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

			for (int32 i = count; i < stackDepth; i++)
				returnAddresses[i] = 0;
		} else
			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);

		debugInfo.profile.sample_count += stackDepth;
	}

	return true;
}


static void
profiling_buffer_full(void*)
{
	// It is undefined whether the function is called with interrupts enabled
	// or disabled. We are allowed to enable interrupts, though. First make
	// sure interrupts are disabled.
	disable_interrupts();

	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
		int32 sampleCount = debugInfo.profile.sample_count;
		int32 droppedTicks = debugInfo.profile.dropped_ticks;
		int32 stackDepth = debugInfo.profile.stack_depth;
		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
		int32 imageEvent = debugInfo.profile.image_event;

		// notify the debugger
		debugInfo.profile.sample_count = 0;
		debugInfo.profile.dropped_ticks = 0;

		threadDebugInfoLocker.Unlock();
		enable_interrupts();

		// prepare the message
		debug_profiler_update message;
		message.sample_count = sampleCount;
		message.dropped_ticks = droppedTicks;
		message.stack_depth = stackDepth;
		message.variable_stack_depth = variableStackDepth;
		message.image_event = imageEvent;
		message.stopped = false;

		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
			sizeof(message), false);

		disable_interrupts();
		threadDebugInfoLocker.Lock();

		// do the sampling and reschedule timer, if still profiling this thread
		bool flushBuffer;
		if (profiling_do_sample(flushBuffer)) {
			debugInfo.profile.buffer_full = false;
			schedule_profiling_timer(thread, debugInfo.profile.interval);
		}
	}

	threadDebugInfoLocker.Unlock();
	enable_interrupts();
}


/*!	Profiling timer event callback.
	Called with interrupts disabled.
*/
static int32
profiling_event(timer* /*unused*/)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	bool flushBuffer = false;
	if (profiling_do_sample(flushBuffer)) {
		if (flushBuffer) {
			// The sample buffer needs to be flushed; we'll have to notify the
			// debugger. We can't do that right here. Instead we set a post
			// interrupt callback doing that for us, and don't reschedule the
			// timer yet.
			thread->post_interrupt_callback = profiling_buffer_full;
			debugInfo.profile.installed_timer = NULL;
			debugInfo.profile.buffer_full = true;
		} else
			schedule_profiling_timer(thread, debugInfo.profile.interval);
	} else
		debugInfo.profile.installed_timer = NULL;

	return B_HANDLED_INTERRUPT;
}


/*!	Called by the scheduler when a debugged thread has been unscheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_unscheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// if running, cancel the profiling timer
	struct timer* timer = thread->debug_info.profile.installed_timer;
	if (timer != NULL) {
		// track remaining time
		bigtime_t left = thread->debug_info.profile.timer_end - system_time();
		thread->debug_info.profile.interval_left = max_c(left, 0);
		thread->debug_info.profile.installed_timer = NULL;

		// cancel timer
		threadDebugInfoLocker.Unlock();
			// not necessary, but doesn't harm and reduces contention
		cancel_timer(timer);
			// since invoked on the same CPU, this will not possibly wait for
			// an already called timer hook
	}
}


/*!
	Called by the scheduler when a debugged thread has been scheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_scheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	if (thread->debug_info.profile.samples != NULL
		&& !thread->debug_info.profile.buffer_full) {
		// install profiling timer
		schedule_profiling_timer(thread,
			thread->debug_info.profile.interval_left);
	}
}


/*!	\brief Called by the debug nub thread of a team to broadcast a message to
	all threads of the team that are initialized for debugging (and
	thus have a debug port).
*/
static void
broadcast_debugged_thread_message(Thread *nubThread, int32 code,
	const void *message, int32 size)
{
	// iterate through the threads
	thread_info threadInfo;
	int32 cookie = 0;
	while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
			== B_OK) {
		// get the thread and lock it
		Thread* thread = Thread::GetAndLock(threadInfo.thread);
		if (thread == NULL)
			continue;

		BReference<Thread> threadReference(thread, true);
		ThreadLocker threadLocker(thread, true);

		// get the thread's debug port
		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		port_id threadDebugPort = -1;
		if (thread && thread != nubThread && thread->team == nubThread->team
			&& (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
			&& (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
			threadDebugPort = thread->debug_info.debug_port;
		}

		threadDebugInfoLocker.Unlock();
		threadLocker.Unlock();

		// send the message to the thread
		if (threadDebugPort >= 0) {
			status_t error = kill_interruptable_write_port(threadDebugPort,
				code, message, size);
			if (error != B_OK) {
				TRACE(("broadcast_debugged_thread_message(): Failed to send "
					"message to thread %" B_PRId32 ": %" B_PRIx32 "\n",
					thread->id, error));
			}
		}
	}
}


static void
nub_thread_cleanup(Thread *nubThread)
{
	TRACE(("nub_thread_cleanup(%" B_PRId32 "): debugger port: %" B_PRId32 "\n",
		nubThread->id, nubThread->team->debug_info.debugger_port));

	ConditionVariable debugChangeCondition;
	debugChangeCondition.Init(nubThread->team, "debug change condition");
	prepare_debugger_change(nubThread->team, debugChangeCondition);

	team_debug_info teamDebugInfo;
	bool destroyDebugInfo = false;

	TeamLocker teamLocker(nubThread->team);
		// required by update_threads_debugger_installed_flag()

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	team_debug_info &info = nubThread->team->debug_info;
	if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
		&& info.nub_thread == nubThread->id) {
		teamDebugInfo = info;
		clear_team_debug_info(&info, false);
		destroyDebugInfo = true;
	}

	// update the thread::flags fields
	update_threads_debugger_installed_flag(nubThread->team);

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	teamLocker.Unlock();

	if (destroyDebugInfo)
		teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();

	finish_debugger_change(nubThread->team);

	if (destroyDebugInfo)
		destroy_team_debug_info(&teamDebugInfo);

	// notify all threads that the debugger is gone
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
}


/** \brief Debug nub thread helper function that returns the debug port of
 *		   a thread of the same team.
 */
static status_t
debug_nub_thread_get_thread_debug_port(Thread *nubThread,
	thread_id threadID, port_id &threadDebugPort)
{
	threadDebugPort = -1;

	// get the thread
	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// get the debug port
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	if (thread->team != nubThread->team)
		return B_BAD_VALUE;
	if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) == 0)
		return B_BAD_THREAD_STATE;

	threadDebugPort = thread->debug_info.debug_port;

	threadDebugInfoLocker.Unlock();

	if (threadDebugPort < 0)
		return B_ERROR;

	return B_OK;
}


static status_t
debug_nub_thread(void *)
{
	Thread *nubThread = thread_get_current_thread();

	// check, if we're still the current nub thread and get our port
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	if (nubThread->team->debug_info.nub_thread != nubThread->id) {
		RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
		restore_interrupts(state);
		return 0;
	}

	port_id port = nubThread->team->debug_info.nub_port;
	sem_id writeLock = nubThread->team->debug_info.debugger_write_lock;
	BreakpointManager* breakpointManager
		= nubThread->team->debug_info.breakpoint_manager;

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	TRACE(("debug_nub_thread() thread: %" B_PRId32 ", team %" B_PRId32 ", nub "
		"port: %" B_PRId32 "\n", nubThread->id, nubThread->team->id, port));

	// notify all threads that a debugger has been installed
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);

	// command processing loop
	while (true) {
		int32 command;
		debug_nub_message_data message;
		ssize_t messageSize = read_port_etc(port, &command, &message,
			sizeof(message), B_KILL_CAN_INTERRUPT, 0);

		if (messageSize < 0) {
			// The port is no longer valid or we were interrupted by a kill
			// signal: If we are still listed in the team's debug info as nub
			// thread, we need to update that.
			nub_thread_cleanup(nubThread);

			TRACE(("nub thread %" B_PRId32 ": terminating: %lx\n",
				nubThread->id, messageSize));

			return messageSize;
		}

		bool sendReply = false;
		union {
			debug_nub_read_memory_reply read_memory;
			debug_nub_write_memory_reply write_memory;
			debug_nub_get_cpu_state_reply get_cpu_state;
			debug_nub_set_breakpoint_reply set_breakpoint;
			debug_nub_set_watchpoint_reply set_watchpoint;
			debug_nub_get_signal_masks_reply get_signal_masks;
			debug_nub_get_signal_handler_reply get_signal_handler;
			debug_nub_start_profiler_reply start_profiler;
			debug_profiler_update profiler_update;
			debug_nub_write_core_file_reply write_core_file;
		} reply;
		int32 replySize = 0;
		port_id replyPort = -1;

		// process the command
		switch (command) {
			case B_DEBUG_MESSAGE_READ_MEMORY:
			{
				// get the parameters
				replyPort = message.read_memory.reply_port;
				void *address = message.read_memory.address;
				int32 size = message.read_memory.size;
				status_t result = B_OK;

				// check the parameters
				if (!BreakpointManager::CanAccessAddress(address, false))
					result = B_BAD_ADDRESS;
				else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE)
					result = B_BAD_VALUE;

				// read the memory
				size_t bytesRead = 0;
				if (result == B_OK) {
					result = breakpointManager->ReadMemory(address,
						reply.read_memory.data, size, bytesRead);
				}
				reply.read_memory.error = result;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_READ_MEMORY: "
					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
					", result: %" B_PRIx32 ", read: %ld\n", nubThread->id,
					replyPort, address, size, result, bytesRead));

				// send only as much data as necessary
				reply.read_memory.size = bytesRead;
				replySize = reply.read_memory.data + bytesRead - (char*)&reply;
				sendReply = true;
				break;
			}

			case B_DEBUG_MESSAGE_WRITE_MEMORY:
			{
				// get the parameters
				replyPort = message.write_memory.reply_port;
				void *address = message.write_memory.address;
				int32 size = message.write_memory.size;
				const char *data = message.write_memory.data;
				int32 realSize = (char*)&message + messageSize - data;
				status_t result = B_OK;

				// check the parameters
				if (!BreakpointManager::CanAccessAddress(address, true))
					result = B_BAD_ADDRESS;
				else if (size <= 0 || size > realSize)
					result = B_BAD_VALUE;

				// write the memory
				size_t bytesWritten = 0;
				if (result == B_OK) {
					result = breakpointManager->WriteMemory(address, data, size,
						bytesWritten);
				}
				reply.write_memory.error = result;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_WRITE_MEMORY: "
					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
					", result: %" B_PRIx32 ", written: %ld\n", nubThread->id,
					replyPort, address, size, result, bytesWritten));

				reply.write_memory.size = bytesWritten;
				sendReply = true;
				replySize = sizeof(debug_nub_write_memory_reply);
				break;
			}

			case B_DEBUG_MESSAGE_SET_TEAM_FLAGS:
			{
				// get the parameters
				int32 flags = message.set_team_flags.flags
					& B_TEAM_DEBUG_USER_FLAG_MASK;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_TEAM_FLAGS"
					": flags: %" B_PRIx32 "\n", nubThread->id, flags));

				Team *team = thread_get_current_thread()->team;

// set the flags 1790 cpu_status state = disable_interrupts(); 1791 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1792 1793 flags |= team->debug_info.flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK; 1794 atomic_set(&team->debug_info.flags, flags); 1795 1796 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1797 restore_interrupts(state); 1798 1799 break; 1800 } 1801 1802 case B_DEBUG_MESSAGE_SET_THREAD_FLAGS: 1803 { 1804 // get the parameters 1805 thread_id threadID = message.set_thread_flags.thread; 1806 int32 flags = message.set_thread_flags.flags 1807 & B_THREAD_DEBUG_USER_FLAG_MASK; 1808 1809 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_THREAD_FLAGS" 1810 ": thread: %" B_PRId32 ", flags: %" B_PRIx32 "\n", 1811 nubThread->id, threadID, flags)); 1812 1813 // set the flags 1814 Thread* thread = Thread::GetAndLock(threadID); 1815 if (thread == NULL) 1816 break; 1817 BReference<Thread> threadReference(thread, true); 1818 ThreadLocker threadLocker(thread, true); 1819 1820 InterruptsSpinLocker threadDebugInfoLocker( 1821 thread->debug_info.lock); 1822 1823 if (thread->team == thread_get_current_thread()->team) { 1824 flags |= thread->debug_info.flags 1825 & B_THREAD_DEBUG_KERNEL_FLAG_MASK; 1826 atomic_set(&thread->debug_info.flags, flags); 1827 } 1828 1829 break; 1830 } 1831 1832 case B_DEBUG_MESSAGE_CONTINUE_THREAD: 1833 { 1834 // get the parameters 1835 thread_id threadID; 1836 uint32 handleEvent; 1837 bool singleStep; 1838 1839 threadID = message.continue_thread.thread; 1840 handleEvent = message.continue_thread.handle_event; 1841 singleStep = message.continue_thread.single_step; 1842 1843 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CONTINUE_THREAD" 1844 ": thread: %" B_PRId32 ", handle event: %" B_PRIu32 ", " 1845 "single step: %d\n", nubThread->id, threadID, handleEvent, 1846 singleStep)); 1847 1848 // find the thread and get its debug port 1849 port_id threadDebugPort = -1; 1850 status_t result = debug_nub_thread_get_thread_debug_port( 1851 nubThread, threadID, threadDebugPort); 1852 1853 // send a message to the debugged thread 1854 if (result == B_OK) { 1855 debugged_thread_continue commandMessage; 1856 commandMessage.handle_event = handleEvent; 1857 commandMessage.single_step = singleStep; 1858 1859 result = write_port(threadDebugPort, 1860 B_DEBUGGED_THREAD_MESSAGE_CONTINUE, 1861 &commandMessage, sizeof(commandMessage)); 1862 } else if (result == B_BAD_THREAD_STATE) { 1863 Thread* thread = Thread::GetAndLock(threadID); 1864 if (thread == NULL) 1865 break; 1866 1867 BReference<Thread> threadReference(thread, true); 1868 ThreadLocker threadLocker(thread, true); 1869 if (thread->state == B_THREAD_SUSPENDED) { 1870 threadLocker.Unlock(); 1871 resume_thread(threadID); 1872 break; 1873 } 1874 } 1875 1876 break; 1877 } 1878 1879 case B_DEBUG_MESSAGE_SET_CPU_STATE: 1880 { 1881 // get the parameters 1882 thread_id threadID = message.set_cpu_state.thread; 1883 const debug_cpu_state &cpuState 1884 = message.set_cpu_state.cpu_state; 1885 1886 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_CPU_STATE" 1887 ": thread: %" B_PRId32 "\n", nubThread->id, threadID)); 1888 1889 // find the thread and get its debug port 1890 port_id threadDebugPort = -1; 1891 status_t result = debug_nub_thread_get_thread_debug_port( 1892 nubThread, threadID, threadDebugPort); 1893 1894 // send a message to the debugged thread 1895 if (result == B_OK) { 1896 debugged_thread_set_cpu_state commandMessage; 1897 memcpy(&commandMessage.cpu_state, &cpuState, 1898 sizeof(debug_cpu_state)); 1899 
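// The state is only forwarded to the debugged thread's debug port; the
// thread applies it to its own registers when it processes the message.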
write_port(threadDebugPort, 1900 B_DEBUGGED_THREAD_SET_CPU_STATE, 1901 &commandMessage, sizeof(commandMessage)); 1902 } 1903 1904 break; 1905 } 1906 1907 case B_DEBUG_MESSAGE_GET_CPU_STATE: 1908 { 1909 // get the parameters 1910 thread_id threadID = message.get_cpu_state.thread; 1911 replyPort = message.get_cpu_state.reply_port; 1912 1913 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_CPU_STATE" 1914 ": thread: %" B_PRId32 "\n", nubThread->id, threadID)); 1915 1916 // find the thread and get its debug port 1917 port_id threadDebugPort = -1; 1918 status_t result = debug_nub_thread_get_thread_debug_port( 1919 nubThread, threadID, threadDebugPort); 1920 1921 // send a message to the debugged thread 1922 if (threadDebugPort >= 0) { 1923 debugged_thread_get_cpu_state commandMessage; 1924 commandMessage.reply_port = replyPort; 1925 result = write_port(threadDebugPort, 1926 B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage, 1927 sizeof(commandMessage)); 1928 } 1929 1930 // send a reply to the debugger in case of error 1931 if (result != B_OK) { 1932 reply.get_cpu_state.error = result; 1933 sendReply = true; 1934 replySize = sizeof(reply.get_cpu_state); 1935 } 1936 1937 break; 1938 } 1939 1940 case B_DEBUG_MESSAGE_SET_BREAKPOINT: 1941 { 1942 // get the parameters 1943 replyPort = message.set_breakpoint.reply_port; 1944 void *address = message.set_breakpoint.address; 1945 1946 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_BREAKPOINT" 1947 ": address: %p\n", nubThread->id, address)); 1948 1949 // check the address 1950 status_t result = B_OK; 1951 if (address == NULL 1952 || !BreakpointManager::CanAccessAddress(address, false)) { 1953 result = B_BAD_ADDRESS; 1954 } 1955 1956 // set the breakpoint 1957 if (result == B_OK) 1958 result = breakpointManager->InstallBreakpoint(address); 1959 1960 if (result == B_OK) 1961 update_threads_breakpoints_flag(); 1962 1963 // prepare the reply 1964 reply.set_breakpoint.error = result; 1965 replySize = sizeof(reply.set_breakpoint); 1966 sendReply = true; 1967 1968 break; 1969 } 1970 1971 case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT: 1972 { 1973 // get the parameters 1974 void *address = message.clear_breakpoint.address; 1975 1976 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_BREAKPOINT" 1977 ": address: %p\n", nubThread->id, address)); 1978 1979 // check the address 1980 status_t result = B_OK; 1981 if (address == NULL 1982 || !BreakpointManager::CanAccessAddress(address, false)) { 1983 result = B_BAD_ADDRESS; 1984 } 1985 1986 // clear the breakpoint 1987 if (result == B_OK) 1988 result = breakpointManager->UninstallBreakpoint(address); 1989 1990 if (result == B_OK) 1991 update_threads_breakpoints_flag(); 1992 1993 break; 1994 } 1995 1996 case B_DEBUG_MESSAGE_SET_WATCHPOINT: 1997 { 1998 // get the parameters 1999 replyPort = message.set_watchpoint.reply_port; 2000 void *address = message.set_watchpoint.address; 2001 uint32 type = message.set_watchpoint.type; 2002 int32 length = message.set_watchpoint.length; 2003 2004 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_WATCHPOINT" 2005 ": address: %p, type: %" B_PRIu32 ", length: %" B_PRId32 "\n", 2006 nubThread->id, address, type, length)); 2007 2008 // check the address and size 2009 status_t result = B_OK; 2010 if (address == NULL 2011 || !BreakpointManager::CanAccessAddress(address, false)) { 2012 result = B_BAD_ADDRESS; 2013 } 2014 if (length < 0) 2015 result = B_BAD_VALUE; 2016 2017 // set the watchpoint 2018 if (result == B_OK) { 2019 result = breakpointManager->InstallWatchpoint(address, 
type, 2020 length); 2021 } 2022 2023 if (result == B_OK) 2024 update_threads_breakpoints_flag(); 2025 2026 // prepare the reply 2027 reply.set_watchpoint.error = result; 2028 replySize = sizeof(reply.set_watchpoint); 2029 sendReply = true; 2030 2031 break; 2032 } 2033 2034 case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT: 2035 { 2036 // get the parameters 2037 void *address = message.clear_watchpoint.address; 2038 2039 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_WATCHPOINT" 2040 ": address: %p\n", nubThread->id, address)); 2041 2042 // check the address 2043 status_t result = B_OK; 2044 if (address == NULL 2045 || !BreakpointManager::CanAccessAddress(address, false)) { 2046 result = B_BAD_ADDRESS; 2047 } 2048 2049 // clear the watchpoint 2050 if (result == B_OK) 2051 result = breakpointManager->UninstallWatchpoint(address); 2052 2053 if (result == B_OK) 2054 update_threads_breakpoints_flag(); 2055 2056 break; 2057 } 2058 2059 case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS: 2060 { 2061 // get the parameters 2062 thread_id threadID = message.set_signal_masks.thread; 2063 uint64 ignore = message.set_signal_masks.ignore_mask; 2064 uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask; 2065 uint32 ignoreOp = message.set_signal_masks.ignore_op; 2066 uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op; 2067 2068 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_MASKS" 2069 ": thread: %" B_PRId32 ", ignore: %" B_PRIx64 " (op: %" 2070 B_PRIu32 "), ignore once: %" B_PRIx64 " (op: %" B_PRIu32 2071 ")\n", nubThread->id, threadID, ignore, ignoreOp, 2072 ignoreOnce, ignoreOnceOp)); 2073 2074 // set the masks 2075 Thread* thread = Thread::GetAndLock(threadID); 2076 if (thread == NULL) 2077 break; 2078 BReference<Thread> threadReference(thread, true); 2079 ThreadLocker threadLocker(thread, true); 2080 2081 InterruptsSpinLocker threadDebugInfoLocker( 2082 thread->debug_info.lock); 2083 2084 if (thread->team == thread_get_current_thread()->team) { 2085 thread_debug_info &threadDebugInfo = thread->debug_info; 2086 // set ignore mask 2087 switch (ignoreOp) { 2088 case B_DEBUG_SIGNAL_MASK_AND: 2089 threadDebugInfo.ignore_signals &= ignore; 2090 break; 2091 case B_DEBUG_SIGNAL_MASK_OR: 2092 threadDebugInfo.ignore_signals |= ignore; 2093 break; 2094 case B_DEBUG_SIGNAL_MASK_SET: 2095 threadDebugInfo.ignore_signals = ignore; 2096 break; 2097 } 2098 2099 // set ignore once mask 2100 switch (ignoreOnceOp) { 2101 case B_DEBUG_SIGNAL_MASK_AND: 2102 threadDebugInfo.ignore_signals_once &= ignoreOnce; 2103 break; 2104 case B_DEBUG_SIGNAL_MASK_OR: 2105 threadDebugInfo.ignore_signals_once |= ignoreOnce; 2106 break; 2107 case B_DEBUG_SIGNAL_MASK_SET: 2108 threadDebugInfo.ignore_signals_once = ignoreOnce; 2109 break; 2110 } 2111 } 2112 2113 break; 2114 } 2115 2116 case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS: 2117 { 2118 // get the parameters 2119 replyPort = message.get_signal_masks.reply_port; 2120 thread_id threadID = message.get_signal_masks.thread; 2121 status_t result = B_OK; 2122 2123 // get the masks 2124 uint64 ignore = 0; 2125 uint64 ignoreOnce = 0; 2126 2127 Thread* thread = Thread::GetAndLock(threadID); 2128 if (thread != NULL) { 2129 BReference<Thread> threadReference(thread, true); 2130 ThreadLocker threadLocker(thread, true); 2131 2132 InterruptsSpinLocker threadDebugInfoLocker( 2133 thread->debug_info.lock); 2134 2135 ignore = thread->debug_info.ignore_signals; 2136 ignoreOnce = thread->debug_info.ignore_signals_once; 2137 } else 2138 result = B_BAD_THREAD_ID; 2139 2140 TRACE(("nub thread %" 
B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_MASKS" 2141 ": reply port: %" B_PRId32 ", thread: %" B_PRId32 ", " 2142 "ignore: %" B_PRIx64 ", ignore once: %" B_PRIx64 ", result: " 2143 "%" B_PRIx32 "\n", nubThread->id, replyPort, threadID, 2144 ignore, ignoreOnce, result)); 2145 2146 // prepare the message 2147 reply.get_signal_masks.error = result; 2148 reply.get_signal_masks.ignore_mask = ignore; 2149 reply.get_signal_masks.ignore_once_mask = ignoreOnce; 2150 replySize = sizeof(reply.get_signal_masks); 2151 sendReply = true; 2152 break; 2153 } 2154 2155 case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER: 2156 { 2157 // get the parameters 2158 int signal = message.set_signal_handler.signal; 2159 struct sigaction &handler = message.set_signal_handler.handler; 2160 2161 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER" 2162 ": signal: %d, handler: %p\n", nubThread->id, signal, 2163 handler.sa_handler)); 2164 2165 // set the handler 2166 sigaction(signal, &handler, NULL); 2167 2168 break; 2169 } 2170 2171 case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER: 2172 { 2173 // get the parameters 2174 replyPort = message.get_signal_handler.reply_port; 2175 int signal = message.get_signal_handler.signal; 2176 status_t result = B_OK; 2177 2178 // get the handler 2179 if (sigaction(signal, NULL, &reply.get_signal_handler.handler) 2180 != 0) { 2181 result = errno; 2182 } 2183 2184 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER" 2185 ": reply port: %" B_PRId32 ", signal: %d, handler: %p\n", 2186 nubThread->id, replyPort, signal, 2187 reply.get_signal_handler.handler.sa_handler)); 2188 2189 // prepare the message 2190 reply.get_signal_handler.error = result; 2191 replySize = sizeof(reply.get_signal_handler); 2192 sendReply = true; 2193 break; 2194 } 2195 2196 case B_DEBUG_MESSAGE_PREPARE_HANDOVER: 2197 { 2198 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_PREPARE_HANDOVER" 2199 "\n", nubThread->id)); 2200 2201 Team *team = nubThread->team; 2202 2203 // Acquire the debugger write lock. As soon as we have it and 2204 // have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread 2205 // will write anything to the debugger port anymore. 2206 status_t result = acquire_sem_etc(writeLock, 1, 2207 B_KILL_CAN_INTERRUPT, 0); 2208 if (result == B_OK) { 2209 // set the respective team debug flag 2210 cpu_status state = disable_interrupts(); 2211 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2212 2213 atomic_or(&team->debug_info.flags, 2214 B_TEAM_DEBUG_DEBUGGER_HANDOVER); 2215 BreakpointManager* breakpointManager 2216 = team->debug_info.breakpoint_manager; 2217 2218 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2219 restore_interrupts(state); 2220 2221 // remove all installed breakpoints 2222 breakpointManager->RemoveAllBreakpoints(); 2223 2224 release_sem(writeLock); 2225 } else { 2226 // We probably got a SIGKILL. If so, we will terminate when 2227 // reading the next message fails. 
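// Note: no reply is sent for this message; both debuggers are informed of
// the completed handover via B_DEBUGGER_MESSAGE_HANDED_OVER sent by
// install_team_debugger().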
2228 } 2229 2230 break; 2231 } 2232 2233 case B_DEBUG_MESSAGE_HANDED_OVER: 2234 { 2235 // notify all threads that the debugger has changed 2236 broadcast_debugged_thread_message(nubThread, 2237 B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0); 2238 2239 break; 2240 } 2241 2242 case B_DEBUG_START_PROFILER: 2243 { 2244 // get the parameters 2245 thread_id threadID = message.start_profiler.thread; 2246 replyPort = message.start_profiler.reply_port; 2247 area_id sampleArea = message.start_profiler.sample_area; 2248 int32 stackDepth = message.start_profiler.stack_depth; 2249 bool variableStackDepth 2250 = message.start_profiler.variable_stack_depth; 2251 bigtime_t interval = max_c(message.start_profiler.interval, 2252 B_DEBUG_MIN_PROFILE_INTERVAL); 2253 status_t result = B_OK; 2254 2255 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_START_PROFILER: " 2256 "thread: %" B_PRId32 ", sample area: %" B_PRId32 "\n", 2257 nubThread->id, threadID, sampleArea)); 2258 2259 if (stackDepth < 1) 2260 stackDepth = 1; 2261 else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH) 2262 stackDepth = B_DEBUG_STACK_TRACE_DEPTH; 2263 2264 // provision for an extra entry per hit (for the number of 2265 // samples), if variable stack depth 2266 if (variableStackDepth) 2267 stackDepth++; 2268 2269 // clone the sample area 2270 area_info areaInfo; 2271 if (result == B_OK) 2272 result = get_area_info(sampleArea, &areaInfo); 2273 2274 area_id clonedSampleArea = -1; 2275 void* samples = NULL; 2276 if (result == B_OK) { 2277 clonedSampleArea = clone_area("profiling samples", &samples, 2278 B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA, 2279 sampleArea); 2280 if (clonedSampleArea >= 0) { 2281 // we need the memory locked 2282 result = lock_memory(samples, areaInfo.size, 2283 B_READ_DEVICE); 2284 if (result != B_OK) { 2285 delete_area(clonedSampleArea); 2286 clonedSampleArea = -1; 2287 } 2288 } else 2289 result = clonedSampleArea; 2290 } 2291 2292 // get the thread and set the profile info 2293 int32 imageEvent = nubThread->team->debug_info.image_event; 2294 if (result == B_OK) { 2295 Thread* thread = Thread::GetAndLock(threadID); 2296 BReference<Thread> threadReference(thread, true); 2297 ThreadLocker threadLocker(thread, true); 2298 2299 if (thread != NULL && thread->team == nubThread->team) { 2300 thread_debug_info &threadDebugInfo = thread->debug_info; 2301 2302 InterruptsSpinLocker threadDebugInfoLocker( 2303 threadDebugInfo.lock); 2304 2305 if (threadDebugInfo.profile.samples == NULL) { 2306 threadDebugInfo.profile.interval = interval; 2307 threadDebugInfo.profile.sample_area 2308 = clonedSampleArea; 2309 threadDebugInfo.profile.samples = (addr_t*)samples; 2310 threadDebugInfo.profile.max_samples 2311 = areaInfo.size / sizeof(addr_t); 2312 threadDebugInfo.profile.flush_threshold 2313 = threadDebugInfo.profile.max_samples 2314 * B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD 2315 / 100; 2316 threadDebugInfo.profile.sample_count = 0; 2317 threadDebugInfo.profile.dropped_ticks = 0; 2318 threadDebugInfo.profile.stack_depth = stackDepth; 2319 threadDebugInfo.profile.variable_stack_depth 2320 = variableStackDepth; 2321 threadDebugInfo.profile.buffer_full = false; 2322 threadDebugInfo.profile.interval_left = interval; 2323 threadDebugInfo.profile.installed_timer = NULL; 2324 threadDebugInfo.profile.image_event = imageEvent; 2325 threadDebugInfo.profile.last_image_event 2326 = imageEvent; 2327 } else 2328 result = B_BAD_VALUE; 2329 } else 2330 result = B_BAD_THREAD_ID; 2331 } 2332 2333 // on error unlock and delete the sample area 2334 if (result != 
B_OK) { 2335 if (clonedSampleArea >= 0) { 2336 unlock_memory(samples, areaInfo.size, B_READ_DEVICE); 2337 delete_area(clonedSampleArea); 2338 } 2339 } 2340 2341 // send a reply to the debugger 2342 reply.start_profiler.error = result; 2343 reply.start_profiler.interval = interval; 2344 reply.start_profiler.image_event = imageEvent; 2345 sendReply = true; 2346 replySize = sizeof(reply.start_profiler); 2347 2348 break; 2349 } 2350 2351 case B_DEBUG_STOP_PROFILER: 2352 { 2353 // get the parameters 2354 thread_id threadID = message.stop_profiler.thread; 2355 replyPort = message.stop_profiler.reply_port; 2356 status_t result = B_OK; 2357 2358 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_STOP_PROFILER: " 2359 "thread: %" B_PRId32 "\n", nubThread->id, threadID)); 2360 2361 area_id sampleArea = -1; 2362 addr_t* samples = NULL; 2363 int32 sampleCount = 0; 2364 int32 stackDepth = 0; 2365 bool variableStackDepth = false; 2366 int32 imageEvent = 0; 2367 int32 droppedTicks = 0; 2368 2369 // get the thread and detach the profile info 2370 Thread* thread = Thread::GetAndLock(threadID); 2371 BReference<Thread> threadReference(thread, true); 2372 ThreadLocker threadLocker(thread, true); 2373 2374 if (thread && thread->team == nubThread->team) { 2375 thread_debug_info &threadDebugInfo = thread->debug_info; 2376 2377 InterruptsSpinLocker threadDebugInfoLocker( 2378 threadDebugInfo.lock); 2379 2380 if (threadDebugInfo.profile.samples != NULL) { 2381 sampleArea = threadDebugInfo.profile.sample_area; 2382 samples = threadDebugInfo.profile.samples; 2383 sampleCount = threadDebugInfo.profile.sample_count; 2384 droppedTicks = threadDebugInfo.profile.dropped_ticks; 2385 stackDepth = threadDebugInfo.profile.stack_depth; 2386 variableStackDepth 2387 = threadDebugInfo.profile.variable_stack_depth; 2388 imageEvent = threadDebugInfo.profile.image_event; 2389 threadDebugInfo.profile.sample_area = -1; 2390 threadDebugInfo.profile.samples = NULL; 2391 threadDebugInfo.profile.buffer_full = false; 2392 threadDebugInfo.profile.dropped_ticks = 0; 2393 } else 2394 result = B_BAD_VALUE; 2395 } else 2396 result = B_BAD_THREAD_ID; 2397 2398 threadLocker.Unlock(); 2399 2400 // prepare the reply 2401 if (result == B_OK) { 2402 reply.profiler_update.origin.thread = threadID; 2403 reply.profiler_update.image_event = imageEvent; 2404 reply.profiler_update.stack_depth = stackDepth; 2405 reply.profiler_update.variable_stack_depth 2406 = variableStackDepth; 2407 reply.profiler_update.sample_count = sampleCount; 2408 reply.profiler_update.dropped_ticks = droppedTicks; 2409 reply.profiler_update.stopped = true; 2410 } else 2411 reply.profiler_update.origin.thread = result; 2412 2413 replySize = sizeof(debug_profiler_update); 2414 sendReply = true; 2415 2416 if (sampleArea >= 0) { 2417 area_info areaInfo; 2418 if (get_area_info(sampleArea, &areaInfo) == B_OK) { 2419 unlock_memory(samples, areaInfo.size, B_READ_DEVICE); 2420 delete_area(sampleArea); 2421 } 2422 } 2423 2424 break; 2425 } 2426 2427 case B_DEBUG_WRITE_CORE_FILE: 2428 { 2429 // get the parameters 2430 replyPort = message.write_core_file.reply_port; 2431 char* path = message.write_core_file.path; 2432 path[sizeof(message.write_core_file.path) - 1] = '\0'; 2433 2434 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_WRITE_CORE_FILE" 2435 ": path: %s\n", nubThread->id, path)); 2436 2437 // write the core file 2438 status_t result = core_dump_write_core_file(path, false); 2439 2440 // prepare the reply 2441 reply.write_core_file.error = result; 2442 replySize = sizeof(reply.write_core_file); 
2443 sendReply = true;
2444 
2445 break;
2446 }
2447 }
2448 
2449 // send the reply, if necessary
2450 if (sendReply) {
2451 status_t error = kill_interruptable_write_port(replyPort, command,
2452 &reply, replySize);
2453 
2454 if (error != B_OK) {
2455 // The debugger port either no longer exists or we got
2456 // interrupted by a kill signal. In either case we terminate.
2457 TRACE(("nub thread %" B_PRId32 ": failed to send reply to port "
2458 "%" B_PRId32 ": %s\n", nubThread->id, replyPort,
2459 strerror(error)));
2460 
2461 nub_thread_cleanup(nubThread);
2462 return error;
2463 }
2464 }
2465 }
2466 }
2467 
2468 
2469 /** \brief Helper function for install_team_debugger() that sets up the team
2470 and thread debug infos.
2471 
2472 The caller must hold the team's lock as well as the team debug info lock.
2473 
2474 The function also clears the arch specific team and thread debug infos
2475 (including, among other things, formerly set break/watchpoints).
2476 */
2477 static void
2478 install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam,
2479 port_id debuggerPort, port_id nubPort, thread_id nubThread,
2480 sem_id debuggerPortWriteLock, thread_id causingThread)
2481 {
2482 atomic_set(&team->debug_info.flags,
2483 B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED);
2484 team->debug_info.nub_port = nubPort;
2485 team->debug_info.nub_thread = nubThread;
2486 team->debug_info.debugger_team = debuggerTeam;
2487 team->debug_info.debugger_port = debuggerPort;
2488 team->debug_info.debugger_write_lock = debuggerPortWriteLock;
2489 team->debug_info.causing_thread = causingThread;
2490 
2491 arch_clear_team_debug_info(&team->debug_info.arch_info);
2492 
2493 // set the user debug flags and signal masks of all threads to the default
2494 for (Thread *thread = team->thread_list; thread;
2495 thread = thread->team_next) {
2496 SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
2497 
2498 if (thread->id == nubThread) {
2499 atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD);
2500 } else {
2501 int32 flags = thread->debug_info.flags
2502 & ~B_THREAD_DEBUG_USER_FLAG_MASK;
2503 atomic_set(&thread->debug_info.flags,
2504 flags | B_THREAD_DEBUG_DEFAULT_FLAGS);
2505 thread->debug_info.ignore_signals = 0;
2506 thread->debug_info.ignore_signals_once = 0;
2507 
2508 arch_clear_thread_debug_info(&thread->debug_info.arch_info);
2509 }
2510 }
2511 
2512 // update the thread::flags fields
2513 update_threads_debugger_installed_flag(team);
2514 }
2515 
2516 
2517 static port_id
2518 install_team_debugger(team_id teamID, port_id debuggerPort,
2519 thread_id causingThread, bool useDefault, bool dontReplace)
2520 {
2521 TRACE(("install_team_debugger(team: %" B_PRId32 ", port: %" B_PRId32 ", "
2522 "default: %d, dontReplace: %d)\n", teamID, debuggerPort, useDefault,
2523 dontReplace));
2524 
2525 if (useDefault)
2526 debuggerPort = atomic_get(&sDefaultDebuggerPort);
2527 
2528 // get the debugger team
2529 port_info debuggerPortInfo;
2530 status_t error = get_port_info(debuggerPort, &debuggerPortInfo);
2531 if (error != B_OK) {
2532 TRACE(("install_team_debugger(): Failed to get debugger port info: "
2533 "%" B_PRIx32 "\n", error));
2534 return error;
2535 }
2536 team_id debuggerTeam = debuggerPortInfo.team;
2537 
2538 // Check the debugger team: It must neither be the kernel team nor the
2539 // debugged team.
2540 if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) {
2541 TRACE(("install_team_debugger(): Can't debug kernel or debugger team. "
2542 "debugger: %" B_PRId32 ", debugged: %" B_PRId32 "\n", debuggerTeam,
2543 teamID));
2544 return B_NOT_ALLOWED;
2545 }
2546 
2547 // get the team
2548 Team* team;
2549 ConditionVariable debugChangeCondition;
2550 debugChangeCondition.Init(NULL, "debug change condition");
2551 error = prepare_debugger_change(teamID, debugChangeCondition, team);
2552 if (error != B_OK)
2553 return error;
2554 
2555 // get the real team ID
2556 teamID = team->id;
2557 
2558 // check whether a debugger is already installed
2559 
2560 bool done = false;
2561 port_id result = B_ERROR;
2562 bool handOver = false;
2563 port_id oldDebuggerPort = -1;
2564 port_id nubPort = -1;
2565 
2566 TeamLocker teamLocker(team);
2567 cpu_status state = disable_interrupts();
2568 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2569 
2570 int32 teamDebugFlags = team->debug_info.flags;
2571 
2572 if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
2573 // There's already a debugger installed.
2574 if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) {
2575 if (dontReplace) {
2576 // We're fine with already having a debugger.
2577 error = B_OK;
2578 done = true;
2579 result = team->debug_info.nub_port;
2580 } else {
2581 // A handover to another debugger is requested.
2582 // Set the handing-over flag -- we'll clear both flags after
2583 // having sent the handed-over message to the new debugger.
2584 atomic_or(&team->debug_info.flags,
2585 B_TEAM_DEBUG_DEBUGGER_HANDING_OVER);
2586 
2587 oldDebuggerPort = team->debug_info.debugger_port;
2588 result = nubPort = team->debug_info.nub_port;
2589 if (causingThread < 0)
2590 causingThread = team->debug_info.causing_thread;
2591 
2592 // set the new debugger
2593 install_team_debugger_init_debug_infos(team, debuggerTeam,
2594 debuggerPort, nubPort, team->debug_info.nub_thread,
2595 team->debug_info.debugger_write_lock, causingThread);
2596 
2597 handOver = true;
2598 done = true;
2599 }
2600 } else {
2601 // there's already a debugger installed
2602 error = (dontReplace ? B_OK : B_BAD_VALUE);
2603 done = true;
2604 result = team->debug_info.nub_port;
2605 }
2606 } else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0
2607 && useDefault) {
2608 // No debugger yet, disable_debugger() had been invoked, and we
2609 // would install the default debugger. Just fail.
2610 error = B_BAD_VALUE;
2611 }
2612 
2613 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2614 restore_interrupts(state);
2615 teamLocker.Unlock();
2616 
2617 if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) {
2618 // The old debugger must just have died. Just proceed as
2619 // if there was no debugger installed. We may still be too
2620 // early, in which case we'll fail, but this race condition
2621 // should be unbelievably rare and relatively harmless.
2622 handOver = false;
2623 done = false;
2624 }
2625 
2626 if (handOver) {
2627 // prepare the handed-over message
2628 debug_handed_over notification;
2629 notification.origin.thread = -1;
2630 notification.origin.team = teamID;
2631 notification.origin.nub_port = nubPort;
2632 notification.debugger = debuggerTeam;
2633 notification.debugger_port = debuggerPort;
2634 notification.causing_thread = causingThread;
2635 
2636 // notify the new debugger
2637 error = write_port_etc(debuggerPort,
2638 B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
2639 sizeof(notification), B_RELATIVE_TIMEOUT, 0);
2640 if (error != B_OK) {
2641 dprintf("install_team_debugger(): Failed to send message to new "
2642 "debugger: %s\n", strerror(error));
2643 }
2644 
2645 // clear the handed-over and handing-over flags
2646 state = disable_interrupts();
2647 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2648 
2649 atomic_and(&team->debug_info.flags,
2650 ~(B_TEAM_DEBUG_DEBUGGER_HANDOVER
2651 | B_TEAM_DEBUG_DEBUGGER_HANDING_OVER));
2652 
2653 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2654 restore_interrupts(state);
2655 
2656 finish_debugger_change(team);
2657 
2658 // notify the nub thread
2659 kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER,
2660 NULL, 0);
2661 
2662 // notify the old debugger
2663 error = write_port_etc(oldDebuggerPort,
2664 B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
2665 sizeof(notification), B_RELATIVE_TIMEOUT, 0);
2666 if (error != B_OK) {
2667 TRACE(("install_team_debugger(): Failed to send message to old "
2668 "debugger: %s\n", strerror(error)));
2669 }
2670 
2671 TRACE(("install_team_debugger() done: handed over to debugger: team: "
2672 "%" B_PRId32 ", port: %" B_PRId32 "\n", debuggerTeam,
2673 debuggerPort));
2674 
2675 return result;
2676 }
2677 
2678 if (done || error != B_OK) {
2679 TRACE(("install_team_debugger() done1: %" B_PRId32 "\n",
2680 (error == B_OK ? result : error)));
2681 finish_debugger_change(team);
2682 return (error == B_OK ? result : error);
2683 }
2684 
2685 // create the debugger write lock semaphore
2686 char nameBuffer[B_OS_NAME_LENGTH];
2687 snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debugger port "
2688 "write", teamID);
2689 sem_id debuggerWriteLock = create_sem(1, nameBuffer);
2690 if (debuggerWriteLock < 0)
2691 error = debuggerWriteLock;
2692 
2693 // create the nub port
2694 snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug", teamID);
2695 if (error == B_OK) {
2696 nubPort = create_port(1, nameBuffer);
2697 if (nubPort < 0)
2698 error = nubPort;
2699 else
2700 result = nubPort;
2701 }
2702 
2703 // make the debugger team the port owner; thus we know when the debugger is
2704 // gone and can clean up
2705 if (error == B_OK)
2706 error = set_port_owner(nubPort, debuggerTeam);
2707 
2708 // create the breakpoint manager
2709 BreakpointManager* breakpointManager = NULL;
2710 if (error == B_OK) {
2711 breakpointManager = new(std::nothrow) BreakpointManager;
2712 if (breakpointManager != NULL)
2713 error = breakpointManager->Init();
2714 else
2715 error = B_NO_MEMORY;
2716 }
2717 
2718 // spawn the nub thread
2719 thread_id nubThread = -1;
2720 if (error == B_OK) {
2721 snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug task",
2722 teamID);
2723 nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer,
2724 B_NORMAL_PRIORITY, NULL, teamID);
2725 if (nubThread < 0)
2726 error = nubThread;
2727 }
2728 
2729 // now adjust the debug info accordingly
2730 if (error == B_OK) {
2731 TeamLocker teamLocker(team);
2732 state = disable_interrupts();
2733 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2734 
2735 team->debug_info.breakpoint_manager = breakpointManager;
2736 install_team_debugger_init_debug_infos(team, debuggerTeam,
2737 debuggerPort, nubPort, nubThread, debuggerWriteLock,
2738 causingThread);
2739 
2740 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2741 restore_interrupts(state);
2742 }
2743 
2744 finish_debugger_change(team);
2745 
2746 // if everything went fine, resume the nub thread, otherwise clean up
2747 if (error == B_OK) {
2748 resume_thread(nubThread);
2749 } else {
2750 // delete port and terminate thread
2751 if (nubPort >= 0) {
2752 set_port_owner(nubPort, B_CURRENT_TEAM);
2753 delete_port(nubPort);
2754 }
2755 if (nubThread >= 0) {
2756 int32 result;
2757 wait_for_thread(nubThread, &result);
2758 }
2759 
2760 delete breakpointManager;
2761 }
2762 
2763 TRACE(("install_team_debugger() done2: %" B_PRId32 "\n",
2764 (error == B_OK ? result : error)));
2765 return (error == B_OK ? result : error);
2766 }
2767 
2768 
2769 static status_t
2770 ensure_debugger_installed()
2771 {
2772 port_id port = install_team_debugger(B_CURRENT_TEAM, -1,
2773 thread_get_current_thread_id(), true, true);
2774 return port >= 0 ? B_OK : port;
2775 }
2776 
2777 
2778 // #pragma mark -
2779 
2780 
2781 void
2782 _user_debugger(const char *userMessage)
2783 {
2784 // install the default debugger, if there is none yet
2785 status_t error = ensure_debugger_installed();
2786 if (error != B_OK) {
2787 // time to commit suicide
2788 char buffer[128];
2789 ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer));
2790 if (length >= 0) {
2791 dprintf("_user_debugger(): Failed to install debugger. Message is: "
2792 "`%s'\n", buffer);
2793 } else {
2794 dprintf("_user_debugger(): Failed to install debugger. 
Message is: " 2795 "%p (%s)\n", userMessage, strerror(length)); 2796 } 2797 _user_exit_team(1); 2798 } 2799 2800 // prepare the message 2801 debug_debugger_call message; 2802 message.message = (void*)userMessage; 2803 2804 thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message, 2805 sizeof(message), true); 2806 } 2807 2808 2809 int 2810 _user_disable_debugger(int state) 2811 { 2812 Team *team = thread_get_current_thread()->team; 2813 2814 TRACE(("_user_disable_debugger(%d): team: %" B_PRId32 "\n", state, 2815 team->id)); 2816 2817 cpu_status cpuState = disable_interrupts(); 2818 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2819 2820 int32 oldFlags; 2821 if (state) { 2822 oldFlags = atomic_or(&team->debug_info.flags, 2823 B_TEAM_DEBUG_DEBUGGER_DISABLED); 2824 } else { 2825 oldFlags = atomic_and(&team->debug_info.flags, 2826 ~B_TEAM_DEBUG_DEBUGGER_DISABLED); 2827 } 2828 2829 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2830 restore_interrupts(cpuState); 2831 2832 // TODO: Check, if the return value is really the old state. 2833 return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED); 2834 } 2835 2836 2837 status_t 2838 _user_install_default_debugger(port_id debuggerPort) 2839 { 2840 // Do not allow non-root processes to install a default debugger. 2841 if (geteuid() != 0) 2842 return B_PERMISSION_DENIED; 2843 2844 // if supplied, check whether the port is a valid port 2845 if (debuggerPort >= 0) { 2846 port_info portInfo; 2847 status_t error = get_port_info(debuggerPort, &portInfo); 2848 if (error != B_OK) 2849 return error; 2850 2851 // the debugger team must not be the kernel team 2852 if (portInfo.team == team_get_kernel_team_id()) 2853 return B_NOT_ALLOWED; 2854 } 2855 2856 atomic_set(&sDefaultDebuggerPort, debuggerPort); 2857 2858 return B_OK; 2859 } 2860 2861 2862 port_id 2863 _user_install_team_debugger(team_id teamID, port_id debuggerPort) 2864 { 2865 if (geteuid() != 0 && team_geteuid(teamID) != geteuid()) 2866 return B_PERMISSION_DENIED; 2867 2868 return install_team_debugger(teamID, debuggerPort, -1, false, false); 2869 } 2870 2871 2872 status_t 2873 _user_remove_team_debugger(team_id teamID) 2874 { 2875 Team* team; 2876 ConditionVariable debugChangeCondition; 2877 debugChangeCondition.Init(NULL, "debug change condition"); 2878 status_t error = prepare_debugger_change(teamID, debugChangeCondition, 2879 team); 2880 if (error != B_OK) 2881 return error; 2882 2883 InterruptsSpinLocker debugInfoLocker(team->debug_info.lock); 2884 2885 thread_id nubThread = -1; 2886 port_id nubPort = -1; 2887 2888 if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) { 2889 // there's a debugger installed 2890 nubThread = team->debug_info.nub_thread; 2891 nubPort = team->debug_info.nub_port; 2892 } else { 2893 // no debugger installed 2894 error = B_BAD_VALUE; 2895 } 2896 2897 debugInfoLocker.Unlock(); 2898 2899 // Delete the nub port -- this will cause the nub thread to terminate and 2900 // remove the debugger. 
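// (Deleting the port makes the nub thread's blocking read fail, upon which
// it runs nub_thread_cleanup() and exits.)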
2901 if (nubPort >= 0) 2902 delete_port(nubPort); 2903 2904 finish_debugger_change(team); 2905 2906 // wait for the nub thread 2907 if (nubThread >= 0) 2908 wait_for_thread(nubThread, NULL); 2909 2910 return error; 2911 } 2912 2913 2914 status_t 2915 _user_debug_thread(thread_id threadID) 2916 { 2917 TRACE(("[%" B_PRId32 "] _user_debug_thread(%" B_PRId32 ")\n", 2918 find_thread(NULL), threadID)); 2919 2920 // get the thread 2921 Thread* thread = Thread::GetAndLock(threadID); 2922 if (thread == NULL) 2923 return B_BAD_THREAD_ID; 2924 BReference<Thread> threadReference(thread, true); 2925 ThreadLocker threadLocker(thread, true); 2926 2927 // we can't debug the kernel team 2928 if (thread->team == team_get_kernel_team()) 2929 return B_NOT_ALLOWED; 2930 2931 InterruptsLocker interruptsLocker; 2932 SpinLocker threadDebugInfoLocker(thread->debug_info.lock); 2933 2934 // If the thread is already dying, it's too late to debug it. 2935 if ((thread->debug_info.flags & B_THREAD_DEBUG_DYING) != 0) 2936 return B_BAD_THREAD_ID; 2937 2938 // don't debug the nub thread 2939 if ((thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) != 0) 2940 return B_NOT_ALLOWED; 2941 2942 // already marked stopped or being told to stop? 2943 if ((thread->debug_info.flags 2944 & (B_THREAD_DEBUG_STOPPED | B_THREAD_DEBUG_STOP)) != 0) { 2945 return B_OK; 2946 } 2947 2948 // set the flag that tells the thread to stop as soon as possible 2949 atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP); 2950 2951 update_thread_user_debug_flag(thread); 2952 2953 // send the thread a SIGNAL_DEBUG_THREAD, so it is interrupted (or 2954 // continued) 2955 threadDebugInfoLocker.Unlock(); 2956 ReadSpinLocker teamLocker(thread->team_lock); 2957 SpinLocker locker(thread->team->signal_lock); 2958 2959 send_signal_to_thread_locked(thread, SIGNAL_DEBUG_THREAD, NULL, 0); 2960 2961 return B_OK; 2962 } 2963 2964 2965 void 2966 _user_wait_for_debugger(void) 2967 { 2968 debug_thread_debugged message; 2969 thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message, 2970 sizeof(message), false); 2971 } 2972 2973 2974 status_t 2975 _user_set_debugger_breakpoint(void *address, uint32 type, int32 length, 2976 bool watchpoint) 2977 { 2978 // check the address and size 2979 if (address == NULL || !BreakpointManager::CanAccessAddress(address, false)) 2980 return B_BAD_ADDRESS; 2981 if (watchpoint && length < 0) 2982 return B_BAD_VALUE; 2983 2984 // check whether a debugger is installed already 2985 team_debug_info teamDebugInfo; 2986 get_team_debug_info(teamDebugInfo); 2987 if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) 2988 return B_BAD_VALUE; 2989 2990 // We can't help it, here's a small but relatively harmless race condition, 2991 // since a debugger could be installed in the meantime. The worst case is 2992 // that we install a break/watchpoint the debugger doesn't know about. 
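// Userland reaches this via the matching syscall stub -- presumably
// _kern_set_debugger_breakpoint(), following the usual _user_/_kern_ pairing.
// A minimal sketch (illustrative only; the stub name and the zero type/length
// for a non-watchpoint call are assumptions, not taken from this file):
//   _kern_set_debugger_breakpoint(someFunctionAddress, 0, 0, false);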
2993 
2994 // set the break/watchpoint
2995 status_t result;
2996 if (watchpoint)
2997 result = arch_set_watchpoint(address, type, length);
2998 else
2999 result = arch_set_breakpoint(address);
3000 
3001 if (result == B_OK)
3002 update_threads_breakpoints_flag();
3003 
3004 return result;
3005 }
3006 
3007 
3008 status_t
3009 _user_clear_debugger_breakpoint(void *address, bool watchpoint)
3010 {
3011 // check the address
3012 if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
3013 return B_BAD_ADDRESS;
3014 
3015 // check whether a debugger is installed already
3016 team_debug_info teamDebugInfo;
3017 get_team_debug_info(teamDebugInfo);
3018 if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
3019 return B_BAD_VALUE;
3020 
3021 // We can't help it, here's a small but relatively harmless race condition,
3022 // since a debugger could be installed in the meantime. The worst case is
3023 // that we clear a break/watchpoint the debugger has just installed.
3024 
3025 // clear the break/watchpoint
3026 status_t result;
3027 if (watchpoint)
3028 result = arch_clear_watchpoint(address);
3029 else
3030 result = arch_clear_breakpoint(address);
3031 
3032 if (result == B_OK)
3033 update_threads_breakpoints_flag();
3034 
3035 return result;
3036 }
3037 