1 /* 2 * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de. 3 * Distributed under the terms of the MIT License. 4 */ 5 6 7 #include <errno.h> 8 #include <signal.h> 9 #include <stdlib.h> 10 #include <stdio.h> 11 #include <string.h> 12 13 #include <algorithm> 14 15 #include <arch/debug.h> 16 #include <arch/user_debugger.h> 17 #include <cpu.h> 18 #include <debugger.h> 19 #include <kernel.h> 20 #include <KernelExport.h> 21 #include <kscheduler.h> 22 #include <ksignal.h> 23 #include <ksyscalls.h> 24 #include <port.h> 25 #include <sem.h> 26 #include <team.h> 27 #include <thread.h> 28 #include <thread_types.h> 29 #include <user_debugger.h> 30 #include <vm/vm.h> 31 #include <vm/vm_types.h> 32 33 #include <AutoDeleter.h> 34 #include <util/AutoLock.h> 35 36 #include "BreakpointManager.h" 37 38 39 //#define TRACE_USER_DEBUGGER 40 #ifdef TRACE_USER_DEBUGGER 41 # define TRACE(x) dprintf x 42 #else 43 # define TRACE(x) ; 44 #endif 45 46 47 // TODO: Since the introduction of team_debug_info::debugger_changed_condition 48 // there's some potential for simplifications. E.g. clear_team_debug_info() and 49 // destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus 50 // arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()). 
static port_id sDefaultDebuggerPort = -1;
	// accessed atomically

static timer sProfilingTimers[B_MAX_CPU_COUNT];
	// a profiling timer for each CPU -- used when a profiled thread is running
	// on that CPU


static void schedule_profiling_timer(Thread* thread, bigtime_t interval);
static int32 profiling_event(timer* unused);
static status_t ensure_debugger_installed();
static void get_team_debug_info(team_debug_info &teamDebugInfo);


/*!	Writes a message to the given port, blocking if necessary, with the write
	interruptible only by a kill signal.
*/
static inline status_t
kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
	size_t bufferSize)
{
	return write_port_etc(port, code, buffer, bufferSize, B_KILL_CAN_INTERRUPT,
		0);
}


/*!	Sends a message to the given debugger port of the current team, serialized
	via the team's debugger write lock.
	The message is silently dropped (B_OK is returned), if by the time the
	write lock has been acquired the debugger port has changed or a debugger
	handover is in progress.
	\param port The debugger port to write to.
	\param code The message code.
	\param buffer The message data.
	\param bufferSize The size of the message data.
	\param dontWait If \c true, neither the lock acquisition nor the port
		write blocks; otherwise both can be interrupted only by a kill signal.
*/
static status_t
debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
	bool dontWait)
{
	TRACE(("debugger_write(): thread: %ld, team %ld, port: %ld, code: %lx, message: %p, "
		"size: %lu, dontWait: %d\n", thread_get_current_thread()->id,
		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
		dontWait));

	status_t error = B_OK;

	// get the team debug info
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	sem_id writeLock = teamDebugInfo.debugger_write_lock;

	// get the write lock
	TRACE(("debugger_write(): acquiring write lock...\n"));
	error = acquire_sem_etc(writeLock, 1,
		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK) {
		TRACE(("debugger_write() done1: %lx\n", error));
		return error;
	}

	// re-get the team debug info -- the debugger may have changed while we
	// were waiting for the lock
	get_team_debug_info(teamDebugInfo);

	if (teamDebugInfo.debugger_port != port
		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
		// The debugger has changed in the meantime or we are about to be
		// handed over to a new debugger. In either case we don't send the
		// message.
		TRACE(("debugger_write(): %s\n",
			(teamDebugInfo.debugger_port != port ? "debugger port changed"
				: "handover flag set")));
	} else {
		TRACE(("debugger_write(): writing to port...\n"));

		error = write_port_etc(port, code, buffer, bufferSize,
			dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	}

	// release the write lock
	release_sem(writeLock);

	TRACE(("debugger_write() done: %lx\n", error));

	return error;
}


/*!	Updates the thread::flags field according to what user debugger flags are
	set for the thread.
	Interrupts must be disabled and the thread's debug info lock must be held.
*/
static void
update_thread_user_debug_flag(Thread* thread)
{
	if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0)
		atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD);
}


/*!	Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the
	given thread.
	Interrupts must be disabled and the thread debug info lock must be held.
*/
static void
update_thread_breakpoints_flag(Thread* thread)
{
	Team* team = thread->team;

	if (arch_has_breakpoints(&team->debug_info.arch_info))
		atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
}


/*!	Updates the Thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
	threads of the current team.
*/
static void
update_threads_breakpoints_flag()
{
	Team* team = thread_get_current_thread()->team;

	TeamLocker teamLocker(team);

	Thread* thread = team->thread_list;

	if (arch_has_breakpoints(&team->debug_info.arch_info)) {
		for (; thread != NULL; thread = thread->team_next)
			atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
	} else {
		for (; thread != NULL; thread = thread->team_next)
			atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
	}
}


/*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of the
	given thread, which must be the current thread.
*/
static void
update_thread_debugger_installed_flag(Thread* thread)
{
	Team* team = thread->team;

	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
}


/*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all
	threads of the given team.
	The team's lock must be held.
*/
static void
update_threads_debugger_installed_flag(Team* team)
{
	Thread* thread = team->thread_list;

	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		for (; thread != NULL; thread = thread->team_next)
			atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
	} else {
		for (; thread != NULL; thread = thread->team_next)
			atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
	}
}


/**
 * Resets the given team debug info to its default (not debugged) state.
 * For the first initialization the function must be called with \a initLock
 * set to \c true. If it would be possible that another thread accesses the
 * structure at the same time, `lock' must be held when calling the function.
 */
void
clear_team_debug_info(struct team_debug_info *info, bool initLock)
{
	if (info) {
		arch_clear_team_debug_info(&info->arch_info);
		atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->nub_thread = -1;
		info->nub_port = -1;
		info->debugger_write_lock = -1;
		info->causing_thread = -1;
		info->image_event = 0;
		info->breakpoint_manager = NULL;

		if (initLock) {
			// first-time initialization: lock and condition variable pointer
			// haven't been set up yet
			B_INITIALIZE_SPINLOCK(&info->lock);
			info->debugger_changed_condition = NULL;
		}
	}
}

/**
 * Frees the resources referenced by the given team debug info.
 * `lock' must not be held nor may interrupts be disabled.
 * \a info must not be a member of a team struct (or the team struct must no
 * longer be accessible, i.e. the team should already be removed).
 *
 * In case the team is still accessible, the procedure is:
 * 1. get `lock'
 * 2. copy the team debug info on stack
 * 3. call clear_team_debug_info() on the team debug info
 * 4. release `lock'
 * 5.
 *    call destroy_team_debug_info() on the copied team debug info
 */
static void
destroy_team_debug_info(struct team_debug_info *info)
{
	if (info) {
		arch_destroy_team_debug_info(&info->arch_info);

		// delete the breakpoint manager
		delete info->breakpoint_manager;
		info->breakpoint_manager = NULL;

		// delete the debugger port write lock
		if (info->debugger_write_lock >= 0) {
			delete_sem(info->debugger_write_lock);
			info->debugger_write_lock = -1;
		}

		// delete the nub port -- take over ownership first, so we are
		// allowed to delete it
		if (info->nub_port >= 0) {
			set_port_owner(info->nub_port, B_CURRENT_TEAM);
			delete_port(info->nub_port);
			info->nub_port = -1;
		}

		// wait for the nub thread (unless we are the nub thread ourselves)
		if (info->nub_thread >= 0) {
			if (info->nub_thread != thread_get_current_thread()->id) {
				int32 result;
				wait_for_thread(info->nub_thread, &result);
			}

			info->nub_thread = -1;
		}

		atomic_set(&info->flags, 0);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->causing_thread = -1;
		info->image_event = -1;
	}
}


/*!	Initializes the given thread debug info, setting all fields to their
	default (not debugged) values.
*/
void
init_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		B_INITIALIZE_SPINLOCK(&info->lock);
		arch_clear_thread_debug_info(&info->arch_info);
		info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
		info->profile.installed_timer = NULL;
	}
}


/*!	Clears the debug info for the current thread.
	Invoked with thread debug info lock being held.
*/
void
clear_thread_debug_info(struct thread_debug_info *info, bool dying)
{
	if (info) {
		// cancel profiling timer
		if (info->profile.installed_timer != NULL) {
			cancel_timer(info->profile.installed_timer);
			info->profile.installed_timer = NULL;
		}

		arch_clear_thread_debug_info(&info->arch_info);
		atomic_set(&info->flags,
			B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ? B_THREAD_DEBUG_DYING : 0));
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
	}
}


/*!	Frees the resources referenced by the given thread debug info: the
	profiling sample area (unlocked and deleted) and the thread's debug port.
*/
void
destroy_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		// unlock and delete the profiling sample buffer area, if any
		area_id sampleArea = info->profile.sample_area;
		if (sampleArea >= 0) {
			area_info areaInfo;
			if (get_area_info(sampleArea, &areaInfo) == B_OK) {
				unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
				delete_area(sampleArea);
			}
		}

		arch_destroy_thread_debug_info(&info->arch_info);

		if (info->debug_port >= 0) {
			delete_port(info->debug_port);
			info->debug_port = -1;
		}

		info->ignore_signals = 0;
		info->ignore_signals_once = 0;

		atomic_set(&info->flags, 0);
	}
}


/*!	Installs the given condition variable as the team's
	debugger_changed_condition, waiting until no other condition variable is
	installed. On success the caller is responsible for calling
	finish_debugger_change().
	\param teamID The ID of the team, or \c B_CURRENT_TEAM.
	\param condition The caller's condition variable to install.
	\param team On success set to the referenced team.
	\return \c B_OK on success, \c B_BAD_TEAM_ID if the team doesn't exist
		(anymore), \c B_NOT_ALLOWED for the kernel team.
*/
static status_t
prepare_debugger_change(team_id teamID, ConditionVariable& condition,
	Team*& team)
{
	// We look up the team by ID, even in case of the current team, so we can be
	// sure, that the team is not already dying.
	if (teamID == B_CURRENT_TEAM)
		teamID = thread_get_current_thread()->team->id;

	while (true) {
		// get the team
		team = Team::GetAndLock(teamID);
		if (team == NULL)
			return B_BAD_TEAM_ID;
		BReference<Team> teamReference(team, true);
		TeamLocker teamLocker(team, true);

		// don't allow messing with the kernel team
		if (team == team_get_kernel_team())
			return B_NOT_ALLOWED;

		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return B_OK;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();
		teamLocker.Unlock();

		entry.Wait();
	}
}


/*!	Same as the other prepare_debugger_change(), but for an already known
	team. Cannot fail, so it just waits until the condition variable can be
	installed.
*/
static void
prepare_debugger_change(Team* team, ConditionVariable& condition)
{
	while (true) {
		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();

		entry.Wait();
	}
}


/*!	Counterpart to prepare_debugger_change(): uninstalls the condition
	variable and wakes up all waiters.
*/
static void
finish_debugger_change(Team* team)
{
	// unset our condition variable and notify all threads waiting on it
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
	team->debug_info.debugger_changed_condition = NULL;

	condition->NotifyAll(false);
}


void
user_debug_prepare_for_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized, change the ownership of the debug port for the thread
	// to the kernel team, since exec_team() deletes all ports owned by this
	// team. We change the ownership back later.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if ((thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team_get_kernel_team_id());
	}
}


void
user_debug_finish_after_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized for this thread, change the ownership of its debug port
	// back to this team.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team->id);
	}
}


void
init_user_debug()
{
#ifdef ARCH_INIT_USER_DEBUG
	ARCH_INIT_USER_DEBUG();
#endif
}


/*!	Copies a consistent snapshot of the current team's debug info into
	\a teamDebugInfo, with interrupts disabled and the team debug info lock
	held for the duration of the copy.
*/
static void
get_team_debug_info(team_debug_info &teamDebugInfo)
{
	Thread *thread = thread_get_current_thread();

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);

	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));

	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);
}


/*!	Sends the given debug event message to the team's debugger port and enters
	the per-thread debug loop, processing commands from the thread's debug
	port until the debugger tells the thread to continue.
	\param event The debugger message code for the event.
	\param message The message buffer; its debug_origin header is filled in
		here.
	\param size The size of the message buffer.
	\param requireDebugger If \c true, fail when no debugger is installed.
	\param restart Set to \c true, if the debugger changed while waiting and
		the caller shall invoke this function again.
	\return A B_THREAD_DEBUG_* handling code from the debugger, or an error.
*/
static status_t
thread_hit_debug_event_internal(debug_debugger_message event,
	const void *message, int32 size, bool requireDebugger, bool &restart)
{
	restart = false;
	Thread *thread = thread_get_current_thread();

	TRACE(("thread_hit_debug_event(): thread: %ld, event: %lu, message: %p, "
		"size: %ld\n", thread->id, (uint32)event, message, size));

	// check, if there's a debug port already
	bool setPort = !(atomic_get(&thread->debug_info.flags)
		& B_THREAD_DEBUG_INITIALIZED);

	// create a port, if there is none yet
	port_id port = -1;
	if (setPort) {
		char nameBuffer[128];
		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %ld",
			thread->id);

		port = create_port(1, nameBuffer);
		if (port < 0) {
			dprintf("thread_hit_debug_event(): Failed to create debug port: "
				"%s\n", strerror(port));
			return port;
		}
	}

	// check the debug info structures once more: get the debugger port, set
	// the thread's debug port, and update the thread's debug flags
	port_id deletePort = port;
	port_id debuggerPort = -1;
	port_id nubPort = -1;
	status_t error = B_OK;
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	uint32 threadFlags = thread->debug_info.flags;
	threadFlags &= ~B_THREAD_DEBUG_STOP;
	bool debuggerInstalled
		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	if (thread->id == thread->team->debug_info.nub_thread) {
		// Ugh, we're the nub thread. We shouldn't be here.
		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %ld\n",
			thread->id));

		error = B_ERROR;
	} else if (debuggerInstalled || !requireDebugger) {
		if (debuggerInstalled) {
			debuggerPort = thread->team->debug_info.debugger_port;
			nubPort = thread->team->debug_info.nub_port;
		}

		if (setPort) {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				// someone created a port for us (the port we've created will
				// be deleted below)
				port = thread->debug_info.debug_port;
			} else {
				thread->debug_info.debug_port = port;
				deletePort = -1;	// keep the port
				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
			}
		} else {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				port = thread->debug_info.debug_port;
			} else {
				// someone deleted our port
				error = B_ERROR;
			}
		}
	} else
		error = B_ERROR;

	// update the flags
	if (error == B_OK)
		threadFlags |= B_THREAD_DEBUG_STOPPED;
	atomic_set(&thread->debug_info.flags, threadFlags);

	update_thread_user_debug_flag(thread);

	threadDebugInfoLocker.Unlock();
	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);

	// delete the superfluous port
	if (deletePort >= 0)
		delete_port(deletePort);

	if (error != B_OK) {
		TRACE(("thread_hit_debug_event() error: thread: %ld, error: %lx\n",
			thread->id, error));
		return error;
	}

	// send a message to the debugger port
	if (debuggerInstalled) {
		// update the message's origin info first
		debug_origin *origin = (debug_origin *)message;
		origin->thread = thread->id;
		origin->team = thread->team->id;
		origin->nub_port = nubPort;

		TRACE(("thread_hit_debug_event(): thread: %ld, sending message to "
			"debugger port %ld\n", thread->id, debuggerPort));

		error = debugger_write(debuggerPort, event, message, size, false);
	}

	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
	bool singleStep = false;

	if (error == B_OK) {
		// the debug loop: process commands from the debugger until it tells
		// us to continue
		bool done = false;
		while (!done) {
			// read a command from the debug port
			int32 command;
			debugged_thread_message_data commandMessage;
			ssize_t commandMessageSize = read_port_etc(port, &command,
				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
				0);

			if (commandMessageSize < 0) {
				error = commandMessageSize;
				TRACE(("thread_hit_debug_event(): thread: %ld, failed "
					"to receive message from port %ld: %lx\n",
					thread->id, port, error));
				break;
			}

			switch (command) {
				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
					TRACE(("thread_hit_debug_event(): thread: %ld: "
						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
						thread->id));
					result = commandMessage.continue_thread.handle_event;

					singleStep = commandMessage.continue_thread.single_step;
					done = true;
					break;

				case B_DEBUGGED_THREAD_SET_CPU_STATE:
				{
					TRACE(("thread_hit_debug_event(): thread: %ld: "
						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
						thread->id));
					arch_set_debug_cpu_state(
						&commandMessage.set_cpu_state.cpu_state);

					break;
				}

				case B_DEBUGGED_THREAD_GET_CPU_STATE:
				{
					port_id replyPort = commandMessage.get_cpu_state.reply_port;

					// prepare the message
					debug_nub_get_cpu_state_reply replyMessage;
					replyMessage.error = B_OK;
					replyMessage.message = event;
					arch_get_debug_cpu_state(&replyMessage.cpu_state);

					// send it
					error = kill_interruptable_write_port(replyPort, event,
						&replyMessage, sizeof(replyMessage));

					break;
				}

				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
				{
					// Check, if the debugger really changed, i.e. is different
					// than the one we know.
					team_debug_info teamDebugInfo;
					get_team_debug_info(teamDebugInfo);

					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
						if (!debuggerInstalled
							|| teamDebugInfo.debugger_port != debuggerPort) {
							// debugger was installed or has changed: restart
							// this function
							restart = true;
							done = true;
						}
					} else {
						if (debuggerInstalled) {
							// debugger is gone: continue the thread normally
							done = true;
						}
					}

					break;
				}
			}
		}
	} else {
		TRACE(("thread_hit_debug_event(): thread: %ld, failed to send "
			"message to debugger port %ld: %lx\n", thread->id,
			debuggerPort, error));
	}

	// update the thread debug info
	bool destroyThreadInfo = false;
	thread_debug_info threadDebugInfo;

	state = disable_interrupts();
	threadDebugInfoLocker.Lock();

	// check, if the team is still being debugged
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// update the single-step flag
		if (singleStep) {
			atomic_or(&thread->debug_info.flags,
				B_THREAD_DEBUG_SINGLE_STEP);
			atomic_or(&thread->flags, THREAD_FLAGS_SINGLE_STEP);
		} else {
			atomic_and(&thread->debug_info.flags,
				~(int32)B_THREAD_DEBUG_SINGLE_STEP);
		}

		// unset the "stopped" state
		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);

		update_thread_user_debug_flag(thread);

	} else {
		// the debugger is gone: cleanup our info completely
		threadDebugInfo = thread->debug_info;
		clear_thread_debug_info(&thread->debug_info, false);
		destroyThreadInfo = true;
	}

	threadDebugInfoLocker.Unlock();
	restore_interrupts(state);

	// enable/disable single stepping
	arch_update_thread_single_step();

	if (destroyThreadInfo)
		destroy_thread_debug_info(&threadDebugInfo);

	return (error == B_OK ? result : error);
}


/*!	Wrapper around thread_hit_debug_event_internal() that restarts the debug
	loop when the debugger changed while waiting, and afterwards gives the
	breakpoint manager a chance to prepare for continuing at the interrupted
	instruction.
*/
static status_t
thread_hit_debug_event(debug_debugger_message event, const void *message,
	int32 size, bool requireDebugger)
{
	status_t result;
	bool restart;
	do {
		restart = false;
		result = thread_hit_debug_event_internal(event, message, size,
			requireDebugger, restart);
	} while (result >= 0 && restart);

	// Prepare to continue -- we install a debugger change condition, so no one
	// will change the debugger while we're playing with the breakpoint manager.
	// TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
	Team* team = thread_get_current_thread()->team;
	ConditionVariable debugChangeCondition;
	prepare_debugger_change(team, debugChangeCondition);

	if (team->debug_info.breakpoint_manager != NULL) {
		bool isSyscall;
		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
		if (pc != NULL && !isSyscall)
			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
	}

	finish_debugger_change(team);

	return result;
}


/*!	Like thread_hit_debug_event(), but installs the default debugger first,
	if no debugger is installed for the team yet.
*/
static status_t
thread_hit_serious_debug_event(debug_debugger_message event,
	const void *message, int32 messageSize)
{
	// ensure that a debugger is installed for this team
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		Thread *thread = thread_get_current_thread();
		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
			"thread: %ld: %s\n", thread->id, strerror(error));
		return error;
	}

	// enter the debug loop
	return thread_hit_debug_event(event, message, messageSize, true);
}


void
user_debug_pre_syscall(uint32 syscall, void *args)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether pre-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_pre_syscall message;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
		sizeof(message), true);
}


void
user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
	bigtime_t startTime)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether post-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_post_syscall message;
	message.start_time = startTime;
	message.end_time = system_time();
	message.return_value = returnValue;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
		sizeof(message), true);
}


/** \brief To be called when an unhandled processor exception (error/fault)
 *		   occurred.
 *	\param exception The debug_why_stopped value identifying the kind of fault.
 *	\param signal The signal corresponding to the exception.
 *	\return \c true, if the caller shall continue normally, i.e. usually send
 *			a deadly signal. \c false, if the debugger insists to continue the
 *			program (e.g. because it has removed the cause of the problem).
 */
bool
user_debug_exception_occurred(debug_exception_type exception, int signal)
{
	// First check whether there's a signal handler installed for the signal.
	// If so, we don't want to install a debugger for the team. We always send
	// the signal instead. An already installed debugger will be notified, if
	// it has requested notifications of signal.
	struct sigaction signalAction;
	if (sigaction(signal, NULL, &signalAction) == 0
		&& signalAction.sa_handler != SIG_DFL) {
		return true;
	}

	// prepare the message
	debug_exception_occurred message;
	message.exception = exception;
	message.signal = signal;

	status_t result = thread_hit_serious_debug_event(
		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


bool
user_debug_handle_signal(int signal, struct sigaction *handler, bool deadly)
{
	// check, if a debugger is installed and is interested in signals
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
		return true;
	}

	// prepare the message
	debug_signal_received message;
	message.signal = signal;
	message.handler = *handler;
	message.deadly = deadly;

	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
		&message, sizeof(message), true);
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


void
user_debug_stop_thread()
{
	// check whether this is actually an emulated single-step notification
	Thread* thread = thread_get_current_thread();
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	bool singleStepped = false;
	if ((atomic_and(&thread->debug_info.flags,
				~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
			& B_THREAD_DEBUG_NOTIFY_SINGLE_STEP) != 0) {
		singleStepped = true;
	}

	threadDebugInfoLocker.Unlock();

	if (singleStepped) {
		user_debug_single_stepped();
	} else {
		debug_thread_debugged message;
		thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED,
			&message, sizeof(message));
	}
}


void
user_debug_team_created(team_id teamID)
{
	// check, if a debugger is installed and is interested in team creation
	// events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_created message;
	message.new_team = teamID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_team_deleted(team_id teamID, port_id debuggerPort)
{
	if (debuggerPort >= 0) {
		TRACE(("user_debug_team_deleted(team: %ld, debugger port: %ld)\n",
			teamID, debuggerPort));

		// the team is already gone -- notify the debugger asynchronously
		debug_team_deleted message;
		message.origin.thread = -1;
		message.origin.team = teamID;
		message.origin.nub_port = -1;
		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
			sizeof(message), B_RELATIVE_TIMEOUT, 0);
	}
}


void
user_debug_team_exec()
{
	// check, if a debugger is installed and is interested in team creation
	// events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_exec message;
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
		sizeof(message), true);
}


/*!	Called by a new userland thread to update the debugging related flags of
	\c Thread::flags before the thread first enters userland.
	\param thread The calling thread.
1034 */ 1035 void 1036 user_debug_update_new_thread_flags(Thread* thread) 1037 { 1038 // lock it and update it's flags 1039 InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock); 1040 1041 update_thread_user_debug_flag(thread); 1042 update_thread_breakpoints_flag(thread); 1043 update_thread_debugger_installed_flag(thread); 1044 } 1045 1046 1047 void 1048 user_debug_thread_created(thread_id threadID) 1049 { 1050 // check, if a debugger is installed and is interested in thread events 1051 Thread *thread = thread_get_current_thread(); 1052 int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags); 1053 if (~teamDebugFlags 1054 & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) { 1055 return; 1056 } 1057 1058 // prepare the message 1059 debug_thread_created message; 1060 message.new_thread = threadID; 1061 1062 thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message, 1063 sizeof(message), true); 1064 } 1065 1066 1067 void 1068 user_debug_thread_deleted(team_id teamID, thread_id threadID) 1069 { 1070 // Things are a bit complicated here, since this thread no longer belongs to 1071 // the debugged team (but to the kernel). So we can't use debugger_write(). 

	// get the team debug flags and debugger port
	Team* team = Team::Get(teamID);
	if (team == NULL)
		return;
	BReference<Team> teamReference(team, true);

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;
	sem_id writeLock = team->debug_info.debugger_write_lock;

	debugInfoLocker.Unlock();

	// check, if a debugger is installed and is interested in thread events
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// acquire the debugger write lock (serializes messages to the debugger
	// port; interruptible, since a SIGKILL must be able to get us out)
	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK)
		return;

	// re-get the team debug info -- we need to check whether anything changed
	// while we were waiting for the write lock
	debugInfoLocker.Lock();

	teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id newDebuggerPort = team->debug_info.debugger_port;

	debugInfoLocker.Unlock();

	// Send the message only if the debugger hasn't changed in the meantime or
	// the team is about to be handed over.
	// NOTE(review): the condition below actually tests that the HANDOVER flag
	// is *clear*, which contradicts the comment above -- confirm the intended
	// semantics before changing either.
	if (newDebuggerPort == debuggerPort
		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
		debug_thread_deleted message;
		message.origin.thread = threadID;
		message.origin.team = teamID;
		message.origin.nub_port = -1;

		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
	}

	// release the debugger write lock
	release_sem(writeLock);
}


/*!	Called for a thread that is about to die, cleaning up all user debug
	facilities installed for the thread.
	\param thread The current thread, the one that is going to die.
*/
void
user_debug_thread_exiting(Thread* thread)
{
	// thread is the current thread, so using team is safe
	Team* team = thread->team;

	InterruptsLocker interruptsLocker;

	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	// check, if a debugger is installed
	if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
		|| debuggerPort < 0) {
		return;
	}

	// detach the profile info and mark the thread dying
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	thread_debug_info& threadDebugInfo = thread->debug_info;
	if (threadDebugInfo.profile.samples == NULL)
		return;

	// snapshot the profiling state while holding the lock, then clear it so
	// no further samples are recorded for this dying thread
	area_id sampleArea = threadDebugInfo.profile.sample_area;
	int32 sampleCount = threadDebugInfo.profile.sample_count;
	int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
	int32 stackDepth = threadDebugInfo.profile.stack_depth;
	bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
	int32 imageEvent = threadDebugInfo.profile.image_event;
	threadDebugInfo.profile.sample_area = -1;
	threadDebugInfo.profile.samples = NULL;
	threadDebugInfo.profile.buffer_full = false;

	atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);

	threadDebugInfoLocker.Unlock();
	interruptsLocker.Unlock();

	// notify the debugger with a final profiler update
	debug_profiler_update message;
	message.origin.thread = thread->id;
	message.origin.team = thread->team->id;
	message.origin.nub_port = -1;	// asynchronous message
	message.sample_count = sampleCount;
	message.dropped_ticks = droppedTicks;
	message.stack_depth = stackDepth;
	message.variable_stack_depth = variableStackDepth;
	message.image_event = imageEvent;
	message.stopped
= true; 1182 debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE, 1183 &message, sizeof(message), false); 1184 1185 if (sampleArea >= 0) { 1186 area_info areaInfo; 1187 if (get_area_info(sampleArea, &areaInfo) == B_OK) { 1188 unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE); 1189 delete_area(sampleArea); 1190 } 1191 } 1192 } 1193 1194 1195 void 1196 user_debug_image_created(const image_info *imageInfo) 1197 { 1198 // check, if a debugger is installed and is interested in image events 1199 Thread *thread = thread_get_current_thread(); 1200 int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags); 1201 if (~teamDebugFlags 1202 & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) { 1203 return; 1204 } 1205 1206 // prepare the message 1207 debug_image_created message; 1208 memcpy(&message.info, imageInfo, sizeof(image_info)); 1209 message.image_event = atomic_add(&thread->team->debug_info.image_event, 1) 1210 + 1; 1211 1212 thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message, 1213 sizeof(message), true); 1214 } 1215 1216 1217 void 1218 user_debug_image_deleted(const image_info *imageInfo) 1219 { 1220 // check, if a debugger is installed and is interested in image events 1221 Thread *thread = thread_get_current_thread(); 1222 int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags); 1223 if (~teamDebugFlags 1224 & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) { 1225 return; 1226 } 1227 1228 // prepare the message 1229 debug_image_deleted message; 1230 memcpy(&message.info, imageInfo, sizeof(image_info)); 1231 message.image_event = atomic_add(&thread->team->debug_info.image_event, 1) 1232 + 1; 1233 1234 thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message, 1235 sizeof(message), true); 1236 } 1237 1238 1239 void 1240 user_debug_breakpoint_hit(bool software) 1241 { 1242 // prepare the message 1243 debug_breakpoint_hit message; 1244 arch_get_debug_cpu_state(&message.cpu_state); 1245 
	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
		sizeof(message));
}


/*!	Notifies the installed debugger that the current thread hit a watchpoint.
*/
void
user_debug_watchpoint_hit()
{
	// prepare the message
	debug_watchpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
		sizeof(message));
}


/*!	Notifies the installed debugger that the current thread completed a
	single step.
*/
void
user_debug_single_stepped()
{
	// clear the single-step thread flag
	Thread* thread = thread_get_current_thread();
	atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_SINGLE_STEP);

	// prepare the message
	debug_single_step message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
		sizeof(message));
}


/*!	Schedules the profiling timer for the current thread.
	The caller must hold the thread's debug info lock.
	\param thread The current thread.
	\param interval The time after which the timer should fire.
*/
static void
schedule_profiling_timer(Thread* thread, bigtime_t interval)
{
	// one timer per CPU -- the thread uses the timer of the CPU it currently
	// runs on
	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
	thread->debug_info.profile.installed_timer = timer;
	thread->debug_info.profile.timer_end = system_time() + interval;
	add_timer(timer, &profiling_event, interval, B_ONE_SHOT_RELATIVE_TIMER);
}


/*!	Samples the current thread's instruction pointer/stack trace.
	The caller must hold the current thread's debug info lock.
	\param flushBuffer Return parameter: Set to \c true when the sampling
		buffer must be flushed.
*/
static bool
profiling_do_sample(bool& flushBuffer)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	// no sample buffer installed -> profiling is off for this thread
	if (debugInfo.profile.samples == NULL)
		return false;

	// Check, whether the buffer is full or an image event occurred since the
	// last sample was taken.
	int32 maxSamples = debugInfo.profile.max_samples;
	int32 sampleCount = debugInfo.profile.sample_count;
	int32 stackDepth = debugInfo.profile.stack_depth;
	int32 imageEvent = thread->team->debug_info.image_event;
	if (debugInfo.profile.sample_count > 0) {
		if (debugInfo.profile.last_image_event < imageEvent
			&& debugInfo.profile.variable_stack_depth
			&& sampleCount + 2 <= maxSamples) {
			// an image event occurred, but we use variable stack depth and
			// have enough room in the buffer to indicate an image event
			// (record a two-entry marker instead of forcing a flush)
			addr_t* event = debugInfo.profile.samples + sampleCount;
			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
			event[1] = imageEvent;
			sampleCount += 2;
			debugInfo.profile.sample_count = sampleCount;
			debugInfo.profile.last_image_event = imageEvent;
		}

		// flush when the image event couldn't be recorded inline or the
		// buffer has reached its flush threshold
		if (debugInfo.profile.last_image_event < imageEvent
			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
			if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
				flushBuffer = true;
				return true;
			}

			// We can't flush the buffer now, since we interrupted a kernel
			// function. If the buffer is not full yet, we add the samples,
			// otherwise we have to drop them.
			if (maxSamples - sampleCount < stackDepth) {
				// buffer is full -- account for the dropped tick
				debugInfo.profile.dropped_ticks++;
				return true;
			}
		}
	} else {
		// first sample -- set the image event
		debugInfo.profile.image_event = imageEvent;
		debugInfo.profile.last_image_event = imageEvent;
	}

	// get the samples
	addr_t* returnAddresses = debugInfo.profile.samples
		+ debugInfo.profile.sample_count;
	if (debugInfo.profile.variable_stack_depth) {
		// variable sample count per hit -- the first entry holds the number
		// of return addresses that follow
		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
			stackDepth - 1, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

		debugInfo.profile.sample_count += *returnAddresses + 1;
	} else {
		// fixed sample count per hit -- pad unused slots with 0
		if (stackDepth > 1) {
			int32 count = arch_debug_get_stack_trace(returnAddresses,
				stackDepth, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

			for (int32 i = count; i < stackDepth; i++)
				returnAddresses[i] = 0;
		} else
			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);

		debugInfo.profile.sample_count += stackDepth;
	}

	return true;
}


static void
profiling_buffer_full(void*)
{
	// It is undefined whether the function is called with interrupts enabled
	// or disabled. We are allowed to enable interrupts, though. First make
	// sure interrupts are disabled.
	disable_interrupts();

	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
		// snapshot the buffer state before releasing the lock
		int32 sampleCount = debugInfo.profile.sample_count;
		int32 droppedTicks = debugInfo.profile.dropped_ticks;
		int32 stackDepth = debugInfo.profile.stack_depth;
		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
		int32 imageEvent = debugInfo.profile.image_event;

		// notify the debugger
		debugInfo.profile.sample_count = 0;
		debugInfo.profile.dropped_ticks = 0;

		// must not hold the spinlock (or have interrupts disabled) while
		// talking to the debugger
		threadDebugInfoLocker.Unlock();
		enable_interrupts();

		// prepare the message
		debug_profiler_update message;
		message.sample_count = sampleCount;
		message.dropped_ticks = droppedTicks;
		message.stack_depth = stackDepth;
		message.variable_stack_depth = variableStackDepth;
		message.image_event = imageEvent;
		message.stopped = false;

		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
			sizeof(message), false);

		disable_interrupts();
		threadDebugInfoLocker.Lock();

		// do the sampling and reschedule timer, if still profiling this thread
		// (flushBuffer is deliberately ignored here: the buffer was just
		// emptied above, so a flush request cannot be meaningful yet)
		bool flushBuffer;
		if (profiling_do_sample(flushBuffer)) {
			debugInfo.profile.buffer_full = false;
			schedule_profiling_timer(thread, debugInfo.profile.interval);
		}
	}

	threadDebugInfoLocker.Unlock();
	enable_interrupts();
}


/*!	Profiling timer event callback.
	Called with interrupts disabled.
*/
static int32
profiling_event(timer* /*unused*/)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	bool flushBuffer = false;
	if (profiling_do_sample(flushBuffer)) {
		if (flushBuffer) {
			// The sample buffer needs to be flushed; we'll have to notify the
			// debugger. We can't do that right here. Instead we set a post
			// interrupt callback doing that for us, and don't reschedule the
			// timer yet.
			thread->post_interrupt_callback = profiling_buffer_full;
			debugInfo.profile.installed_timer = NULL;
			debugInfo.profile.buffer_full = true;
		} else
			schedule_profiling_timer(thread, debugInfo.profile.interval);
	} else
		debugInfo.profile.installed_timer = NULL;

	return B_HANDLED_INTERRUPT;
}


/*!	Called by the scheduler when a debugged thread has been unscheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_unscheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// if running, cancel the profiling timer
	struct timer* timer = thread->debug_info.profile.installed_timer;
	if (timer != NULL) {
		// track remaining time, so the timer can be resumed with the rest of
		// the interval when the thread is scheduled again
		bigtime_t left = thread->debug_info.profile.timer_end - system_time();
		thread->debug_info.profile.interval_left = max_c(left, 0);
		thread->debug_info.profile.installed_timer = NULL;

		// cancel timer
		threadDebugInfoLocker.Unlock();
			// not necessary, but doesn't harm and reduces contention
		cancel_timer(timer);
			// since invoked on the same CPU, this will not possibly wait for
			// an already called timer hook
	}
}


/*!	Called by the scheduler when a debugged thread has been scheduled.
	The scheduler lock is being held.
1489 */ 1490 void 1491 user_debug_thread_scheduled(Thread* thread) 1492 { 1493 SpinLocker threadDebugInfoLocker(thread->debug_info.lock); 1494 1495 if (thread->debug_info.profile.samples != NULL 1496 && !thread->debug_info.profile.buffer_full) { 1497 // install profiling timer 1498 schedule_profiling_timer(thread, 1499 thread->debug_info.profile.interval_left); 1500 } 1501 } 1502 1503 1504 /*! \brief Called by the debug nub thread of a team to broadcast a message to 1505 all threads of the team that are initialized for debugging (and 1506 thus have a debug port). 1507 */ 1508 static void 1509 broadcast_debugged_thread_message(Thread *nubThread, int32 code, 1510 const void *message, int32 size) 1511 { 1512 // iterate through the threads 1513 thread_info threadInfo; 1514 int32 cookie = 0; 1515 while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo) 1516 == B_OK) { 1517 // get the thread and lock it 1518 Thread* thread = Thread::GetAndLock(threadInfo.thread); 1519 if (thread == NULL) 1520 continue; 1521 1522 BReference<Thread> threadReference(thread, true); 1523 ThreadLocker threadLocker(thread, true); 1524 1525 // get the thread's debug port 1526 InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock); 1527 1528 port_id threadDebugPort = -1; 1529 if (thread && thread != nubThread && thread->team == nubThread->team 1530 && (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0 1531 && (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) { 1532 threadDebugPort = thread->debug_info.debug_port; 1533 } 1534 1535 threadDebugInfoLocker.Unlock(); 1536 threadLocker.Unlock(); 1537 1538 // send the message to the thread 1539 if (threadDebugPort >= 0) { 1540 status_t error = kill_interruptable_write_port(threadDebugPort, 1541 code, message, size); 1542 if (error != B_OK) { 1543 TRACE(("broadcast_debugged_thread_message(): Failed to send " 1544 "message to thread %ld: %lx\n", thread->id, error)); 1545 } 1546 } 1547 } 1548 } 1549 1550 1551 
/*!	Cleans up the team debug info when the nub thread terminates: detaches
	the debug info from the team, removes all breakpoints, and notifies all
	debugged threads that the debugger is gone.
	\param nubThread The (current) nub thread of the team.
*/
static void
nub_thread_cleanup(Thread *nubThread)
{
	TRACE(("nub_thread_cleanup(%ld): debugger port: %ld\n", nubThread->id,
		nubThread->team->debug_info.debugger_port));

	ConditionVariable debugChangeCondition;
	prepare_debugger_change(nubThread->team, debugChangeCondition);

	team_debug_info teamDebugInfo;
	bool destroyDebugInfo = false;

	TeamLocker teamLocker(nubThread->team);
		// required by update_threads_debugger_installed_flag()

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	// only detach if we are still the team's registered nub thread
	team_debug_info &info = nubThread->team->debug_info;
	if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
		&& info.nub_thread == nubThread->id) {
		teamDebugInfo = info;
		clear_team_debug_info(&info, false);
		destroyDebugInfo = true;
	}

	// update the thread::flags fields
	update_threads_debugger_installed_flag(nubThread->team);

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	teamLocker.Unlock();

	if (destroyDebugInfo)
		teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();

	finish_debugger_change(nubThread->team);

	if (destroyDebugInfo)
		destroy_team_debug_info(&teamDebugInfo);

	// notify all threads that the debugger is gone
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
}


/**	\brief Debug nub thread helper function that returns the debug port of
 *	a thread of the same team.
 */
static status_t
debug_nub_thread_get_thread_debug_port(Thread *nubThread,
	thread_id threadID, port_id &threadDebugPort)
{
	threadDebugPort = -1;

	// get the thread
	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// get the debug port; early returns below rely on the RAII lockers to
	// release the locks
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	if (thread->team != nubThread->team)
		return B_BAD_VALUE;
	if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) == 0)
		return B_BAD_THREAD_STATE;

	threadDebugPort = thread->debug_info.debug_port;

	threadDebugInfoLocker.Unlock();

	if (threadDebugPort < 0)
		return B_ERROR;

	return B_OK;
}


static status_t
debug_nub_thread(void *)
{
	Thread *nubThread = thread_get_current_thread();

	// check, if we're still the current nub thread and get our port
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	if (nubThread->team->debug_info.nub_thread != nubThread->id) {
		RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
		restore_interrupts(state);
		return 0;
	}

	port_id port = nubThread->team->debug_info.nub_port;
	sem_id writeLock = nubThread->team->debug_info.debugger_write_lock;
	BreakpointManager* breakpointManager
		= nubThread->team->debug_info.breakpoint_manager;

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	TRACE(("debug_nub_thread() thread: %ld, team %ld, nub port: %ld\n",
		nubThread->id, nubThread->team->id, port));

	// notify all threads that a debugger has been installed
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
1663 1664 // command processing loop 1665 while (true) { 1666 int32 command; 1667 debug_nub_message_data message; 1668 ssize_t messageSize = read_port_etc(port, &command, &message, 1669 sizeof(message), B_KILL_CAN_INTERRUPT, 0); 1670 1671 if (messageSize < 0) { 1672 // The port is no longer valid or we were interrupted by a kill 1673 // signal: If we are still listed in the team's debug info as nub 1674 // thread, we need to update that. 1675 nub_thread_cleanup(nubThread); 1676 1677 TRACE(("nub thread %ld: terminating: %lx\n", nubThread->id, 1678 messageSize)); 1679 1680 return messageSize; 1681 } 1682 1683 bool sendReply = false; 1684 union { 1685 debug_nub_read_memory_reply read_memory; 1686 debug_nub_write_memory_reply write_memory; 1687 debug_nub_get_cpu_state_reply get_cpu_state; 1688 debug_nub_set_breakpoint_reply set_breakpoint; 1689 debug_nub_set_watchpoint_reply set_watchpoint; 1690 debug_nub_get_signal_masks_reply get_signal_masks; 1691 debug_nub_get_signal_handler_reply get_signal_handler; 1692 debug_nub_start_profiler_reply start_profiler; 1693 debug_profiler_update profiler_update; 1694 } reply; 1695 int32 replySize = 0; 1696 port_id replyPort = -1; 1697 1698 // process the command 1699 switch (command) { 1700 case B_DEBUG_MESSAGE_READ_MEMORY: 1701 { 1702 // get the parameters 1703 replyPort = message.read_memory.reply_port; 1704 void *address = message.read_memory.address; 1705 int32 size = message.read_memory.size; 1706 status_t result = B_OK; 1707 1708 // check the parameters 1709 if (!BreakpointManager::CanAccessAddress(address, false)) 1710 result = B_BAD_ADDRESS; 1711 else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE) 1712 result = B_BAD_VALUE; 1713 1714 // read the memory 1715 size_t bytesRead = 0; 1716 if (result == B_OK) { 1717 result = breakpointManager->ReadMemory(address, 1718 reply.read_memory.data, size, bytesRead); 1719 } 1720 reply.read_memory.error = result; 1721 1722 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_READ_MEMORY: " 1723 
"reply port: %ld, address: %p, size: %ld, result: %lx, " 1724 "read: %ld\n", nubThread->id, replyPort, address, size, 1725 result, bytesRead)); 1726 1727 // send only as much data as necessary 1728 reply.read_memory.size = bytesRead; 1729 replySize = reply.read_memory.data + bytesRead - (char*)&reply; 1730 sendReply = true; 1731 break; 1732 } 1733 1734 case B_DEBUG_MESSAGE_WRITE_MEMORY: 1735 { 1736 // get the parameters 1737 replyPort = message.write_memory.reply_port; 1738 void *address = message.write_memory.address; 1739 int32 size = message.write_memory.size; 1740 const char *data = message.write_memory.data; 1741 int32 realSize = (char*)&message + messageSize - data; 1742 status_t result = B_OK; 1743 1744 // check the parameters 1745 if (!BreakpointManager::CanAccessAddress(address, true)) 1746 result = B_BAD_ADDRESS; 1747 else if (size <= 0 || size > realSize) 1748 result = B_BAD_VALUE; 1749 1750 // write the memory 1751 size_t bytesWritten = 0; 1752 if (result == B_OK) { 1753 result = breakpointManager->WriteMemory(address, data, size, 1754 bytesWritten); 1755 } 1756 reply.write_memory.error = result; 1757 1758 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_WRITE_MEMORY: " 1759 "reply port: %ld, address: %p, size: %ld, result: %lx, " 1760 "written: %ld\n", nubThread->id, replyPort, address, size, 1761 result, bytesWritten)); 1762 1763 reply.write_memory.size = bytesWritten; 1764 sendReply = true; 1765 replySize = sizeof(debug_nub_write_memory_reply); 1766 break; 1767 } 1768 1769 case B_DEBUG_MESSAGE_SET_TEAM_FLAGS: 1770 { 1771 // get the parameters 1772 int32 flags = message.set_team_flags.flags 1773 & B_TEAM_DEBUG_USER_FLAG_MASK; 1774 1775 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_TEAM_FLAGS: " 1776 "flags: %lx\n", nubThread->id, flags)); 1777 1778 Team *team = thread_get_current_thread()->team; 1779 1780 // set the flags 1781 cpu_status state = disable_interrupts(); 1782 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1783 1784 flags |= team->debug_info.flags & 
B_TEAM_DEBUG_KERNEL_FLAG_MASK; 1785 atomic_set(&team->debug_info.flags, flags); 1786 1787 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1788 restore_interrupts(state); 1789 1790 break; 1791 } 1792 1793 case B_DEBUG_MESSAGE_SET_THREAD_FLAGS: 1794 { 1795 // get the parameters 1796 thread_id threadID = message.set_thread_flags.thread; 1797 int32 flags = message.set_thread_flags.flags 1798 & B_THREAD_DEBUG_USER_FLAG_MASK; 1799 1800 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_THREAD_FLAGS: " 1801 "thread: %ld, flags: %lx\n", nubThread->id, threadID, 1802 flags)); 1803 1804 // set the flags 1805 Thread* thread = Thread::GetAndLock(threadID); 1806 if (thread == NULL) 1807 break; 1808 BReference<Thread> threadReference(thread, true); 1809 ThreadLocker threadLocker(thread, true); 1810 1811 InterruptsSpinLocker threadDebugInfoLocker( 1812 thread->debug_info.lock); 1813 1814 if (thread->team == thread_get_current_thread()->team) { 1815 flags |= thread->debug_info.flags 1816 & B_THREAD_DEBUG_KERNEL_FLAG_MASK; 1817 atomic_set(&thread->debug_info.flags, flags); 1818 } 1819 1820 break; 1821 } 1822 1823 case B_DEBUG_MESSAGE_CONTINUE_THREAD: 1824 { 1825 // get the parameters 1826 thread_id threadID; 1827 uint32 handleEvent; 1828 bool singleStep; 1829 1830 threadID = message.continue_thread.thread; 1831 handleEvent = message.continue_thread.handle_event; 1832 singleStep = message.continue_thread.single_step; 1833 1834 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_CONTINUE_THREAD: " 1835 "thread: %ld, handle event: %lu, single step: %d\n", 1836 nubThread->id, threadID, handleEvent, singleStep)); 1837 1838 // find the thread and get its debug port 1839 port_id threadDebugPort = -1; 1840 status_t result = debug_nub_thread_get_thread_debug_port( 1841 nubThread, threadID, threadDebugPort); 1842 1843 // send a message to the debugged thread 1844 if (result == B_OK) { 1845 debugged_thread_continue commandMessage; 1846 commandMessage.handle_event = handleEvent; 1847 commandMessage.single_step = 
singleStep; 1848 1849 result = write_port(threadDebugPort, 1850 B_DEBUGGED_THREAD_MESSAGE_CONTINUE, 1851 &commandMessage, sizeof(commandMessage)); 1852 } 1853 1854 break; 1855 } 1856 1857 case B_DEBUG_MESSAGE_SET_CPU_STATE: 1858 { 1859 // get the parameters 1860 thread_id threadID = message.set_cpu_state.thread; 1861 const debug_cpu_state &cpuState 1862 = message.set_cpu_state.cpu_state; 1863 1864 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_CPU_STATE: " 1865 "thread: %ld\n", nubThread->id, threadID)); 1866 1867 // find the thread and get its debug port 1868 port_id threadDebugPort = -1; 1869 status_t result = debug_nub_thread_get_thread_debug_port( 1870 nubThread, threadID, threadDebugPort); 1871 1872 // send a message to the debugged thread 1873 if (result == B_OK) { 1874 debugged_thread_set_cpu_state commandMessage; 1875 memcpy(&commandMessage.cpu_state, &cpuState, 1876 sizeof(debug_cpu_state)); 1877 write_port(threadDebugPort, 1878 B_DEBUGGED_THREAD_SET_CPU_STATE, 1879 &commandMessage, sizeof(commandMessage)); 1880 } 1881 1882 break; 1883 } 1884 1885 case B_DEBUG_MESSAGE_GET_CPU_STATE: 1886 { 1887 // get the parameters 1888 thread_id threadID = message.get_cpu_state.thread; 1889 replyPort = message.get_cpu_state.reply_port; 1890 1891 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_GET_CPU_STATE: " 1892 "thread: %ld\n", nubThread->id, threadID)); 1893 1894 // find the thread and get its debug port 1895 port_id threadDebugPort = -1; 1896 status_t result = debug_nub_thread_get_thread_debug_port( 1897 nubThread, threadID, threadDebugPort); 1898 1899 // send a message to the debugged thread 1900 if (threadDebugPort >= 0) { 1901 debugged_thread_get_cpu_state commandMessage; 1902 commandMessage.reply_port = replyPort; 1903 result = write_port(threadDebugPort, 1904 B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage, 1905 sizeof(commandMessage)); 1906 } 1907 1908 // send a reply to the debugger in case of error 1909 if (result != B_OK) { 1910 reply.get_cpu_state.error = result; 
1911 sendReply = true; 1912 replySize = sizeof(reply.get_cpu_state); 1913 } 1914 1915 break; 1916 } 1917 1918 case B_DEBUG_MESSAGE_SET_BREAKPOINT: 1919 { 1920 // get the parameters 1921 replyPort = message.set_breakpoint.reply_port; 1922 void *address = message.set_breakpoint.address; 1923 1924 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_BREAKPOINT: " 1925 "address: %p\n", nubThread->id, address)); 1926 1927 // check the address 1928 status_t result = B_OK; 1929 if (address == NULL 1930 || !BreakpointManager::CanAccessAddress(address, false)) { 1931 result = B_BAD_ADDRESS; 1932 } 1933 1934 // set the breakpoint 1935 if (result == B_OK) 1936 result = breakpointManager->InstallBreakpoint(address); 1937 1938 if (result == B_OK) 1939 update_threads_breakpoints_flag(); 1940 1941 // prepare the reply 1942 reply.set_breakpoint.error = result; 1943 replySize = sizeof(reply.set_breakpoint); 1944 sendReply = true; 1945 1946 break; 1947 } 1948 1949 case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT: 1950 { 1951 // get the parameters 1952 void *address = message.clear_breakpoint.address; 1953 1954 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_CLEAR_BREAKPOINT: " 1955 "address: %p\n", nubThread->id, address)); 1956 1957 // check the address 1958 status_t result = B_OK; 1959 if (address == NULL 1960 || !BreakpointManager::CanAccessAddress(address, false)) { 1961 result = B_BAD_ADDRESS; 1962 } 1963 1964 // clear the breakpoint 1965 if (result == B_OK) 1966 result = breakpointManager->UninstallBreakpoint(address); 1967 1968 if (result == B_OK) 1969 update_threads_breakpoints_flag(); 1970 1971 break; 1972 } 1973 1974 case B_DEBUG_MESSAGE_SET_WATCHPOINT: 1975 { 1976 // get the parameters 1977 replyPort = message.set_watchpoint.reply_port; 1978 void *address = message.set_watchpoint.address; 1979 uint32 type = message.set_watchpoint.type; 1980 int32 length = message.set_watchpoint.length; 1981 1982 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_WATCHPOINT: " 1983 "address: %p, type: %lu, length: %ld\n", 
nubThread->id, 1984 address, type, length)); 1985 1986 // check the address and size 1987 status_t result = B_OK; 1988 if (address == NULL 1989 || !BreakpointManager::CanAccessAddress(address, false)) { 1990 result = B_BAD_ADDRESS; 1991 } 1992 if (length < 0) 1993 result = B_BAD_VALUE; 1994 1995 // set the watchpoint 1996 if (result == B_OK) { 1997 result = breakpointManager->InstallWatchpoint(address, type, 1998 length); 1999 } 2000 2001 if (result == B_OK) 2002 update_threads_breakpoints_flag(); 2003 2004 // prepare the reply 2005 reply.set_watchpoint.error = result; 2006 replySize = sizeof(reply.set_watchpoint); 2007 sendReply = true; 2008 2009 break; 2010 } 2011 2012 case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT: 2013 { 2014 // get the parameters 2015 void *address = message.clear_watchpoint.address; 2016 2017 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_CLEAR_WATCHPOINT: " 2018 "address: %p\n", nubThread->id, address)); 2019 2020 // check the address 2021 status_t result = B_OK; 2022 if (address == NULL 2023 || !BreakpointManager::CanAccessAddress(address, false)) { 2024 result = B_BAD_ADDRESS; 2025 } 2026 2027 // clear the watchpoint 2028 if (result == B_OK) 2029 result = breakpointManager->UninstallWatchpoint(address); 2030 2031 if (result == B_OK) 2032 update_threads_breakpoints_flag(); 2033 2034 break; 2035 } 2036 2037 case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS: 2038 { 2039 // get the parameters 2040 thread_id threadID = message.set_signal_masks.thread; 2041 uint64 ignore = message.set_signal_masks.ignore_mask; 2042 uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask; 2043 uint32 ignoreOp = message.set_signal_masks.ignore_op; 2044 uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op; 2045 2046 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_SIGNAL_MASKS: " 2047 "thread: %ld, ignore: %llx (op: %lu), ignore once: %llx " 2048 "(op: %lu)\n", nubThread->id, threadID, ignore, 2049 ignoreOp, ignoreOnce, ignoreOnceOp)); 2050 2051 // set the masks 2052 Thread* thread 
= Thread::GetAndLock(threadID); 2053 if (thread == NULL) 2054 break; 2055 BReference<Thread> threadReference(thread, true); 2056 ThreadLocker threadLocker(thread, true); 2057 2058 InterruptsSpinLocker threadDebugInfoLocker( 2059 thread->debug_info.lock); 2060 2061 if (thread->team == thread_get_current_thread()->team) { 2062 thread_debug_info &threadDebugInfo = thread->debug_info; 2063 // set ignore mask 2064 switch (ignoreOp) { 2065 case B_DEBUG_SIGNAL_MASK_AND: 2066 threadDebugInfo.ignore_signals &= ignore; 2067 break; 2068 case B_DEBUG_SIGNAL_MASK_OR: 2069 threadDebugInfo.ignore_signals |= ignore; 2070 break; 2071 case B_DEBUG_SIGNAL_MASK_SET: 2072 threadDebugInfo.ignore_signals = ignore; 2073 break; 2074 } 2075 2076 // set ignore once mask 2077 switch (ignoreOnceOp) { 2078 case B_DEBUG_SIGNAL_MASK_AND: 2079 threadDebugInfo.ignore_signals_once &= ignoreOnce; 2080 break; 2081 case B_DEBUG_SIGNAL_MASK_OR: 2082 threadDebugInfo.ignore_signals_once |= ignoreOnce; 2083 break; 2084 case B_DEBUG_SIGNAL_MASK_SET: 2085 threadDebugInfo.ignore_signals_once = ignoreOnce; 2086 break; 2087 } 2088 } 2089 2090 break; 2091 } 2092 2093 case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS: 2094 { 2095 // get the parameters 2096 replyPort = message.get_signal_masks.reply_port; 2097 thread_id threadID = message.get_signal_masks.thread; 2098 status_t result = B_OK; 2099 2100 // get the masks 2101 uint64 ignore = 0; 2102 uint64 ignoreOnce = 0; 2103 2104 Thread* thread = Thread::GetAndLock(threadID); 2105 if (thread != NULL) { 2106 BReference<Thread> threadReference(thread, true); 2107 ThreadLocker threadLocker(thread, true); 2108 2109 InterruptsSpinLocker threadDebugInfoLocker( 2110 thread->debug_info.lock); 2111 2112 ignore = thread->debug_info.ignore_signals; 2113 ignoreOnce = thread->debug_info.ignore_signals_once; 2114 } else 2115 result = B_BAD_THREAD_ID; 2116 2117 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_GET_SIGNAL_MASKS: " 2118 "reply port: %ld, thread: %ld, ignore: %llx, " 2119 "ignore once: 
%llx, result: %lx\n", nubThread->id, 2120 replyPort, threadID, ignore, ignoreOnce, result)); 2121 2122 // prepare the message 2123 reply.get_signal_masks.error = result; 2124 reply.get_signal_masks.ignore_mask = ignore; 2125 reply.get_signal_masks.ignore_once_mask = ignoreOnce; 2126 replySize = sizeof(reply.get_signal_masks); 2127 sendReply = true; 2128 break; 2129 } 2130 2131 case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER: 2132 { 2133 // get the parameters 2134 int signal = message.set_signal_handler.signal; 2135 struct sigaction &handler = message.set_signal_handler.handler; 2136 2137 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER: " 2138 "signal: %d, handler: %p\n", nubThread->id, 2139 signal, handler.sa_handler)); 2140 2141 // set the handler 2142 sigaction(signal, &handler, NULL); 2143 2144 break; 2145 } 2146 2147 case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER: 2148 { 2149 // get the parameters 2150 replyPort = message.get_signal_handler.reply_port; 2151 int signal = message.get_signal_handler.signal; 2152 status_t result = B_OK; 2153 2154 // get the handler 2155 if (sigaction(signal, NULL, &reply.get_signal_handler.handler) 2156 != 0) { 2157 result = errno; 2158 } 2159 2160 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER: " 2161 "reply port: %ld, signal: %d, handler: %p\n", nubThread->id, 2162 replyPort, signal, 2163 reply.get_signal_handler.handler.sa_handler)); 2164 2165 // prepare the message 2166 reply.get_signal_handler.error = result; 2167 replySize = sizeof(reply.get_signal_handler); 2168 sendReply = true; 2169 break; 2170 } 2171 2172 case B_DEBUG_MESSAGE_PREPARE_HANDOVER: 2173 { 2174 TRACE(("nub thread %ld: B_DEBUG_MESSAGE_PREPARE_HANDOVER\n", 2175 nubThread->id)); 2176 2177 Team *team = nubThread->team; 2178 2179 // Acquire the debugger write lock. As soon as we have it and 2180 // have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread 2181 // will write anything to the debugger port anymore. 
2182 status_t result = acquire_sem_etc(writeLock, 1, 2183 B_KILL_CAN_INTERRUPT, 0); 2184 if (result == B_OK) { 2185 // set the respective team debug flag 2186 cpu_status state = disable_interrupts(); 2187 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2188 2189 atomic_or(&team->debug_info.flags, 2190 B_TEAM_DEBUG_DEBUGGER_HANDOVER); 2191 BreakpointManager* breakpointManager 2192 = team->debug_info.breakpoint_manager; 2193 2194 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2195 restore_interrupts(state); 2196 2197 // remove all installed breakpoints 2198 breakpointManager->RemoveAllBreakpoints(); 2199 2200 release_sem(writeLock); 2201 } else { 2202 // We probably got a SIGKILL. If so, we will terminate when 2203 // reading the next message fails. 2204 } 2205 2206 break; 2207 } 2208 2209 case B_DEBUG_MESSAGE_HANDED_OVER: 2210 { 2211 // notify all threads that the debugger has changed 2212 broadcast_debugged_thread_message(nubThread, 2213 B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0); 2214 2215 break; 2216 } 2217 2218 case B_DEBUG_START_PROFILER: 2219 { 2220 // get the parameters 2221 thread_id threadID = message.start_profiler.thread; 2222 replyPort = message.start_profiler.reply_port; 2223 area_id sampleArea = message.start_profiler.sample_area; 2224 int32 stackDepth = message.start_profiler.stack_depth; 2225 bool variableStackDepth 2226 = message.start_profiler.variable_stack_depth; 2227 bigtime_t interval = max_c(message.start_profiler.interval, 2228 B_DEBUG_MIN_PROFILE_INTERVAL); 2229 status_t result = B_OK; 2230 2231 TRACE(("nub thread %ld: B_DEBUG_START_PROFILER: " 2232 "thread: %ld, sample area: %ld\n", nubThread->id, threadID, 2233 sampleArea)); 2234 2235 if (stackDepth < 1) 2236 stackDepth = 1; 2237 else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH) 2238 stackDepth = B_DEBUG_STACK_TRACE_DEPTH; 2239 2240 // provision for an extra entry per hit (for the number of 2241 // samples), if variable stack depth 2242 if (variableStackDepth) 2243 stackDepth++; 2244 2245 
// clone the sample area 2246 area_info areaInfo; 2247 if (result == B_OK) 2248 result = get_area_info(sampleArea, &areaInfo); 2249 2250 area_id clonedSampleArea = -1; 2251 void* samples = NULL; 2252 if (result == B_OK) { 2253 clonedSampleArea = clone_area("profiling samples", &samples, 2254 B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA, 2255 sampleArea); 2256 if (clonedSampleArea >= 0) { 2257 // we need the memory locked 2258 result = lock_memory(samples, areaInfo.size, 2259 B_READ_DEVICE); 2260 if (result != B_OK) { 2261 delete_area(clonedSampleArea); 2262 clonedSampleArea = -1; 2263 } 2264 } else 2265 result = clonedSampleArea; 2266 } 2267 2268 // get the thread and set the profile info 2269 int32 imageEvent = nubThread->team->debug_info.image_event; 2270 if (result == B_OK) { 2271 Thread* thread = Thread::GetAndLock(threadID); 2272 BReference<Thread> threadReference(thread, true); 2273 ThreadLocker threadLocker(thread, true); 2274 2275 if (thread != NULL && thread->team == nubThread->team) { 2276 thread_debug_info &threadDebugInfo = thread->debug_info; 2277 2278 InterruptsSpinLocker threadDebugInfoLocker( 2279 threadDebugInfo.lock); 2280 2281 if (threadDebugInfo.profile.samples == NULL) { 2282 threadDebugInfo.profile.interval = interval; 2283 threadDebugInfo.profile.sample_area 2284 = clonedSampleArea; 2285 threadDebugInfo.profile.samples = (addr_t*)samples; 2286 threadDebugInfo.profile.max_samples 2287 = areaInfo.size / sizeof(addr_t); 2288 threadDebugInfo.profile.flush_threshold 2289 = threadDebugInfo.profile.max_samples 2290 * B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD 2291 / 100; 2292 threadDebugInfo.profile.sample_count = 0; 2293 threadDebugInfo.profile.dropped_ticks = 0; 2294 threadDebugInfo.profile.stack_depth = stackDepth; 2295 threadDebugInfo.profile.variable_stack_depth 2296 = variableStackDepth; 2297 threadDebugInfo.profile.buffer_full = false; 2298 threadDebugInfo.profile.interval_left = interval; 2299 threadDebugInfo.profile.installed_timer = NULL; 
2300 threadDebugInfo.profile.image_event = imageEvent; 2301 threadDebugInfo.profile.last_image_event 2302 = imageEvent; 2303 } else 2304 result = B_BAD_VALUE; 2305 } else 2306 result = B_BAD_THREAD_ID; 2307 } 2308 2309 // on error unlock and delete the sample area 2310 if (result != B_OK) { 2311 if (clonedSampleArea >= 0) { 2312 unlock_memory(samples, areaInfo.size, B_READ_DEVICE); 2313 delete_area(clonedSampleArea); 2314 } 2315 } 2316 2317 // send a reply to the debugger 2318 reply.start_profiler.error = result; 2319 reply.start_profiler.interval = interval; 2320 reply.start_profiler.image_event = imageEvent; 2321 sendReply = true; 2322 replySize = sizeof(reply.start_profiler); 2323 2324 break; 2325 } 2326 2327 case B_DEBUG_STOP_PROFILER: 2328 { 2329 // get the parameters 2330 thread_id threadID = message.stop_profiler.thread; 2331 replyPort = message.stop_profiler.reply_port; 2332 status_t result = B_OK; 2333 2334 TRACE(("nub thread %ld: B_DEBUG_STOP_PROFILER: " 2335 "thread: %ld\n", nubThread->id, threadID)); 2336 2337 area_id sampleArea = -1; 2338 addr_t* samples = NULL; 2339 int32 sampleCount = 0; 2340 int32 stackDepth = 0; 2341 bool variableStackDepth = false; 2342 int32 imageEvent = 0; 2343 int32 droppedTicks = 0; 2344 2345 // get the thread and detach the profile info 2346 Thread* thread = Thread::GetAndLock(threadID); 2347 BReference<Thread> threadReference(thread, true); 2348 ThreadLocker threadLocker(thread, true); 2349 2350 if (thread && thread->team == nubThread->team) { 2351 thread_debug_info &threadDebugInfo = thread->debug_info; 2352 2353 InterruptsSpinLocker threadDebugInfoLocker( 2354 threadDebugInfo.lock); 2355 2356 if (threadDebugInfo.profile.samples != NULL) { 2357 sampleArea = threadDebugInfo.profile.sample_area; 2358 samples = threadDebugInfo.profile.samples; 2359 sampleCount = threadDebugInfo.profile.sample_count; 2360 droppedTicks = threadDebugInfo.profile.dropped_ticks; 2361 stackDepth = threadDebugInfo.profile.stack_depth; 2362 
variableStackDepth 2363 = threadDebugInfo.profile.variable_stack_depth; 2364 imageEvent = threadDebugInfo.profile.image_event; 2365 threadDebugInfo.profile.sample_area = -1; 2366 threadDebugInfo.profile.samples = NULL; 2367 threadDebugInfo.profile.buffer_full = false; 2368 threadDebugInfo.profile.dropped_ticks = 0; 2369 } else 2370 result = B_BAD_VALUE; 2371 } else 2372 result = B_BAD_THREAD_ID; 2373 2374 threadLocker.Unlock(); 2375 2376 // prepare the reply 2377 if (result == B_OK) { 2378 reply.profiler_update.origin.thread = threadID; 2379 reply.profiler_update.image_event = imageEvent; 2380 reply.profiler_update.stack_depth = stackDepth; 2381 reply.profiler_update.variable_stack_depth 2382 = variableStackDepth; 2383 reply.profiler_update.sample_count = sampleCount; 2384 reply.profiler_update.dropped_ticks = droppedTicks; 2385 reply.profiler_update.stopped = true; 2386 } else 2387 reply.profiler_update.origin.thread = result; 2388 2389 replySize = sizeof(debug_profiler_update); 2390 sendReply = true; 2391 2392 if (sampleArea >= 0) { 2393 area_info areaInfo; 2394 if (get_area_info(sampleArea, &areaInfo) == B_OK) { 2395 unlock_memory(samples, areaInfo.size, B_READ_DEVICE); 2396 delete_area(sampleArea); 2397 } 2398 } 2399 } 2400 } 2401 2402 // send the reply, if necessary 2403 if (sendReply) { 2404 status_t error = kill_interruptable_write_port(replyPort, command, 2405 &reply, replySize); 2406 2407 if (error != B_OK) { 2408 // The debugger port is either not longer existing or we got 2409 // interrupted by a kill signal. In either case we terminate. 2410 TRACE(("nub thread %ld: failed to send reply to port %ld: %s\n", 2411 nubThread->id, replyPort, strerror(error))); 2412 2413 nub_thread_cleanup(nubThread); 2414 return error; 2415 } 2416 } 2417 } 2418 } 2419 2420 2421 /** \brief Helper function for install_team_debugger(), that sets up the team 2422 and thread debug infos. 2423 2424 The caller must hold the team's lock as well as the team debug info lock. 
2425 2426 The function also clears the arch specific team and thread debug infos 2427 (including among other things formerly set break/watchpoints). 2428 */ 2429 static void 2430 install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam, 2431 port_id debuggerPort, port_id nubPort, thread_id nubThread, 2432 sem_id debuggerPortWriteLock, thread_id causingThread) 2433 { 2434 atomic_set(&team->debug_info.flags, 2435 B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED); 2436 team->debug_info.nub_port = nubPort; 2437 team->debug_info.nub_thread = nubThread; 2438 team->debug_info.debugger_team = debuggerTeam; 2439 team->debug_info.debugger_port = debuggerPort; 2440 team->debug_info.debugger_write_lock = debuggerPortWriteLock; 2441 team->debug_info.causing_thread = causingThread; 2442 2443 arch_clear_team_debug_info(&team->debug_info.arch_info); 2444 2445 // set the user debug flags and signal masks of all threads to the default 2446 for (Thread *thread = team->thread_list; thread; 2447 thread = thread->team_next) { 2448 SpinLocker threadDebugInfoLocker(thread->debug_info.lock); 2449 2450 if (thread->id == nubThread) { 2451 atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD); 2452 } else { 2453 int32 flags = thread->debug_info.flags 2454 & ~B_THREAD_DEBUG_USER_FLAG_MASK; 2455 atomic_set(&thread->debug_info.flags, 2456 flags | B_THREAD_DEBUG_DEFAULT_FLAGS); 2457 thread->debug_info.ignore_signals = 0; 2458 thread->debug_info.ignore_signals_once = 0; 2459 2460 arch_clear_thread_debug_info(&thread->debug_info.arch_info); 2461 } 2462 } 2463 2464 // update the thread::flags fields 2465 update_threads_debugger_installed_flag(team); 2466 } 2467 2468 2469 static port_id 2470 install_team_debugger(team_id teamID, port_id debuggerPort, 2471 thread_id causingThread, bool useDefault, bool dontReplace) 2472 { 2473 TRACE(("install_team_debugger(team: %ld, port: %ld, default: %d, " 2474 "dontReplace: %d)\n", teamID, debuggerPort, useDefault, 
dontReplace)); 2475 2476 if (useDefault) 2477 debuggerPort = atomic_get(&sDefaultDebuggerPort); 2478 2479 // get the debugger team 2480 port_info debuggerPortInfo; 2481 status_t error = get_port_info(debuggerPort, &debuggerPortInfo); 2482 if (error != B_OK) { 2483 TRACE(("install_team_debugger(): Failed to get debugger port info: " 2484 "%lx\n", error)); 2485 return error; 2486 } 2487 team_id debuggerTeam = debuggerPortInfo.team; 2488 2489 // Check the debugger team: It must neither be the kernel team nor the 2490 // debugged team. 2491 if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) { 2492 TRACE(("install_team_debugger(): Can't debug kernel or debugger team. " 2493 "debugger: %ld, debugged: %ld\n", debuggerTeam, teamID)); 2494 return B_NOT_ALLOWED; 2495 } 2496 2497 // get the team 2498 Team* team; 2499 ConditionVariable debugChangeCondition; 2500 error = prepare_debugger_change(teamID, debugChangeCondition, team); 2501 if (error != B_OK) 2502 return error; 2503 2504 // get the real team ID 2505 teamID = team->id; 2506 2507 // check, if a debugger is already installed 2508 2509 bool done = false; 2510 port_id result = B_ERROR; 2511 bool handOver = false; 2512 port_id oldDebuggerPort = -1; 2513 port_id nubPort = -1; 2514 2515 TeamLocker teamLocker(team); 2516 cpu_status state = disable_interrupts(); 2517 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2518 2519 int32 teamDebugFlags = team->debug_info.flags; 2520 2521 if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) { 2522 // There's already a debugger installed. 2523 if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) { 2524 if (dontReplace) { 2525 // We're fine with already having a debugger. 2526 error = B_OK; 2527 done = true; 2528 result = team->debug_info.nub_port; 2529 } else { 2530 // a handover to another debugger is requested 2531 // Set the handing-over flag -- we'll clear both flags after 2532 // having sent the handed-over message to the new debugger. 
2533 atomic_or(&team->debug_info.flags, 2534 B_TEAM_DEBUG_DEBUGGER_HANDING_OVER); 2535 2536 oldDebuggerPort = team->debug_info.debugger_port; 2537 result = nubPort = team->debug_info.nub_port; 2538 if (causingThread < 0) 2539 causingThread = team->debug_info.causing_thread; 2540 2541 // set the new debugger 2542 install_team_debugger_init_debug_infos(team, debuggerTeam, 2543 debuggerPort, nubPort, team->debug_info.nub_thread, 2544 team->debug_info.debugger_write_lock, causingThread); 2545 2546 handOver = true; 2547 done = true; 2548 } 2549 } else { 2550 // there's already a debugger installed 2551 error = (dontReplace ? B_OK : B_BAD_VALUE); 2552 done = true; 2553 result = team->debug_info.nub_port; 2554 } 2555 } else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0 2556 && useDefault) { 2557 // No debugger yet, disable_debugger() had been invoked, and we 2558 // would install the default debugger. Just fail. 2559 error = B_BAD_VALUE; 2560 } 2561 2562 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2563 restore_interrupts(state); 2564 teamLocker.Unlock(); 2565 2566 if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) { 2567 // The old debugger must just have died. Just proceed as 2568 // if there was no debugger installed. We may still be too 2569 // early, in which case we'll fail, but this race condition 2570 // should be unbelievably rare and relatively harmless. 
2571 handOver = false; 2572 done = false; 2573 } 2574 2575 if (handOver) { 2576 // prepare the handed-over message 2577 debug_handed_over notification; 2578 notification.origin.thread = -1; 2579 notification.origin.team = teamID; 2580 notification.origin.nub_port = nubPort; 2581 notification.debugger = debuggerTeam; 2582 notification.debugger_port = debuggerPort; 2583 notification.causing_thread = causingThread; 2584 2585 // notify the new debugger 2586 error = write_port_etc(debuggerPort, 2587 B_DEBUGGER_MESSAGE_HANDED_OVER, ¬ification, 2588 sizeof(notification), B_RELATIVE_TIMEOUT, 0); 2589 if (error != B_OK) { 2590 dprintf("install_team_debugger(): Failed to send message to new " 2591 "debugger: %s\n", strerror(error)); 2592 } 2593 2594 // clear the handed-over and handing-over flags 2595 state = disable_interrupts(); 2596 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2597 2598 atomic_and(&team->debug_info.flags, 2599 ~(B_TEAM_DEBUG_DEBUGGER_HANDOVER 2600 | B_TEAM_DEBUG_DEBUGGER_HANDING_OVER)); 2601 2602 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2603 restore_interrupts(state); 2604 2605 finish_debugger_change(team); 2606 2607 // notify the nub thread 2608 kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER, 2609 NULL, 0); 2610 2611 // notify the old debugger 2612 error = write_port_etc(oldDebuggerPort, 2613 B_DEBUGGER_MESSAGE_HANDED_OVER, ¬ification, 2614 sizeof(notification), B_RELATIVE_TIMEOUT, 0); 2615 if (error != B_OK) { 2616 TRACE(("install_team_debugger(): Failed to send message to old " 2617 "debugger: %s\n", strerror(error))); 2618 } 2619 2620 TRACE(("install_team_debugger() done: handed over to debugger: team: " 2621 "%ld, port: %ld\n", debuggerTeam, debuggerPort)); 2622 2623 return result; 2624 } 2625 2626 if (done || error != B_OK) { 2627 TRACE(("install_team_debugger() done1: %ld\n", 2628 (error == B_OK ? result : error))); 2629 finish_debugger_change(team); 2630 return (error == B_OK ? 
result : error); 2631 } 2632 2633 // create the debugger write lock semaphore 2634 char nameBuffer[B_OS_NAME_LENGTH]; 2635 snprintf(nameBuffer, sizeof(nameBuffer), "team %ld debugger port write", 2636 teamID); 2637 sem_id debuggerWriteLock = create_sem(1, nameBuffer); 2638 if (debuggerWriteLock < 0) 2639 error = debuggerWriteLock; 2640 2641 // create the nub port 2642 snprintf(nameBuffer, sizeof(nameBuffer), "team %ld debug", teamID); 2643 if (error == B_OK) { 2644 nubPort = create_port(1, nameBuffer); 2645 if (nubPort < 0) 2646 error = nubPort; 2647 else 2648 result = nubPort; 2649 } 2650 2651 // make the debugger team the port owner; thus we know, if the debugger is 2652 // gone and can cleanup 2653 if (error == B_OK) 2654 error = set_port_owner(nubPort, debuggerTeam); 2655 2656 // create the breakpoint manager 2657 BreakpointManager* breakpointManager = NULL; 2658 if (error == B_OK) { 2659 breakpointManager = new(std::nothrow) BreakpointManager; 2660 if (breakpointManager != NULL) 2661 error = breakpointManager->Init(); 2662 else 2663 error = B_NO_MEMORY; 2664 } 2665 2666 // spawn the nub thread 2667 thread_id nubThread = -1; 2668 if (error == B_OK) { 2669 snprintf(nameBuffer, sizeof(nameBuffer), "team %ld debug task", teamID); 2670 nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer, 2671 B_NORMAL_PRIORITY, NULL, teamID); 2672 if (nubThread < 0) 2673 error = nubThread; 2674 } 2675 2676 // now adjust the debug info accordingly 2677 if (error == B_OK) { 2678 TeamLocker teamLocker(team); 2679 state = disable_interrupts(); 2680 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2681 2682 team->debug_info.breakpoint_manager = breakpointManager; 2683 install_team_debugger_init_debug_infos(team, debuggerTeam, 2684 debuggerPort, nubPort, nubThread, debuggerWriteLock, 2685 causingThread); 2686 2687 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2688 restore_interrupts(state); 2689 } 2690 2691 finish_debugger_change(team); 2692 2693 // if everything went fine, 
resume the nub thread, otherwise clean up 2694 if (error == B_OK) { 2695 resume_thread(nubThread); 2696 } else { 2697 // delete port and terminate thread 2698 if (nubPort >= 0) { 2699 set_port_owner(nubPort, B_CURRENT_TEAM); 2700 delete_port(nubPort); 2701 } 2702 if (nubThread >= 0) { 2703 int32 result; 2704 wait_for_thread(nubThread, &result); 2705 } 2706 2707 delete breakpointManager; 2708 } 2709 2710 TRACE(("install_team_debugger() done2: %ld\n", 2711 (error == B_OK ? result : error))); 2712 return (error == B_OK ? result : error); 2713 } 2714 2715 2716 static status_t 2717 ensure_debugger_installed() 2718 { 2719 port_id port = install_team_debugger(B_CURRENT_TEAM, -1, 2720 thread_get_current_thread_id(), true, true); 2721 return port >= 0 ? B_OK : port; 2722 } 2723 2724 2725 // #pragma mark - 2726 2727 2728 void 2729 _user_debugger(const char *userMessage) 2730 { 2731 // install the default debugger, if there is none yet 2732 status_t error = ensure_debugger_installed(); 2733 if (error != B_OK) { 2734 // time to commit suicide 2735 char buffer[128]; 2736 ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer)); 2737 if (length >= 0) { 2738 dprintf("_user_debugger(): Failed to install debugger. Message is: " 2739 "`%s'\n", buffer); 2740 } else { 2741 dprintf("_user_debugger(): Failed to install debugger. 
Message is: " 2742 "%p (%s)\n", userMessage, strerror(length)); 2743 } 2744 _user_exit_team(1); 2745 } 2746 2747 // prepare the message 2748 debug_debugger_call message; 2749 message.message = (void*)userMessage; 2750 2751 thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message, 2752 sizeof(message), true); 2753 } 2754 2755 2756 int 2757 _user_disable_debugger(int state) 2758 { 2759 Team *team = thread_get_current_thread()->team; 2760 2761 TRACE(("_user_disable_debugger(%d): team: %ld\n", state, team->id)); 2762 2763 cpu_status cpuState = disable_interrupts(); 2764 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2765 2766 int32 oldFlags; 2767 if (state) { 2768 oldFlags = atomic_or(&team->debug_info.flags, 2769 B_TEAM_DEBUG_DEBUGGER_DISABLED); 2770 } else { 2771 oldFlags = atomic_and(&team->debug_info.flags, 2772 ~B_TEAM_DEBUG_DEBUGGER_DISABLED); 2773 } 2774 2775 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2776 restore_interrupts(cpuState); 2777 2778 // TODO: Check, if the return value is really the old state. 
2779 return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED); 2780 } 2781 2782 2783 status_t 2784 _user_install_default_debugger(port_id debuggerPort) 2785 { 2786 // if supplied, check whether the port is a valid port 2787 if (debuggerPort >= 0) { 2788 port_info portInfo; 2789 status_t error = get_port_info(debuggerPort, &portInfo); 2790 if (error != B_OK) 2791 return error; 2792 2793 // the debugger team must not be the kernel team 2794 if (portInfo.team == team_get_kernel_team_id()) 2795 return B_NOT_ALLOWED; 2796 } 2797 2798 atomic_set(&sDefaultDebuggerPort, debuggerPort); 2799 2800 return B_OK; 2801 } 2802 2803 2804 port_id 2805 _user_install_team_debugger(team_id teamID, port_id debuggerPort) 2806 { 2807 return install_team_debugger(teamID, debuggerPort, -1, false, false); 2808 } 2809 2810 2811 status_t 2812 _user_remove_team_debugger(team_id teamID) 2813 { 2814 Team* team; 2815 ConditionVariable debugChangeCondition; 2816 status_t error = prepare_debugger_change(teamID, debugChangeCondition, 2817 team); 2818 if (error != B_OK) 2819 return error; 2820 2821 InterruptsSpinLocker debugInfoLocker(team->debug_info.lock); 2822 2823 thread_id nubThread = -1; 2824 port_id nubPort = -1; 2825 2826 if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) { 2827 // there's a debugger installed 2828 nubThread = team->debug_info.nub_thread; 2829 nubPort = team->debug_info.nub_port; 2830 } else { 2831 // no debugger installed 2832 error = B_BAD_VALUE; 2833 } 2834 2835 debugInfoLocker.Unlock(); 2836 2837 // Delete the nub port -- this will cause the nub thread to terminate and 2838 // remove the debugger. 
2839 if (nubPort >= 0) 2840 delete_port(nubPort); 2841 2842 finish_debugger_change(team); 2843 2844 // wait for the nub thread 2845 if (nubThread >= 0) 2846 wait_for_thread(nubThread, NULL); 2847 2848 return error; 2849 } 2850 2851 2852 status_t 2853 _user_debug_thread(thread_id threadID) 2854 { 2855 TRACE(("[%ld] _user_debug_thread(%ld)\n", find_thread(NULL), threadID)); 2856 2857 // get the thread 2858 Thread* thread = Thread::GetAndLock(threadID); 2859 if (thread == NULL) 2860 return B_BAD_THREAD_ID; 2861 BReference<Thread> threadReference(thread, true); 2862 ThreadLocker threadLocker(thread, true); 2863 2864 // we can't debug the kernel team 2865 if (thread->team == team_get_kernel_team()) 2866 return B_NOT_ALLOWED; 2867 2868 InterruptsLocker interruptsLocker; 2869 SpinLocker threadDebugInfoLocker(thread->debug_info.lock); 2870 2871 // If the thread is already dying, it's too late to debug it. 2872 if ((thread->debug_info.flags & B_THREAD_DEBUG_DYING) != 0) 2873 return B_BAD_THREAD_ID; 2874 2875 // don't debug the nub thread 2876 if ((thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) != 0) 2877 return B_NOT_ALLOWED; 2878 2879 // already marked stopped? 2880 if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) 2881 return B_OK; 2882 2883 // set the flag that tells the thread to stop as soon as possible 2884 atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP); 2885 2886 update_thread_user_debug_flag(thread); 2887 2888 // resume/interrupt the thread, if necessary 2889 threadDebugInfoLocker.Unlock(); 2890 SpinLocker schedulerLocker(gSchedulerLock); 2891 2892 switch (thread->state) { 2893 case B_THREAD_SUSPENDED: 2894 // thread suspended: wake it up 2895 scheduler_enqueue_in_run_queue(thread); 2896 break; 2897 2898 default: 2899 // thread may be waiting: interrupt it 2900 thread_interrupt(thread, false); 2901 // TODO: If the thread is already in the kernel and e.g. 
2902 // about to acquire a semaphore (before 2903 // thread_prepare_to_block()), we won't interrupt it. 2904 // Maybe we should rather send a signal (SIGTRAP). 2905 scheduler_reschedule_if_necessary_locked(); 2906 break; 2907 } 2908 2909 return B_OK; 2910 } 2911 2912 2913 void 2914 _user_wait_for_debugger(void) 2915 { 2916 debug_thread_debugged message; 2917 thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message, 2918 sizeof(message), false); 2919 } 2920 2921 2922 status_t 2923 _user_set_debugger_breakpoint(void *address, uint32 type, int32 length, 2924 bool watchpoint) 2925 { 2926 // check the address and size 2927 if (address == NULL || !BreakpointManager::CanAccessAddress(address, false)) 2928 return B_BAD_ADDRESS; 2929 if (watchpoint && length < 0) 2930 return B_BAD_VALUE; 2931 2932 // check whether a debugger is installed already 2933 team_debug_info teamDebugInfo; 2934 get_team_debug_info(teamDebugInfo); 2935 if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) 2936 return B_BAD_VALUE; 2937 2938 // We can't help it, here's a small but relatively harmless race condition, 2939 // since a debugger could be installed in the meantime. The worst case is 2940 // that we install a break/watchpoint the debugger doesn't know about. 
2941 2942 // set the break/watchpoint 2943 status_t result; 2944 if (watchpoint) 2945 result = arch_set_watchpoint(address, type, length); 2946 else 2947 result = arch_set_breakpoint(address); 2948 2949 if (result == B_OK) 2950 update_threads_breakpoints_flag(); 2951 2952 return result; 2953 } 2954 2955 2956 status_t 2957 _user_clear_debugger_breakpoint(void *address, bool watchpoint) 2958 { 2959 // check the address 2960 if (address == NULL || !BreakpointManager::CanAccessAddress(address, false)) 2961 return B_BAD_ADDRESS; 2962 2963 // check whether a debugger is installed already 2964 team_debug_info teamDebugInfo; 2965 get_team_debug_info(teamDebugInfo); 2966 if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) 2967 return B_BAD_VALUE; 2968 2969 // We can't help it, here's a small but relatively harmless race condition, 2970 // since a debugger could be installed in the meantime. The worst case is 2971 // that we clear a break/watchpoint the debugger has just installed. 2972 2973 // clear the break/watchpoint 2974 status_t result; 2975 if (watchpoint) 2976 result = arch_clear_watchpoint(address); 2977 else 2978 result = arch_clear_breakpoint(address); 2979 2980 if (result == B_OK) 2981 update_threads_breakpoints_flag(); 2982 2983 return result; 2984 } 2985