/*
 * Copyright 2005-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2015, Rene Gollent, rene@gollent.com.
 * Distributed under the terms of the MIT License.
 */


#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include <algorithm>

#include <arch/debug.h>
#include <arch/user_debugger.h>
#include <core_dump.h>
#include <cpu.h>
#include <debugger.h>
#include <kernel.h>
#include <KernelExport.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <ksyscalls.h>
#include <port.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <thread_types.h>
#include <user_debugger.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include <AutoDeleter.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>

#include "BreakpointManager.h"


//#define TRACE_USER_DEBUGGER
#ifdef TRACE_USER_DEBUGGER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


// TODO: Since the introduction of team_debug_info::debugger_changed_condition
// there's some potential for simplifications. E.g. clear_team_debug_info() and
// destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus
// arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()).


static port_id sDefaultDebuggerPort = -1;
	// accessed atomically

static timer sProfilingTimers[SMP_MAX_CPUS];
	// a profiling timer for each CPU -- used when a profiled thread is running
	// on that CPU


static void schedule_profiling_timer(Thread* thread, bigtime_t interval);
static int32 profiling_event(timer* unused);
static status_t ensure_debugger_installed();
static void get_team_debug_info(team_debug_info &teamDebugInfo);


static inline status_t
kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
	size_t bufferSize)
{
	return write_port_etc(port, code, buffer, bufferSize, B_KILL_CAN_INTERRUPT,
		0);
}


static status_t
debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
	bool dontWait)
{
	TRACE(("debugger_write(): thread: %" B_PRId32 ", team %" B_PRId32 ", "
		"port: %" B_PRId32 ", code: %" B_PRIx32 ", message: %p, size: %lu, "
		"dontWait: %d\n", thread_get_current_thread()->id,
		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
		dontWait));

	status_t error = B_OK;

	// get the team debug info
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	sem_id writeLock = teamDebugInfo.debugger_write_lock;

	// get the write lock
	TRACE(("debugger_write(): acquiring write lock...\n"));
	error = acquire_sem_etc(writeLock, 1,
		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK) {
		TRACE(("debugger_write() done1: %" B_PRIx32 "\n", error));
		return error;
	}

	// re-get the team debug info
	get_team_debug_info(teamDebugInfo);

	if (teamDebugInfo.debugger_port != port
		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
		// The debugger has changed in the meantime or we are about to be
		// handed over to a new debugger. In either case we don't send the
		// message.
		TRACE(("debugger_write(): %s\n",
			(teamDebugInfo.debugger_port != port
				? "debugger port changed" : "handover flag set")));
	} else {
		TRACE(("debugger_write(): writing to port...\n"));

		error = write_port_etc(port, code, buffer, bufferSize,
			dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	}

	// release the write lock
	release_sem(writeLock);

	TRACE(("debugger_write() done: %" B_PRIx32 "\n", error));

	return error;
}


/*!	Updates the thread::flags field according to what user debugger flags are
	set for the thread.
	Interrupts must be disabled and the thread's debug info lock must be held.
*/
static void
update_thread_user_debug_flag(Thread* thread)
{
	if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0)
		atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD);
}


/*!	Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the
	given thread.
	Interrupts must be disabled and the thread debug info lock must be held.
*/
static void
update_thread_breakpoints_flag(Thread* thread)
{
	Team* team = thread->team;

	if (arch_has_breakpoints(&team->debug_info.arch_info))
		atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
}


/*!	Updates the Thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
	threads of the current team.
*/
static void
update_threads_breakpoints_flag()
{
	Team* team = thread_get_current_thread()->team;

	TeamLocker teamLocker(team);

	Thread* thread = team->thread_list;

	if (arch_has_breakpoints(&team->debug_info.arch_info)) {
		for (; thread != NULL; thread = thread->team_next)
			atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
	} else {
		for (; thread != NULL; thread = thread->team_next)
			atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
	}
}


/*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of the
	given thread, which must be the current thread.
*/
static void
update_thread_debugger_installed_flag(Thread* thread)
{
	Team* team = thread->team;

	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
}


/*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all
	threads of the given team.
	The team's lock must be held.
*/
static void
update_threads_debugger_installed_flag(Team* team)
{
	Thread* thread = team->thread_list;

	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		for (; thread != NULL; thread = thread->team_next)
			atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
	} else {
		for (; thread != NULL; thread = thread->team_next)
			atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
	}
}


/**
 * For the first initialization the function must be called with \a initLock
 * set to \c true. If another thread might access the structure at the same
 * time, `lock' must be held when calling the function.
 */
void
clear_team_debug_info(struct team_debug_info *info, bool initLock)
{
	if (info) {
		arch_clear_team_debug_info(&info->arch_info);
		atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->nub_thread = -1;
		info->nub_port = -1;
		info->debugger_write_lock = -1;
		info->causing_thread = -1;
		info->image_event = 0;
		info->breakpoint_manager = NULL;

		if (initLock) {
			B_INITIALIZE_SPINLOCK(&info->lock);
			info->debugger_changed_condition = NULL;
		}
	}
}

/**
 * `lock' must not be held nor may interrupts be disabled.
 * \a info must not be a member of a team struct (or the team struct must no
 * longer be accessible, i.e. the team should already be removed).
 *
 * In case the team is still accessible, the procedure is:
 * 1. get `lock'
 * 2. copy the team debug info on stack
 * 3. call clear_team_debug_info() on the team debug info
 * 4. release `lock'
 * 5. call destroy_team_debug_info() on the copied team debug info
 */
static void
destroy_team_debug_info(struct team_debug_info *info)
{
	if (info) {
		arch_destroy_team_debug_info(&info->arch_info);

		// delete the breakpoint manager
		delete info->breakpoint_manager;
		info->breakpoint_manager = NULL;

		// delete the debugger port write lock
		if (info->debugger_write_lock >= 0) {
			delete_sem(info->debugger_write_lock);
			info->debugger_write_lock = -1;
		}

		// delete the nub port
		if (info->nub_port >= 0) {
			set_port_owner(info->nub_port, B_CURRENT_TEAM);
			delete_port(info->nub_port);
			info->nub_port = -1;
		}

		// wait for the nub thread
		if (info->nub_thread >= 0) {
			if (info->nub_thread != thread_get_current_thread()->id) {
				int32 result;
				wait_for_thread(info->nub_thread, &result);
			}

			info->nub_thread = -1;
		}

		atomic_set(&info->flags, 0);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->causing_thread = -1;
		info->image_event = -1;
	}
}


void
init_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		B_INITIALIZE_SPINLOCK(&info->lock);
		arch_clear_thread_debug_info(&info->arch_info);
		info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
		info->profile.installed_timer = NULL;
	}
}


/*!	Clears the debug info for the current thread.
	Invoked with thread debug info lock being held.
*/
void
clear_thread_debug_info(struct thread_debug_info *info, bool dying)
{
	if (info) {
		// cancel profiling timer
		if (info->profile.installed_timer != NULL) {
			cancel_timer(info->profile.installed_timer);
			info->profile.installed_timer = NULL;
		}

		arch_clear_thread_debug_info(&info->arch_info);
		atomic_set(&info->flags,
			B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ?
				B_THREAD_DEBUG_DYING : 0));
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
	}
}


void
destroy_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		area_id sampleArea = info->profile.sample_area;
		if (sampleArea >= 0) {
			area_info areaInfo;
			if (get_area_info(sampleArea, &areaInfo) == B_OK) {
				unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
				delete_area(sampleArea);
			}
		}

		arch_destroy_thread_debug_info(&info->arch_info);

		if (info->debug_port >= 0) {
			delete_port(info->debug_port);
			info->debug_port = -1;
		}

		info->ignore_signals = 0;
		info->ignore_signals_once = 0;

		atomic_set(&info->flags, 0);
	}
}


static status_t
prepare_debugger_change(team_id teamID, ConditionVariable& condition,
	Team*& team)
{
	// We look up the team by ID, even in case of the current team, so we can
	// be sure that the team is not already dying.
	if (teamID == B_CURRENT_TEAM)
		teamID = thread_get_current_thread()->team->id;

	while (true) {
		// get the team
		team = Team::GetAndLock(teamID);
		if (team == NULL)
			return B_BAD_TEAM_ID;
		BReference<Team> teamReference(team, true);
		TeamLocker teamLocker(team, true);

		// don't allow messing with the kernel team
		if (team == team_get_kernel_team())
			return B_NOT_ALLOWED;

		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return B_OK;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();
		teamLocker.Unlock();

		entry.Wait();
	}
}


static void
prepare_debugger_change(Team* team, ConditionVariable& condition)
{
	while (true) {
		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();

		entry.Wait();
	}
}


static void
finish_debugger_change(Team* team)
{
	// unset our condition variable and notify all threads waiting on it
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
	team->debug_info.debugger_changed_condition = NULL;

	condition->NotifyAll();
}


void
user_debug_prepare_for_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized, change the ownership of the debug port for the thread
	// to the kernel team, since exec_team() deletes all ports owned by this
	// team.
	// We change the ownership back later.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if ((thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team_get_kernel_team_id());
	}
}


void
user_debug_finish_after_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized for this thread, change the ownership of its debug port
	// back to this team.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team->id);
	}
}


void
init_user_debug()
{
#ifdef ARCH_INIT_USER_DEBUG
	ARCH_INIT_USER_DEBUG();
#endif
}


static void
get_team_debug_info(team_debug_info &teamDebugInfo)
{
	Thread *thread = thread_get_current_thread();

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);

	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));

	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);
}


static status_t
thread_hit_debug_event_internal(debug_debugger_message event,
	const void *message, int32 size, bool requireDebugger, bool &restart)
{
	restart = false;
	Thread *thread = thread_get_current_thread();

	TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", event: %" B_PRIu32
		", message: %p, size: %" B_PRId32 "\n", thread->id, (uint32)event,
		message, size));

	// check, if there's a debug port already
	bool setPort = !(atomic_get(&thread->debug_info.flags)
		& B_THREAD_DEBUG_INITIALIZED);

	// create a port, if there is none yet
	port_id port = -1;
	if (setPort) {
		char nameBuffer[128];
		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %" B_PRId32,
			thread->id);

		port = create_port(1, nameBuffer);
		if (port < 0) {
			dprintf("thread_hit_debug_event(): Failed to create debug port: "
				"%s\n", strerror(port));
			return port;
		}
	}

	// check the debug info structures once more: get the debugger port, set
	// the thread's debug port, and update the thread's debug flags
	port_id deletePort = port;
	port_id debuggerPort = -1;
	port_id nubPort = -1;
	status_t error = B_OK;
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	uint32 threadFlags = thread->debug_info.flags;
	threadFlags &= ~B_THREAD_DEBUG_STOP;
	bool debuggerInstalled
		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	if (thread->id == thread->team->debug_info.nub_thread) {
		// Ugh, we're the nub
		// thread. We shouldn't be here.
		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %" B_PRId32
			"\n", thread->id));

		error = B_ERROR;
	} else if (debuggerInstalled || !requireDebugger) {
		if (debuggerInstalled) {
			debuggerPort = thread->team->debug_info.debugger_port;
			nubPort = thread->team->debug_info.nub_port;
		}

		if (setPort) {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				// someone created a port for us (the port we've created will
				// be deleted below)
				port = thread->debug_info.debug_port;
			} else {
				thread->debug_info.debug_port = port;
				deletePort = -1;	// keep the port
				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
			}
		} else {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				port = thread->debug_info.debug_port;
			} else {
				// someone deleted our port
				error = B_ERROR;
			}
		}
	} else
		error = B_ERROR;

	// update the flags
	if (error == B_OK)
		threadFlags |= B_THREAD_DEBUG_STOPPED;
	atomic_set(&thread->debug_info.flags, threadFlags);

	update_thread_user_debug_flag(thread);

	threadDebugInfoLocker.Unlock();
	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);

	// delete the superfluous port
	if (deletePort >= 0)
		delete_port(deletePort);

	if (error != B_OK) {
		TRACE(("thread_hit_debug_event() error: thread: %" B_PRId32 ", error: "
			"%" B_PRIx32 "\n", thread->id, error));
		return error;
	}

	// send a message to the debugger port
	if (debuggerInstalled) {
		// update the message's origin info first
		debug_origin *origin = (debug_origin *)message;
		origin->thread = thread->id;
		origin->team = thread->team->id;
		origin->nub_port = nubPort;

		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", sending "
			"message to debugger port %" B_PRId32 "\n", thread->id,
			debuggerPort));

		error = debugger_write(debuggerPort, event, message, size, false);
	}

	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
	bool singleStep = false;

	if (error == B_OK) {
		bool done = false;
		while (!done) {
			// read a command from the debug port
			int32 command;
			debugged_thread_message_data commandMessage;
			ssize_t commandMessageSize = read_port_etc(port, &command,
				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
				0);

			if (commandMessageSize < 0) {
				error = commandMessageSize;
				TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed "
					"to receive message from port %" B_PRId32 ": %" B_PRIx32 "\n",
					thread->id, port, error));
				break;
			}

			switch (command) {
				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
						thread->id));
					result = commandMessage.continue_thread.handle_event;

					singleStep = commandMessage.continue_thread.single_step;
					done = true;
					break;

				case B_DEBUGGED_THREAD_SET_CPU_STATE:
				{
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
						thread->id));
					arch_set_debug_cpu_state(
						&commandMessage.set_cpu_state.cpu_state);

					break;
				}

				case B_DEBUGGED_THREAD_GET_CPU_STATE:
				{
					port_id replyPort = commandMessage.get_cpu_state.reply_port;

					// prepare the message
					debug_nub_get_cpu_state_reply replyMessage;
					replyMessage.error = B_OK;
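					// the reply carries the event currently being handled as
					// well as this thread's CPU state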
					replyMessage.message = event;
					arch_get_debug_cpu_state(&replyMessage.cpu_state);

					// send it
					error = kill_interruptable_write_port(replyPort, event,
						&replyMessage, sizeof(replyMessage));

					break;
				}

				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
				{
					// Check, if the debugger really changed, i.e. is different
					// than the one we know.
					team_debug_info teamDebugInfo;
					get_team_debug_info(teamDebugInfo);

					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
						if (!debuggerInstalled
							|| teamDebugInfo.debugger_port != debuggerPort) {
							// debugger was installed or has changed: restart
							// this function
							restart = true;
							done = true;
						}
					} else {
						if (debuggerInstalled) {
							// debugger is gone: continue the thread normally
							done = true;
						}
					}

					break;
				}
			}
		}
	} else {
		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed to send "
			"message to debugger port %" B_PRId32 ": %" B_PRIx32 "\n",
			thread->id, debuggerPort, error));
	}

	// update the thread debug info
	bool destroyThreadInfo = false;
	thread_debug_info threadDebugInfo;

	state = disable_interrupts();
	threadDebugInfoLocker.Lock();

	// check, if the team is still being debugged
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// update the single-step flag
		if (singleStep) {
			atomic_or(&thread->debug_info.flags,
				B_THREAD_DEBUG_SINGLE_STEP);
			atomic_or(&thread->flags, THREAD_FLAGS_SINGLE_STEP);
		} else {
			atomic_and(&thread->debug_info.flags,
				~(int32)B_THREAD_DEBUG_SINGLE_STEP);
		}

		// unset the "stopped" state
		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);

		update_thread_user_debug_flag(thread);

	} else {
		// the debugger is gone: cleanup our info completely
		threadDebugInfo = thread->debug_info;
		clear_thread_debug_info(&thread->debug_info, false);
		destroyThreadInfo = true;
	}

	threadDebugInfoLocker.Unlock();
	restore_interrupts(state);

	// enable/disable single stepping
	arch_update_thread_single_step();

	if (destroyThreadInfo)
		destroy_thread_debug_info(&threadDebugInfo);

	return (error == B_OK ? result : error);
}


static status_t
thread_hit_debug_event(debug_debugger_message event, const void *message,
	int32 size, bool requireDebugger)
{
	status_t result;
	bool restart;
	do {
		restart = false;
		result = thread_hit_debug_event_internal(event, message, size,
			requireDebugger, restart);
	} while (result >= 0 && restart);

	// Prepare to continue -- we install a debugger change condition, so no one
	// will change the debugger while we're playing with the breakpoint manager.
	// TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
	Team* team = thread_get_current_thread()->team;
	ConditionVariable debugChangeCondition;
	debugChangeCondition.Init(team, "debug change condition");
	prepare_debugger_change(team, debugChangeCondition);

	if (team->debug_info.breakpoint_manager != NULL) {
		bool isSyscall;
		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
		if (pc != NULL && !isSyscall)
			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
	}

	finish_debugger_change(team);

	return result;
}


static status_t
thread_hit_serious_debug_event(debug_debugger_message event,
	const void *message, int32 messageSize)
{
	// ensure that a debugger is installed for this team
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		Thread *thread = thread_get_current_thread();
		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
			"thread: %" B_PRId32 " (%s): %s\n", thread->id, thread->name,
			strerror(error));
		return error;
	}

	// enter the debug loop
	return thread_hit_debug_event(event, message, messageSize, true);
}


void
user_debug_pre_syscall(uint32 syscall, void *args)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether pre-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_pre_syscall message;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
		sizeof(message), true);
}


void
user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
	bigtime_t startTime)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether post-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_post_syscall message;
	message.start_time = startTime;
	message.end_time = system_time();
	message.return_value = returnValue;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
		sizeof(message), true);
}


/**	\brief To be called when an unhandled processor exception (error/fault)
 *		occurred.
 *	\param exception The debug_exception_type value identifying the kind of
 *		fault.
 *	\param signal The signal corresponding to the exception.
 *	\return \c true, if the caller shall continue normally, i.e. usually send
 *		a deadly signal. \c false, if the debugger insists to continue the
 *		program (e.g. because it has removed the cause of the problem).
 */
bool
user_debug_exception_occurred(debug_exception_type exception, int signal)
{
	// First check whether there's a signal handler installed for the signal.
	// If so, we don't want to install a debugger for the team. We always send
	// the signal instead. An already installed debugger will be notified, if
	// it has requested notifications of signals.
	struct sigaction signalAction;
	if (sigaction(signal, NULL, &signalAction) == 0
		&& signalAction.sa_handler != SIG_DFL) {
		return true;
	}

	// prepare the message
	debug_exception_occurred message;
	message.exception = exception;
	message.signal = signal;

	status_t result = thread_hit_serious_debug_event(
		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


bool
user_debug_handle_signal(int signal, struct sigaction *handler, siginfo_t *info,
	bool deadly)
{
	// check, if a debugger is installed and is interested in signals
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
		return true;
	}

	// prepare the message
	debug_signal_received message;
	message.signal = signal;
	message.handler = *handler;
	message.info = *info;
	message.deadly = deadly;

	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
		&message, sizeof(message), true);
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


void
user_debug_stop_thread()
{
	// check whether this is actually an emulated single-step notification
	Thread* thread = thread_get_current_thread();
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	bool singleStepped = false;
	if ((atomic_and(&thread->debug_info.flags,
			~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
			& B_THREAD_DEBUG_NOTIFY_SINGLE_STEP) != 0) {
		singleStepped = true;
	}

	threadDebugInfoLocker.Unlock();

	if (singleStepped) {
		user_debug_single_stepped();
	} else {
		debug_thread_debugged message;
		thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED,
			&message, sizeof(message));
	}
}


void
user_debug_team_created(team_id teamID)
{
	// check, if a debugger is installed and is interested in team creation
	// events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_created message;
	message.new_team = teamID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_team_deleted(team_id teamID, port_id debuggerPort, status_t status,
	int signal, team_usage_info* usageInfo)
{
	if (debuggerPort >= 0) {
		TRACE(("user_debug_team_deleted(team: %" B_PRId32 ", debugger port: "
			"%" B_PRId32 ")\n", teamID, debuggerPort));

		debug_team_deleted message;
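		// the team is already gone, so there is no meaningful origin thread
		// or nub port to report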
		message.origin.thread = -1;
		message.origin.team = teamID;
		message.origin.nub_port = -1;
		message.status = status;
		message.signal = signal;
		message.usage = *usageInfo;
		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
			sizeof(message), B_RELATIVE_TIMEOUT, 0);
	}
}


void
user_debug_team_exec()
{
	// check, if a debugger is installed and is interested in team creation
	// events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_exec message;
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
		sizeof(message), true);
}


/*!	Called by a new userland thread to update the debugging related flags of
	\c Thread::flags before the thread first enters userland.
	\param thread The calling thread.
*/
void
user_debug_update_new_thread_flags(Thread* thread)
{
	// lock it and update its flags
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	update_thread_user_debug_flag(thread);
	update_thread_breakpoints_flag(thread);
	update_thread_debugger_installed_flag(thread);
}


void
user_debug_thread_created(thread_id threadID)
{
	// check, if a debugger is installed and is interested in thread events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// prepare the message
	debug_thread_created message;
	message.new_thread = threadID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_thread_deleted(team_id teamID, thread_id threadID, status_t status)
{
	// Things are a bit complicated here, since this thread no longer belongs
	// to the debugged team (but to the kernel). So we can't use
	// debugger_write().
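	// Instead the message is written to the debugger port directly,
	// serialized by the team's debugger write lock.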

	// get the team debug flags and debugger port
	Team* team = Team::Get(teamID);
	if (team == NULL)
		return;
	BReference<Team> teamReference(team, true);

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;
	sem_id writeLock = team->debug_info.debugger_write_lock;

	debugInfoLocker.Unlock();

	// check, if a debugger is installed and is interested in thread events
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// acquire the debugger write lock
	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK)
		return;

	// re-get the team debug info -- we need to check whether anything changed
	debugInfoLocker.Lock();

	teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id newDebuggerPort = team->debug_info.debugger_port;

	debugInfoLocker.Unlock();

	// Send the message only if the debugger hasn't changed in the meantime or
	// the team is about to be handed over.
	if (newDebuggerPort == debuggerPort
		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
		debug_thread_deleted message;
		message.origin.thread = threadID;
		message.origin.team = teamID;
		message.origin.nub_port = -1;
		message.status = status;

		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
	}

	// release the debugger write lock
	release_sem(writeLock);
}


/*!	Called for a thread that is about to die, cleaning up all user debug
	facilities installed for the thread.
	\param thread The current thread, the one that is going to die.
*/
void
user_debug_thread_exiting(Thread* thread)
{
	// thread is the current thread, so using team is safe
	Team* team = thread->team;

	InterruptsLocker interruptsLocker;

	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	// check, if a debugger is installed
	if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
		|| debuggerPort < 0) {
		return;
	}

	// detach the profile info and mark the thread dying
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	thread_debug_info& threadDebugInfo = thread->debug_info;
	if (threadDebugInfo.profile.samples == NULL)
		return;

	area_id sampleArea = threadDebugInfo.profile.sample_area;
	int32 sampleCount = threadDebugInfo.profile.sample_count;
	int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
	int32 stackDepth = threadDebugInfo.profile.stack_depth;
	bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
	int32 imageEvent = threadDebugInfo.profile.image_event;
	threadDebugInfo.profile.sample_area = -1;
	threadDebugInfo.profile.samples = NULL;
	threadDebugInfo.profile.buffer_full = false;

	bigtime_t lastCPUTime;
	{
		SpinLocker threadTimeLocker(thread->time_lock);
		lastCPUTime = thread->CPUTime(false);
	}

	atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);

	threadDebugInfoLocker.Unlock();
	interruptsLocker.Unlock();

	// notify the debugger
	debug_profiler_update message;
	message.origin.thread = thread->id;
	message.origin.team = thread->team->id;
	message.origin.nub_port = -1;	// asynchronous message
	message.sample_count = sampleCount;
	message.dropped_ticks = droppedTicks;
	message.stack_depth = stackDepth;
	message.variable_stack_depth = variableStackDepth;
	message.image_event = imageEvent;
	message.stopped = true;
	message.last_cpu_time = lastCPUTime;
	debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
		&message, sizeof(message), false);

	if (sampleArea >= 0) {
		area_info areaInfo;
		if (get_area_info(sampleArea, &areaInfo) == B_OK) {
			unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
			delete_area(sampleArea);
		}
	}
}


void
user_debug_image_created(const image_info *imageInfo)
{
	// check, if a debugger is installed and is interested in image events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_created message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_image_deleted(const image_info *imageInfo)
{
	// check, if a debugger is installed and is interested in image events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_deleted message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message,
		sizeof(message), true);
}


void
user_debug_breakpoint_hit(bool software)
{
	// prepare the message
	debug_breakpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
		sizeof(message));
}


void
user_debug_watchpoint_hit()
{
	// prepare the message
	debug_watchpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
		sizeof(message));
}


void
user_debug_single_stepped()
{
	// clear the single-step thread flag
	Thread* thread = thread_get_current_thread();
	atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_SINGLE_STEP);

	// prepare the message
	debug_single_step message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
		sizeof(message));
}


/*!	Schedules the profiling timer for the current thread.
	The caller must hold the thread's debug info lock.
	\param thread The current thread.
	\param interval The time after which the timer should fire.
*/
static void
schedule_profiling_timer(Thread* thread, bigtime_t interval)
{
	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
	thread->debug_info.profile.installed_timer = timer;
	thread->debug_info.profile.timer_end = system_time() + interval;
	add_timer(timer, &profiling_event, interval, B_ONE_SHOT_RELATIVE_TIMER);
}


/*!	Samples the current thread's instruction pointer/stack trace.
	The caller must hold the current thread's debug info lock.
	\param flushBuffer Return parameter: Set to \c true when the sampling
		buffer must be flushed.
*/
static bool
profiling_do_sample(bool& flushBuffer)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	if (debugInfo.profile.samples == NULL)
		return false;

	// Check, whether the buffer is full or an image event occurred since the
	// last sample was taken.
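	// (With variable stack depth an image event is additionally recorded in
	// the sample buffer itself, so the debugger can attribute the samples to
	// the correct set of loaded images.)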
	int32 maxSamples = debugInfo.profile.max_samples;
	int32 sampleCount = debugInfo.profile.sample_count;
	int32 stackDepth = debugInfo.profile.stack_depth;
	int32 imageEvent = thread->team->debug_info.image_event;
	if (debugInfo.profile.sample_count > 0) {
		if (debugInfo.profile.last_image_event < imageEvent
			&& debugInfo.profile.variable_stack_depth
			&& sampleCount + 2 <= maxSamples) {
			// an image event occurred, but we use variable stack depth and
			// have enough room in the buffer to indicate an image event
			addr_t* event = debugInfo.profile.samples + sampleCount;
			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
			event[1] = imageEvent;
			sampleCount += 2;
			debugInfo.profile.sample_count = sampleCount;
			debugInfo.profile.last_image_event = imageEvent;
		}

		if (debugInfo.profile.last_image_event < imageEvent
			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
			if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
				flushBuffer = true;
				return true;
			}

			// We can't flush the buffer now, since we interrupted a kernel
			// function. If the buffer is not full yet, we add the samples,
			// otherwise we have to drop them.
			if (maxSamples - sampleCount < stackDepth) {
				debugInfo.profile.dropped_ticks++;
				return true;
			}
		}
	} else {
		// first sample -- set the image event
		debugInfo.profile.image_event = imageEvent;
		debugInfo.profile.last_image_event = imageEvent;
	}

	// get the samples
	addr_t* returnAddresses = debugInfo.profile.samples
		+ debugInfo.profile.sample_count;
	if (debugInfo.profile.variable_stack_depth) {
		// variable sample count per hit
		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
			stackDepth - 1, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

		debugInfo.profile.sample_count += *returnAddresses + 1;
	} else {
		// fixed sample count per hit
		if (stackDepth > 1) {
			int32 count = arch_debug_get_stack_trace(returnAddresses,
				stackDepth, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

			for (int32 i = count; i < stackDepth; i++)
				returnAddresses[i] = 0;
		} else
			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);

		debugInfo.profile.sample_count += stackDepth;
	}

	return true;
}


static void
profiling_buffer_full(void*)
{
	// It is undefined whether the function is called with interrupts enabled
	// or disabled. We are allowed to enable interrupts, though. First make
	// sure interrupts are disabled.
	disable_interrupts();

	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
		int32 sampleCount = debugInfo.profile.sample_count;
		int32 droppedTicks = debugInfo.profile.dropped_ticks;
		int32 stackDepth = debugInfo.profile.stack_depth;
		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
		int32 imageEvent = debugInfo.profile.image_event;

		// notify the debugger
		debugInfo.profile.sample_count = 0;
		debugInfo.profile.dropped_ticks = 0;

		threadDebugInfoLocker.Unlock();
		enable_interrupts();

		// prepare the message
		debug_profiler_update message;
		message.sample_count = sampleCount;
		message.dropped_ticks = droppedTicks;
		message.stack_depth = stackDepth;
		message.variable_stack_depth = variableStackDepth;
		message.image_event = imageEvent;
		message.stopped = false;

		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
			sizeof(message), false);

		disable_interrupts();
		threadDebugInfoLocker.Lock();

		// do the sampling and reschedule timer, if still profiling this thread
		bool flushBuffer;
		if (profiling_do_sample(flushBuffer)) {
			debugInfo.profile.buffer_full = false;
			schedule_profiling_timer(thread, debugInfo.profile.interval);
		}
	}

	threadDebugInfoLocker.Unlock();
	enable_interrupts();
}


/*!	Profiling timer event callback.
	Called with interrupts disabled.
*/
static int32
profiling_event(timer* /*unused*/)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	bool flushBuffer = false;
	if (profiling_do_sample(flushBuffer)) {
		if (flushBuffer) {
			// The sample buffer needs to be flushed; we'll have to notify the
			// debugger. We can't do that right here. Instead we set a post
			// interrupt callback doing that for us, and don't reschedule the
			// timer yet.
			thread->post_interrupt_callback = profiling_buffer_full;
			debugInfo.profile.installed_timer = NULL;
			debugInfo.profile.buffer_full = true;
		} else
			schedule_profiling_timer(thread, debugInfo.profile.interval);
	} else
		debugInfo.profile.installed_timer = NULL;

	return B_HANDLED_INTERRUPT;
}


/*!	Called by the scheduler when a debugged thread has been unscheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_unscheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// if running, cancel the profiling timer
	struct timer* timer = thread->debug_info.profile.installed_timer;
	if (timer != NULL) {
		// track remaining time
		bigtime_t left = thread->debug_info.profile.timer_end - system_time();
		thread->debug_info.profile.interval_left = max_c(left, 0);
		thread->debug_info.profile.installed_timer = NULL;

		// cancel timer
		threadDebugInfoLocker.Unlock();
			// not necessary, but doesn't harm and reduces contention
		cancel_timer(timer);
			// since invoked on the same CPU, this will not possibly wait for
			// an already called timer hook
	}
}


/*!
	Called by the scheduler when a debugged thread has been scheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_scheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	if (thread->debug_info.profile.samples != NULL
		&& !thread->debug_info.profile.buffer_full) {
		// install profiling timer
		schedule_profiling_timer(thread,
			thread->debug_info.profile.interval_left);
	}
}


/*!	\brief Called by the debug nub thread of a team to broadcast a message to
		all threads of the team that are initialized for debugging (and
		thus have a debug port).
*/
static void
broadcast_debugged_thread_message(Thread *nubThread, int32 code,
	const void *message, int32 size)
{
	// iterate through the threads
	thread_info threadInfo;
	int32 cookie = 0;
	while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
			== B_OK) {
		// get the thread and lock it
		Thread* thread = Thread::GetAndLock(threadInfo.thread);
		if (thread == NULL)
			continue;

		BReference<Thread> threadReference(thread, true);
		ThreadLocker threadLocker(thread, true);

		// get the thread's debug port
		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		port_id threadDebugPort = -1;
		if (thread && thread != nubThread && thread->team == nubThread->team
			&& (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
			&& (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
			threadDebugPort = thread->debug_info.debug_port;
		}

		threadDebugInfoLocker.Unlock();
		threadLocker.Unlock();

		// send the message to the thread
		if (threadDebugPort >= 0) {
			status_t error = kill_interruptable_write_port(threadDebugPort,
				code, message, size);
			if (error != B_OK) {
				TRACE(("broadcast_debugged_thread_message(): Failed to send "
					"message to thread %" B_PRId32 ": %" B_PRIx32 "\n",
					thread->id, error));
			}
		}
	}
}


static void
nub_thread_cleanup(Thread *nubThread)
{
	TRACE(("nub_thread_cleanup(%" B_PRId32 "): debugger port: %" B_PRId32 "\n",
		nubThread->id, nubThread->team->debug_info.debugger_port));

	ConditionVariable debugChangeCondition;
	debugChangeCondition.Init(nubThread->team, "debug change condition");
	prepare_debugger_change(nubThread->team, debugChangeCondition);

	team_debug_info teamDebugInfo;
	bool destroyDebugInfo = false;

	TeamLocker teamLocker(nubThread->team);
		// required by update_threads_debugger_installed_flag()

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	team_debug_info &info = nubThread->team->debug_info;
	if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
		&& info.nub_thread == nubThread->id) {
		teamDebugInfo = info;
		clear_team_debug_info(&info, false);
		destroyDebugInfo = true;
	}

	// update the thread::flags fields
	update_threads_debugger_installed_flag(nubThread->team);

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	teamLocker.Unlock();

	if (destroyDebugInfo)
		teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();

	finish_debugger_change(nubThread->team);

	if (destroyDebugInfo)
		destroy_team_debug_info(&teamDebugInfo);

	// notify all threads that the debugger is gone
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
}


/**	\brief Debug nub thread helper function that returns the debug port of
 *		a thread of the same team.
 */
static status_t
debug_nub_thread_get_thread_debug_port(Thread *nubThread,
	thread_id threadID, port_id &threadDebugPort)
{
	threadDebugPort = -1;

	// get the thread
	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// get the debug port
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	if (thread->team != nubThread->team)
		return B_BAD_VALUE;
	if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) == 0)
		return B_BAD_THREAD_STATE;

	threadDebugPort = thread->debug_info.debug_port;

	threadDebugInfoLocker.Unlock();

	if (threadDebugPort < 0)
		return B_ERROR;

	return B_OK;
}


static status_t
debug_nub_thread(void *)
{
	Thread *nubThread = thread_get_current_thread();

	// check, if we're still the current nub thread and get our port
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	if (nubThread->team->debug_info.nub_thread != nubThread->id) {
		RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
		restore_interrupts(state);
		return 0;
	}

	port_id port = nubThread->team->debug_info.nub_port;
	sem_id writeLock = nubThread->team->debug_info.debugger_write_lock;
	BreakpointManager* breakpointManager
		= nubThread->team->debug_info.breakpoint_manager;

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	TRACE(("debug_nub_thread() thread: %" B_PRId32 ", team %" B_PRId32 ", nub "
		"port: %" B_PRId32 "\n", nubThread->id, nubThread->team->id, port));

	// notify all threads that a debugger has been installed
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);

	// command processing loop
	while (true) {
		int32 command;
		debug_nub_message_data message;
		ssize_t messageSize = read_port_etc(port, &command, &message,
			sizeof(message), B_KILL_CAN_INTERRUPT, 0);

		if (messageSize < 0) {
			// The port is no longer valid or we were interrupted by a kill
			// signal: If we are still listed in the team's debug info as nub
			// thread, we need to update that.
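			// nub_thread_cleanup() only clears the team debug info if we are
			// still the registered nub thread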
			nub_thread_cleanup(nubThread);

			TRACE(("nub thread %" B_PRId32 ": terminating: %lx\n",
				nubThread->id, messageSize));

			return messageSize;
		}

		bool sendReply = false;
		union {
			debug_nub_read_memory_reply read_memory;
			debug_nub_write_memory_reply write_memory;
			debug_nub_get_cpu_state_reply get_cpu_state;
			debug_nub_set_breakpoint_reply set_breakpoint;
			debug_nub_set_watchpoint_reply set_watchpoint;
			debug_nub_get_signal_masks_reply get_signal_masks;
			debug_nub_get_signal_handler_reply get_signal_handler;
			debug_nub_start_profiler_reply start_profiler;
			debug_profiler_update profiler_update;
			debug_nub_write_core_file_reply write_core_file;
		} reply;
		int32 replySize = 0;
		port_id replyPort = -1;

		// process the command
		switch (command) {
			case B_DEBUG_MESSAGE_READ_MEMORY:
			{
				// get the parameters
				replyPort = message.read_memory.reply_port;
				void *address = message.read_memory.address;
				int32 size = message.read_memory.size;
				status_t result = B_OK;

				// check the parameters
				if (!BreakpointManager::CanAccessAddress(address, false))
					result = B_BAD_ADDRESS;
				else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE)
					result = B_BAD_VALUE;

				// read the memory
				size_t bytesRead = 0;
				if (result == B_OK) {
					result = breakpointManager->ReadMemory(address,
						reply.read_memory.data, size, bytesRead);
				}
				reply.read_memory.error = result;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_READ_MEMORY: "
					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
					", result: %" B_PRIx32 ", read: %ld\n", nubThread->id,
					replyPort, address, size, result, bytesRead));

				// send only as much data as necessary
				reply.read_memory.size = bytesRead;
				replySize = reply.read_memory.data + bytesRead - (char*)&reply;
				sendReply = true;
				break;
			}

			case B_DEBUG_MESSAGE_WRITE_MEMORY:
			{
				// get the parameters
				replyPort = message.write_memory.reply_port;
				void *address = message.write_memory.address;
				int32 size = message.write_memory.size;
				const char *data = message.write_memory.data;
				int32 realSize = (char*)&message + messageSize - data;
				status_t result = B_OK;

				// check the parameters
				if (!BreakpointManager::CanAccessAddress(address, true))
					result = B_BAD_ADDRESS;
				else if (size <= 0 || size > realSize)
					result = B_BAD_VALUE;

				// write the memory
				size_t bytesWritten = 0;
				if (result == B_OK) {
					result = breakpointManager->WriteMemory(address, data, size,
						bytesWritten);
				}
				reply.write_memory.error = result;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_WRITE_MEMORY: "
					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
					", result: %" B_PRIx32 ", written: %ld\n", nubThread->id,
					replyPort, address, size, result, bytesWritten));

				reply.write_memory.size = bytesWritten;
				sendReply = true;
				replySize = sizeof(debug_nub_write_memory_reply);
				break;
			}

			case B_DEBUG_MESSAGE_SET_TEAM_FLAGS:
			{
				// get the parameters
				int32 flags = message.set_team_flags.flags
					& B_TEAM_DEBUG_USER_FLAG_MASK;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_TEAM_FLAGS"
					": flags: %" B_PRIx32 "\n", nubThread->id, flags));

				Team *team = thread_get_current_thread()->team;
// set the flags 1804 cpu_status state = disable_interrupts(); 1805 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1806 1807 flags |= team->debug_info.flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK; 1808 atomic_set(&team->debug_info.flags, flags); 1809 1810 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1811 restore_interrupts(state); 1812 1813 break; 1814 } 1815 1816 case B_DEBUG_MESSAGE_SET_THREAD_FLAGS: 1817 { 1818 // get the parameters 1819 thread_id threadID = message.set_thread_flags.thread; 1820 int32 flags = message.set_thread_flags.flags 1821 & B_THREAD_DEBUG_USER_FLAG_MASK; 1822 1823 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_THREAD_FLAGS" 1824 ": thread: %" B_PRId32 ", flags: %" B_PRIx32 "\n", 1825 nubThread->id, threadID, flags)); 1826 1827 // set the flags 1828 Thread* thread = Thread::GetAndLock(threadID); 1829 if (thread == NULL) 1830 break; 1831 BReference<Thread> threadReference(thread, true); 1832 ThreadLocker threadLocker(thread, true); 1833 1834 InterruptsSpinLocker threadDebugInfoLocker( 1835 thread->debug_info.lock); 1836 1837 if (thread->team == thread_get_current_thread()->team) { 1838 flags |= thread->debug_info.flags 1839 & B_THREAD_DEBUG_KERNEL_FLAG_MASK; 1840 atomic_set(&thread->debug_info.flags, flags); 1841 } 1842 1843 break; 1844 } 1845 1846 case B_DEBUG_MESSAGE_CONTINUE_THREAD: 1847 { 1848 // get the parameters 1849 thread_id threadID; 1850 uint32 handleEvent; 1851 bool singleStep; 1852 1853 threadID = message.continue_thread.thread; 1854 handleEvent = message.continue_thread.handle_event; 1855 singleStep = message.continue_thread.single_step; 1856 1857 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CONTINUE_THREAD" 1858 ": thread: %" B_PRId32 ", handle event: %" B_PRIu32 ", " 1859 "single step: %d\n", nubThread->id, threadID, handleEvent, 1860 singleStep)); 1861 1862 // find the thread and get its debug port 1863 port_id threadDebugPort = -1; 1864 status_t result = debug_nub_thread_get_thread_debug_port( 1865 nubThread, threadID, threadDebugPort); 1866 1867 // send a message to the debugged thread 1868 if (result == B_OK) { 1869 debugged_thread_continue commandMessage; 1870 commandMessage.handle_event = handleEvent; 1871 commandMessage.single_step = singleStep; 1872 1873 result = write_port(threadDebugPort, 1874 B_DEBUGGED_THREAD_MESSAGE_CONTINUE, 1875 &commandMessage, sizeof(commandMessage)); 1876 } else if (result == B_BAD_THREAD_STATE) { 1877 Thread* thread = Thread::GetAndLock(threadID); 1878 if (thread == NULL) 1879 break; 1880 1881 BReference<Thread> threadReference(thread, true); 1882 ThreadLocker threadLocker(thread, true); 1883 if (thread->state == B_THREAD_SUSPENDED) { 1884 threadLocker.Unlock(); 1885 resume_thread(threadID); 1886 break; 1887 } 1888 } 1889 1890 break; 1891 } 1892 1893 case B_DEBUG_MESSAGE_SET_CPU_STATE: 1894 { 1895 // get the parameters 1896 thread_id threadID = message.set_cpu_state.thread; 1897 const debug_cpu_state &cpuState 1898 = message.set_cpu_state.cpu_state; 1899 1900 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_CPU_STATE" 1901 ": thread: %" B_PRId32 "\n", nubThread->id, threadID)); 1902 1903 // find the thread and get its debug port 1904 port_id threadDebugPort = -1; 1905 status_t result = debug_nub_thread_get_thread_debug_port( 1906 nubThread, threadID, threadDebugPort); 1907 1908 // send a message to the debugged thread 1909 if (result == B_OK) { 1910 debugged_thread_set_cpu_state commandMessage; 1911 memcpy(&commandMessage.cpu_state, &cpuState, 1912 sizeof(debug_cpu_state)); 1913 
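// The stopped thread picks this message up in its own debug loop and applies the state to its own context; the nub thread never modifies another thread's CPU state directly.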
write_port(threadDebugPort, 1914 B_DEBUGGED_THREAD_SET_CPU_STATE, 1915 &commandMessage, sizeof(commandMessage)); 1916 } 1917 1918 break; 1919 } 1920 1921 case B_DEBUG_MESSAGE_GET_CPU_STATE: 1922 { 1923 // get the parameters 1924 thread_id threadID = message.get_cpu_state.thread; 1925 replyPort = message.get_cpu_state.reply_port; 1926 1927 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_CPU_STATE" 1928 ": thread: %" B_PRId32 "\n", nubThread->id, threadID)); 1929 1930 // find the thread and get its debug port 1931 port_id threadDebugPort = -1; 1932 status_t result = debug_nub_thread_get_thread_debug_port( 1933 nubThread, threadID, threadDebugPort); 1934 1935 // send a message to the debugged thread 1936 if (threadDebugPort >= 0) { 1937 debugged_thread_get_cpu_state commandMessage; 1938 commandMessage.reply_port = replyPort; 1939 result = write_port(threadDebugPort, 1940 B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage, 1941 sizeof(commandMessage)); 1942 } 1943 1944 // send a reply to the debugger in case of error 1945 if (result != B_OK) { 1946 reply.get_cpu_state.error = result; 1947 sendReply = true; 1948 replySize = sizeof(reply.get_cpu_state); 1949 } 1950 1951 break; 1952 } 1953 1954 case B_DEBUG_MESSAGE_SET_BREAKPOINT: 1955 { 1956 // get the parameters 1957 replyPort = message.set_breakpoint.reply_port; 1958 void *address = message.set_breakpoint.address; 1959 1960 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_BREAKPOINT" 1961 ": address: %p\n", nubThread->id, address)); 1962 1963 // check the address 1964 status_t result = B_OK; 1965 if (address == NULL 1966 || !BreakpointManager::CanAccessAddress(address, false)) { 1967 result = B_BAD_ADDRESS; 1968 } 1969 1970 // set the breakpoint 1971 if (result == B_OK) 1972 result = breakpointManager->InstallBreakpoint(address); 1973 1974 if (result == B_OK) 1975 update_threads_breakpoints_flag(); 1976 1977 // prepare the reply 1978 reply.set_breakpoint.error = result; 1979 replySize = sizeof(reply.set_breakpoint); 1980 sendReply = true; 1981 1982 break; 1983 } 1984 1985 case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT: 1986 { 1987 // get the parameters 1988 void *address = message.clear_breakpoint.address; 1989 1990 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_BREAKPOINT" 1991 ": address: %p\n", nubThread->id, address)); 1992 1993 // check the address 1994 status_t result = B_OK; 1995 if (address == NULL 1996 || !BreakpointManager::CanAccessAddress(address, false)) { 1997 result = B_BAD_ADDRESS; 1998 } 1999 2000 // clear the breakpoint 2001 if (result == B_OK) 2002 result = breakpointManager->UninstallBreakpoint(address); 2003 2004 if (result == B_OK) 2005 update_threads_breakpoints_flag(); 2006 2007 break; 2008 } 2009 2010 case B_DEBUG_MESSAGE_SET_WATCHPOINT: 2011 { 2012 // get the parameters 2013 replyPort = message.set_watchpoint.reply_port; 2014 void *address = message.set_watchpoint.address; 2015 uint32 type = message.set_watchpoint.type; 2016 int32 length = message.set_watchpoint.length; 2017 2018 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_WATCHPOINT" 2019 ": address: %p, type: %" B_PRIu32 ", length: %" B_PRId32 "\n", 2020 nubThread->id, address, type, length)); 2021 2022 // check the address and size 2023 status_t result = B_OK; 2024 if (address == NULL 2025 || !BreakpointManager::CanAccessAddress(address, false)) { 2026 result = B_BAD_ADDRESS; 2027 } 2028 if (length < 0) 2029 result = B_BAD_VALUE; 2030 2031 // set the watchpoint 2032 if (result == B_OK) { 2033 result = breakpointManager->InstallWatchpoint(address, 
type, 2034 length); 2035 } 2036 2037 if (result == B_OK) 2038 update_threads_breakpoints_flag(); 2039 2040 // prepare the reply 2041 reply.set_watchpoint.error = result; 2042 replySize = sizeof(reply.set_watchpoint); 2043 sendReply = true; 2044 2045 break; 2046 } 2047 2048 case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT: 2049 { 2050 // get the parameters 2051 void *address = message.clear_watchpoint.address; 2052 2053 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_WATCHPOINT" 2054 ": address: %p\n", nubThread->id, address)); 2055 2056 // check the address 2057 status_t result = B_OK; 2058 if (address == NULL 2059 || !BreakpointManager::CanAccessAddress(address, false)) { 2060 result = B_BAD_ADDRESS; 2061 } 2062 2063 // clear the watchpoint 2064 if (result == B_OK) 2065 result = breakpointManager->UninstallWatchpoint(address); 2066 2067 if (result == B_OK) 2068 update_threads_breakpoints_flag(); 2069 2070 break; 2071 } 2072 2073 case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS: 2074 { 2075 // get the parameters 2076 thread_id threadID = message.set_signal_masks.thread; 2077 uint64 ignore = message.set_signal_masks.ignore_mask; 2078 uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask; 2079 uint32 ignoreOp = message.set_signal_masks.ignore_op; 2080 uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op; 2081 2082 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_MASKS" 2083 ": thread: %" B_PRId32 ", ignore: %" B_PRIx64 " (op: %" 2084 B_PRIu32 "), ignore once: %" B_PRIx64 " (op: %" B_PRIu32 2085 ")\n", nubThread->id, threadID, ignore, ignoreOp, 2086 ignoreOnce, ignoreOnceOp)); 2087 2088 // set the masks 2089 Thread* thread = Thread::GetAndLock(threadID); 2090 if (thread == NULL) 2091 break; 2092 BReference<Thread> threadReference(thread, true); 2093 ThreadLocker threadLocker(thread, true); 2094 2095 InterruptsSpinLocker threadDebugInfoLocker( 2096 thread->debug_info.lock); 2097 2098 if (thread->team == thread_get_current_thread()->team) { 2099 thread_debug_info &threadDebugInfo = thread->debug_info; 2100 // set ignore mask 2101 switch (ignoreOp) { 2102 case B_DEBUG_SIGNAL_MASK_AND: 2103 threadDebugInfo.ignore_signals &= ignore; 2104 break; 2105 case B_DEBUG_SIGNAL_MASK_OR: 2106 threadDebugInfo.ignore_signals |= ignore; 2107 break; 2108 case B_DEBUG_SIGNAL_MASK_SET: 2109 threadDebugInfo.ignore_signals = ignore; 2110 break; 2111 } 2112 2113 // set ignore once mask 2114 switch (ignoreOnceOp) { 2115 case B_DEBUG_SIGNAL_MASK_AND: 2116 threadDebugInfo.ignore_signals_once &= ignoreOnce; 2117 break; 2118 case B_DEBUG_SIGNAL_MASK_OR: 2119 threadDebugInfo.ignore_signals_once |= ignoreOnce; 2120 break; 2121 case B_DEBUG_SIGNAL_MASK_SET: 2122 threadDebugInfo.ignore_signals_once = ignoreOnce; 2123 break; 2124 } 2125 } 2126 2127 break; 2128 } 2129 2130 case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS: 2131 { 2132 // get the parameters 2133 replyPort = message.get_signal_masks.reply_port; 2134 thread_id threadID = message.get_signal_masks.thread; 2135 status_t result = B_OK; 2136 2137 // get the masks 2138 uint64 ignore = 0; 2139 uint64 ignoreOnce = 0; 2140 2141 Thread* thread = Thread::GetAndLock(threadID); 2142 if (thread != NULL) { 2143 BReference<Thread> threadReference(thread, true); 2144 ThreadLocker threadLocker(thread, true); 2145 2146 InterruptsSpinLocker threadDebugInfoLocker( 2147 thread->debug_info.lock); 2148 2149 ignore = thread->debug_info.ignore_signals; 2150 ignoreOnce = thread->debug_info.ignore_signals_once; 2151 } else 2152 result = B_BAD_THREAD_ID; 2153 2154 TRACE(("nub thread %" 
B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_MASKS" 2155 ": reply port: %" B_PRId32 ", thread: %" B_PRId32 ", " 2156 "ignore: %" B_PRIx64 ", ignore once: %" B_PRIx64 ", result: " 2157 "%" B_PRIx32 "\n", nubThread->id, replyPort, threadID, 2158 ignore, ignoreOnce, result)); 2159 2160 // prepare the message 2161 reply.get_signal_masks.error = result; 2162 reply.get_signal_masks.ignore_mask = ignore; 2163 reply.get_signal_masks.ignore_once_mask = ignoreOnce; 2164 replySize = sizeof(reply.get_signal_masks); 2165 sendReply = true; 2166 break; 2167 } 2168 2169 case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER: 2170 { 2171 // get the parameters 2172 int signal = message.set_signal_handler.signal; 2173 struct sigaction &handler = message.set_signal_handler.handler; 2174 2175 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER" 2176 ": signal: %d, handler: %p\n", nubThread->id, signal, 2177 handler.sa_handler)); 2178 2179 // set the handler 2180 sigaction(signal, &handler, NULL); 2181 2182 break; 2183 } 2184 2185 case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER: 2186 { 2187 // get the parameters 2188 replyPort = message.get_signal_handler.reply_port; 2189 int signal = message.get_signal_handler.signal; 2190 status_t result = B_OK; 2191 2192 // get the handler 2193 if (sigaction(signal, NULL, &reply.get_signal_handler.handler) 2194 != 0) { 2195 result = errno; 2196 } 2197 2198 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER" 2199 ": reply port: %" B_PRId32 ", signal: %d, handler: %p\n", 2200 nubThread->id, replyPort, signal, 2201 reply.get_signal_handler.handler.sa_handler)); 2202 2203 // prepare the message 2204 reply.get_signal_handler.error = result; 2205 replySize = sizeof(reply.get_signal_handler); 2206 sendReply = true; 2207 break; 2208 } 2209 2210 case B_DEBUG_MESSAGE_PREPARE_HANDOVER: 2211 { 2212 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_PREPARE_HANDOVER" 2213 "\n", nubThread->id)); 2214 2215 Team *team = nubThread->team; 2216 2217 // Acquire the debugger write lock. As soon as we have it and 2218 // have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread 2219 // will write anything to the debugger port anymore. 2220 status_t result = acquire_sem_etc(writeLock, 1, 2221 B_KILL_CAN_INTERRUPT, 0); 2222 if (result == B_OK) { 2223 // set the respective team debug flag 2224 cpu_status state = disable_interrupts(); 2225 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2226 2227 atomic_or(&team->debug_info.flags, 2228 B_TEAM_DEBUG_DEBUGGER_HANDOVER); 2229 BreakpointManager* breakpointManager 2230 = team->debug_info.breakpoint_manager; 2231 2232 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2233 restore_interrupts(state); 2234 2235 // remove all installed breakpoints 2236 breakpointManager->RemoveAllBreakpoints(); 2237 2238 release_sem(writeLock); 2239 } else { 2240 // We probably got a SIGKILL. If so, we will terminate when 2241 // reading the next message fails. 
2242 } 2243 2244 break; 2245 } 2246 2247 case B_DEBUG_MESSAGE_HANDED_OVER: 2248 { 2249 // notify all threads that the debugger has changed 2250 broadcast_debugged_thread_message(nubThread, 2251 B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0); 2252 2253 break; 2254 } 2255 2256 case B_DEBUG_START_PROFILER: 2257 { 2258 // get the parameters 2259 thread_id threadID = message.start_profiler.thread; 2260 replyPort = message.start_profiler.reply_port; 2261 area_id sampleArea = message.start_profiler.sample_area; 2262 int32 stackDepth = message.start_profiler.stack_depth; 2263 bool variableStackDepth 2264 = message.start_profiler.variable_stack_depth; 2265 bigtime_t interval = max_c(message.start_profiler.interval, 2266 B_DEBUG_MIN_PROFILE_INTERVAL); 2267 status_t result = B_OK; 2268 2269 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_START_PROFILER: " 2270 "thread: %" B_PRId32 ", sample area: %" B_PRId32 "\n", 2271 nubThread->id, threadID, sampleArea)); 2272 2273 if (stackDepth < 1) 2274 stackDepth = 1; 2275 else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH) 2276 stackDepth = B_DEBUG_STACK_TRACE_DEPTH; 2277 2278 // provision for an extra entry per hit (for the number of 2279 // samples), if variable stack depth 2280 if (variableStackDepth) 2281 stackDepth++; 2282 2283 // clone the sample area 2284 area_info areaInfo; 2285 if (result == B_OK) 2286 result = get_area_info(sampleArea, &areaInfo); 2287 2288 area_id clonedSampleArea = -1; 2289 void* samples = NULL; 2290 if (result == B_OK) { 2291 clonedSampleArea = clone_area("profiling samples", &samples, 2292 B_ANY_KERNEL_ADDRESS, 2293 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 2294 sampleArea); 2295 if (clonedSampleArea >= 0) { 2296 // we need the memory locked 2297 result = lock_memory(samples, areaInfo.size, 2298 B_READ_DEVICE); 2299 if (result != B_OK) { 2300 delete_area(clonedSampleArea); 2301 clonedSampleArea = -1; 2302 } 2303 } else 2304 result = clonedSampleArea; 2305 } 2306 2307 // get the thread and set the profile info 2308 int32 imageEvent = nubThread->team->debug_info.image_event; 2309 if (result == B_OK) { 2310 Thread* thread = Thread::GetAndLock(threadID); 2311 BReference<Thread> threadReference(thread, true); 2312 ThreadLocker threadLocker(thread, true); 2313 2314 if (thread != NULL && thread->team == nubThread->team) { 2315 thread_debug_info &threadDebugInfo = thread->debug_info; 2316 2317 InterruptsSpinLocker threadDebugInfoLocker( 2318 threadDebugInfo.lock); 2319 2320 if (threadDebugInfo.profile.samples == NULL) { 2321 threadDebugInfo.profile.interval = interval; 2322 threadDebugInfo.profile.sample_area 2323 = clonedSampleArea; 2324 threadDebugInfo.profile.samples = (addr_t*)samples; 2325 threadDebugInfo.profile.max_samples 2326 = areaInfo.size / sizeof(addr_t); 2327 threadDebugInfo.profile.flush_threshold 2328 = threadDebugInfo.profile.max_samples 2329 * B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD 2330 / 100; 2331 threadDebugInfo.profile.sample_count = 0; 2332 threadDebugInfo.profile.dropped_ticks = 0; 2333 threadDebugInfo.profile.stack_depth = stackDepth; 2334 threadDebugInfo.profile.variable_stack_depth 2335 = variableStackDepth; 2336 threadDebugInfo.profile.buffer_full = false; 2337 threadDebugInfo.profile.interval_left = interval; 2338 threadDebugInfo.profile.installed_timer = NULL; 2339 threadDebugInfo.profile.image_event = imageEvent; 2340 threadDebugInfo.profile.last_image_event 2341 = imageEvent; 2342 } else 2343 result = B_BAD_VALUE; 2344 } else 2345 result = B_BAD_THREAD_ID; 2346 } 2347 2348 // on error unlock and delete the sample area 
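// (the area was only cloned into kernel space and never handed to the thread)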
2349 if (result != B_OK) { 2350 if (clonedSampleArea >= 0) { 2351 unlock_memory(samples, areaInfo.size, B_READ_DEVICE); 2352 delete_area(clonedSampleArea); 2353 } 2354 } 2355 2356 // send a reply to the debugger 2357 reply.start_profiler.error = result; 2358 reply.start_profiler.interval = interval; 2359 reply.start_profiler.image_event = imageEvent; 2360 sendReply = true; 2361 replySize = sizeof(reply.start_profiler); 2362 2363 break; 2364 } 2365 2366 case B_DEBUG_STOP_PROFILER: 2367 { 2368 // get the parameters 2369 thread_id threadID = message.stop_profiler.thread; 2370 replyPort = message.stop_profiler.reply_port; 2371 status_t result = B_OK; 2372 2373 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_STOP_PROFILER: " 2374 "thread: %" B_PRId32 "\n", nubThread->id, threadID)); 2375 2376 area_id sampleArea = -1; 2377 addr_t* samples = NULL; 2378 int32 sampleCount = 0; 2379 int32 stackDepth = 0; 2380 bool variableStackDepth = false; 2381 int32 imageEvent = 0; 2382 int32 droppedTicks = 0; 2383 bigtime_t lastCPUTime = 0; 2384 2385 // get the thread and detach the profile info 2386 Thread* thread = Thread::GetAndLock(threadID); 2387 BReference<Thread> threadReference(thread, true); 2388 ThreadLocker threadLocker(thread, true); 2389 2390 if (thread && thread->team == nubThread->team) { 2391 thread_debug_info &threadDebugInfo = thread->debug_info; 2392 2393 InterruptsSpinLocker threadDebugInfoLocker( 2394 threadDebugInfo.lock); 2395 2396 if (threadDebugInfo.profile.samples != NULL) { 2397 sampleArea = threadDebugInfo.profile.sample_area; 2398 samples = threadDebugInfo.profile.samples; 2399 sampleCount = threadDebugInfo.profile.sample_count; 2400 droppedTicks = threadDebugInfo.profile.dropped_ticks; 2401 stackDepth = threadDebugInfo.profile.stack_depth; 2402 variableStackDepth 2403 = threadDebugInfo.profile.variable_stack_depth; 2404 imageEvent = threadDebugInfo.profile.image_event; 2405 threadDebugInfo.profile.sample_area = -1; 2406 threadDebugInfo.profile.samples = NULL; 2407 threadDebugInfo.profile.buffer_full = false; 2408 threadDebugInfo.profile.dropped_ticks = 0; 2409 { 2410 SpinLocker threadTimeLocker(thread->time_lock); 2411 lastCPUTime = thread->CPUTime(false); 2412 } 2413 } else 2414 result = B_BAD_VALUE; 2415 } else 2416 result = B_BAD_THREAD_ID; 2417 2418 threadLocker.Unlock(); 2419 2420 // prepare the reply 2421 if (result == B_OK) { 2422 reply.profiler_update.origin.thread = threadID; 2423 reply.profiler_update.image_event = imageEvent; 2424 reply.profiler_update.stack_depth = stackDepth; 2425 reply.profiler_update.variable_stack_depth 2426 = variableStackDepth; 2427 reply.profiler_update.sample_count = sampleCount; 2428 reply.profiler_update.dropped_ticks = droppedTicks; 2429 reply.profiler_update.stopped = true; 2430 reply.profiler_update.last_cpu_time = lastCPUTime; 2431 } else 2432 reply.profiler_update.origin.thread = result; 2433 2434 replySize = sizeof(debug_profiler_update); 2435 sendReply = true; 2436 2437 if (sampleArea >= 0) { 2438 area_info areaInfo; 2439 if (get_area_info(sampleArea, &areaInfo) == B_OK) { 2440 unlock_memory(samples, areaInfo.size, B_READ_DEVICE); 2441 delete_area(sampleArea); 2442 } 2443 } 2444 2445 break; 2446 } 2447 2448 case B_DEBUG_WRITE_CORE_FILE: 2449 { 2450 // get the parameters 2451 replyPort = message.write_core_file.reply_port; 2452 char* path = message.write_core_file.path; 2453 path[sizeof(message.write_core_file.path) - 1] = '\0'; 2454 2455 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_WRITE_CORE_FILE" 2456 ": path: %s\n", nubThread->id, path)); 2457 
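// The dump is written synchronously from the nub thread; the reply prepared below only reports the result back to the debugger.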
2458 // write the core file 2459 status_t result = core_dump_write_core_file(path, false); 2460 2461 // prepare the reply 2462 reply.write_core_file.error = result; 2463 replySize = sizeof(reply.write_core_file); 2464 sendReply = true; 2465 2466 break; 2467 } 2468 } 2469 2470 // send the reply, if necessary 2471 if (sendReply) { 2472 status_t error = kill_interruptable_write_port(replyPort, command, 2473 &reply, replySize); 2474 2475 if (error != B_OK) { 2476 // The debugger port either no longer exists or we got 2477 // interrupted by a kill signal. In either case we terminate. 2478 TRACE(("nub thread %" B_PRId32 ": failed to send reply to port " 2479 "%" B_PRId32 ": %s\n", nubThread->id, replyPort, 2480 strerror(error))); 2481 2482 nub_thread_cleanup(nubThread); 2483 return error; 2484 } 2485 } 2486 } 2487 } 2488 2489 2490 /** \brief Helper function for install_team_debugger() that sets up the team 2491 and thread debug infos. 2492 2493 The caller must hold the team's lock as well as the team debug info lock. 2494 2495 The function also clears the arch-specific team and thread debug infos 2496 (including, among other things, previously set break- and watchpoints). 2497 */ 2498 static void 2499 install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam, 2500 port_id debuggerPort, port_id nubPort, thread_id nubThread, 2501 sem_id debuggerPortWriteLock, thread_id causingThread) 2502 { 2503 atomic_set(&team->debug_info.flags, 2504 B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED); 2505 team->debug_info.nub_port = nubPort; 2506 team->debug_info.nub_thread = nubThread; 2507 team->debug_info.debugger_team = debuggerTeam; 2508 team->debug_info.debugger_port = debuggerPort; 2509 team->debug_info.debugger_write_lock = debuggerPortWriteLock; 2510 team->debug_info.causing_thread = causingThread; 2511 2512 arch_clear_team_debug_info(&team->debug_info.arch_info); 2513 2514 // set the user debug flags and signal masks of all threads to the default 2515 for (Thread *thread = team->thread_list; thread; 2516 thread = thread->team_next) { 2517 SpinLocker threadDebugInfoLocker(thread->debug_info.lock); 2518 2519 if (thread->id == nubThread) { 2520 atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD); 2521 } else { 2522 int32 flags = thread->debug_info.flags 2523 & ~B_THREAD_DEBUG_USER_FLAG_MASK; 2524 atomic_set(&thread->debug_info.flags, 2525 flags | B_THREAD_DEBUG_DEFAULT_FLAGS); 2526 thread->debug_info.ignore_signals = 0; 2527 thread->debug_info.ignore_signals_once = 0; 2528 2529 arch_clear_thread_debug_info(&thread->debug_info.arch_info); 2530 } 2531 } 2532 2533 // update the thread::flags fields 2534 update_threads_debugger_installed_flag(team); 2535 } 2536 2537 2538 static port_id 2539 install_team_debugger(team_id teamID, port_id debuggerPort, 2540 thread_id causingThread, bool useDefault, bool dontReplace) 2541 { 2542 TRACE(("install_team_debugger(team: %" B_PRId32 ", port: %" B_PRId32 ", " 2543 "default: %d, dontReplace: %d)\n", teamID, debuggerPort, useDefault, 2544 dontReplace)); 2545 2546 if (useDefault) 2547 debuggerPort = atomic_get(&sDefaultDebuggerPort); 2548 2549 // get the debugger team 2550 port_info debuggerPortInfo; 2551 status_t error = get_port_info(debuggerPort, &debuggerPortInfo); 2552 if (error != B_OK) { 2553 TRACE(("install_team_debugger(): Failed to get debugger port info: " 2554 "%" B_PRIx32 "\n", error)); 2555 return error; 2556 } 2557 team_id debuggerTeam = debuggerPortInfo.team; 2558 2559 // Check the debugger team: It must neither be the
kernel team nor the 2560 // debugged team. 2561 if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) { 2562 TRACE(("install_team_debugger(): Can't debug kernel or debugger team. " 2563 "debugger: %" B_PRId32 ", debugged: %" B_PRId32 "\n", debuggerTeam, 2564 teamID)); 2565 return B_NOT_ALLOWED; 2566 } 2567 2568 // get the team 2569 Team* team; 2570 ConditionVariable debugChangeCondition; 2571 debugChangeCondition.Init(NULL, "debug change condition"); 2572 error = prepare_debugger_change(teamID, debugChangeCondition, team); 2573 if (error != B_OK) 2574 return error; 2575 2576 // get the real team ID 2577 teamID = team->id; 2578 2579 // check, if a debugger is already installed 2580 2581 bool done = false; 2582 port_id result = B_ERROR; 2583 bool handOver = false; 2584 port_id oldDebuggerPort = -1; 2585 port_id nubPort = -1; 2586 2587 TeamLocker teamLocker(team); 2588 cpu_status state = disable_interrupts(); 2589 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2590 2591 int32 teamDebugFlags = team->debug_info.flags; 2592 2593 if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) { 2594 // There's already a debugger installed. 2595 if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) { 2596 if (dontReplace) { 2597 // We're fine with already having a debugger. 2598 error = B_OK; 2599 done = true; 2600 result = team->debug_info.nub_port; 2601 } else { 2602 // a handover to another debugger is requested 2603 // Set the handing-over flag -- we'll clear both flags after 2604 // having sent the handed-over message to the new debugger. 2605 atomic_or(&team->debug_info.flags, 2606 B_TEAM_DEBUG_DEBUGGER_HANDING_OVER); 2607 2608 oldDebuggerPort = team->debug_info.debugger_port; 2609 result = nubPort = team->debug_info.nub_port; 2610 if (causingThread < 0) 2611 causingThread = team->debug_info.causing_thread; 2612 2613 // set the new debugger 2614 install_team_debugger_init_debug_infos(team, debuggerTeam, 2615 debuggerPort, nubPort, team->debug_info.nub_thread, 2616 team->debug_info.debugger_write_lock, causingThread); 2617 2618 handOver = true; 2619 done = true; 2620 } 2621 } else { 2622 // there's already a debugger installed 2623 error = (dontReplace ? B_OK : B_BAD_VALUE); 2624 done = true; 2625 result = team->debug_info.nub_port; 2626 } 2627 } else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0 2628 && useDefault) { 2629 // No debugger yet, disable_debugger() had been invoked, and we 2630 // would install the default debugger. Just fail. 2631 error = B_BAD_VALUE; 2632 } 2633 2634 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2635 restore_interrupts(state); 2636 teamLocker.Unlock(); 2637 2638 if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) { 2639 // The old debugger must just have died. Just proceed as 2640 // if there was no debugger installed. We may still be too 2641 // early, in which case we'll fail, but this race condition 2642 // should be unbelievably rare and relatively harmless. 
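// Fall back to the fresh-installation path below.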
2643 handOver = false; 2644 done = false; 2645 } 2646 2647 if (handOver) { 2648 // prepare the handed-over message 2649 debug_handed_over notification; 2650 notification.origin.thread = -1; 2651 notification.origin.team = teamID; 2652 notification.origin.nub_port = nubPort; 2653 notification.debugger = debuggerTeam; 2654 notification.debugger_port = debuggerPort; 2655 notification.causing_thread = causingThread; 2656 2657 // notify the new debugger 2658 error = write_port_etc(debuggerPort, 2659 B_DEBUGGER_MESSAGE_HANDED_OVER, ¬ification, 2660 sizeof(notification), B_RELATIVE_TIMEOUT, 0); 2661 if (error != B_OK) { 2662 dprintf("install_team_debugger(): Failed to send message to new " 2663 "debugger: %s\n", strerror(error)); 2664 } 2665 2666 // clear the handed-over and handing-over flags 2667 state = disable_interrupts(); 2668 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2669 2670 atomic_and(&team->debug_info.flags, 2671 ~(B_TEAM_DEBUG_DEBUGGER_HANDOVER 2672 | B_TEAM_DEBUG_DEBUGGER_HANDING_OVER)); 2673 2674 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2675 restore_interrupts(state); 2676 2677 finish_debugger_change(team); 2678 2679 // notify the nub thread 2680 kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER, 2681 NULL, 0); 2682 2683 // notify the old debugger 2684 error = write_port_etc(oldDebuggerPort, 2685 B_DEBUGGER_MESSAGE_HANDED_OVER, ¬ification, 2686 sizeof(notification), B_RELATIVE_TIMEOUT, 0); 2687 if (error != B_OK) { 2688 TRACE(("install_team_debugger(): Failed to send message to old " 2689 "debugger: %s\n", strerror(error))); 2690 } 2691 2692 TRACE(("install_team_debugger() done: handed over to debugger: team: " 2693 "%" B_PRId32 ", port: %" B_PRId32 "\n", debuggerTeam, 2694 debuggerPort)); 2695 2696 return result; 2697 } 2698 2699 if (done || error != B_OK) { 2700 TRACE(("install_team_debugger() done1: %" B_PRId32 "\n", 2701 (error == B_OK ? result : error))); 2702 finish_debugger_change(team); 2703 return (error == B_OK ? 
result : error); 2704 } 2705 2706 // create the debugger write lock semaphore 2707 char nameBuffer[B_OS_NAME_LENGTH]; 2708 snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debugger port " 2709 "write", teamID); 2710 sem_id debuggerWriteLock = create_sem(1, nameBuffer); 2711 if (debuggerWriteLock < 0) 2712 error = debuggerWriteLock; 2713 2714 // create the nub port 2715 snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug", teamID); 2716 if (error == B_OK) { 2717 nubPort = create_port(1, nameBuffer); 2718 if (nubPort < 0) 2719 error = nubPort; 2720 else 2721 result = nubPort; 2722 } 2723 2724 // make the debugger team the port owner; thus we know if the debugger is 2725 // gone and can clean up 2726 if (error == B_OK) 2727 error = set_port_owner(nubPort, debuggerTeam); 2728 2729 // create the breakpoint manager 2730 BreakpointManager* breakpointManager = NULL; 2731 if (error == B_OK) { 2732 breakpointManager = new(std::nothrow) BreakpointManager; 2733 if (breakpointManager != NULL) 2734 error = breakpointManager->Init(); 2735 else 2736 error = B_NO_MEMORY; 2737 } 2738 2739 // spawn the nub thread 2740 thread_id nubThread = -1; 2741 if (error == B_OK) { 2742 snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug task", 2743 teamID); 2744 nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer, 2745 B_NORMAL_PRIORITY, NULL, teamID); 2746 if (nubThread < 0) 2747 error = nubThread; 2748 } 2749 2750 // now adjust the debug info accordingly 2751 if (error == B_OK) { 2752 TeamLocker teamLocker(team); 2753 state = disable_interrupts(); 2754 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2755 2756 team->debug_info.breakpoint_manager = breakpointManager; 2757 install_team_debugger_init_debug_infos(team, debuggerTeam, 2758 debuggerPort, nubPort, nubThread, debuggerWriteLock, 2759 causingThread); 2760 2761 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2762 restore_interrupts(state); 2763 } 2764 2765 finish_debugger_change(team); 2766 2767 // if everything went fine, resume the nub thread; otherwise clean up 2768 if (error == B_OK) { 2769 resume_thread(nubThread); 2770 } else { 2771 // delete port and terminate thread 2772 if (nubPort >= 0) { 2773 set_port_owner(nubPort, B_CURRENT_TEAM); 2774 delete_port(nubPort); 2775 } 2776 if (nubThread >= 0) { 2777 int32 result; 2778 wait_for_thread(nubThread, &result); 2779 } 2780 2781 delete breakpointManager; 2782 } 2783 2784 TRACE(("install_team_debugger() done2: %" B_PRId32 "\n", 2785 (error == B_OK ? result : error))); 2786 return (error == B_OK ? result : error); 2787 } 2788 2789 2790 static status_t 2791 ensure_debugger_installed() 2792 { 2793 port_id port = install_team_debugger(B_CURRENT_TEAM, -1, 2794 thread_get_current_thread_id(), true, true); 2795 return port >= 0 ? B_OK : port; 2796 } 2797 2798 2799 // #pragma mark - 2800 2801 2802 void 2803 _user_debugger(const char *userMessage) 2804 { 2805 // install the default debugger, if there is none yet 2806 status_t error = ensure_debugger_installed(); 2807 if (error != B_OK) { 2808 // time to commit suicide 2809 char buffer[128]; 2810 ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer)); 2811 if (length >= 0) { 2812 dprintf("_user_debugger(): Failed to install debugger. Message is: " 2813 "`%s'\n", buffer); 2814 } else { 2815 dprintf("_user_debugger(): Failed to install debugger.
Message is: " 2816 "%p (%s)\n", userMessage, strerror(length)); 2817 } 2818 _user_exit_team(1); 2819 } 2820 2821 // prepare the message 2822 debug_debugger_call message; 2823 message.message = (void*)userMessage; 2824 2825 thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message, 2826 sizeof(message), true); 2827 } 2828 2829 2830 int 2831 _user_disable_debugger(int state) 2832 { 2833 Team *team = thread_get_current_thread()->team; 2834 2835 TRACE(("_user_disable_debugger(%d): team: %" B_PRId32 "\n", state, 2836 team->id)); 2837 2838 cpu_status cpuState = disable_interrupts(); 2839 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2840 2841 int32 oldFlags; 2842 if (state) { 2843 oldFlags = atomic_or(&team->debug_info.flags, 2844 B_TEAM_DEBUG_DEBUGGER_DISABLED); 2845 } else { 2846 oldFlags = atomic_and(&team->debug_info.flags, 2847 ~B_TEAM_DEBUG_DEBUGGER_DISABLED); 2848 } 2849 2850 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2851 restore_interrupts(cpuState); 2852 2853 // TODO: Check, if the return value is really the old state. 2854 return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED); 2855 } 2856 2857 2858 status_t 2859 _user_install_default_debugger(port_id debuggerPort) 2860 { 2861 // Do not allow non-root processes to install a default debugger. 2862 if (geteuid() != 0) 2863 return B_PERMISSION_DENIED; 2864 2865 // if supplied, check whether the port is a valid port 2866 if (debuggerPort >= 0) { 2867 port_info portInfo; 2868 status_t error = get_port_info(debuggerPort, &portInfo); 2869 if (error != B_OK) 2870 return error; 2871 2872 // the debugger team must not be the kernel team 2873 if (portInfo.team == team_get_kernel_team_id()) 2874 return B_NOT_ALLOWED; 2875 } 2876 2877 atomic_set(&sDefaultDebuggerPort, debuggerPort); 2878 2879 return B_OK; 2880 } 2881 2882 2883 port_id 2884 _user_install_team_debugger(team_id teamID, port_id debuggerPort) 2885 { 2886 if (geteuid() != 0 && team_geteuid(teamID) != geteuid()) 2887 return B_PERMISSION_DENIED; 2888 2889 return install_team_debugger(teamID, debuggerPort, -1, false, false); 2890 } 2891 2892 2893 status_t 2894 _user_remove_team_debugger(team_id teamID) 2895 { 2896 Team* team; 2897 ConditionVariable debugChangeCondition; 2898 debugChangeCondition.Init(NULL, "debug change condition"); 2899 status_t error = prepare_debugger_change(teamID, debugChangeCondition, 2900 team); 2901 if (error != B_OK) 2902 return error; 2903 2904 InterruptsSpinLocker debugInfoLocker(team->debug_info.lock); 2905 2906 thread_id nubThread = -1; 2907 port_id nubPort = -1; 2908 2909 if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) { 2910 // there's a debugger installed 2911 nubThread = team->debug_info.nub_thread; 2912 nubPort = team->debug_info.nub_port; 2913 } else { 2914 // no debugger installed 2915 error = B_BAD_VALUE; 2916 } 2917 2918 debugInfoLocker.Unlock(); 2919 2920 // Delete the nub port -- this will cause the nub thread to terminate and 2921 // remove the debugger. 
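// (its blocking read from the port then fails, upon which debug_nub_thread() runs nub_thread_cleanup() and exits)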
2922 if (nubPort >= 0) 2923 delete_port(nubPort); 2924 2925 finish_debugger_change(team); 2926 2927 // wait for the nub thread 2928 if (nubThread >= 0) 2929 wait_for_thread(nubThread, NULL); 2930 2931 return error; 2932 } 2933 2934 2935 status_t 2936 _user_debug_thread(thread_id threadID) 2937 { 2938 TRACE(("[%" B_PRId32 "] _user_debug_thread(%" B_PRId32 ")\n", 2939 find_thread(NULL), threadID)); 2940 2941 // get the thread 2942 Thread* thread = Thread::GetAndLock(threadID); 2943 if (thread == NULL) 2944 return B_BAD_THREAD_ID; 2945 BReference<Thread> threadReference(thread, true); 2946 ThreadLocker threadLocker(thread, true); 2947 2948 // we can't debug the kernel team 2949 if (thread->team == team_get_kernel_team()) 2950 return B_NOT_ALLOWED; 2951 2952 InterruptsLocker interruptsLocker; 2953 SpinLocker threadDebugInfoLocker(thread->debug_info.lock); 2954 2955 // If the thread is already dying, it's too late to debug it. 2956 if ((thread->debug_info.flags & B_THREAD_DEBUG_DYING) != 0) 2957 return B_BAD_THREAD_ID; 2958 2959 // don't debug the nub thread 2960 if ((thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) != 0) 2961 return B_NOT_ALLOWED; 2962 2963 // already marked stopped or being told to stop? 2964 if ((thread->debug_info.flags 2965 & (B_THREAD_DEBUG_STOPPED | B_THREAD_DEBUG_STOP)) != 0) { 2966 return B_OK; 2967 } 2968 2969 // set the flag that tells the thread to stop as soon as possible 2970 atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP); 2971 2972 update_thread_user_debug_flag(thread); 2973 2974 // send the thread a SIGNAL_DEBUG_THREAD, so it is interrupted (or 2975 // continued) 2976 threadDebugInfoLocker.Unlock(); 2977 ReadSpinLocker teamLocker(thread->team_lock); 2978 SpinLocker locker(thread->team->signal_lock); 2979 2980 send_signal_to_thread_locked(thread, SIGNAL_DEBUG_THREAD, NULL, 0); 2981 2982 return B_OK; 2983 } 2984 2985 2986 void 2987 _user_wait_for_debugger(void) 2988 { 2989 debug_thread_debugged message = {}; 2990 thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message, 2991 sizeof(message), false); 2992 } 2993 2994 2995 status_t 2996 _user_set_debugger_breakpoint(void *address, uint32 type, int32 length, 2997 bool watchpoint) 2998 { 2999 // check the address and size 3000 if (address == NULL || !BreakpointManager::CanAccessAddress(address, false)) 3001 return B_BAD_ADDRESS; 3002 if (watchpoint && length < 0) 3003 return B_BAD_VALUE; 3004 3005 // check whether a debugger is installed already 3006 team_debug_info teamDebugInfo; 3007 get_team_debug_info(teamDebugInfo); 3008 if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) 3009 return B_BAD_VALUE; 3010 3011 // We can't help it, here's a small but relatively harmless race condition, 3012 // since a debugger could be installed in the meantime. The worst case is 3013 // that we install a break/watchpoint the debugger doesn't know about. 
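// Without a debugger installed there is no BreakpointManager, so the break-/watchpoint goes directly through the arch layer below.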
3014 3015 // set the break/watchpoint 3016 status_t result; 3017 if (watchpoint) 3018 result = arch_set_watchpoint(address, type, length); 3019 else 3020 result = arch_set_breakpoint(address); 3021 3022 if (result == B_OK) 3023 update_threads_breakpoints_flag(); 3024 3025 return result; 3026 } 3027 3028 3029 status_t 3030 _user_clear_debugger_breakpoint(void *address, bool watchpoint) 3031 { 3032 // check the address 3033 if (address == NULL || !BreakpointManager::CanAccessAddress(address, false)) 3034 return B_BAD_ADDRESS; 3035 3036 // check whether a debugger is installed already 3037 team_debug_info teamDebugInfo; 3038 get_team_debug_info(teamDebugInfo); 3039 if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) 3040 return B_BAD_VALUE; 3041 3042 // We can't help it, here's a small but relatively harmless race condition, 3043 // since a debugger could be installed in the meantime. The worst case is 3044 // that we clear a break/watchpoint the debugger has just installed. 3045 3046 // clear the break/watchpoint 3047 status_t result; 3048 if (watchpoint) 3049 result = arch_clear_watchpoint(address); 3050 else 3051 result = arch_clear_breakpoint(address); 3052 3053 if (result == B_OK) 3054 update_threads_breakpoints_flag(); 3055 3056 return result; 3057 } 3058
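// ---------------------------------------------------------------------------
// Illustrative sketch only (not compiled): how a userland debugger typically
// talks to the nub thread implemented above. It assumes the public
// <debugger.h> API -- install_team_debugger(), the debug_nub_* message
// structures, and the B_DEBUG_MESSAGE_* constants handled in this file.
// "targetTeam" and "someAddress" are placeholders; all error handling and the
// processing of B_DEBUGGER_MESSAGE_* events on the debugger port are omitted.
//
//	// become the team's debugger; on success we get its nub port
//	port_id debuggerPort = create_port(10, "my debugger port");
//	port_id nubPort = install_team_debugger(targetTeam, debuggerPort);
//
//	// read memory from the debugged team: send B_DEBUG_MESSAGE_READ_MEMORY
//	// to the nub port and wait for the reply on our own reply port
//	port_id replyPort = create_port(1, "read memory reply");
//	debug_nub_read_memory message;
//	message.reply_port = replyPort;
//	message.address = someAddress;
//	message.size = 256;
//	write_port(nubPort, B_DEBUG_MESSAGE_READ_MEMORY, &message,
//		sizeof(message));
//
//	debug_nub_read_memory_reply reply;
//	int32 code;
//	read_port(replyPort, &code, &reply, sizeof(reply));
//	// on success reply.error is B_OK and reply.size bytes are available in
//	// reply.data (cf. the B_DEBUG_MESSAGE_READ_MEMORY case above)
// ---------------------------------------------------------------------------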