1 /* 2 * Copyright 2005-2016, Ingo Weinhold, ingo_weinhold@gmx.de. 3 * Copyright 2015, Rene Gollent, rene@gollent.com. 4 * Distributed under the terms of the MIT License. 5 */ 6 7 8 #include <errno.h> 9 #include <signal.h> 10 #include <stdlib.h> 11 #include <stdio.h> 12 #include <string.h> 13 14 #include <algorithm> 15 16 #include <arch/debug.h> 17 #include <arch/user_debugger.h> 18 #include <core_dump.h> 19 #include <cpu.h> 20 #include <debugger.h> 21 #include <kernel.h> 22 #include <KernelExport.h> 23 #include <kscheduler.h> 24 #include <ksignal.h> 25 #include <ksyscalls.h> 26 #include <port.h> 27 #include <sem.h> 28 #include <team.h> 29 #include <thread.h> 30 #include <thread_types.h> 31 #include <user_debugger.h> 32 #include <vm/vm.h> 33 #include <vm/vm_types.h> 34 35 #include <AutoDeleter.h> 36 #include <util/AutoLock.h> 37 #include <util/ThreadAutoLock.h> 38 39 #include "BreakpointManager.h" 40 41 42 //#define TRACE_USER_DEBUGGER 43 #ifdef TRACE_USER_DEBUGGER 44 # define TRACE(x) dprintf x 45 #else 46 # define TRACE(x) ; 47 #endif 48 49 50 // TODO: Since the introduction of team_debug_info::debugger_changed_condition 51 // there's some potential for simplifications. E.g. clear_team_debug_info() and 52 // destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus 53 // arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()). 
static port_id sDefaultDebuggerPort = -1;
	// accessed atomically

static timer sProfilingTimers[SMP_MAX_CPUS];
	// a profiling timer for each CPU -- used when a profiled thread is running
	// on that CPU


static void schedule_profiling_timer(Thread* thread, bigtime_t interval);
static int32 profiling_event(timer* unused);
static status_t ensure_debugger_installed();
static void get_team_debug_info(team_debug_info &teamDebugInfo);


/*!	Writes a message to the given port, blocking until it is written, but
	aborting (with an error) when the calling thread is killed.
*/
static inline status_t
kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
	size_t bufferSize)
{
	return write_port_etc(port, code, buffer, bufferSize, B_KILL_CAN_INTERRUPT,
		0);
}


/*!	Sends a message to the current team's debugger port, serialized via the
	team's debugger write lock.
	The message is silently dropped (\c B_OK is returned) when, by the time the
	write lock has been acquired, the debugger port has changed or a debugger
	handover is in progress.
	\param dontWait If \c true, fail immediately instead of blocking when the
		write lock or the port is full; otherwise block kill-interruptably.
*/
static status_t
debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
	bool dontWait)
{
	TRACE(("debugger_write(): thread: %" B_PRId32 ", team %" B_PRId32 ", "
		"port: %" B_PRId32 ", code: %" B_PRIx32 ", message: %p, size: %lu, "
		"dontWait: %d\n", thread_get_current_thread()->id,
		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
		dontWait));

	status_t error = B_OK;

	// get the team debug info (snapshot -- copied under the debug info lock)
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	sem_id writeLock = teamDebugInfo.debugger_write_lock;

	// get the write lock
	TRACE(("debugger_write(): acquiring write lock...\n"));
	error = acquire_sem_etc(writeLock, 1,
		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK) {
		TRACE(("debugger_write() done1: %" B_PRIx32 "\n", error));
		return error;
	}

	// re-get the team debug info -- the debugger may have changed while we
	// were waiting for the lock
	get_team_debug_info(teamDebugInfo);

	if (teamDebugInfo.debugger_port != port
		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
		// The debugger has changed in the meantime or we are about to be
		// handed over to a new debugger. In either case we don't send the
		// message.
		TRACE(("debugger_write(): %s\n",
			(teamDebugInfo.debugger_port != port ? "debugger port changed"
				: "handover flag set")));
	} else {
		TRACE(("debugger_write(): writing to port...\n"));

		error = write_port_etc(port, code, buffer, bufferSize,
			dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	}

	// release the write lock
	release_sem(writeLock);

	TRACE(("debugger_write() done: %" B_PRIx32 "\n", error));

	return error;
}


/*!	Updates the thread::flags field according to what user debugger flags are
	set for the thread.
	Interrupts must be disabled and the thread's debug info lock must be held.
*/
static void
update_thread_user_debug_flag(Thread* thread)
{
	if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0)
		atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD);
}


/*!	Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the
	given thread.
	Interrupts must be disabled and the thread debug info lock must be held.
*/
static void
update_thread_breakpoints_flag(Thread* thread)
{
	Team* team = thread->team;

	if (arch_has_breakpoints(&team->debug_info.arch_info))
		atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
}


/*!	Updates the Thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
	threads of the current team.
*/
static void
update_threads_breakpoints_flag()
{
	Team* team = thread_get_current_thread()->team;

	TeamLocker teamLocker(team);

	Thread* thread = team->thread_list;

	// hoist the arch check out of the loop -- it is the same for all threads
	if (arch_has_breakpoints(&team->debug_info.arch_info)) {
		for (; thread != NULL; thread = thread->team_next)
			atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
	} else {
		for (; thread != NULL; thread = thread->team_next)
			atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
	}
}


/*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of the
	given thread, which must be the current thread.
*/
static void
update_thread_debugger_installed_flag(Thread* thread)
{
	Team* team = thread->team;

	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
}


/*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all
	threads of the given team.
	The team's lock must be held.
*/
static void
update_threads_debugger_installed_flag(Team* team)
{
	Thread* thread = team->thread_list;

	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		for (; thread != NULL; thread = thread->team_next)
			atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
	} else {
		for (; thread != NULL; thread = thread->team_next)
			atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
	}
}


/**
 * Resets the given team debug info to its default (no debugger installed)
 * state. Does not release any resources -- see destroy_team_debug_info().
 * For the first initialization the function must be called with \a initLock
 * set to \c true. If it would be possible that another thread accesses the
 * structure at the same time, `lock' must be held when calling the function.
 */
void
clear_team_debug_info(struct team_debug_info *info, bool initLock)
{
	if (info) {
		arch_clear_team_debug_info(&info->arch_info);
		atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->nub_thread = -1;
		info->nub_port = -1;
		info->debugger_write_lock = -1;
		info->causing_thread = -1;
		info->image_event = 0;
		info->breakpoint_manager = NULL;

		if (initLock) {
			B_INITIALIZE_SPINLOCK(&info->lock);
			info->debugger_changed_condition = NULL;
		}
	}
}


/**
 * Releases all resources held by the given team debug info (breakpoint
 * manager, write lock semaphore, nub port, nub thread).
 * `lock' must not be held nor may interrupts be disabled.
 * \a info must not be a member of a team struct (or the team struct must no
 * longer be accessible, i.e. the team should already be removed).
 *
 * In case the team is still accessible, the procedure is:
 * 1. get `lock'
 * 2. copy the team debug info on stack
 * 3. call clear_team_debug_info() on the team debug info
 * 4. release `lock'
 * 5. call destroy_team_debug_info() on the copied team debug info
 */
static void
destroy_team_debug_info(struct team_debug_info *info)
{
	if (info) {
		arch_destroy_team_debug_info(&info->arch_info);

		// delete the breakpoint manager
		delete info->breakpoint_manager;
		info->breakpoint_manager = NULL;

		// delete the debugger port write lock
		if (info->debugger_write_lock >= 0) {
			delete_sem(info->debugger_write_lock);
			info->debugger_write_lock = -1;
		}

		// delete the nub port -- it must be reclaimed first, since the
		// nub thread's team may no longer own it
		if (info->nub_port >= 0) {
			set_port_owner(info->nub_port, B_CURRENT_TEAM);
			delete_port(info->nub_port);
			info->nub_port = -1;
		}

		// wait for the nub thread (unless we are the nub thread ourselves,
		// which would deadlock)
		if (info->nub_thread >= 0) {
			if (info->nub_thread != thread_get_current_thread()->id) {
				int32 result;
				wait_for_thread(info->nub_thread, &result);
			}

			info->nub_thread = -1;
		}

		atomic_set(&info->flags, 0);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->causing_thread = -1;
		info->image_event = -1;
			// NOTE(review): clear_team_debug_info() resets image_event to 0,
			// not -1 -- presumably intentional (the info is dead here), but
			// worth confirming.
	}
}


/*!	Initializes the given thread debug info to its default (not debugged)
	state, including its spinlock.
*/
void
init_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		B_INITIALIZE_SPINLOCK(&info->lock);
		arch_clear_thread_debug_info(&info->arch_info);
		info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
		info->profile.installed_timer = NULL;
	}
}


/*!	Clears the debug info for the current thread.
	Invoked with thread debug info lock being held.
*/
void
clear_thread_debug_info(struct thread_debug_info *info, bool dying)
{
	if (info) {
		// cancel profiling timer
		if (info->profile.installed_timer != NULL) {
			cancel_timer(info->profile.installed_timer);
			info->profile.installed_timer = NULL;
		}

		arch_clear_thread_debug_info(&info->arch_info);
		atomic_set(&info->flags,
			B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ? B_THREAD_DEBUG_DYING : 0));
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
	}
}


/*!	Releases all resources held by the given thread debug info (profiling
	sample area, debug port).
	Must not be called with the thread debug info lock held (deletes ports
	and areas).
*/
void
destroy_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		// unlock and delete the profiling sample buffer, if any
		area_id sampleArea = info->profile.sample_area;
		if (sampleArea >= 0) {
			area_info areaInfo;
			if (get_area_info(sampleArea, &areaInfo) == B_OK) {
				unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
				delete_area(sampleArea);
			}
		}

		arch_destroy_thread_debug_info(&info->arch_info);

		if (info->debug_port >= 0) {
			delete_port(info->debug_port);
			info->debug_port = -1;
		}

		info->ignore_signals = 0;
		info->ignore_signals_once = 0;

		atomic_set(&info->flags, 0);
	}
}


/*!	Gets the team and installs \a condition as its debugger-changed condition
	variable, waiting for any previously installed condition first. On success
	the caller holds the exclusive right to change the team's debugger until
	it calls finish_debugger_change().
	\param teamID The team, or \c B_CURRENT_TEAM for the caller's team.
	\param condition The (initialized) condition variable to install.
	\param team Set to the team on success (not referenced -- the caller only
		uses it while the debugger change condition blocks team removal).
	\return \c B_OK, \c B_BAD_TEAM_ID, or \c B_NOT_ALLOWED for the kernel team.
*/
static status_t
prepare_debugger_change(team_id teamID, ConditionVariable& condition,
	Team*& team)
{
	// We look up the team by ID, even in case of the current team, so we can be
	// sure, that the team is not already dying.
	if (teamID == B_CURRENT_TEAM)
		teamID = thread_get_current_thread()->team->id;

	while (true) {
		// get the team
		team = Team::GetAndLock(teamID);
		if (team == NULL)
			return B_BAD_TEAM_ID;
		BReference<Team> teamReference(team, true);
		TeamLocker teamLocker(team, true);

		// don't allow messing with the kernel team
		if (team == team_get_kernel_team())
			return B_NOT_ALLOWED;

		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return B_OK;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();
		teamLocker.Unlock();

		entry.Wait();
			// after waking up, retry from scratch -- the team may be gone
	}
}


/*!	Variant of the above for a team the caller already references.
	Installs \a condition as the team's debugger-changed condition variable,
	waiting for any previously installed one. Cannot fail.
*/
static void
prepare_debugger_change(Team* team, ConditionVariable& condition)
{
	while (true) {
		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();

		entry.Wait();
	}
}


/*!	Counterpart to prepare_debugger_change(): removes the caller's condition
	variable from the team and wakes up all waiters.
*/
static void
finish_debugger_change(Team* team)
{
	// unset our condition variable and notify all threads waiting on it
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
	team->debug_info.debugger_changed_condition = NULL;

	condition->NotifyAll();
}


/*!	Called by the current thread before exec_team() replaces its team's
	address space and deletes the team's ports.
*/
void
user_debug_prepare_for_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized, change the ownership of the debug port for the thread
	// to the kernel team, since exec_team() deletes all ports owned by this
	// team. We change the ownership back later.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if ((thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team_get_kernel_team_id());
	}
}


/*!	Called by the current thread after exec_team(); undoes the port ownership
	change done by user_debug_prepare_for_exec().
*/
void
user_debug_finish_after_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized for this thread, change the ownership of its debug port
	// back to this team.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team->id);
	}
}


void
init_user_debug()
{
#ifdef ARCH_INIT_USER_DEBUG
	ARCH_INIT_USER_DEBUG();
#endif
}


/*!	Copies the current team's debug info into \a teamDebugInfo, taking the
	team debug info lock (with interrupts disabled) for a consistent snapshot.
*/
static void
get_team_debug_info(team_debug_info &teamDebugInfo)
{
	Thread *thread = thread_get_current_thread();

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);

	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));

	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);
}


/*!	Core of the debug event loop: sends \a message to the debugger port and
	blocks on the thread's debug port, processing commands until the debugger
	tells the thread to continue.
	\param message The event message; its debug_origin header is updated in
		place (the \c const is cast away) before sending.
	\param requireDebugger If \c true, fail when no debugger is installed.
	\param restart Set to \c true when the debugger changed mid-event and the
		caller should invoke this function again.
	\return A B_THREAD_DEBUG_* handling code, or a negative error.
*/
static status_t
thread_hit_debug_event_internal(debug_debugger_message event,
	const void *message, int32 size, bool requireDebugger, bool &restart)
{
	restart = false;
	Thread *thread = thread_get_current_thread();

	TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", event: %" B_PRIu32
		", message: %p, size: %" B_PRId32 "\n", thread->id, (uint32)event,
		message, size));

	// check, if there's a debug port already
	bool setPort = !(atomic_get(&thread->debug_info.flags)
		& B_THREAD_DEBUG_INITIALIZED);

	// create a port, if there is none yet
	port_id port = -1;
	if (setPort) {
		char nameBuffer[128];
		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %" B_PRId32,
			thread->id);

		port = create_port(1, nameBuffer);
		if (port < 0) {
			dprintf("thread_hit_debug_event(): Failed to create debug port: "
				"%s\n", strerror(port));
			return port;
		}
	}

	// check the debug info structures once more: get the debugger port, set
	// the thread's debug port, and update the thread's debug flags
	port_id deletePort = port;
	port_id debuggerPort = -1;
	port_id nubPort = -1;
	status_t error = B_OK;
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	uint32 threadFlags = thread->debug_info.flags;
	threadFlags &= ~B_THREAD_DEBUG_STOP;
	bool debuggerInstalled
		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	if (thread->id == thread->team->debug_info.nub_thread) {
		// Ugh, we're the nub thread. We shouldn't be here.
		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %" B_PRId32
			"\n", thread->id));

		error = B_ERROR;
	} else if (debuggerInstalled || !requireDebugger) {
		if (debuggerInstalled) {
			debuggerPort = thread->team->debug_info.debugger_port;
			nubPort = thread->team->debug_info.nub_port;
		}

		if (setPort) {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				// someone created a port for us (the port we've created will
				// be deleted below)
				port = thread->debug_info.debug_port;
			} else {
				thread->debug_info.debug_port = port;
				deletePort = -1;	// keep the port
				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
			}
		} else {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				port = thread->debug_info.debug_port;
			} else {
				// someone deleted our port
				error = B_ERROR;
			}
		}
	} else
		error = B_ERROR;

	// update the flags
	if (error == B_OK)
		threadFlags |= B_THREAD_DEBUG_STOPPED;
	atomic_set(&thread->debug_info.flags, threadFlags);

	update_thread_user_debug_flag(thread);

	threadDebugInfoLocker.Unlock();
	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);

	// delete the superfluous port
	if (deletePort >= 0)
		delete_port(deletePort);

	if (error != B_OK) {
		TRACE(("thread_hit_debug_event() error: thread: %" B_PRId32 ", error: "
			"%" B_PRIx32 "\n", thread->id, error));
		return error;
	}

	// send a message to the debugger port
	if (debuggerInstalled) {
		// update the message's origin info first
		// (casts away const: the caller's buffer is modified in place)
		debug_origin *origin = (debug_origin *)message;
		origin->thread = thread->id;
		origin->team = thread->team->id;
		origin->nub_port = nubPort;

		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", sending "
			"message to debugger port %" B_PRId32 "\n", thread->id,
			debuggerPort));

		error = debugger_write(debuggerPort, event, message, size, false);
	}

	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
	bool singleStep = false;

	if (error == B_OK) {
		bool done = false;
		while (!done) {
			// read a command from the debug port
			int32 command;
			debugged_thread_message_data commandMessage;
			ssize_t commandMessageSize = read_port_etc(port, &command,
				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
				0);

			if (commandMessageSize < 0) {
				error = commandMessageSize;
				TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed "
					"to receive message from port %" B_PRId32 ": %" B_PRIx32 "\n",
					thread->id, port, error));
				break;
			}

			switch (command) {
				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
						thread->id));
					result = commandMessage.continue_thread.handle_event;

					singleStep = commandMessage.continue_thread.single_step;
					done = true;
					break;

				case B_DEBUGGED_THREAD_SET_CPU_STATE:
				{
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
						thread->id));
					arch_set_debug_cpu_state(
						&commandMessage.set_cpu_state.cpu_state);

					break;
				}

				case B_DEBUGGED_THREAD_GET_CPU_STATE:
				{
					port_id replyPort = commandMessage.get_cpu_state.reply_port;

					// prepare the message
					debug_nub_get_cpu_state_reply replyMessage;
					replyMessage.error = B_OK;
					replyMessage.message = event;
					arch_get_debug_cpu_state(&replyMessage.cpu_state);

					// send it
					error = kill_interruptable_write_port(replyPort, event,
						&replyMessage, sizeof(replyMessage));

					break;
				}

				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
				{
					// Check, if the debugger really changed, i.e. is different
					// than the one we know.
					team_debug_info teamDebugInfo;
					get_team_debug_info(teamDebugInfo);

					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
						if (!debuggerInstalled
							|| teamDebugInfo.debugger_port != debuggerPort) {
							// debugger was installed or has changed: restart
							// this function
							restart = true;
							done = true;
						}
					} else {
						if (debuggerInstalled) {
							// debugger is gone: continue the thread normally
							done = true;
						}
					}

					break;
				}
			}
		}
	} else {
		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed to send "
			"message to debugger port %" B_PRId32 ": %" B_PRIx32 "\n",
			thread->id, debuggerPort, error));
	}

	// update the thread debug info
	bool destroyThreadInfo = false;
	thread_debug_info threadDebugInfo;

	state = disable_interrupts();
	threadDebugInfoLocker.Lock();

	// check, if the team is still being debugged
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// update the single-step flag
		if (singleStep) {
			atomic_or(&thread->debug_info.flags,
				B_THREAD_DEBUG_SINGLE_STEP);
			atomic_or(&thread->flags, THREAD_FLAGS_SINGLE_STEP);
		} else {
			atomic_and(&thread->debug_info.flags,
				~(int32)B_THREAD_DEBUG_SINGLE_STEP);
		}

		// unset the "stopped" state
		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);

		update_thread_user_debug_flag(thread);

	} else {
		// the debugger is gone: cleanup our info completely
		threadDebugInfo = thread->debug_info;
		clear_thread_debug_info(&thread->debug_info, false);
		destroyThreadInfo = true;
	}

	threadDebugInfoLocker.Unlock();
	restore_interrupts(state);

	// enable/disable single stepping
	arch_update_thread_single_step();

	if (destroyThreadInfo)
		destroy_thread_debug_info(&threadDebugInfo);

	return (error == B_OK ? result : error);
}


/*!	Wrapper around thread_hit_debug_event_internal() that restarts the event
	loop whenever the debugger changed mid-event, then lets the breakpoint
	manager prepare the thread for continuing.
*/
static status_t
thread_hit_debug_event(debug_debugger_message event, const void *message,
	int32 size, bool requireDebugger)
{
	status_t result;
	bool restart;
	do {
		restart = false;
		result = thread_hit_debug_event_internal(event, message, size,
			requireDebugger, restart);
	} while (result >= 0 && restart);

	// Prepare to continue -- we install a debugger change condition, so no one
	// will change the debugger while we're playing with the breakpoint manager.
	// TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
	Team* team = thread_get_current_thread()->team;
	ConditionVariable debugChangeCondition;
	debugChangeCondition.Init(team, "debug change condition");
	prepare_debugger_change(team, debugChangeCondition);

	if (team->debug_info.breakpoint_manager != NULL) {
		bool isSyscall;
		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
		if (pc != NULL && !isSyscall)
			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
	}

	finish_debugger_change(team);

	return result;
}


/*!	Like thread_hit_debug_event(), but installs the default debugger first,
	if no debugger is installed for the team yet.
*/
static status_t
thread_hit_serious_debug_event(debug_debugger_message event,
	const void *message, int32 messageSize)
{
	// ensure that a debugger is installed for this team
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		Thread *thread = thread_get_current_thread();
		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
			"thread: %" B_PRId32 " (%s): %s\n", thread->id, thread->name,
			strerror(error));
		return error;
	}

	// enter the debug loop
	return thread_hit_debug_event(event, message, messageSize, true);
}


/*!	Called before a syscall is executed; notifies the debugger, if pre-syscall
	tracing is enabled for the team or the thread.
*/
void
user_debug_pre_syscall(uint32 syscall, void *args)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether pre-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_pre_syscall message;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
		sizeof(message), true);
}


/*!	Called after a syscall was executed; notifies the debugger, if post-syscall
	tracing is enabled for the team or the thread.
*/
void
user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
	bigtime_t startTime)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether post-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_post_syscall message;
	message.start_time = startTime;
	message.end_time = system_time();
	message.return_value = returnValue;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
		sizeof(message), true);
}


/** \brief To be called when an unhandled processor exception (error/fault)
 *		   occurred.
 *	\param exception The debug_why_stopped value identifying the kind of fault.
 *	\param signal The signal corresponding to the exception.
 *	\return \c true, if the caller shall continue normally, i.e. usually send
 *			a deadly signal. \c false, if the debugger insists to continue the
 *			program (e.g. because it has solved the removed the cause of the
 *			problem).
 */
bool
user_debug_exception_occurred(debug_exception_type exception, int signal)
{
	// First check whether there's a signal handler installed for the signal.
	// If so, we don't want to install a debugger for the team. We always send
	// the signal instead. An already installed debugger will be notified, if
	// it has requested notifications of signal.
	struct sigaction signalAction;
	if (sigaction(signal, NULL, &signalAction) == 0
		&& signalAction.sa_handler != SIG_DFL) {
		return true;
	}

	// prepare the message
	debug_exception_occurred message;
	message.exception = exception;
	message.signal = signal;

	status_t result = thread_hit_serious_debug_event(
		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


/*!	Called when the current thread receives a signal; notifies an installed
	debugger that requested signal notifications.
	\return \c false, if the debugger told the thread to ignore the event.
*/
bool
user_debug_handle_signal(int signal, struct sigaction *handler, siginfo_t *info,
	bool deadly)
{
	// check, if a debugger is installed and is interested in signals
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
		return true;
	}

	// prepare the message
	debug_signal_received message;
	message.signal = signal;
	message.handler = *handler;
	message.info = *info;
	message.deadly = deadly;

	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
		&message, sizeof(message), true);
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


/*!	Called when the current thread shall stop for the debugger, either due to
	an explicit stop request or an emulated single-step notification.
*/
void
user_debug_stop_thread()
{
	// check whether this is actually an emulated single-step notification
	Thread* thread = thread_get_current_thread();
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	bool singleStepped = false;
	// atomically test and clear the notify flag
	if ((atomic_and(&thread->debug_info.flags,
			~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
		& B_THREAD_DEBUG_NOTIFY_SINGLE_STEP) != 0) {
		singleStepped = true;
	}

	threadDebugInfoLocker.Unlock();

	if (singleStepped) {
		user_debug_single_stepped();
	} else {
		debug_thread_debugged message;
		thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED,
			&message, sizeof(message));
	}
}


/*!	Called when the current thread created a new team; notifies an installed
	debugger that requested team creation events.
*/
void
user_debug_team_created(team_id teamID)
{
	// check, if a debugger is installed and is interested in team creation
	// events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_created message;
	message.new_team = teamID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
		sizeof(message), true);
}


/*!	Called when a debugged team has been deleted; sends an asynchronous
	notification to the given debugger port (best effort, doesn't block).
*/
void
user_debug_team_deleted(team_id teamID, port_id debuggerPort, status_t status,
	team_usage_info* usageInfo)
{
	if (debuggerPort >= 0) {
		TRACE(("user_debug_team_deleted(team: %" B_PRId32 ", debugger port: "
			"%" B_PRId32 ")\n", teamID, debuggerPort));

		debug_team_deleted message;
		message.origin.thread = -1;
		message.origin.team = teamID;
		message.origin.nub_port = -1;
		message.status = status;
		message.usage = *usageInfo;
		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
			sizeof(message), B_RELATIVE_TIMEOUT, 0);
	}
}


/*!	Called by the current thread after an exec(); notifies an installed
	debugger that requested team creation events and bumps the image event
	counter.
*/
void
user_debug_team_exec()
{
	// check, if a debugger is installed and is interested in team creation
	// events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_exec message;
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
		sizeof(message), true);
}


/*!	Called by a new userland thread to update the debugging related flags of
	\c Thread::flags before the thread first enters userland.
	\param thread The calling thread.
*/
void
user_debug_update_new_thread_flags(Thread* thread)
{
	// lock it and update it's flags
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	update_thread_user_debug_flag(thread);
	update_thread_breakpoints_flag(thread);
	update_thread_debugger_installed_flag(thread);
}


/*!	Called when the current thread created a new thread; notifies an installed
	debugger that requested thread events.
*/
void
user_debug_thread_created(thread_id threadID)
{
	// check, if a debugger is installed and is interested in thread events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// prepare the message
	debug_thread_created message;
	message.new_thread = threadID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message,
		sizeof(message), true);
}


/*!	Called when a thread of a debugged team has been deleted; notifies the
	team's debugger, if it requested thread events.
*/
void
user_debug_thread_deleted(team_id teamID, thread_id threadID, status_t status)
{
	// Things are a bit complicated here, since this thread no longer belongs to
	// the debugged team (but to the kernel). So we can't use debugger_write().

	// get the team debug flags and debugger port
	Team* team = Team::Get(teamID);
	if (team == NULL)
		return;
	BReference<Team> teamReference(team, true);

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;
	sem_id writeLock = team->debug_info.debugger_write_lock;

	debugInfoLocker.Unlock();

	// check, if a debugger is installed and is interested in thread events
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// acquire the debugger write lock
	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK)
		return;

	// re-get the team debug info -- we need to check whether anything changed
	debugInfoLocker.Lock();

	teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id newDebuggerPort = team->debug_info.debugger_port;

	debugInfoLocker.Unlock();

	// Send the message only if the debugger hasn't changed in the meantime or
	// the team is about to be handed over.
	// NOTE(review): the second operand tests that the handover flag is NOT
	// set, which appears to contradict the comment above (compare
	// debugger_write(), which skips sending when the flag IS set) -- confirm
	// the intended semantics.
	if (newDebuggerPort == debuggerPort
		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
		debug_thread_deleted message;
		message.origin.thread = threadID;
		message.origin.team = teamID;
		message.origin.nub_port = -1;
		message.status = status;

		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
	}

	// release the debugger write lock
	release_sem(writeLock);
}


/*!	Called for a thread that is about to die, cleaning up all user debug
	facilities installed for the thread.
	\param thread The current thread, the one that is going to die.
*/
void
user_debug_thread_exiting(Thread* thread)
{
	// thread is the current thread, so using team is safe
	Team* team = thread->team;

	InterruptsLocker interruptsLocker;

	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	// check, if a debugger is installed
	if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
		|| debuggerPort < 0) {
		return;
	}

	// detach the profile info and mark the thread dying
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	thread_debug_info& threadDebugInfo = thread->debug_info;
	if (threadDebugInfo.profile.samples == NULL)
		return;

	// Copy the profiling state before clearing it, so the final update can be
	// sent to the debugger after the locks have been dropped.
	area_id sampleArea = threadDebugInfo.profile.sample_area;
	int32 sampleCount = threadDebugInfo.profile.sample_count;
	int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
	int32 stackDepth = threadDebugInfo.profile.stack_depth;
	bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
	int32 imageEvent = threadDebugInfo.profile.image_event;
	threadDebugInfo.profile.sample_area = -1;
	threadDebugInfo.profile.samples = NULL;
	threadDebugInfo.profile.buffer_full = false;

	atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);

	threadDebugInfoLocker.Unlock();
	interruptsLocker.Unlock();

	// notify the debugger
	debug_profiler_update message;
	message.origin.thread = thread->id;
	message.origin.team = thread->team->id;
	message.origin.nub_port = -1;	// asynchronous message
	message.sample_count = sampleCount;
	message.dropped_ticks = droppedTicks;
	message.stack_depth = stackDepth;
	message.variable_stack_depth = variableStackDepth;
	message.image_event = imageEvent;
	message.stopped = true;
	debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
		&message, sizeof(message), false);

	// tear down the sample buffer area
	if (sampleArea >= 0) {
		area_info areaInfo;
		if (get_area_info(sampleArea, &areaInfo) == B_OK) {
			unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
			delete_area(sampleArea);
		}
	}
}


/*!	Called when an image has been loaded into the current team, to notify an
	interested debugger.
	\param imageInfo Info about the image that has been created.
*/
void
user_debug_image_created(const image_info *imageInfo)
{
	// check, if a debugger is installed and is interested in image events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_created message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
		sizeof(message), true);
}


/*!	Called when an image has been removed from the current team, to notify an
	interested debugger.
	\param imageInfo Info about the image that has been deleted.
*/
void
user_debug_image_deleted(const image_info *imageInfo)
{
	// check, if a debugger is installed and is interested in image events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_deleted message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message,
		sizeof(message), true);
}


/*!	Called when the current thread hit a breakpoint, to stop the thread and
	notify the debugger.
	\param software Whether a software breakpoint was hit (unused in this
		function itself).
*/
void
user_debug_breakpoint_hit(bool software)
{
	// prepare the message
	debug_breakpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
		sizeof(message));
}


/*!	Called when the current thread hit a watchpoint, to stop the thread and
	notify the debugger.
*/
void
user_debug_watchpoint_hit()
{
	// prepare the message
	debug_watchpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
		sizeof(message));
}


/*!	Called when the current thread completed a single-stepped instruction, to
	stop the thread and notify the debugger.
*/
void
user_debug_single_stepped()
{
	// clear the single-step thread flag
	Thread* thread = thread_get_current_thread();
	atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_SINGLE_STEP);

	// prepare the message
	debug_single_step message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
		sizeof(message));
}


/*!	Schedules the profiling timer for the current thread.
	The caller must hold the thread's debug info lock.
	\param thread The current thread.
	\param interval The time after which the timer should fire.
*/
static void
schedule_profiling_timer(Thread* thread, bigtime_t interval)
{
	// One timer per CPU; user_debug_thread_unscheduled() cancels it again
	// when the thread is taken off the CPU.
	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
	thread->debug_info.profile.installed_timer = timer;
	thread->debug_info.profile.timer_end = system_time() + interval;
	add_timer(timer, &profiling_event, interval, B_ONE_SHOT_RELATIVE_TIMER);
}


/*!	Samples the current thread's instruction pointer/stack trace.
	The caller must hold the current thread's debug info lock.
	\param flushBuffer Return parameter: Set to \c true when the sampling
		buffer must be flushed.
*/
static bool
profiling_do_sample(bool& flushBuffer)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	// profiling may have been stopped in the meantime
	if (debugInfo.profile.samples == NULL)
		return false;

	// Check, whether the buffer is full or an image event occurred since the
	// last sample was taken.
	int32 maxSamples = debugInfo.profile.max_samples;
	int32 sampleCount = debugInfo.profile.sample_count;
	int32 stackDepth = debugInfo.profile.stack_depth;
	int32 imageEvent = thread->team->debug_info.image_event;
	if (debugInfo.profile.sample_count > 0) {
		if (debugInfo.profile.last_image_event < imageEvent
			&& debugInfo.profile.variable_stack_depth
			&& sampleCount + 2 <= maxSamples) {
			// an image event occurred, but we use variable stack depth and
			// have enough room in the buffer to indicate an image event
			addr_t* event = debugInfo.profile.samples + sampleCount;
			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
			event[1] = imageEvent;
			sampleCount += 2;
			debugInfo.profile.sample_count = sampleCount;
			debugInfo.profile.last_image_event = imageEvent;
		}

		// Flush when an image event is still pending (couldn't be recorded
		// inline above) or the flush threshold has been reached.
		if (debugInfo.profile.last_image_event < imageEvent
			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
			if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
				flushBuffer = true;
				return true;
			}

			// We can't flush the buffer now, since we interrupted a kernel
			// function. If the buffer is not full yet, we add the samples,
			// otherwise we have to drop them.
			if (maxSamples - sampleCount < stackDepth) {
				debugInfo.profile.dropped_ticks++;
				return true;
			}
		}
	} else {
		// first sample -- set the image event
		debugInfo.profile.image_event = imageEvent;
		debugInfo.profile.last_image_event = imageEvent;
	}

	// get the samples
	addr_t* returnAddresses = debugInfo.profile.samples
		+ debugInfo.profile.sample_count;
	if (debugInfo.profile.variable_stack_depth) {
		// variable sample count per hit -- the first slot receives the number
		// of return addresses that follow
		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
			stackDepth - 1, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

		debugInfo.profile.sample_count += *returnAddresses + 1;
	} else {
		// fixed sample count per hit -- shorter traces are padded with 0
		if (stackDepth > 1) {
			int32 count = arch_debug_get_stack_trace(returnAddresses,
				stackDepth, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

			for (int32 i = count; i < stackDepth; i++)
				returnAddresses[i] = 0;
		} else
			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);

		debugInfo.profile.sample_count += stackDepth;
	}

	return true;
}


/*!	Post interrupt callback that notifies the debugger when the profiling
	sample buffer has run full (installed by profiling_event()).
*/
static void
profiling_buffer_full(void*)
{
	// It is undefined whether the function is called with interrupts enabled
	// or disabled. We are allowed to enable interrupts, though. First make
	// sure interrupts are disabled.
	disable_interrupts();

	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	// only act, if profiling is still active and the buffer is still full
	if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
		int32 sampleCount = debugInfo.profile.sample_count;
		int32 droppedTicks = debugInfo.profile.dropped_ticks;
		int32 stackDepth = debugInfo.profile.stack_depth;
		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
		int32 imageEvent = debugInfo.profile.image_event;

		// notify the debugger
		debugInfo.profile.sample_count = 0;
		debugInfo.profile.dropped_ticks = 0;

		// The message is sent with the lock dropped and interrupts enabled,
		// since thread_hit_debug_event() may block.
		threadDebugInfoLocker.Unlock();
		enable_interrupts();

		// prepare the message
		debug_profiler_update message;
		message.sample_count = sampleCount;
		message.dropped_ticks = droppedTicks;
		message.stack_depth = stackDepth;
		message.variable_stack_depth = variableStackDepth;
		message.image_event = imageEvent;
		message.stopped = false;

		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
			sizeof(message), false);

		disable_interrupts();
		threadDebugInfoLocker.Lock();

		// do the sampling and reschedule timer, if still profiling this thread
		bool flushBuffer;
		if (profiling_do_sample(flushBuffer)) {
			debugInfo.profile.buffer_full = false;
			schedule_profiling_timer(thread, debugInfo.profile.interval);
		}
	}

	threadDebugInfoLocker.Unlock();
	enable_interrupts();
}


/*!	Profiling timer event callback.
	Called with interrupts disabled.
*/
static int32
profiling_event(timer* /*unused*/)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	bool flushBuffer = false;
	if (profiling_do_sample(flushBuffer)) {
		if (flushBuffer) {
			// The sample buffer needs to be flushed; we'll have to notify the
			// debugger. We can't do that right here. Instead we set a post
			// interrupt callback doing that for us, and don't reschedule the
			// timer yet.
			thread->post_interrupt_callback = profiling_buffer_full;
			debugInfo.profile.installed_timer = NULL;
			debugInfo.profile.buffer_full = true;
		} else
			schedule_profiling_timer(thread, debugInfo.profile.interval);
	} else
		debugInfo.profile.installed_timer = NULL;

	return B_HANDLED_INTERRUPT;
}


/*!	Called by the scheduler when a debugged thread has been unscheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_unscheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// if running, cancel the profiling timer
	struct timer* timer = thread->debug_info.profile.installed_timer;
	if (timer != NULL) {
		// track remaining time, so profiling can resume with the remainder
		// when the thread is scheduled again
		bigtime_t left = thread->debug_info.profile.timer_end - system_time();
		thread->debug_info.profile.interval_left = max_c(left, 0);
		thread->debug_info.profile.installed_timer = NULL;

		// cancel timer
		threadDebugInfoLocker.Unlock();
			// not necessary, but doesn't harm and reduces contention
		cancel_timer(timer);
			// since invoked on the same CPU, this will not possibly wait for
			// an already called timer hook
	}
}


/*!	Called by the scheduler when a debugged thread has been scheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_scheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// Resume profiling with the interval remainder tracked by
	// user_debug_thread_unscheduled().
	if (thread->debug_info.profile.samples != NULL
		&& !thread->debug_info.profile.buffer_full) {
		// install profiling timer
		schedule_profiling_timer(thread,
			thread->debug_info.profile.interval_left);
	}
}


/*!	\brief Called by the debug nub thread of a team to broadcast a message to
		all threads of the team that are initialized for debugging (and
		thus have a debug port).
*/
static void
broadcast_debugged_thread_message(Thread *nubThread, int32 code,
	const void *message, int32 size)
{
	// iterate through the threads
	thread_info threadInfo;
	int32 cookie = 0;
	while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
			== B_OK) {
		// get the thread and lock it
		Thread* thread = Thread::GetAndLock(threadInfo.thread);
		if (thread == NULL)
			continue;

		BReference<Thread> threadReference(thread, true);
		ThreadLocker threadLocker(thread, true);

		// get the thread's debug port
		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		// only address stopped, debug-initialized threads of this team,
		// excluding the nub thread itself
		port_id threadDebugPort = -1;
		if (thread && thread != nubThread && thread->team == nubThread->team
			&& (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
			&& (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
			threadDebugPort = thread->debug_info.debug_port;
		}

		threadDebugInfoLocker.Unlock();
		threadLocker.Unlock();

		// send the message to the thread
		if (threadDebugPort >= 0) {
			status_t error = kill_interruptable_write_port(threadDebugPort,
				code, message, size);
			if (error != B_OK) {
				TRACE(("broadcast_debugged_thread_message(): Failed to send "
					"message to thread %" B_PRId32 ": %" B_PRIx32 "\n",
					thread->id, error));
			}
		}
	}
}


/*!	Clears and destroys the team's debug info when its nub thread terminates,
	provided the calling thread is still registered as the team's nub thread,
	and notifies all debugged threads that the debugger is gone.
*/
static void
nub_thread_cleanup(Thread *nubThread)
{
	TRACE(("nub_thread_cleanup(%" B_PRId32 "): debugger port: %" B_PRId32 "\n",
		nubThread->id, nubThread->team->debug_info.debugger_port));

	ConditionVariable debugChangeCondition;
	debugChangeCondition.Init(nubThread->team, "debug change condition");
	prepare_debugger_change(nubThread->team, debugChangeCondition);

	team_debug_info teamDebugInfo;
	bool destroyDebugInfo = false;

	TeamLocker teamLocker(nubThread->team);
		// required by update_threads_debugger_installed_flag()

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	// only clean up, if we are (still) the team's nub thread
	team_debug_info &info = nubThread->team->debug_info;
	if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
		&& info.nub_thread == nubThread->id) {
		teamDebugInfo = info;
		clear_team_debug_info(&info, false);
		destroyDebugInfo = true;
	}

	// update the thread::flags fields
	update_threads_debugger_installed_flag(nubThread->team);

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	teamLocker.Unlock();

	if (destroyDebugInfo)
		teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();

	finish_debugger_change(nubThread->team);

	if (destroyDebugInfo)
		destroy_team_debug_info(&teamDebugInfo);

	// notify all threads that the debugger is gone
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
}


/**	\brief Debug nub thread helper function that returns the debug port of
 *	a thread of the same team.
 */
static status_t
debug_nub_thread_get_thread_debug_port(Thread *nubThread,
	thread_id threadID, port_id &threadDebugPort)
{
	threadDebugPort = -1;

	// get the thread
	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// get the debug port
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// the thread must belong to the nub's team and be stopped in the debugger
	if (thread->team != nubThread->team)
		return B_BAD_VALUE;
	if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) == 0)
		return B_BAD_THREAD_STATE;

	threadDebugPort = thread->debug_info.debug_port;

	threadDebugInfoLocker.Unlock();

	if (threadDebugPort < 0)
		return B_ERROR;

	return B_OK;
}


/*!	The team's debug nub thread: processes debugger commands arriving on the
	team's nub port until the port is deleted or the thread is interrupted by
	a kill signal.
*/
static status_t
debug_nub_thread(void *)
{
	Thread *nubThread = thread_get_current_thread();

	// check, if we're still the current nub thread and get our port
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	if (nubThread->team->debug_info.nub_thread != nubThread->id) {
		RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
		restore_interrupts(state);
		return 0;
	}

	port_id port = nubThread->team->debug_info.nub_port;
	sem_id writeLock = nubThread->team->debug_info.debugger_write_lock;
	BreakpointManager* breakpointManager
		= nubThread->team->debug_info.breakpoint_manager;

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	TRACE(("debug_nub_thread() thread: %" B_PRId32 ", team %" B_PRId32 ", nub "
		"port: %" B_PRId32 "\n", nubThread->id, nubThread->team->id, port));

	// notify all threads that a debugger has been installed
	broadcast_debugged_thread_message(nubThread,
B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0); 1679 1680 // command processing loop 1681 while (true) { 1682 int32 command; 1683 debug_nub_message_data message; 1684 ssize_t messageSize = read_port_etc(port, &command, &message, 1685 sizeof(message), B_KILL_CAN_INTERRUPT, 0); 1686 1687 if (messageSize < 0) { 1688 // The port is no longer valid or we were interrupted by a kill 1689 // signal: If we are still listed in the team's debug info as nub 1690 // thread, we need to update that. 1691 nub_thread_cleanup(nubThread); 1692 1693 TRACE(("nub thread %" B_PRId32 ": terminating: %lx\n", 1694 nubThread->id, messageSize)); 1695 1696 return messageSize; 1697 } 1698 1699 bool sendReply = false; 1700 union { 1701 debug_nub_read_memory_reply read_memory; 1702 debug_nub_write_memory_reply write_memory; 1703 debug_nub_get_cpu_state_reply get_cpu_state; 1704 debug_nub_set_breakpoint_reply set_breakpoint; 1705 debug_nub_set_watchpoint_reply set_watchpoint; 1706 debug_nub_get_signal_masks_reply get_signal_masks; 1707 debug_nub_get_signal_handler_reply get_signal_handler; 1708 debug_nub_start_profiler_reply start_profiler; 1709 debug_profiler_update profiler_update; 1710 debug_nub_write_core_file_reply write_core_file; 1711 } reply; 1712 int32 replySize = 0; 1713 port_id replyPort = -1; 1714 1715 // process the command 1716 switch (command) { 1717 case B_DEBUG_MESSAGE_READ_MEMORY: 1718 { 1719 // get the parameters 1720 replyPort = message.read_memory.reply_port; 1721 void *address = message.read_memory.address; 1722 int32 size = message.read_memory.size; 1723 status_t result = B_OK; 1724 1725 // check the parameters 1726 if (!BreakpointManager::CanAccessAddress(address, false)) 1727 result = B_BAD_ADDRESS; 1728 else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE) 1729 result = B_BAD_VALUE; 1730 1731 // read the memory 1732 size_t bytesRead = 0; 1733 if (result == B_OK) { 1734 result = breakpointManager->ReadMemory(address, 1735 reply.read_memory.data, size, bytesRead); 1736 } 
1737 reply.read_memory.error = result; 1738 1739 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_READ_MEMORY: " 1740 "reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32 1741 ", result: %" B_PRIx32 ", read: %ld\n", nubThread->id, 1742 replyPort, address, size, result, bytesRead)); 1743 1744 // send only as much data as necessary 1745 reply.read_memory.size = bytesRead; 1746 replySize = reply.read_memory.data + bytesRead - (char*)&reply; 1747 sendReply = true; 1748 break; 1749 } 1750 1751 case B_DEBUG_MESSAGE_WRITE_MEMORY: 1752 { 1753 // get the parameters 1754 replyPort = message.write_memory.reply_port; 1755 void *address = message.write_memory.address; 1756 int32 size = message.write_memory.size; 1757 const char *data = message.write_memory.data; 1758 int32 realSize = (char*)&message + messageSize - data; 1759 status_t result = B_OK; 1760 1761 // check the parameters 1762 if (!BreakpointManager::CanAccessAddress(address, true)) 1763 result = B_BAD_ADDRESS; 1764 else if (size <= 0 || size > realSize) 1765 result = B_BAD_VALUE; 1766 1767 // write the memory 1768 size_t bytesWritten = 0; 1769 if (result == B_OK) { 1770 result = breakpointManager->WriteMemory(address, data, size, 1771 bytesWritten); 1772 } 1773 reply.write_memory.error = result; 1774 1775 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_WRITE_MEMORY: " 1776 "reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32 1777 ", result: %" B_PRIx32 ", written: %ld\n", nubThread->id, 1778 replyPort, address, size, result, bytesWritten)); 1779 1780 reply.write_memory.size = bytesWritten; 1781 sendReply = true; 1782 replySize = sizeof(debug_nub_write_memory_reply); 1783 break; 1784 } 1785 1786 case B_DEBUG_MESSAGE_SET_TEAM_FLAGS: 1787 { 1788 // get the parameters 1789 int32 flags = message.set_team_flags.flags 1790 & B_TEAM_DEBUG_USER_FLAG_MASK; 1791 1792 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_TEAM_FLAGS" 1793 ": flags: %" B_PRIx32 "\n", nubThread->id, flags)); 1794 1795 Team *team = 
thread_get_current_thread()->team; 1796 1797 // set the flags 1798 cpu_status state = disable_interrupts(); 1799 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1800 1801 flags |= team->debug_info.flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK; 1802 atomic_set(&team->debug_info.flags, flags); 1803 1804 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1805 restore_interrupts(state); 1806 1807 break; 1808 } 1809 1810 case B_DEBUG_MESSAGE_SET_THREAD_FLAGS: 1811 { 1812 // get the parameters 1813 thread_id threadID = message.set_thread_flags.thread; 1814 int32 flags = message.set_thread_flags.flags 1815 & B_THREAD_DEBUG_USER_FLAG_MASK; 1816 1817 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_THREAD_FLAGS" 1818 ": thread: %" B_PRId32 ", flags: %" B_PRIx32 "\n", 1819 nubThread->id, threadID, flags)); 1820 1821 // set the flags 1822 Thread* thread = Thread::GetAndLock(threadID); 1823 if (thread == NULL) 1824 break; 1825 BReference<Thread> threadReference(thread, true); 1826 ThreadLocker threadLocker(thread, true); 1827 1828 InterruptsSpinLocker threadDebugInfoLocker( 1829 thread->debug_info.lock); 1830 1831 if (thread->team == thread_get_current_thread()->team) { 1832 flags |= thread->debug_info.flags 1833 & B_THREAD_DEBUG_KERNEL_FLAG_MASK; 1834 atomic_set(&thread->debug_info.flags, flags); 1835 } 1836 1837 break; 1838 } 1839 1840 case B_DEBUG_MESSAGE_CONTINUE_THREAD: 1841 { 1842 // get the parameters 1843 thread_id threadID; 1844 uint32 handleEvent; 1845 bool singleStep; 1846 1847 threadID = message.continue_thread.thread; 1848 handleEvent = message.continue_thread.handle_event; 1849 singleStep = message.continue_thread.single_step; 1850 1851 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CONTINUE_THREAD" 1852 ": thread: %" B_PRId32 ", handle event: %" B_PRIu32 ", " 1853 "single step: %d\n", nubThread->id, threadID, handleEvent, 1854 singleStep)); 1855 1856 // find the thread and get its debug port 1857 port_id threadDebugPort = -1; 1858 status_t result = 
debug_nub_thread_get_thread_debug_port( 1859 nubThread, threadID, threadDebugPort); 1860 1861 // send a message to the debugged thread 1862 if (result == B_OK) { 1863 debugged_thread_continue commandMessage; 1864 commandMessage.handle_event = handleEvent; 1865 commandMessage.single_step = singleStep; 1866 1867 result = write_port(threadDebugPort, 1868 B_DEBUGGED_THREAD_MESSAGE_CONTINUE, 1869 &commandMessage, sizeof(commandMessage)); 1870 } else if (result == B_BAD_THREAD_STATE) { 1871 Thread* thread = Thread::GetAndLock(threadID); 1872 if (thread == NULL) 1873 break; 1874 1875 BReference<Thread> threadReference(thread, true); 1876 ThreadLocker threadLocker(thread, true); 1877 if (thread->state == B_THREAD_SUSPENDED) { 1878 threadLocker.Unlock(); 1879 resume_thread(threadID); 1880 break; 1881 } 1882 } 1883 1884 break; 1885 } 1886 1887 case B_DEBUG_MESSAGE_SET_CPU_STATE: 1888 { 1889 // get the parameters 1890 thread_id threadID = message.set_cpu_state.thread; 1891 const debug_cpu_state &cpuState 1892 = message.set_cpu_state.cpu_state; 1893 1894 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_CPU_STATE" 1895 ": thread: %" B_PRId32 "\n", nubThread->id, threadID)); 1896 1897 // find the thread and get its debug port 1898 port_id threadDebugPort = -1; 1899 status_t result = debug_nub_thread_get_thread_debug_port( 1900 nubThread, threadID, threadDebugPort); 1901 1902 // send a message to the debugged thread 1903 if (result == B_OK) { 1904 debugged_thread_set_cpu_state commandMessage; 1905 memcpy(&commandMessage.cpu_state, &cpuState, 1906 sizeof(debug_cpu_state)); 1907 write_port(threadDebugPort, 1908 B_DEBUGGED_THREAD_SET_CPU_STATE, 1909 &commandMessage, sizeof(commandMessage)); 1910 } 1911 1912 break; 1913 } 1914 1915 case B_DEBUG_MESSAGE_GET_CPU_STATE: 1916 { 1917 // get the parameters 1918 thread_id threadID = message.get_cpu_state.thread; 1919 replyPort = message.get_cpu_state.reply_port; 1920 1921 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_CPU_STATE" 
1922 ": thread: %" B_PRId32 "\n", nubThread->id, threadID)); 1923 1924 // find the thread and get its debug port 1925 port_id threadDebugPort = -1; 1926 status_t result = debug_nub_thread_get_thread_debug_port( 1927 nubThread, threadID, threadDebugPort); 1928 1929 // send a message to the debugged thread 1930 if (threadDebugPort >= 0) { 1931 debugged_thread_get_cpu_state commandMessage; 1932 commandMessage.reply_port = replyPort; 1933 result = write_port(threadDebugPort, 1934 B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage, 1935 sizeof(commandMessage)); 1936 } 1937 1938 // send a reply to the debugger in case of error 1939 if (result != B_OK) { 1940 reply.get_cpu_state.error = result; 1941 sendReply = true; 1942 replySize = sizeof(reply.get_cpu_state); 1943 } 1944 1945 break; 1946 } 1947 1948 case B_DEBUG_MESSAGE_SET_BREAKPOINT: 1949 { 1950 // get the parameters 1951 replyPort = message.set_breakpoint.reply_port; 1952 void *address = message.set_breakpoint.address; 1953 1954 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_BREAKPOINT" 1955 ": address: %p\n", nubThread->id, address)); 1956 1957 // check the address 1958 status_t result = B_OK; 1959 if (address == NULL 1960 || !BreakpointManager::CanAccessAddress(address, false)) { 1961 result = B_BAD_ADDRESS; 1962 } 1963 1964 // set the breakpoint 1965 if (result == B_OK) 1966 result = breakpointManager->InstallBreakpoint(address); 1967 1968 if (result == B_OK) 1969 update_threads_breakpoints_flag(); 1970 1971 // prepare the reply 1972 reply.set_breakpoint.error = result; 1973 replySize = sizeof(reply.set_breakpoint); 1974 sendReply = true; 1975 1976 break; 1977 } 1978 1979 case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT: 1980 { 1981 // get the parameters 1982 void *address = message.clear_breakpoint.address; 1983 1984 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_BREAKPOINT" 1985 ": address: %p\n", nubThread->id, address)); 1986 1987 // check the address 1988 status_t result = B_OK; 1989 if (address == NULL 
1990 || !BreakpointManager::CanAccessAddress(address, false)) { 1991 result = B_BAD_ADDRESS; 1992 } 1993 1994 // clear the breakpoint 1995 if (result == B_OK) 1996 result = breakpointManager->UninstallBreakpoint(address); 1997 1998 if (result == B_OK) 1999 update_threads_breakpoints_flag(); 2000 2001 break; 2002 } 2003 2004 case B_DEBUG_MESSAGE_SET_WATCHPOINT: 2005 { 2006 // get the parameters 2007 replyPort = message.set_watchpoint.reply_port; 2008 void *address = message.set_watchpoint.address; 2009 uint32 type = message.set_watchpoint.type; 2010 int32 length = message.set_watchpoint.length; 2011 2012 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_WATCHPOINT" 2013 ": address: %p, type: %" B_PRIu32 ", length: %" B_PRId32 "\n", 2014 nubThread->id, address, type, length)); 2015 2016 // check the address and size 2017 status_t result = B_OK; 2018 if (address == NULL 2019 || !BreakpointManager::CanAccessAddress(address, false)) { 2020 result = B_BAD_ADDRESS; 2021 } 2022 if (length < 0) 2023 result = B_BAD_VALUE; 2024 2025 // set the watchpoint 2026 if (result == B_OK) { 2027 result = breakpointManager->InstallWatchpoint(address, type, 2028 length); 2029 } 2030 2031 if (result == B_OK) 2032 update_threads_breakpoints_flag(); 2033 2034 // prepare the reply 2035 reply.set_watchpoint.error = result; 2036 replySize = sizeof(reply.set_watchpoint); 2037 sendReply = true; 2038 2039 break; 2040 } 2041 2042 case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT: 2043 { 2044 // get the parameters 2045 void *address = message.clear_watchpoint.address; 2046 2047 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_WATCHPOINT" 2048 ": address: %p\n", nubThread->id, address)); 2049 2050 // check the address 2051 status_t result = B_OK; 2052 if (address == NULL 2053 || !BreakpointManager::CanAccessAddress(address, false)) { 2054 result = B_BAD_ADDRESS; 2055 } 2056 2057 // clear the watchpoint 2058 if (result == B_OK) 2059 result = breakpointManager->UninstallWatchpoint(address); 2060 2061 
if (result == B_OK) 2062 update_threads_breakpoints_flag(); 2063 2064 break; 2065 } 2066 2067 case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS: 2068 { 2069 // get the parameters 2070 thread_id threadID = message.set_signal_masks.thread; 2071 uint64 ignore = message.set_signal_masks.ignore_mask; 2072 uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask; 2073 uint32 ignoreOp = message.set_signal_masks.ignore_op; 2074 uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op; 2075 2076 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_MASKS" 2077 ": thread: %" B_PRId32 ", ignore: %" B_PRIx64 " (op: %" 2078 B_PRIu32 "), ignore once: %" B_PRIx64 " (op: %" B_PRIu32 2079 ")\n", nubThread->id, threadID, ignore, ignoreOp, 2080 ignoreOnce, ignoreOnceOp)); 2081 2082 // set the masks 2083 Thread* thread = Thread::GetAndLock(threadID); 2084 if (thread == NULL) 2085 break; 2086 BReference<Thread> threadReference(thread, true); 2087 ThreadLocker threadLocker(thread, true); 2088 2089 InterruptsSpinLocker threadDebugInfoLocker( 2090 thread->debug_info.lock); 2091 2092 if (thread->team == thread_get_current_thread()->team) { 2093 thread_debug_info &threadDebugInfo = thread->debug_info; 2094 // set ignore mask 2095 switch (ignoreOp) { 2096 case B_DEBUG_SIGNAL_MASK_AND: 2097 threadDebugInfo.ignore_signals &= ignore; 2098 break; 2099 case B_DEBUG_SIGNAL_MASK_OR: 2100 threadDebugInfo.ignore_signals |= ignore; 2101 break; 2102 case B_DEBUG_SIGNAL_MASK_SET: 2103 threadDebugInfo.ignore_signals = ignore; 2104 break; 2105 } 2106 2107 // set ignore once mask 2108 switch (ignoreOnceOp) { 2109 case B_DEBUG_SIGNAL_MASK_AND: 2110 threadDebugInfo.ignore_signals_once &= ignoreOnce; 2111 break; 2112 case B_DEBUG_SIGNAL_MASK_OR: 2113 threadDebugInfo.ignore_signals_once |= ignoreOnce; 2114 break; 2115 case B_DEBUG_SIGNAL_MASK_SET: 2116 threadDebugInfo.ignore_signals_once = ignoreOnce; 2117 break; 2118 } 2119 } 2120 2121 break; 2122 } 2123 2124 case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS: 2125 { 
2126 // get the parameters 2127 replyPort = message.get_signal_masks.reply_port; 2128 thread_id threadID = message.get_signal_masks.thread; 2129 status_t result = B_OK; 2130 2131 // get the masks 2132 uint64 ignore = 0; 2133 uint64 ignoreOnce = 0; 2134 2135 Thread* thread = Thread::GetAndLock(threadID); 2136 if (thread != NULL) { 2137 BReference<Thread> threadReference(thread, true); 2138 ThreadLocker threadLocker(thread, true); 2139 2140 InterruptsSpinLocker threadDebugInfoLocker( 2141 thread->debug_info.lock); 2142 2143 ignore = thread->debug_info.ignore_signals; 2144 ignoreOnce = thread->debug_info.ignore_signals_once; 2145 } else 2146 result = B_BAD_THREAD_ID; 2147 2148 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_MASKS" 2149 ": reply port: %" B_PRId32 ", thread: %" B_PRId32 ", " 2150 "ignore: %" B_PRIx64 ", ignore once: %" B_PRIx64 ", result: " 2151 "%" B_PRIx32 "\n", nubThread->id, replyPort, threadID, 2152 ignore, ignoreOnce, result)); 2153 2154 // prepare the message 2155 reply.get_signal_masks.error = result; 2156 reply.get_signal_masks.ignore_mask = ignore; 2157 reply.get_signal_masks.ignore_once_mask = ignoreOnce; 2158 replySize = sizeof(reply.get_signal_masks); 2159 sendReply = true; 2160 break; 2161 } 2162 2163 case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER: 2164 { 2165 // get the parameters 2166 int signal = message.set_signal_handler.signal; 2167 struct sigaction &handler = message.set_signal_handler.handler; 2168 2169 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER" 2170 ": signal: %d, handler: %p\n", nubThread->id, signal, 2171 handler.sa_handler)); 2172 2173 // set the handler 2174 sigaction(signal, &handler, NULL); 2175 2176 break; 2177 } 2178 2179 case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER: 2180 { 2181 // get the parameters 2182 replyPort = message.get_signal_handler.reply_port; 2183 int signal = message.get_signal_handler.signal; 2184 status_t result = B_OK; 2185 2186 // get the handler 2187 if (sigaction(signal, 
NULL, &reply.get_signal_handler.handler) 2188 != 0) { 2189 result = errno; 2190 } 2191 2192 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER" 2193 ": reply port: %" B_PRId32 ", signal: %d, handler: %p\n", 2194 nubThread->id, replyPort, signal, 2195 reply.get_signal_handler.handler.sa_handler)); 2196 2197 // prepare the message 2198 reply.get_signal_handler.error = result; 2199 replySize = sizeof(reply.get_signal_handler); 2200 sendReply = true; 2201 break; 2202 } 2203 2204 case B_DEBUG_MESSAGE_PREPARE_HANDOVER: 2205 { 2206 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_PREPARE_HANDOVER" 2207 "\n", nubThread->id)); 2208 2209 Team *team = nubThread->team; 2210 2211 // Acquire the debugger write lock. As soon as we have it and 2212 // have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread 2213 // will write anything to the debugger port anymore. 2214 status_t result = acquire_sem_etc(writeLock, 1, 2215 B_KILL_CAN_INTERRUPT, 0); 2216 if (result == B_OK) { 2217 // set the respective team debug flag 2218 cpu_status state = disable_interrupts(); 2219 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2220 2221 atomic_or(&team->debug_info.flags, 2222 B_TEAM_DEBUG_DEBUGGER_HANDOVER); 2223 BreakpointManager* breakpointManager 2224 = team->debug_info.breakpoint_manager; 2225 2226 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2227 restore_interrupts(state); 2228 2229 // remove all installed breakpoints 2230 breakpointManager->RemoveAllBreakpoints(); 2231 2232 release_sem(writeLock); 2233 } else { 2234 // We probably got a SIGKILL. If so, we will terminate when 2235 // reading the next message fails. 
2236 } 2237 2238 break; 2239 } 2240 2241 case B_DEBUG_MESSAGE_HANDED_OVER: 2242 { 2243 // notify all threads that the debugger has changed 2244 broadcast_debugged_thread_message(nubThread, 2245 B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0); 2246 2247 break; 2248 } 2249 2250 case B_DEBUG_START_PROFILER: 2251 { 2252 // get the parameters 2253 thread_id threadID = message.start_profiler.thread; 2254 replyPort = message.start_profiler.reply_port; 2255 area_id sampleArea = message.start_profiler.sample_area; 2256 int32 stackDepth = message.start_profiler.stack_depth; 2257 bool variableStackDepth 2258 = message.start_profiler.variable_stack_depth; 2259 bigtime_t interval = max_c(message.start_profiler.interval, 2260 B_DEBUG_MIN_PROFILE_INTERVAL); 2261 status_t result = B_OK; 2262 2263 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_START_PROFILER: " 2264 "thread: %" B_PRId32 ", sample area: %" B_PRId32 "\n", 2265 nubThread->id, threadID, sampleArea)); 2266 2267 if (stackDepth < 1) 2268 stackDepth = 1; 2269 else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH) 2270 stackDepth = B_DEBUG_STACK_TRACE_DEPTH; 2271 2272 // provision for an extra entry per hit (for the number of 2273 // samples), if variable stack depth 2274 if (variableStackDepth) 2275 stackDepth++; 2276 2277 // clone the sample area 2278 area_info areaInfo; 2279 if (result == B_OK) 2280 result = get_area_info(sampleArea, &areaInfo); 2281 2282 area_id clonedSampleArea = -1; 2283 void* samples = NULL; 2284 if (result == B_OK) { 2285 clonedSampleArea = clone_area("profiling samples", &samples, 2286 B_ANY_KERNEL_ADDRESS, 2287 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 2288 sampleArea); 2289 if (clonedSampleArea >= 0) { 2290 // we need the memory locked 2291 result = lock_memory(samples, areaInfo.size, 2292 B_READ_DEVICE); 2293 if (result != B_OK) { 2294 delete_area(clonedSampleArea); 2295 clonedSampleArea = -1; 2296 } 2297 } else 2298 result = clonedSampleArea; 2299 } 2300 2301 // get the thread and set the profile info 2302 
int32 imageEvent = nubThread->team->debug_info.image_event; 2303 if (result == B_OK) { 2304 Thread* thread = Thread::GetAndLock(threadID); 2305 BReference<Thread> threadReference(thread, true); 2306 ThreadLocker threadLocker(thread, true); 2307 2308 if (thread != NULL && thread->team == nubThread->team) { 2309 thread_debug_info &threadDebugInfo = thread->debug_info; 2310 2311 InterruptsSpinLocker threadDebugInfoLocker( 2312 threadDebugInfo.lock); 2313 2314 if (threadDebugInfo.profile.samples == NULL) { 2315 threadDebugInfo.profile.interval = interval; 2316 threadDebugInfo.profile.sample_area 2317 = clonedSampleArea; 2318 threadDebugInfo.profile.samples = (addr_t*)samples; 2319 threadDebugInfo.profile.max_samples 2320 = areaInfo.size / sizeof(addr_t); 2321 threadDebugInfo.profile.flush_threshold 2322 = threadDebugInfo.profile.max_samples 2323 * B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD 2324 / 100; 2325 threadDebugInfo.profile.sample_count = 0; 2326 threadDebugInfo.profile.dropped_ticks = 0; 2327 threadDebugInfo.profile.stack_depth = stackDepth; 2328 threadDebugInfo.profile.variable_stack_depth 2329 = variableStackDepth; 2330 threadDebugInfo.profile.buffer_full = false; 2331 threadDebugInfo.profile.interval_left = interval; 2332 threadDebugInfo.profile.installed_timer = NULL; 2333 threadDebugInfo.profile.image_event = imageEvent; 2334 threadDebugInfo.profile.last_image_event 2335 = imageEvent; 2336 } else 2337 result = B_BAD_VALUE; 2338 } else 2339 result = B_BAD_THREAD_ID; 2340 } 2341 2342 // on error unlock and delete the sample area 2343 if (result != B_OK) { 2344 if (clonedSampleArea >= 0) { 2345 unlock_memory(samples, areaInfo.size, B_READ_DEVICE); 2346 delete_area(clonedSampleArea); 2347 } 2348 } 2349 2350 // send a reply to the debugger 2351 reply.start_profiler.error = result; 2352 reply.start_profiler.interval = interval; 2353 reply.start_profiler.image_event = imageEvent; 2354 sendReply = true; 2355 replySize = sizeof(reply.start_profiler); 2356 2357 break; 
2358 } 2359 2360 case B_DEBUG_STOP_PROFILER: 2361 { 2362 // get the parameters 2363 thread_id threadID = message.stop_profiler.thread; 2364 replyPort = message.stop_profiler.reply_port; 2365 status_t result = B_OK; 2366 2367 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_STOP_PROFILER: " 2368 "thread: %" B_PRId32 "\n", nubThread->id, threadID)); 2369 2370 area_id sampleArea = -1; 2371 addr_t* samples = NULL; 2372 int32 sampleCount = 0; 2373 int32 stackDepth = 0; 2374 bool variableStackDepth = false; 2375 int32 imageEvent = 0; 2376 int32 droppedTicks = 0; 2377 2378 // get the thread and detach the profile info 2379 Thread* thread = Thread::GetAndLock(threadID); 2380 BReference<Thread> threadReference(thread, true); 2381 ThreadLocker threadLocker(thread, true); 2382 2383 if (thread && thread->team == nubThread->team) { 2384 thread_debug_info &threadDebugInfo = thread->debug_info; 2385 2386 InterruptsSpinLocker threadDebugInfoLocker( 2387 threadDebugInfo.lock); 2388 2389 if (threadDebugInfo.profile.samples != NULL) { 2390 sampleArea = threadDebugInfo.profile.sample_area; 2391 samples = threadDebugInfo.profile.samples; 2392 sampleCount = threadDebugInfo.profile.sample_count; 2393 droppedTicks = threadDebugInfo.profile.dropped_ticks; 2394 stackDepth = threadDebugInfo.profile.stack_depth; 2395 variableStackDepth 2396 = threadDebugInfo.profile.variable_stack_depth; 2397 imageEvent = threadDebugInfo.profile.image_event; 2398 threadDebugInfo.profile.sample_area = -1; 2399 threadDebugInfo.profile.samples = NULL; 2400 threadDebugInfo.profile.buffer_full = false; 2401 threadDebugInfo.profile.dropped_ticks = 0; 2402 } else 2403 result = B_BAD_VALUE; 2404 } else 2405 result = B_BAD_THREAD_ID; 2406 2407 threadLocker.Unlock(); 2408 2409 // prepare the reply 2410 if (result == B_OK) { 2411 reply.profiler_update.origin.thread = threadID; 2412 reply.profiler_update.image_event = imageEvent; 2413 reply.profiler_update.stack_depth = stackDepth; 2414 
reply.profiler_update.variable_stack_depth 2415 = variableStackDepth; 2416 reply.profiler_update.sample_count = sampleCount; 2417 reply.profiler_update.dropped_ticks = droppedTicks; 2418 reply.profiler_update.stopped = true; 2419 } else 2420 reply.profiler_update.origin.thread = result; 2421 2422 replySize = sizeof(debug_profiler_update); 2423 sendReply = true; 2424 2425 if (sampleArea >= 0) { 2426 area_info areaInfo; 2427 if (get_area_info(sampleArea, &areaInfo) == B_OK) { 2428 unlock_memory(samples, areaInfo.size, B_READ_DEVICE); 2429 delete_area(sampleArea); 2430 } 2431 } 2432 2433 break; 2434 } 2435 2436 case B_DEBUG_WRITE_CORE_FILE: 2437 { 2438 // get the parameters 2439 replyPort = message.write_core_file.reply_port; 2440 char* path = message.write_core_file.path; 2441 path[sizeof(message.write_core_file.path) - 1] = '\0'; 2442 2443 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_WRITE_CORE_FILE" 2444 ": path: %s\n", nubThread->id, path)); 2445 2446 // write the core file 2447 status_t result = core_dump_write_core_file(path, false); 2448 2449 // prepare the reply 2450 reply.write_core_file.error = result; 2451 replySize = sizeof(reply.write_core_file); 2452 sendReply = true; 2453 2454 break; 2455 } 2456 } 2457 2458 // send the reply, if necessary 2459 if (sendReply) { 2460 status_t error = kill_interruptable_write_port(replyPort, command, 2461 &reply, replySize); 2462 2463 if (error != B_OK) { 2464 // The debugger port is either not longer existing or we got 2465 // interrupted by a kill signal. In either case we terminate. 2466 TRACE(("nub thread %" B_PRId32 ": failed to send reply to port " 2467 "%" B_PRId32 ": %s\n", nubThread->id, replyPort, 2468 strerror(error))); 2469 2470 nub_thread_cleanup(nubThread); 2471 return error; 2472 } 2473 } 2474 } 2475 } 2476 2477 2478 /** \brief Helper function for install_team_debugger(), that sets up the team 2479 and thread debug infos. 2480 2481 The caller must hold the team's lock as well as the team debug info lock. 
2482 2483 The function also clears the arch specific team and thread debug infos 2484 (including among other things formerly set break/watchpoints). 2485 */ 2486 static void 2487 install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam, 2488 port_id debuggerPort, port_id nubPort, thread_id nubThread, 2489 sem_id debuggerPortWriteLock, thread_id causingThread) 2490 { 2491 atomic_set(&team->debug_info.flags, 2492 B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED); 2493 team->debug_info.nub_port = nubPort; 2494 team->debug_info.nub_thread = nubThread; 2495 team->debug_info.debugger_team = debuggerTeam; 2496 team->debug_info.debugger_port = debuggerPort; 2497 team->debug_info.debugger_write_lock = debuggerPortWriteLock; 2498 team->debug_info.causing_thread = causingThread; 2499 2500 arch_clear_team_debug_info(&team->debug_info.arch_info); 2501 2502 // set the user debug flags and signal masks of all threads to the default 2503 for (Thread *thread = team->thread_list; thread; 2504 thread = thread->team_next) { 2505 SpinLocker threadDebugInfoLocker(thread->debug_info.lock); 2506 2507 if (thread->id == nubThread) { 2508 atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD); 2509 } else { 2510 int32 flags = thread->debug_info.flags 2511 & ~B_THREAD_DEBUG_USER_FLAG_MASK; 2512 atomic_set(&thread->debug_info.flags, 2513 flags | B_THREAD_DEBUG_DEFAULT_FLAGS); 2514 thread->debug_info.ignore_signals = 0; 2515 thread->debug_info.ignore_signals_once = 0; 2516 2517 arch_clear_thread_debug_info(&thread->debug_info.arch_info); 2518 } 2519 } 2520 2521 // update the thread::flags fields 2522 update_threads_debugger_installed_flag(team); 2523 } 2524 2525 2526 static port_id 2527 install_team_debugger(team_id teamID, port_id debuggerPort, 2528 thread_id causingThread, bool useDefault, bool dontReplace) 2529 { 2530 TRACE(("install_team_debugger(team: %" B_PRId32 ", port: %" B_PRId32 ", " 2531 "default: %d, dontReplace: %d)\n", teamID, debuggerPort, 
useDefault, 2532 dontReplace)); 2533 2534 if (useDefault) 2535 debuggerPort = atomic_get(&sDefaultDebuggerPort); 2536 2537 // get the debugger team 2538 port_info debuggerPortInfo; 2539 status_t error = get_port_info(debuggerPort, &debuggerPortInfo); 2540 if (error != B_OK) { 2541 TRACE(("install_team_debugger(): Failed to get debugger port info: " 2542 "%" B_PRIx32 "\n", error)); 2543 return error; 2544 } 2545 team_id debuggerTeam = debuggerPortInfo.team; 2546 2547 // Check the debugger team: It must neither be the kernel team nor the 2548 // debugged team. 2549 if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) { 2550 TRACE(("install_team_debugger(): Can't debug kernel or debugger team. " 2551 "debugger: %" B_PRId32 ", debugged: %" B_PRId32 "\n", debuggerTeam, 2552 teamID)); 2553 return B_NOT_ALLOWED; 2554 } 2555 2556 // get the team 2557 Team* team; 2558 ConditionVariable debugChangeCondition; 2559 debugChangeCondition.Init(NULL, "debug change condition"); 2560 error = prepare_debugger_change(teamID, debugChangeCondition, team); 2561 if (error != B_OK) 2562 return error; 2563 2564 // get the real team ID 2565 teamID = team->id; 2566 2567 // check, if a debugger is already installed 2568 2569 bool done = false; 2570 port_id result = B_ERROR; 2571 bool handOver = false; 2572 port_id oldDebuggerPort = -1; 2573 port_id nubPort = -1; 2574 2575 TeamLocker teamLocker(team); 2576 cpu_status state = disable_interrupts(); 2577 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2578 2579 int32 teamDebugFlags = team->debug_info.flags; 2580 2581 if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) { 2582 // There's already a debugger installed. 2583 if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) { 2584 if (dontReplace) { 2585 // We're fine with already having a debugger. 
2586 error = B_OK; 2587 done = true; 2588 result = team->debug_info.nub_port; 2589 } else { 2590 // a handover to another debugger is requested 2591 // Set the handing-over flag -- we'll clear both flags after 2592 // having sent the handed-over message to the new debugger. 2593 atomic_or(&team->debug_info.flags, 2594 B_TEAM_DEBUG_DEBUGGER_HANDING_OVER); 2595 2596 oldDebuggerPort = team->debug_info.debugger_port; 2597 result = nubPort = team->debug_info.nub_port; 2598 if (causingThread < 0) 2599 causingThread = team->debug_info.causing_thread; 2600 2601 // set the new debugger 2602 install_team_debugger_init_debug_infos(team, debuggerTeam, 2603 debuggerPort, nubPort, team->debug_info.nub_thread, 2604 team->debug_info.debugger_write_lock, causingThread); 2605 2606 handOver = true; 2607 done = true; 2608 } 2609 } else { 2610 // there's already a debugger installed 2611 error = (dontReplace ? B_OK : B_BAD_VALUE); 2612 done = true; 2613 result = team->debug_info.nub_port; 2614 } 2615 } else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0 2616 && useDefault) { 2617 // No debugger yet, disable_debugger() had been invoked, and we 2618 // would install the default debugger. Just fail. 2619 error = B_BAD_VALUE; 2620 } 2621 2622 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2623 restore_interrupts(state); 2624 teamLocker.Unlock(); 2625 2626 if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) { 2627 // The old debugger must just have died. Just proceed as 2628 // if there was no debugger installed. We may still be too 2629 // early, in which case we'll fail, but this race condition 2630 // should be unbelievably rare and relatively harmless. 
2631 handOver = false; 2632 done = false; 2633 } 2634 2635 if (handOver) { 2636 // prepare the handed-over message 2637 debug_handed_over notification; 2638 notification.origin.thread = -1; 2639 notification.origin.team = teamID; 2640 notification.origin.nub_port = nubPort; 2641 notification.debugger = debuggerTeam; 2642 notification.debugger_port = debuggerPort; 2643 notification.causing_thread = causingThread; 2644 2645 // notify the new debugger 2646 error = write_port_etc(debuggerPort, 2647 B_DEBUGGER_MESSAGE_HANDED_OVER, ¬ification, 2648 sizeof(notification), B_RELATIVE_TIMEOUT, 0); 2649 if (error != B_OK) { 2650 dprintf("install_team_debugger(): Failed to send message to new " 2651 "debugger: %s\n", strerror(error)); 2652 } 2653 2654 // clear the handed-over and handing-over flags 2655 state = disable_interrupts(); 2656 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2657 2658 atomic_and(&team->debug_info.flags, 2659 ~(B_TEAM_DEBUG_DEBUGGER_HANDOVER 2660 | B_TEAM_DEBUG_DEBUGGER_HANDING_OVER)); 2661 2662 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2663 restore_interrupts(state); 2664 2665 finish_debugger_change(team); 2666 2667 // notify the nub thread 2668 kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER, 2669 NULL, 0); 2670 2671 // notify the old debugger 2672 error = write_port_etc(oldDebuggerPort, 2673 B_DEBUGGER_MESSAGE_HANDED_OVER, ¬ification, 2674 sizeof(notification), B_RELATIVE_TIMEOUT, 0); 2675 if (error != B_OK) { 2676 TRACE(("install_team_debugger(): Failed to send message to old " 2677 "debugger: %s\n", strerror(error))); 2678 } 2679 2680 TRACE(("install_team_debugger() done: handed over to debugger: team: " 2681 "%" B_PRId32 ", port: %" B_PRId32 "\n", debuggerTeam, 2682 debuggerPort)); 2683 2684 return result; 2685 } 2686 2687 if (done || error != B_OK) { 2688 TRACE(("install_team_debugger() done1: %" B_PRId32 "\n", 2689 (error == B_OK ? result : error))); 2690 finish_debugger_change(team); 2691 return (error == B_OK ? 
result : error); 2692 } 2693 2694 // create the debugger write lock semaphore 2695 char nameBuffer[B_OS_NAME_LENGTH]; 2696 snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debugger port " 2697 "write", teamID); 2698 sem_id debuggerWriteLock = create_sem(1, nameBuffer); 2699 if (debuggerWriteLock < 0) 2700 error = debuggerWriteLock; 2701 2702 // create the nub port 2703 snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug", teamID); 2704 if (error == B_OK) { 2705 nubPort = create_port(1, nameBuffer); 2706 if (nubPort < 0) 2707 error = nubPort; 2708 else 2709 result = nubPort; 2710 } 2711 2712 // make the debugger team the port owner; thus we know, if the debugger is 2713 // gone and can cleanup 2714 if (error == B_OK) 2715 error = set_port_owner(nubPort, debuggerTeam); 2716 2717 // create the breakpoint manager 2718 BreakpointManager* breakpointManager = NULL; 2719 if (error == B_OK) { 2720 breakpointManager = new(std::nothrow) BreakpointManager; 2721 if (breakpointManager != NULL) 2722 error = breakpointManager->Init(); 2723 else 2724 error = B_NO_MEMORY; 2725 } 2726 2727 // spawn the nub thread 2728 thread_id nubThread = -1; 2729 if (error == B_OK) { 2730 snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug task", 2731 teamID); 2732 nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer, 2733 B_NORMAL_PRIORITY, NULL, teamID); 2734 if (nubThread < 0) 2735 error = nubThread; 2736 } 2737 2738 // now adjust the debug info accordingly 2739 if (error == B_OK) { 2740 TeamLocker teamLocker(team); 2741 state = disable_interrupts(); 2742 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2743 2744 team->debug_info.breakpoint_manager = breakpointManager; 2745 install_team_debugger_init_debug_infos(team, debuggerTeam, 2746 debuggerPort, nubPort, nubThread, debuggerWriteLock, 2747 causingThread); 2748 2749 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2750 restore_interrupts(state); 2751 } 2752 2753 finish_debugger_change(team); 
2754 2755 // if everything went fine, resume the nub thread, otherwise clean up 2756 if (error == B_OK) { 2757 resume_thread(nubThread); 2758 } else { 2759 // delete port and terminate thread 2760 if (nubPort >= 0) { 2761 set_port_owner(nubPort, B_CURRENT_TEAM); 2762 delete_port(nubPort); 2763 } 2764 if (nubThread >= 0) { 2765 int32 result; 2766 wait_for_thread(nubThread, &result); 2767 } 2768 2769 delete breakpointManager; 2770 } 2771 2772 TRACE(("install_team_debugger() done2: %" B_PRId32 "\n", 2773 (error == B_OK ? result : error))); 2774 return (error == B_OK ? result : error); 2775 } 2776 2777 2778 static status_t 2779 ensure_debugger_installed() 2780 { 2781 port_id port = install_team_debugger(B_CURRENT_TEAM, -1, 2782 thread_get_current_thread_id(), true, true); 2783 return port >= 0 ? B_OK : port; 2784 } 2785 2786 2787 // #pragma mark - 2788 2789 2790 void 2791 _user_debugger(const char *userMessage) 2792 { 2793 // install the default debugger, if there is none yet 2794 status_t error = ensure_debugger_installed(); 2795 if (error != B_OK) { 2796 // time to commit suicide 2797 char buffer[128]; 2798 ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer)); 2799 if (length >= 0) { 2800 dprintf("_user_debugger(): Failed to install debugger. Message is: " 2801 "`%s'\n", buffer); 2802 } else { 2803 dprintf("_user_debugger(): Failed to install debugger. 
Message is: " 2804 "%p (%s)\n", userMessage, strerror(length)); 2805 } 2806 _user_exit_team(1); 2807 } 2808 2809 // prepare the message 2810 debug_debugger_call message; 2811 message.message = (void*)userMessage; 2812 2813 thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message, 2814 sizeof(message), true); 2815 } 2816 2817 2818 int 2819 _user_disable_debugger(int state) 2820 { 2821 Team *team = thread_get_current_thread()->team; 2822 2823 TRACE(("_user_disable_debugger(%d): team: %" B_PRId32 "\n", state, 2824 team->id)); 2825 2826 cpu_status cpuState = disable_interrupts(); 2827 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2828 2829 int32 oldFlags; 2830 if (state) { 2831 oldFlags = atomic_or(&team->debug_info.flags, 2832 B_TEAM_DEBUG_DEBUGGER_DISABLED); 2833 } else { 2834 oldFlags = atomic_and(&team->debug_info.flags, 2835 ~B_TEAM_DEBUG_DEBUGGER_DISABLED); 2836 } 2837 2838 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2839 restore_interrupts(cpuState); 2840 2841 // TODO: Check, if the return value is really the old state. 2842 return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED); 2843 } 2844 2845 2846 status_t 2847 _user_install_default_debugger(port_id debuggerPort) 2848 { 2849 // Do not allow non-root processes to install a default debugger. 
2850 if (geteuid() != 0) 2851 return B_PERMISSION_DENIED; 2852 2853 // if supplied, check whether the port is a valid port 2854 if (debuggerPort >= 0) { 2855 port_info portInfo; 2856 status_t error = get_port_info(debuggerPort, &portInfo); 2857 if (error != B_OK) 2858 return error; 2859 2860 // the debugger team must not be the kernel team 2861 if (portInfo.team == team_get_kernel_team_id()) 2862 return B_NOT_ALLOWED; 2863 } 2864 2865 atomic_set(&sDefaultDebuggerPort, debuggerPort); 2866 2867 return B_OK; 2868 } 2869 2870 2871 port_id 2872 _user_install_team_debugger(team_id teamID, port_id debuggerPort) 2873 { 2874 if (geteuid() != 0 && team_geteuid(teamID) != geteuid()) 2875 return B_PERMISSION_DENIED; 2876 2877 return install_team_debugger(teamID, debuggerPort, -1, false, false); 2878 } 2879 2880 2881 status_t 2882 _user_remove_team_debugger(team_id teamID) 2883 { 2884 Team* team; 2885 ConditionVariable debugChangeCondition; 2886 debugChangeCondition.Init(NULL, "debug change condition"); 2887 status_t error = prepare_debugger_change(teamID, debugChangeCondition, 2888 team); 2889 if (error != B_OK) 2890 return error; 2891 2892 InterruptsSpinLocker debugInfoLocker(team->debug_info.lock); 2893 2894 thread_id nubThread = -1; 2895 port_id nubPort = -1; 2896 2897 if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) { 2898 // there's a debugger installed 2899 nubThread = team->debug_info.nub_thread; 2900 nubPort = team->debug_info.nub_port; 2901 } else { 2902 // no debugger installed 2903 error = B_BAD_VALUE; 2904 } 2905 2906 debugInfoLocker.Unlock(); 2907 2908 // Delete the nub port -- this will cause the nub thread to terminate and 2909 // remove the debugger. 
2910 if (nubPort >= 0) 2911 delete_port(nubPort); 2912 2913 finish_debugger_change(team); 2914 2915 // wait for the nub thread 2916 if (nubThread >= 0) 2917 wait_for_thread(nubThread, NULL); 2918 2919 return error; 2920 } 2921 2922 2923 status_t 2924 _user_debug_thread(thread_id threadID) 2925 { 2926 TRACE(("[%" B_PRId32 "] _user_debug_thread(%" B_PRId32 ")\n", 2927 find_thread(NULL), threadID)); 2928 2929 // get the thread 2930 Thread* thread = Thread::GetAndLock(threadID); 2931 if (thread == NULL) 2932 return B_BAD_THREAD_ID; 2933 BReference<Thread> threadReference(thread, true); 2934 ThreadLocker threadLocker(thread, true); 2935 2936 // we can't debug the kernel team 2937 if (thread->team == team_get_kernel_team()) 2938 return B_NOT_ALLOWED; 2939 2940 InterruptsLocker interruptsLocker; 2941 SpinLocker threadDebugInfoLocker(thread->debug_info.lock); 2942 2943 // If the thread is already dying, it's too late to debug it. 2944 if ((thread->debug_info.flags & B_THREAD_DEBUG_DYING) != 0) 2945 return B_BAD_THREAD_ID; 2946 2947 // don't debug the nub thread 2948 if ((thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) != 0) 2949 return B_NOT_ALLOWED; 2950 2951 // already marked stopped or being told to stop? 
2952 if ((thread->debug_info.flags 2953 & (B_THREAD_DEBUG_STOPPED | B_THREAD_DEBUG_STOP)) != 0) { 2954 return B_OK; 2955 } 2956 2957 // set the flag that tells the thread to stop as soon as possible 2958 atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP); 2959 2960 update_thread_user_debug_flag(thread); 2961 2962 // send the thread a SIGNAL_DEBUG_THREAD, so it is interrupted (or 2963 // continued) 2964 threadDebugInfoLocker.Unlock(); 2965 ReadSpinLocker teamLocker(thread->team_lock); 2966 SpinLocker locker(thread->team->signal_lock); 2967 2968 send_signal_to_thread_locked(thread, SIGNAL_DEBUG_THREAD, NULL, 0); 2969 2970 return B_OK; 2971 } 2972 2973 2974 void 2975 _user_wait_for_debugger(void) 2976 { 2977 debug_thread_debugged message = {}; 2978 thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message, 2979 sizeof(message), false); 2980 } 2981 2982 2983 status_t 2984 _user_set_debugger_breakpoint(void *address, uint32 type, int32 length, 2985 bool watchpoint) 2986 { 2987 // check the address and size 2988 if (address == NULL || !BreakpointManager::CanAccessAddress(address, false)) 2989 return B_BAD_ADDRESS; 2990 if (watchpoint && length < 0) 2991 return B_BAD_VALUE; 2992 2993 // check whether a debugger is installed already 2994 team_debug_info teamDebugInfo; 2995 get_team_debug_info(teamDebugInfo); 2996 if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) 2997 return B_BAD_VALUE; 2998 2999 // We can't help it, here's a small but relatively harmless race condition, 3000 // since a debugger could be installed in the meantime. The worst case is 3001 // that we install a break/watchpoint the debugger doesn't know about. 
3002 3003 // set the break/watchpoint 3004 status_t result; 3005 if (watchpoint) 3006 result = arch_set_watchpoint(address, type, length); 3007 else 3008 result = arch_set_breakpoint(address); 3009 3010 if (result == B_OK) 3011 update_threads_breakpoints_flag(); 3012 3013 return result; 3014 } 3015 3016 3017 status_t 3018 _user_clear_debugger_breakpoint(void *address, bool watchpoint) 3019 { 3020 // check the address 3021 if (address == NULL || !BreakpointManager::CanAccessAddress(address, false)) 3022 return B_BAD_ADDRESS; 3023 3024 // check whether a debugger is installed already 3025 team_debug_info teamDebugInfo; 3026 get_team_debug_info(teamDebugInfo); 3027 if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) 3028 return B_BAD_VALUE; 3029 3030 // We can't help it, here's a small but relatively harmless race condition, 3031 // since a debugger could be installed in the meantime. The worst case is 3032 // that we clear a break/watchpoint the debugger has just installed. 3033 3034 // clear the break/watchpoint 3035 status_t result; 3036 if (watchpoint) 3037 result = arch_clear_watchpoint(address); 3038 else 3039 result = arch_clear_breakpoint(address); 3040 3041 if (result == B_OK) 3042 update_threads_breakpoints_flag(); 3043 3044 return result; 3045 } 3046