/*
 * Copyright 2005-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2015, Rene Gollent, rene@gollent.com.
 * Distributed under the terms of the MIT License.
 */


#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include <algorithm>

#include <arch/debug.h>
#include <arch/user_debugger.h>
#include <core_dump.h>
#include <cpu.h>
#include <debugger.h>
#include <kernel.h>
#include <KernelExport.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <ksyscalls.h>
#include <port.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <thread_types.h>
#include <user_debugger.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include <AutoDeleter.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>

#include "BreakpointManager.h"


//#define TRACE_USER_DEBUGGER
#ifdef TRACE_USER_DEBUGGER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


// TODO: Since the introduction of team_debug_info::debugger_changed_condition
// there's some potential for simplifications. E.g. clear_team_debug_info() and
// destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus
// arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()).


static port_id sDefaultDebuggerPort = -1;
	// accessed atomically

static timer sProfilingTimers[SMP_MAX_CPUS];
	// a profiling timer for each CPU -- used when a profiled thread is running
	// on that CPU


static void schedule_profiling_timer(Thread* thread, bigtime_t interval);
static int32 profiling_event(timer* unused);
static status_t ensure_debugger_installed();
static void get_team_debug_info(team_debug_info &teamDebugInfo);


static inline status_t
kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
	size_t bufferSize)
{
	return write_port_etc(port, code, buffer, bufferSize, B_KILL_CAN_INTERRUPT,
		0);
}


/*!	Writes a message to the given debugger port, serialized by the team's
	debugger write lock. The message is dropped if the debugger has changed
	or a debugger handover is in progress in the meantime.
*/
static status_t
debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
	bool dontWait)
{
	TRACE(("debugger_write(): thread: %" B_PRId32 ", team %" B_PRId32 ", "
		"port: %" B_PRId32 ", code: %" B_PRIx32 ", message: %p, size: %lu, "
		"dontWait: %d\n", thread_get_current_thread()->id,
		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
		dontWait));

	status_t error = B_OK;

	// get the team debug info
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	sem_id writeLock = teamDebugInfo.debugger_write_lock;

	// get the write lock
	TRACE(("debugger_write(): acquiring write lock...\n"));
	error = acquire_sem_etc(writeLock, 1,
		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK) {
		TRACE(("debugger_write() done1: %" B_PRIx32 "\n", error));
		return error;
	}

	// re-get the team debug info
	get_team_debug_info(teamDebugInfo);

	if (teamDebugInfo.debugger_port != port
		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
		// The debugger has changed in the meantime or we are about to be
		// handed over to a new debugger. In either case we don't send the
		// message.
		TRACE(("debugger_write(): %s\n",
			(teamDebugInfo.debugger_port != port ?
"debugger port changed" 115 : "handover flag set"))); 116 } else { 117 TRACE(("debugger_write(): writing to port...\n")); 118 119 error = write_port_etc(port, code, buffer, bufferSize, 120 dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0); 121 } 122 123 // release the write lock 124 release_sem(writeLock); 125 126 TRACE(("debugger_write() done: %" B_PRIx32 "\n", error)); 127 128 return error; 129 } 130 131 132 /*! Updates the thread::flags field according to what user debugger flags are 133 set for the thread. 134 Interrupts must be disabled and the thread's debug info lock must be held. 135 */ 136 static void 137 update_thread_user_debug_flag(Thread* thread) 138 { 139 if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0) 140 atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD); 141 else 142 atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD); 143 } 144 145 146 /*! Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the 147 given thread. 148 Interrupts must be disabled and the thread debug info lock must be held. 149 */ 150 static void 151 update_thread_breakpoints_flag(Thread* thread) 152 { 153 Team* team = thread->team; 154 155 if (arch_has_breakpoints(&team->debug_info.arch_info)) 156 atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED); 157 else 158 atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED); 159 } 160 161 162 /*! Updates the Thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all 163 threads of the current team. 164 */ 165 static void 166 update_threads_breakpoints_flag() 167 { 168 Team* team = thread_get_current_thread()->team; 169 170 TeamLocker teamLocker(team); 171 172 Thread* thread = team->thread_list; 173 174 if (arch_has_breakpoints(&team->debug_info.arch_info)) { 175 for (; thread != NULL; thread = thread->team_next) 176 atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED); 177 } else { 178 for (; thread != NULL; thread = thread->team_next) 179 atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED); 180 } 181 } 182 183 184 /*! Updates the thread::flags B_TEAM_DEBUG_DEBUGGER_INSTALLED bit of the 185 given thread, which must be the current thread. 186 */ 187 static void 188 update_thread_debugger_installed_flag(Thread* thread) 189 { 190 Team* team = thread->team; 191 192 if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) 193 atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED); 194 else 195 atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED); 196 } 197 198 199 /*! Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all 200 threads of the given team. 201 The team's lock must be held. 202 */ 203 static void 204 update_threads_debugger_installed_flag(Team* team) 205 { 206 Thread* thread = team->thread_list; 207 208 if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) { 209 for (; thread != NULL; thread = thread->team_next) 210 atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED); 211 } else { 212 for (; thread != NULL; thread = thread->team_next) 213 atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED); 214 } 215 } 216 217 218 /** 219 * For the first initialization the function must be called with \a initLock 220 * set to \c true. If it would be possible that another thread accesses the 221 * structure at the same time, `lock' must be held when calling the function. 
 */
void
clear_team_debug_info(struct team_debug_info *info, bool initLock)
{
	if (info) {
		arch_clear_team_debug_info(&info->arch_info);
		atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->nub_thread = -1;
		info->nub_port = -1;
		info->debugger_write_lock = -1;
		info->causing_thread = -1;
		info->image_event = 0;
		info->breakpoint_manager = NULL;

		if (initLock) {
			B_INITIALIZE_SPINLOCK(&info->lock);
			info->debugger_changed_condition = NULL;
		}
	}
}


/**
 * `lock' must not be held nor may interrupts be disabled.
 * \a info must not be a member of a team struct (or the team struct must no
 * longer be accessible, i.e. the team should already be removed).
 *
 * In case the team is still accessible, the procedure is:
 * 1. get `lock'
 * 2. copy the team debug info on stack
 * 3. call clear_team_debug_info() on the team debug info
 * 4. release `lock'
 * 5. call destroy_team_debug_info() on the copied team debug info
 */
static void
destroy_team_debug_info(struct team_debug_info *info)
{
	if (info) {
		arch_destroy_team_debug_info(&info->arch_info);

		// delete the breakpoint manager
		delete info->breakpoint_manager;
		info->breakpoint_manager = NULL;

		// delete the debugger port write lock
		if (info->debugger_write_lock >= 0) {
			delete_sem(info->debugger_write_lock);
			info->debugger_write_lock = -1;
		}

		// delete the nub port
		if (info->nub_port >= 0) {
			set_port_owner(info->nub_port, B_CURRENT_TEAM);
			delete_port(info->nub_port);
			info->nub_port = -1;
		}

		// wait for the nub thread
		if (info->nub_thread >= 0) {
			if (info->nub_thread != thread_get_current_thread()->id) {
				int32 result;
				wait_for_thread(info->nub_thread, &result);
			}

			info->nub_thread = -1;
		}

		atomic_set(&info->flags, 0);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->causing_thread = -1;
		info->image_event = -1;
	}
}


void
init_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		B_INITIALIZE_SPINLOCK(&info->lock);
		arch_clear_thread_debug_info(&info->arch_info);
		info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
		info->profile.installed_timer = NULL;
	}
}


/*!	Clears the debug info for the current thread.
	Invoked with thread debug info lock being held.
*/
void
clear_thread_debug_info(struct thread_debug_info *info, bool dying)
{
	if (info) {
		// cancel profiling timer
		if (info->profile.installed_timer != NULL) {
			cancel_timer(info->profile.installed_timer);
			info->profile.installed_timer = NULL;
		}

		arch_clear_thread_debug_info(&info->arch_info);
		atomic_set(&info->flags,
			B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ? B_THREAD_DEBUG_DYING : 0));
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
	}
}


void
destroy_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		area_id sampleArea = info->profile.sample_area;
		if (sampleArea >= 0) {
			area_info areaInfo;
			if (get_area_info(sampleArea, &areaInfo) == B_OK) {
				unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
				delete_area(sampleArea);
			}
		}

		arch_destroy_thread_debug_info(&info->arch_info);

		if (info->debug_port >= 0) {
			delete_port(info->debug_port);
			info->debug_port = -1;
		}

		info->ignore_signals = 0;
		info->ignore_signals_once = 0;

		atomic_set(&info->flags, 0);
	}
}


static status_t
prepare_debugger_change(team_id teamID, ConditionVariable& condition,
	Team*& team)
{
	// We look up the team by ID, even in case of the current team, so we can
	// be sure that the team is not already dying.
	if (teamID == B_CURRENT_TEAM)
		teamID = thread_get_current_thread()->team->id;

	while (true) {
		// get the team
		team = Team::GetAndLock(teamID);
		if (team == NULL)
			return B_BAD_TEAM_ID;
		BReference<Team> teamReference(team, true);
		TeamLocker teamLocker(team, true);

		// don't allow messing with the kernel team
		if (team == team_get_kernel_team())
			return B_NOT_ALLOWED;

		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return B_OK;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();
		teamLocker.Unlock();

		entry.Wait();
	}
}


static void
prepare_debugger_change(Team* team, ConditionVariable& condition)
{
	while (true) {
		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();

		entry.Wait();
	}
}


static void
finish_debugger_change(Team* team)
{
	// unset our condition variable and notify all threads waiting on it
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
	team->debug_info.debugger_changed_condition = NULL;

	condition->NotifyAll();
}


void
user_debug_prepare_for_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized, change the ownership of the debug port for the thread
	// to the kernel team, since exec_team() deletes all ports owned by this
	// team. We change the ownership back later.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if ((thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team_get_kernel_team_id());
	}
}


void
user_debug_finish_after_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized for this thread, change the ownership of its debug port
	// back to this team.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team->id);
	}
}


void
init_user_debug()
{
#ifdef ARCH_INIT_USER_DEBUG
	ARCH_INIT_USER_DEBUG();
#endif
}


static void
get_team_debug_info(team_debug_info &teamDebugInfo)
{
	Thread *thread = thread_get_current_thread();

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);

	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));

	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);
}


static status_t
thread_hit_debug_event_internal(debug_debugger_message event,
	const void *message, int32 size, bool requireDebugger, bool &restart)
{
	restart = false;
	Thread *thread = thread_get_current_thread();

	TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", event: %" B_PRIu32
		", message: %p, size: %" B_PRId32 "\n", thread->id, (uint32)event,
		message, size));

	// check, if there's a debug port already
	bool setPort = !(atomic_get(&thread->debug_info.flags)
		& B_THREAD_DEBUG_INITIALIZED);

	// create a port, if there is none yet
	port_id port = -1;
	if (setPort) {
		char nameBuffer[128];
		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %" B_PRId32,
			thread->id);

		port = create_port(1, nameBuffer);
		if (port < 0) {
			dprintf("thread_hit_debug_event(): Failed to create debug port: "
				"%s\n", strerror(port));
			return port;
		}
	}

	// check the debug info structures once more: get the debugger port, set
	// the thread's debug port, and update the thread's debug flags
	port_id deletePort = port;
	port_id debuggerPort = -1;
	port_id nubPort = -1;
	status_t error = B_OK;
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	uint32 threadFlags = thread->debug_info.flags;
	threadFlags &= ~B_THREAD_DEBUG_STOP;
	bool debuggerInstalled
		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	if (thread->id == thread->team->debug_info.nub_thread) {
		// Ugh, we're the nub thread. We shouldn't be here.
		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %" B_PRId32
			"\n", thread->id));

		error = B_ERROR;
	} else if (debuggerInstalled || !requireDebugger) {
		if (debuggerInstalled) {
			debuggerPort = thread->team->debug_info.debugger_port;
			nubPort = thread->team->debug_info.nub_port;
		}

		if (setPort) {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				// someone created a port for us (the port we've created will
				// be deleted below)
				port = thread->debug_info.debug_port;
			} else {
				thread->debug_info.debug_port = port;
				deletePort = -1;	// keep the port
				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
			}
		} else {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				port = thread->debug_info.debug_port;
			} else {
				// someone deleted our port
				error = B_ERROR;
			}
		}
	} else
		error = B_ERROR;

	// update the flags
	if (error == B_OK)
		threadFlags |= B_THREAD_DEBUG_STOPPED;
	atomic_set(&thread->debug_info.flags, threadFlags);

	update_thread_user_debug_flag(thread);

	threadDebugInfoLocker.Unlock();
	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);

	// delete the superfluous port
	if (deletePort >= 0)
		delete_port(deletePort);

	if (error != B_OK) {
		TRACE(("thread_hit_debug_event() error: thread: %" B_PRId32 ", error: "
			"%" B_PRIx32 "\n", thread->id, error));
		return error;
	}

	// send a message to the debugger port
	if (debuggerInstalled) {
		// update the message's origin info first
		debug_origin *origin = (debug_origin *)message;
		origin->thread = thread->id;
		origin->team = thread->team->id;
		origin->nub_port = nubPort;

		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", sending "
			"message to debugger port %" B_PRId32 "\n", thread->id,
			debuggerPort));

		error = debugger_write(debuggerPort, event, message, size, false);
	}

	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
	bool singleStep = false;

	if (error == B_OK) {
		bool done = false;
		while (!done) {
			// read a command from the debug port
			int32 command;
			debugged_thread_message_data commandMessage;
			ssize_t commandMessageSize = read_port_etc(port, &command,
				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
				0);

			if (commandMessageSize < 0) {
				error = commandMessageSize;
				TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed "
					"to receive message from port %" B_PRId32 ": %" B_PRIx32 "\n",
					thread->id, port, error));
				break;
			}

			switch (command) {
				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
						thread->id));
					result = commandMessage.continue_thread.handle_event;

					singleStep = commandMessage.continue_thread.single_step;
					done = true;
					break;

				case B_DEBUGGED_THREAD_SET_CPU_STATE:
				{
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
						thread->id));
					arch_set_debug_cpu_state(
						&commandMessage.set_cpu_state.cpu_state);

					break;
				}

				case B_DEBUGGED_THREAD_GET_CPU_STATE:
				{
					port_id replyPort = commandMessage.get_cpu_state.reply_port;

					// prepare the message
					debug_nub_get_cpu_state_reply replyMessage;
					replyMessage.error = B_OK;
					replyMessage.message = event;
					arch_get_debug_cpu_state(&replyMessage.cpu_state);

					// send it
					error = kill_interruptable_write_port(replyPort, event,
						&replyMessage, sizeof(replyMessage));

					break;
				}

				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
				{
					// Check, if the debugger really changed, i.e. is different
					// than the one we know.
					team_debug_info teamDebugInfo;
					get_team_debug_info(teamDebugInfo);

					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
						if (!debuggerInstalled
							|| teamDebugInfo.debugger_port != debuggerPort) {
							// debugger was installed or has changed: restart
							// this function
							restart = true;
							done = true;
						}
					} else {
						if (debuggerInstalled) {
							// debugger is gone: continue the thread normally
							done = true;
						}
					}

					break;
				}
			}
		}
	} else {
		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed to send "
			"message to debugger port %" B_PRId32 ": %" B_PRIx32 "\n",
			thread->id, debuggerPort, error));
	}

	// update the thread debug info
	bool destroyThreadInfo = false;
	thread_debug_info threadDebugInfo;

	state = disable_interrupts();
	threadDebugInfoLocker.Lock();

	// check, if the team is still being debugged
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// update the single-step flag
		if (singleStep) {
			atomic_or(&thread->debug_info.flags,
				B_THREAD_DEBUG_SINGLE_STEP);
			atomic_or(&thread->flags, THREAD_FLAGS_SINGLE_STEP);
		} else {
			atomic_and(&thread->debug_info.flags,
				~(int32)B_THREAD_DEBUG_SINGLE_STEP);
		}

		// unset the "stopped" state
		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);

		update_thread_user_debug_flag(thread);

	} else {
		// the debugger is gone: cleanup our info completely
		threadDebugInfo = thread->debug_info;
		clear_thread_debug_info(&thread->debug_info, false);
		destroyThreadInfo = true;
	}

	threadDebugInfoLocker.Unlock();
	restore_interrupts(state);

	// enable/disable single stepping
	arch_update_thread_single_step();

	if (destroyThreadInfo)
		destroy_thread_debug_info(&threadDebugInfo);

	return (error == B_OK ? result : error);
}


static status_t
thread_hit_debug_event(debug_debugger_message event, const void *message,
	int32 size, bool requireDebugger)
{
	status_t result;
	bool restart;
	do {
		restart = false;
		result = thread_hit_debug_event_internal(event, message, size,
			requireDebugger, restart);
	} while (result >= 0 && restart);

	// Prepare to continue -- we install a debugger change condition, so no one
	// will change the debugger while we're playing with the breakpoint manager.
	// TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
	Team* team = thread_get_current_thread()->team;
	ConditionVariable debugChangeCondition;
	debugChangeCondition.Init(team, "debug change condition");
	prepare_debugger_change(team, debugChangeCondition);

	if (team->debug_info.breakpoint_manager != NULL) {
		bool isSyscall;
		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
		if (pc != NULL && !isSyscall)
			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
	}

	finish_debugger_change(team);

	return result;
}


static status_t
thread_hit_serious_debug_event(debug_debugger_message event,
	const void *message, int32 messageSize)
{
	// ensure that a debugger is installed for this team
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		Thread *thread = thread_get_current_thread();
		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
			"thread: %" B_PRId32 " (%s): %s\n", thread->id, thread->name,
			strerror(error));
		return error;
	}

	// enter the debug loop
	return thread_hit_debug_event(event, message, messageSize, true);
}


void
user_debug_pre_syscall(uint32 syscall, void *args)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether pre-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_pre_syscall message;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
		sizeof(message), true);
}


void
user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
	bigtime_t startTime)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether post-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_post_syscall message;
	message.start_time = startTime;
	message.end_time = system_time();
	message.return_value = returnValue;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
		sizeof(message), true);
}


/**	\brief To be called when an unhandled processor exception (error/fault)
 *		occurred.
 *	\param exception The debug_exception_type value identifying the kind of
 *		fault.
 *	\param signal The signal corresponding to the exception.
 *	\return \c true, if the caller shall continue normally, i.e. usually send
 *		a deadly signal, \c false, if the debugger insists on continuing the
 *		program (e.g. because it has removed the cause of the problem).
 */
bool
user_debug_exception_occurred(debug_exception_type exception, int signal)
{
	// First check whether there's a signal handler installed for the signal.
	// If so, we don't want to install a debugger for the team. We always send
	// the signal instead. An already installed debugger will be notified, if
	// it has requested notifications of signals.
	struct sigaction signalAction;
	if (sigaction(signal, NULL, &signalAction) == 0
		&& signalAction.sa_handler != SIG_DFL) {
		return true;
	}

	// prepare the message
	debug_exception_occurred message;
	message.exception = exception;
	message.signal = signal;

	status_t result = thread_hit_serious_debug_event(
		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


bool
user_debug_handle_signal(int signal, struct sigaction *handler, siginfo_t *info,
	bool deadly)
{
	// check, if a debugger is installed and is interested in signals
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
		return true;
	}

	// prepare the message
	debug_signal_received message;
	message.signal = signal;
	message.handler = *handler;
	message.info = *info;
	message.deadly = deadly;

	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
		&message, sizeof(message), true);
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


void
user_debug_stop_thread()
{
	// check whether this is actually an emulated single-step notification
	Thread* thread = thread_get_current_thread();
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	bool singleStepped = false;
	if ((atomic_and(&thread->debug_info.flags,
				~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
			& B_THREAD_DEBUG_NOTIFY_SINGLE_STEP) != 0) {
		singleStepped = true;
	}

	threadDebugInfoLocker.Unlock();

	if (singleStepped) {
		user_debug_single_stepped();
	} else {
		debug_thread_debugged message;
		thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED,
			&message, sizeof(message));
	}
}


void
user_debug_team_created(team_id teamID)
{
	// check, if a debugger is installed and is interested in team creation
	// events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_created message;
	message.new_team = teamID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_team_deleted(team_id teamID, port_id debuggerPort, status_t status,
	int signal, team_usage_info* usageInfo)
{
	if (debuggerPort >= 0) {
		TRACE(("user_debug_team_deleted(team: %" B_PRId32 ", debugger port: "
			"%" B_PRId32 ")\n", teamID, debuggerPort));

		debug_team_deleted message;
		message.origin.thread = -1;
		message.origin.team = teamID;
		message.origin.nub_port = -1;
		message.status = status;
		message.signal = signal;
		message.usage = *usageInfo;
		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
			sizeof(message), B_RELATIVE_TIMEOUT, 0);
	}
}


void
user_debug_team_exec()
{
	// check, if a debugger is installed and is interested in team creation
	// events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_exec message;
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
		sizeof(message), true);
}


/*!	Called by a new userland thread to update the debugging related flags of
	\c Thread::flags before the thread first enters userland.
	\param thread The calling thread.
*/
void
user_debug_update_new_thread_flags(Thread* thread)
{
	// lock it and update its flags
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	update_thread_user_debug_flag(thread);
	update_thread_breakpoints_flag(thread);
	update_thread_debugger_installed_flag(thread);
}


void
user_debug_thread_created(thread_id threadID)
{
	// check, if a debugger is installed and is interested in thread events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// prepare the message
	debug_thread_created message;
	message.new_thread = threadID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_thread_deleted(team_id teamID, thread_id threadID, status_t status)
{
	// Things are a bit complicated here, since this thread no longer belongs
	// to the debugged team (but to the kernel). So we can't use
	// debugger_write().

	// get the team debug flags and debugger port
	Team* team = Team::Get(teamID);
	if (team == NULL)
		return;
	BReference<Team> teamReference(team, true);

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;
	sem_id writeLock = team->debug_info.debugger_write_lock;

	debugInfoLocker.Unlock();

	// check, if a debugger is installed and is interested in thread events
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// acquire the debugger write lock
	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK)
		return;

	// re-get the team debug info -- we need to check whether anything changed
	debugInfoLocker.Lock();

	teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id newDebuggerPort = team->debug_info.debugger_port;

	debugInfoLocker.Unlock();

	// Send the message only if the debugger hasn't changed in the meantime or
	// no handover to a new debugger is in progress.
	if (newDebuggerPort == debuggerPort
		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
		debug_thread_deleted message;
		message.origin.thread = threadID;
		message.origin.team = teamID;
		message.origin.nub_port = -1;
		message.status = status;

		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
	}

	// release the debugger write lock
	release_sem(writeLock);
}


/*!	Called for a thread that is about to die, cleaning up all user debug
	facilities installed for the thread.
	\param thread The current thread, the one that is going to die.
*/
void
user_debug_thread_exiting(Thread* thread)
{
	// thread is the current thread, so using team is safe
	Team* team = thread->team;

	InterruptsLocker interruptsLocker;

	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	// check, if a debugger is installed
	if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
		|| debuggerPort < 0) {
		return;
	}

	// detach the profile info and mark the thread dying
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	thread_debug_info& threadDebugInfo = thread->debug_info;
	if (threadDebugInfo.profile.samples == NULL)
		return;

	area_id sampleArea = threadDebugInfo.profile.sample_area;
	int32 sampleCount = threadDebugInfo.profile.sample_count;
	int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
	int32 stackDepth = threadDebugInfo.profile.stack_depth;
	bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
	int32 imageEvent = threadDebugInfo.profile.image_event;
	threadDebugInfo.profile.sample_area = -1;
	threadDebugInfo.profile.samples = NULL;
	threadDebugInfo.profile.buffer_full = false;

	atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);

	threadDebugInfoLocker.Unlock();
	interruptsLocker.Unlock();

	// notify the debugger
	debug_profiler_update message;
	message.origin.thread = thread->id;
	message.origin.team = thread->team->id;
	message.origin.nub_port = -1;	// asynchronous message
	message.sample_count = sampleCount;
	message.dropped_ticks = droppedTicks;
	message.stack_depth = stackDepth;
	message.variable_stack_depth = variableStackDepth;
	message.image_event = imageEvent;
	message.stopped = true;
	debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
		&message, sizeof(message), false);

	if (sampleArea >= 0) {
		area_info areaInfo;
		if (get_area_info(sampleArea, &areaInfo) == B_OK) {
			unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
			delete_area(sampleArea);
		}
	}
}


void
user_debug_image_created(const image_info *imageInfo)
{
	// check, if a debugger is installed and is interested in image events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_created message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_image_deleted(const image_info *imageInfo)
{
	// check, if a debugger is installed and is interested in image events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_deleted message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message,
		sizeof(message), true);
}


void
user_debug_breakpoint_hit(bool software)
{
	// prepare the message
	debug_breakpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
		sizeof(message));
}


void
user_debug_watchpoint_hit()
{
	// prepare the message
	debug_watchpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
		sizeof(message));
}


void
user_debug_single_stepped()
{
	// clear the single-step thread flag
	Thread* thread = thread_get_current_thread();
	atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_SINGLE_STEP);

	// prepare the message
	debug_single_step message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
		sizeof(message));
}


/*!	Schedules the profiling timer for the current thread.
	The caller must hold the thread's debug info lock.
	\param thread The current thread.
	\param interval The time after which the timer should fire.
*/
static void
schedule_profiling_timer(Thread* thread, bigtime_t interval)
{
	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
	thread->debug_info.profile.installed_timer = timer;
	thread->debug_info.profile.timer_end = system_time() + interval;
	add_timer(timer, &profiling_event, interval, B_ONE_SHOT_RELATIVE_TIMER);
}


/*!	Samples the current thread's instruction pointer/stack trace.
	The caller must hold the current thread's debug info lock.
	\param flushBuffer Return parameter: Set to \c true when the sampling
		buffer must be flushed.
*/
static bool
profiling_do_sample(bool& flushBuffer)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	if (debugInfo.profile.samples == NULL)
		return false;

	// Check, whether the buffer is full or an image event occurred since the
	// last sample was taken.
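	// (Buffer layout, as used by the code below: with variable stack depth
	// every hit is stored as a count followed by that many return addresses,
	// and an image event is recorded as the pair
	// {B_DEBUG_PROFILE_IMAGE_EVENT, <event counter>}; with fixed stack depth
	// every hit occupies exactly stack_depth slots, padded with zeros.)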
	int32 maxSamples = debugInfo.profile.max_samples;
	int32 sampleCount = debugInfo.profile.sample_count;
	int32 stackDepth = debugInfo.profile.stack_depth;
	int32 imageEvent = thread->team->debug_info.image_event;
	if (debugInfo.profile.sample_count > 0) {
		if (debugInfo.profile.last_image_event < imageEvent
			&& debugInfo.profile.variable_stack_depth
			&& sampleCount + 2 <= maxSamples) {
			// an image event occurred, but we use variable stack depth and
			// have enough room in the buffer to indicate an image event
			addr_t* event = debugInfo.profile.samples + sampleCount;
			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
			event[1] = imageEvent;
			sampleCount += 2;
			debugInfo.profile.sample_count = sampleCount;
			debugInfo.profile.last_image_event = imageEvent;
		}

		if (debugInfo.profile.last_image_event < imageEvent
			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
			if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
				flushBuffer = true;
				return true;
			}

			// We can't flush the buffer now, since we interrupted a kernel
			// function. If the buffer is not full yet, we add the samples,
			// otherwise we have to drop them.
			if (maxSamples - sampleCount < stackDepth) {
				debugInfo.profile.dropped_ticks++;
				return true;
			}
		}
	} else {
		// first sample -- set the image event
		debugInfo.profile.image_event = imageEvent;
		debugInfo.profile.last_image_event = imageEvent;
	}

	// get the samples
	addr_t* returnAddresses = debugInfo.profile.samples
		+ debugInfo.profile.sample_count;
	if (debugInfo.profile.variable_stack_depth) {
		// variable sample count per hit
		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
			stackDepth - 1, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

		debugInfo.profile.sample_count += *returnAddresses + 1;
	} else {
		// fixed sample count per hit
		if (stackDepth > 1) {
			int32 count = arch_debug_get_stack_trace(returnAddresses,
				stackDepth, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

			for (int32 i = count; i < stackDepth; i++)
				returnAddresses[i] = 0;
		} else
			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);

		debugInfo.profile.sample_count += stackDepth;
	}

	return true;
}


static void
profiling_buffer_full(void*)
{
	// It is undefined whether the function is called with interrupts enabled
	// or disabled. We are allowed to enable interrupts, though. First make
	// sure interrupts are disabled.
	disable_interrupts();

	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
		int32 sampleCount = debugInfo.profile.sample_count;
		int32 droppedTicks = debugInfo.profile.dropped_ticks;
		int32 stackDepth = debugInfo.profile.stack_depth;
		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
		int32 imageEvent = debugInfo.profile.image_event;

		// notify the debugger
		debugInfo.profile.sample_count = 0;
		debugInfo.profile.dropped_ticks = 0;

		threadDebugInfoLocker.Unlock();
		enable_interrupts();

		// prepare the message
		debug_profiler_update message;
		message.sample_count = sampleCount;
		message.dropped_ticks = droppedTicks;
		message.stack_depth = stackDepth;
		message.variable_stack_depth = variableStackDepth;
		message.image_event = imageEvent;
		message.stopped = false;

		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
			sizeof(message), false);

		disable_interrupts();
		threadDebugInfoLocker.Lock();

		// do the sampling and reschedule timer, if still profiling this thread
		bool flushBuffer;
		if (profiling_do_sample(flushBuffer)) {
			debugInfo.profile.buffer_full = false;
			schedule_profiling_timer(thread, debugInfo.profile.interval);
		}
	}

	threadDebugInfoLocker.Unlock();
	enable_interrupts();
}


/*!	Profiling timer event callback.
	Called with interrupts disabled.
*/
static int32
profiling_event(timer* /*unused*/)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	bool flushBuffer = false;
	if (profiling_do_sample(flushBuffer)) {
		if (flushBuffer) {
			// The sample buffer needs to be flushed; we'll have to notify the
			// debugger. We can't do that right here. Instead we set a post
			// interrupt callback doing that for us, and don't reschedule the
			// timer yet.
			thread->post_interrupt_callback = profiling_buffer_full;
			debugInfo.profile.installed_timer = NULL;
			debugInfo.profile.buffer_full = true;
		} else
			schedule_profiling_timer(thread, debugInfo.profile.interval);
	} else
		debugInfo.profile.installed_timer = NULL;

	return B_HANDLED_INTERRUPT;
}


/*!	Called by the scheduler when a debugged thread has been unscheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_unscheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// if running, cancel the profiling timer
	struct timer* timer = thread->debug_info.profile.installed_timer;
	if (timer != NULL) {
		// track remaining time
		bigtime_t left = thread->debug_info.profile.timer_end - system_time();
		thread->debug_info.profile.interval_left = max_c(left, 0);
		thread->debug_info.profile.installed_timer = NULL;

		// cancel timer
		threadDebugInfoLocker.Unlock();
			// not necessary, but doesn't harm and reduces contention
		cancel_timer(timer);
			// since this runs on the same CPU, it cannot end up waiting for a
			// timer hook that is already running
	}
}


/*!	Called by the scheduler when a debugged thread has been scheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_scheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	if (thread->debug_info.profile.samples != NULL
		&& !thread->debug_info.profile.buffer_full) {
		// install profiling timer
		schedule_profiling_timer(thread,
			thread->debug_info.profile.interval_left);
	}
}


/*!	\brief Called by the debug nub thread of a team to broadcast a message to
		all threads of the team that are initialized for debugging (and
		thus have a debug port).
*/
static void
broadcast_debugged_thread_message(Thread *nubThread, int32 code,
	const void *message, int32 size)
{
	// iterate through the threads
	thread_info threadInfo;
	int32 cookie = 0;
	while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
			== B_OK) {
		// get the thread and lock it
		Thread* thread = Thread::GetAndLock(threadInfo.thread);
		if (thread == NULL)
			continue;

		BReference<Thread> threadReference(thread, true);
		ThreadLocker threadLocker(thread, true);

		// get the thread's debug port
		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		port_id threadDebugPort = -1;
		if (thread && thread != nubThread && thread->team == nubThread->team
			&& (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
			&& (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
			threadDebugPort = thread->debug_info.debug_port;
		}

		threadDebugInfoLocker.Unlock();
		threadLocker.Unlock();

		// send the message to the thread
		if (threadDebugPort >= 0) {
			status_t error = kill_interruptable_write_port(threadDebugPort,
				code, message, size);
			if (error != B_OK) {
				TRACE(("broadcast_debugged_thread_message(): Failed to send "
					"message to thread %" B_PRId32 ": %" B_PRIx32 "\n",
					thread->id, error));
			}
		}
	}
}


/*!	Cleans up the team's debug info when the nub thread exits, provided the
	nub thread is still registered as the team's nub thread, and notifies all
	debugged threads that the debugger is gone.
*/
static void
nub_thread_cleanup(Thread *nubThread)
{
	TRACE(("nub_thread_cleanup(%" B_PRId32 "): debugger port: %" B_PRId32 "\n",
		nubThread->id, nubThread->team->debug_info.debugger_port));

	ConditionVariable debugChangeCondition;
	debugChangeCondition.Init(nubThread->team, "debug change condition");
	prepare_debugger_change(nubThread->team, debugChangeCondition);

	team_debug_info teamDebugInfo;
	bool destroyDebugInfo = false;

	TeamLocker teamLocker(nubThread->team);
		// required by update_threads_debugger_installed_flag()

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	team_debug_info &info = nubThread->team->debug_info;
	if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
		&& info.nub_thread == nubThread->id) {
		teamDebugInfo = info;
		clear_team_debug_info(&info, false);
		destroyDebugInfo = true;
	}

	// update the thread::flags fields
	update_threads_debugger_installed_flag(nubThread->team);

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	teamLocker.Unlock();

	if (destroyDebugInfo)
		teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();

	finish_debugger_change(nubThread->team);

	if (destroyDebugInfo)
		destroy_team_debug_info(&teamDebugInfo);

	// notify all threads that the debugger is gone
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
}


/**	\brief Debug nub thread helper function that returns the debug port of
 *		a thread of the same team.
 */
static status_t
debug_nub_thread_get_thread_debug_port(Thread *nubThread,
	thread_id threadID, port_id &threadDebugPort)
{
	threadDebugPort = -1;

	// get the thread
	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// get the debug port
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	if (thread->team != nubThread->team)
		return B_BAD_VALUE;
	if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) == 0)
		return B_BAD_THREAD_STATE;

	threadDebugPort = thread->debug_info.debug_port;

	threadDebugInfoLocker.Unlock();

	if (threadDebugPort < 0)
		return B_ERROR;

	return B_OK;
}


/*!	Entry function of a team's debug nub thread: processes the commands the
	debugger sends to the team's nub port.
*/
static status_t
debug_nub_thread(void *)
{
	Thread *nubThread = thread_get_current_thread();

	// check, if we're still the current nub thread and get our port
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	if (nubThread->team->debug_info.nub_thread != nubThread->id) {
		RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
		restore_interrupts(state);
		return 0;
	}

	port_id port = nubThread->team->debug_info.nub_port;
	sem_id writeLock = nubThread->team->debug_info.debugger_write_lock;
	BreakpointManager* breakpointManager
		= nubThread->team->debug_info.breakpoint_manager;

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	TRACE(("debug_nub_thread() thread: %" B_PRId32 ", team %" B_PRId32 ", nub "
		"port: %" B_PRId32 "\n", nubThread->id, nubThread->team->id, port));

	// notify all threads that a debugger has been installed
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);

	// command processing loop
	while (true) {
		int32 command;
		debug_nub_message_data message;
		ssize_t messageSize = read_port_etc(port, &command, &message,
			sizeof(message), B_KILL_CAN_INTERRUPT, 0);

		if (messageSize < 0) {
			// The port is no longer valid or we were interrupted by a kill
			// signal: If we are still listed in the team's debug info as nub
			// thread, we need to update that.
			nub_thread_cleanup(nubThread);

			TRACE(("nub thread %" B_PRId32 ": terminating: %lx\n",
				nubThread->id, messageSize));

			return messageSize;
		}

		bool sendReply = false;
		union {
			debug_nub_read_memory_reply read_memory;
			debug_nub_write_memory_reply write_memory;
			debug_nub_get_cpu_state_reply get_cpu_state;
			debug_nub_set_breakpoint_reply set_breakpoint;
			debug_nub_set_watchpoint_reply set_watchpoint;
			debug_nub_get_signal_masks_reply get_signal_masks;
			debug_nub_get_signal_handler_reply get_signal_handler;
			debug_nub_start_profiler_reply start_profiler;
			debug_profiler_update profiler_update;
			debug_nub_write_core_file_reply write_core_file;
		} reply;
		int32 replySize = 0;
		port_id replyPort = -1;

		// process the command
		switch (command) {
			case B_DEBUG_MESSAGE_READ_MEMORY:
			{
				// get the parameters
				replyPort = message.read_memory.reply_port;
				void *address = message.read_memory.address;
				int32 size = message.read_memory.size;
				status_t result = B_OK;

				// check the parameters
				if (!BreakpointManager::CanAccessAddress(address, false))
					result = B_BAD_ADDRESS;
				else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE)
					result = B_BAD_VALUE;

				// read the memory
				size_t bytesRead = 0;
				if (result == B_OK) {
					result = breakpointManager->ReadMemory(address,
						reply.read_memory.data, size, bytesRead);
				}
				reply.read_memory.error = result;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_READ_MEMORY: "
					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
					", result: %" B_PRIx32 ", read: %ld\n", nubThread->id,
					replyPort, address, size, result, bytesRead));

				// send only as much data as necessary
				reply.read_memory.size = bytesRead;
				replySize = reply.read_memory.data + bytesRead - (char*)&reply;
				sendReply = true;
				break;
			}

			case B_DEBUG_MESSAGE_WRITE_MEMORY:
			{
				// get the parameters
				replyPort = message.write_memory.reply_port;
				void *address = message.write_memory.address;
				int32 size = message.write_memory.size;
				const char *data = message.write_memory.data;
				int32 realSize = (char*)&message + messageSize - data;
				status_t result = B_OK;

				// check the parameters
				if (!BreakpointManager::CanAccessAddress(address, true))
					result = B_BAD_ADDRESS;
				else if (size <= 0 || size > realSize)
					result = B_BAD_VALUE;

				// write the memory
				size_t bytesWritten = 0;
				if (result == B_OK) {
					result = breakpointManager->WriteMemory(address, data, size,
						bytesWritten);
				}
				reply.write_memory.error = result;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_WRITE_MEMORY: "
					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
					", result: %" B_PRIx32 ", written: %ld\n", nubThread->id,
					replyPort, address, size, result, bytesWritten));

				reply.write_memory.size = bytesWritten;
				sendReply = true;
				replySize = sizeof(debug_nub_write_memory_reply);
				break;
			}

			case B_DEBUG_MESSAGE_SET_TEAM_FLAGS:
			{
				// get the parameters
				int32 flags = message.set_team_flags.flags
					& B_TEAM_DEBUG_USER_FLAG_MASK;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_TEAM_FLAGS"
					": flags: %" B_PRIx32 "\n", nubThread->id, flags));

				Team *team = thread_get_current_thread()->team;

// set the flags 1799 cpu_status state = disable_interrupts(); 1800 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1801 1802 flags |= team->debug_info.flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK; 1803 atomic_set(&team->debug_info.flags, flags); 1804 1805 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1806 restore_interrupts(state); 1807 1808 break; 1809 } 1810 1811 case B_DEBUG_MESSAGE_SET_THREAD_FLAGS: 1812 { 1813 // get the parameters 1814 thread_id threadID = message.set_thread_flags.thread; 1815 int32 flags = message.set_thread_flags.flags 1816 & B_THREAD_DEBUG_USER_FLAG_MASK; 1817 1818 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_THREAD_FLAGS" 1819 ": thread: %" B_PRId32 ", flags: %" B_PRIx32 "\n", 1820 nubThread->id, threadID, flags)); 1821 1822 // set the flags 1823 Thread* thread = Thread::GetAndLock(threadID); 1824 if (thread == NULL) 1825 break; 1826 BReference<Thread> threadReference(thread, true); 1827 ThreadLocker threadLocker(thread, true); 1828 1829 InterruptsSpinLocker threadDebugInfoLocker( 1830 thread->debug_info.lock); 1831 1832 if (thread->team == thread_get_current_thread()->team) { 1833 flags |= thread->debug_info.flags 1834 & B_THREAD_DEBUG_KERNEL_FLAG_MASK; 1835 atomic_set(&thread->debug_info.flags, flags); 1836 } 1837 1838 break; 1839 } 1840 1841 case B_DEBUG_MESSAGE_CONTINUE_THREAD: 1842 { 1843 // get the parameters 1844 thread_id threadID; 1845 uint32 handleEvent; 1846 bool singleStep; 1847 1848 threadID = message.continue_thread.thread; 1849 handleEvent = message.continue_thread.handle_event; 1850 singleStep = message.continue_thread.single_step; 1851 1852 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CONTINUE_THREAD" 1853 ": thread: %" B_PRId32 ", handle event: %" B_PRIu32 ", " 1854 "single step: %d\n", nubThread->id, threadID, handleEvent, 1855 singleStep)); 1856 1857 // find the thread and get its debug port 1858 port_id threadDebugPort = -1; 1859 status_t result = debug_nub_thread_get_thread_debug_port( 1860 nubThread, threadID, threadDebugPort); 1861 1862 // send a message to the debugged thread 1863 if (result == B_OK) { 1864 debugged_thread_continue commandMessage; 1865 commandMessage.handle_event = handleEvent; 1866 commandMessage.single_step = singleStep; 1867 1868 result = write_port(threadDebugPort, 1869 B_DEBUGGED_THREAD_MESSAGE_CONTINUE, 1870 &commandMessage, sizeof(commandMessage)); 1871 } else if (result == B_BAD_THREAD_STATE) { 1872 Thread* thread = Thread::GetAndLock(threadID); 1873 if (thread == NULL) 1874 break; 1875 1876 BReference<Thread> threadReference(thread, true); 1877 ThreadLocker threadLocker(thread, true); 1878 if (thread->state == B_THREAD_SUSPENDED) { 1879 threadLocker.Unlock(); 1880 resume_thread(threadID); 1881 break; 1882 } 1883 } 1884 1885 break; 1886 } 1887 1888 case B_DEBUG_MESSAGE_SET_CPU_STATE: 1889 { 1890 // get the parameters 1891 thread_id threadID = message.set_cpu_state.thread; 1892 const debug_cpu_state &cpuState 1893 = message.set_cpu_state.cpu_state; 1894 1895 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_CPU_STATE" 1896 ": thread: %" B_PRId32 "\n", nubThread->id, threadID)); 1897 1898 // find the thread and get its debug port 1899 port_id threadDebugPort = -1; 1900 status_t result = debug_nub_thread_get_thread_debug_port( 1901 nubThread, threadID, threadDebugPort); 1902 1903 // send a message to the debugged thread 1904 if (result == B_OK) { 1905 debugged_thread_set_cpu_state commandMessage; 1906 memcpy(&commandMessage.cpu_state, &cpuState, 1907 sizeof(debug_cpu_state)); 1908 
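// Forward the new CPU state to the debugged thread's own debug port; the thread applies it itself when it handles the message at its debug stop (presumably because the nub cannot safely rewrite another thread's register state directly).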
write_port(threadDebugPort, 1909 B_DEBUGGED_THREAD_SET_CPU_STATE, 1910 &commandMessage, sizeof(commandMessage)); 1911 } 1912 1913 break; 1914 } 1915 1916 case B_DEBUG_MESSAGE_GET_CPU_STATE: 1917 { 1918 // get the parameters 1919 thread_id threadID = message.get_cpu_state.thread; 1920 replyPort = message.get_cpu_state.reply_port; 1921 1922 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_CPU_STATE" 1923 ": thread: %" B_PRId32 "\n", nubThread->id, threadID)); 1924 1925 // find the thread and get its debug port 1926 port_id threadDebugPort = -1; 1927 status_t result = debug_nub_thread_get_thread_debug_port( 1928 nubThread, threadID, threadDebugPort); 1929 1930 // send a message to the debugged thread 1931 if (threadDebugPort >= 0) { 1932 debugged_thread_get_cpu_state commandMessage; 1933 commandMessage.reply_port = replyPort; 1934 result = write_port(threadDebugPort, 1935 B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage, 1936 sizeof(commandMessage)); 1937 } 1938 1939 // send a reply to the debugger in case of error 1940 if (result != B_OK) { 1941 reply.get_cpu_state.error = result; 1942 sendReply = true; 1943 replySize = sizeof(reply.get_cpu_state); 1944 } 1945 1946 break; 1947 } 1948 1949 case B_DEBUG_MESSAGE_SET_BREAKPOINT: 1950 { 1951 // get the parameters 1952 replyPort = message.set_breakpoint.reply_port; 1953 void *address = message.set_breakpoint.address; 1954 1955 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_BREAKPOINT" 1956 ": address: %p\n", nubThread->id, address)); 1957 1958 // check the address 1959 status_t result = B_OK; 1960 if (address == NULL 1961 || !BreakpointManager::CanAccessAddress(address, false)) { 1962 result = B_BAD_ADDRESS; 1963 } 1964 1965 // set the breakpoint 1966 if (result == B_OK) 1967 result = breakpointManager->InstallBreakpoint(address); 1968 1969 if (result == B_OK) 1970 update_threads_breakpoints_flag(); 1971 1972 // prepare the reply 1973 reply.set_breakpoint.error = result; 1974 replySize = sizeof(reply.set_breakpoint); 1975 sendReply = true; 1976 1977 break; 1978 } 1979 1980 case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT: 1981 { 1982 // get the parameters 1983 void *address = message.clear_breakpoint.address; 1984 1985 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_BREAKPOINT" 1986 ": address: %p\n", nubThread->id, address)); 1987 1988 // check the address 1989 status_t result = B_OK; 1990 if (address == NULL 1991 || !BreakpointManager::CanAccessAddress(address, false)) { 1992 result = B_BAD_ADDRESS; 1993 } 1994 1995 // clear the breakpoint 1996 if (result == B_OK) 1997 result = breakpointManager->UninstallBreakpoint(address); 1998 1999 if (result == B_OK) 2000 update_threads_breakpoints_flag(); 2001 2002 break; 2003 } 2004 2005 case B_DEBUG_MESSAGE_SET_WATCHPOINT: 2006 { 2007 // get the parameters 2008 replyPort = message.set_watchpoint.reply_port; 2009 void *address = message.set_watchpoint.address; 2010 uint32 type = message.set_watchpoint.type; 2011 int32 length = message.set_watchpoint.length; 2012 2013 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_WATCHPOINT" 2014 ": address: %p, type: %" B_PRIu32 ", length: %" B_PRId32 "\n", 2015 nubThread->id, address, type, length)); 2016 2017 // check the address and size 2018 status_t result = B_OK; 2019 if (address == NULL 2020 || !BreakpointManager::CanAccessAddress(address, false)) { 2021 result = B_BAD_ADDRESS; 2022 } 2023 if (length < 0) 2024 result = B_BAD_VALUE; 2025 2026 // set the watchpoint 2027 if (result == B_OK) { 2028 result = breakpointManager->InstallWatchpoint(address, 
type, 2029 length); 2030 } 2031 2032 if (result == B_OK) 2033 update_threads_breakpoints_flag(); 2034 2035 // prepare the reply 2036 reply.set_watchpoint.error = result; 2037 replySize = sizeof(reply.set_watchpoint); 2038 sendReply = true; 2039 2040 break; 2041 } 2042 2043 case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT: 2044 { 2045 // get the parameters 2046 void *address = message.clear_watchpoint.address; 2047 2048 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_WATCHPOINT" 2049 ": address: %p\n", nubThread->id, address)); 2050 2051 // check the address 2052 status_t result = B_OK; 2053 if (address == NULL 2054 || !BreakpointManager::CanAccessAddress(address, false)) { 2055 result = B_BAD_ADDRESS; 2056 } 2057 2058 // clear the watchpoint 2059 if (result == B_OK) 2060 result = breakpointManager->UninstallWatchpoint(address); 2061 2062 if (result == B_OK) 2063 update_threads_breakpoints_flag(); 2064 2065 break; 2066 } 2067 2068 case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS: 2069 { 2070 // get the parameters 2071 thread_id threadID = message.set_signal_masks.thread; 2072 uint64 ignore = message.set_signal_masks.ignore_mask; 2073 uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask; 2074 uint32 ignoreOp = message.set_signal_masks.ignore_op; 2075 uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op; 2076 2077 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_MASKS" 2078 ": thread: %" B_PRId32 ", ignore: %" B_PRIx64 " (op: %" 2079 B_PRIu32 "), ignore once: %" B_PRIx64 " (op: %" B_PRIu32 2080 ")\n", nubThread->id, threadID, ignore, ignoreOp, 2081 ignoreOnce, ignoreOnceOp)); 2082 2083 // set the masks 2084 Thread* thread = Thread::GetAndLock(threadID); 2085 if (thread == NULL) 2086 break; 2087 BReference<Thread> threadReference(thread, true); 2088 ThreadLocker threadLocker(thread, true); 2089 2090 InterruptsSpinLocker threadDebugInfoLocker( 2091 thread->debug_info.lock); 2092 2093 if (thread->team == thread_get_current_thread()->team) { 2094 thread_debug_info &threadDebugInfo = thread->debug_info; 2095 // set ignore mask 2096 switch (ignoreOp) { 2097 case B_DEBUG_SIGNAL_MASK_AND: 2098 threadDebugInfo.ignore_signals &= ignore; 2099 break; 2100 case B_DEBUG_SIGNAL_MASK_OR: 2101 threadDebugInfo.ignore_signals |= ignore; 2102 break; 2103 case B_DEBUG_SIGNAL_MASK_SET: 2104 threadDebugInfo.ignore_signals = ignore; 2105 break; 2106 } 2107 2108 // set ignore once mask 2109 switch (ignoreOnceOp) { 2110 case B_DEBUG_SIGNAL_MASK_AND: 2111 threadDebugInfo.ignore_signals_once &= ignoreOnce; 2112 break; 2113 case B_DEBUG_SIGNAL_MASK_OR: 2114 threadDebugInfo.ignore_signals_once |= ignoreOnce; 2115 break; 2116 case B_DEBUG_SIGNAL_MASK_SET: 2117 threadDebugInfo.ignore_signals_once = ignoreOnce; 2118 break; 2119 } 2120 } 2121 2122 break; 2123 } 2124 2125 case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS: 2126 { 2127 // get the parameters 2128 replyPort = message.get_signal_masks.reply_port; 2129 thread_id threadID = message.get_signal_masks.thread; 2130 status_t result = B_OK; 2131 2132 // get the masks 2133 uint64 ignore = 0; 2134 uint64 ignoreOnce = 0; 2135 2136 Thread* thread = Thread::GetAndLock(threadID); 2137 if (thread != NULL) { 2138 BReference<Thread> threadReference(thread, true); 2139 ThreadLocker threadLocker(thread, true); 2140 2141 InterruptsSpinLocker threadDebugInfoLocker( 2142 thread->debug_info.lock); 2143 2144 ignore = thread->debug_info.ignore_signals; 2145 ignoreOnce = thread->debug_info.ignore_signals_once; 2146 } else 2147 result = B_BAD_THREAD_ID; 2148 2149 TRACE(("nub thread %" 
B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_MASKS" 2150 ": reply port: %" B_PRId32 ", thread: %" B_PRId32 ", " 2151 "ignore: %" B_PRIx64 ", ignore once: %" B_PRIx64 ", result: " 2152 "%" B_PRIx32 "\n", nubThread->id, replyPort, threadID, 2153 ignore, ignoreOnce, result)); 2154 2155 // prepare the message 2156 reply.get_signal_masks.error = result; 2157 reply.get_signal_masks.ignore_mask = ignore; 2158 reply.get_signal_masks.ignore_once_mask = ignoreOnce; 2159 replySize = sizeof(reply.get_signal_masks); 2160 sendReply = true; 2161 break; 2162 } 2163 2164 case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER: 2165 { 2166 // get the parameters 2167 int signal = message.set_signal_handler.signal; 2168 struct sigaction &handler = message.set_signal_handler.handler; 2169 2170 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER" 2171 ": signal: %d, handler: %p\n", nubThread->id, signal, 2172 handler.sa_handler)); 2173 2174 // set the handler 2175 sigaction(signal, &handler, NULL); 2176 2177 break; 2178 } 2179 2180 case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER: 2181 { 2182 // get the parameters 2183 replyPort = message.get_signal_handler.reply_port; 2184 int signal = message.get_signal_handler.signal; 2185 status_t result = B_OK; 2186 2187 // get the handler 2188 if (sigaction(signal, NULL, &reply.get_signal_handler.handler) 2189 != 0) { 2190 result = errno; 2191 } 2192 2193 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER" 2194 ": reply port: %" B_PRId32 ", signal: %d, handler: %p\n", 2195 nubThread->id, replyPort, signal, 2196 reply.get_signal_handler.handler.sa_handler)); 2197 2198 // prepare the message 2199 reply.get_signal_handler.error = result; 2200 replySize = sizeof(reply.get_signal_handler); 2201 sendReply = true; 2202 break; 2203 } 2204 2205 case B_DEBUG_MESSAGE_PREPARE_HANDOVER: 2206 { 2207 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_PREPARE_HANDOVER" 2208 "\n", nubThread->id)); 2209 2210 Team *team = nubThread->team; 2211 2212 // Acquire the debugger write lock. As soon as we have it and 2213 // have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread 2214 // will write anything to the debugger port anymore. 2215 status_t result = acquire_sem_etc(writeLock, 1, 2216 B_KILL_CAN_INTERRUPT, 0); 2217 if (result == B_OK) { 2218 // set the respective team debug flag 2219 cpu_status state = disable_interrupts(); 2220 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2221 2222 atomic_or(&team->debug_info.flags, 2223 B_TEAM_DEBUG_DEBUGGER_HANDOVER); 2224 BreakpointManager* breakpointManager 2225 = team->debug_info.breakpoint_manager; 2226 2227 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2228 restore_interrupts(state); 2229 2230 // remove all installed breakpoints 2231 breakpointManager->RemoveAllBreakpoints(); 2232 2233 release_sem(writeLock); 2234 } else { 2235 // We probably got a SIGKILL. If so, we will terminate when 2236 // reading the next message fails. 
2237 } 2238 2239 break; 2240 } 2241 2242 case B_DEBUG_MESSAGE_HANDED_OVER: 2243 { 2244 // notify all threads that the debugger has changed 2245 broadcast_debugged_thread_message(nubThread, 2246 B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0); 2247 2248 break; 2249 } 2250 2251 case B_DEBUG_START_PROFILER: 2252 { 2253 // get the parameters 2254 thread_id threadID = message.start_profiler.thread; 2255 replyPort = message.start_profiler.reply_port; 2256 area_id sampleArea = message.start_profiler.sample_area; 2257 int32 stackDepth = message.start_profiler.stack_depth; 2258 bool variableStackDepth 2259 = message.start_profiler.variable_stack_depth; 2260 bigtime_t interval = max_c(message.start_profiler.interval, 2261 B_DEBUG_MIN_PROFILE_INTERVAL); 2262 status_t result = B_OK; 2263 2264 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_START_PROFILER: " 2265 "thread: %" B_PRId32 ", sample area: %" B_PRId32 "\n", 2266 nubThread->id, threadID, sampleArea)); 2267 2268 if (stackDepth < 1) 2269 stackDepth = 1; 2270 else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH) 2271 stackDepth = B_DEBUG_STACK_TRACE_DEPTH; 2272 2273 // provision for an extra entry per hit (for the number of 2274 // samples), if variable stack depth 2275 if (variableStackDepth) 2276 stackDepth++; 2277 2278 // clone the sample area 2279 area_info areaInfo; 2280 if (result == B_OK) 2281 result = get_area_info(sampleArea, &areaInfo); 2282 2283 area_id clonedSampleArea = -1; 2284 void* samples = NULL; 2285 if (result == B_OK) { 2286 clonedSampleArea = clone_area("profiling samples", &samples, 2287 B_ANY_KERNEL_ADDRESS, 2288 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 2289 sampleArea); 2290 if (clonedSampleArea >= 0) { 2291 // we need the memory locked 2292 result = lock_memory(samples, areaInfo.size, 2293 B_READ_DEVICE); 2294 if (result != B_OK) { 2295 delete_area(clonedSampleArea); 2296 clonedSampleArea = -1; 2297 } 2298 } else 2299 result = clonedSampleArea; 2300 } 2301 2302 // get the thread and set the profile info 2303 int32 imageEvent = nubThread->team->debug_info.image_event; 2304 if (result == B_OK) { 2305 Thread* thread = Thread::GetAndLock(threadID); 2306 BReference<Thread> threadReference(thread, true); 2307 ThreadLocker threadLocker(thread, true); 2308 2309 if (thread != NULL && thread->team == nubThread->team) { 2310 thread_debug_info &threadDebugInfo = thread->debug_info; 2311 2312 InterruptsSpinLocker threadDebugInfoLocker( 2313 threadDebugInfo.lock); 2314 2315 if (threadDebugInfo.profile.samples == NULL) { 2316 threadDebugInfo.profile.interval = interval; 2317 threadDebugInfo.profile.sample_area 2318 = clonedSampleArea; 2319 threadDebugInfo.profile.samples = (addr_t*)samples; 2320 threadDebugInfo.profile.max_samples 2321 = areaInfo.size / sizeof(addr_t); 2322 threadDebugInfo.profile.flush_threshold 2323 = threadDebugInfo.profile.max_samples 2324 * B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD 2325 / 100; 2326 threadDebugInfo.profile.sample_count = 0; 2327 threadDebugInfo.profile.dropped_ticks = 0; 2328 threadDebugInfo.profile.stack_depth = stackDepth; 2329 threadDebugInfo.profile.variable_stack_depth 2330 = variableStackDepth; 2331 threadDebugInfo.profile.buffer_full = false; 2332 threadDebugInfo.profile.interval_left = interval; 2333 threadDebugInfo.profile.installed_timer = NULL; 2334 threadDebugInfo.profile.image_event = imageEvent; 2335 threadDebugInfo.profile.last_image_event 2336 = imageEvent; 2337 } else 2338 result = B_BAD_VALUE; 2339 } else 2340 result = B_BAD_THREAD_ID; 2341 } 2342 2343 // on error unlock and delete the sample area 
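// (nothing else needs to be torn down: the profile info is only attached to the thread if every step above succeeded)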
2344 if (result != B_OK) { 2345 if (clonedSampleArea >= 0) { 2346 unlock_memory(samples, areaInfo.size, B_READ_DEVICE); 2347 delete_area(clonedSampleArea); 2348 } 2349 } 2350 2351 // send a reply to the debugger 2352 reply.start_profiler.error = result; 2353 reply.start_profiler.interval = interval; 2354 reply.start_profiler.image_event = imageEvent; 2355 sendReply = true; 2356 replySize = sizeof(reply.start_profiler); 2357 2358 break; 2359 } 2360 2361 case B_DEBUG_STOP_PROFILER: 2362 { 2363 // get the parameters 2364 thread_id threadID = message.stop_profiler.thread; 2365 replyPort = message.stop_profiler.reply_port; 2366 status_t result = B_OK; 2367 2368 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_STOP_PROFILER: " 2369 "thread: %" B_PRId32 "\n", nubThread->id, threadID)); 2370 2371 area_id sampleArea = -1; 2372 addr_t* samples = NULL; 2373 int32 sampleCount = 0; 2374 int32 stackDepth = 0; 2375 bool variableStackDepth = false; 2376 int32 imageEvent = 0; 2377 int32 droppedTicks = 0; 2378 2379 // get the thread and detach the profile info 2380 Thread* thread = Thread::GetAndLock(threadID); 2381 BReference<Thread> threadReference(thread, true); 2382 ThreadLocker threadLocker(thread, true); 2383 2384 if (thread && thread->team == nubThread->team) { 2385 thread_debug_info &threadDebugInfo = thread->debug_info; 2386 2387 InterruptsSpinLocker threadDebugInfoLocker( 2388 threadDebugInfo.lock); 2389 2390 if (threadDebugInfo.profile.samples != NULL) { 2391 sampleArea = threadDebugInfo.profile.sample_area; 2392 samples = threadDebugInfo.profile.samples; 2393 sampleCount = threadDebugInfo.profile.sample_count; 2394 droppedTicks = threadDebugInfo.profile.dropped_ticks; 2395 stackDepth = threadDebugInfo.profile.stack_depth; 2396 variableStackDepth 2397 = threadDebugInfo.profile.variable_stack_depth; 2398 imageEvent = threadDebugInfo.profile.image_event; 2399 threadDebugInfo.profile.sample_area = -1; 2400 threadDebugInfo.profile.samples = NULL; 2401 threadDebugInfo.profile.buffer_full = false; 2402 threadDebugInfo.profile.dropped_ticks = 0; 2403 } else 2404 result = B_BAD_VALUE; 2405 } else 2406 result = B_BAD_THREAD_ID; 2407 2408 threadLocker.Unlock(); 2409 2410 // prepare the reply 2411 if (result == B_OK) { 2412 reply.profiler_update.origin.thread = threadID; 2413 reply.profiler_update.image_event = imageEvent; 2414 reply.profiler_update.stack_depth = stackDepth; 2415 reply.profiler_update.variable_stack_depth 2416 = variableStackDepth; 2417 reply.profiler_update.sample_count = sampleCount; 2418 reply.profiler_update.dropped_ticks = droppedTicks; 2419 reply.profiler_update.stopped = true; 2420 } else 2421 reply.profiler_update.origin.thread = result; 2422 2423 replySize = sizeof(debug_profiler_update); 2424 sendReply = true; 2425 2426 if (sampleArea >= 0) { 2427 area_info areaInfo; 2428 if (get_area_info(sampleArea, &areaInfo) == B_OK) { 2429 unlock_memory(samples, areaInfo.size, B_READ_DEVICE); 2430 delete_area(sampleArea); 2431 } 2432 } 2433 2434 break; 2435 } 2436 2437 case B_DEBUG_WRITE_CORE_FILE: 2438 { 2439 // get the parameters 2440 replyPort = message.write_core_file.reply_port; 2441 char* path = message.write_core_file.path; 2442 path[sizeof(message.write_core_file.path) - 1] = '\0'; 2443 2444 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_WRITE_CORE_FILE" 2445 ": path: %s\n", nubThread->id, path)); 2446 2447 // write the core file 2448 status_t result = core_dump_write_core_file(path, false); 2449 2450 // prepare the reply 2451 reply.write_core_file.error = result; 2452 replySize = 
sizeof(reply.write_core_file); 2453 sendReply = true; 2454 2455 break; 2456 } 2457 } 2458 2459 // send the reply, if necessary 2460 if (sendReply) { 2461 status_t error = kill_interruptable_write_port(replyPort, command, 2462 &reply, replySize); 2463 2464 if (error != B_OK) { 2465 // The debugger port either no longer exists or we got 2466 // interrupted by a kill signal. In either case we terminate. 2467 TRACE(("nub thread %" B_PRId32 ": failed to send reply to port " 2468 "%" B_PRId32 ": %s\n", nubThread->id, replyPort, 2469 strerror(error))); 2470 2471 nub_thread_cleanup(nubThread); 2472 return error; 2473 } 2474 } 2475 } 2476 } 2477 2478 2479 /** \brief Helper function for install_team_debugger() that sets up the team 2480 and thread debug infos. 2481 2482 The caller must hold the team's lock as well as the team debug info lock. 2483 2484 The function also clears the arch-specific team and thread debug infos 2485 (including among other things formerly set break/watchpoints). 2486 */ 2487 static void 2488 install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam, 2489 port_id debuggerPort, port_id nubPort, thread_id nubThread, 2490 sem_id debuggerPortWriteLock, thread_id causingThread) 2491 { 2492 atomic_set(&team->debug_info.flags, 2493 B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED); 2494 team->debug_info.nub_port = nubPort; 2495 team->debug_info.nub_thread = nubThread; 2496 team->debug_info.debugger_team = debuggerTeam; 2497 team->debug_info.debugger_port = debuggerPort; 2498 team->debug_info.debugger_write_lock = debuggerPortWriteLock; 2499 team->debug_info.causing_thread = causingThread; 2500 2501 arch_clear_team_debug_info(&team->debug_info.arch_info); 2502 2503 // set the user debug flags and signal masks of all threads to the default 2504 for (Thread *thread = team->thread_list; thread; 2505 thread = thread->team_next) { 2506 SpinLocker threadDebugInfoLocker(thread->debug_info.lock); 2507 2508 if (thread->id == nubThread) { 2509 atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD); 2510 } else { 2511 int32 flags = thread->debug_info.flags 2512 & ~B_THREAD_DEBUG_USER_FLAG_MASK; 2513 atomic_set(&thread->debug_info.flags, 2514 flags | B_THREAD_DEBUG_DEFAULT_FLAGS); 2515 thread->debug_info.ignore_signals = 0; 2516 thread->debug_info.ignore_signals_once = 0; 2517 2518 arch_clear_thread_debug_info(&thread->debug_info.arch_info); 2519 } 2520 } 2521 2522 // update the thread::flags fields 2523 update_threads_debugger_installed_flag(team); 2524 } 2525 2526 2527 static port_id 2528 install_team_debugger(team_id teamID, port_id debuggerPort, 2529 thread_id causingThread, bool useDefault, bool dontReplace) 2530 { 2531 TRACE(("install_team_debugger(team: %" B_PRId32 ", port: %" B_PRId32 ", " 2532 "default: %d, dontReplace: %d)\n", teamID, debuggerPort, useDefault, 2533 dontReplace)); 2534 2535 if (useDefault) 2536 debuggerPort = atomic_get(&sDefaultDebuggerPort); 2537 2538 // get the debugger team 2539 port_info debuggerPortInfo; 2540 status_t error = get_port_info(debuggerPort, &debuggerPortInfo); 2541 if (error != B_OK) { 2542 TRACE(("install_team_debugger(): Failed to get debugger port info: " 2543 "%" B_PRIx32 "\n", error)); 2544 return error; 2545 } 2546 team_id debuggerTeam = debuggerPortInfo.team; 2547 2548 // Check the debugger team: It must neither be the kernel team nor the 2549 // debugged team.
2550 if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) { 2551 TRACE(("install_team_debugger(): Can't debug kernel or debugger team. " 2552 "debugger: %" B_PRId32 ", debugged: %" B_PRId32 "\n", debuggerTeam, 2553 teamID)); 2554 return B_NOT_ALLOWED; 2555 } 2556 2557 // get the team 2558 Team* team; 2559 ConditionVariable debugChangeCondition; 2560 debugChangeCondition.Init(NULL, "debug change condition"); 2561 error = prepare_debugger_change(teamID, debugChangeCondition, team); 2562 if (error != B_OK) 2563 return error; 2564 2565 // get the real team ID 2566 teamID = team->id; 2567 2568 // check, if a debugger is already installed 2569 2570 bool done = false; 2571 port_id result = B_ERROR; 2572 bool handOver = false; 2573 port_id oldDebuggerPort = -1; 2574 port_id nubPort = -1; 2575 2576 TeamLocker teamLocker(team); 2577 cpu_status state = disable_interrupts(); 2578 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2579 2580 int32 teamDebugFlags = team->debug_info.flags; 2581 2582 if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) { 2583 // There's already a debugger installed. 2584 if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) { 2585 if (dontReplace) { 2586 // We're fine with already having a debugger. 2587 error = B_OK; 2588 done = true; 2589 result = team->debug_info.nub_port; 2590 } else { 2591 // a handover to another debugger is requested 2592 // Set the handing-over flag -- we'll clear both flags after 2593 // having sent the handed-over message to the new debugger. 2594 atomic_or(&team->debug_info.flags, 2595 B_TEAM_DEBUG_DEBUGGER_HANDING_OVER); 2596 2597 oldDebuggerPort = team->debug_info.debugger_port; 2598 result = nubPort = team->debug_info.nub_port; 2599 if (causingThread < 0) 2600 causingThread = team->debug_info.causing_thread; 2601 2602 // set the new debugger 2603 install_team_debugger_init_debug_infos(team, debuggerTeam, 2604 debuggerPort, nubPort, team->debug_info.nub_thread, 2605 team->debug_info.debugger_write_lock, causingThread); 2606 2607 handOver = true; 2608 done = true; 2609 } 2610 } else { 2611 // there's already a debugger installed 2612 error = (dontReplace ? B_OK : B_BAD_VALUE); 2613 done = true; 2614 result = team->debug_info.nub_port; 2615 } 2616 } else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0 2617 && useDefault) { 2618 // No debugger yet, disable_debugger() had been invoked, and we 2619 // would install the default debugger. Just fail. 2620 error = B_BAD_VALUE; 2621 } 2622 2623 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2624 restore_interrupts(state); 2625 teamLocker.Unlock(); 2626 2627 if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) { 2628 // The old debugger must just have died. Just proceed as 2629 // if there was no debugger installed. We may still be too 2630 // early, in which case we'll fail, but this race condition 2631 // should be unbelievably rare and relatively harmless. 
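// Resetting handOver and done below lets the code fall through to the regular installation path, which creates a fresh write lock, nub port, and nub thread.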
2632 handOver = false; 2633 done = false; 2634 } 2635 2636 if (handOver) { 2637 // prepare the handed-over message 2638 debug_handed_over notification; 2639 notification.origin.thread = -1; 2640 notification.origin.team = teamID; 2641 notification.origin.nub_port = nubPort; 2642 notification.debugger = debuggerTeam; 2643 notification.debugger_port = debuggerPort; 2644 notification.causing_thread = causingThread; 2645 2646 // notify the new debugger 2647 error = write_port_etc(debuggerPort, 2648 B_DEBUGGER_MESSAGE_HANDED_OVER, ¬ification, 2649 sizeof(notification), B_RELATIVE_TIMEOUT, 0); 2650 if (error != B_OK) { 2651 dprintf("install_team_debugger(): Failed to send message to new " 2652 "debugger: %s\n", strerror(error)); 2653 } 2654 2655 // clear the handed-over and handing-over flags 2656 state = disable_interrupts(); 2657 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2658 2659 atomic_and(&team->debug_info.flags, 2660 ~(B_TEAM_DEBUG_DEBUGGER_HANDOVER 2661 | B_TEAM_DEBUG_DEBUGGER_HANDING_OVER)); 2662 2663 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2664 restore_interrupts(state); 2665 2666 finish_debugger_change(team); 2667 2668 // notify the nub thread 2669 kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER, 2670 NULL, 0); 2671 2672 // notify the old debugger 2673 error = write_port_etc(oldDebuggerPort, 2674 B_DEBUGGER_MESSAGE_HANDED_OVER, ¬ification, 2675 sizeof(notification), B_RELATIVE_TIMEOUT, 0); 2676 if (error != B_OK) { 2677 TRACE(("install_team_debugger(): Failed to send message to old " 2678 "debugger: %s\n", strerror(error))); 2679 } 2680 2681 TRACE(("install_team_debugger() done: handed over to debugger: team: " 2682 "%" B_PRId32 ", port: %" B_PRId32 "\n", debuggerTeam, 2683 debuggerPort)); 2684 2685 return result; 2686 } 2687 2688 if (done || error != B_OK) { 2689 TRACE(("install_team_debugger() done1: %" B_PRId32 "\n", 2690 (error == B_OK ? result : error))); 2691 finish_debugger_change(team); 2692 return (error == B_OK ? 
result : error); 2693 } 2694 2695 // create the debugger write lock semaphore 2696 char nameBuffer[B_OS_NAME_LENGTH]; 2697 snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debugger port " 2698 "write", teamID); 2699 sem_id debuggerWriteLock = create_sem(1, nameBuffer); 2700 if (debuggerWriteLock < 0) 2701 error = debuggerWriteLock; 2702 2703 // create the nub port 2704 snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug", teamID); 2705 if (error == B_OK) { 2706 nubPort = create_port(1, nameBuffer); 2707 if (nubPort < 0) 2708 error = nubPort; 2709 else 2710 result = nubPort; 2711 } 2712 2713 // make the debugger team the port owner; thus we know, if the debugger is 2714 // gone and can cleanup 2715 if (error == B_OK) 2716 error = set_port_owner(nubPort, debuggerTeam); 2717 2718 // create the breakpoint manager 2719 BreakpointManager* breakpointManager = NULL; 2720 if (error == B_OK) { 2721 breakpointManager = new(std::nothrow) BreakpointManager; 2722 if (breakpointManager != NULL) 2723 error = breakpointManager->Init(); 2724 else 2725 error = B_NO_MEMORY; 2726 } 2727 2728 // spawn the nub thread 2729 thread_id nubThread = -1; 2730 if (error == B_OK) { 2731 snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug task", 2732 teamID); 2733 nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer, 2734 B_NORMAL_PRIORITY, NULL, teamID); 2735 if (nubThread < 0) 2736 error = nubThread; 2737 } 2738 2739 // now adjust the debug info accordingly 2740 if (error == B_OK) { 2741 TeamLocker teamLocker(team); 2742 state = disable_interrupts(); 2743 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2744 2745 team->debug_info.breakpoint_manager = breakpointManager; 2746 install_team_debugger_init_debug_infos(team, debuggerTeam, 2747 debuggerPort, nubPort, nubThread, debuggerWriteLock, 2748 causingThread); 2749 2750 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2751 restore_interrupts(state); 2752 } 2753 2754 finish_debugger_change(team); 2755 2756 // if everything went fine, resume the nub thread, otherwise clean up 2757 if (error == B_OK) { 2758 resume_thread(nubThread); 2759 } else { 2760 // delete port and terminate thread 2761 if (nubPort >= 0) { 2762 set_port_owner(nubPort, B_CURRENT_TEAM); 2763 delete_port(nubPort); 2764 } 2765 if (nubThread >= 0) { 2766 int32 result; 2767 wait_for_thread(nubThread, &result); 2768 } 2769 2770 delete breakpointManager; 2771 } 2772 2773 TRACE(("install_team_debugger() done2: %" B_PRId32 "\n", 2774 (error == B_OK ? result : error))); 2775 return (error == B_OK ? result : error); 2776 } 2777 2778 2779 static status_t 2780 ensure_debugger_installed() 2781 { 2782 port_id port = install_team_debugger(B_CURRENT_TEAM, -1, 2783 thread_get_current_thread_id(), true, true); 2784 return port >= 0 ? B_OK : port; 2785 } 2786 2787 2788 // #pragma mark - 2789 2790 2791 void 2792 _user_debugger(const char *userMessage) 2793 { 2794 // install the default debugger, if there is none yet 2795 status_t error = ensure_debugger_installed(); 2796 if (error != B_OK) { 2797 // time to commit suicide 2798 char buffer[128]; 2799 ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer)); 2800 if (length >= 0) { 2801 dprintf("_user_debugger(): Failed to install debugger. Message is: " 2802 "`%s'\n", buffer); 2803 } else { 2804 dprintf("_user_debugger(): Failed to install debugger. 
Message is: " 2805 "%p (%s)\n", userMessage, strerror(length)); 2806 } 2807 _user_exit_team(1); 2808 } 2809 2810 // prepare the message 2811 debug_debugger_call message; 2812 message.message = (void*)userMessage; 2813 2814 thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message, 2815 sizeof(message), true); 2816 } 2817 2818 2819 int 2820 _user_disable_debugger(int state) 2821 { 2822 Team *team = thread_get_current_thread()->team; 2823 2824 TRACE(("_user_disable_debugger(%d): team: %" B_PRId32 "\n", state, 2825 team->id)); 2826 2827 cpu_status cpuState = disable_interrupts(); 2828 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2829 2830 int32 oldFlags; 2831 if (state) { 2832 oldFlags = atomic_or(&team->debug_info.flags, 2833 B_TEAM_DEBUG_DEBUGGER_DISABLED); 2834 } else { 2835 oldFlags = atomic_and(&team->debug_info.flags, 2836 ~B_TEAM_DEBUG_DEBUGGER_DISABLED); 2837 } 2838 2839 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2840 restore_interrupts(cpuState); 2841 2842 // TODO: Check, if the return value is really the old state. 2843 return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED); 2844 } 2845 2846 2847 status_t 2848 _user_install_default_debugger(port_id debuggerPort) 2849 { 2850 // Do not allow non-root processes to install a default debugger. 2851 if (geteuid() != 0) 2852 return B_PERMISSION_DENIED; 2853 2854 // if supplied, check whether the port is a valid port 2855 if (debuggerPort >= 0) { 2856 port_info portInfo; 2857 status_t error = get_port_info(debuggerPort, &portInfo); 2858 if (error != B_OK) 2859 return error; 2860 2861 // the debugger team must not be the kernel team 2862 if (portInfo.team == team_get_kernel_team_id()) 2863 return B_NOT_ALLOWED; 2864 } 2865 2866 atomic_set(&sDefaultDebuggerPort, debuggerPort); 2867 2868 return B_OK; 2869 } 2870 2871 2872 port_id 2873 _user_install_team_debugger(team_id teamID, port_id debuggerPort) 2874 { 2875 if (geteuid() != 0 && team_geteuid(teamID) != geteuid()) 2876 return B_PERMISSION_DENIED; 2877 2878 return install_team_debugger(teamID, debuggerPort, -1, false, false); 2879 } 2880 2881 2882 status_t 2883 _user_remove_team_debugger(team_id teamID) 2884 { 2885 Team* team; 2886 ConditionVariable debugChangeCondition; 2887 debugChangeCondition.Init(NULL, "debug change condition"); 2888 status_t error = prepare_debugger_change(teamID, debugChangeCondition, 2889 team); 2890 if (error != B_OK) 2891 return error; 2892 2893 InterruptsSpinLocker debugInfoLocker(team->debug_info.lock); 2894 2895 thread_id nubThread = -1; 2896 port_id nubPort = -1; 2897 2898 if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) { 2899 // there's a debugger installed 2900 nubThread = team->debug_info.nub_thread; 2901 nubPort = team->debug_info.nub_port; 2902 } else { 2903 // no debugger installed 2904 error = B_BAD_VALUE; 2905 } 2906 2907 debugInfoLocker.Unlock(); 2908 2909 // Delete the nub port -- this will cause the nub thread to terminate and 2910 // remove the debugger. 
2911 if (nubPort >= 0) 2912 delete_port(nubPort); 2913 2914 finish_debugger_change(team); 2915 2916 // wait for the nub thread 2917 if (nubThread >= 0) 2918 wait_for_thread(nubThread, NULL); 2919 2920 return error; 2921 } 2922 2923 2924 status_t 2925 _user_debug_thread(thread_id threadID) 2926 { 2927 TRACE(("[%" B_PRId32 "] _user_debug_thread(%" B_PRId32 ")\n", 2928 find_thread(NULL), threadID)); 2929 2930 // get the thread 2931 Thread* thread = Thread::GetAndLock(threadID); 2932 if (thread == NULL) 2933 return B_BAD_THREAD_ID; 2934 BReference<Thread> threadReference(thread, true); 2935 ThreadLocker threadLocker(thread, true); 2936 2937 // we can't debug the kernel team 2938 if (thread->team == team_get_kernel_team()) 2939 return B_NOT_ALLOWED; 2940 2941 InterruptsLocker interruptsLocker; 2942 SpinLocker threadDebugInfoLocker(thread->debug_info.lock); 2943 2944 // If the thread is already dying, it's too late to debug it. 2945 if ((thread->debug_info.flags & B_THREAD_DEBUG_DYING) != 0) 2946 return B_BAD_THREAD_ID; 2947 2948 // don't debug the nub thread 2949 if ((thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) != 0) 2950 return B_NOT_ALLOWED; 2951 2952 // already marked stopped or being told to stop? 2953 if ((thread->debug_info.flags 2954 & (B_THREAD_DEBUG_STOPPED | B_THREAD_DEBUG_STOP)) != 0) { 2955 return B_OK; 2956 } 2957 2958 // set the flag that tells the thread to stop as soon as possible 2959 atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP); 2960 2961 update_thread_user_debug_flag(thread); 2962 2963 // send the thread a SIGNAL_DEBUG_THREAD, so it is interrupted (or 2964 // continued) 2965 threadDebugInfoLocker.Unlock(); 2966 ReadSpinLocker teamLocker(thread->team_lock); 2967 SpinLocker locker(thread->team->signal_lock); 2968 2969 send_signal_to_thread_locked(thread, SIGNAL_DEBUG_THREAD, NULL, 0); 2970 2971 return B_OK; 2972 } 2973 2974 2975 void 2976 _user_wait_for_debugger(void) 2977 { 2978 debug_thread_debugged message = {}; 2979 thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message, 2980 sizeof(message), false); 2981 } 2982 2983 2984 status_t 2985 _user_set_debugger_breakpoint(void *address, uint32 type, int32 length, 2986 bool watchpoint) 2987 { 2988 // check the address and size 2989 if (address == NULL || !BreakpointManager::CanAccessAddress(address, false)) 2990 return B_BAD_ADDRESS; 2991 if (watchpoint && length < 0) 2992 return B_BAD_VALUE; 2993 2994 // check whether a debugger is installed already 2995 team_debug_info teamDebugInfo; 2996 get_team_debug_info(teamDebugInfo); 2997 if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) 2998 return B_BAD_VALUE; 2999 3000 // We can't help it, here's a small but relatively harmless race condition, 3001 // since a debugger could be installed in the meantime. The worst case is 3002 // that we install a break/watchpoint the debugger doesn't know about. 
3003 3004 // set the break/watchpoint 3005 status_t result; 3006 if (watchpoint) 3007 result = arch_set_watchpoint(address, type, length); 3008 else 3009 result = arch_set_breakpoint(address); 3010 3011 if (result == B_OK) 3012 update_threads_breakpoints_flag(); 3013 3014 return result; 3015 } 3016 3017 3018 status_t 3019 _user_clear_debugger_breakpoint(void *address, bool watchpoint) 3020 { 3021 // check the address 3022 if (address == NULL || !BreakpointManager::CanAccessAddress(address, false)) 3023 return B_BAD_ADDRESS; 3024 3025 // check whether a debugger is installed already 3026 team_debug_info teamDebugInfo; 3027 get_team_debug_info(teamDebugInfo); 3028 if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) 3029 return B_BAD_VALUE; 3030 3031 // We can't help it, here's a small but relatively harmless race condition, 3032 // since a debugger could be installed in the meantime. The worst case is 3033 // that we clear a break/watchpoint the debugger has just installed. 3034 3035 // clear the break/watchpoint 3036 status_t result; 3037 if (watchpoint) 3038 result = arch_clear_watchpoint(address); 3039 else 3040 result = arch_clear_breakpoint(address); 3041 3042 if (result == B_OK) 3043 update_threads_breakpoints_flag(); 3044 3045 return result; 3046 } 3047
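/* Example (illustrative sketch only, not part of the kernel build): how a
   userland debugger typically reaches the syscalls above. It assumes the
   public <debugger.h> API (install_team_debugger(), remove_team_debugger(),
   the debug_debugger_message_data union); the team ID and the per-message
   handling are placeholders.

	#include <debugger.h>
	#include <OS.h>

	static void
	debug_team_example(team_id team)
	{
		// the port on which this debugger receives messages from the team
		port_id debuggerPort = create_port(100, "example debugger port");
		if (debuggerPort < 0)
			return;

		// installs the nub thread in the target team; returns its nub port
		port_id nubPort = install_team_debugger(team, debuggerPort);
		if (nubPort < 0) {
			delete_port(debuggerPort);
			return;
		}

		// read debugger messages as the debugged team generates them
		debug_debugger_message_data message;
		int32 code;
		while (read_port(debuggerPort, &code, &message, sizeof(message)) >= 0) {
			if (code == B_DEBUGGER_MESSAGE_TEAM_DELETED)
				break;
			// ... inspect message.origin and continue the stopped thread by
			// sending the appropriate command to the nub port ...
		}

		remove_team_debugger(team);
		delete_port(debuggerPort);
	}
*/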