/*
 * Copyright 2005-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2015, Rene Gollent, rene@gollent.com.
 * Distributed under the terms of the MIT License.
 */


#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include <algorithm>

#include <arch/debug.h>
#include <arch/user_debugger.h>
#include <core_dump.h>
#include <cpu.h>
#include <debugger.h>
#include <kernel.h>
#include <KernelExport.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <ksyscalls.h>
#include <port.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <thread_types.h>
#include <user_debugger.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include <AutoDeleter.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>

#include "BreakpointManager.h"


//#define TRACE_USER_DEBUGGER
#ifdef TRACE_USER_DEBUGGER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


// TODO: Since the introduction of team_debug_info::debugger_changed_condition
// there's some potential for simplifications. E.g. clear_team_debug_info() and
// destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus
// arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()).


static port_id sDefaultDebuggerPort = -1;
	// accessed atomically

static timer sProfilingTimers[SMP_MAX_CPUS];
	// a profiling timer for each CPU -- used when a profiled thread is running
	// on that CPU


static void schedule_profiling_timer(Thread* thread, bigtime_t interval);
static int32 profiling_event(timer* unused);
static void profiling_flush(void*);

static status_t ensure_debugger_installed();
static void get_team_debug_info(team_debug_info &teamDebugInfo);


static inline status_t
kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
	size_t bufferSize)
{
	return write_port_etc(port, code, buffer, bufferSize, B_KILL_CAN_INTERRUPT,
		0);
}


static status_t
debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
	bool dontWait)
{
	TRACE(("debugger_write(): thread: %" B_PRId32 ", team %" B_PRId32 ", "
		"port: %" B_PRId32 ", code: %" B_PRIx32 ", message: %p, size: %lu, "
		"dontWait: %d\n", thread_get_current_thread()->id,
		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
		dontWait));

	status_t error = B_OK;

	// get the team debug info
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	sem_id writeLock = teamDebugInfo.debugger_write_lock;

	// get the write lock
	TRACE(("debugger_write(): acquiring write lock...\n"));
	error = acquire_sem_etc(writeLock, 1,
		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK) {
		TRACE(("debugger_write() done1: %" B_PRIx32 "\n", error));
		return error;
	}

	// re-get the team debug info
	get_team_debug_info(teamDebugInfo);

	if (teamDebugInfo.debugger_port != port
		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
		// The debugger has changed in the meantime or we are about to be
		// handed over to a new debugger. In either case we don't send the
		// message.
		TRACE(("debugger_write(): %s\n",
			(teamDebugInfo.debugger_port != port ?
"debugger port changed" 117 : "handover flag set"))); 118 } else { 119 TRACE(("debugger_write(): writing to port...\n")); 120 121 error = write_port_etc(port, code, buffer, bufferSize, 122 dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0); 123 } 124 125 // release the write lock 126 release_sem(writeLock); 127 128 TRACE(("debugger_write() done: %" B_PRIx32 "\n", error)); 129 130 return error; 131 } 132 133 134 /*! Updates the thread::flags field according to what user debugger flags are 135 set for the thread. 136 Interrupts must be disabled and the thread's debug info lock must be held. 137 */ 138 static void 139 update_thread_user_debug_flag(Thread* thread) 140 { 141 if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0) 142 atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD); 143 else 144 atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD); 145 } 146 147 148 /*! Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the 149 given thread. 150 Interrupts must be disabled and the thread debug info lock must be held. 151 */ 152 static void 153 update_thread_breakpoints_flag(Thread* thread) 154 { 155 Team* team = thread->team; 156 157 if (arch_has_breakpoints(&team->debug_info.arch_info)) 158 atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED); 159 else 160 atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED); 161 } 162 163 164 /*! Updates the Thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all 165 threads of the current team. 166 */ 167 static void 168 update_threads_breakpoints_flag() 169 { 170 Team* team = thread_get_current_thread()->team; 171 172 TeamLocker teamLocker(team); 173 174 Thread* thread = team->thread_list; 175 176 if (arch_has_breakpoints(&team->debug_info.arch_info)) { 177 for (; thread != NULL; thread = thread->team_next) 178 atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED); 179 } else { 180 for (; thread != NULL; thread = thread->team_next) 181 atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED); 182 } 183 } 184 185 186 /*! Updates the thread::flags B_TEAM_DEBUG_DEBUGGER_INSTALLED bit of the 187 given thread, which must be the current thread. 188 */ 189 static void 190 update_thread_debugger_installed_flag(Thread* thread) 191 { 192 Team* team = thread->team; 193 194 if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) 195 atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED); 196 else 197 atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED); 198 } 199 200 201 /*! Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all 202 threads of the given team. 203 The team's lock must be held. 204 */ 205 static void 206 update_threads_debugger_installed_flag(Team* team) 207 { 208 Thread* thread = team->thread_list; 209 210 if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) { 211 for (; thread != NULL; thread = thread->team_next) 212 atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED); 213 } else { 214 for (; thread != NULL; thread = thread->team_next) 215 atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED); 216 } 217 } 218 219 220 /** 221 * For the first initialization the function must be called with \a initLock 222 * set to \c true. If it would be possible that another thread accesses the 223 * structure at the same time, `lock' must be held when calling the function. 
 */
void
clear_team_debug_info(struct team_debug_info *info, bool initLock)
{
	if (info) {
		arch_clear_team_debug_info(&info->arch_info);
		atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->nub_thread = -1;
		info->nub_port = -1;
		info->debugger_write_lock = -1;
		info->causing_thread = -1;
		info->image_event = 0;
		info->breakpoint_manager = NULL;

		if (initLock) {
			B_INITIALIZE_SPINLOCK(&info->lock);
			info->debugger_changed_condition = NULL;
		}
	}
}

/**
 * `lock' must not be held nor may interrupts be disabled.
 * \a info must not be a member of a team struct (or the team struct must no
 * longer be accessible, i.e. the team should already be removed).
 *
 * In case the team is still accessible, the procedure is:
 * 1. get `lock'
 * 2. copy the team debug info on stack
 * 3. call clear_team_debug_info() on the team debug info
 * 4. release `lock'
 * 5. call destroy_team_debug_info() on the copied team debug info
 */
static void
destroy_team_debug_info(struct team_debug_info *info)
{
	if (info) {
		arch_destroy_team_debug_info(&info->arch_info);

		// delete the breakpoint manager
		delete info->breakpoint_manager;
		info->breakpoint_manager = NULL;

		// delete the debugger port write lock
		if (info->debugger_write_lock >= 0) {
			delete_sem(info->debugger_write_lock);
			info->debugger_write_lock = -1;
		}

		// delete the nub port
		if (info->nub_port >= 0) {
			set_port_owner(info->nub_port, B_CURRENT_TEAM);
			delete_port(info->nub_port);
			info->nub_port = -1;
		}

		// wait for the nub thread
		if (info->nub_thread >= 0) {
			if (info->nub_thread != thread_get_current_thread()->id) {
				int32 result;
				wait_for_thread(info->nub_thread, &result);
			}

			info->nub_thread = -1;
		}

		atomic_set(&info->flags, 0);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->causing_thread = -1;
		info->image_event = -1;
	}
}


void
init_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		B_INITIALIZE_SPINLOCK(&info->lock);
		arch_clear_thread_debug_info(&info->arch_info);
		info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.flush_needed = false;
		info->profile.installed_timer = NULL;
	}
}


/*!	Clears the debug info for the current thread.
	Invoked with thread debug info lock being held.
*/
void
clear_thread_debug_info(struct thread_debug_info *info, bool dying)
{
	if (info) {
		// cancel profiling timer
		if (info->profile.installed_timer != NULL) {
			cancel_timer(info->profile.installed_timer);
			info->profile.installed_timer->hook = NULL;
			info->profile.installed_timer = NULL;
		}

		arch_clear_thread_debug_info(&info->arch_info);
		atomic_set(&info->flags,
			B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ?
				B_THREAD_DEBUG_DYING : 0));
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.flush_needed = false;
	}
}


void
destroy_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		area_id sampleArea = info->profile.sample_area;
		if (sampleArea >= 0) {
			area_info areaInfo;
			if (get_area_info(sampleArea, &areaInfo) == B_OK) {
				unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
				delete_area(sampleArea);
			}
		}

		arch_destroy_thread_debug_info(&info->arch_info);

		if (info->debug_port >= 0) {
			delete_port(info->debug_port);
			info->debug_port = -1;
		}

		info->ignore_signals = 0;
		info->ignore_signals_once = 0;

		atomic_set(&info->flags, 0);
	}
}


static status_t
prepare_debugger_change(team_id teamID, ConditionVariable& condition,
	Team*& team)
{
	// We look up the team by ID, even in case of the current team, so we can
	// be sure that the team is not already dying.
	if (teamID == B_CURRENT_TEAM)
		teamID = thread_get_current_thread()->team->id;

	while (true) {
		// get the team
		team = Team::GetAndLock(teamID);
		if (team == NULL)
			return B_BAD_TEAM_ID;
		BReference<Team> teamReference(team, true);
		TeamLocker teamLocker(team, true);

		// don't allow messing with the kernel team
		if (team == team_get_kernel_team())
			return B_NOT_ALLOWED;

		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return B_OK;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();
		teamLocker.Unlock();

		entry.Wait();
	}
}


static void
prepare_debugger_change(Team* team, ConditionVariable& condition)
{
	while (true) {
		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();

		entry.Wait();
	}
}


static void
finish_debugger_change(Team* team)
{
	// unset our condition variable and notify all threads waiting on it
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
	team->debug_info.debugger_changed_condition = NULL;

	condition->NotifyAll();
}


void
user_debug_prepare_for_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized, change the ownership of the debug port for the thread
	// to the kernel team, since exec_team() deletes all ports owned by this
	// team.
	// We change the ownership back later.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if ((thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team_get_kernel_team_id());
	}
}


void
user_debug_finish_after_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized for this thread, change the ownership of its debug port
	// back to this team.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team->id);
	}
}


void
init_user_debug()
{
#ifdef ARCH_INIT_USER_DEBUG
	ARCH_INIT_USER_DEBUG();
#endif
}


static void
get_team_debug_info(team_debug_info &teamDebugInfo)
{
	Thread *thread = thread_get_current_thread();

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);

	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));

	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);
}


static status_t
thread_hit_debug_event_internal(debug_debugger_message event,
	const void *message, int32 size, bool requireDebugger, bool &restart)
{
	restart = false;
	Thread *thread = thread_get_current_thread();

	TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", event: %" B_PRIu32
		", message: %p, size: %" B_PRId32 "\n", thread->id, (uint32)event,
		message, size));

	// check, if there's a debug port already
	bool setPort = !(atomic_get(&thread->debug_info.flags)
		& B_THREAD_DEBUG_INITIALIZED);

	// create a port, if there is none yet
	port_id port = -1;
	if (setPort) {
		char nameBuffer[128];
		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %" B_PRId32,
			thread->id);

		port = create_port(1, nameBuffer);
		if (port < 0) {
			dprintf("thread_hit_debug_event(): Failed to create debug port: "
				"%s\n", strerror(port));
			return port;
		}
	}

	// check the debug info structures once more: get the debugger port, set
	// the thread's debug port, and update the thread's debug flags
	port_id deletePort = port;
	port_id debuggerPort = -1;
	port_id nubPort = -1;
	status_t error = B_OK;
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	uint32 threadFlags = thread->debug_info.flags;
	threadFlags &= ~B_THREAD_DEBUG_STOP;
	bool debuggerInstalled
		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	if (thread->id == thread->team->debug_info.nub_thread) {
		// Ugh, we're the nub
		// thread. We shouldn't be here.
		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %" B_PRId32
			"\n", thread->id));

		error = B_ERROR;
	} else if (debuggerInstalled || !requireDebugger) {
		if (debuggerInstalled) {
			debuggerPort = thread->team->debug_info.debugger_port;
			nubPort = thread->team->debug_info.nub_port;
		}

		if (setPort) {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				// someone created a port for us (the port we've created will
				// be deleted below)
				port = thread->debug_info.debug_port;
			} else {
				thread->debug_info.debug_port = port;
				deletePort = -1;	// keep the port
				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
			}
		} else {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				port = thread->debug_info.debug_port;
			} else {
				// someone deleted our port
				error = B_ERROR;
			}
		}
	} else
		error = B_ERROR;

	// update the flags
	if (error == B_OK)
		threadFlags |= B_THREAD_DEBUG_STOPPED;
	atomic_set(&thread->debug_info.flags, threadFlags);

	update_thread_user_debug_flag(thread);

	threadDebugInfoLocker.Unlock();
	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);

	// delete the superfluous port
	if (deletePort >= 0)
		delete_port(deletePort);

	if (error != B_OK) {
		TRACE(("thread_hit_debug_event() error: thread: %" B_PRId32 ", error: "
			"%" B_PRIx32 "\n", thread->id, error));
		return error;
	}

	// send a message to the debugger port
	if (debuggerInstalled) {
		// update the message's origin info first
		debug_origin *origin = (debug_origin *)message;
		origin->thread = thread->id;
		origin->team = thread->team->id;
		origin->nub_port = nubPort;

		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", sending "
			"message to debugger port %" B_PRId32 "\n", thread->id,
			debuggerPort));

		error = debugger_write(debuggerPort, event, message, size, false);
	}

	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
	bool singleStep = false;

	if (error == B_OK) {
		bool done = false;
		while (!done) {
			// read a command from the debug port
			int32 command;
			debugged_thread_message_data commandMessage;
			ssize_t commandMessageSize = read_port_etc(port, &command,
				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
				0);

			if (commandMessageSize < 0) {
				error = commandMessageSize;
				TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed "
					"to receive message from port %" B_PRId32 ": %" B_PRIx32 "\n",
					thread->id, port, error));
				break;
			}

			switch (command) {
				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
						thread->id));
					result = commandMessage.continue_thread.handle_event;

					singleStep = commandMessage.continue_thread.single_step;
					done = true;
					break;

				case B_DEBUGGED_THREAD_SET_CPU_STATE:
				{
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
						thread->id));
					arch_set_debug_cpu_state(
						&commandMessage.set_cpu_state.cpu_state);

					break;
				}

				case B_DEBUGGED_THREAD_GET_CPU_STATE:
				{
					port_id replyPort = commandMessage.get_cpu_state.reply_port;

					// prepare the message
					debug_nub_get_cpu_state_reply replyMessage;
					replyMessage.error = B_OK;
					replyMessage.message = event;
					arch_get_debug_cpu_state(&replyMessage.cpu_state);

					// send it
					error = kill_interruptable_write_port(replyPort, event,
						&replyMessage, sizeof(replyMessage));

					break;
				}

				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
				{
					// Check, if the debugger really changed, i.e. is different
					// than the one we know.
					team_debug_info teamDebugInfo;
					get_team_debug_info(teamDebugInfo);

					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
						if (!debuggerInstalled
							|| teamDebugInfo.debugger_port != debuggerPort) {
							// debugger was installed or has changed: restart
							// this function
							restart = true;
							done = true;
						}
					} else {
						if (debuggerInstalled) {
							// debugger is gone: continue the thread normally
							done = true;
						}
					}

					break;
				}
			}
		}
	} else {
		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed to send "
			"message to debugger port %" B_PRId32 ": %" B_PRIx32 "\n",
			thread->id, debuggerPort, error));
	}

	// update the thread debug info
	bool destroyThreadInfo = false;
	thread_debug_info threadDebugInfo;

	state = disable_interrupts();
	threadDebugInfoLocker.Lock();

	// check, if the team is still being debugged
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// update the single-step flag
		if (singleStep) {
			atomic_or(&thread->debug_info.flags,
				B_THREAD_DEBUG_SINGLE_STEP);
			atomic_or(&thread->flags, THREAD_FLAGS_SINGLE_STEP);
		} else {
			atomic_and(&thread->debug_info.flags,
				~(int32)B_THREAD_DEBUG_SINGLE_STEP);
		}

		// unset the "stopped" state
		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);

		update_thread_user_debug_flag(thread);
	} else {
		// the debugger is gone: cleanup our info completely
		threadDebugInfo = thread->debug_info;
		clear_thread_debug_info(&thread->debug_info, false);
		destroyThreadInfo = true;
	}

	threadDebugInfoLocker.Unlock();
	restore_interrupts(state);

	// enable/disable single stepping
	arch_update_thread_single_step();

	if (destroyThreadInfo)
		destroy_thread_debug_info(&threadDebugInfo);

	return (error == B_OK ? result : error);
}


static status_t
thread_hit_debug_event(debug_debugger_message event, const void *message,
	int32 size, bool requireDebugger)
{
	status_t result;
	bool restart;
	do {
		restart = false;
		result = thread_hit_debug_event_internal(event, message, size,
			requireDebugger, restart);
	} while (result >= 0 && restart);

	// Prepare to continue -- we install a debugger change condition, so no
	// one will change the debugger while we're playing with the breakpoint
	// manager.
	// TODO: Maybe better use ref-counting and a flag in the breakpoint
	// manager.
	Team* team = thread_get_current_thread()->team;
	ConditionVariable debugChangeCondition;
	debugChangeCondition.Init(team, "debug change condition");
	prepare_debugger_change(team, debugChangeCondition);

	if (team->debug_info.breakpoint_manager != NULL) {
		bool isSyscall;
		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
		if (pc != NULL && !isSyscall)
			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
	}

	finish_debugger_change(team);

	return result;
}


static status_t
thread_hit_serious_debug_event(debug_debugger_message event,
	const void *message, int32 messageSize)
{
	// ensure that a debugger is installed for this team
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		Thread *thread = thread_get_current_thread();
		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
			"thread: %" B_PRId32 " (%s): %s\n", thread->id, thread->name,
			strerror(error));
		return error;
	}

	// enter the debug loop
	return thread_hit_debug_event(event, message, messageSize, true);
}


void
user_debug_pre_syscall(uint32 syscall, void *args)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether pre-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_pre_syscall message;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
		sizeof(message), true);
}


void
user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
	bigtime_t startTime)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check if we need to flush the profiling buffer
	if (thread->debug_info.profile.flush_needed)
		profiling_flush(NULL);

	// check whether post-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_post_syscall message;
	message.start_time = startTime;
	message.end_time = system_time();
	message.return_value = returnValue;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
		sizeof(message), true);
}


/** \brief To be called when an unhandled processor exception (error/fault)
 *	occurred.
 * \param exception The debug_exception_type value identifying the kind of
 *	fault.
 * \param signal The signal corresponding to the exception.
 * \return \c true, if the caller shall continue normally, i.e. usually send
 *	a deadly signal. \c false, if the debugger insists on continuing the
 *	program (e.g. because it has removed the cause of the problem).
 */
bool
user_debug_exception_occurred(debug_exception_type exception, int signal)
{
	// First check whether there's a signal handler installed for the signal.
	// If so, we don't want to install a debugger for the team. We always send
	// the signal instead. An already installed debugger will be notified, if
	// it has requested notifications of signals.
	struct sigaction signalAction;
	if (sigaction(signal, NULL, &signalAction) == 0
		&& signalAction.sa_handler != SIG_DFL) {
		return true;
	}

	// prepare the message
	debug_exception_occurred message;
	message.exception = exception;
	message.signal = signal;

	status_t result = thread_hit_serious_debug_event(
		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


bool
user_debug_handle_signal(int signal, struct sigaction *handler, siginfo_t *info,
	bool deadly)
{
	// check, if a debugger is installed and is interested in signals
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
		return true;
	}

	// prepare the message
	debug_signal_received message;
	message.signal = signal;
	message.handler = *handler;
	message.info = *info;
	message.deadly = deadly;

	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
		&message, sizeof(message), true);
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


void
user_debug_stop_thread()
{
	// check whether this is actually an emulated single-step notification
	Thread* thread = thread_get_current_thread();
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	bool singleStepped = false;
	if ((atomic_and(&thread->debug_info.flags,
				~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
			& B_THREAD_DEBUG_NOTIFY_SINGLE_STEP) != 0) {
		singleStepped = true;
	}

	threadDebugInfoLocker.Unlock();

	if (singleStepped) {
		user_debug_single_stepped();
	} else {
		debug_thread_debugged message;
		thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED,
			&message, sizeof(message));
	}
}


void
user_debug_team_created(team_id teamID)
{
	// check, if a debugger is installed and is interested in team creation
	// events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_created message;
	message.new_team = teamID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_team_deleted(team_id teamID, port_id debuggerPort, status_t status,
	int signal, team_usage_info* usageInfo)
{
	if (debuggerPort >= 0) {
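		// The team is already gone, so debugger_write() cannot be used;
		// write to the debugger port directly and don't block.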
TRACE(("user_debug_team_deleted(team: %" B_PRId32 ", debugger port: " 1014 "%" B_PRId32 ")\n", teamID, debuggerPort)); 1015 1016 debug_team_deleted message; 1017 message.origin.thread = -1; 1018 message.origin.team = teamID; 1019 message.origin.nub_port = -1; 1020 message.status = status; 1021 message.signal = signal; 1022 message.usage = *usageInfo; 1023 write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message, 1024 sizeof(message), B_RELATIVE_TIMEOUT, 0); 1025 } 1026 } 1027 1028 1029 void 1030 user_debug_team_exec() 1031 { 1032 // check, if a debugger is installed and is interested in team creation 1033 // events 1034 Thread *thread = thread_get_current_thread(); 1035 int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags); 1036 if (~teamDebugFlags 1037 & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) { 1038 return; 1039 } 1040 1041 // prepare the message 1042 debug_team_exec message; 1043 message.image_event = atomic_add(&thread->team->debug_info.image_event, 1) 1044 + 1; 1045 1046 thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message, 1047 sizeof(message), true); 1048 } 1049 1050 1051 /*! Called by a new userland thread to update the debugging related flags of 1052 \c Thread::flags before the thread first enters userland. 1053 \param thread The calling thread. 1054 */ 1055 void 1056 user_debug_update_new_thread_flags(Thread* thread) 1057 { 1058 // lock it and update it's flags 1059 InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock); 1060 1061 update_thread_user_debug_flag(thread); 1062 update_thread_breakpoints_flag(thread); 1063 update_thread_debugger_installed_flag(thread); 1064 } 1065 1066 1067 void 1068 user_debug_thread_created(thread_id threadID) 1069 { 1070 // check, if a debugger is installed and is interested in thread events 1071 Thread *thread = thread_get_current_thread(); 1072 int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags); 1073 if (~teamDebugFlags 1074 & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) { 1075 return; 1076 } 1077 1078 // prepare the message 1079 debug_thread_created message; 1080 message.new_thread = threadID; 1081 1082 thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message, 1083 sizeof(message), true); 1084 } 1085 1086 1087 void 1088 user_debug_thread_deleted(team_id teamID, thread_id threadID, status_t status) 1089 { 1090 // Things are a bit complicated here, since this thread no longer belongs to 1091 // the debugged team (but to the kernel). So we can't use debugger_write(). 

	// get the team debug flags and debugger port
	Team* team = Team::Get(teamID);
	if (team == NULL)
		return;
	BReference<Team> teamReference(team, true);

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;
	sem_id writeLock = team->debug_info.debugger_write_lock;

	debugInfoLocker.Unlock();

	// check, if a debugger is installed and is interested in thread events
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// acquire the debugger write lock
	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK)
		return;

	// re-get the team debug info -- we need to check whether anything changed
	debugInfoLocker.Lock();

	teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id newDebuggerPort = team->debug_info.debugger_port;

	debugInfoLocker.Unlock();

	// Send the message only if the debugger hasn't changed in the meantime or
	// no handover to a new debugger is in progress.
	if (newDebuggerPort == debuggerPort
		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
		debug_thread_deleted message;
		message.origin.thread = threadID;
		message.origin.team = teamID;
		message.origin.nub_port = -1;
		message.status = status;

		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
	}

	// release the debugger write lock
	release_sem(writeLock);
}


/*!	Called for a thread that is about to die, cleaning up all user debug
	facilities installed for the thread.
	\param thread The current thread, the one that is going to die.
*/
void
user_debug_thread_exiting(Thread* thread)
{
	// thread is the current thread, so using team is safe
	Team* team = thread->team;

	InterruptsLocker interruptsLocker;

	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	// check, if a debugger is installed
	if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
		|| debuggerPort < 0) {
		return;
	}

	// detach the profile info and mark the thread dying
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	thread_debug_info& threadDebugInfo = thread->debug_info;
	if (threadDebugInfo.profile.samples == NULL)
		return;

	area_id sampleArea = threadDebugInfo.profile.sample_area;
	int32 sampleCount = threadDebugInfo.profile.sample_count;
	int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
	int32 stackDepth = threadDebugInfo.profile.stack_depth;
	bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
	int32 imageEvent = threadDebugInfo.profile.image_event;
	threadDebugInfo.profile.sample_area = -1;
	threadDebugInfo.profile.samples = NULL;
	threadDebugInfo.profile.flush_needed = false;

	bigtime_t lastCPUTime;
	{
		SpinLocker threadTimeLocker(thread->time_lock);
		lastCPUTime = thread->CPUTime(false);
	}

	atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);

	threadDebugInfoLocker.Unlock();
	interruptsLocker.Unlock();

	// notify the debugger
	debug_profiler_update message;
	message.origin.thread = thread->id;
	message.origin.team = thread->team->id;
	message.origin.nub_port = -1;	// asynchronous message
	message.sample_count = sampleCount;
	message.dropped_ticks = droppedTicks;
	message.stack_depth = stackDepth;
	message.variable_stack_depth = variableStackDepth;
	message.image_event = imageEvent;
	message.stopped = true;
	message.last_cpu_time = lastCPUTime;
	debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
		&message, sizeof(message), false);

	if (sampleArea >= 0) {
		area_info areaInfo;
		if (get_area_info(sampleArea, &areaInfo) == B_OK) {
			unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
			delete_area(sampleArea);
		}
	}
}


void
user_debug_image_created(const image_info *imageInfo)
{
	// check, if a debugger is installed and is interested in image events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_created message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_image_deleted(const image_info *imageInfo)
{
	// check, if a debugger is installed and is interested in image events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_deleted message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message,
		sizeof(message), true);
}


void
user_debug_breakpoint_hit(bool software)
{
	// prepare the message
	debug_breakpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
		sizeof(message));
}


void
user_debug_watchpoint_hit()
{
	// prepare the message
	debug_watchpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
		sizeof(message));
}


void
user_debug_single_stepped()
{
	// clear the single-step thread flag
	Thread* thread = thread_get_current_thread();
	atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_SINGLE_STEP);

	// prepare the message
	debug_single_step message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
		sizeof(message));
}


/*!	Schedules the profiling timer for the current thread.
	The caller must hold the thread's debug info lock.
	\param thread The current thread.
	\param interval The time after which the timer should fire.
*/
static void
schedule_profiling_timer(Thread* thread, bigtime_t interval)
{
	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];

	// Use the "hook" field to sanity-check that this timer is not scheduled.
	ASSERT(timer->hook == NULL);

	thread->debug_info.profile.installed_timer = timer;
	thread->debug_info.profile.timer_end = system_time() + interval;
	add_timer(timer, &profiling_event, interval, B_ONE_SHOT_RELATIVE_TIMER);
}


/*!	Returns the time remaining for the current profiling timer.
	The caller must hold the thread's debug info lock.
	\param thread The current thread.
*/
static bigtime_t
profiling_timer_left(Thread* thread)
{
	return thread->debug_info.profile.timer_end - system_time();
}


/*!	Samples the current thread's instruction pointer/stack trace.
	The caller must hold the current thread's debug info lock.
	\returns Whether the profiling timer should be rescheduled.
*/
static bool
profiling_do_sample()
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	if (debugInfo.profile.samples == NULL)
		return false;

	// Check, whether the buffer is full or an image event occurred since the
	// last sample was taken.
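	// (With variable stack depth an image event is recorded in the sample
	// buffer itself as a pair: B_DEBUG_PROFILE_IMAGE_EVENT followed by the
	// new image event counter.)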
	int32 maxSamples = debugInfo.profile.max_samples;
	int32 sampleCount = debugInfo.profile.sample_count;
	int32 stackDepth = debugInfo.profile.stack_depth;
	int32 imageEvent = thread->team->debug_info.image_event;
	if (debugInfo.profile.sample_count > 0) {
		if (debugInfo.profile.last_image_event < imageEvent
			&& debugInfo.profile.variable_stack_depth
			&& sampleCount + 2 <= maxSamples) {
			// an image event occurred, but we use variable stack depth and
			// have enough room in the buffer to indicate an image event
			addr_t* event = debugInfo.profile.samples + sampleCount;
			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
			event[1] = imageEvent;
			sampleCount += 2;
			debugInfo.profile.sample_count = sampleCount;
			debugInfo.profile.last_image_event = imageEvent;
		}

		if (debugInfo.profile.last_image_event < imageEvent
			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
			debugInfo.profile.flush_needed = true;

			// If the buffer is not full yet, we add the samples,
			// otherwise we have to drop them.
			if (maxSamples - sampleCount < stackDepth) {
				debugInfo.profile.dropped_ticks++;
				return true;
			}
		}
	} else {
		// first sample -- set the image event
		debugInfo.profile.image_event = imageEvent;
		debugInfo.profile.last_image_event = imageEvent;
	}

	// get the samples
	uint32 flags = STACK_TRACE_USER;
	int32 skipIFrames = 0;
	if (debugInfo.profile.profile_kernel) {
		flags |= STACK_TRACE_KERNEL;
		skipIFrames = 1;
	}

	addr_t* returnAddresses = debugInfo.profile.samples
		+ debugInfo.profile.sample_count;
	if (debugInfo.profile.variable_stack_depth) {
		// variable sample count per hit
		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
			stackDepth - 1, skipIFrames, 0, flags);

		debugInfo.profile.sample_count += *returnAddresses + 1;
	} else {
		// fixed sample count per hit
		if (stackDepth > 1 || !debugInfo.profile.profile_kernel) {
			int32 count = arch_debug_get_stack_trace(returnAddresses,
				stackDepth, skipIFrames, 0, flags);

			for (int32 i = count; i < stackDepth; i++)
				returnAddresses[i] = 0;
		} else
			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);

		debugInfo.profile.sample_count += stackDepth;
	}

	return true;
}


static void
profiling_flush(void*)
{
	// This function may be called as a post_interrupt_callback. When it is,
	// it is undefined whether the function is called with interrupts enabled
	// or disabled. (When called elsewhere, interrupts will always be enabled.)
	// We are allowed to enable interrupts, though. First make sure interrupts
	// are disabled.
	disable_interrupts();

	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	if (debugInfo.profile.samples != NULL && debugInfo.profile.flush_needed) {
		int32 sampleCount = debugInfo.profile.sample_count;
		int32 droppedTicks = debugInfo.profile.dropped_ticks;
		int32 stackDepth = debugInfo.profile.stack_depth;
		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
		int32 imageEvent = debugInfo.profile.image_event;

		// prevent the timer from running until after we flush
		bigtime_t interval = debugInfo.profile.interval;
		if (debugInfo.profile.installed_timer != NULL) {
			interval = max_c(profiling_timer_left(thread), 0);
			cancel_timer(debugInfo.profile.installed_timer);
			debugInfo.profile.installed_timer->hook = NULL;
			debugInfo.profile.installed_timer = NULL;
		}
		debugInfo.profile.interval_left = -1;

		// notify the debugger
		debugInfo.profile.sample_count = 0;
		debugInfo.profile.dropped_ticks = 0;
		debugInfo.profile.flush_needed = false;

		threadDebugInfoLocker.Unlock();
		enable_interrupts();

		// prepare the message
		debug_profiler_update message;
		message.sample_count = sampleCount;
		message.dropped_ticks = droppedTicks;
		message.stack_depth = stackDepth;
		message.variable_stack_depth = variableStackDepth;
		message.image_event = imageEvent;
		message.stopped = false;

		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
			sizeof(message), false);

		disable_interrupts();
		threadDebugInfoLocker.Lock();
		if (debugInfo.profile.samples != NULL)
			schedule_profiling_timer(thread, interval);
	}

	threadDebugInfoLocker.Unlock();
	enable_interrupts();
}


/*!	Profiling timer event callback.
	Called with interrupts disabled.
*/
static int32
profiling_event(timer* /*unused*/)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);
	debugInfo.profile.installed_timer->hook = NULL;
	debugInfo.profile.installed_timer = NULL;

	if (profiling_do_sample()) {
		// Check if the sample buffer needs to be flushed. We can't do it here,
		// since we're in an interrupt handler, and we can't set the callback
		// if we interrupted a kernel function, since the callback will pause
		// this thread. (The post_syscall hook will do the flush in that case.)
		if (debugInfo.profile.flush_needed
			&& !IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
			thread->post_interrupt_callback = profiling_flush;

			// We don't reschedule the timer here because profiling_flush()
			// will lead to the thread being descheduled until we are told to
			// continue. The timer will be rescheduled after the flush
			// concludes.
			debugInfo.profile.interval_left = -1;
		} else
			schedule_profiling_timer(thread, debugInfo.profile.interval);
	}

	return B_HANDLED_INTERRUPT;
}


/*!	Called by the scheduler when a debugged thread has been unscheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_unscheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// if running, cancel the profiling timer
	struct timer* timer = thread->debug_info.profile.installed_timer;
	if (timer != NULL) {
		// track remaining time
		bigtime_t left = profiling_timer_left(thread);
		thread->debug_info.profile.interval_left = max_c(left, 0);
		thread->debug_info.profile.installed_timer->hook = NULL;
		thread->debug_info.profile.installed_timer = NULL;

		// cancel timer
		threadDebugInfoLocker.Unlock();
			// not necessary, but doesn't harm and reduces contention
		cancel_timer(timer);
			// since this is invoked on the same CPU, it cannot end up waiting
			// for a timer hook that is already running
	}
}


/*!	Called by the scheduler when a debugged thread has been scheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_scheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	if (thread->debug_info.profile.samples != NULL
		&& thread->debug_info.profile.interval_left >= 0) {
		// install profiling timer
		schedule_profiling_timer(thread,
			thread->debug_info.profile.interval_left);
	}
}


/*!	\brief Called by the debug nub thread of a team to broadcast a message to
		all threads of the team that are initialized for debugging (and
		thus have a debug port).
*/
static void
broadcast_debugged_thread_message(Thread *nubThread, int32 code,
	const void *message, int32 size)
{
	// iterate through the threads
	thread_info threadInfo;
	int32 cookie = 0;
	while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
			== B_OK) {
		// get the thread and lock it
		Thread* thread = Thread::GetAndLock(threadInfo.thread);
		if (thread == NULL)
			continue;

		BReference<Thread> threadReference(thread, true);
		ThreadLocker threadLocker(thread, true);

		// get the thread's debug port
		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		port_id threadDebugPort = -1;
		if (thread && thread != nubThread && thread->team == nubThread->team
			&& (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
			&& (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
			threadDebugPort = thread->debug_info.debug_port;
		}

		threadDebugInfoLocker.Unlock();
		threadLocker.Unlock();

		// send the message to the thread
		if (threadDebugPort >= 0) {
			status_t error = kill_interruptable_write_port(threadDebugPort,
				code, message, size);
			if (error != B_OK) {
				TRACE(("broadcast_debugged_thread_message(): Failed to send "
					"message to thread %" B_PRId32 ": %" B_PRIx32 "\n",
					thread->id, error));
			}
		}
	}
}


static void
nub_thread_cleanup(Thread *nubThread)
{
	TRACE(("nub_thread_cleanup(%" B_PRId32 "): debugger port: %" B_PRId32 "\n",
		nubThread->id, nubThread->team->debug_info.debugger_port));

	ConditionVariable debugChangeCondition;
	debugChangeCondition.Init(nubThread->team, "debug change condition");
	prepare_debugger_change(nubThread->team, debugChangeCondition);

	team_debug_info teamDebugInfo;
	bool destroyDebugInfo = false;

	TeamLocker teamLocker(nubThread->team);
		// required by
		// update_threads_debugger_installed_flag()

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	team_debug_info &info = nubThread->team->debug_info;
	if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
		&& info.nub_thread == nubThread->id) {
		teamDebugInfo = info;
		clear_team_debug_info(&info, false);
		destroyDebugInfo = true;
	}

	// update the thread::flags fields
	update_threads_debugger_installed_flag(nubThread->team);

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	teamLocker.Unlock();

	if (destroyDebugInfo)
		teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();

	finish_debugger_change(nubThread->team);

	if (destroyDebugInfo)
		destroy_team_debug_info(&teamDebugInfo);

	// notify all threads that the debugger is gone
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
}


/** \brief Debug nub thread helper function that returns the debug port of
 *	a thread of the same team.
 */
static status_t
debug_nub_thread_get_thread_debug_port(Thread *nubThread,
	thread_id threadID, port_id &threadDebugPort)
{
	threadDebugPort = -1;

	// get the thread
	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// get the debug port
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	if (thread->team != nubThread->team)
		return B_BAD_VALUE;
	if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) == 0)
		return B_BAD_THREAD_STATE;

	threadDebugPort = thread->debug_info.debug_port;

	threadDebugInfoLocker.Unlock();

	if (threadDebugPort < 0)
		return B_ERROR;

	return B_OK;
}


static status_t
debug_nub_thread(void *)
{
	Thread *nubThread = thread_get_current_thread();

	// check, if we're still the current nub thread and get our port
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	if (nubThread->team->debug_info.nub_thread != nubThread->id) {
		RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
		restore_interrupts(state);
		return 0;
	}

	port_id port = nubThread->team->debug_info.nub_port;
	sem_id writeLock = nubThread->team->debug_info.debugger_write_lock;
	BreakpointManager* breakpointManager
		= nubThread->team->debug_info.breakpoint_manager;

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	TRACE(("debug_nub_thread() thread: %" B_PRId32 ", team %" B_PRId32 ", nub "
		"port: %" B_PRId32 "\n", nubThread->id, nubThread->team->id, port));

	// notify all threads that a debugger has been installed
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);

	// command processing loop
	while (true) {
		int32 command;
		debug_nub_message_data message;
		ssize_t messageSize = read_port_etc(port, &command, &message,
			sizeof(message), B_KILL_CAN_INTERRUPT, 0);

		if (messageSize < 0) {
			// The port is no longer valid or we were interrupted by a kill
			// signal:
			// If we are still listed in the team's debug info as nub thread,
			// we need to update that.
			nub_thread_cleanup(nubThread);

			TRACE(("nub thread %" B_PRId32 ": terminating: %lx\n",
				nubThread->id, messageSize));

			return messageSize;
		}

		bool sendReply = false;
		union {
			debug_nub_read_memory_reply read_memory;
			debug_nub_write_memory_reply write_memory;
			debug_nub_get_cpu_state_reply get_cpu_state;
			debug_nub_set_breakpoint_reply set_breakpoint;
			debug_nub_set_watchpoint_reply set_watchpoint;
			debug_nub_get_signal_masks_reply get_signal_masks;
			debug_nub_get_signal_handler_reply get_signal_handler;
			debug_nub_start_profiler_reply start_profiler;
			debug_profiler_update profiler_update;
			debug_nub_write_core_file_reply write_core_file;
		} reply;
		int32 replySize = 0;
		port_id replyPort = -1;

		// process the command
		switch (command) {
			case B_DEBUG_MESSAGE_READ_MEMORY:
			{
				// get the parameters
				replyPort = message.read_memory.reply_port;
				void *address = message.read_memory.address;
				int32 size = message.read_memory.size;
				status_t result = B_OK;

				// check the parameters
				if (!BreakpointManager::CanAccessAddress(address, false))
					result = B_BAD_ADDRESS;
				else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE)
					result = B_BAD_VALUE;

				// read the memory
				size_t bytesRead = 0;
				if (result == B_OK) {
					result = breakpointManager->ReadMemory(address,
						reply.read_memory.data, size, bytesRead);
				}
				reply.read_memory.error = result;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_READ_MEMORY: "
					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
					", result: %" B_PRIx32 ", read: %ld\n", nubThread->id,
					replyPort, address, size, result, bytesRead));

				// send only as much data as necessary
				reply.read_memory.size = bytesRead;
				replySize = reply.read_memory.data + bytesRead - (char*)&reply;
				sendReply = true;
				break;
			}

			case B_DEBUG_MESSAGE_WRITE_MEMORY:
			{
				// get the parameters
				replyPort = message.write_memory.reply_port;
				void *address = message.write_memory.address;
				int32 size = message.write_memory.size;
				const char *data = message.write_memory.data;
				int32 realSize = (char*)&message + messageSize - data;
				status_t result = B_OK;

				// check the parameters
				if (!BreakpointManager::CanAccessAddress(address, true))
					result = B_BAD_ADDRESS;
				else if (size <= 0 || size > realSize)
					result = B_BAD_VALUE;

				// write the memory
				size_t bytesWritten = 0;
				if (result == B_OK) {
					result = breakpointManager->WriteMemory(address, data, size,
						bytesWritten);
				}
				reply.write_memory.error = result;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_WRITE_MEMORY: "
					"reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32
					", result: %" B_PRIx32 ", written: %ld\n", nubThread->id,
					replyPort, address, size, result, bytesWritten));

				reply.write_memory.size = bytesWritten;
				sendReply = true;
				replySize = sizeof(debug_nub_write_memory_reply);
				break;
			}

			case B_DEBUG_MESSAGE_SET_TEAM_FLAGS:
			{
				// get the parameters
				int32 flags = message.set_team_flags.flags
					& B_TEAM_DEBUG_USER_FLAG_MASK;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_TEAM_FLAGS"
					": flags: %" B_PRIx32
"\n", nubThread->id, flags)); 1834 1835 Team *team = thread_get_current_thread()->team; 1836 1837 // set the flags 1838 cpu_status state = disable_interrupts(); 1839 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1840 1841 flags |= team->debug_info.flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK; 1842 atomic_set(&team->debug_info.flags, flags); 1843 1844 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1845 restore_interrupts(state); 1846 1847 break; 1848 } 1849 1850 case B_DEBUG_MESSAGE_SET_THREAD_FLAGS: 1851 { 1852 // get the parameters 1853 thread_id threadID = message.set_thread_flags.thread; 1854 int32 flags = message.set_thread_flags.flags 1855 & B_THREAD_DEBUG_USER_FLAG_MASK; 1856 1857 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_THREAD_FLAGS" 1858 ": thread: %" B_PRId32 ", flags: %" B_PRIx32 "\n", 1859 nubThread->id, threadID, flags)); 1860 1861 // set the flags 1862 Thread* thread = Thread::GetAndLock(threadID); 1863 if (thread == NULL) 1864 break; 1865 BReference<Thread> threadReference(thread, true); 1866 ThreadLocker threadLocker(thread, true); 1867 1868 InterruptsSpinLocker threadDebugInfoLocker( 1869 thread->debug_info.lock); 1870 1871 if (thread->team == thread_get_current_thread()->team) { 1872 flags |= thread->debug_info.flags 1873 & B_THREAD_DEBUG_KERNEL_FLAG_MASK; 1874 atomic_set(&thread->debug_info.flags, flags); 1875 } 1876 1877 break; 1878 } 1879 1880 case B_DEBUG_MESSAGE_CONTINUE_THREAD: 1881 { 1882 // get the parameters 1883 thread_id threadID; 1884 uint32 handleEvent; 1885 bool singleStep; 1886 1887 threadID = message.continue_thread.thread; 1888 handleEvent = message.continue_thread.handle_event; 1889 singleStep = message.continue_thread.single_step; 1890 1891 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CONTINUE_THREAD" 1892 ": thread: %" B_PRId32 ", handle event: %" B_PRIu32 ", " 1893 "single step: %d\n", nubThread->id, threadID, handleEvent, 1894 singleStep)); 1895 1896 // find the thread and get its debug port 1897 port_id threadDebugPort = -1; 1898 status_t result = debug_nub_thread_get_thread_debug_port( 1899 nubThread, threadID, threadDebugPort); 1900 1901 // send a message to the debugged thread 1902 if (result == B_OK) { 1903 debugged_thread_continue commandMessage; 1904 commandMessage.handle_event = handleEvent; 1905 commandMessage.single_step = singleStep; 1906 1907 result = write_port(threadDebugPort, 1908 B_DEBUGGED_THREAD_MESSAGE_CONTINUE, 1909 &commandMessage, sizeof(commandMessage)); 1910 } else if (result == B_BAD_THREAD_STATE) { 1911 Thread* thread = Thread::GetAndLock(threadID); 1912 if (thread == NULL) 1913 break; 1914 1915 BReference<Thread> threadReference(thread, true); 1916 ThreadLocker threadLocker(thread, true); 1917 if (thread->state == B_THREAD_SUSPENDED) { 1918 threadLocker.Unlock(); 1919 resume_thread(threadID); 1920 break; 1921 } 1922 } 1923 1924 break; 1925 } 1926 1927 case B_DEBUG_MESSAGE_SET_CPU_STATE: 1928 { 1929 // get the parameters 1930 thread_id threadID = message.set_cpu_state.thread; 1931 const debug_cpu_state &cpuState 1932 = message.set_cpu_state.cpu_state; 1933 1934 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_CPU_STATE" 1935 ": thread: %" B_PRId32 "\n", nubThread->id, threadID)); 1936 1937 // find the thread and get its debug port 1938 port_id threadDebugPort = -1; 1939 status_t result = debug_nub_thread_get_thread_debug_port( 1940 nubThread, threadID, threadDebugPort); 1941 1942 // send a message to the debugged thread 1943 if (result == B_OK) { 1944 debugged_thread_set_cpu_state commandMessage; 1945 
					memcpy(&commandMessage.cpu_state, &cpuState,
						sizeof(debug_cpu_state));
					write_port(threadDebugPort,
						B_DEBUGGED_THREAD_SET_CPU_STATE,
						&commandMessage, sizeof(commandMessage));
				}

				break;
			}

			case B_DEBUG_MESSAGE_GET_CPU_STATE:
			{
				// get the parameters
				thread_id threadID = message.get_cpu_state.thread;
				replyPort = message.get_cpu_state.reply_port;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_CPU_STATE"
					": thread: %" B_PRId32 "\n", nubThread->id, threadID));

				// find the thread and get its debug port
				port_id threadDebugPort = -1;
				status_t result = debug_nub_thread_get_thread_debug_port(
					nubThread, threadID, threadDebugPort);

				// send a message to the debugged thread
				if (threadDebugPort >= 0) {
					debugged_thread_get_cpu_state commandMessage;
					commandMessage.reply_port = replyPort;
					result = write_port(threadDebugPort,
						B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage,
						sizeof(commandMessage));
				}

				// send a reply to the debugger in case of error
				if (result != B_OK) {
					reply.get_cpu_state.error = result;
					sendReply = true;
					replySize = sizeof(reply.get_cpu_state);
				}

				break;
			}

			case B_DEBUG_MESSAGE_SET_BREAKPOINT:
			{
				// get the parameters
				replyPort = message.set_breakpoint.reply_port;
				void *address = message.set_breakpoint.address;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_BREAKPOINT"
					": address: %p\n", nubThread->id, address));

				// check the address
				status_t result = B_OK;
				if (address == NULL
					|| !BreakpointManager::CanAccessAddress(address, false)) {
					result = B_BAD_ADDRESS;
				}

				// set the breakpoint
				if (result == B_OK)
					result = breakpointManager->InstallBreakpoint(address);

				if (result == B_OK)
					update_threads_breakpoints_flag();

				// prepare the reply
				reply.set_breakpoint.error = result;
				replySize = sizeof(reply.set_breakpoint);
				sendReply = true;

				break;
			}

			case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT:
			{
				// get the parameters
				void *address = message.clear_breakpoint.address;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_BREAKPOINT"
					": address: %p\n", nubThread->id, address));

				// check the address
				status_t result = B_OK;
				if (address == NULL
					|| !BreakpointManager::CanAccessAddress(address, false)) {
					result = B_BAD_ADDRESS;
				}

				// clear the breakpoint
				if (result == B_OK)
					result = breakpointManager->UninstallBreakpoint(address);

				if (result == B_OK)
					update_threads_breakpoints_flag();

				break;
			}

			case B_DEBUG_MESSAGE_SET_WATCHPOINT:
			{
				// get the parameters
				replyPort = message.set_watchpoint.reply_port;
				void *address = message.set_watchpoint.address;
				uint32 type = message.set_watchpoint.type;
				int32 length = message.set_watchpoint.length;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_WATCHPOINT"
					": address: %p, type: %" B_PRIu32 ", length: %" B_PRId32 "\n",
					nubThread->id, address, type, length));

				// check the address and size
				status_t result = B_OK;
				if (address == NULL
					|| !BreakpointManager::CanAccessAddress(address, false)) {
					result = B_BAD_ADDRESS;
				}
				if (length < 0)
					result = B_BAD_VALUE;

				// set the watchpoint
				if (result == B_OK) {
					result = breakpointManager->InstallWatchpoint(address, type,
						length);
				}

				if (result == B_OK)
					update_threads_breakpoints_flag();

				// prepare the reply
				reply.set_watchpoint.error = result;
				replySize = sizeof(reply.set_watchpoint);
				sendReply = true;

				break;
			}

			case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT:
			{
				// get the parameters
				void *address = message.clear_watchpoint.address;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_WATCHPOINT"
					": address: %p\n", nubThread->id, address));

				// check the address
				status_t result = B_OK;
				if (address == NULL
					|| !BreakpointManager::CanAccessAddress(address, false)) {
					result = B_BAD_ADDRESS;
				}

				// clear the watchpoint
				if (result == B_OK)
					result = breakpointManager->UninstallWatchpoint(address);

				if (result == B_OK)
					update_threads_breakpoints_flag();

				break;
			}

			case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS:
			{
				// get the parameters
				thread_id threadID = message.set_signal_masks.thread;
				uint64 ignore = message.set_signal_masks.ignore_mask;
				uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask;
				uint32 ignoreOp = message.set_signal_masks.ignore_op;
				uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_MASKS"
					": thread: %" B_PRId32 ", ignore: %" B_PRIx64 " (op: %"
					B_PRIu32 "), ignore once: %" B_PRIx64 " (op: %" B_PRIu32
					")\n", nubThread->id, threadID, ignore, ignoreOp,
					ignoreOnce, ignoreOnceOp));

				// set the masks
				Thread* thread = Thread::GetAndLock(threadID);
				if (thread == NULL)
					break;
				BReference<Thread> threadReference(thread, true);
				ThreadLocker threadLocker(thread, true);

				InterruptsSpinLocker threadDebugInfoLocker(
					thread->debug_info.lock);

				if (thread->team == thread_get_current_thread()->team) {
					thread_debug_info &threadDebugInfo = thread->debug_info;
					// set ignore mask
					switch (ignoreOp) {
						case B_DEBUG_SIGNAL_MASK_AND:
							threadDebugInfo.ignore_signals &= ignore;
							break;
						case B_DEBUG_SIGNAL_MASK_OR:
							threadDebugInfo.ignore_signals |= ignore;
							break;
						case B_DEBUG_SIGNAL_MASK_SET:
							threadDebugInfo.ignore_signals = ignore;
							break;
					}

					// set ignore once mask
					switch (ignoreOnceOp) {
						case B_DEBUG_SIGNAL_MASK_AND:
							threadDebugInfo.ignore_signals_once &= ignoreOnce;
							break;
						case B_DEBUG_SIGNAL_MASK_OR:
							threadDebugInfo.ignore_signals_once |= ignoreOnce;
							break;
						case B_DEBUG_SIGNAL_MASK_SET:
							threadDebugInfo.ignore_signals_once = ignoreOnce;
							break;
					}
				}

				break;
			}

			case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS:
			{
				// get the parameters
				replyPort = message.get_signal_masks.reply_port;
				thread_id threadID = message.get_signal_masks.thread;
				status_t result = B_OK;

				// get the masks
				uint64 ignore = 0;
				uint64 ignoreOnce = 0;

				Thread* thread = Thread::GetAndLock(threadID);
				if (thread != NULL) {
					BReference<Thread> threadReference(thread, true);
					ThreadLocker threadLocker(thread, true);

					InterruptsSpinLocker threadDebugInfoLocker(
						thread->debug_info.lock);

					ignore = thread->debug_info.ignore_signals;
					ignoreOnce = thread->debug_info.ignore_signals_once;
				} else
					result = B_BAD_THREAD_ID;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_MASKS"
					": reply port: %" B_PRId32 ", thread: %" B_PRId32 ", "
					"ignore: %" B_PRIx64 ", ignore once: %" B_PRIx64 ", result: "
					"%" B_PRIx32 "\n", nubThread->id, replyPort, threadID,
					ignore, ignoreOnce, result));

				// prepare the message
				reply.get_signal_masks.error = result;
				reply.get_signal_masks.ignore_mask = ignore;
				reply.get_signal_masks.ignore_once_mask = ignoreOnce;
				replySize = sizeof(reply.get_signal_masks);
				sendReply = true;
				break;
			}

			case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER:
			{
				// get the parameters
				int signal = message.set_signal_handler.signal;
				struct sigaction &handler = message.set_signal_handler.handler;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER"
					": signal: %d, handler: %p\n", nubThread->id, signal,
					handler.sa_handler));

				// set the handler
				sigaction(signal, &handler, NULL);

				break;
			}

			case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER:
			{
				// get the parameters
				replyPort = message.get_signal_handler.reply_port;
				int signal = message.get_signal_handler.signal;
				status_t result = B_OK;

				// get the handler
				if (sigaction(signal, NULL, &reply.get_signal_handler.handler)
						!= 0) {
					result = errno;
				}

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER"
					": reply port: %" B_PRId32 ", signal: %d, handler: %p\n",
					nubThread->id, replyPort, signal,
					reply.get_signal_handler.handler.sa_handler));

				// prepare the message
				reply.get_signal_handler.error = result;
				replySize = sizeof(reply.get_signal_handler);
				sendReply = true;
				break;
			}

			case B_DEBUG_MESSAGE_PREPARE_HANDOVER:
			{
				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_PREPARE_HANDOVER"
					"\n", nubThread->id));

				Team *team = nubThread->team;

				// Acquire the debugger write lock. As soon as we have it and
				// have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread
				// will write anything to the debugger port anymore.
				status_t result = acquire_sem_etc(writeLock, 1,
					B_KILL_CAN_INTERRUPT, 0);
				if (result == B_OK) {
					// set the respective team debug flag
					cpu_status state = disable_interrupts();
					GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

					atomic_or(&team->debug_info.flags,
						B_TEAM_DEBUG_DEBUGGER_HANDOVER);
					BreakpointManager* breakpointManager
						= team->debug_info.breakpoint_manager;

					RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
					restore_interrupts(state);

					// remove all installed breakpoints
					breakpointManager->RemoveAllBreakpoints();

					release_sem(writeLock);
				} else {
					// We probably got a SIGKILL. If so, we will terminate when
					// reading the next message fails.
				}
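				// On success the new debugger completes the handover by
				// calling install_team_debugger(), which will notify us with
				// B_DEBUG_MESSAGE_HANDED_OVER (handled below).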
				break;
			}

			case B_DEBUG_MESSAGE_HANDED_OVER:
			{
				// notify all threads that the debugger has changed
				broadcast_debugged_thread_message(nubThread,
					B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);

				break;
			}

			case B_DEBUG_START_PROFILER:
			{
				// get the parameters
				thread_id threadID = message.start_profiler.thread;
				replyPort = message.start_profiler.reply_port;
				area_id sampleArea = message.start_profiler.sample_area;
				int32 stackDepth = message.start_profiler.stack_depth;
				bool variableStackDepth
					= message.start_profiler.variable_stack_depth;
				bool profileKernel = message.start_profiler.profile_kernel;
				bigtime_t interval = max_c(message.start_profiler.interval,
					B_DEBUG_MIN_PROFILE_INTERVAL);
				status_t result = B_OK;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_START_PROFILER: "
					"thread: %" B_PRId32 ", sample area: %" B_PRId32 "\n",
					nubThread->id, threadID, sampleArea));

				if (stackDepth < 1)
					stackDepth = 1;
				else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH)
					stackDepth = B_DEBUG_STACK_TRACE_DEPTH;

				// provision for an extra entry per hit (for the number of
				// samples), if variable stack depth
				if (variableStackDepth)
					stackDepth++;

				// clone the sample area
				area_info areaInfo;
				if (result == B_OK)
					result = get_area_info(sampleArea, &areaInfo);

				area_id clonedSampleArea = -1;
				void* samples = NULL;
				if (result == B_OK) {
					clonedSampleArea = clone_area("profiling samples", &samples,
						B_ANY_KERNEL_ADDRESS,
						B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
						sampleArea);
					if (clonedSampleArea >= 0) {
						// we need the memory locked
						result = lock_memory(samples, areaInfo.size,
							B_READ_DEVICE);
						if (result != B_OK) {
							delete_area(clonedSampleArea);
							clonedSampleArea = -1;
						}
					} else
						result = clonedSampleArea;
				}

				// get the thread and set the profile info
				int32 imageEvent = nubThread->team->debug_info.image_event;
				if (result == B_OK) {
					Thread* thread = Thread::GetAndLock(threadID);
					BReference<Thread> threadReference(thread, true);
					ThreadLocker threadLocker(thread, true);

					if (thread != NULL && thread->team == nubThread->team) {
						thread_debug_info &threadDebugInfo = thread->debug_info;

						InterruptsSpinLocker threadDebugInfoLocker(
							threadDebugInfo.lock);

						if (threadDebugInfo.profile.samples == NULL) {
							threadDebugInfo.profile.interval = interval;
							threadDebugInfo.profile.sample_area
								= clonedSampleArea;
							threadDebugInfo.profile.samples = (addr_t*)samples;
							threadDebugInfo.profile.max_samples
								= areaInfo.size / sizeof(addr_t);
							threadDebugInfo.profile.flush_threshold
								= threadDebugInfo.profile.max_samples
									* B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD
									/ 100;
							threadDebugInfo.profile.sample_count = 0;
							threadDebugInfo.profile.dropped_ticks = 0;
							threadDebugInfo.profile.stack_depth = stackDepth;
							threadDebugInfo.profile.variable_stack_depth
								= variableStackDepth;
							threadDebugInfo.profile.profile_kernel = profileKernel;
							threadDebugInfo.profile.flush_needed = false;
							threadDebugInfo.profile.interval_left = interval;
							threadDebugInfo.profile.installed_timer = NULL;
							threadDebugInfo.profile.image_event = imageEvent;
							threadDebugInfo.profile.last_image_event
								= imageEvent;
						} else
							result = B_BAD_VALUE;
					} else
						result = B_BAD_THREAD_ID;
				}

				// on error unlock and delete the sample area
				if (result != B_OK) {
					if (clonedSampleArea >= 0) {
						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
						delete_area(clonedSampleArea);
					}
				}

				// send a reply to the debugger
				reply.start_profiler.error = result;
				reply.start_profiler.interval = interval;
				reply.start_profiler.image_event = imageEvent;
				sendReply = true;
				replySize = sizeof(reply.start_profiler);

				break;
			}

			case B_DEBUG_STOP_PROFILER:
			{
				// get the parameters
				thread_id threadID = message.stop_profiler.thread;
				replyPort = message.stop_profiler.reply_port;
				status_t result = B_OK;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_STOP_PROFILER: "
					"thread: %" B_PRId32 "\n", nubThread->id, threadID));

				area_id sampleArea = -1;
				addr_t* samples = NULL;
				int32 sampleCount = 0;
				int32 stackDepth = 0;
				bool variableStackDepth = false;
				int32 imageEvent = 0;
				int32 droppedTicks = 0;
				bigtime_t lastCPUTime = 0;

				// get the thread and detach the profile info
				Thread* thread = Thread::GetAndLock(threadID);
				BReference<Thread> threadReference(thread, true);
				ThreadLocker threadLocker(thread, true);

				if (thread && thread->team == nubThread->team) {
					thread_debug_info &threadDebugInfo = thread->debug_info;

					InterruptsSpinLocker threadDebugInfoLocker(
						threadDebugInfo.lock);

					if (threadDebugInfo.profile.samples != NULL) {
						sampleArea = threadDebugInfo.profile.sample_area;
						samples = threadDebugInfo.profile.samples;
						sampleCount = threadDebugInfo.profile.sample_count;
						droppedTicks = threadDebugInfo.profile.dropped_ticks;
						stackDepth = threadDebugInfo.profile.stack_depth;
						variableStackDepth
							= threadDebugInfo.profile.variable_stack_depth;
						imageEvent = threadDebugInfo.profile.image_event;
						threadDebugInfo.profile.sample_area = -1;
						threadDebugInfo.profile.samples = NULL;
						threadDebugInfo.profile.flush_needed = false;
						threadDebugInfo.profile.dropped_ticks = 0;
						{
							SpinLocker threadTimeLocker(thread->time_lock);
							lastCPUTime = thread->CPUTime(false);
						}
					} else
						result = B_BAD_VALUE;
				} else
					result = B_BAD_THREAD_ID;

				threadLocker.Unlock();

				// prepare the reply
				if (result == B_OK) {
					reply.profiler_update.origin.thread = threadID;
					reply.profiler_update.image_event = imageEvent;
					reply.profiler_update.stack_depth = stackDepth;
					reply.profiler_update.variable_stack_depth
						= variableStackDepth;
					reply.profiler_update.sample_count = sampleCount;
					reply.profiler_update.dropped_ticks = droppedTicks;
					reply.profiler_update.stopped = true;
					reply.profiler_update.last_cpu_time = lastCPUTime;
				} else
					reply.profiler_update.origin.thread = result;

				replySize = sizeof(debug_profiler_update);
				sendReply = true;

				if (sampleArea >= 0) {
					area_info areaInfo;
					if (get_area_info(sampleArea, &areaInfo) == B_OK) {
						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
						delete_area(sampleArea);
					}
				}

				break;
			}

			case B_DEBUG_WRITE_CORE_FILE:
			{
				// get the parameters
				replyPort = message.write_core_file.reply_port;
				char* path = message.write_core_file.path;
				path[sizeof(message.write_core_file.path) - 1] = '\0';
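				// The core file is written by the nub thread itself; the
				// reply is sent only after core_dump_write_core_file() has
				// returned.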
				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_WRITE_CORE_FILE"
					": path: %s\n", nubThread->id, path));

				// write the core file
				status_t result = core_dump_write_core_file(path, false);

				// prepare the reply
				reply.write_core_file.error = result;
				replySize = sizeof(reply.write_core_file);
				sendReply = true;

				break;
			}
		}

		// send the reply, if necessary
		if (sendReply) {
			status_t error = kill_interruptable_write_port(replyPort, command,
				&reply, replySize);

			if (error != B_OK) {
				// The debugger port either no longer exists or we were
				// interrupted by a kill signal. In either case we terminate.
				TRACE(("nub thread %" B_PRId32 ": failed to send reply to port "
					"%" B_PRId32 ": %s\n", nubThread->id, replyPort,
					strerror(error)));

				nub_thread_cleanup(nubThread);
				return error;
			}
		}
	}
}


/** \brief Helper function for install_team_debugger() that sets up the team
	and thread debug infos.

	The caller must hold the team's lock as well as the team debug info lock.

	The function also clears the arch specific team and thread debug infos
	(including among other things formerly set break/watchpoints).
*/
static void
install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam,
	port_id debuggerPort, port_id nubPort, thread_id nubThread,
	sem_id debuggerPortWriteLock, thread_id causingThread)
{
	atomic_set(&team->debug_info.flags,
		B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	team->debug_info.nub_port = nubPort;
	team->debug_info.nub_thread = nubThread;
	team->debug_info.debugger_team = debuggerTeam;
	team->debug_info.debugger_port = debuggerPort;
	team->debug_info.debugger_write_lock = debuggerPortWriteLock;
	team->debug_info.causing_thread = causingThread;

	arch_clear_team_debug_info(&team->debug_info.arch_info);

	// set the user debug flags and signal masks of all threads to the default
	for (Thread *thread = team->thread_list; thread;
			thread = thread->team_next) {
		SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if (thread->id == nubThread) {
			atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD);
		} else {
			int32 flags = thread->debug_info.flags
				& ~B_THREAD_DEBUG_USER_FLAG_MASK;
			atomic_set(&thread->debug_info.flags,
				flags | B_THREAD_DEBUG_DEFAULT_FLAGS);
			thread->debug_info.ignore_signals = 0;
			thread->debug_info.ignore_signals_once = 0;

			arch_clear_thread_debug_info(&thread->debug_info.arch_info);
		}
	}

	// update the thread::flags fields
	update_threads_debugger_installed_flag(team);
}


static port_id
install_team_debugger(team_id teamID, port_id debuggerPort,
	thread_id causingThread, bool useDefault, bool dontReplace)
{
	TRACE(("install_team_debugger(team: %" B_PRId32 ", port: %" B_PRId32 ", "
		"default: %d, dontReplace: %d)\n", teamID, debuggerPort, useDefault,
		dontReplace));

	if (useDefault)
		debuggerPort = atomic_get(&sDefaultDebuggerPort);

	// get the debugger team
	port_info debuggerPortInfo;
	status_t error = get_port_info(debuggerPort, &debuggerPortInfo);
	if (error != B_OK) {
		TRACE(("install_team_debugger(): Failed to get debugger port info: "
			"%" B_PRIx32 "\n", error));
		return error;
	}
	team_id debuggerTeam = debuggerPortInfo.team;

	// Check the debugger team: It must neither be the kernel team nor the
	// debugged team.
	if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) {
		TRACE(("install_team_debugger(): Can't debug kernel or debugger team. "
			"debugger: %" B_PRId32 ", debugged: %" B_PRId32 "\n", debuggerTeam,
			teamID));
		return B_NOT_ALLOWED;
	}

	// get the team
	Team* team;
	ConditionVariable debugChangeCondition;
	debugChangeCondition.Init(NULL, "debug change condition");
	error = prepare_debugger_change(teamID, debugChangeCondition, team);
	if (error != B_OK)
		return error;

	// get the real team ID
	teamID = team->id;

	// check whether a debugger is already installed

	bool done = false;
	port_id result = B_ERROR;
	bool handOver = false;
	port_id oldDebuggerPort = -1;
	port_id nubPort = -1;

	TeamLocker teamLocker(team);
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = team->debug_info.flags;

	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// There's already a debugger installed.
		if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) {
			if (dontReplace) {
				// We're fine with already having a debugger.
				error = B_OK;
				done = true;
				result = team->debug_info.nub_port;
			} else {
				// a handover to another debugger is requested
				// Set the handing-over flag -- we'll clear both flags after
				// having sent the handed-over message to the new debugger.
				atomic_or(&team->debug_info.flags,
					B_TEAM_DEBUG_DEBUGGER_HANDING_OVER);

				oldDebuggerPort = team->debug_info.debugger_port;
				result = nubPort = team->debug_info.nub_port;
				if (causingThread < 0)
					causingThread = team->debug_info.causing_thread;

				// set the new debugger
				install_team_debugger_init_debug_infos(team, debuggerTeam,
					debuggerPort, nubPort, team->debug_info.nub_thread,
					team->debug_info.debugger_write_lock, causingThread);

				handOver = true;
				done = true;
			}
		} else {
			// there's already a debugger installed
			error = (dontReplace ? B_OK : B_BAD_VALUE);
			done = true;
			result = team->debug_info.nub_port;
		}
	} else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0
		&& useDefault) {
		// No debugger yet, disable_debugger() had been invoked, and we
		// would install the default debugger. Just fail.
		error = B_BAD_VALUE;
	}

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
	restore_interrupts(state);
	teamLocker.Unlock();

	if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) {
		// The old debugger must just have died. Just proceed as
		// if there was no debugger installed. We may still be too
		// early, in which case we'll fail, but this race condition
		// should be unbelievably rare and relatively harmless.
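		// In that case we fall through to installing ourselves as a fresh
		// debugger below.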
		handOver = false;
		done = false;
	}

	if (handOver) {
		// prepare the handed-over message
		debug_handed_over notification;
		notification.origin.thread = -1;
		notification.origin.team = teamID;
		notification.origin.nub_port = nubPort;
		notification.debugger = debuggerTeam;
		notification.debugger_port = debuggerPort;
		notification.causing_thread = causingThread;

		// notify the new debugger
		error = write_port_etc(debuggerPort,
			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
		if (error != B_OK) {
			dprintf("install_team_debugger(): Failed to send message to new "
				"debugger: %s\n", strerror(error));
		}

		// clear the handed-over and handing-over flags
		state = disable_interrupts();
		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		atomic_and(&team->debug_info.flags,
			~(B_TEAM_DEBUG_DEBUGGER_HANDOVER
				| B_TEAM_DEBUG_DEBUGGER_HANDING_OVER));

		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
		restore_interrupts(state);

		finish_debugger_change(team);

		// notify the nub thread
		kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER,
			NULL, 0);

		// notify the old debugger
		error = write_port_etc(oldDebuggerPort,
			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
		if (error != B_OK) {
			TRACE(("install_team_debugger(): Failed to send message to old "
				"debugger: %s\n", strerror(error)));
		}

		TRACE(("install_team_debugger() done: handed over to debugger: team: "
			"%" B_PRId32 ", port: %" B_PRId32 "\n", debuggerTeam,
			debuggerPort));

		return result;
	}

	if (done || error != B_OK) {
		TRACE(("install_team_debugger() done1: %" B_PRId32 "\n",
			(error == B_OK ? result : error)));
		finish_debugger_change(team);
		return (error == B_OK ? result : error);
	}
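	// No debugger installed yet: set up the debugging infrastructure -- the
	// debugger write lock, the nub port (owned by the debugger team), the
	// breakpoint manager and the nub thread -- and publish it in the team
	// debug info.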
	// create the debugger write lock semaphore
	char nameBuffer[B_OS_NAME_LENGTH];
	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debugger port "
		"write", teamID);
	sem_id debuggerWriteLock = create_sem(1, nameBuffer);
	if (debuggerWriteLock < 0)
		error = debuggerWriteLock;

	// create the nub port
	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug", teamID);
	if (error == B_OK) {
		nubPort = create_port(1, nameBuffer);
		if (nubPort < 0)
			error = nubPort;
		else
			result = nubPort;
	}

	// make the debugger team the port owner; thus we'll know if the debugger
	// is gone and can clean up
	if (error == B_OK)
		error = set_port_owner(nubPort, debuggerTeam);

	// create the breakpoint manager
	BreakpointManager* breakpointManager = NULL;
	if (error == B_OK) {
		breakpointManager = new(std::nothrow) BreakpointManager;
		if (breakpointManager != NULL)
			error = breakpointManager->Init();
		else
			error = B_NO_MEMORY;
	}

	// spawn the nub thread
	thread_id nubThread = -1;
	if (error == B_OK) {
		snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug task",
			teamID);
		nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer,
			B_NORMAL_PRIORITY, NULL, teamID);
		if (nubThread < 0)
			error = nubThread;
	}

	// now adjust the debug info accordingly
	if (error == B_OK) {
		TeamLocker teamLocker(team);
		state = disable_interrupts();
		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		team->debug_info.breakpoint_manager = breakpointManager;
		install_team_debugger_init_debug_infos(team, debuggerTeam,
			debuggerPort, nubPort, nubThread, debuggerWriteLock,
			causingThread);

		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
		restore_interrupts(state);
	}

	finish_debugger_change(team);

	// if everything went fine, resume the nub thread, otherwise clean up
	if (error == B_OK) {
		resume_thread(nubThread);
	} else {
		// delete port and terminate thread
		if (nubPort >= 0) {
			set_port_owner(nubPort, B_CURRENT_TEAM);
			delete_port(nubPort);
		}
		if (nubThread >= 0) {
			int32 result;
			wait_for_thread(nubThread, &result);
		}

		delete breakpointManager;
	}

	TRACE(("install_team_debugger() done2: %" B_PRId32 "\n",
		(error == B_OK ? result : error)));
	return (error == B_OK ? result : error);
}


static status_t
ensure_debugger_installed()
{
	port_id port = install_team_debugger(B_CURRENT_TEAM, -1,
		thread_get_current_thread_id(), true, true);
	return port >= 0 ? B_OK : port;
}


// #pragma mark -

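/*!	Implements the userland debugger() call: makes sure a debugger is
	installed for the current team (installing the default debugger, if
	necessary) and reports the call to it as a
	B_DEBUGGER_MESSAGE_DEBUGGER_CALL event. If no debugger can be installed,
	the team is terminated.
*/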
void
_user_debugger(const char *userMessage)
{
	// install the default debugger, if there is none yet
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		// time to commit suicide
		char buffer[128];
		ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer));
		if (length >= 0) {
			dprintf("_user_debugger(): Failed to install debugger. Message is: "
				"`%s'\n", buffer);
		} else {
			dprintf("_user_debugger(): Failed to install debugger. Message is: "
				"%p (%s)\n", userMessage, strerror(length));
		}
		_user_exit_team(1);
	}

	// prepare the message
	debug_debugger_call message;
	message.message = (void*)userMessage;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message,
		sizeof(message), true);
}


int
_user_disable_debugger(int state)
{
	Team *team = thread_get_current_thread()->team;

	TRACE(("_user_disable_debugger(%d): team: %" B_PRId32 "\n", state,
		team->id));

	cpu_status cpuState = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 oldFlags;
	if (state) {
		oldFlags = atomic_or(&team->debug_info.flags,
			B_TEAM_DEBUG_DEBUGGER_DISABLED);
	} else {
		oldFlags = atomic_and(&team->debug_info.flags,
			~B_TEAM_DEBUG_DEBUGGER_DISABLED);
	}

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
	restore_interrupts(cpuState);

	// TODO: Check whether the return value is really the old state.
	return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED);
}


status_t
_user_install_default_debugger(port_id debuggerPort)
{
	// Do not allow non-root processes to install a default debugger.
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	// if supplied, check whether the port is a valid port
	if (debuggerPort >= 0) {
		port_info portInfo;
		status_t error = get_port_info(debuggerPort, &portInfo);
		if (error != B_OK)
			return error;

		// the debugger team must not be the kernel team
		if (portInfo.team == team_get_kernel_team_id())
			return B_NOT_ALLOWED;
	}

	atomic_set(&sDefaultDebuggerPort, debuggerPort);

	return B_OK;
}


port_id
_user_install_team_debugger(team_id teamID, port_id debuggerPort)
{
	if (geteuid() != 0 && team_geteuid(teamID) != geteuid())
		return B_PERMISSION_DENIED;

	return install_team_debugger(teamID, debuggerPort, -1, false, false);
}


status_t
_user_remove_team_debugger(team_id teamID)
{
	Team* team;
	ConditionVariable debugChangeCondition;
	debugChangeCondition.Init(NULL, "debug change condition");
	status_t error = prepare_debugger_change(teamID, debugChangeCondition,
		team);
	if (error != B_OK)
		return error;

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	thread_id nubThread = -1;
	port_id nubPort = -1;

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// there's a debugger installed
		nubThread = team->debug_info.nub_thread;
		nubPort = team->debug_info.nub_port;
	} else {
		// no debugger installed
		error = B_BAD_VALUE;
	}

	debugInfoLocker.Unlock();

	// Delete the nub port -- this will cause the nub thread to terminate and
	// remove the debugger.
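	// (The nub thread's read_port_etc() will fail, upon which it runs
	// nub_thread_cleanup(); see debug_nub_thread() above.)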
	if (nubPort >= 0)
		delete_port(nubPort);

	finish_debugger_change(team);

	// wait for the nub thread
	if (nubThread >= 0)
		wait_for_thread(nubThread, NULL);

	return error;
}


status_t
_user_debug_thread(thread_id threadID)
{
	TRACE(("[%" B_PRId32 "] _user_debug_thread(%" B_PRId32 ")\n",
		find_thread(NULL), threadID));

	// get the thread
	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// we can't debug the kernel team
	if (thread->team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	InterruptsLocker interruptsLocker;
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// If the thread is already dying, it's too late to debug it.
	if ((thread->debug_info.flags & B_THREAD_DEBUG_DYING) != 0)
		return B_BAD_THREAD_ID;

	// don't debug the nub thread
	if ((thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) != 0)
		return B_NOT_ALLOWED;

	// already marked stopped or being told to stop?
	if ((thread->debug_info.flags
			& (B_THREAD_DEBUG_STOPPED | B_THREAD_DEBUG_STOP)) != 0) {
		return B_OK;
	}

	// set the flag that tells the thread to stop as soon as possible
	atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP);

	update_thread_user_debug_flag(thread);

	// send the thread a SIGNAL_DEBUG_THREAD, so it is interrupted (or
	// continued)
	threadDebugInfoLocker.Unlock();
	ReadSpinLocker teamLocker(thread->team_lock);
	SpinLocker locker(thread->team->signal_lock);

	send_signal_to_thread_locked(thread, SIGNAL_DEBUG_THREAD, NULL, 0);

	return B_OK;
}


void
_user_wait_for_debugger(void)
{
	debug_thread_debugged message = {};
	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message,
		sizeof(message), false);
}


status_t
_user_set_debugger_breakpoint(void *address, uint32 type, int32 length,
	bool watchpoint)
{
	// check the address and size
	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
		return B_BAD_ADDRESS;
	if (watchpoint && length < 0)
		return B_BAD_VALUE;

	// check whether a debugger is installed already
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		return B_BAD_VALUE;

	// We can't help it; here's a small but relatively harmless race condition,
	// since a debugger could be installed in the meantime. The worst case is
	// that we install a break/watchpoint the debugger doesn't know about.
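	// Without an installed debugger there is no BreakpointManager for the
	// team, so the break/watchpoint is set directly via the architecture
	// back end.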
	// set the break/watchpoint
	status_t result;
	if (watchpoint)
		result = arch_set_watchpoint(address, type, length);
	else
		result = arch_set_breakpoint(address);

	if (result == B_OK)
		update_threads_breakpoints_flag();

	return result;
}


status_t
_user_clear_debugger_breakpoint(void *address, bool watchpoint)
{
	// check the address
	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
		return B_BAD_ADDRESS;

	// check whether a debugger is installed already
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		return B_BAD_VALUE;

	// We can't help it; here's a small but relatively harmless race condition,
	// since a debugger could be installed in the meantime. The worst case is
	// that we clear a break/watchpoint the debugger has just installed.

	// clear the break/watchpoint
	status_t result;
	if (watchpoint)
		result = arch_clear_watchpoint(address);
	else
		result = arch_clear_breakpoint(address);

	if (result == B_OK)
		update_threads_breakpoints_flag();

	return result;
}