1 /* 2 * Copyright 2005-2016, Ingo Weinhold, ingo_weinhold@gmx.de. 3 * Copyright 2015, Rene Gollent, rene@gollent.com. 4 * Distributed under the terms of the MIT License. 5 */ 6 7 8 #include <errno.h> 9 #include <signal.h> 10 #include <stdlib.h> 11 #include <stdio.h> 12 #include <string.h> 13 14 #include <algorithm> 15 16 #include <arch/debug.h> 17 #include <arch/user_debugger.h> 18 #include <core_dump.h> 19 #include <cpu.h> 20 #include <debugger.h> 21 #include <kernel.h> 22 #include <KernelExport.h> 23 #include <kscheduler.h> 24 #include <ksignal.h> 25 #include <ksyscalls.h> 26 #include <port.h> 27 #include <sem.h> 28 #include <team.h> 29 #include <thread.h> 30 #include <thread_types.h> 31 #include <user_debugger.h> 32 #include <vm/vm.h> 33 #include <vm/vm_types.h> 34 35 #include <AutoDeleter.h> 36 #include <util/AutoLock.h> 37 #include <util/ThreadAutoLock.h> 38 39 #include "BreakpointManager.h" 40 41 42 //#define TRACE_USER_DEBUGGER 43 #ifdef TRACE_USER_DEBUGGER 44 # define TRACE(x) dprintf x 45 #else 46 # define TRACE(x) ; 47 #endif 48 49 50 // TODO: Since the introduction of team_debug_info::debugger_changed_condition 51 // there's some potential for simplifications. E.g. clear_team_debug_info() and 52 // destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus 53 // arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()). 
static port_id sDefaultDebuggerPort = -1;
	// accessed atomically

static timer sProfilingTimers[SMP_MAX_CPUS];
	// a profiling timer for each CPU -- used when a profiled thread is running
	// on that CPU


static void schedule_profiling_timer(Thread* thread, bigtime_t interval);
static int32 profiling_event(timer* unused);
static status_t ensure_debugger_installed();
static void get_team_debug_info(team_debug_info &teamDebugInfo);


/*!	Writes a message to \a port, blocking until it has been written; the wait
	can only be interrupted by the calling thread being killed
	(B_KILL_CAN_INTERRUPT).
*/
static inline status_t
kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
	size_t bufferSize)
{
	return write_port_etc(port, code, buffer, bufferSize, B_KILL_CAN_INTERRUPT,
		0);
}


/*!	Sends a message to the current team's debugger port, serialized via the
	team's debugger write lock.
	The message is silently dropped if, after the write lock has been
	acquired, the team's debugger port is no longer \a port or a debugger
	handover is in progress.
	\param port The debugger port the caller obtained earlier.
	\param code The message code.
	\param buffer The message data.
	\param bufferSize The size of the message data.
	\param dontWait If \c true, neither acquiring the write lock nor writing
		to the port waits (B_RELATIVE_TIMEOUT with timeout 0); otherwise both
		operations can only be interrupted by the thread being killed.
	\return \c B_OK on success, another error code otherwise.
*/
static status_t
debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
	bool dontWait)
{
	TRACE(("debugger_write(): thread: %" B_PRId32 ", team %" B_PRId32 ", "
		"port: %" B_PRId32 ", code: %" B_PRIx32 ", message: %p, size: %lu, "
		"dontWait: %d\n", thread_get_current_thread()->id,
		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
		dontWait));

	status_t error = B_OK;

	// get the team debug info
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	sem_id writeLock = teamDebugInfo.debugger_write_lock;

	// get the write lock
	TRACE(("debugger_write(): acquiring write lock...\n"));
	error = acquire_sem_etc(writeLock, 1,
		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK) {
		TRACE(("debugger_write() done1: %" B_PRIx32 "\n", error));
		return error;
	}

	// re-get the team debug info -- the debugger may have changed while we
	// were waiting for the lock
	get_team_debug_info(teamDebugInfo);

	if (teamDebugInfo.debugger_port != port
		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
		// The debugger has changed in the meantime or we are about to be
		// handed over to a new debugger. In either case we don't send the
		// message.
		TRACE(("debugger_write(): %s\n",
			(teamDebugInfo.debugger_port != port ? "debugger port changed"
				: "handover flag set")));
	} else {
		TRACE(("debugger_write(): writing to port...\n"));

		error = write_port_etc(port, code, buffer, bufferSize,
			dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	}

	// release the write lock
	release_sem(writeLock);

	TRACE(("debugger_write() done: %" B_PRIx32 "\n", error));

	return error;
}


/*!	Updates the thread::flags field according to what user debugger flags are
	set for the thread.
	Interrupts must be disabled and the thread's debug info lock must be held.
*/
static void
update_thread_user_debug_flag(Thread* thread)
{
	if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0)
		atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD);
}


/*!	Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the
	given thread.
	Interrupts must be disabled and the thread debug info lock must be held.
*/
static void
update_thread_breakpoints_flag(Thread* thread)
{
	Team* team = thread->team;

	if (arch_has_breakpoints(&team->debug_info.arch_info))
		atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
}


/*!	Updates the Thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
	threads of the current team.
*/
static void
update_threads_breakpoints_flag()
{
	Team* team = thread_get_current_thread()->team;

	TeamLocker teamLocker(team);

	Thread* thread = team->thread_list;

	if (arch_has_breakpoints(&team->debug_info.arch_info)) {
		for (; thread != NULL; thread = thread->team_next)
			atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
	} else {
		for (; thread != NULL; thread = thread->team_next)
			atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
	}
}


/*!	Updates the thread::flags B_TEAM_DEBUG_DEBUGGER_INSTALLED bit of the
	given thread, which must be the current thread.
*/
static void
update_thread_debugger_installed_flag(Thread* thread)
{
	Team* team = thread->team;

	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
}


/*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all
	threads of the given team.
	The team's lock must be held.
*/
static void
update_threads_debugger_installed_flag(Team* team)
{
	Thread* thread = team->thread_list;

	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		for (; thread != NULL; thread = thread->team_next)
			atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
	} else {
		for (; thread != NULL; thread = thread->team_next)
			atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
	}
}


/*!	Resets the given team debug info to its default (no debugger installed)
	state.
	For the first initialization the function must be called with \a initLock
	set to \c true. If it would be possible that another thread accesses the
	structure at the same time, `lock' must be held when calling the function.
*/
void
clear_team_debug_info(struct team_debug_info *info, bool initLock)
{
	if (info) {
		arch_clear_team_debug_info(&info->arch_info);
		atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->nub_thread = -1;
		info->nub_port = -1;
		info->debugger_write_lock = -1;
		info->causing_thread = -1;
		info->image_event = 0;
		info->breakpoint_manager = NULL;

		if (initLock) {
			B_INITIALIZE_SPINLOCK(&info->lock);
			info->debugger_changed_condition = NULL;
		}
	}
}


/*!	Frees all resources (breakpoint manager, write lock semaphore, nub port)
	referenced by the given team debug info and waits for the nub thread.
	`lock' must not be held nor may interrupts be disabled.
	\a info must not be a member of a team struct (or the team struct must no
	longer be accessible, i.e. the team should already be removed).

	In case the team is still accessible, the procedure is:
	1. get `lock'
	2. copy the team debug info on stack
	3. call clear_team_debug_info() on the team debug info
	4. release `lock'
	5. call destroy_team_debug_info() on the copied team debug info
*/
static void
destroy_team_debug_info(struct team_debug_info *info)
{
	if (info) {
		arch_destroy_team_debug_info(&info->arch_info);

		// delete the breakpoint manager
		delete info->breakpoint_manager;
		info->breakpoint_manager = NULL;

		// delete the debugger port write lock
		if (info->debugger_write_lock >= 0) {
			delete_sem(info->debugger_write_lock);
			info->debugger_write_lock = -1;
		}

		// delete the nub port
		if (info->nub_port >= 0) {
			set_port_owner(info->nub_port, B_CURRENT_TEAM);
			delete_port(info->nub_port);
			info->nub_port = -1;
		}

		// wait for the nub thread (unless we are the nub thread ourselves)
		if (info->nub_thread >= 0) {
			if (info->nub_thread != thread_get_current_thread()->id) {
				int32 result;
				wait_for_thread(info->nub_thread, &result);
			}

			info->nub_thread = -1;
		}

		atomic_set(&info->flags, 0);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->causing_thread = -1;
		info->image_event = -1;
	}
}


/*!	Initializes the given thread debug info to its default state (spinlock
	initialized, no debug port, no profiling).
*/
void
init_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		B_INITIALIZE_SPINLOCK(&info->lock);
		arch_clear_thread_debug_info(&info->arch_info);
		info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
		info->profile.installed_timer = NULL;
	}
}


/*!	Clears the debug info for the current thread.
	Invoked with thread debug info lock being held.
	\param info The thread debug info to clear.
	\param dying If \c true, B_THREAD_DEBUG_DYING is set in the resulting
		flags.
*/
void
clear_thread_debug_info(struct thread_debug_info *info, bool dying)
{
	if (info) {
		// cancel profiling timer
		if (info->profile.installed_timer != NULL) {
			cancel_timer(info->profile.installed_timer);
			info->profile.installed_timer = NULL;
		}

		arch_clear_thread_debug_info(&info->arch_info);
		atomic_set(&info->flags,
			B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ? B_THREAD_DEBUG_DYING : 0));
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
	}
}


/*!	Destroys the resources referenced by the given thread debug info: the
	profiling sample area (unlocked and deleted) and the thread's debug port.
*/
void
destroy_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		area_id sampleArea = info->profile.sample_area;
		if (sampleArea >= 0) {
			area_info areaInfo;
			if (get_area_info(sampleArea, &areaInfo) == B_OK) {
				unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
				delete_area(sampleArea);
			}
		}

		arch_destroy_thread_debug_info(&info->arch_info);

		if (info->debug_port >= 0) {
			delete_port(info->debug_port);
			info->debug_port = -1;
		}

		info->ignore_signals = 0;
		info->ignore_signals_once = 0;

		atomic_set(&info->flags, 0);
	}
}


/*!	Installs \a condition as the team's debugger-changed condition variable,
	waiting until no other thread has one installed.
	On success the caller owns the debugger change "lock" and must call
	finish_debugger_change() when done.
	\param teamID The ID of the team, or \c B_CURRENT_TEAM.
	\param condition The caller's condition variable to install.
	\param team Set to the team on success (referenced implicitly via lookup).
	\return \c B_OK on success, \c B_BAD_TEAM_ID if the team doesn't exist,
		\c B_NOT_ALLOWED for the kernel team.
*/
static status_t
prepare_debugger_change(team_id teamID, ConditionVariable& condition,
	Team*& team)
{
	// We look up the team by ID, even in case of the current team, so we can be
	// sure, that the team is not already dying.
	if (teamID == B_CURRENT_TEAM)
		teamID = thread_get_current_thread()->team->id;

	while (true) {
		// get the team
		team = Team::GetAndLock(teamID);
		if (team == NULL)
			return B_BAD_TEAM_ID;
		BReference<Team> teamReference(team, true);
		TeamLocker teamLocker(team, true);

		// don't allow messing with the kernel team
		if (team == team_get_kernel_team())
			return B_NOT_ALLOWED;

		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return B_OK;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();
		teamLocker.Unlock();

		entry.Wait();
		// retry -- another thread may have installed its condition meanwhile
	}
}


/*!	Variant of the above for an already-known (and referenced) \a team.
	Installs \a condition as the team's debugger-changed condition variable,
	waiting until no other thread has one installed. Always succeeds;
	finish_debugger_change() must be called when done.
*/
static void
prepare_debugger_change(Team* team, ConditionVariable& condition)
{
	while (true) {
		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();

		entry.Wait();
	}
}


/*!	Counterpart to prepare_debugger_change(): removes the caller's condition
	variable from the team and wakes all threads waiting on it.
*/
static void
finish_debugger_change(Team* team)
{
	// unset our condition variable and notify all threads waiting on it
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
	team->debug_info.debugger_changed_condition = NULL;

	condition->NotifyAll();
}


/*!	Called before exec_team(): moves the current thread's debug port (if any)
	to the kernel team so it survives the exec.
*/
void
user_debug_prepare_for_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized, change the ownership of the debug port for the thread
	// to the kernel team, since exec_team() deletes all ports owned by this
	// team. We change the ownership back later.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if ((thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team_get_kernel_team_id());
	}
}


/*!	Counterpart to user_debug_prepare_for_exec(): moves the current thread's
	debug port (if any) back from the kernel team to this team.
*/
void
user_debug_finish_after_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized for this thread, change the ownership of its debug port
	// back to this team.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team->id);
	}
}


/*!	One-time initialization of the user debugging subsystem; delegates to the
	architecture hook, if defined.
*/
void
init_user_debug()
{
#ifdef ARCH_INIT_USER_DEBUG
	ARCH_INIT_USER_DEBUG();
#endif
}


/*!	Copies the current team's debug info into \a teamDebugInfo, with
	interrupts disabled and the team debug info lock held for the copy.
*/
static void
get_team_debug_info(team_debug_info &teamDebugInfo)
{
	Thread *thread = thread_get_current_thread();

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);

	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));

	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);
}


/*!	Core of the debug event loop: notifies the debugger of \a event and
	processes commands from the thread's debug port until told to continue.
	\param event The debugger message code describing what happened.
	\param message The message buffer; its debug_origin header is filled in
		here before sending.
	\param size The size of the message buffer.
	\param requireDebugger If \c true, fails unless a debugger is installed.
	\param restart Set to \c true if the debugger changed while waiting and
		the caller should invoke this function again.
	\return A B_THREAD_DEBUG_* action code from the debugger, or an error.
*/
static status_t
thread_hit_debug_event_internal(debug_debugger_message event,
	const void *message, int32 size, bool requireDebugger, bool &restart)
{
	restart = false;
	Thread *thread = thread_get_current_thread();

	TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", event: %" B_PRIu32
		", message: %p, size: %" B_PRId32 "\n", thread->id, (uint32)event,
		message, size));

	// check, if there's a debug port already
	bool setPort = !(atomic_get(&thread->debug_info.flags)
		& B_THREAD_DEBUG_INITIALIZED);

	// create a port, if there is none yet
	port_id port = -1;
	if (setPort) {
		char nameBuffer[128];
		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %" B_PRId32,
			thread->id);

		port = create_port(1, nameBuffer);
		if (port < 0) {
			dprintf("thread_hit_debug_event(): Failed to create debug port: "
				"%s\n", strerror(port));
			return port;
		}
	}
	// check the debug info structures once more: get the debugger port, set
	// the thread's debug port, and update the thread's debug flags
	port_id deletePort = port;
	port_id debuggerPort = -1;
	port_id nubPort = -1;
	status_t error = B_OK;
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	uint32 threadFlags = thread->debug_info.flags;
	threadFlags &= ~B_THREAD_DEBUG_STOP;
	bool debuggerInstalled
		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	if (thread->id == thread->team->debug_info.nub_thread) {
		// Ugh, we're the nub thread. We shouldn't be here.
		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %" B_PRId32
			"\n", thread->id));

		error = B_ERROR;
	} else if (debuggerInstalled || !requireDebugger) {
		if (debuggerInstalled) {
			debuggerPort = thread->team->debug_info.debugger_port;
			nubPort = thread->team->debug_info.nub_port;
		}

		if (setPort) {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				// someone created a port for us (the port we've created will
				// be deleted below)
				port = thread->debug_info.debug_port;
			} else {
				thread->debug_info.debug_port = port;
				deletePort = -1;	// keep the port
				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
			}
		} else {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				port = thread->debug_info.debug_port;
			} else {
				// someone deleted our port
				error = B_ERROR;
			}
		}
	} else
		error = B_ERROR;

	// update the flags
	if (error == B_OK)
		threadFlags |= B_THREAD_DEBUG_STOPPED;
	atomic_set(&thread->debug_info.flags, threadFlags);

	update_thread_user_debug_flag(thread);

	threadDebugInfoLocker.Unlock();
	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);

	// delete the superfluous port
	if (deletePort >= 0)
		delete_port(deletePort);

	if (error != B_OK) {
		TRACE(("thread_hit_debug_event() error: thread: %" B_PRId32 ", error: "
			"%" B_PRIx32 "\n", thread->id, error));
		return error;
	}

	// send a message to the debugger port
	if (debuggerInstalled) {
		// update the message's origin info first
		debug_origin *origin = (debug_origin *)message;
		origin->thread = thread->id;
		origin->team = thread->team->id;
		origin->nub_port = nubPort;

		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", sending "
			"message to debugger port %" B_PRId32 "\n", thread->id,
			debuggerPort));

		error = debugger_write(debuggerPort, event, message, size, false);
	}

	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
	bool singleStep = false;

	if (error == B_OK) {
		// command loop: block on our debug port until the debugger tells us
		// to continue (or it goes away/changes)
		bool done = false;
		while (!done) {
			// read a command from the debug port
			int32 command;
			debugged_thread_message_data commandMessage;
			ssize_t commandMessageSize = read_port_etc(port, &command,
				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
				0);

			if (commandMessageSize < 0) {
				error = commandMessageSize;
				TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed "
					"to receive message from port %" B_PRId32 ": %" B_PRIx32 "\n",
					thread->id, port, error));
				break;
			}

			switch (command) {
				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
						thread->id));
					result = commandMessage.continue_thread.handle_event;

					singleStep = commandMessage.continue_thread.single_step;
					done = true;
					break;

				case B_DEBUGGED_THREAD_SET_CPU_STATE:
				{
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
						thread->id));
					arch_set_debug_cpu_state(
						&commandMessage.set_cpu_state.cpu_state);

					break;
				}

				case B_DEBUGGED_THREAD_GET_CPU_STATE:
				{
					port_id replyPort = commandMessage.get_cpu_state.reply_port;

					// prepare the message
					debug_nub_get_cpu_state_reply replyMessage;
					replyMessage.error = B_OK;
					replyMessage.message = event;
					arch_get_debug_cpu_state(&replyMessage.cpu_state);

					// send it
					error = kill_interruptable_write_port(replyPort, event,
						&replyMessage, sizeof(replyMessage));

					break;
				}

				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
				{
					// Check, if the debugger really changed, i.e. is different
					// than the one we know.
					team_debug_info teamDebugInfo;
					get_team_debug_info(teamDebugInfo);

					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
						if (!debuggerInstalled
							|| teamDebugInfo.debugger_port != debuggerPort) {
							// debugger was installed or has changed: restart
							// this function
							restart = true;
							done = true;
						}
					} else {
						if (debuggerInstalled) {
							// debugger is gone: continue the thread normally
							done = true;
						}
					}

					break;
				}
			}
		}
	} else {
		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed to send "
			"message to debugger port %" B_PRId32 ": %" B_PRIx32 "\n",
			thread->id, debuggerPort, error));
	}

	// update the thread debug info
	bool destroyThreadInfo = false;
	thread_debug_info threadDebugInfo;

	state = disable_interrupts();
	threadDebugInfoLocker.Lock();

	// check, if the team is still being debugged
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// update the single-step flag
		if (singleStep) {
			atomic_or(&thread->debug_info.flags,
				B_THREAD_DEBUG_SINGLE_STEP);
			atomic_or(&thread->flags, THREAD_FLAGS_SINGLE_STEP);
		} else {
			atomic_and(&thread->debug_info.flags,
				~(int32)B_THREAD_DEBUG_SINGLE_STEP);
		}

		// unset the "stopped" state
		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);

		update_thread_user_debug_flag(thread);

	} else {
		// the debugger is gone: cleanup our info completely
		threadDebugInfo = thread->debug_info;
		clear_thread_debug_info(&thread->debug_info, false);
		destroyThreadInfo = true;
	}

	threadDebugInfoLocker.Unlock();
	restore_interrupts(state);

	// enable/disable single stepping
	arch_update_thread_single_step();

	if (destroyThreadInfo)
		destroy_thread_debug_info(&threadDebugInfo);

	return (error == B_OK ? result : error);
}


/*!	Enters the debug event loop for the current thread, retrying as long as
	the debugger changes, then gives the team's breakpoint manager a chance
	to prepare for continuing.
	\param event The debugger message code describing what happened.
	\param message The message buffer (origin header filled in internally).
	\param size The size of the message buffer.
	\param requireDebugger If \c true, fails unless a debugger is installed.
	\return The debugger's action code, or an error.
*/
static status_t
thread_hit_debug_event(debug_debugger_message event, const void *message,
	int32 size, bool requireDebugger)
{
	status_t result;
	bool restart;
	do {
		restart = false;
		result = thread_hit_debug_event_internal(event, message, size,
			requireDebugger, restart);
	} while (result >= 0 && restart);

	// Prepare to continue -- we install a debugger change condition, so no one
	// will change the debugger while we're playing with the breakpoint manager.
	// TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
	Team* team = thread_get_current_thread()->team;
	ConditionVariable debugChangeCondition;
	debugChangeCondition.Init(team, "debug change condition");
	prepare_debugger_change(team, debugChangeCondition);

	if (team->debug_info.breakpoint_manager != NULL) {
		bool isSyscall;
		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
		if (pc != NULL && !isSyscall)
			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
	}

	finish_debugger_change(team);

	return result;
}


/*!	Like thread_hit_debug_event(), but first makes sure a debugger is
	installed for the team (installing the default debugger, if necessary).
*/
static status_t
thread_hit_serious_debug_event(debug_debugger_message event,
	const void *message, int32 messageSize)
{
	// ensure that a debugger is installed for this team
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		Thread *thread = thread_get_current_thread();
		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
			"thread: %" B_PRId32 ": %s\n", thread->id, strerror(error));
		return error;
	}

	// enter the debug loop
	return thread_hit_debug_event(event, message, messageSize, true);
}


/*!	Notifies the debugger of an imminent syscall, if pre-syscall tracing is
	enabled for the current team or thread.
	\param syscall The syscall number.
	\param args The raw syscall argument block.
*/
void
user_debug_pre_syscall(uint32 syscall, void *args)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether pre-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_pre_syscall message;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args,
				kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
		sizeof(message), true);
}


/*!	Notifies the debugger of a completed syscall, if post-syscall tracing is
	enabled for the current team or thread.
	\param syscall The syscall number.
	\param args The raw syscall argument block.
	\param returnValue The syscall's return value.
	\param startTime When the syscall started (end time is taken here).
*/
void
user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
	bigtime_t startTime)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether post-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_post_syscall message;
	message.start_time = startTime;
	message.end_time = system_time();
	message.return_value = returnValue;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
		sizeof(message), true);
}


/**	\brief To be called when an unhandled processor exception (error/fault)
 *		   occurred.
 *	\param exception The debug_why_stopped value identifying the kind of fault.
 *	\param signal The signal corresponding to the exception.
 *	\return \c true, if the caller shall continue normally, i.e. usually send
 *			a deadly signal. \c false, if the debugger insists to continue the
 *			program (e.g. because it has removed the cause of the problem).
 */
bool
user_debug_exception_occurred(debug_exception_type exception, int signal)
{
	// First check whether there's a signal handler installed for the signal.
	// If so, we don't want to install a debugger for the team. We always send
	// the signal instead. An already installed debugger will be notified, if
	// it has requested notifications of signal.
	struct sigaction signalAction;
	if (sigaction(signal, NULL, &signalAction) == 0
		&& signalAction.sa_handler != SIG_DFL) {
		return true;
	}

	// prepare the message
	debug_exception_occurred message;
	message.exception = exception;
	message.signal = signal;

	status_t result = thread_hit_serious_debug_event(
		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


/*!	Notifies the debugger (if installed and interested in signals) that the
	current thread received \a signal.
	\param signal The signal number.
	\param handler The sigaction installed for the signal.
	\param deadly Whether the signal would kill the thread.
	\return \c false if the debugger requested the signal be ignored
		(B_THREAD_DEBUG_IGNORE_EVENT), \c true otherwise.
*/
bool
user_debug_handle_signal(int signal, struct sigaction *handler, bool deadly)
{
	// check, if a debugger is installed and is interested in signals
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
		// ~flags & mask != 0 <=> at least one of the mask bits is unset
		return true;
	}

	// prepare the message
	debug_signal_received message;
	message.signal = signal;
	message.handler = *handler;
	message.deadly = deadly;

	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
		&message, sizeof(message), true);
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


/*!	Stops the current thread for the debugger, either reporting an emulated
	single-step (if B_THREAD_DEBUG_NOTIFY_SINGLE_STEP was set) or a plain
	"thread debugged" event.
*/
void
user_debug_stop_thread()
{
	// check whether this is actually an emulated single-step notification
	Thread* thread = thread_get_current_thread();
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	bool singleStepped = false;
	// atomically clear the flag and check whether it had been set
	if ((atomic_and(&thread->debug_info.flags,
			~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
				& B_THREAD_DEBUG_NOTIFY_SINGLE_STEP) != 0) {
		singleStepped = true;
	}

	threadDebugInfoLocker.Unlock();

	if (singleStepped) {
		user_debug_single_stepped();
	} else {
		debug_thread_debugged message;
		thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED,
			&message, sizeof(message));
	}
}


/*!	Notifies the debugger (if installed and interested in team creation
	events) that the current thread created team \a teamID.
*/
void
user_debug_team_created(team_id teamID)
{
	// check, if a debugger is installed and is interested in team creation
	// events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_created message;
	message.new_team = teamID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
		sizeof(message), true);
}


/*!	Asynchronously notifies the given debugger port that team \a teamID has
	been deleted. Sent without blocking (B_RELATIVE_TIMEOUT, timeout 0),
	since the team is already gone.
*/
void
user_debug_team_deleted(team_id teamID, port_id debuggerPort)
{
	if (debuggerPort >= 0) {
		TRACE(("user_debug_team_deleted(team: %" B_PRId32 ", debugger port: "
			"%" B_PRId32 ")\n", teamID, debuggerPort));

		debug_team_deleted message;
		message.origin.thread = -1;
		message.origin.team = teamID;
		message.origin.nub_port = -1;
		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
			sizeof(message), B_RELATIVE_TIMEOUT, 0);
	}
}


/*!	Notifies the debugger (if installed and interested in team creation
	events) that the current team executed an exec(); bumps and reports the
	team's image event counter.
*/
void
user_debug_team_exec()
{
	// check, if a debugger is installed and is interested in team creation
	// events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_exec message;
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
		sizeof(message), true);
}


/*!
	Called by a new userland thread to update the debugging related flags of
	\c Thread::flags before the thread first enters userland.
	\param thread The calling thread.
*/
void
user_debug_update_new_thread_flags(Thread* thread)
{
	// lock it and update its flags
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	update_thread_user_debug_flag(thread);
	update_thread_breakpoints_flag(thread);
	update_thread_debugger_installed_flag(thread);
}


/*!	Notifies the debugger (if installed and interested in thread events) that
	the current thread created thread \a threadID.
*/
void
user_debug_thread_created(thread_id threadID)
{
	// check, if a debugger is installed and is interested in thread events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// prepare the message
	debug_thread_created message;
	message.new_thread = threadID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message,
		sizeof(message), true);
}


/*!	Notifies team \a teamID's debugger (if installed and interested in thread
	events) that thread \a threadID has been deleted. Serialized manually via
	the team's debugger write lock, since the calling thread no longer
	belongs to the debugged team.
*/
void
user_debug_thread_deleted(team_id teamID, thread_id threadID)
{
	// Things are a bit complicated here, since this thread no longer belongs to
	// the debugged team (but to the kernel). So we can't use debugger_write().

	// get the team debug flags and debugger port
	Team* team = Team::Get(teamID);
	if (team == NULL)
		return;
	BReference<Team> teamReference(team, true);

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;
	sem_id writeLock = team->debug_info.debugger_write_lock;

	debugInfoLocker.Unlock();

	// check, if a debugger is installed and is interested in thread events
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// acquire the debugger write lock
	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK)
		return;

	// re-get the team debug info -- we need to check whether anything changed
	debugInfoLocker.Lock();

	teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id newDebuggerPort = team->debug_info.debugger_port;

	debugInfoLocker.Unlock();

	// Send the message only if the debugger hasn't changed in the meantime or
	// the team is about to be handed over.
	// NOTE(review): the condition actually sends when the handover flag is
	// NOT set, which seems to contradict "about to be handed over" above --
	// confirm which of comment or code reflects the intended behavior.
	if (newDebuggerPort == debuggerPort
		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
		debug_thread_deleted message;
		message.origin.thread = threadID;
		message.origin.team = teamID;
		message.origin.nub_port = -1;

		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
	}

	// release the debugger write lock
	release_sem(writeLock);
}


/*!	Called for a thread that is about to die, cleaning up all user debug
	facilities installed for the thread.
	\param thread The current thread, the one that is going to die.
1134 */ 1135 void 1136 user_debug_thread_exiting(Thread* thread) 1137 { 1138 // thread is the current thread, so using team is safe 1139 Team* team = thread->team; 1140 1141 InterruptsLocker interruptsLocker; 1142 1143 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1144 1145 int32 teamDebugFlags = atomic_get(&team->debug_info.flags); 1146 port_id debuggerPort = team->debug_info.debugger_port; 1147 1148 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1149 1150 // check, if a debugger is installed 1151 if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0 1152 || debuggerPort < 0) { 1153 return; 1154 } 1155 1156 // detach the profile info and mark the thread dying 1157 SpinLocker threadDebugInfoLocker(thread->debug_info.lock); 1158 1159 thread_debug_info& threadDebugInfo = thread->debug_info; 1160 if (threadDebugInfo.profile.samples == NULL) 1161 return; 1162 1163 area_id sampleArea = threadDebugInfo.profile.sample_area; 1164 int32 sampleCount = threadDebugInfo.profile.sample_count; 1165 int32 droppedTicks = threadDebugInfo.profile.dropped_ticks; 1166 int32 stackDepth = threadDebugInfo.profile.stack_depth; 1167 bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth; 1168 int32 imageEvent = threadDebugInfo.profile.image_event; 1169 threadDebugInfo.profile.sample_area = -1; 1170 threadDebugInfo.profile.samples = NULL; 1171 threadDebugInfo.profile.buffer_full = false; 1172 1173 atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING); 1174 1175 threadDebugInfoLocker.Unlock(); 1176 interruptsLocker.Unlock(); 1177 1178 // notify the debugger 1179 debug_profiler_update message; 1180 message.origin.thread = thread->id; 1181 message.origin.team = thread->team->id; 1182 message.origin.nub_port = -1; // asynchronous message 1183 message.sample_count = sampleCount; 1184 message.dropped_ticks = droppedTicks; 1185 message.stack_depth = stackDepth; 1186 message.variable_stack_depth = variableStackDepth; 1187 message.image_event = imageEvent; 1188 message.stopped 
= true; 1189 debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE, 1190 &message, sizeof(message), false); 1191 1192 if (sampleArea >= 0) { 1193 area_info areaInfo; 1194 if (get_area_info(sampleArea, &areaInfo) == B_OK) { 1195 unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE); 1196 delete_area(sampleArea); 1197 } 1198 } 1199 } 1200 1201 1202 void 1203 user_debug_image_created(const image_info *imageInfo) 1204 { 1205 // check, if a debugger is installed and is interested in image events 1206 Thread *thread = thread_get_current_thread(); 1207 int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags); 1208 if (~teamDebugFlags 1209 & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) { 1210 return; 1211 } 1212 1213 // prepare the message 1214 debug_image_created message; 1215 memcpy(&message.info, imageInfo, sizeof(image_info)); 1216 message.image_event = atomic_add(&thread->team->debug_info.image_event, 1) 1217 + 1; 1218 1219 thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message, 1220 sizeof(message), true); 1221 } 1222 1223 1224 void 1225 user_debug_image_deleted(const image_info *imageInfo) 1226 { 1227 // check, if a debugger is installed and is interested in image events 1228 Thread *thread = thread_get_current_thread(); 1229 int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags); 1230 if (~teamDebugFlags 1231 & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) { 1232 return; 1233 } 1234 1235 // prepare the message 1236 debug_image_deleted message; 1237 memcpy(&message.info, imageInfo, sizeof(image_info)); 1238 message.image_event = atomic_add(&thread->team->debug_info.image_event, 1) 1239 + 1; 1240 1241 thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message, 1242 sizeof(message), true); 1243 } 1244 1245 1246 void 1247 user_debug_breakpoint_hit(bool software) 1248 { 1249 // prepare the message 1250 debug_breakpoint_hit message; 1251 arch_get_debug_cpu_state(&message.cpu_state); 1252 
	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
		sizeof(message));
}


/*!	Stops the current thread and reports a watchpoint hit to the debugger. */
void
user_debug_watchpoint_hit()
{
	// prepare the message
	debug_watchpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
		sizeof(message));
}


/*!	Stops the current thread after a single step and notifies the debugger. */
void
user_debug_single_stepped()
{
	// clear the single-step thread flag
	Thread* thread = thread_get_current_thread();
	atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_SINGLE_STEP);

	// prepare the message
	debug_single_step message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
		sizeof(message));
}


/*!	Schedules the profiling timer for the current thread.
	The caller must hold the thread's debug info lock.
	\param thread The current thread.
	\param interval The time after which the timer should fire.
*/
static void
schedule_profiling_timer(Thread* thread, bigtime_t interval)
{
	// one timer per CPU; the thread is pinned to its CPU while this runs
	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
	thread->debug_info.profile.installed_timer = timer;
	thread->debug_info.profile.timer_end = system_time() + interval;
	add_timer(timer, &profiling_event, interval, B_ONE_SHOT_RELATIVE_TIMER);
}


/*!	Samples the current thread's instruction pointer/stack trace.
	The caller must hold the current thread's debug info lock.
	\param flushBuffer Return parameter: Set to \c true when the sampling
		buffer must be flushed. Note: only assigned on that path -- callers
		must initialize it (profiling_event() passes \c false).
	\return \c false when profiling is not (or no longer) active for the
		thread, \c true otherwise (even when the sample had to be dropped).
*/
static bool
profiling_do_sample(bool& flushBuffer)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	if (debugInfo.profile.samples == NULL)
		return false;

	// Check, whether the buffer is full or an image event occurred since the
	// last sample was taken.
	int32 maxSamples = debugInfo.profile.max_samples;
	int32 sampleCount = debugInfo.profile.sample_count;
	int32 stackDepth = debugInfo.profile.stack_depth;
	int32 imageEvent = thread->team->debug_info.image_event;
	if (debugInfo.profile.sample_count > 0) {
		if (debugInfo.profile.last_image_event < imageEvent
			&& debugInfo.profile.variable_stack_depth
			&& sampleCount + 2 <= maxSamples) {
			// an image event occurred, but we use variable stack depth and
			// have enough room in the buffer to indicate an image event
			addr_t* event = debugInfo.profile.samples + sampleCount;
			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
			event[1] = imageEvent;
			sampleCount += 2;
			debugInfo.profile.sample_count = sampleCount;
			debugInfo.profile.last_image_event = imageEvent;
		}

		// flush when an image event couldn't be recorded inline or the
		// buffer has reached its flush threshold
		if (debugInfo.profile.last_image_event < imageEvent
			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
			if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
				flushBuffer = true;
				return true;
			}

			// We can't flush the buffer now, since we interrupted a kernel
			// function. If the buffer is not full yet, we add the samples,
			// otherwise we have to drop them.
			if (maxSamples - sampleCount < stackDepth) {
				debugInfo.profile.dropped_ticks++;
				return true;
			}
		}
	} else {
		// first sample -- set the image event
		debugInfo.profile.image_event = imageEvent;
		debugInfo.profile.last_image_event = imageEvent;
	}

	// get the samples
	addr_t* returnAddresses = debugInfo.profile.samples
		+ debugInfo.profile.sample_count;
	if (debugInfo.profile.variable_stack_depth) {
		// variable sample count per hit: slot 0 holds the count of return
		// addresses that follow it
		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
			stackDepth - 1, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

		debugInfo.profile.sample_count += *returnAddresses + 1;
	} else {
		// fixed sample count per hit; unused slots are zero-padded
		if (stackDepth > 1) {
			int32 count = arch_debug_get_stack_trace(returnAddresses,
				stackDepth, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

			for (int32 i = count; i < stackDepth; i++)
				returnAddresses[i] = 0;
		} else
			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);

		debugInfo.profile.sample_count += stackDepth;
	}

	return true;
}


/*!	Post-interrupt callback: flushes a full sampling buffer to the debugger,
	then resumes sampling if the thread is still being profiled.
*/
static void
profiling_buffer_full(void*)
{
	// It is undefined whether the function is called with interrupts enabled
	// or disabled. We are allowed to enable interrupts, though. First make
	// sure interrupts are disabled.
	disable_interrupts();

	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
		// snapshot the buffer state before resetting it
		int32 sampleCount = debugInfo.profile.sample_count;
		int32 droppedTicks = debugInfo.profile.dropped_ticks;
		int32 stackDepth = debugInfo.profile.stack_depth;
		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
		int32 imageEvent = debugInfo.profile.image_event;

		// notify the debugger
		debugInfo.profile.sample_count = 0;
		debugInfo.profile.dropped_ticks = 0;

		// drop the lock and re-enable interrupts for the blocking send
		threadDebugInfoLocker.Unlock();
		enable_interrupts();

		// prepare the message
		debug_profiler_update message;
		message.sample_count = sampleCount;
		message.dropped_ticks = droppedTicks;
		message.stack_depth = stackDepth;
		message.variable_stack_depth = variableStackDepth;
		message.image_event = imageEvent;
		message.stopped = false;

		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
			sizeof(message), false);

		disable_interrupts();
		threadDebugInfoLocker.Lock();

		// do the sampling and reschedule timer, if still profiling this thread
		bool flushBuffer;
		if (profiling_do_sample(flushBuffer)) {
			debugInfo.profile.buffer_full = false;
			schedule_profiling_timer(thread, debugInfo.profile.interval);
		}
	}

	threadDebugInfoLocker.Unlock();
	enable_interrupts();
}


/*!	Profiling timer event callback.
	Called with interrupts disabled.
*/
static int32
profiling_event(timer* /*unused*/)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	bool flushBuffer = false;
	if (profiling_do_sample(flushBuffer)) {
		if (flushBuffer) {
			// The sample buffer needs to be flushed; we'll have to notify the
			// debugger. We can't do that right here. Instead we set a post
			// interrupt callback doing that for us, and don't reschedule the
			// timer yet.
			thread->post_interrupt_callback = profiling_buffer_full;
			debugInfo.profile.installed_timer = NULL;
			debugInfo.profile.buffer_full = true;
		} else
			schedule_profiling_timer(thread, debugInfo.profile.interval);
	} else
		debugInfo.profile.installed_timer = NULL;
			// profiling was stopped for this thread

	return B_HANDLED_INTERRUPT;
}


/*!	Called by the scheduler when a debugged thread has been unscheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_unscheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// if running, cancel the profiling timer
	struct timer* timer = thread->debug_info.profile.installed_timer;
	if (timer != NULL) {
		// track remaining time, so the interval can resume where it left off
		bigtime_t left = thread->debug_info.profile.timer_end - system_time();
		thread->debug_info.profile.interval_left = max_c(left, 0);
		thread->debug_info.profile.installed_timer = NULL;

		// cancel timer
		threadDebugInfoLocker.Unlock();
			// not necessary, but doesn't harm and reduces contention
		cancel_timer(timer);
			// since invoked on the same CPU, this will not possibly wait for
			// an already called timer hook
	}
}


/*!	Called by the scheduler when a debugged thread has been scheduled.
	The scheduler lock is being held.
1496 */ 1497 void 1498 user_debug_thread_scheduled(Thread* thread) 1499 { 1500 SpinLocker threadDebugInfoLocker(thread->debug_info.lock); 1501 1502 if (thread->debug_info.profile.samples != NULL 1503 && !thread->debug_info.profile.buffer_full) { 1504 // install profiling timer 1505 schedule_profiling_timer(thread, 1506 thread->debug_info.profile.interval_left); 1507 } 1508 } 1509 1510 1511 /*! \brief Called by the debug nub thread of a team to broadcast a message to 1512 all threads of the team that are initialized for debugging (and 1513 thus have a debug port). 1514 */ 1515 static void 1516 broadcast_debugged_thread_message(Thread *nubThread, int32 code, 1517 const void *message, int32 size) 1518 { 1519 // iterate through the threads 1520 thread_info threadInfo; 1521 int32 cookie = 0; 1522 while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo) 1523 == B_OK) { 1524 // get the thread and lock it 1525 Thread* thread = Thread::GetAndLock(threadInfo.thread); 1526 if (thread == NULL) 1527 continue; 1528 1529 BReference<Thread> threadReference(thread, true); 1530 ThreadLocker threadLocker(thread, true); 1531 1532 // get the thread's debug port 1533 InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock); 1534 1535 port_id threadDebugPort = -1; 1536 if (thread && thread != nubThread && thread->team == nubThread->team 1537 && (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0 1538 && (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) { 1539 threadDebugPort = thread->debug_info.debug_port; 1540 } 1541 1542 threadDebugInfoLocker.Unlock(); 1543 threadLocker.Unlock(); 1544 1545 // send the message to the thread 1546 if (threadDebugPort >= 0) { 1547 status_t error = kill_interruptable_write_port(threadDebugPort, 1548 code, message, size); 1549 if (error != B_OK) { 1550 TRACE(("broadcast_debugged_thread_message(): Failed to send " 1551 "message to thread %" B_PRId32 ": %" B_PRIx32 "\n", 1552 thread->id, error)); 1553 } 1554 } 1555 } 
1556 } 1557 1558 1559 static void 1560 nub_thread_cleanup(Thread *nubThread) 1561 { 1562 TRACE(("nub_thread_cleanup(%" B_PRId32 "): debugger port: %" B_PRId32 "\n", 1563 nubThread->id, nubThread->team->debug_info.debugger_port)); 1564 1565 ConditionVariable debugChangeCondition; 1566 debugChangeCondition.Init(nubThread->team, "debug change condition"); 1567 prepare_debugger_change(nubThread->team, debugChangeCondition); 1568 1569 team_debug_info teamDebugInfo; 1570 bool destroyDebugInfo = false; 1571 1572 TeamLocker teamLocker(nubThread->team); 1573 // required by update_threads_debugger_installed_flag() 1574 1575 cpu_status state = disable_interrupts(); 1576 GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info); 1577 1578 team_debug_info &info = nubThread->team->debug_info; 1579 if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED 1580 && info.nub_thread == nubThread->id) { 1581 teamDebugInfo = info; 1582 clear_team_debug_info(&info, false); 1583 destroyDebugInfo = true; 1584 } 1585 1586 // update the thread::flags fields 1587 update_threads_debugger_installed_flag(nubThread->team); 1588 1589 RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info); 1590 restore_interrupts(state); 1591 1592 teamLocker.Unlock(); 1593 1594 if (destroyDebugInfo) 1595 teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints(); 1596 1597 finish_debugger_change(nubThread->team); 1598 1599 if (destroyDebugInfo) 1600 destroy_team_debug_info(&teamDebugInfo); 1601 1602 // notify all threads that the debugger is gone 1603 broadcast_debugged_thread_message(nubThread, 1604 B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0); 1605 } 1606 1607 1608 /** \brief Debug nub thread helper function that returns the debug port of 1609 * a thread of the same team. 
1610 */ 1611 static status_t 1612 debug_nub_thread_get_thread_debug_port(Thread *nubThread, 1613 thread_id threadID, port_id &threadDebugPort) 1614 { 1615 threadDebugPort = -1; 1616 1617 // get the thread 1618 Thread* thread = Thread::GetAndLock(threadID); 1619 if (thread == NULL) 1620 return B_BAD_THREAD_ID; 1621 BReference<Thread> threadReference(thread, true); 1622 ThreadLocker threadLocker(thread, true); 1623 1624 // get the debug port 1625 InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock); 1626 1627 if (thread->team != nubThread->team) 1628 return B_BAD_VALUE; 1629 if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) == 0) 1630 return B_BAD_THREAD_STATE; 1631 1632 threadDebugPort = thread->debug_info.debug_port; 1633 1634 threadDebugInfoLocker.Unlock(); 1635 1636 if (threadDebugPort < 0) 1637 return B_ERROR; 1638 1639 return B_OK; 1640 } 1641 1642 1643 static status_t 1644 debug_nub_thread(void *) 1645 { 1646 Thread *nubThread = thread_get_current_thread(); 1647 1648 // check, if we're still the current nub thread and get our port 1649 cpu_status state = disable_interrupts(); 1650 GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info); 1651 1652 if (nubThread->team->debug_info.nub_thread != nubThread->id) { 1653 RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info); 1654 restore_interrupts(state); 1655 return 0; 1656 } 1657 1658 port_id port = nubThread->team->debug_info.nub_port; 1659 sem_id writeLock = nubThread->team->debug_info.debugger_write_lock; 1660 BreakpointManager* breakpointManager 1661 = nubThread->team->debug_info.breakpoint_manager; 1662 1663 RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info); 1664 restore_interrupts(state); 1665 1666 TRACE(("debug_nub_thread() thread: %" B_PRId32 ", team %" B_PRId32 ", nub " 1667 "port: %" B_PRId32 "\n", nubThread->id, nubThread->team->id, port)); 1668 1669 // notify all threads that a debugger has been installed 1670 broadcast_debugged_thread_message(nubThread, 1671 
B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0); 1672 1673 // command processing loop 1674 while (true) { 1675 int32 command; 1676 debug_nub_message_data message; 1677 ssize_t messageSize = read_port_etc(port, &command, &message, 1678 sizeof(message), B_KILL_CAN_INTERRUPT, 0); 1679 1680 if (messageSize < 0) { 1681 // The port is no longer valid or we were interrupted by a kill 1682 // signal: If we are still listed in the team's debug info as nub 1683 // thread, we need to update that. 1684 nub_thread_cleanup(nubThread); 1685 1686 TRACE(("nub thread %" B_PRId32 ": terminating: %lx\n", 1687 nubThread->id, messageSize)); 1688 1689 return messageSize; 1690 } 1691 1692 bool sendReply = false; 1693 union { 1694 debug_nub_read_memory_reply read_memory; 1695 debug_nub_write_memory_reply write_memory; 1696 debug_nub_get_cpu_state_reply get_cpu_state; 1697 debug_nub_set_breakpoint_reply set_breakpoint; 1698 debug_nub_set_watchpoint_reply set_watchpoint; 1699 debug_nub_get_signal_masks_reply get_signal_masks; 1700 debug_nub_get_signal_handler_reply get_signal_handler; 1701 debug_nub_start_profiler_reply start_profiler; 1702 debug_profiler_update profiler_update; 1703 debug_nub_write_core_file_reply write_core_file; 1704 } reply; 1705 int32 replySize = 0; 1706 port_id replyPort = -1; 1707 1708 // process the command 1709 switch (command) { 1710 case B_DEBUG_MESSAGE_READ_MEMORY: 1711 { 1712 // get the parameters 1713 replyPort = message.read_memory.reply_port; 1714 void *address = message.read_memory.address; 1715 int32 size = message.read_memory.size; 1716 status_t result = B_OK; 1717 1718 // check the parameters 1719 if (!BreakpointManager::CanAccessAddress(address, false)) 1720 result = B_BAD_ADDRESS; 1721 else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE) 1722 result = B_BAD_VALUE; 1723 1724 // read the memory 1725 size_t bytesRead = 0; 1726 if (result == B_OK) { 1727 result = breakpointManager->ReadMemory(address, 1728 reply.read_memory.data, size, bytesRead); 1729 } 
1730 reply.read_memory.error = result; 1731 1732 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_READ_MEMORY: " 1733 "reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32 1734 ", result: %" B_PRIx32 ", read: %ld\n", nubThread->id, 1735 replyPort, address, size, result, bytesRead)); 1736 1737 // send only as much data as necessary 1738 reply.read_memory.size = bytesRead; 1739 replySize = reply.read_memory.data + bytesRead - (char*)&reply; 1740 sendReply = true; 1741 break; 1742 } 1743 1744 case B_DEBUG_MESSAGE_WRITE_MEMORY: 1745 { 1746 // get the parameters 1747 replyPort = message.write_memory.reply_port; 1748 void *address = message.write_memory.address; 1749 int32 size = message.write_memory.size; 1750 const char *data = message.write_memory.data; 1751 int32 realSize = (char*)&message + messageSize - data; 1752 status_t result = B_OK; 1753 1754 // check the parameters 1755 if (!BreakpointManager::CanAccessAddress(address, true)) 1756 result = B_BAD_ADDRESS; 1757 else if (size <= 0 || size > realSize) 1758 result = B_BAD_VALUE; 1759 1760 // write the memory 1761 size_t bytesWritten = 0; 1762 if (result == B_OK) { 1763 result = breakpointManager->WriteMemory(address, data, size, 1764 bytesWritten); 1765 } 1766 reply.write_memory.error = result; 1767 1768 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_WRITE_MEMORY: " 1769 "reply port: %" B_PRId32 ", address: %p, size: %" B_PRId32 1770 ", result: %" B_PRIx32 ", written: %ld\n", nubThread->id, 1771 replyPort, address, size, result, bytesWritten)); 1772 1773 reply.write_memory.size = bytesWritten; 1774 sendReply = true; 1775 replySize = sizeof(debug_nub_write_memory_reply); 1776 break; 1777 } 1778 1779 case B_DEBUG_MESSAGE_SET_TEAM_FLAGS: 1780 { 1781 // get the parameters 1782 int32 flags = message.set_team_flags.flags 1783 & B_TEAM_DEBUG_USER_FLAG_MASK; 1784 1785 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_TEAM_FLAGS" 1786 ": flags: %" B_PRIx32 "\n", nubThread->id, flags)); 1787 1788 Team *team = 
thread_get_current_thread()->team; 1789 1790 // set the flags 1791 cpu_status state = disable_interrupts(); 1792 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1793 1794 flags |= team->debug_info.flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK; 1795 atomic_set(&team->debug_info.flags, flags); 1796 1797 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 1798 restore_interrupts(state); 1799 1800 break; 1801 } 1802 1803 case B_DEBUG_MESSAGE_SET_THREAD_FLAGS: 1804 { 1805 // get the parameters 1806 thread_id threadID = message.set_thread_flags.thread; 1807 int32 flags = message.set_thread_flags.flags 1808 & B_THREAD_DEBUG_USER_FLAG_MASK; 1809 1810 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_THREAD_FLAGS" 1811 ": thread: %" B_PRId32 ", flags: %" B_PRIx32 "\n", 1812 nubThread->id, threadID, flags)); 1813 1814 // set the flags 1815 Thread* thread = Thread::GetAndLock(threadID); 1816 if (thread == NULL) 1817 break; 1818 BReference<Thread> threadReference(thread, true); 1819 ThreadLocker threadLocker(thread, true); 1820 1821 InterruptsSpinLocker threadDebugInfoLocker( 1822 thread->debug_info.lock); 1823 1824 if (thread->team == thread_get_current_thread()->team) { 1825 flags |= thread->debug_info.flags 1826 & B_THREAD_DEBUG_KERNEL_FLAG_MASK; 1827 atomic_set(&thread->debug_info.flags, flags); 1828 } 1829 1830 break; 1831 } 1832 1833 case B_DEBUG_MESSAGE_CONTINUE_THREAD: 1834 { 1835 // get the parameters 1836 thread_id threadID; 1837 uint32 handleEvent; 1838 bool singleStep; 1839 1840 threadID = message.continue_thread.thread; 1841 handleEvent = message.continue_thread.handle_event; 1842 singleStep = message.continue_thread.single_step; 1843 1844 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CONTINUE_THREAD" 1845 ": thread: %" B_PRId32 ", handle event: %" B_PRIu32 ", " 1846 "single step: %d\n", nubThread->id, threadID, handleEvent, 1847 singleStep)); 1848 1849 // find the thread and get its debug port 1850 port_id threadDebugPort = -1; 1851 status_t result = 
debug_nub_thread_get_thread_debug_port( 1852 nubThread, threadID, threadDebugPort); 1853 1854 // send a message to the debugged thread 1855 if (result == B_OK) { 1856 debugged_thread_continue commandMessage; 1857 commandMessage.handle_event = handleEvent; 1858 commandMessage.single_step = singleStep; 1859 1860 result = write_port(threadDebugPort, 1861 B_DEBUGGED_THREAD_MESSAGE_CONTINUE, 1862 &commandMessage, sizeof(commandMessage)); 1863 } else if (result == B_BAD_THREAD_STATE) { 1864 Thread* thread = Thread::GetAndLock(threadID); 1865 if (thread == NULL) 1866 break; 1867 1868 BReference<Thread> threadReference(thread, true); 1869 ThreadLocker threadLocker(thread, true); 1870 if (thread->state == B_THREAD_SUSPENDED) { 1871 threadLocker.Unlock(); 1872 resume_thread(threadID); 1873 break; 1874 } 1875 } 1876 1877 break; 1878 } 1879 1880 case B_DEBUG_MESSAGE_SET_CPU_STATE: 1881 { 1882 // get the parameters 1883 thread_id threadID = message.set_cpu_state.thread; 1884 const debug_cpu_state &cpuState 1885 = message.set_cpu_state.cpu_state; 1886 1887 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_CPU_STATE" 1888 ": thread: %" B_PRId32 "\n", nubThread->id, threadID)); 1889 1890 // find the thread and get its debug port 1891 port_id threadDebugPort = -1; 1892 status_t result = debug_nub_thread_get_thread_debug_port( 1893 nubThread, threadID, threadDebugPort); 1894 1895 // send a message to the debugged thread 1896 if (result == B_OK) { 1897 debugged_thread_set_cpu_state commandMessage; 1898 memcpy(&commandMessage.cpu_state, &cpuState, 1899 sizeof(debug_cpu_state)); 1900 write_port(threadDebugPort, 1901 B_DEBUGGED_THREAD_SET_CPU_STATE, 1902 &commandMessage, sizeof(commandMessage)); 1903 } 1904 1905 break; 1906 } 1907 1908 case B_DEBUG_MESSAGE_GET_CPU_STATE: 1909 { 1910 // get the parameters 1911 thread_id threadID = message.get_cpu_state.thread; 1912 replyPort = message.get_cpu_state.reply_port; 1913 1914 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_CPU_STATE" 
1915 ": thread: %" B_PRId32 "\n", nubThread->id, threadID)); 1916 1917 // find the thread and get its debug port 1918 port_id threadDebugPort = -1; 1919 status_t result = debug_nub_thread_get_thread_debug_port( 1920 nubThread, threadID, threadDebugPort); 1921 1922 // send a message to the debugged thread 1923 if (threadDebugPort >= 0) { 1924 debugged_thread_get_cpu_state commandMessage; 1925 commandMessage.reply_port = replyPort; 1926 result = write_port(threadDebugPort, 1927 B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage, 1928 sizeof(commandMessage)); 1929 } 1930 1931 // send a reply to the debugger in case of error 1932 if (result != B_OK) { 1933 reply.get_cpu_state.error = result; 1934 sendReply = true; 1935 replySize = sizeof(reply.get_cpu_state); 1936 } 1937 1938 break; 1939 } 1940 1941 case B_DEBUG_MESSAGE_SET_BREAKPOINT: 1942 { 1943 // get the parameters 1944 replyPort = message.set_breakpoint.reply_port; 1945 void *address = message.set_breakpoint.address; 1946 1947 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_BREAKPOINT" 1948 ": address: %p\n", nubThread->id, address)); 1949 1950 // check the address 1951 status_t result = B_OK; 1952 if (address == NULL 1953 || !BreakpointManager::CanAccessAddress(address, false)) { 1954 result = B_BAD_ADDRESS; 1955 } 1956 1957 // set the breakpoint 1958 if (result == B_OK) 1959 result = breakpointManager->InstallBreakpoint(address); 1960 1961 if (result == B_OK) 1962 update_threads_breakpoints_flag(); 1963 1964 // prepare the reply 1965 reply.set_breakpoint.error = result; 1966 replySize = sizeof(reply.set_breakpoint); 1967 sendReply = true; 1968 1969 break; 1970 } 1971 1972 case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT: 1973 { 1974 // get the parameters 1975 void *address = message.clear_breakpoint.address; 1976 1977 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_BREAKPOINT" 1978 ": address: %p\n", nubThread->id, address)); 1979 1980 // check the address 1981 status_t result = B_OK; 1982 if (address == NULL 
1983 || !BreakpointManager::CanAccessAddress(address, false)) { 1984 result = B_BAD_ADDRESS; 1985 } 1986 1987 // clear the breakpoint 1988 if (result == B_OK) 1989 result = breakpointManager->UninstallBreakpoint(address); 1990 1991 if (result == B_OK) 1992 update_threads_breakpoints_flag(); 1993 1994 break; 1995 } 1996 1997 case B_DEBUG_MESSAGE_SET_WATCHPOINT: 1998 { 1999 // get the parameters 2000 replyPort = message.set_watchpoint.reply_port; 2001 void *address = message.set_watchpoint.address; 2002 uint32 type = message.set_watchpoint.type; 2003 int32 length = message.set_watchpoint.length; 2004 2005 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_WATCHPOINT" 2006 ": address: %p, type: %" B_PRIu32 ", length: %" B_PRId32 "\n", 2007 nubThread->id, address, type, length)); 2008 2009 // check the address and size 2010 status_t result = B_OK; 2011 if (address == NULL 2012 || !BreakpointManager::CanAccessAddress(address, false)) { 2013 result = B_BAD_ADDRESS; 2014 } 2015 if (length < 0) 2016 result = B_BAD_VALUE; 2017 2018 // set the watchpoint 2019 if (result == B_OK) { 2020 result = breakpointManager->InstallWatchpoint(address, type, 2021 length); 2022 } 2023 2024 if (result == B_OK) 2025 update_threads_breakpoints_flag(); 2026 2027 // prepare the reply 2028 reply.set_watchpoint.error = result; 2029 replySize = sizeof(reply.set_watchpoint); 2030 sendReply = true; 2031 2032 break; 2033 } 2034 2035 case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT: 2036 { 2037 // get the parameters 2038 void *address = message.clear_watchpoint.address; 2039 2040 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_WATCHPOINT" 2041 ": address: %p\n", nubThread->id, address)); 2042 2043 // check the address 2044 status_t result = B_OK; 2045 if (address == NULL 2046 || !BreakpointManager::CanAccessAddress(address, false)) { 2047 result = B_BAD_ADDRESS; 2048 } 2049 2050 // clear the watchpoint 2051 if (result == B_OK) 2052 result = breakpointManager->UninstallWatchpoint(address); 2053 2054 
if (result == B_OK) 2055 update_threads_breakpoints_flag(); 2056 2057 break; 2058 } 2059 2060 case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS: 2061 { 2062 // get the parameters 2063 thread_id threadID = message.set_signal_masks.thread; 2064 uint64 ignore = message.set_signal_masks.ignore_mask; 2065 uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask; 2066 uint32 ignoreOp = message.set_signal_masks.ignore_op; 2067 uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op; 2068 2069 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_MASKS" 2070 ": thread: %" B_PRId32 ", ignore: %" B_PRIx64 " (op: %" 2071 B_PRIu32 "), ignore once: %" B_PRIx64 " (op: %" B_PRIu32 2072 ")\n", nubThread->id, threadID, ignore, ignoreOp, 2073 ignoreOnce, ignoreOnceOp)); 2074 2075 // set the masks 2076 Thread* thread = Thread::GetAndLock(threadID); 2077 if (thread == NULL) 2078 break; 2079 BReference<Thread> threadReference(thread, true); 2080 ThreadLocker threadLocker(thread, true); 2081 2082 InterruptsSpinLocker threadDebugInfoLocker( 2083 thread->debug_info.lock); 2084 2085 if (thread->team == thread_get_current_thread()->team) { 2086 thread_debug_info &threadDebugInfo = thread->debug_info; 2087 // set ignore mask 2088 switch (ignoreOp) { 2089 case B_DEBUG_SIGNAL_MASK_AND: 2090 threadDebugInfo.ignore_signals &= ignore; 2091 break; 2092 case B_DEBUG_SIGNAL_MASK_OR: 2093 threadDebugInfo.ignore_signals |= ignore; 2094 break; 2095 case B_DEBUG_SIGNAL_MASK_SET: 2096 threadDebugInfo.ignore_signals = ignore; 2097 break; 2098 } 2099 2100 // set ignore once mask 2101 switch (ignoreOnceOp) { 2102 case B_DEBUG_SIGNAL_MASK_AND: 2103 threadDebugInfo.ignore_signals_once &= ignoreOnce; 2104 break; 2105 case B_DEBUG_SIGNAL_MASK_OR: 2106 threadDebugInfo.ignore_signals_once |= ignoreOnce; 2107 break; 2108 case B_DEBUG_SIGNAL_MASK_SET: 2109 threadDebugInfo.ignore_signals_once = ignoreOnce; 2110 break; 2111 } 2112 } 2113 2114 break; 2115 } 2116 2117 case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS: 2118 { 
2119 // get the parameters 2120 replyPort = message.get_signal_masks.reply_port; 2121 thread_id threadID = message.get_signal_masks.thread; 2122 status_t result = B_OK; 2123 2124 // get the masks 2125 uint64 ignore = 0; 2126 uint64 ignoreOnce = 0; 2127 2128 Thread* thread = Thread::GetAndLock(threadID); 2129 if (thread != NULL) { 2130 BReference<Thread> threadReference(thread, true); 2131 ThreadLocker threadLocker(thread, true); 2132 2133 InterruptsSpinLocker threadDebugInfoLocker( 2134 thread->debug_info.lock); 2135 2136 ignore = thread->debug_info.ignore_signals; 2137 ignoreOnce = thread->debug_info.ignore_signals_once; 2138 } else 2139 result = B_BAD_THREAD_ID; 2140 2141 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_MASKS" 2142 ": reply port: %" B_PRId32 ", thread: %" B_PRId32 ", " 2143 "ignore: %" B_PRIx64 ", ignore once: %" B_PRIx64 ", result: " 2144 "%" B_PRIx32 "\n", nubThread->id, replyPort, threadID, 2145 ignore, ignoreOnce, result)); 2146 2147 // prepare the message 2148 reply.get_signal_masks.error = result; 2149 reply.get_signal_masks.ignore_mask = ignore; 2150 reply.get_signal_masks.ignore_once_mask = ignoreOnce; 2151 replySize = sizeof(reply.get_signal_masks); 2152 sendReply = true; 2153 break; 2154 } 2155 2156 case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER: 2157 { 2158 // get the parameters 2159 int signal = message.set_signal_handler.signal; 2160 struct sigaction &handler = message.set_signal_handler.handler; 2161 2162 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER" 2163 ": signal: %d, handler: %p\n", nubThread->id, signal, 2164 handler.sa_handler)); 2165 2166 // set the handler 2167 sigaction(signal, &handler, NULL); 2168 2169 break; 2170 } 2171 2172 case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER: 2173 { 2174 // get the parameters 2175 replyPort = message.get_signal_handler.reply_port; 2176 int signal = message.get_signal_handler.signal; 2177 status_t result = B_OK; 2178 2179 // get the handler 2180 if (sigaction(signal, 
NULL, &reply.get_signal_handler.handler) 2181 != 0) { 2182 result = errno; 2183 } 2184 2185 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER" 2186 ": reply port: %" B_PRId32 ", signal: %d, handler: %p\n", 2187 nubThread->id, replyPort, signal, 2188 reply.get_signal_handler.handler.sa_handler)); 2189 2190 // prepare the message 2191 reply.get_signal_handler.error = result; 2192 replySize = sizeof(reply.get_signal_handler); 2193 sendReply = true; 2194 break; 2195 } 2196 2197 case B_DEBUG_MESSAGE_PREPARE_HANDOVER: 2198 { 2199 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_PREPARE_HANDOVER" 2200 "\n", nubThread->id)); 2201 2202 Team *team = nubThread->team; 2203 2204 // Acquire the debugger write lock. As soon as we have it and 2205 // have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread 2206 // will write anything to the debugger port anymore. 2207 status_t result = acquire_sem_etc(writeLock, 1, 2208 B_KILL_CAN_INTERRUPT, 0); 2209 if (result == B_OK) { 2210 // set the respective team debug flag 2211 cpu_status state = disable_interrupts(); 2212 GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2213 2214 atomic_or(&team->debug_info.flags, 2215 B_TEAM_DEBUG_DEBUGGER_HANDOVER); 2216 BreakpointManager* breakpointManager 2217 = team->debug_info.breakpoint_manager; 2218 2219 RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info); 2220 restore_interrupts(state); 2221 2222 // remove all installed breakpoints 2223 breakpointManager->RemoveAllBreakpoints(); 2224 2225 release_sem(writeLock); 2226 } else { 2227 // We probably got a SIGKILL. If so, we will terminate when 2228 // reading the next message fails. 
2229 } 2230 2231 break; 2232 } 2233 2234 case B_DEBUG_MESSAGE_HANDED_OVER: 2235 { 2236 // notify all threads that the debugger has changed 2237 broadcast_debugged_thread_message(nubThread, 2238 B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0); 2239 2240 break; 2241 } 2242 2243 case B_DEBUG_START_PROFILER: 2244 { 2245 // get the parameters 2246 thread_id threadID = message.start_profiler.thread; 2247 replyPort = message.start_profiler.reply_port; 2248 area_id sampleArea = message.start_profiler.sample_area; 2249 int32 stackDepth = message.start_profiler.stack_depth; 2250 bool variableStackDepth 2251 = message.start_profiler.variable_stack_depth; 2252 bigtime_t interval = max_c(message.start_profiler.interval, 2253 B_DEBUG_MIN_PROFILE_INTERVAL); 2254 status_t result = B_OK; 2255 2256 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_START_PROFILER: " 2257 "thread: %" B_PRId32 ", sample area: %" B_PRId32 "\n", 2258 nubThread->id, threadID, sampleArea)); 2259 2260 if (stackDepth < 1) 2261 stackDepth = 1; 2262 else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH) 2263 stackDepth = B_DEBUG_STACK_TRACE_DEPTH; 2264 2265 // provision for an extra entry per hit (for the number of 2266 // samples), if variable stack depth 2267 if (variableStackDepth) 2268 stackDepth++; 2269 2270 // clone the sample area 2271 area_info areaInfo; 2272 if (result == B_OK) 2273 result = get_area_info(sampleArea, &areaInfo); 2274 2275 area_id clonedSampleArea = -1; 2276 void* samples = NULL; 2277 if (result == B_OK) { 2278 clonedSampleArea = clone_area("profiling samples", &samples, 2279 B_ANY_KERNEL_ADDRESS, 2280 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 2281 sampleArea); 2282 if (clonedSampleArea >= 0) { 2283 // we need the memory locked 2284 result = lock_memory(samples, areaInfo.size, 2285 B_READ_DEVICE); 2286 if (result != B_OK) { 2287 delete_area(clonedSampleArea); 2288 clonedSampleArea = -1; 2289 } 2290 } else 2291 result = clonedSampleArea; 2292 } 2293 2294 // get the thread and set the profile info 2295 
int32 imageEvent = nubThread->team->debug_info.image_event; 2296 if (result == B_OK) { 2297 Thread* thread = Thread::GetAndLock(threadID); 2298 BReference<Thread> threadReference(thread, true); 2299 ThreadLocker threadLocker(thread, true); 2300 2301 if (thread != NULL && thread->team == nubThread->team) { 2302 thread_debug_info &threadDebugInfo = thread->debug_info; 2303 2304 InterruptsSpinLocker threadDebugInfoLocker( 2305 threadDebugInfo.lock); 2306 2307 if (threadDebugInfo.profile.samples == NULL) { 2308 threadDebugInfo.profile.interval = interval; 2309 threadDebugInfo.profile.sample_area 2310 = clonedSampleArea; 2311 threadDebugInfo.profile.samples = (addr_t*)samples; 2312 threadDebugInfo.profile.max_samples 2313 = areaInfo.size / sizeof(addr_t); 2314 threadDebugInfo.profile.flush_threshold 2315 = threadDebugInfo.profile.max_samples 2316 * B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD 2317 / 100; 2318 threadDebugInfo.profile.sample_count = 0; 2319 threadDebugInfo.profile.dropped_ticks = 0; 2320 threadDebugInfo.profile.stack_depth = stackDepth; 2321 threadDebugInfo.profile.variable_stack_depth 2322 = variableStackDepth; 2323 threadDebugInfo.profile.buffer_full = false; 2324 threadDebugInfo.profile.interval_left = interval; 2325 threadDebugInfo.profile.installed_timer = NULL; 2326 threadDebugInfo.profile.image_event = imageEvent; 2327 threadDebugInfo.profile.last_image_event 2328 = imageEvent; 2329 } else 2330 result = B_BAD_VALUE; 2331 } else 2332 result = B_BAD_THREAD_ID; 2333 } 2334 2335 // on error unlock and delete the sample area 2336 if (result != B_OK) { 2337 if (clonedSampleArea >= 0) { 2338 unlock_memory(samples, areaInfo.size, B_READ_DEVICE); 2339 delete_area(clonedSampleArea); 2340 } 2341 } 2342 2343 // send a reply to the debugger 2344 reply.start_profiler.error = result; 2345 reply.start_profiler.interval = interval; 2346 reply.start_profiler.image_event = imageEvent; 2347 sendReply = true; 2348 replySize = sizeof(reply.start_profiler); 2349 2350 break; 
2351 } 2352 2353 case B_DEBUG_STOP_PROFILER: 2354 { 2355 // get the parameters 2356 thread_id threadID = message.stop_profiler.thread; 2357 replyPort = message.stop_profiler.reply_port; 2358 status_t result = B_OK; 2359 2360 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_STOP_PROFILER: " 2361 "thread: %" B_PRId32 "\n", nubThread->id, threadID)); 2362 2363 area_id sampleArea = -1; 2364 addr_t* samples = NULL; 2365 int32 sampleCount = 0; 2366 int32 stackDepth = 0; 2367 bool variableStackDepth = false; 2368 int32 imageEvent = 0; 2369 int32 droppedTicks = 0; 2370 2371 // get the thread and detach the profile info 2372 Thread* thread = Thread::GetAndLock(threadID); 2373 BReference<Thread> threadReference(thread, true); 2374 ThreadLocker threadLocker(thread, true); 2375 2376 if (thread && thread->team == nubThread->team) { 2377 thread_debug_info &threadDebugInfo = thread->debug_info; 2378 2379 InterruptsSpinLocker threadDebugInfoLocker( 2380 threadDebugInfo.lock); 2381 2382 if (threadDebugInfo.profile.samples != NULL) { 2383 sampleArea = threadDebugInfo.profile.sample_area; 2384 samples = threadDebugInfo.profile.samples; 2385 sampleCount = threadDebugInfo.profile.sample_count; 2386 droppedTicks = threadDebugInfo.profile.dropped_ticks; 2387 stackDepth = threadDebugInfo.profile.stack_depth; 2388 variableStackDepth 2389 = threadDebugInfo.profile.variable_stack_depth; 2390 imageEvent = threadDebugInfo.profile.image_event; 2391 threadDebugInfo.profile.sample_area = -1; 2392 threadDebugInfo.profile.samples = NULL; 2393 threadDebugInfo.profile.buffer_full = false; 2394 threadDebugInfo.profile.dropped_ticks = 0; 2395 } else 2396 result = B_BAD_VALUE; 2397 } else 2398 result = B_BAD_THREAD_ID; 2399 2400 threadLocker.Unlock(); 2401 2402 // prepare the reply 2403 if (result == B_OK) { 2404 reply.profiler_update.origin.thread = threadID; 2405 reply.profiler_update.image_event = imageEvent; 2406 reply.profiler_update.stack_depth = stackDepth; 2407 
reply.profiler_update.variable_stack_depth 2408 = variableStackDepth; 2409 reply.profiler_update.sample_count = sampleCount; 2410 reply.profiler_update.dropped_ticks = droppedTicks; 2411 reply.profiler_update.stopped = true; 2412 } else 2413 reply.profiler_update.origin.thread = result; 2414 2415 replySize = sizeof(debug_profiler_update); 2416 sendReply = true; 2417 2418 if (sampleArea >= 0) { 2419 area_info areaInfo; 2420 if (get_area_info(sampleArea, &areaInfo) == B_OK) { 2421 unlock_memory(samples, areaInfo.size, B_READ_DEVICE); 2422 delete_area(sampleArea); 2423 } 2424 } 2425 2426 break; 2427 } 2428 2429 case B_DEBUG_WRITE_CORE_FILE: 2430 { 2431 // get the parameters 2432 replyPort = message.write_core_file.reply_port; 2433 char* path = message.write_core_file.path; 2434 path[sizeof(message.write_core_file.path) - 1] = '\0'; 2435 2436 TRACE(("nub thread %" B_PRId32 ": B_DEBUG_WRITE_CORE_FILE" 2437 ": path: %s\n", nubThread->id, path)); 2438 2439 // write the core file 2440 status_t result = core_dump_write_core_file(path, false); 2441 2442 // prepare the reply 2443 reply.write_core_file.error = result; 2444 replySize = sizeof(reply.write_core_file); 2445 sendReply = true; 2446 2447 break; 2448 } 2449 } 2450 2451 // send the reply, if necessary 2452 if (sendReply) { 2453 status_t error = kill_interruptable_write_port(replyPort, command, 2454 &reply, replySize); 2455 2456 if (error != B_OK) { 2457 // The debugger port is either not longer existing or we got 2458 // interrupted by a kill signal. In either case we terminate. 2459 TRACE(("nub thread %" B_PRId32 ": failed to send reply to port " 2460 "%" B_PRId32 ": %s\n", nubThread->id, replyPort, 2461 strerror(error))); 2462 2463 nub_thread_cleanup(nubThread); 2464 return error; 2465 } 2466 } 2467 } 2468 } 2469 2470 2471 /** \brief Helper function for install_team_debugger(), that sets up the team 2472 and thread debug infos. 2473 2474 The caller must hold the team's lock as well as the team debug info lock. 
	The function also clears the arch specific team and thread debug infos
	(including among other things formerly set break/watchpoints).
*/
static void
install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam,
	port_id debuggerPort, port_id nubPort, thread_id nubThread,
	sem_id debuggerPortWriteLock, thread_id causingThread)
{
	// Publish the new debugger in the team debug info. atomic_set() on the
	// flags makes the "debugger installed" state visible to readers that
	// only hold the thread/team locks, not the debug info lock.
	atomic_set(&team->debug_info.flags,
		B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	team->debug_info.nub_port = nubPort;
	team->debug_info.nub_thread = nubThread;
	team->debug_info.debugger_team = debuggerTeam;
	team->debug_info.debugger_port = debuggerPort;
	team->debug_info.debugger_write_lock = debuggerPortWriteLock;
	team->debug_info.causing_thread = causingThread;

	arch_clear_team_debug_info(&team->debug_info.arch_info);

	// set the user debug flags and signal masks of all threads to the default
	for (Thread *thread = team->thread_list; thread;
			thread = thread->team_next) {
		SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if (thread->id == nubThread) {
			// The nub thread itself must never be stopped/debugged.
			atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD);
		} else {
			// Reset only the user-controllable flags; preserve the rest.
			int32 flags = thread->debug_info.flags
				& ~B_THREAD_DEBUG_USER_FLAG_MASK;
			atomic_set(&thread->debug_info.flags,
				flags | B_THREAD_DEBUG_DEFAULT_FLAGS);
			thread->debug_info.ignore_signals = 0;
			thread->debug_info.ignore_signals_once = 0;

			arch_clear_thread_debug_info(&thread->debug_info.arch_info);
		}
	}

	// update the thread::flags fields
	update_threads_debugger_installed_flag(team);
}


/*!	Installs (or hands over) a debugger for the team \a teamID.

	\param teamID The team to be debugged (may be \c B_CURRENT_TEAM).
	\param debuggerPort Port owned by the debugger team; ignored when
		\a useDefault is \c true (the default debugger port is used instead).
	\param causingThread Thread that triggered the installation, or < 0.
	\param useDefault Use the globally registered default debugger port.
	\param dontReplace If a debugger is already installed, succeed without
		replacing it instead of failing/handing over.
	\return The team's nub port on success, an error code otherwise.
*/
static port_id
install_team_debugger(team_id teamID, port_id debuggerPort,
	thread_id causingThread, bool useDefault, bool dontReplace)
{
	TRACE(("install_team_debugger(team: %" B_PRId32 ", port: %" B_PRId32 ", "
		"default: %d, dontReplace: %d)\n", teamID, debuggerPort, useDefault,
		dontReplace));

	if (useDefault)
		debuggerPort = atomic_get(&sDefaultDebuggerPort);

	// get the debugger team
	port_info debuggerPortInfo;
	status_t error = get_port_info(debuggerPort, &debuggerPortInfo);
	if (error != B_OK) {
		TRACE(("install_team_debugger(): Failed to get debugger port info: "
			"%" B_PRIx32 "\n", error));
		return error;
	}
	team_id debuggerTeam = debuggerPortInfo.team;

	// Check the debugger team: It must neither be the kernel team nor the
	// debugged team.
	if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) {
		TRACE(("install_team_debugger(): Can't debug kernel or debugger team. "
			"debugger: %" B_PRId32 ", debugged: %" B_PRId32 "\n", debuggerTeam,
			teamID));
		return B_NOT_ALLOWED;
	}

	// Get the team. prepare_debugger_change() serializes concurrent
	// debugger changes on this team; every return path below must go
	// through finish_debugger_change().
	Team* team;
	ConditionVariable debugChangeCondition;
	debugChangeCondition.Init(NULL, "debug change condition");
	error = prepare_debugger_change(teamID, debugChangeCondition, team);
	if (error != B_OK)
		return error;

	// get the real team ID (resolves B_CURRENT_TEAM)
	teamID = team->id;

	// check, if a debugger is already installed

	bool done = false;
	port_id result = B_ERROR;
	bool handOver = false;
	port_id oldDebuggerPort = -1;
	port_id nubPort = -1;

	TeamLocker teamLocker(team);
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = team->debug_info.flags;

	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// There's already a debugger installed.
		if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) {
			if (dontReplace) {
				// We're fine with already having a debugger.
				error = B_OK;
				done = true;
				result = team->debug_info.nub_port;
			} else {
				// a handover to another debugger is requested
				// Set the handing-over flag -- we'll clear both flags after
				// having sent the handed-over message to the new debugger.
				atomic_or(&team->debug_info.flags,
					B_TEAM_DEBUG_DEBUGGER_HANDING_OVER);

				oldDebuggerPort = team->debug_info.debugger_port;
				result = nubPort = team->debug_info.nub_port;
				if (causingThread < 0)
					causingThread = team->debug_info.causing_thread;

				// Set the new debugger; nub port/thread and write lock are
				// reused from the previous debugger setup.
				install_team_debugger_init_debug_infos(team, debuggerTeam,
					debuggerPort, nubPort, team->debug_info.nub_thread,
					team->debug_info.debugger_write_lock, causingThread);

				handOver = true;
				done = true;
			}
		} else {
			// there's already a debugger installed
			error = (dontReplace ? B_OK : B_BAD_VALUE);
			done = true;
			result = team->debug_info.nub_port;
		}
	} else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0
		&& useDefault) {
		// No debugger yet, disable_debugger() had been invoked, and we
		// would install the default debugger. Just fail.
		error = B_BAD_VALUE;
	}

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
	restore_interrupts(state);
	teamLocker.Unlock();

	if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) {
		// The old debugger must just have died. Just proceed as
		// if there was no debugger installed. We may still be too
		// early, in which case we'll fail, but this race condition
		// should be unbelievably rare and relatively harmless.
		handOver = false;
		done = false;
	}

	if (handOver) {
		// prepare the handed-over message
		debug_handed_over notification;
		notification.origin.thread = -1;
		notification.origin.team = teamID;
		notification.origin.nub_port = nubPort;
		notification.debugger = debuggerTeam;
		notification.debugger_port = debuggerPort;
		notification.causing_thread = causingThread;

		// notify the new debugger
		error = write_port_etc(debuggerPort,
			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
		if (error != B_OK) {
			dprintf("install_team_debugger(): Failed to send message to new "
				"debugger: %s\n", strerror(error));
		}

		// clear the handed-over and handing-over flags
		state = disable_interrupts();
		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		atomic_and(&team->debug_info.flags,
			~(B_TEAM_DEBUG_DEBUGGER_HANDOVER
				| B_TEAM_DEBUG_DEBUGGER_HANDING_OVER));

		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
		restore_interrupts(state);

		finish_debugger_change(team);

		// notify the nub thread
		kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER,
			NULL, 0);

		// Notify the old debugger. Failure is only traced -- the old
		// debugger may legitimately be gone already.
		error = write_port_etc(oldDebuggerPort,
			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
		if (error != B_OK) {
			TRACE(("install_team_debugger(): Failed to send message to old "
				"debugger: %s\n", strerror(error)));
		}

		TRACE(("install_team_debugger() done: handed over to debugger: team: "
			"%" B_PRId32 ", port: %" B_PRId32 "\n", debuggerTeam,
			debuggerPort));

		return result;
	}

	if (done || error != B_OK) {
		TRACE(("install_team_debugger() done1: %" B_PRId32 "\n",
			(error == B_OK ? result : error)));
		finish_debugger_change(team);
		return (error == B_OK ? result : error);
	}

	// No debugger installed yet -- create the full setup. The steps below
	// are chained on "error == B_OK" so the first failure skips the rest.

	// create the debugger write lock semaphore
	char nameBuffer[B_OS_NAME_LENGTH];
	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debugger port "
		"write", teamID);
	sem_id debuggerWriteLock = create_sem(1, nameBuffer);
	if (debuggerWriteLock < 0)
		error = debuggerWriteLock;

	// create the nub port
	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug", teamID);
	if (error == B_OK) {
		nubPort = create_port(1, nameBuffer);
		if (nubPort < 0)
			error = nubPort;
		else
			result = nubPort;
	}

	// make the debugger team the port owner; thus we know, if the debugger is
	// gone and can cleanup
	if (error == B_OK)
		error = set_port_owner(nubPort, debuggerTeam);

	// create the breakpoint manager
	BreakpointManager* breakpointManager = NULL;
	if (error == B_OK) {
		breakpointManager = new(std::nothrow) BreakpointManager;
		if (breakpointManager != NULL)
			error = breakpointManager->Init();
		else
			error = B_NO_MEMORY;
	}

	// Spawn the nub thread (suspended; resumed below only on full success).
	thread_id nubThread = -1;
	if (error == B_OK) {
		snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug task",
			teamID);
		nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer,
			B_NORMAL_PRIORITY, NULL, teamID);
		if (nubThread < 0)
			error = nubThread;
	}

	// now adjust the debug info accordingly
	if (error == B_OK) {
		TeamLocker teamLocker(team);
		state = disable_interrupts();
		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		team->debug_info.breakpoint_manager = breakpointManager;
		install_team_debugger_init_debug_infos(team, debuggerTeam,
			debuggerPort, nubPort, nubThread, debuggerWriteLock,
			causingThread);

		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
		restore_interrupts(state);
	}

	finish_debugger_change(team);

	// if everything went fine, resume the nub thread, otherwise clean up
	if (error == B_OK) {
		resume_thread(nubThread);
	} else {
		// Delete port and terminate thread. Deleting the nub port makes the
		// nub thread's read fail, so it cleans itself up; we just wait.
		if (nubPort >= 0) {
			set_port_owner(nubPort, B_CURRENT_TEAM);
			delete_port(nubPort);
		}
		if (nubThread >= 0) {
			// NOTE: this inner "result" intentionally shadows the outer
			// port_id result -- it's only wait_for_thread()'s out parameter.
			int32 result;
			wait_for_thread(nubThread, &result);
		}

		delete breakpointManager;
	}

	TRACE(("install_team_debugger() done2: %" B_PRId32 "\n",
		(error == B_OK ? result : error)));
	return (error == B_OK ? result : error);
}


/*!	Makes sure the current team has a debugger installed, installing the
	default debugger if none is present yet.
	\return \c B_OK if a debugger is (now) installed, an error code otherwise.
*/
static status_t
ensure_debugger_installed()
{
	port_id port = install_team_debugger(B_CURRENT_TEAM, -1,
		thread_get_current_thread_id(), true, true);
	return port >= 0 ? B_OK : port;
}


// #pragma mark -


/*!	Syscall backend of debugger(): installs the default debugger if needed and
	reports a B_DEBUGGER_MESSAGE_DEBUGGER_CALL event with \a userMessage
	(a userland pointer, passed through unread) to it.
*/
void
_user_debugger(const char *userMessage)
{
	// install the default debugger, if there is none yet
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		// time to commit suicide
		char buffer[128];
		ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer));
		if (length >= 0) {
			dprintf("_user_debugger(): Failed to install debugger. Message is: "
				"`%s'\n", buffer);
		} else {
			// user_strlcpy() failed -- the message pointer is invalid
			dprintf("_user_debugger(): Failed to install debugger. Message is: "
				"%p (%s)\n", userMessage, strerror(length));
		}
		// NOTE(review): assumes _user_exit_team() does not return here --
		// otherwise we'd fall through and report the event anyway; confirm.
		_user_exit_team(1);
	}

	// prepare the message
	debug_debugger_call message;
	message.message = (void*)userMessage;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message,
		sizeof(message), true);
}


/*!	Sets or clears the team's "debugger disabled" flag.
	\param state Non-zero to disable future (default-)debugger installation.
	\return Non-zero when the flag was previously clear (but see TODO below).
*/
int
_user_disable_debugger(int state)
{
	Team *team = thread_get_current_thread()->team;

	TRACE(("_user_disable_debugger(%d): team: %" B_PRId32 "\n", state,
		team->id));

	cpu_status cpuState = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 oldFlags;
	if (state) {
		oldFlags = atomic_or(&team->debug_info.flags,
			B_TEAM_DEBUG_DEBUGGER_DISABLED);
	} else {
		oldFlags = atomic_and(&team->debug_info.flags,
			~B_TEAM_DEBUG_DEBUGGER_DISABLED);
	}

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
	restore_interrupts(cpuState);

	// TODO: Check, if the return value is really the old state.
	return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED);
}


/*!	Registers \a debuggerPort as the system-wide default debugger port.
	Root only; a negative port clears the registration (no validation done).
*/
status_t
_user_install_default_debugger(port_id debuggerPort)
{
	// Do not allow non-root processes to install a default debugger.
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	// if supplied, check whether the port is a valid port
	if (debuggerPort >= 0) {
		port_info portInfo;
		status_t error = get_port_info(debuggerPort, &portInfo);
		if (error != B_OK)
			return error;

		// the debugger team must not be the kernel team
		if (portInfo.team == team_get_kernel_team_id())
			return B_NOT_ALLOWED;
	}

	atomic_set(&sDefaultDebuggerPort, debuggerPort);

	return B_OK;
}


/*!	Syscall: installs a debugger (listening on \a debuggerPort) for team
	\a teamID. Non-root callers may only debug teams with their own euid.
	\return The team's nub port on success, an error code otherwise.
*/
port_id
_user_install_team_debugger(team_id teamID, port_id debuggerPort)
{
	if (geteuid() != 0 && team_geteuid(teamID) != geteuid())
		return B_PERMISSION_DENIED;

	return install_team_debugger(teamID, debuggerPort, -1, false, false);
}


/*!	Syscall: removes the debugger installed for team \a teamID by deleting the
	nub port, which causes the nub thread to terminate and tear down the
	debugging state.
*/
status_t
_user_remove_team_debugger(team_id teamID)
{
	Team* team;
	ConditionVariable debugChangeCondition;
	debugChangeCondition.Init(NULL, "debug change condition");
	status_t error = prepare_debugger_change(teamID, debugChangeCondition,
		team);
	if (error != B_OK)
		return error;

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	thread_id nubThread = -1;
	port_id nubPort = -1;

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// there's a debugger installed
		nubThread = team->debug_info.nub_thread;
		nubPort = team->debug_info.nub_port;
	} else {
		// no debugger installed
		error = B_BAD_VALUE;
	}

	debugInfoLocker.Unlock();

	// Delete the nub port -- this will cause the nub thread to terminate and
	// remove the debugger.
	if (nubPort >= 0)
		delete_port(nubPort);

	finish_debugger_change(team);

	// Wait for the nub thread -- done after finish_debugger_change(), since
	// the nub thread's cleanup itself needs the debugger-change serialization.
	if (nubThread >= 0)
		wait_for_thread(nubThread, NULL);

	return error;
}


/*!	Syscall: asks thread \a threadID to stop for debugging as soon as
	possible, by setting B_THREAD_DEBUG_STOP and sending it
	SIGNAL_DEBUG_THREAD.
	\return \c B_OK if the thread is (or already was) told to stop,
		\c B_BAD_THREAD_ID / \c B_NOT_ALLOWED otherwise.
*/
status_t
_user_debug_thread(thread_id threadID)
{
	TRACE(("[%" B_PRId32 "] _user_debug_thread(%" B_PRId32 ")\n",
		find_thread(NULL), threadID));

	// Get the thread; the locker/reference take over the lock/reference
	// GetAndLock() acquired and release them on any return path.
	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// we can't debug the kernel team
	if (thread->team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	InterruptsLocker interruptsLocker;
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// If the thread is already dying, it's too late to debug it.
	if ((thread->debug_info.flags & B_THREAD_DEBUG_DYING) != 0)
		return B_BAD_THREAD_ID;

	// don't debug the nub thread
	if ((thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) != 0)
		return B_NOT_ALLOWED;

	// already marked stopped or being told to stop?
	if ((thread->debug_info.flags
			& (B_THREAD_DEBUG_STOPPED | B_THREAD_DEBUG_STOP)) != 0) {
		return B_OK;
	}

	// set the flag that tells the thread to stop as soon as possible
	atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP);

	update_thread_user_debug_flag(thread);

	// send the thread a SIGNAL_DEBUG_THREAD, so it is interrupted (or
	// continued)
	threadDebugInfoLocker.Unlock();
	ReadSpinLocker teamLocker(thread->team_lock);
	SpinLocker locker(thread->team->signal_lock);

	send_signal_to_thread_locked(thread, SIGNAL_DEBUG_THREAD, NULL, 0);

	return B_OK;
}


/*!	Syscall: reports a B_DEBUGGER_MESSAGE_THREAD_DEBUGGED event for the
	current thread, which stops it for the debugger.
*/
void
_user_wait_for_debugger(void)
{
	debug_thread_debugged message;
	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message,
		sizeof(message), false);
}


/*!	Syscall: sets a break- or watchpoint at \a address directly via the
	architecture backend -- only allowed while no debugger is installed
	(a debugger manages breakpoints through its nub thread instead).
	\param type Watchpoint type (ignored for breakpoints).
	\param length Watched range length (ignored for breakpoints).
	\param watchpoint \c true to set a watchpoint, \c false for a breakpoint.
*/
status_t
_user_set_debugger_breakpoint(void *address, uint32 type, int32 length,
	bool watchpoint)
{
	// check the address and size
	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
		return B_BAD_ADDRESS;
	if (watchpoint && length < 0)
		return B_BAD_VALUE;

	// check whether a debugger is installed already
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		return B_BAD_VALUE;

	// We can't help it, here's a small but relatively harmless race condition,
	// since a debugger could be installed in the meantime. The worst case is
	// that we install a break/watchpoint the debugger doesn't know about.

	// set the break/watchpoint
	status_t result;
	if (watchpoint)
		result = arch_set_watchpoint(address, type, length);
	else
		result = arch_set_breakpoint(address);

	if (result == B_OK)
		update_threads_breakpoints_flag();

	return result;
}


/*!	Syscall: clears a break- or watchpoint previously set at \a address via
	_user_set_debugger_breakpoint() -- only allowed while no debugger is
	installed (mirrors the set case above).
*/
status_t
_user_clear_debugger_breakpoint(void *address, bool watchpoint)
{
	// check the address
	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
		return B_BAD_ADDRESS;

	// check whether a debugger is installed already
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		return B_BAD_VALUE;

	// We can't help it, here's a small but relatively harmless race condition,
	// since a debugger could be installed in the meantime. The worst case is
	// that we clear a break/watchpoint the debugger has just installed.

	// clear the break/watchpoint
	status_t result;
	if (watchpoint)
		result = arch_clear_watchpoint(address);
	else
		result = arch_clear_breakpoint(address);

	if (result == B_OK)
		update_threads_breakpoints_flag();

	return result;
}