/*
 * Copyright 2005-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include <algorithm>

#include <arch/debug.h>
#include <arch/user_debugger.h>
#include <cpu.h>
#include <debugger.h>
#include <kernel.h>
#include <KernelExport.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <ksyscalls.h>
#include <port.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <thread_types.h>
#include <user_debugger.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include <AutoDeleter.h>
#include <util/AutoLock.h>

#include "BreakpointManager.h"


//#define TRACE_USER_DEBUGGER
#ifdef TRACE_USER_DEBUGGER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


// TODO: Since the introduction of team_debug_info::debugger_changed_condition
// there's some potential for simplifications. E.g. clear_team_debug_info() and
// destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus
// arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()).


static port_id sDefaultDebuggerPort = -1;
	// accessed atomically

static timer sProfilingTimers[B_MAX_CPU_COUNT];
	// a profiling timer for each CPU -- used when a profiled thread is running
	// on that CPU


static void schedule_profiling_timer(struct thread* thread,
	bigtime_t interval);
static int32 profiling_event(timer* unused);
static status_t ensure_debugger_installed();
static void get_team_debug_info(team_debug_info &teamDebugInfo);


static status_t
kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
	size_t bufferSize)
{
	return write_port_etc(port, code, buffer, bufferSize,
		B_KILL_CAN_INTERRUPT, 0);
}


static status_t
debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
	bool dontWait)
{
	TRACE(("debugger_write(): thread: %ld, team %ld, port: %ld, code: %lx, "
		"message: %p, size: %lu, dontWait: %d\n",
		thread_get_current_thread()->id,
		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
		dontWait));

	status_t error = B_OK;

	// get the team debug info
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	sem_id writeLock = teamDebugInfo.debugger_write_lock;

	// get the write lock
	TRACE(("debugger_write(): acquiring write lock...\n"));
	error = acquire_sem_etc(writeLock, 1,
		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK) {
		TRACE(("debugger_write() done1: %lx\n", error));
		return error;
	}

	// re-get the team debug info
	get_team_debug_info(teamDebugInfo);

	if (teamDebugInfo.debugger_port != port
		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
		// The debugger has changed in the meantime or we are about to be
		// handed over to a new debugger. In either case we don't send the
		// message.
		TRACE(("debugger_write(): %s\n",
			(teamDebugInfo.debugger_port != port ? "debugger port changed"
				: "handover flag set")));
	} else {
		TRACE(("debugger_write(): writing to port...\n"));

		error = write_port_etc(port, code, buffer, bufferSize,
			dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT,
			0);
	}

	// release the write lock
	release_sem(writeLock);

	TRACE(("debugger_write() done: %lx\n", error));

	return error;
}
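
// Illustrative sketch (comment only, not part of the build): a typical
// asynchronous notification mirrors user_debug_thread_exiting() further
// below -- fill in a message struct, stamp the origin fields, and let
// debugger_write() handle the snapshot/lock/re-check protocol above.
// `debuggerPort' and `thread' stand in for a caller's actual values.
//
//	debug_profiler_update message;
//	message.origin.thread = thread->id;
//	message.origin.team = thread->team->id;
//	message.origin.nub_port = -1;	// asynchronous message
//	debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
//		&message, sizeof(message), false);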


/*!	Updates the thread::flags field according to what user debugger flags are
	set for the thread.
	Interrupts must be disabled and the thread lock must be held.
*/
static void
update_thread_user_debug_flag(struct thread* thread)
{
	if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0)
		atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD);
}


/*!	Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the
	given thread.
	Interrupts must be disabled and the team lock must be held.
*/
static void
update_thread_breakpoints_flag(struct thread* thread)
{
	struct team* team = thread->team;

	if (arch_has_breakpoints(&team->debug_info.arch_info))
		atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
}


/*!	Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
	threads of the current team.
	The team lock is acquired by the function itself.
*/
static void
update_threads_breakpoints_flag()
{
	InterruptsSpinLocker _(gTeamSpinlock);

	struct team* team = thread_get_current_thread()->team;
	struct thread* thread = team->thread_list;

	if (arch_has_breakpoints(&team->debug_info.arch_info)) {
		for (; thread != NULL; thread = thread->team_next)
			atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
	} else {
		for (; thread != NULL; thread = thread->team_next)
			atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
	}
}


/*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of the
	given thread.
	Interrupts must be disabled and the team lock must be held.
*/
static void
update_thread_debugger_installed_flag(struct thread* thread)
{
	struct team* team = thread->team;

	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
}


/*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all
	threads of the given team.
	Interrupts must be disabled and the team lock must be held.
*/
static void
update_threads_debugger_installed_flag(struct team* team)
{
	struct thread* thread = team->thread_list;

	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		for (; thread != NULL; thread = thread->team_next)
			atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
	} else {
		for (; thread != NULL; thread = thread->team_next)
			atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
	}
}


/**
 * For the first initialization the function must be called with \a initLock
 * set to \c true. If another thread could access the structure at the same
 * time, `lock' must be held when calling the function.
 */
void
clear_team_debug_info(struct team_debug_info *info, bool initLock)
{
	if (info) {
		arch_clear_team_debug_info(&info->arch_info);
		atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->nub_thread = -1;
		info->nub_port = -1;
		info->debugger_write_lock = -1;
		info->causing_thread = -1;
		info->image_event = 0;
		info->breakpoint_manager = NULL;

		if (initLock) {
			B_INITIALIZE_SPINLOCK(&info->lock);
			info->debugger_changed_condition = NULL;
		}
	}
}


/**
 * `lock' must not be held nor may interrupts be disabled.
 * \a info must not be a member of a team struct (or the team struct must no
 * longer be accessible, i.e. the team should already be removed).
 *
 * In case the team is still accessible, the procedure is:
 * 1. get `lock'
 * 2. copy the team debug info on stack
 * 3. call clear_team_debug_info() on the team debug info
 * 4. release `lock'
 * 5. call destroy_team_debug_info() on the copied team debug info
 * (see the sketch following this function)
 */
static void
destroy_team_debug_info(struct team_debug_info *info)
{
	if (info) {
		arch_destroy_team_debug_info(&info->arch_info);

		// delete the breakpoint manager
		delete info->breakpoint_manager;
		info->breakpoint_manager = NULL;

		// delete the debugger port write lock
		if (info->debugger_write_lock >= 0) {
			delete_sem(info->debugger_write_lock);
			info->debugger_write_lock = -1;
		}

		// delete the nub port
		if (info->nub_port >= 0) {
			set_port_owner(info->nub_port, B_CURRENT_TEAM);
			delete_port(info->nub_port);
			info->nub_port = -1;
		}

		// wait for the nub thread
		if (info->nub_thread >= 0) {
			if (info->nub_thread != thread_get_current_thread()->id) {
				int32 result;
				wait_for_thread(info->nub_thread, &result);
			}

			info->nub_thread = -1;
		}

		atomic_set(&info->flags, 0);
		info->debugger_team = -1;
		info->debugger_port = -1;
		info->causing_thread = -1;
		info->image_event = -1;
	}
}
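
// Illustrative sketch (comment only): the five-step sequence from the
// comment above, essentially what nub_thread_cleanup() below performs.
// `team' stands in for the team being detached from its debugger.
//
//	cpu_status state = disable_interrupts();
//	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);		// 1. get `lock'
//	team_debug_info copy = team->debug_info;			// 2. copy on stack
//	clear_team_debug_info(&team->debug_info, false);	// 3. clear
//	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);		// 4. release `lock'
//	restore_interrupts(state);
//	destroy_team_debug_info(&copy);						// 5. destroy the copy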


void
init_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		arch_clear_thread_debug_info(&info->arch_info);
		info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
		info->profile.installed_timer = NULL;
	}
}


/*!	Invoked with the thread lock held.
*/
void
clear_thread_debug_info(struct thread_debug_info *info, bool dying)
{
	if (info) {
		// cancel profiling timer
		if (info->profile.installed_timer != NULL) {
			cancel_timer(info->profile.installed_timer);
			info->profile.installed_timer = NULL;
		}

		arch_clear_thread_debug_info(&info->arch_info);
		atomic_set(&info->flags,
			B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ? B_THREAD_DEBUG_DYING : 0));
		info->debug_port = -1;
		info->ignore_signals = 0;
		info->ignore_signals_once = 0;
		info->profile.sample_area = -1;
		info->profile.samples = NULL;
		info->profile.buffer_full = false;
	}
}


void
destroy_thread_debug_info(struct thread_debug_info *info)
{
	if (info) {
		area_id sampleArea = info->profile.sample_area;
		if (sampleArea >= 0) {
			area_info areaInfo;
			if (get_area_info(sampleArea, &areaInfo) == B_OK) {
				unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
				delete_area(sampleArea);
			}
		}

		arch_destroy_thread_debug_info(&info->arch_info);

		if (info->debug_port >= 0) {
			delete_port(info->debug_port);
			info->debug_port = -1;
		}

		info->ignore_signals = 0;
		info->ignore_signals_once = 0;

		atomic_set(&info->flags, 0);
	}
}


static status_t
prepare_debugger_change(team_id teamID, ConditionVariable& condition,
	struct team*& team)
{
	// We look up the team by ID, even in case of the current team, so we can
	// be sure that the team is not already dying.
	if (teamID == B_CURRENT_TEAM)
		teamID = thread_get_current_thread()->team->id;

	while (true) {
		// get the team
		InterruptsSpinLocker teamLocker(gTeamSpinlock);

		team = team_get_team_struct_locked(teamID);
		if (team == NULL || team->death_entry != NULL)
			return B_BAD_TEAM_ID;

		// don't allow messing with the kernel team
		if (team == team_get_kernel_team())
			return B_NOT_ALLOWED;

		// check whether the condition is already set
		SpinLocker threadLocker(gThreadSpinlock);
		SpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return B_OK;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();
		threadLocker.Unlock();
		teamLocker.Unlock();

		entry.Wait();
	}
}


static void
prepare_debugger_change(struct team* team, ConditionVariable& condition)
{
	while (true) {
		// check whether the condition is already set
		InterruptsSpinLocker threadLocker(gThreadSpinlock);
		SpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();
		threadLocker.Unlock();

		entry.Wait();
	}
}


static void
finish_debugger_change(struct team* team)
{
	// unset our condition variable and notify all threads waiting on it
	InterruptsSpinLocker threadLocker(gThreadSpinlock);
	SpinLocker debugInfoLocker(team->debug_info.lock);

	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
	team->debug_info.debugger_changed_condition = NULL;

	condition->NotifyAll(true);
}
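
// Illustrative sketch (comment only): the locking pattern the two functions
// above establish, as used by thread_hit_debug_event() and
// nub_thread_cleanup() below. While the condition variable is installed, no
// other thread can change the team's debugger.
//
//	ConditionVariable debugChangeCondition;
//	prepare_debugger_change(team, debugChangeCondition);
//
//	// ... inspect or modify team->debug_info ...
//
//	finish_debugger_change(team);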


void
user_debug_prepare_for_exec()
{
	struct thread *thread = thread_get_current_thread();
	struct team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff is
	// initialized, change the ownership of the debug port for the thread to
	// the kernel team, since exec_team() deletes all ports owned by this
	// team. We change the ownership back later.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		cpu_status state = disable_interrupts();
		GRAB_THREAD_LOCK();

		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
			debugPort = thread->debug_info.debug_port;

		RELEASE_THREAD_LOCK();
		restore_interrupts(state);

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team_get_kernel_team_id());
	}
}


void
user_debug_finish_after_exec()
{
	struct thread *thread = thread_get_current_thread();
	struct team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff is
	// initialized for this thread, change the ownership of its debug port
	// back to this team.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// get the port
		port_id debugPort = -1;

		cpu_status state = disable_interrupts();
		GRAB_THREAD_LOCK();

		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
			debugPort = thread->debug_info.debug_port;

		RELEASE_THREAD_LOCK();
		restore_interrupts(state);

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team->id);
	}
}


void
init_user_debug()
{
#ifdef ARCH_INIT_USER_DEBUG
	ARCH_INIT_USER_DEBUG();
#endif
}


static void
get_team_debug_info(team_debug_info &teamDebugInfo)
{
	struct thread *thread = thread_get_current_thread();

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);

	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));

	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);
}


static status_t
thread_hit_debug_event_internal(debug_debugger_message event,
	const void *message, int32 size, bool requireDebugger, bool &restart)
{
	restart = false;
	struct thread *thread = thread_get_current_thread();

	TRACE(("thread_hit_debug_event(): thread: %ld, event: %lu, message: %p, "
		"size: %ld\n", thread->id, (uint32)event, message, size));

	// check whether there's a debug port already
	bool setPort = !(atomic_get(&thread->debug_info.flags)
		& B_THREAD_DEBUG_INITIALIZED);

	// create a port if there is none yet
	port_id port = -1;
	if (setPort) {
		char nameBuffer[128];
		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %ld",
			thread->id);

		port = create_port(1, nameBuffer);
		if (port < 0) {
			dprintf("thread_hit_debug_event(): Failed to create debug port: "
				"%s\n", strerror(port));
			return port;
		}
	}

	// check the debug info structures once more: get the debugger port, set
	// the thread's debug port, and update the thread's debug flags
	port_id deletePort = port;
	port_id debuggerPort = -1;
	port_id nubPort = -1;
	status_t error = B_OK;
	cpu_status state = disable_interrupts();
	GRAB_THREAD_LOCK();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);

	uint32 threadFlags = thread->debug_info.flags;
	threadFlags &= ~B_THREAD_DEBUG_STOP;
	bool debuggerInstalled
		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	if (thread->id == thread->team->debug_info.nub_thread) {
		// Ugh, we're the nub thread. We shouldn't be here.
		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %ld\n",
			thread->id));

		error = B_ERROR;
	} else if (debuggerInstalled || !requireDebugger) {
		if (debuggerInstalled) {
			debuggerPort = thread->team->debug_info.debugger_port;
			nubPort = thread->team->debug_info.nub_port;
		}

		if (setPort) {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				// someone created a port for us (the port we've created will
				// be deleted below)
				port = thread->debug_info.debug_port;
			} else {
				thread->debug_info.debug_port = port;
				deletePort = -1;	// keep the port
				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
			}
		} else {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				port = thread->debug_info.debug_port;
			} else {
				// someone deleted our port
				error = B_ERROR;
			}
		}
	} else
		error = B_ERROR;

	// update the flags
	if (error == B_OK)
		threadFlags |= B_THREAD_DEBUG_STOPPED;
	atomic_set(&thread->debug_info.flags, threadFlags);

	update_thread_user_debug_flag(thread);

	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	// delete the superfluous port
	if (deletePort >= 0)
		delete_port(deletePort);

	if (error != B_OK) {
		TRACE(("thread_hit_debug_event() error: thread: %ld, error: %lx\n",
			thread->id, error));
		return error;
	}

	// send a message to the debugger port
	if (debuggerInstalled) {
		// update the message's origin info first
		debug_origin *origin = (debug_origin *)message;
		origin->thread = thread->id;
		origin->team = thread->team->id;
		origin->nub_port = nubPort;

		TRACE(("thread_hit_debug_event(): thread: %ld, sending message to "
			"debugger port %ld\n", thread->id, debuggerPort));

		error = debugger_write(debuggerPort, event, message, size, false);
	}

	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
	bool singleStep = false;

	if (error == B_OK) {
		bool done = false;
		while (!done) {
			// read a command from the debug port
			int32 command;
			debugged_thread_message_data commandMessage;
			ssize_t commandMessageSize = read_port_etc(port, &command,
				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
				0);

			if (commandMessageSize < 0) {
				error = commandMessageSize;
				TRACE(("thread_hit_debug_event(): thread: %ld, failed "
					"to receive message from port %ld: %lx\n",
					thread->id, port, error));
				break;
			}

			switch (command) {
				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
					TRACE(("thread_hit_debug_event(): thread: %ld: "
						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
						thread->id));
					result = commandMessage.continue_thread.handle_event;

					singleStep = commandMessage.continue_thread.single_step;
					done = true;
					break;

				case B_DEBUGGED_THREAD_SET_CPU_STATE:
				{
					TRACE(("thread_hit_debug_event(): thread: %ld: "
						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
						thread->id));
					arch_set_debug_cpu_state(
						&commandMessage.set_cpu_state.cpu_state);

					break;
				}

				case B_DEBUGGED_THREAD_GET_CPU_STATE:
				{
					port_id replyPort = commandMessage.get_cpu_state.reply_port;

					// prepare the message
					debug_nub_get_cpu_state_reply replyMessage;
					replyMessage.error = B_OK;
					replyMessage.message = event;
					arch_get_debug_cpu_state(&replyMessage.cpu_state);

					// send it
					error = kill_interruptable_write_port(replyPort, event,
						&replyMessage, sizeof(replyMessage));

					break;
				}

				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
				{
					// Check whether the debugger really changed, i.e. is
					// different from the one we know.
					team_debug_info teamDebugInfo;
					get_team_debug_info(teamDebugInfo);

					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
						if (!debuggerInstalled
							|| teamDebugInfo.debugger_port != debuggerPort) {
							// debugger was installed or has changed: restart
							// this function
							restart = true;
							done = true;
						}
					} else {
						if (debuggerInstalled) {
							// debugger is gone: continue the thread normally
							done = true;
						}
					}

					break;
				}
			}
		}
	} else {
		TRACE(("thread_hit_debug_event(): thread: %ld, failed to send "
			"message to debugger port %ld: %lx\n", thread->id,
			debuggerPort, error));
	}

	// update the thread debug info
	bool destroyThreadInfo = false;
	thread_debug_info threadDebugInfo;

	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	// check whether the team is still being debugged
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// update the single-step flag
		if (singleStep) {
			atomic_or(&thread->debug_info.flags,
				B_THREAD_DEBUG_SINGLE_STEP);
			atomic_or(&thread->flags, THREAD_FLAGS_SINGLE_STEP);
		} else {
			atomic_and(&thread->debug_info.flags,
				~(int32)B_THREAD_DEBUG_SINGLE_STEP);
		}

		// unset the "stopped" state
		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);

		update_thread_user_debug_flag(thread);
	} else {
		// the debugger is gone: clean up our info completely
		threadDebugInfo = thread->debug_info;
		clear_thread_debug_info(&thread->debug_info, false);
		destroyThreadInfo = true;
	}

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	// enable/disable single stepping
	arch_update_thread_single_step();

	if (destroyThreadInfo)
		destroy_thread_debug_info(&threadDebugInfo);

	return (error == B_OK ? result : error);
}
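
// Illustrative sketch (comment only): from the debugger's side, resuming a
// thread stopped in the loop above goes through the team's nub port; the nub
// thread forwards the request as B_DEBUGGED_THREAD_MESSAGE_CONTINUE to the
// thread's debug port (see the B_DEBUG_MESSAGE_CONTINUE_THREAD case in
// debug_nub_thread() below). The struct name is an assumption for
// illustration; the fields match what the nub code reads from
// message.continue_thread.
//
//	debug_nub_continue_thread request;
//	request.thread = stoppedThread;		// thread reported in the event
//	request.handle_event = B_THREAD_DEBUG_HANDLE_EVENT;
//	request.single_step = false;
//	write_port(nubPort, B_DEBUG_MESSAGE_CONTINUE_THREAD, &request,
//		sizeof(request));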


static status_t
thread_hit_debug_event(debug_debugger_message event, const void *message,
	int32 size, bool requireDebugger)
{
	status_t result;
	bool restart;
	do {
		restart = false;
		result = thread_hit_debug_event_internal(event, message, size,
			requireDebugger, restart);
	} while (result >= 0 && restart);

	// Prepare to continue -- we install a debugger change condition, so
	// no-one will change the debugger while we're playing with the breakpoint
	// manager.
	// TODO: Maybe better use ref-counting and a flag in the breakpoint
	// manager.
	struct team* team = thread_get_current_thread()->team;
	ConditionVariable debugChangeCondition;
	prepare_debugger_change(team, debugChangeCondition);

	if (team->debug_info.breakpoint_manager != NULL) {
		bool isSyscall;
		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
		if (pc != NULL && !isSyscall)
			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
	}

	finish_debugger_change(team);

	return result;
}


static status_t
thread_hit_serious_debug_event(debug_debugger_message event,
	const void *message, int32 messageSize)
{
	// ensure that a debugger is installed for this team
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		struct thread *thread = thread_get_current_thread();
		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
			"thread: %ld: %s\n", thread->id, strerror(error));
		return error;
	}

	// enter the debug loop
	return thread_hit_debug_event(event, message, messageSize, true);
}


void
user_debug_pre_syscall(uint32 syscall, void *args)
{
	// check whether a debugger is installed
	struct thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether pre-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_pre_syscall message;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
		sizeof(message), true);
}


void
user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
	bigtime_t startTime)
{
	// check whether a debugger is installed
	struct thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether post-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_post_syscall message;
	message.start_time = startTime;
	message.end_time = system_time();
	message.return_value = returnValue;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
		sizeof(message), true);
}
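
// Illustrative sketch (comment only): a debugger enables the tracing handled
// by the two hooks above by setting the corresponding team debug flags via
// the nub port. The struct name is an assumption for illustration; the field
// matches what the B_DEBUG_MESSAGE_SET_TEAM_FLAGS handler in
// debug_nub_thread() below reads (message.set_team_flags.flags).
//
//	debug_nub_set_team_flags request;
//	request.flags = B_TEAM_DEBUG_PRE_SYSCALL | B_TEAM_DEBUG_POST_SYSCALL;
//	write_port(nubPort, B_DEBUG_MESSAGE_SET_TEAM_FLAGS, &request,
//		sizeof(request));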


/**	\brief To be called when an unhandled processor exception (error/fault)
 *	occurred.
 *	\param exception The debug_exception_type value identifying the kind of
 *		fault.
 *	\param signal The signal corresponding to the exception.
 *	\return \c true, if the caller shall continue normally, i.e. usually send
 *		a deadly signal. \c false, if the debugger insists on continuing the
 *		program (e.g. because it has removed the cause of the problem).
 */
bool
user_debug_exception_occurred(debug_exception_type exception, int signal)
{
	// First check whether there's a signal handler installed for the signal.
	// If so, we don't want to install a debugger for the team. We always send
	// the signal instead. An already installed debugger will be notified, if
	// it has requested notifications of signals.
	struct sigaction signalAction;
	if (sigaction(signal, NULL, &signalAction) == 0
		&& signalAction.sa_handler != SIG_DFL) {
		return true;
	}

	// prepare the message
	debug_exception_occurred message;
	message.exception = exception;
	message.signal = signal;

	status_t result = thread_hit_serious_debug_event(
		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


bool
user_debug_handle_signal(int signal, struct sigaction *handler, bool deadly)
{
	// Check whether a debugger is installed and is interested in signals.
	// (The expression is non-zero iff at least one of the two flags is not
	// set in teamDebugFlags.)
	struct thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
		return true;
	}

	// prepare the message
	debug_signal_received message;
	message.signal = signal;
	message.handler = *handler;
	message.deadly = deadly;

	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
		&message, sizeof(message), true);
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


void
user_debug_stop_thread()
{
	// check whether this is actually an emulated single-step notification
	InterruptsSpinLocker threadsLocker(gThreadSpinlock);
	struct thread* thread = thread_get_current_thread();
	bool singleStepped = false;
	if ((atomic_and(&thread->debug_info.flags,
				~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
			& B_THREAD_DEBUG_NOTIFY_SINGLE_STEP) != 0) {
		singleStepped = true;
	}

	threadsLocker.Unlock();

	if (singleStepped) {
		user_debug_single_stepped();
	} else {
		debug_thread_debugged message;
		thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED,
			&message, sizeof(message));
	}
}


void
user_debug_team_created(team_id teamID)
{
	// check whether a debugger is installed and is interested in team
	// creation events
	struct thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_created message;
	message.new_team = teamID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_team_deleted(team_id teamID, port_id debuggerPort)
{
	if (debuggerPort >= 0) {
		TRACE(("user_debug_team_deleted(team: %ld, debugger port: %ld)\n",
			teamID, debuggerPort));

		debug_team_deleted message;
		message.origin.thread = -1;
		message.origin.team = teamID;
		message.origin.nub_port = -1;
		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
			sizeof(message), B_RELATIVE_TIMEOUT, 0);
	}
}


void
user_debug_team_exec()
{
	// check whether a debugger is installed and is interested in team
	// creation events
	struct thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_exec message;
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
		sizeof(message), true);
}
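
// Note on the image_event stamping above (also used by
// user_debug_image_created()/user_debug_image_deleted() below): atomic_add()
// returns the counter's *previous* value, so the `+ 1' yields the value the
// counter holds after the increment. Illustration:
//
//	int32 old = atomic_add(&counter, 1);	// counter: n -> n + 1, old == n
//	int32 current = old + 1;				// n + 1, the post-increment value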


void
user_debug_update_new_thread_flags(thread_id threadID)
{
	// Update thread::flags of the thread.

	InterruptsLocker interruptsLocker;

	SpinLocker teamLocker(gTeamSpinlock);
	SpinLocker threadLocker(gThreadSpinlock);

	struct thread *thread = thread_get_thread_struct_locked(threadID);
	if (!thread)
		return;

	update_thread_user_debug_flag(thread);
	update_thread_breakpoints_flag(thread);
	update_thread_debugger_installed_flag(thread);
}


void
user_debug_thread_created(thread_id threadID)
{
	// check whether a debugger is installed and is interested in thread events
	struct thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// prepare the message
	debug_thread_created message;
	message.new_thread = threadID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_thread_deleted(team_id teamID, thread_id threadID)
{
	// Things are a bit complicated here, since this thread no longer belongs
	// to the debugged team (but to the kernel). So we can't use
	// debugger_write().

	// get the team debug flags and debugger port
	InterruptsSpinLocker teamLocker(gTeamSpinlock);

	struct team *team = team_get_team_struct_locked(teamID);
	if (team == NULL)
		return;

	SpinLocker debugInfoLocker(team->debug_info.lock);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;
	sem_id writeLock = team->debug_info.debugger_write_lock;

	debugInfoLocker.Unlock();
	teamLocker.Unlock();

	// check whether a debugger is installed and is interested in thread events
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// acquire the debugger write lock
	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK)
		return;

	// re-get the team debug info -- we need to check whether anything changed
	teamLocker.Lock();

	team = team_get_team_struct_locked(teamID);
	if (team == NULL)
		return;

	debugInfoLocker.Lock();

	teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id newDebuggerPort = team->debug_info.debugger_port;

	debugInfoLocker.Unlock();
	teamLocker.Unlock();

	// Send the message only if the debugger hasn't changed in the meantime or
	// the team is about to be handed over.
	if (newDebuggerPort == debuggerPort
		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
		debug_thread_deleted message;
		message.origin.thread = threadID;
		message.origin.team = teamID;
		message.origin.nub_port = -1;

		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
	}

	// release the debugger write lock
	release_sem(writeLock);
}


void
user_debug_thread_exiting(struct thread* thread)
{
	InterruptsLocker interruptsLocker;
	SpinLocker teamLocker(gTeamSpinlock);

	struct team* team = thread->team;

	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	teamLocker.Unlock();

	// check whether a debugger is installed
	if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
		|| debuggerPort < 0) {
		return;
	}

	// detach the profile info and mark the thread dying
	SpinLocker threadLocker(gThreadSpinlock);

	thread_debug_info& threadDebugInfo = thread->debug_info;
	if (threadDebugInfo.profile.samples == NULL)
		return;

	area_id sampleArea = threadDebugInfo.profile.sample_area;
	int32 sampleCount = threadDebugInfo.profile.sample_count;
	int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
	int32 stackDepth = threadDebugInfo.profile.stack_depth;
	bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
	int32 imageEvent = threadDebugInfo.profile.image_event;
	threadDebugInfo.profile.sample_area = -1;
	threadDebugInfo.profile.samples = NULL;
	threadDebugInfo.profile.buffer_full = false;

	atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);

	threadLocker.Unlock();
	interruptsLocker.Unlock();

	// notify the debugger
	debug_profiler_update message;
	message.origin.thread = thread->id;
	message.origin.team = thread->team->id;
	message.origin.nub_port = -1;	// asynchronous message
	message.sample_count = sampleCount;
	message.dropped_ticks = droppedTicks;
	message.stack_depth = stackDepth;
	message.variable_stack_depth = variableStackDepth;
	message.image_event = imageEvent;
	message.stopped = true;
	debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
		&message, sizeof(message), false);

	if (sampleArea >= 0) {
		area_info areaInfo;
		if (get_area_info(sampleArea, &areaInfo) == B_OK) {
			unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
			delete_area(sampleArea);
		}
	}
}


void
user_debug_image_created(const image_info *imageInfo)
{
	// check whether a debugger is installed and is interested in image events
	struct thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_created message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_image_deleted(const image_info *imageInfo)
{
	// check whether a debugger is installed and is interested in image events
	struct thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_deleted message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message,
		sizeof(message), true);
}


void
user_debug_breakpoint_hit(bool software)
{
	// prepare the message
	debug_breakpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
		sizeof(message));
}


void
user_debug_watchpoint_hit()
{
	// prepare the message
	debug_watchpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
		sizeof(message));
}


void
user_debug_single_stepped()
{
	// clear the single-step thread flag
	struct thread* thread = thread_get_current_thread();
	atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_SINGLE_STEP);

	// prepare the message
	debug_single_step message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
		sizeof(message));
}


static void
schedule_profiling_timer(struct thread* thread, bigtime_t interval)
{
	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
	thread->debug_info.profile.installed_timer = timer;
	thread->debug_info.profile.timer_end = system_time() + interval;
	add_timer(timer, &profiling_event, interval,
		B_ONE_SHOT_RELATIVE_TIMER | B_TIMER_ACQUIRE_THREAD_LOCK);
}


static bool
profiling_do_sample(bool& flushBuffer)
{
	struct thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	if (debugInfo.profile.samples == NULL)
		return false;

	// Check whether the buffer is full or an image event occurred since the
	// last sample was taken.
	int32 maxSamples = debugInfo.profile.max_samples;
	int32 sampleCount = debugInfo.profile.sample_count;
	int32 stackDepth = debugInfo.profile.stack_depth;
	int32 imageEvent = thread->team->debug_info.image_event;
	if (debugInfo.profile.sample_count > 0) {
		if (debugInfo.profile.last_image_event < imageEvent
			&& debugInfo.profile.variable_stack_depth
			&& sampleCount + 2 <= maxSamples) {
			// an image event occurred, but we use variable stack depth and
			// have enough room in the buffer to indicate an image event
			addr_t* event = debugInfo.profile.samples + sampleCount;
			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
			event[1] = imageEvent;
			sampleCount += 2;
			debugInfo.profile.sample_count = sampleCount;
			debugInfo.profile.last_image_event = imageEvent;
		}

		if (debugInfo.profile.last_image_event < imageEvent
			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
			if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
				flushBuffer = true;
				return true;
			}

			// We can't flush the buffer now, since we interrupted a kernel
			// function. If the buffer is not full yet, we add the samples,
			// otherwise we have to drop them.
			if (maxSamples - sampleCount < stackDepth) {
				debugInfo.profile.dropped_ticks++;
				return true;
			}
		}
	} else {
		// first sample -- set the image event
		debugInfo.profile.image_event = imageEvent;
		debugInfo.profile.last_image_event = imageEvent;
	}

	// get the samples
	addr_t* returnAddresses = debugInfo.profile.samples
		+ debugInfo.profile.sample_count;
	if (debugInfo.profile.variable_stack_depth) {
		// variable sample count per hit
		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
			stackDepth - 1, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

		debugInfo.profile.sample_count += *returnAddresses + 1;
	} else {
		// fixed sample count per hit
		if (stackDepth > 1) {
			int32 count = arch_debug_get_stack_trace(returnAddresses,
				stackDepth, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

			for (int32 i = count; i < stackDepth; i++)
				returnAddresses[i] = 0;
		} else
			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);

		debugInfo.profile.sample_count += stackDepth;
	}

	return true;
}
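
// Illustrative sketch (comment only, assuming a consumer with access to the
// sample area): decoding the variable-stack-depth format produced above. In
// that mode each hit is stored as a leading count followed by that many
// return addresses, and B_DEBUG_PROFILE_IMAGE_EVENT starts a two-entry image
// event record. (In fixed-depth mode each hit simply occupies stack_depth
// zero-padded entries.)
//
//	for (int32 i = 0; i < sampleCount; ) {
//		if (samples[i] == B_DEBUG_PROFILE_IMAGE_EVENT) {
//			int32 imageEvent = (int32)samples[i + 1];	// new image state
//			i += 2;
//			continue;
//		}
//		addr_t count = samples[i++];	// return addresses that follow
//		for (addr_t k = 0; k < count; k++)
//			/* samples[i + k] is a return address of this hit */;
//		i += count;
//	}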


static void
profiling_buffer_full(void*)
{
	struct thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	GRAB_THREAD_LOCK();

	if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
		int32 sampleCount = debugInfo.profile.sample_count;
		int32 droppedTicks = debugInfo.profile.dropped_ticks;
		int32 stackDepth = debugInfo.profile.stack_depth;
		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
		int32 imageEvent = debugInfo.profile.image_event;

		// notify the debugger
		debugInfo.profile.sample_count = 0;
		debugInfo.profile.dropped_ticks = 0;

		RELEASE_THREAD_LOCK();
		enable_interrupts();

		// prepare the message
		debug_profiler_update message;
		message.sample_count = sampleCount;
		message.dropped_ticks = droppedTicks;
		message.stack_depth = stackDepth;
		message.variable_stack_depth = variableStackDepth;
		message.image_event = imageEvent;
		message.stopped = false;

		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
			sizeof(message), false);

		disable_interrupts();
		GRAB_THREAD_LOCK();

		// do the sampling and reschedule timer, if still profiling this thread
		bool flushBuffer;
		if (profiling_do_sample(flushBuffer)) {
			debugInfo.profile.buffer_full = false;
			schedule_profiling_timer(thread, debugInfo.profile.interval);
		}
	}

	RELEASE_THREAD_LOCK();
}


/*!	Called with the thread spinlock held.
*/
static int32
profiling_event(timer* /*unused*/)
{
	struct thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	bool flushBuffer = false;
	if (profiling_do_sample(flushBuffer)) {
		if (flushBuffer) {
			// The sample buffer needs to be flushed; we'll have to notify the
			// debugger. We can't do that right here. Instead we set a post
			// interrupt callback doing that for us, and don't reschedule the
			// timer yet.
			thread->post_interrupt_callback = profiling_buffer_full;
			debugInfo.profile.installed_timer = NULL;
			debugInfo.profile.buffer_full = true;
		} else
			schedule_profiling_timer(thread, debugInfo.profile.interval);
	} else
		debugInfo.profile.installed_timer = NULL;

	return B_HANDLED_INTERRUPT;
}


void
user_debug_thread_unscheduled(struct thread* thread)
{
	// if running, cancel the profiling timer
	struct timer* timer = thread->debug_info.profile.installed_timer;
	if (timer != NULL) {
		// track remaining time
		bigtime_t left = thread->debug_info.profile.timer_end - system_time();
		thread->debug_info.profile.interval_left = max_c(left, 0);
		thread->debug_info.profile.installed_timer = NULL;

		// cancel timer
		cancel_timer(timer);
	}
}


void
user_debug_thread_scheduled(struct thread* thread)
{
	if (thread->debug_info.profile.samples != NULL
		&& !thread->debug_info.profile.buffer_full) {
		// install profiling timer
		schedule_profiling_timer(thread,
			thread->debug_info.profile.interval_left);
	}
}
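
// Note (illustrative): the two hooks above preserve the sampling budget
// across context switches. With an interval of, say, 1000 µs, a thread
// unscheduled 600 µs into the current period computes
//
//	bigtime_t left = profile.timer_end - system_time();	// == 400
//	profile.interval_left = max_c(left, 0);				// never negative
//
// and the timer is rearmed with exactly those remaining 400 µs when the
// thread is scheduled again.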


/*!	\brief Called by the debug nub thread of a team to broadcast a message to
	all threads of the team that are initialized for debugging (and thus have
	a debug port).
*/
static void
broadcast_debugged_thread_message(struct thread *nubThread, int32 code,
	const void *message, int32 size)
{
	// iterate through the threads
	thread_info threadInfo;
	int32 cookie = 0;
	while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
			== B_OK) {
		// find the thread and get its debug port
		cpu_status state = disable_interrupts();
		GRAB_THREAD_LOCK();

		port_id threadDebugPort = -1;
		thread_id threadID = -1;
		struct thread *thread
			= thread_get_thread_struct_locked(threadInfo.thread);
		if (thread && thread != nubThread && thread->team == nubThread->team
			&& (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
			&& (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
			threadDebugPort = thread->debug_info.debug_port;
			threadID = thread->id;
		}

		RELEASE_THREAD_LOCK();
		restore_interrupts(state);

		// send the message to the thread
		if (threadDebugPort >= 0) {
			status_t error = kill_interruptable_write_port(threadDebugPort,
				code, message, size);
			if (error != B_OK) {
				TRACE(("broadcast_debugged_thread_message(): Failed to send "
					"message to thread %ld: %lx\n", threadID, error));
			}
		}
	}
}


static void
nub_thread_cleanup(struct thread *nubThread)
{
	TRACE(("nub_thread_cleanup(%ld): debugger port: %ld\n", nubThread->id,
		nubThread->team->debug_info.debugger_port));

	ConditionVariable debugChangeCondition;
	prepare_debugger_change(nubThread->team, debugChangeCondition);

	team_debug_info teamDebugInfo;
	bool destroyDebugInfo = false;

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	team_debug_info &info = nubThread->team->debug_info;
	if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
		&& info.nub_thread == nubThread->id) {
		teamDebugInfo = info;
		clear_team_debug_info(&info, false);
		destroyDebugInfo = true;
	}

	// update the thread::flags fields
	update_threads_debugger_installed_flag(nubThread->team);

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	if (destroyDebugInfo)
		teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();

	finish_debugger_change(nubThread->team);

	if (destroyDebugInfo)
		destroy_team_debug_info(&teamDebugInfo);

	// notify all threads that the debugger is gone
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
}


/**	\brief Debug nub thread helper function that returns the debug port of
 *	a thread of the same team.
 */
static status_t
debug_nub_thread_get_thread_debug_port(struct thread *nubThread,
	thread_id threadID, port_id &threadDebugPort)
{
	status_t result = B_OK;
	threadDebugPort = -1;

	cpu_status state = disable_interrupts();
	GRAB_THREAD_LOCK();

	struct thread *thread = thread_get_thread_struct_locked(threadID);
	if (thread) {
		if (thread->team != nubThread->team)
			result = B_BAD_VALUE;
		else if (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED)
			threadDebugPort = thread->debug_info.debug_port;
		else
			result = B_BAD_THREAD_STATE;
	} else
		result = B_BAD_THREAD_ID;

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	if (result == B_OK && threadDebugPort < 0)
		result = B_ERROR;

	return result;
}
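
// Illustrative sketch (comment only): a debugger requesting a stopped
// thread's CPU state through the nub port. The nub forwards the request to
// the thread (case B_DEBUG_MESSAGE_GET_CPU_STATE below), which replies
// directly to `replyPort' with a debug_nub_get_cpu_state_reply. The request
// struct name is an assumption for illustration; the fields match what the
// nub code reads from message.get_cpu_state.
//
//	debug_nub_get_cpu_state request;
//	request.reply_port = replyPort;		// port owned by the debugger
//	request.thread = stoppedThread;
//	write_port(nubPort, B_DEBUG_MESSAGE_GET_CPU_STATE, &request,
//		sizeof(request));
//
//	int32 code;
//	debug_nub_get_cpu_state_reply reply;
//	read_port(replyPort, &code, &reply, sizeof(reply));
//	if (reply.error == B_OK)
//		/* use reply.cpu_state */;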


static status_t
debug_nub_thread(void *)
{
	struct thread *nubThread = thread_get_current_thread();

	// check whether we're still the current nub thread and get our port
	cpu_status state = disable_interrupts();

	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	if (nubThread->team->debug_info.nub_thread != nubThread->id) {
		RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
		restore_interrupts(state);
		return 0;
	}

	port_id port = nubThread->team->debug_info.nub_port;
	sem_id writeLock = nubThread->team->debug_info.debugger_write_lock;
	BreakpointManager* breakpointManager
		= nubThread->team->debug_info.breakpoint_manager;

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	TRACE(("debug_nub_thread() thread: %ld, team %ld, nub port: %ld\n",
		nubThread->id, nubThread->team->id, port));

	// notify all threads that a debugger has been installed
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);

	// command processing loop
	while (true) {
		int32 command;
		debug_nub_message_data message;
		ssize_t messageSize = read_port_etc(port, &command, &message,
			sizeof(message), B_KILL_CAN_INTERRUPT, 0);

		if (messageSize < 0) {
			// The port is no longer valid or we were interrupted by a kill
			// signal: If we are still listed in the team's debug info as nub
			// thread, we need to update that.
			nub_thread_cleanup(nubThread);

			TRACE(("nub thread %ld: terminating: %lx\n", nubThread->id,
				messageSize));

			return messageSize;
		}

		bool sendReply = false;
		union {
			debug_nub_read_memory_reply read_memory;
			debug_nub_write_memory_reply write_memory;
			debug_nub_get_cpu_state_reply get_cpu_state;
			debug_nub_set_breakpoint_reply set_breakpoint;
			debug_nub_set_watchpoint_reply set_watchpoint;
			debug_nub_get_signal_masks_reply get_signal_masks;
			debug_nub_get_signal_handler_reply get_signal_handler;
			debug_nub_start_profiler_reply start_profiler;
			debug_profiler_update profiler_update;
		} reply;
		int32 replySize = 0;
		port_id replyPort = -1;

		// process the command
		switch (command) {
			case B_DEBUG_MESSAGE_READ_MEMORY:
			{
				// get the parameters
				replyPort = message.read_memory.reply_port;
				void *address = message.read_memory.address;
				int32 size = message.read_memory.size;
				status_t result = B_OK;

				// check the parameters
				if (!BreakpointManager::CanAccessAddress(address, false))
					result = B_BAD_ADDRESS;
				else if (size <= 0 || size > B_MAX_READ_WRITE_MEMORY_SIZE)
					result = B_BAD_VALUE;

				// read the memory
				size_t bytesRead = 0;
				if (result == B_OK) {
					result = breakpointManager->ReadMemory(address,
						reply.read_memory.data, size, bytesRead);
				}
				reply.read_memory.error = result;

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_READ_MEMORY: "
					"reply port: %ld, address: %p, size: %ld, result: %lx, "
					"read: %ld\n", nubThread->id, replyPort, address, size,
					result, bytesRead));

				// send only as much data as necessary
				reply.read_memory.size = bytesRead;
				replySize = reply.read_memory.data + bytesRead - (char*)&reply;
				sendReply = true;
				break;
			}

			case B_DEBUG_MESSAGE_WRITE_MEMORY:
			{
				// get the parameters
				replyPort = message.write_memory.reply_port;
				void *address = message.write_memory.address;
				int32 size = message.write_memory.size;
				const char *data = message.write_memory.data;
				int32 realSize = (char*)&message + messageSize - data;
				status_t result = B_OK;

				// check the parameters
				if (!BreakpointManager::CanAccessAddress(address, true))
					result = B_BAD_ADDRESS;
				else if (size <= 0 || size > realSize)
					result = B_BAD_VALUE;

				// write the memory
				size_t bytesWritten = 0;
				if (result == B_OK) {
					result = breakpointManager->WriteMemory(address, data, size,
						bytesWritten);
				}
				reply.write_memory.error = result;

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_WRITE_MEMORY: "
					"reply port: %ld, address: %p, size: %ld, result: %lx, "
					"written: %ld\n", nubThread->id, replyPort, address, size,
					result, bytesWritten));

				reply.write_memory.size = bytesWritten;
				sendReply = true;
				replySize = sizeof(debug_nub_write_memory_reply);
				break;
			}

			case B_DEBUG_MESSAGE_SET_TEAM_FLAGS:
			{
				// get the parameters
				int32 flags = message.set_team_flags.flags
					& B_TEAM_DEBUG_USER_FLAG_MASK;

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_TEAM_FLAGS: "
					"flags: %lx\n", nubThread->id, flags));

				struct team *team = thread_get_current_thread()->team;

				// set the flags
				cpu_status state = disable_interrupts();
				GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

				flags |= team->debug_info.flags & B_TEAM_DEBUG_KERNEL_FLAG_MASK;
				atomic_set(&team->debug_info.flags, flags);

				RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
				restore_interrupts(state);

				break;
			}

			case B_DEBUG_MESSAGE_SET_THREAD_FLAGS:
			{
				// get the parameters
				thread_id threadID = message.set_thread_flags.thread;
				int32 flags = message.set_thread_flags.flags
					& B_THREAD_DEBUG_USER_FLAG_MASK;

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_THREAD_FLAGS: "
					"thread: %ld, flags: %lx\n", nubThread->id, threadID,
					flags));

				// set the flags
				cpu_status state = disable_interrupts();
				GRAB_THREAD_LOCK();

				struct thread *thread
					= thread_get_thread_struct_locked(threadID);
				if (thread
					&& thread->team == thread_get_current_thread()->team) {
					flags |= thread->debug_info.flags
						& B_THREAD_DEBUG_KERNEL_FLAG_MASK;
					atomic_set(&thread->debug_info.flags, flags);
				}

				RELEASE_THREAD_LOCK();
				restore_interrupts(state);

				break;
			}

			case B_DEBUG_MESSAGE_CONTINUE_THREAD:
			{
				// get the parameters
				thread_id threadID;
				uint32 handleEvent;
				bool singleStep;

				threadID = message.continue_thread.thread;
				handleEvent = message.continue_thread.handle_event;
				singleStep = message.continue_thread.single_step;

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_CONTINUE_THREAD: "
					"thread: %ld, handle event: %lu, single step: %d\n",
					nubThread->id, threadID, handleEvent, singleStep));

				// find the thread and get its debug port
				port_id threadDebugPort = -1;
				status_t result = debug_nub_thread_get_thread_debug_port(
					nubThread, threadID, threadDebugPort);

				// send a message to the debugged thread
				if (result == B_OK) {
					debugged_thread_continue commandMessage;
					commandMessage.handle_event = handleEvent;
					commandMessage.single_step = singleStep;

					result = write_port(threadDebugPort,
						B_DEBUGGED_THREAD_MESSAGE_CONTINUE,
						&commandMessage, sizeof(commandMessage));
				}

				break;
			}

			case B_DEBUG_MESSAGE_SET_CPU_STATE:
			{
				// get the parameters
				thread_id threadID = message.set_cpu_state.thread;
				const debug_cpu_state &cpuState
					= message.set_cpu_state.cpu_state;

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_CPU_STATE: "
					"thread: %ld\n", nubThread->id, threadID));

				// find the thread and get its debug port
				port_id threadDebugPort = -1;
				status_t result = debug_nub_thread_get_thread_debug_port(
					nubThread, threadID, threadDebugPort);

				// send a message to the debugged thread
				if (result == B_OK) {
					debugged_thread_set_cpu_state commandMessage;
					memcpy(&commandMessage.cpu_state, &cpuState,
						sizeof(debug_cpu_state));
					write_port(threadDebugPort,
						B_DEBUGGED_THREAD_SET_CPU_STATE,
						&commandMessage, sizeof(commandMessage));
				}

				break;
			}

			case B_DEBUG_MESSAGE_GET_CPU_STATE:
			{
				// get the parameters
				thread_id threadID = message.get_cpu_state.thread;
				replyPort = message.get_cpu_state.reply_port;

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_GET_CPU_STATE: "
					"thread: %ld\n", nubThread->id, threadID));

				// find the thread and get its debug port
				port_id threadDebugPort = -1;
				status_t result = debug_nub_thread_get_thread_debug_port(
					nubThread, threadID, threadDebugPort);

				// send a message to the debugged thread
				if (threadDebugPort >= 0) {
					debugged_thread_get_cpu_state commandMessage;
					commandMessage.reply_port = replyPort;
					result = write_port(threadDebugPort,
						B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage,
						sizeof(commandMessage));
				}

				// send a reply to the debugger in case of error
				if (result != B_OK) {
					reply.get_cpu_state.error = result;
					sendReply = true;
					replySize = sizeof(reply.get_cpu_state);
				}

				break;
			}

			case B_DEBUG_MESSAGE_GET_CPU_STATE:
			{
				// get the parameters
				thread_id threadID = message.get_cpu_state.thread;
				replyPort = message.get_cpu_state.reply_port;

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_GET_CPU_STATE: "
					"thread: %ld\n", nubThread->id, threadID));

				// find the thread and get its debug port
				port_id threadDebugPort = -1;
				status_t result = debug_nub_thread_get_thread_debug_port(
					nubThread, threadID, threadDebugPort);

				// send a message to the debugged thread
				if (threadDebugPort >= 0) {
					debugged_thread_get_cpu_state commandMessage;
					commandMessage.reply_port = replyPort;
					result = write_port(threadDebugPort,
						B_DEBUGGED_THREAD_GET_CPU_STATE, &commandMessage,
						sizeof(commandMessage));
				}

				// send a reply to the debugger in case of error
				if (result != B_OK) {
					reply.get_cpu_state.error = result;
					sendReply = true;
					replySize = sizeof(reply.get_cpu_state);
				}

				break;
			}

			case B_DEBUG_MESSAGE_SET_BREAKPOINT:
			{
				// get the parameters
				replyPort = message.set_breakpoint.reply_port;
				void *address = message.set_breakpoint.address;

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_BREAKPOINT: "
					"address: %p\n", nubThread->id, address));

				// check the address
				status_t result = B_OK;
				if (address == NULL
					|| !BreakpointManager::CanAccessAddress(address, false)) {
					result = B_BAD_ADDRESS;
				}

				// set the breakpoint
				if (result == B_OK)
					result = breakpointManager->InstallBreakpoint(address);

				if (result == B_OK)
					update_threads_breakpoints_flag();

				// prepare the reply
				reply.set_breakpoint.error = result;
				replySize = sizeof(reply.set_breakpoint);
				sendReply = true;

				break;
			}

			case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT:
			{
				// get the parameters
				void *address = message.clear_breakpoint.address;

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_CLEAR_BREAKPOINT: "
					"address: %p\n", nubThread->id, address));

				// check the address
				status_t result = B_OK;
				if (address == NULL
					|| !BreakpointManager::CanAccessAddress(address, false)) {
					result = B_BAD_ADDRESS;
				}

				// clear the breakpoint
				if (result == B_OK)
					result = breakpointManager->UninstallBreakpoint(address);

				if (result == B_OK)
					update_threads_breakpoints_flag();

				break;
			}
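
			// A debugger drives these requests with plain port messages on
			// the nub port. An illustrative (not normative) round trip for
			// the breakpoint request above, assuming the message structures
			// declared in <debugger.h>:
			//
			//	debug_nub_set_breakpoint message;
			//	message.reply_port = replyPort;
			//	message.address = address;
			//	write_port(nubPort, B_DEBUG_MESSAGE_SET_BREAKPOINT,
			//		&message, sizeof(message));
			//
			//	debug_nub_set_breakpoint_reply reply;
			//	int32 code;
			//	read_port(replyPort, &code, &reply, sizeof(reply));
			//	// code is the command, reply.error holds the result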

			case B_DEBUG_MESSAGE_SET_WATCHPOINT:
			{
				// get the parameters
				replyPort = message.set_watchpoint.reply_port;
				void *address = message.set_watchpoint.address;
				uint32 type = message.set_watchpoint.type;
				int32 length = message.set_watchpoint.length;

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_WATCHPOINT: "
					"address: %p, type: %lu, length: %ld\n", nubThread->id,
					address, type, length));

				// check the address and size
				status_t result = B_OK;
				if (address == NULL
					|| !BreakpointManager::CanAccessAddress(address, false)) {
					result = B_BAD_ADDRESS;
				}
				if (length < 0)
					result = B_BAD_VALUE;

				// set the watchpoint
				if (result == B_OK) {
					result = breakpointManager->InstallWatchpoint(address, type,
						length);
				}

				if (result == B_OK)
					update_threads_breakpoints_flag();

				// prepare the reply
				reply.set_watchpoint.error = result;
				replySize = sizeof(reply.set_watchpoint);
				sendReply = true;

				break;
			}

			case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT:
			{
				// get the parameters
				void *address = message.clear_watchpoint.address;

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_CLEAR_WATCHPOINT: "
					"address: %p\n", nubThread->id, address));

				// check the address
				status_t result = B_OK;
				if (address == NULL
					|| !BreakpointManager::CanAccessAddress(address, false)) {
					result = B_BAD_ADDRESS;
				}

				// clear the watchpoint
				if (result == B_OK)
					result = breakpointManager->UninstallWatchpoint(address);

				if (result == B_OK)
					update_threads_breakpoints_flag();

				break;
			}

			case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS:
			{
				// get the parameters
				thread_id threadID = message.set_signal_masks.thread;
				uint64 ignore = message.set_signal_masks.ignore_mask;
				uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask;
				uint32 ignoreOp = message.set_signal_masks.ignore_op;
				uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op;

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_SIGNAL_MASKS: "
					"thread: %ld, ignore: %llx (op: %lu), ignore once: %llx "
					"(op: %lu)\n", nubThread->id, threadID, ignore,
					ignoreOp, ignoreOnce, ignoreOnceOp));

				// set the masks
				cpu_status state = disable_interrupts();
				GRAB_THREAD_LOCK();

				struct thread *thread
					= thread_get_thread_struct_locked(threadID);
				if (thread
					&& thread->team == thread_get_current_thread()->team) {
					thread_debug_info &threadDebugInfo = thread->debug_info;
					// set ignore mask
					switch (ignoreOp) {
						case B_DEBUG_SIGNAL_MASK_AND:
							threadDebugInfo.ignore_signals &= ignore;
							break;
						case B_DEBUG_SIGNAL_MASK_OR:
							threadDebugInfo.ignore_signals |= ignore;
							break;
						case B_DEBUG_SIGNAL_MASK_SET:
							threadDebugInfo.ignore_signals = ignore;
							break;
					}

					// set ignore once mask
					switch (ignoreOnceOp) {
						case B_DEBUG_SIGNAL_MASK_AND:
							threadDebugInfo.ignore_signals_once &= ignoreOnce;
							break;
						case B_DEBUG_SIGNAL_MASK_OR:
							threadDebugInfo.ignore_signals_once |= ignoreOnce;
							break;
						case B_DEBUG_SIGNAL_MASK_SET:
							threadDebugInfo.ignore_signals_once = ignoreOnce;
							break;
					}
				}

				RELEASE_THREAD_LOCK();
				restore_interrupts(state);

				break;
			}

			case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS:
			{
				// get the parameters
				replyPort = message.get_signal_masks.reply_port;
				thread_id threadID = message.get_signal_masks.thread;
				status_t result = B_OK;

				// get the masks
				uint64 ignore = 0;
				uint64 ignoreOnce = 0;

				cpu_status state = disable_interrupts();
				GRAB_THREAD_LOCK();

				struct thread *thread
					= thread_get_thread_struct_locked(threadID);
				if (thread) {
					ignore = thread->debug_info.ignore_signals;
					ignoreOnce = thread->debug_info.ignore_signals_once;
				} else
					result = B_BAD_THREAD_ID;

				RELEASE_THREAD_LOCK();
				restore_interrupts(state);

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_GET_SIGNAL_MASKS: "
					"reply port: %ld, thread: %ld, ignore: %llx, "
					"ignore once: %llx, result: %lx\n", nubThread->id,
					replyPort, threadID, ignore, ignoreOnce, result));

				// prepare the message
				reply.get_signal_masks.error = result;
				reply.get_signal_masks.ignore_mask = ignore;
				reply.get_signal_masks.ignore_once_mask = ignoreOnce;
				replySize = sizeof(reply.get_signal_masks);
				sendReply = true;
				break;
			}
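
			// The two signal handler requests below piggyback on the
			// kernel's sigaction machinery: the nub thread verifies that the
			// target thread belongs to its team and then calls
			// sigaction_etc() on the debugger's behalf.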

			case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER:
			{
				// get the parameters
				thread_id threadID = message.set_signal_handler.thread;
				int signal = message.set_signal_handler.signal;
				struct sigaction &handler = message.set_signal_handler.handler;

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER: "
					"thread: %ld, signal: %d, handler: %p\n", nubThread->id,
					threadID, signal, handler.sa_handler));

				// check whether the thread exists and is ours
				cpu_status state = disable_interrupts();
				GRAB_THREAD_LOCK();

				struct thread *thread
					= thread_get_thread_struct_locked(threadID);
				if (thread
					&& thread->team != thread_get_current_thread()->team) {
					thread = NULL;
				}

				RELEASE_THREAD_LOCK();
				restore_interrupts(state);

				// set the handler
				if (thread)
					sigaction_etc(threadID, signal, &handler, NULL);

				break;
			}

			case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER:
			{
				// get the parameters
				replyPort = message.get_signal_handler.reply_port;
				thread_id threadID = message.get_signal_handler.thread;
				int signal = message.get_signal_handler.signal;
				status_t result = B_OK;

				// check whether the thread exists and is ours
				cpu_status state = disable_interrupts();
				GRAB_THREAD_LOCK();

				struct thread *thread
					= thread_get_thread_struct_locked(threadID);
				if (thread) {
					if (thread->team != thread_get_current_thread()->team)
						result = B_BAD_VALUE;
				} else
					result = B_BAD_THREAD_ID;

				RELEASE_THREAD_LOCK();
				restore_interrupts(state);

				// get the handler
				if (result == B_OK
					&& sigaction_etc(threadID, signal, NULL,
						&reply.get_signal_handler.handler) != 0) {
					result = errno;
				}

				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER: "
					"reply port: %ld, thread: %ld, signal: %d, "
					"handler: %p\n", nubThread->id, replyPort,
					threadID, signal,
					reply.get_signal_handler.handler.sa_handler));

				// prepare the message
				reply.get_signal_handler.error = result;
				replySize = sizeof(reply.get_signal_handler);
				sendReply = true;
				break;
			}
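
			// Handover protocol: the old debugger sends
			// B_DEBUG_MESSAGE_PREPARE_HANDOVER, which quiesces writers to
			// the debugger port (write lock plus the
			// B_TEAM_DEBUG_DEBUGGER_HANDOVER flag) and removes all installed
			// breakpoints. The new debugger then calls
			// install_team_debugger(), which detects the handover flag,
			// takes over the existing nub, and triggers
			// B_DEBUG_MESSAGE_HANDED_OVER so that stopped threads learn
			// about their new debugger.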

			case B_DEBUG_MESSAGE_PREPARE_HANDOVER:
			{
				TRACE(("nub thread %ld: B_DEBUG_MESSAGE_PREPARE_HANDOVER\n",
					nubThread->id));

				struct team *team = nubThread->team;

				// Acquire the debugger write lock. As soon as we have it and
				// have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread
				// will write anything to the debugger port anymore.
				status_t result = acquire_sem_etc(writeLock, 1,
					B_KILL_CAN_INTERRUPT, 0);
				if (result == B_OK) {
					// set the respective team debug flag
					cpu_status state = disable_interrupts();
					GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

					atomic_or(&team->debug_info.flags,
						B_TEAM_DEBUG_DEBUGGER_HANDOVER);
					BreakpointManager* breakpointManager
						= team->debug_info.breakpoint_manager;

					RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
					restore_interrupts(state);

					// remove all installed breakpoints
					breakpointManager->RemoveAllBreakpoints();

					release_sem(writeLock);
				} else {
					// We probably got a SIGKILL. If so, we will terminate when
					// reading the next message fails.
				}

				break;
			}

			case B_DEBUG_MESSAGE_HANDED_OVER:
			{
				// notify all threads that the debugger has changed
				broadcast_debugged_thread_message(nubThread,
					B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);

				break;
			}
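
			// Profiling setup: the debugger passes in an area for the
			// samples; it is cloned into the kernel team and its memory is
			// locked, since samples are written from the profiling timer
			// hook, which must not page fault. With a variable stack depth
			// each hit is preceded by an extra entry holding its sample
			// count, hence the stackDepth++ below.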

			case B_DEBUG_START_PROFILER:
			{
				// get the parameters
				thread_id threadID = message.start_profiler.thread;
				replyPort = message.start_profiler.reply_port;
				area_id sampleArea = message.start_profiler.sample_area;
				int32 stackDepth = message.start_profiler.stack_depth;
				bool variableStackDepth
					= message.start_profiler.variable_stack_depth;
				bigtime_t interval = max_c(message.start_profiler.interval,
					B_DEBUG_MIN_PROFILE_INTERVAL);
				status_t result = B_OK;

				TRACE(("nub thread %ld: B_DEBUG_START_PROFILER: "
					"thread: %ld, sample area: %ld\n", nubThread->id, threadID,
					sampleArea));

				if (stackDepth < 1)
					stackDepth = 1;
				else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH)
					stackDepth = B_DEBUG_STACK_TRACE_DEPTH;

				// provision for an extra entry per hit (for the number of
				// samples), if the stack depth is variable
				if (variableStackDepth)
					stackDepth++;

				// clone the sample area
				area_info areaInfo;
				if (result == B_OK)
					result = get_area_info(sampleArea, &areaInfo);

				area_id clonedSampleArea = -1;
				void* samples = NULL;
				if (result == B_OK) {
					clonedSampleArea = clone_area("profiling samples", &samples,
						B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA,
						sampleArea);
					if (clonedSampleArea >= 0) {
						// we need the memory locked
						result = lock_memory(samples, areaInfo.size,
							B_READ_DEVICE);
						if (result != B_OK) {
							delete_area(clonedSampleArea);
							clonedSampleArea = -1;
						}
					} else
						result = clonedSampleArea;
				}

				// get the thread and set the profile info
				int32 imageEvent = nubThread->team->debug_info.image_event;
				if (result == B_OK) {
					cpu_status state = disable_interrupts();
					GRAB_THREAD_LOCK();

					struct thread *thread
						= thread_get_thread_struct_locked(threadID);
					if (thread && thread->team == nubThread->team) {
						thread_debug_info &threadDebugInfo = thread->debug_info;
						if (threadDebugInfo.profile.samples == NULL) {
							threadDebugInfo.profile.interval = interval;
							threadDebugInfo.profile.sample_area
								= clonedSampleArea;
							threadDebugInfo.profile.samples = (addr_t*)samples;
							threadDebugInfo.profile.max_samples
								= areaInfo.size / sizeof(addr_t);
							threadDebugInfo.profile.flush_threshold
								= threadDebugInfo.profile.max_samples
									* B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD
									/ 100;
							threadDebugInfo.profile.sample_count = 0;
							threadDebugInfo.profile.dropped_ticks = 0;
							threadDebugInfo.profile.stack_depth = stackDepth;
							threadDebugInfo.profile.variable_stack_depth
								= variableStackDepth;
							threadDebugInfo.profile.buffer_full = false;
							threadDebugInfo.profile.interval_left = interval;
							threadDebugInfo.profile.installed_timer = NULL;
							threadDebugInfo.profile.image_event = imageEvent;
							threadDebugInfo.profile.last_image_event
								= imageEvent;
						} else
							result = B_BAD_VALUE;
					} else
						result = B_BAD_THREAD_ID;

					RELEASE_THREAD_LOCK();
					restore_interrupts(state);
				}

				// on error unlock and delete the sample area
				if (result != B_OK) {
					if (clonedSampleArea >= 0) {
						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
						delete_area(clonedSampleArea);
					}
				}

				// send a reply to the debugger
				reply.start_profiler.error = result;
				reply.start_profiler.interval = interval;
				reply.start_profiler.image_event = imageEvent;
				sendReply = true;
				replySize = sizeof(reply.start_profiler);

				break;
			}

			case B_DEBUG_STOP_PROFILER:
			{
				// get the parameters
				thread_id threadID = message.stop_profiler.thread;
				replyPort = message.stop_profiler.reply_port;
				status_t result = B_OK;

				TRACE(("nub thread %ld: B_DEBUG_STOP_PROFILER: "
					"thread: %ld\n", nubThread->id, threadID));

				area_id sampleArea = -1;
				addr_t* samples = NULL;
				int32 sampleCount = 0;
				int32 stackDepth = 0;
				bool variableStackDepth = false;
				int32 imageEvent = 0;
				int32 droppedTicks = 0;

				// get the thread and detach the profile info
				cpu_status state = disable_interrupts();
				GRAB_THREAD_LOCK();

				struct thread *thread
					= thread_get_thread_struct_locked(threadID);
				if (thread && thread->team == nubThread->team) {
					thread_debug_info &threadDebugInfo = thread->debug_info;
					if (threadDebugInfo.profile.samples != NULL) {
						sampleArea = threadDebugInfo.profile.sample_area;
						samples = threadDebugInfo.profile.samples;
						sampleCount = threadDebugInfo.profile.sample_count;
						droppedTicks = threadDebugInfo.profile.dropped_ticks;
						stackDepth = threadDebugInfo.profile.stack_depth;
						variableStackDepth
							= threadDebugInfo.profile.variable_stack_depth;
						imageEvent = threadDebugInfo.profile.image_event;
						threadDebugInfo.profile.sample_area = -1;
						threadDebugInfo.profile.samples = NULL;
						threadDebugInfo.profile.buffer_full = false;
						threadDebugInfo.profile.dropped_ticks = 0;
					} else
						result = B_BAD_VALUE;
				} else
					result = B_BAD_THREAD_ID;

				RELEASE_THREAD_LOCK();
				restore_interrupts(state);

				// prepare the reply
				if (result == B_OK) {
					reply.profiler_update.origin.thread = threadID;
					reply.profiler_update.image_event = imageEvent;
					reply.profiler_update.stack_depth = stackDepth;
					reply.profiler_update.variable_stack_depth
						= variableStackDepth;
					reply.profiler_update.sample_count = sampleCount;
					reply.profiler_update.dropped_ticks = droppedTicks;
					reply.profiler_update.stopped = true;
				} else
					reply.profiler_update.origin.thread = result;

				replySize = sizeof(debug_profiler_update);
				sendReply = true;

				if (sampleArea >= 0) {
					area_info areaInfo;
					if (get_area_info(sampleArea, &areaInfo) == B_OK) {
						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
						delete_area(sampleArea);
					}
				}
			}
		}

		// send the reply, if necessary
		if (sendReply) {
			status_t error = kill_interruptable_write_port(replyPort, command,
				&reply, replySize);

			if (error != B_OK) {
				// The debugger port is either no longer existing or we got
				// interrupted by a kill signal. In either case we terminate.
				TRACE(("nub thread %ld: failed to send reply to port %ld: %s\n",
					nubThread->id, replyPort, strerror(error)));

				nub_thread_cleanup(nubThread);
				return error;
			}
		}
	}
}
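
// Note: The nub thread terminates when receiving the next message or sending
// a reply fails, typically because the nub port was deleted -- either
// because the debugger team died or explicitly in
// _user_remove_team_debugger(). In either case nub_thread_cleanup() tears
// down the team's debug info.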

/**	\brief Helper function for install_team_debugger() that sets up the team
	and thread debug infos.

	Interrupts must be disabled and the team debug info lock of the team to be
	debugged must be held. The function will release the lock, but leave
	interrupts disabled.

	The function also clears the arch-specific team and thread debug infos
	(including, among other things, previously set break/watchpoints).
*/
static void
install_team_debugger_init_debug_infos(struct team *team, team_id debuggerTeam,
	port_id debuggerPort, port_id nubPort, thread_id nubThread,
	sem_id debuggerPortWriteLock, thread_id causingThread)
{
	atomic_set(&team->debug_info.flags,
		B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	team->debug_info.nub_port = nubPort;
	team->debug_info.nub_thread = nubThread;
	team->debug_info.debugger_team = debuggerTeam;
	team->debug_info.debugger_port = debuggerPort;
	team->debug_info.debugger_write_lock = debuggerPortWriteLock;
	team->debug_info.causing_thread = causingThread;

	arch_clear_team_debug_info(&team->debug_info.arch_info);

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	// set the user debug flags and signal masks of all threads to the default
	GRAB_THREAD_LOCK();

	for (struct thread *thread = team->thread_list;
			thread;
			thread = thread->team_next) {
		if (thread->id == nubThread) {
			atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD);
		} else {
			int32 flags = thread->debug_info.flags
				& ~B_THREAD_DEBUG_USER_FLAG_MASK;
			atomic_set(&thread->debug_info.flags,
				flags | B_THREAD_DEBUG_DEFAULT_FLAGS);
			thread->debug_info.ignore_signals = 0;
			thread->debug_info.ignore_signals_once = 0;

			arch_clear_thread_debug_info(&thread->debug_info.arch_info);
		}
	}

	RELEASE_THREAD_LOCK();

	// update the thread::flags fields
	update_threads_debugger_installed_flag(team);
}


// install_team_debugger() resolves the debugger team from the given port,
// refuses to debug the kernel team or the debugger's own team, and then
// either completes a handover prepared by the previous debugger or creates
// the debugging infrastructure (debugger write lock, nub port, breakpoint
// manager, and nub thread) from scratch.
static port_id
install_team_debugger(team_id teamID, port_id debuggerPort,
	thread_id causingThread, bool useDefault, bool dontReplace)
{
	TRACE(("install_team_debugger(team: %ld, port: %ld, default: %d, "
		"dontReplace: %d)\n", teamID, debuggerPort, useDefault, dontReplace));

	if (useDefault)
		debuggerPort = atomic_get(&sDefaultDebuggerPort);

	// get the debugger team
	port_info debuggerPortInfo;
	status_t error = get_port_info(debuggerPort, &debuggerPortInfo);
	if (error != B_OK) {
		TRACE(("install_team_debugger(): Failed to get debugger port info: "
			"%lx\n", error));
		return error;
	}
	team_id debuggerTeam = debuggerPortInfo.team;

	// Check the debugger team: It must neither be the kernel team nor the
	// debugged team.
	if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) {
		TRACE(("install_team_debugger(): Can't debug kernel or debugger team. "
			"debugger: %ld, debugged: %ld\n", debuggerTeam, teamID));
		return B_NOT_ALLOWED;
	}

	// get the team
	struct team* team;
	ConditionVariable debugChangeCondition;
	error = prepare_debugger_change(teamID, debugChangeCondition, team);
	if (error != B_OK)
		return error;

	// get the real team ID
	teamID = team->id;

	// check whether a debugger is already installed

	bool done = false;
	port_id result = B_ERROR;
	bool handOver = false;
	bool releaseDebugInfoLock = true;
	port_id oldDebuggerPort = -1;
	port_id nubPort = -1;

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = team->debug_info.flags;

	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// There's already a debugger installed.
		if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) {
			if (dontReplace) {
				// We're fine with already having a debugger.
				error = B_OK;
				done = true;
				result = team->debug_info.nub_port;
			} else {
				// a handover to another debugger is requested
				// Set the handing-over flag -- we'll clear both flags after
				// having sent the handed-over message to the new debugger.
				atomic_or(&team->debug_info.flags,
					B_TEAM_DEBUG_DEBUGGER_HANDING_OVER);

				oldDebuggerPort = team->debug_info.debugger_port;
				result = nubPort = team->debug_info.nub_port;
				if (causingThread < 0)
					causingThread = team->debug_info.causing_thread;

				// set the new debugger
				install_team_debugger_init_debug_infos(team, debuggerTeam,
					debuggerPort, nubPort, team->debug_info.nub_thread,
					team->debug_info.debugger_write_lock, causingThread);

				releaseDebugInfoLock = false;
				handOver = true;
				done = true;
			}
		} else {
			// there's already a debugger installed
			error = (dontReplace ? B_OK : B_BAD_VALUE);
			done = true;
			result = team->debug_info.nub_port;
		}
	} else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0
		&& useDefault) {
		// No debugger yet, disable_debugger() had been invoked, and we
		// would install the default debugger. Just fail.
		error = B_BAD_VALUE;
	}

	// in case of a handover the lock has already been released
	if (releaseDebugInfoLock)
		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	restore_interrupts(state);
	if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) {
		// The old debugger must just have died. Just proceed as
		// if there was no debugger installed. We may still be too
		// early, in which case we'll fail, but this race condition
		// should be unbelievably rare and relatively harmless.
		handOver = false;
		done = false;
	}

	if (handOver) {
		// prepare the handed-over message
		debug_handed_over notification;
		notification.origin.thread = -1;
		notification.origin.team = teamID;
		notification.origin.nub_port = nubPort;
		notification.debugger = debuggerTeam;
		notification.debugger_port = debuggerPort;
		notification.causing_thread = causingThread;

		// notify the new debugger
		error = write_port_etc(debuggerPort,
			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
		if (error != B_OK) {
			dprintf("install_team_debugger(): Failed to send message to new "
				"debugger: %s\n", strerror(error));
		}

		// clear the handed-over and handing-over flags
		state = disable_interrupts();
		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		atomic_and(&team->debug_info.flags,
			~(B_TEAM_DEBUG_DEBUGGER_HANDOVER
				| B_TEAM_DEBUG_DEBUGGER_HANDING_OVER));

		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
		restore_interrupts(state);

		finish_debugger_change(team);

		// notify the nub thread
		kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER,
			NULL, 0);

		// notify the old debugger
		error = write_port_etc(oldDebuggerPort,
			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
		if (error != B_OK) {
			TRACE(("install_team_debugger(): Failed to send message to old "
				"debugger: %s\n", strerror(error)));
		}

		TRACE(("install_team_debugger() done: handed over to debugger: team: "
			"%ld, port: %ld\n", debuggerTeam, debuggerPort));

		return result;
	}
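
	// No handover: if a debugger was already installed (done) or something
	// went wrong, bail out here; otherwise set up a fresh debugging
	// infrastructure below.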
	if (done || error != B_OK) {
		TRACE(("install_team_debugger() done1: %ld\n",
			(error == B_OK ? result : error)));
		finish_debugger_change(team);
		return (error == B_OK ? result : error);
	}

	// create the debugger write lock semaphore
	char nameBuffer[B_OS_NAME_LENGTH];
	snprintf(nameBuffer, sizeof(nameBuffer), "team %ld debugger port write",
		teamID);
	sem_id debuggerWriteLock = create_sem(1, nameBuffer);
	if (debuggerWriteLock < 0)
		error = debuggerWriteLock;

	// create the nub port
	snprintf(nameBuffer, sizeof(nameBuffer), "team %ld debug", teamID);
	if (error == B_OK) {
		nubPort = create_port(1, nameBuffer);
		if (nubPort < 0)
			error = nubPort;
		else
			result = nubPort;
	}

	// make the debugger team the port owner; thus we know when the debugger
	// is gone and can clean up
	if (error == B_OK)
		error = set_port_owner(nubPort, debuggerTeam);

	// create the breakpoint manager
	BreakpointManager* breakpointManager = NULL;
	if (error == B_OK) {
		breakpointManager = new(std::nothrow) BreakpointManager;
		if (breakpointManager != NULL)
			error = breakpointManager->Init();
		else
			error = B_NO_MEMORY;
	}

	// spawn the nub thread
	thread_id nubThread = -1;
	if (error == B_OK) {
		snprintf(nameBuffer, sizeof(nameBuffer), "team %ld debug task", teamID);
		nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer,
			B_NORMAL_PRIORITY, NULL, teamID, -1);
		if (nubThread < 0)
			error = nubThread;
	}

	// now adjust the debug info accordingly
	if (error == B_OK) {
		state = disable_interrupts();
		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		team->debug_info.breakpoint_manager = breakpointManager;
		install_team_debugger_init_debug_infos(team, debuggerTeam,
			debuggerPort, nubPort, nubThread, debuggerWriteLock,
			causingThread);

		restore_interrupts(state);
	}

	finish_debugger_change(team);

	// if everything went fine, resume the nub thread, otherwise clean up
	if (error == B_OK) {
		resume_thread(nubThread);
	} else {
		// delete port and terminate thread
		if (nubPort >= 0) {
			set_port_owner(nubPort, B_CURRENT_TEAM);
			delete_port(nubPort);
		}
		if (nubThread >= 0) {
			int32 result;
			wait_for_thread(nubThread, &result);
		}

		delete breakpointManager;
	}

	TRACE(("install_team_debugger() done2: %ld\n",
		(error == B_OK ? result : error)));
	return (error == B_OK ? result : error);
}


static status_t
ensure_debugger_installed()
{
	port_id port = install_team_debugger(B_CURRENT_TEAM, -1,
		thread_get_current_thread_id(), true, true);
	return port >= 0 ? B_OK : port;
}


// #pragma mark -
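
// The _user_*() functions below are the syscall backends for the userland
// debugging API declared in <debugger.h>.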

void
_user_debugger(const char *userMessage)
{
	// install the default debugger, if there is none yet
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		// time to commit suicide
		char buffer[128];
		ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer));
		if (length >= 0) {
			dprintf("_user_debugger(): Failed to install debugger. Message "
				"is: `%s'\n", buffer);
		} else {
			dprintf("_user_debugger(): Failed to install debugger. Message "
				"is: %p (%s)\n", userMessage, strerror(length));
		}
		_user_exit_team(1);
	}

	// prepare the message
	debug_debugger_call message;
	message.message = (void*)userMessage;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message,
		sizeof(message), true);
}


int
_user_disable_debugger(int state)
{
	struct team *team = thread_get_current_thread()->team;

	TRACE(("_user_disable_debugger(%d): team: %ld\n", state, team->id));

	cpu_status cpuState = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 oldFlags;
	if (state) {
		oldFlags = atomic_or(&team->debug_info.flags,
			B_TEAM_DEBUG_DEBUGGER_DISABLED);
	} else {
		oldFlags = atomic_and(&team->debug_info.flags,
			~B_TEAM_DEBUG_DEBUGGER_DISABLED);
	}

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
	restore_interrupts(cpuState);

	// TODO: Check whether the return value really is the old state.
	return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED);
}


status_t
_user_install_default_debugger(port_id debuggerPort)
{
	// if supplied, check whether the port is a valid port
	if (debuggerPort >= 0) {
		port_info portInfo;
		status_t error = get_port_info(debuggerPort, &portInfo);
		if (error != B_OK)
			return error;

		// the debugger team must not be the kernel team
		if (portInfo.team == team_get_kernel_team_id())
			return B_NOT_ALLOWED;
	}

	atomic_set(&sDefaultDebuggerPort, debuggerPort);

	return B_OK;
}


port_id
_user_install_team_debugger(team_id teamID, port_id debuggerPort)
{
	return install_team_debugger(teamID, debuggerPort, -1, false, false);
}
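
// An illustrative (not normative) userland sequence that ends up here, using
// the public install_team_debugger() from <debugger.h>:
//
//	port_id debuggerPort = create_port(100, "debugger port");
//	port_id nubPort = install_team_debugger(teamID, debuggerPort);
//	if (nubPort < 0)
//		;	// no debugger was installed -- handle the error
//
// Debug events for teamID then arrive as messages on debuggerPort; requests
// are sent to the team's nub thread via nubPort.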

status_t
_user_remove_team_debugger(team_id teamID)
{
	struct team* team;
	ConditionVariable debugChangeCondition;
	status_t error = prepare_debugger_change(teamID, debugChangeCondition,
		team);
	if (error != B_OK)
		return error;

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	thread_id nubThread = -1;
	port_id nubPort = -1;

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// there's a debugger installed
		nubThread = team->debug_info.nub_thread;
		nubPort = team->debug_info.nub_port;
	} else {
		// no debugger installed
		error = B_BAD_VALUE;
	}

	debugInfoLocker.Unlock();

	// Delete the nub port -- this will cause the nub thread to terminate and
	// remove the debugger.
	if (nubPort >= 0)
		delete_port(nubPort);

	finish_debugger_change(team);

	// wait for the nub thread
	if (nubThread >= 0)
		wait_for_thread(nubThread, NULL);

	return error;
}


status_t
_user_debug_thread(thread_id threadID)
{
	TRACE(("[%ld] _user_debug_thread(%ld)\n", find_thread(NULL), threadID));

	// tell the thread to stop as soon as possible
	status_t error = B_OK;
	cpu_status state = disable_interrupts();
	GRAB_THREAD_LOCK();

	struct thread *thread = thread_get_thread_struct_locked(threadID);
	if (!thread) {
		// thread doesn't exist any longer
		error = B_BAD_THREAD_ID;
	} else if (thread->team == team_get_kernel_team()) {
		// we can't debug the kernel team
		error = B_NOT_ALLOWED;
	} else if (thread->debug_info.flags & B_THREAD_DEBUG_DYING) {
		// the thread is already dying -- too late to debug it
		error = B_BAD_THREAD_ID;
	} else if (thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) {
		// don't debug the nub thread
		error = B_NOT_ALLOWED;
	} else if (!(thread->debug_info.flags & B_THREAD_DEBUG_STOPPED)) {
		// set the flag that tells the thread to stop as soon as possible
		atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP);

		update_thread_user_debug_flag(thread);

		switch (thread->state) {
			case B_THREAD_SUSPENDED:
				// thread suspended: wake it up
				scheduler_enqueue_in_run_queue(thread);
				break;

			default:
				// thread may be waiting: interrupt it
				thread_interrupt(thread, false);
					// TODO: If the thread is already in the kernel and e.g.
					// about to acquire a semaphore (before
					// thread_prepare_to_block()), we won't interrupt it.
					// Maybe we should rather send a signal (SIGTRAP).
				scheduler_reschedule_if_necessary_locked();
				break;
		}
	}

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return error;
}


void
_user_wait_for_debugger(void)
{
	debug_thread_debugged message;
	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message,
		sizeof(message), false);
}
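
// The following two syscalls let a team set and clear break-/watchpoints on
// itself directly via the arch hooks while no debugger is installed; with a
// debugger installed they fail, since break- and watchpoints are then owned
// by the debugger's BreakpointManager.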

status_t
_user_set_debugger_breakpoint(void *address, uint32 type, int32 length,
	bool watchpoint)
{
	// check the address and size
	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
		return B_BAD_ADDRESS;
	if (watchpoint && length < 0)
		return B_BAD_VALUE;

	// check whether a debugger is installed already
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		return B_BAD_VALUE;

	// We can't help it, here's a small but relatively harmless race
	// condition, since a debugger could be installed in the meantime. The
	// worst case is that we install a break/watchpoint the debugger doesn't
	// know about.

	// set the break/watchpoint
	status_t result;
	if (watchpoint)
		result = arch_set_watchpoint(address, type, length);
	else
		result = arch_set_breakpoint(address);

	if (result == B_OK)
		update_threads_breakpoints_flag();

	return result;
}


status_t
_user_clear_debugger_breakpoint(void *address, bool watchpoint)
{
	// check the address
	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
		return B_BAD_ADDRESS;

	// check whether a debugger is installed already
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		return B_BAD_VALUE;

	// We can't help it, here's a small but relatively harmless race
	// condition, since a debugger could be installed in the meantime. The
	// worst case is that we clear a break/watchpoint the debugger has just
	// installed.

	// clear the break/watchpoint
	status_t result;
	if (watchpoint)
		result = arch_clear_watchpoint(address);
	else
		result = arch_clear_breakpoint(address);

	if (result == B_OK)
		update_threads_breakpoints_flag();

	return result;
}