/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <system_profiler.h>

#include <AutoDeleter.h>
#include <Referenceable.h>

#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>

#include <system_profiler_defs.h>

#include <cpu.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <listeners.h>
#include <Notifications.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <user_debugger.h>
#include <vm/vm.h>

#include <arch/debug.h>

#include "IOSchedulerRoster.h"


// This is the kernel-side implementation of the system profiling support.
// A userland team can register as system profiler, providing an area as buffer
// for events. Those events are team, thread, and image changes (added/removed),
// periodic sampling of the return address stack for each CPU, as well as
// scheduling and I/O scheduling events.


class SystemProfiler;


// minimum/maximum size of the table used for wait object caching
#define MIN_WAIT_OBJECT_COUNT	128
#define MAX_WAIT_OBJECT_COUNT	1024


static spinlock sProfilerLock = B_SPINLOCK_INITIALIZER;
static SystemProfiler* sProfiler = NULL;
static struct system_profiler_parameters* sRecordedParameters = NULL;


class SystemProfiler : public BReferenceable, private NotificationListener,
	private SchedulerListener, private WaitObjectListener {
public:
								SystemProfiler(team_id team,
									const area_info& userAreaInfo,
									const system_profiler_parameters&
										parameters);
								~SystemProfiler();

			team_id				TeamID() const	{ return fTeam; }

			status_t			Init();
			status_t			NextBuffer(size_t bytesRead,
									uint64* _droppedEvents);

private:
	virtual	void				EventOccurred(NotificationService& service,
									const KMessage* event);

	virtual	void				ThreadEnqueuedInRunQueue(Thread* thread);
	virtual	void				ThreadRemovedFromRunQueue(Thread* thread);
	virtual	void				ThreadScheduled(Thread* oldThread,
									Thread* newThread);

	virtual	void				SemaphoreCreated(sem_id id,
									const char* name);
	virtual	void				ConditionVariableInitialized(
									ConditionVariable* variable);
	virtual	void				MutexInitialized(mutex* lock);
	virtual	void				RWLockInitialized(rw_lock* lock);

			bool				_TeamAdded(Team* team);
			bool				_TeamRemoved(Team* team);
			bool				_TeamExec(Team* team);

			bool				_ThreadAdded(Thread* thread);
			bool				_ThreadRemoved(Thread* thread);

			bool				_ImageAdded(struct image* image);
			bool				_ImageRemoved(struct image* image);

			bool				_IOSchedulerAdded(IOScheduler* scheduler);
			bool				_IOSchedulerRemoved(IOScheduler* scheduler);
			bool				_IORequestScheduled(IOScheduler* scheduler,
									IORequest* request);
			bool				_IORequestFinished(IOScheduler* scheduler,
									IORequest* request);
			bool				_IOOperationStarted(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);
			bool				_IOOperationFinished(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);

			void				_WaitObjectCreated(addr_t object, uint32 type);
			void				_WaitObjectUsed(addr_t object, uint32 type);

	inline	void				_MaybeNotifyProfilerThreadLocked();
	inline	void				_MaybeNotifyProfilerThread();

	static	bool				_InitialImageIterator(struct image* image,
									void* cookie);

			void*				_AllocateBuffer(size_t size, int event, int cpu,
									int count);

	static	void				_InitTimers(void* cookie, int cpu);
	static	void				_UninitTimers(void* cookie, int cpu);
			void				_ScheduleTimer(int cpu);

			void				_DoSample();

	static	int32				_ProfilingEvent(struct timer* timer);

private:
			struct CPUProfileData {
				struct timer		timer;
				bigtime_t			timerEnd;
				bool				timerScheduled;
				addr_t				buffer[B_DEBUG_STACK_TRACE_DEPTH];
			};

			struct WaitObjectKey {
				addr_t	object;
				uint32	type;
			};

			struct WaitObject : DoublyLinkedListLinkImpl<WaitObject>,
					WaitObjectKey {
				struct WaitObject*	hash_link;
			};

			struct WaitObjectTableDefinition {
				typedef WaitObjectKey	KeyType;
				typedef WaitObject		ValueType;

				size_t HashKey(const WaitObjectKey& key) const
				{
					return (size_t)key.object ^ (size_t)key.type;
				}

				size_t Hash(const WaitObject* value) const
				{
					return HashKey(*value);
				}

				bool Compare(const WaitObjectKey& key,
					const WaitObject* value) const
				{
					return value->type == key.type
						&& value->object == key.object;
				}

				WaitObject*& GetLink(WaitObject* value) const
				{
					return value->hash_link;
				}
			};

			typedef DoublyLinkedList<WaitObject> WaitObjectList;
			typedef BOpenHashTable<WaitObjectTableDefinition> WaitObjectTable;

private:
			spinlock			fLock;
			team_id				fTeam;
			area_id				fUserArea;
			area_id				fKernelArea;
			size_t				fAreaSize;
			uint32				fFlags;
			uint32				fStackDepth;
			bigtime_t			fInterval;
			bool				fProfileKernel;
			system_profiler_buffer_header* fHeader;
			uint8*				fBufferBase;
			size_t				fBufferCapacity;
			size_t				fBufferStart;
			size_t				fBufferSize;
			uint64				fDroppedEvents;
			int64				fLastTeamAddedSerialNumber;
			int64				fLastThreadAddedSerialNumber;
			bool				fTeamNotificationsRequested;
			bool				fTeamNotificationsEnabled;
			bool				fThreadNotificationsRequested;
			bool				fThreadNotificationsEnabled;
			bool				fImageNotificationsRequested;
			bool				fImageNotificationsEnabled;
			bool				fIONotificationsRequested;
			bool				fIONotificationsEnabled;
			bool				fSchedulerNotificationsRequested;
			bool				fWaitObjectNotificationsRequested;
			Thread* volatile	fWaitingProfilerThread;
			bool				fProfilingActive;
			bool				fReentered[SMP_MAX_CPUS];
			CPUProfileData		fCPUData[SMP_MAX_CPUS];
			WaitObject*			fWaitObjectBuffer;
			int32				fWaitObjectCount;
			WaitObjectList		fUsedWaitObjects;
			WaitObjectList		fFreeWaitObjects;
			WaitObjectTable		fWaitObjectTable;
};
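

// For illustration only: a rough sketch of what a userland client of this
// interface looks like. It assumes the private syscall wrappers
// (_kern_system_profiler_start() and friends); the wrapper names and the
// event-parsing details are assumptions here -- see the userland "profile"
// tool for the real thing.
#if 0
static void
profile_for_a_while()
{
	// create the buffer area that will be handed to the kernel
	void* address;
	area_id buffer = create_area("profiling buffer", &address,
		B_ANY_ADDRESS, 1024 * 1024, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
	if (buffer < 0)
		return;

	system_profiler_parameters parameters = {};
	parameters.buffer_area = buffer;
	parameters.flags = B_SYSTEM_PROFILER_TEAM_EVENTS
		| B_SYSTEM_PROFILER_THREAD_EVENTS | B_SYSTEM_PROFILER_SAMPLING_EVENTS;
	parameters.interval = 1000;		// sample every millisecond
	parameters.stack_depth = 5;
	parameters.locking_lookup_size = 4096;

	if (_kern_system_profiler_start(&parameters) != B_OK)
		return;

	size_t bytesRead = 0;
	for (int i = 0; i < 100; i++) {
		uint64 droppedEvents;
		if (_kern_system_profiler_next_buffer(bytesRead, &droppedEvents)
				!= B_OK) {
			break;
		}
		// ... parse the events now available in the buffer area and set
		// bytesRead to the number of bytes consumed ...
	}

	_kern_system_profiler_stop();
	delete_area(buffer);
}
#endif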


/*!	Notifies the profiler thread when the profiling buffer is full enough.
	The caller must hold fLock.
*/
inline void
SystemProfiler::_MaybeNotifyProfilerThreadLocked()
{
	// If the buffer is full enough, notify the profiler.
	if (fWaitingProfilerThread != NULL && fBufferSize > fBufferCapacity / 2) {
		int cpu = smp_get_current_cpu();
		fReentered[cpu] = true;

		Thread* profilerThread = fWaitingProfilerThread;
		fWaitingProfilerThread = NULL;

		SpinLocker _(profilerThread->scheduler_lock);
		thread_unblock_locked(profilerThread, B_OK);

		fReentered[cpu] = false;
	}
}


inline void
SystemProfiler::_MaybeNotifyProfilerThread()
{
	if (fWaitingProfilerThread == NULL)
		return;

	InterruptsSpinLocker locker(fLock);

	_MaybeNotifyProfilerThreadLocked();
}


// #pragma mark - SystemProfiler public


SystemProfiler::SystemProfiler(team_id team, const area_info& userAreaInfo,
	const system_profiler_parameters& parameters)
	:
	fTeam(team),
	fUserArea(userAreaInfo.area),
	fKernelArea(-1),
	fAreaSize(userAreaInfo.size),
	fFlags(parameters.flags),
	fStackDepth(parameters.stack_depth),
	fInterval(parameters.interval),
	fProfileKernel(parameters.profile_kernel),
	fHeader(NULL),
	fBufferBase(NULL),
	fBufferCapacity(0),
	fBufferStart(0),
	fBufferSize(0),
	fDroppedEvents(0),
	fLastTeamAddedSerialNumber(0),
	fLastThreadAddedSerialNumber(0),
	fTeamNotificationsRequested(false),
	fTeamNotificationsEnabled(false),
	fThreadNotificationsRequested(false),
	fThreadNotificationsEnabled(false),
	fImageNotificationsRequested(false),
	fImageNotificationsEnabled(false),
	fIONotificationsRequested(false),
	fIONotificationsEnabled(false),
	fSchedulerNotificationsRequested(false),
	fWaitObjectNotificationsRequested(false),
	fWaitingProfilerThread(NULL),
	fProfilingActive(false),
	fWaitObjectBuffer(NULL),
	fWaitObjectCount(0),
	fUsedWaitObjects(),
	fFreeWaitObjects(),
	fWaitObjectTable()
{
	B_INITIALIZE_SPINLOCK(&fLock);

	memset(fReentered, 0, sizeof(fReentered));

	// compute the number of wait objects we want to cache
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		fWaitObjectCount = parameters.locking_lookup_size
			/ (sizeof(WaitObject) + (sizeof(void*) * 3 / 2));
		if (fWaitObjectCount < MIN_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MIN_WAIT_OBJECT_COUNT;
		if (fWaitObjectCount > MAX_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MAX_WAIT_OBJECT_COUNT;
	}
}
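

// A worked example of the sizing above, as a sketch (the 40-byte figure is an
// assumption for a typical 64-bit build): every cached entry costs
// sizeof(WaitObject) plus about 1.5 hash table slots, since Init() sizes
// fWaitObjectTable to fWaitObjectCount * 3 / 2.
#if 0
	const size_t lookupSize = 4096;
		// the default used by start_system_profiler()
	const size_t perObject = sizeof(WaitObject) + sizeof(void*) * 3 / 2;
		// e.g. 40 + 12 = 52 bytes
	int32 count = lookupSize / perObject;
		// 4096 / 52 = 78, which the MIN_WAIT_OBJECT_COUNT clamp raises to 128
#endif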


SystemProfiler::~SystemProfiler()
{
	// Wake up the user thread, if it is waiting, and mark profiling
	// inactive.
	InterruptsSpinLocker locker(fLock);
	if (fWaitingProfilerThread != NULL) {
		thread_unblock(fWaitingProfilerThread, B_OK);
		fWaitingProfilerThread = NULL;
	}
	fProfilingActive = false;
	locker.Unlock();

	// stop scheduler listening
	if (fSchedulerNotificationsRequested)
		scheduler_remove_listener(this);

	// stop wait object listening
	if (fWaitObjectNotificationsRequested) {
		InterruptsSpinLocker locker(gWaitObjectListenerLock);
		remove_wait_object_listener(this);
	}

	// deactivate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_UninitTimers, this);

	// cancel notifications
	NotificationManager& notificationManager
		= NotificationManager::Manager();

	// images
	if (fImageNotificationsRequested) {
		fImageNotificationsRequested = false;
		notificationManager.RemoveListener("images", NULL, *this);
	}

	// threads
	if (fThreadNotificationsRequested) {
		fThreadNotificationsRequested = false;
		notificationManager.RemoveListener("threads", NULL, *this);
	}

	// teams
	if (fTeamNotificationsRequested) {
		fTeamNotificationsRequested = false;
		notificationManager.RemoveListener("teams", NULL, *this);
	}

	// I/O
	if (fIONotificationsRequested) {
		fIONotificationsRequested = false;
		notificationManager.RemoveListener("I/O", NULL, *this);
	}

	// delete wait object related allocations
	fWaitObjectTable.Clear();
	delete[] fWaitObjectBuffer;

	// unlock the memory and delete the area
	if (fKernelArea >= 0) {
		unlock_memory(fHeader, fAreaSize, B_READ_DEVICE);
		delete_area(fKernelArea);
		fKernelArea = -1;
	}
}


status_t
SystemProfiler::Init()
{
	// clone the user area
	void* areaBase;
	fKernelArea = clone_area("profiling samples", &areaBase,
		B_ANY_KERNEL_ADDRESS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		fUserArea);
	if (fKernelArea < 0)
		return fKernelArea;

	// we need the memory locked
	status_t error = lock_memory(areaBase, fAreaSize, B_READ_DEVICE);
	if (error != B_OK) {
		delete_area(fKernelArea);
		fKernelArea = -1;
		return error;
	}

	// the buffer is ready for use
	fHeader = (system_profiler_buffer_header*)areaBase;
	fBufferBase = (uint8*)(fHeader + 1);
	fBufferCapacity = fAreaSize - (fBufferBase - (uint8*)areaBase);
	fHeader->start = 0;
	fHeader->size = 0;

	// allocate the wait object buffer and init the hash table
	if (fWaitObjectCount > 0) {
		fWaitObjectBuffer = new(std::nothrow) WaitObject[fWaitObjectCount];
		if (fWaitObjectBuffer == NULL)
			return B_NO_MEMORY;

		for (int32 i = 0; i < fWaitObjectCount; i++)
			fFreeWaitObjects.Add(fWaitObjectBuffer + i);

		error = fWaitObjectTable.Init(fWaitObjectCount * 3 / 2);
		if (error != B_OK)
			return error;
	}

	// start listening for notifications

	// teams
	NotificationManager& notificationManager
		= NotificationManager::Manager();
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		error = notificationManager.AddListener("teams",
			TEAM_ADDED | TEAM_REMOVED | TEAM_EXEC, *this);
		if (error != B_OK)
			return error;
		fTeamNotificationsRequested = true;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		error = notificationManager.AddListener("threads",
			THREAD_ADDED | THREAD_REMOVED, *this);
		if (error != B_OK)
			return error;
		fThreadNotificationsRequested = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		error = notificationManager.AddListener("images",
			IMAGE_ADDED | IMAGE_REMOVED, *this);
		if (error != B_OK)
			return error;
		fImageNotificationsRequested = true;
	}

	// I/O events
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		error = notificationManager.AddListener("I/O",
			IO_SCHEDULER_ADDED | IO_SCHEDULER_REMOVED
				| IO_SCHEDULER_REQUEST_SCHEDULED
				| IO_SCHEDULER_REQUEST_FINISHED
				| IO_SCHEDULER_OPERATION_STARTED
				| IO_SCHEDULER_OPERATION_FINISHED,
			*this);
		if (error != B_OK)
			return error;
		fIONotificationsRequested = true;
	}

	// We need to fill the buffer with the initial state of teams, threads,
	// and images.

	// teams
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		TeamListIterator iterator;
		while (Team* team = iterator.Next()) {
			locker.Unlock();

			bool added = _TeamAdded(team);

			// release the reference returned by the iterator
			team->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fTeamNotificationsEnabled = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		if (image_iterate_through_images(&_InitialImageIterator, this) != NULL)
			return B_BUFFER_OVERFLOW;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		ThreadListIterator iterator;
		while (Thread* thread = iterator.Next()) {
			locker.Unlock();

			bool added = _ThreadAdded(thread);

			// release the reference returned by the iterator
			thread->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fThreadNotificationsEnabled = true;
	}

	fProfilingActive = true;

	// start scheduler and wait object listening
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		scheduler_add_listener(this);
		fSchedulerNotificationsRequested = true;

		InterruptsSpinLocker waitObjectLocker(gWaitObjectListenerLock);
		add_wait_object_listener(this);
		fWaitObjectNotificationsRequested = true;
		waitObjectLocker.Unlock();

		// fake schedule events for the initially running threads
		int32 cpuCount = smp_get_num_cpus();
		for (int32 i = 0; i < cpuCount; i++) {
			Thread* thread = gCPU[i].running_thread;
			if (thread != NULL)
				ThreadScheduled(thread, thread);
		}
	}

	// I/O scheduling
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		IOSchedulerRoster* roster = IOSchedulerRoster::Default();
		AutoLocker<IOSchedulerRoster> rosterLocker(roster);

		for (IOSchedulerList::ConstIterator it
				= roster->SchedulerList().GetIterator();
			IOScheduler* scheduler = it.Next();) {
			_IOSchedulerAdded(scheduler);
		}

		fIONotificationsEnabled = true;
	}

	// activate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_InitTimers, this);

	return B_OK;
}


status_t
SystemProfiler::NextBuffer(size_t bytesRead, uint64* _droppedEvents)
{
	InterruptsSpinLocker locker(fLock);

	if (fWaitingProfilerThread != NULL || !fProfilingActive
		|| bytesRead > fBufferSize) {
		return B_BAD_VALUE;
	}

	fBufferSize -= bytesRead;
	fBufferStart += bytesRead;
	if (fBufferStart >= fBufferCapacity)
		fBufferStart -= fBufferCapacity;
	fHeader->size = fBufferSize;
	fHeader->start = fBufferStart;

	// already enough data in the buffer to return?
	if (fBufferSize > fBufferCapacity / 2)
		return B_OK;

	// Wait until the buffer gets too full or an error or a timeout occurs.
	while (true) {
		Thread* thread = thread_get_current_thread();
		fWaitingProfilerThread = thread;

		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "system profiler buffer");

		locker.Unlock();

		status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT,
			1000000);

		locker.Lock();

		if (error == B_OK) {
			// the thread that woke us up has unset fWaitingProfilerThread
			// for us
			break;
		}

		fWaitingProfilerThread = NULL;

		if (error != B_TIMED_OUT)
			return error;

		// just the timeout -- return, if the buffer is not empty
		if (fBufferSize > 0)
			break;
	}

	if (_droppedEvents != NULL) {
		*_droppedEvents = fDroppedEvents;
		fDroppedEvents = 0;
	}

	return B_OK;
}
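

// For illustration: how a consumer is expected to walk the events exposed by
// NextBuffer(). This is a sketch, not code from this file; the wrap handling
// is inferred from _AllocateBuffer() below, which marks the wrap point with a
// B_SYSTEM_PROFILER_BUFFER_END header whose trailing bytes up to the buffer
// capacity are padding.
#if 0
static size_t
process_events(const system_profiler_buffer_header* header, uint8* base,
	size_t capacity)
{
	size_t offset = header->start;
	size_t remaining = header->size;
	size_t bytesRead = 0;

	while (remaining > 0) {
		system_profiler_event_header* event
			= (system_profiler_event_header*)(base + offset);

		if (event->event == B_SYSTEM_PROFILER_BUFFER_END) {
			// the writer wrapped -- skip the padding and continue at offset 0
			size_t padding = capacity - offset;
			remaining -= padding;
			bytesRead += padding;
			offset = 0;
			continue;
		}

		// ... dispatch on event->event here ...

		size_t eventSize = sizeof(*event) + event->size;
		offset += eventSize;
		remaining -= eventSize;
		bytesRead += eventSize;
	}

	return bytesRead;
		// to be passed to the next NextBuffer() call
}
#endif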


// #pragma mark - NotificationListener interface


void
SystemProfiler::EventOccurred(NotificationService& service,
	const KMessage* event)
{
	int32 eventCode;
	if (event->FindInt32("event", &eventCode) != B_OK)
		return;

	if (strcmp(service.Name(), "teams") == 0) {
		Team* team = (Team*)event->GetPointer("teamStruct", NULL);
		if (team == NULL)
			return;

		switch (eventCode) {
			case TEAM_ADDED:
				if (fTeamNotificationsEnabled)
					_TeamAdded(team);
				break;

			case TEAM_REMOVED:
				if (team->id == fTeam) {
					// The profiling team is gone -- uninstall the profiler!
					InterruptsSpinLocker locker(sProfilerLock);
					if (sProfiler != this)
						return;

					sProfiler = NULL;
					locker.Unlock();

					ReleaseReference();
					return;
				}

				// When we're still doing the initial team list scan, we are
				// also interested in removals that happened to teams we have
				// already seen.
				if (fTeamNotificationsEnabled
					|| team->serial_number <= fLastTeamAddedSerialNumber) {
					_TeamRemoved(team);
				}
				break;

			case TEAM_EXEC:
				if (fTeamNotificationsEnabled)
					_TeamExec(team);
				break;
		}
	} else if (strcmp(service.Name(), "threads") == 0) {
		Thread* thread = (Thread*)event->GetPointer("threadStruct", NULL);
		if (thread == NULL)
			return;

		switch (eventCode) {
			case THREAD_ADDED:
				if (fThreadNotificationsEnabled)
					_ThreadAdded(thread);
				break;

			case THREAD_REMOVED:
				// When we're still doing the initial thread list scan, we are
				// also interested in removals that happened to threads we have
				// already seen.
				if (fThreadNotificationsEnabled
					|| thread->serial_number <= fLastThreadAddedSerialNumber) {
					_ThreadRemoved(thread);
				}
				break;
		}
	} else if (strcmp(service.Name(), "images") == 0) {
		if (!fImageNotificationsEnabled)
			return;

		struct image* image = (struct image*)event->GetPointer(
			"imageStruct", NULL);
		if (image == NULL)
			return;

		switch (eventCode) {
			case IMAGE_ADDED:
				_ImageAdded(image);
				break;

			case IMAGE_REMOVED:
				_ImageRemoved(image);
				break;
		}
	} else if (strcmp(service.Name(), "I/O") == 0) {
		if (!fIONotificationsEnabled)
			return;

		IOScheduler* scheduler = (IOScheduler*)event->GetPointer("scheduler",
			NULL);
		if (scheduler == NULL)
			return;

		IORequest* request = (IORequest*)event->GetPointer("request", NULL);
		IOOperation* operation = (IOOperation*)event->GetPointer("operation",
			NULL);

		switch (eventCode) {
			case IO_SCHEDULER_ADDED:
				_IOSchedulerAdded(scheduler);
				break;

			case IO_SCHEDULER_REMOVED:
				_IOSchedulerRemoved(scheduler);
				break;

			case IO_SCHEDULER_REQUEST_SCHEDULED:
				_IORequestScheduled(scheduler, request);
				break;

			case IO_SCHEDULER_REQUEST_FINISHED:
				_IORequestFinished(scheduler, request);
				break;

			case IO_SCHEDULER_OPERATION_STARTED:
				_IOOperationStarted(scheduler, request, operation);
				break;

			case IO_SCHEDULER_OPERATION_FINISHED:
				_IOOperationFinished(scheduler, request, operation);
				break;
		}
	}

	_MaybeNotifyProfilerThread();
}


// #pragma mark - SchedulerListener interface


void
SystemProfiler::ThreadEnqueuedInRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_enqueued_in_run_queue* event
		= (system_profiler_thread_enqueued_in_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_enqueued_in_run_queue),
				B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;
	event->priority = thread->priority;

	fHeader->size = fBufferSize;

	// Unblock the profiler thread, if necessary, but don't unblock the thread,
	// if it had been waiting on a condition variable, since then we'd likely
	// deadlock in ConditionVariable::NotifyOne(), as it acquires a static
	// spinlock.
	if (thread->wait.type != THREAD_BLOCK_TYPE_CONDITION_VARIABLE)
		_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadRemovedFromRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_removed_from_run_queue* event
		= (system_profiler_thread_removed_from_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_removed_from_run_queue),
				B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadScheduled(Thread* oldThread, Thread* newThread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	// If the old thread starts waiting, handle the wait object.
	if (oldThread->state == B_THREAD_WAITING)
		_WaitObjectUsed((addr_t)oldThread->wait.object, oldThread->wait.type);

	system_profiler_thread_scheduled* event
		= (system_profiler_thread_scheduled*)
			_AllocateBuffer(sizeof(system_profiler_thread_scheduled),
				B_SYSTEM_PROFILER_THREAD_SCHEDULED, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = newThread->id;
	event->previous_thread = oldThread->id;
	event->previous_thread_state = oldThread->state;
	event->previous_thread_wait_object_type = oldThread->wait.type;
	event->previous_thread_wait_object = (addr_t)oldThread->wait.object;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


// #pragma mark - WaitObjectListener interface


void
SystemProfiler::SemaphoreCreated(sem_id id, const char* name)
{
	_WaitObjectCreated((addr_t)id, THREAD_BLOCK_TYPE_SEMAPHORE);
}


void
SystemProfiler::ConditionVariableInitialized(ConditionVariable* variable)
{
	_WaitObjectCreated((addr_t)variable, THREAD_BLOCK_TYPE_CONDITION_VARIABLE);
}


void
SystemProfiler::MutexInitialized(mutex* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_MUTEX);
}


void
SystemProfiler::RWLockInitialized(rw_lock* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_RW_LOCK);
}


// #pragma mark - SystemProfiler private


bool
SystemProfiler::_TeamAdded(Team* team)
{
	TeamLocker teamLocker(team);

	size_t nameLen = strlen(team->Name());
	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	// During the initial scan check whether the team is already gone again.
	// Later this cannot happen, since the team creator notifies us before
	// actually starting the team.
	if (!fTeamNotificationsEnabled && team->state >= TEAM_STATE_DEATH)
		return true;

	if (team->serial_number > fLastTeamAddedSerialNumber)
		fLastTeamAddedSerialNumber = team->serial_number;

	system_profiler_team_added* event = (system_profiler_team_added*)
		_AllocateBuffer(
			sizeof(system_profiler_team_added) + nameLen + 1 + argsLen,
			B_SYSTEM_PROFILER_TEAM_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strcpy(event->name, team->Name());
	event->args_offset = nameLen + 1;
	strcpy(event->name + nameLen + 1, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamRemoved(Team* team)
{
	// TODO: It is possible that we get remove notifications for teams that
	// had already been removed from the global team list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	TeamLocker teamLocker(team);
	InterruptsSpinLocker locker(fLock);

	system_profiler_team_removed* event = (system_profiler_team_removed*)
		_AllocateBuffer(sizeof(system_profiler_team_removed),
			B_SYSTEM_PROFILER_TEAM_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamExec(Team* team)
{
	TeamLocker teamLocker(team);

	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	system_profiler_team_exec* event = (system_profiler_team_exec*)
		_AllocateBuffer(sizeof(system_profiler_team_exec) + argsLen,
			B_SYSTEM_PROFILER_TEAM_EXEC, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strlcpy(event->thread_name, team->main_thread->name,
		sizeof(event->thread_name));
	strcpy(event->args, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadAdded(Thread* thread)
{
	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	// During the initial scan check whether the thread is already gone again.
	// Later this cannot happen, since the thread creator notifies us before
	// actually starting the thread.
	if (!fThreadNotificationsEnabled && !thread->IsAlive())
		return true;

	if (thread->serial_number > fLastThreadAddedSerialNumber)
		fLastThreadAddedSerialNumber = thread->serial_number;

	system_profiler_thread_added* event = (system_profiler_thread_added*)
		_AllocateBuffer(sizeof(system_profiler_thread_added),
			B_SYSTEM_PROFILER_THREAD_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;
	strlcpy(event->name, thread->name, sizeof(event->name));
	{
		SpinLocker timeLocker(thread->time_lock);
		event->cpu_time = thread->CPUTime(false);
	}

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadRemoved(Thread* thread)
{
	// TODO: It is possible that we get remove notifications for threads that
	// had already been removed from the global thread list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	system_profiler_thread_removed* event
		= (system_profiler_thread_removed*)
			_AllocateBuffer(sizeof(system_profiler_thread_removed),
				B_SYSTEM_PROFILER_THREAD_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;
	{
		SpinLocker timeLocker(thread->time_lock);
		event->cpu_time = thread->CPUTime(false);
	}

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageAdded(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_added* event = (system_profiler_image_added*)
		_AllocateBuffer(sizeof(system_profiler_image_added),
			B_SYSTEM_PROFILER_IMAGE_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->info = image->info.basic_info;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageRemoved(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_removed* event = (system_profiler_image_removed*)
		_AllocateBuffer(sizeof(system_profiler_image_removed),
			B_SYSTEM_PROFILER_IMAGE_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->image = image->info.basic_info.id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerAdded(IOScheduler* scheduler)
{
	size_t nameLen = strlen(scheduler->Name());

	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_added* event
		= (system_profiler_io_scheduler_added*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_added) + nameLen,
			B_SYSTEM_PROFILER_IO_SCHEDULER_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();
	strcpy(event->name, scheduler->Name());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerRemoved(IOScheduler* scheduler)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_removed* event
		= (system_profiler_io_scheduler_removed*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_removed),
			B_SYSTEM_PROFILER_IO_SCHEDULER_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestScheduled(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_scheduled* event
		= (system_profiler_io_request_scheduled*)_AllocateBuffer(
			sizeof(system_profiler_io_request_scheduled),
			B_SYSTEM_PROFILER_IO_REQUEST_SCHEDULED, 0, 0);
	if (event == NULL)
		return false;

	IORequestOwner* owner = request->Owner();

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->team = owner->team;
	event->thread = owner->thread;
	event->request = request;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();
	event->priority = owner->priority;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestFinished(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_finished* event
		= (system_profiler_io_request_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_request_finished),
			B_SYSTEM_PROFILER_IO_REQUEST_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationStarted(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_started* event
		= (system_profiler_io_operation_started*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_started),
			B_SYSTEM_PROFILER_IO_OPERATION_STARTED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationFinished(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_finished* event
		= (system_profiler_io_operation_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_finished),
			B_SYSTEM_PROFILER_IO_OPERATION_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


void
SystemProfiler::_WaitObjectCreated(addr_t object, uint32 type)
{
	SpinLocker locker(fLock);

	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If found, remove it and add it to the free list. This might sound weird,
	// but it makes sense, since we lazily track *used* wait objects only.
	// I.e. the object in the table is now guaranteed to be obsolete.
	if (waitObject != NULL) {
		fWaitObjectTable.RemoveUnchecked(waitObject);
		fUsedWaitObjects.Remove(waitObject);
		fFreeWaitObjects.Add(waitObject, false);
	}
}


void
SystemProfiler::_WaitObjectUsed(addr_t object, uint32 type)
{
	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If already known, re-queue it as most recently used and be done.
	if (waitObject != NULL) {
		fUsedWaitObjects.Remove(waitObject);
		fUsedWaitObjects.Add(waitObject);
		return;
	}

	// not known yet -- get the info
	const char* name = NULL;
	const void* referencedObject = NULL;

	switch (type) {
		case THREAD_BLOCK_TYPE_SEMAPHORE:
		{
			name = sem_get_name_unsafe((sem_id)object);
			break;
		}

		case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
		{
			ConditionVariable* variable = (ConditionVariable*)object;
			name = variable->ObjectType();
			referencedObject = variable->Object();
			break;
		}

		case THREAD_BLOCK_TYPE_MUTEX:
		{
			mutex* lock = (mutex*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_RW_LOCK:
		{
			rw_lock* lock = (rw_lock*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_OTHER:
		{
			name = (const char*)(void*)object;
			break;
		}

		case THREAD_BLOCK_TYPE_OTHER_OBJECT:
		case THREAD_BLOCK_TYPE_SNOOZE:
		case THREAD_BLOCK_TYPE_SIGNAL:
		default:
			return;
	}

	// add the event
	size_t nameLen = name != NULL ? strlen(name) : 0;

	system_profiler_wait_object_info* event
		= (system_profiler_wait_object_info*)
			_AllocateBuffer(sizeof(system_profiler_wait_object_info) + nameLen,
				B_SYSTEM_PROFILER_WAIT_OBJECT_INFO, 0, 0);
	if (event == NULL)
		return;

	event->type = type;
	event->object = object;
	event->referenced_object = (addr_t)referencedObject;
	if (name != NULL)
		strcpy(event->name, name);
	else
		event->name[0] = '\0';

	fHeader->size = fBufferSize;

	// add the wait object

	// get a free one or steal the least recently used one
	waitObject = fFreeWaitObjects.RemoveHead();
	if (waitObject == NULL) {
		waitObject = fUsedWaitObjects.RemoveHead();
		fWaitObjectTable.RemoveUnchecked(waitObject);
	}

	waitObject->object = object;
	waitObject->type = type;
	fWaitObjectTable.InsertUnchecked(waitObject);
	fUsedWaitObjects.Add(waitObject);
}


/*static*/ bool
SystemProfiler::_InitialImageIterator(struct image* image, void* cookie)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->fImageNotificationsEnabled = true;
		// Set that here, since the image lock is being held now.
	return !self->_ImageAdded(image);
}
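

// The wait object cache in _WaitObjectUsed() above combines an open hash
// table (lookup), a doubly linked "used" list (recency order), and a free
// list drawn from a fixed-size buffer. Distilled to its core, the policy is a
// classic LRU cache; a minimal standalone sketch in plain C++ (not kernel
// code):
#if 0
#include <list>
#include <unordered_map>

struct LRUCache {
	std::list<int> used;		// front = least recently used
	std::unordered_map<int, std::list<int>::iterator> table;
	size_t capacity;

	void Use(int key)
	{
		std::unordered_map<int, std::list<int>::iterator>::iterator it
			= table.find(key);
		if (it != table.end()) {
			// already known: re-queue as most recently used
			used.erase(it->second);
			table.erase(it);
		} else if (used.size() == capacity) {
			// no free entry: steal the least recently used one
			table.erase(used.front());
			used.pop_front();
		}
		used.push_back(key);
		table[key] = --used.end();
	}
};
#endif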


void*
SystemProfiler::_AllocateBuffer(size_t size, int event, int cpu, int count)
{
	// round the payload up to a multiple of 4 bytes and add the event header
	size = (size + 3) / 4 * 4;
	size += sizeof(system_profiler_event_header);

	size_t end = fBufferStart + fBufferSize;
	if (end + size > fBufferCapacity) {
		// Buffer is wrapped or needs wrapping.
		if (end < fBufferCapacity) {
			// not wrapped yet, but needed
			system_profiler_event_header* header
				= (system_profiler_event_header*)(fBufferBase + end);
			header->event = B_SYSTEM_PROFILER_BUFFER_END;
			fBufferSize = fBufferCapacity - fBufferStart;
			end = 0;
		} else
			end -= fBufferCapacity;

		if (end + size > fBufferStart) {
			fDroppedEvents++;
			return NULL;
		}
	}

	system_profiler_event_header* header
		= (system_profiler_event_header*)(fBufferBase + end);
	header->event = event;
	header->cpu = cpu;
	header->size = size - sizeof(system_profiler_event_header);

	fBufferSize += size;

	return header + 1;
}


/*static*/ void
SystemProfiler::_InitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->_ScheduleTimer(cpu);
}


/*static*/ void
SystemProfiler::_UninitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;

	CPUProfileData& cpuData = self->fCPUData[cpu];
	cancel_timer(&cpuData.timer);
	cpuData.timerScheduled = false;
}


void
SystemProfiler::_ScheduleTimer(int cpu)
{
	CPUProfileData& cpuData = fCPUData[cpu];
	cpuData.timerEnd = system_time() + fInterval;
	cpuData.timer.user_data = this;
	add_timer(&cpuData.timer, &_ProfilingEvent, fInterval,
		B_ONE_SHOT_RELATIVE_TIMER);
	cpuData.timerScheduled = true;
}


void
SystemProfiler::_DoSample()
{
	Thread* thread = thread_get_current_thread();
	int cpu = thread->cpu->cpu_num;
	CPUProfileData& cpuData = fCPUData[cpu];

	// get the samples
	uint32 flags = STACK_TRACE_USER;
	int32 skipIFrames = 0;
	if (fProfileKernel) {
		flags |= STACK_TRACE_KERNEL;
		skipIFrames = 1;
	}
	int32 count = arch_debug_get_stack_trace(cpuData.buffer, fStackDepth,
		skipIFrames, 0, flags);

	InterruptsSpinLocker locker(fLock);

	system_profiler_samples* event = (system_profiler_samples*)
		_AllocateBuffer(sizeof(system_profiler_samples)
				+ count * sizeof(addr_t),
			B_SYSTEM_PROFILER_SAMPLES, cpu, count);
	if (event == NULL)
		return;

	event->thread = thread->id;
	memcpy(event->samples, cpuData.buffer, count * sizeof(addr_t));

	fHeader->size = fBufferSize;
}


/*static*/ int32
SystemProfiler::_ProfilingEvent(struct timer* timer)
{
	SystemProfiler* self = (SystemProfiler*)timer->user_data;

	self->_DoSample();
	self->_ScheduleTimer(timer->cpu);

	return B_HANDLED_INTERRUPT;
}
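

// A worked example of the wrap logic in _AllocateBuffer() above, with a
// hypothetical capacity of 100 bytes, fBufferStart = 40 and fBufferSize = 50:
// the write position is end = 90, so a 20-byte allocation does not fit before
// the capacity. A B_SYSTEM_PROFILER_BUFFER_END header is therefore written at
// offset 90, the 10 trailing bytes are accounted to fBufferSize (now 60), and
// end wraps to 0. Since 0 + 20 <= fBufferStart, the event is placed at the
// start of the buffer; had it reached into the unread data at offset 40, it
// would have been dropped and fDroppedEvents incremented instead.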


// #pragma mark - private kernel API


#if SYSTEM_PROFILER

status_t
start_system_profiler(size_t areaSize, uint32 stackDepth, bigtime_t interval)
{
	struct ParameterDeleter {
		ParameterDeleter(area_id area)
			:
			fArea(area),
			fDetached(false)
		{
		}

		~ParameterDeleter()
		{
			if (!fDetached) {
				delete_area(fArea);
				delete sRecordedParameters;
				sRecordedParameters = NULL;
			}
		}

		void Detach()
		{
			fDetached = true;
		}

	private:
		area_id	fArea;
		bool	fDetached;
	};

	void* address;
	area_id area = create_area("kernel profile data", &address,
		B_ANY_KERNEL_ADDRESS, areaSize, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		return area;

	ParameterDeleter parameterDeleter(area);

	sRecordedParameters = new(std::nothrow) system_profiler_parameters;
	if (sRecordedParameters == NULL)
		return B_NO_MEMORY;

	sRecordedParameters->buffer_area = area;
	sRecordedParameters->flags = B_SYSTEM_PROFILER_TEAM_EVENTS
		| B_SYSTEM_PROFILER_THREAD_EVENTS | B_SYSTEM_PROFILER_IMAGE_EVENTS
		| B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS
		| B_SYSTEM_PROFILER_SAMPLING_EVENTS;
	sRecordedParameters->locking_lookup_size = 4096;
	sRecordedParameters->interval = interval;
	sRecordedParameters->stack_depth = stackDepth;
	sRecordedParameters->profile_kernel = true;
		// explicitly initialized -- the constructor reads this field

	area_info areaInfo;
	get_area_info(area, &areaInfo);

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(B_SYSTEM_TEAM,
		areaInfo, *sRecordedParameters);
	if (profiler == NULL)
		return B_NO_MEMORY;

	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	status_t error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;

	parameterDeleter.Detach();
	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


void
stop_system_profiler()
{
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL)
		return;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();
}

#endif	// SYSTEM_PROFILER
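

// How the private kernel API above is typically used: with SYSTEM_PROFILER
// enabled in the build configuration, the kernel starts profiling early in
// the boot process and a privileged userland tool later collects the result
// via _user_system_profiler_recorded(). A sketch of such a call site; the
// SYSTEM_PROFILE_* tuning constants are assumed to come from
// kernel_debug_config.h:
#if 0
	// e.g. from the kernel's main(), once the scheduler is running
	start_system_profiler(SYSTEM_PROFILE_SIZE, SYSTEM_PROFILE_STACK_DEPTH,
		SYSTEM_PROFILE_INTERVAL);
#endif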


// #pragma mark - syscalls


status_t
_user_system_profiler_start(struct system_profiler_parameters* userParameters)
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	// copy params to the kernel
	struct system_profiler_parameters parameters;
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters)
		|| user_memcpy(&parameters, userParameters, sizeof(parameters))
			!= B_OK) {
		return B_BAD_ADDRESS;
	}

	// check the parameters
	team_id team = thread_get_current_thread()->team->id;

	area_info areaInfo;
	status_t error = get_area_info(parameters.buffer_area, &areaInfo);
	if (error != B_OK)
		return error;

	if (areaInfo.team != team)
		return B_BAD_VALUE;

	if ((parameters.flags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0) {
		if (parameters.stack_depth < 1)
			return B_BAD_VALUE;

		if (parameters.interval < B_DEBUG_MIN_PROFILE_INTERVAL)
			parameters.interval = B_DEBUG_MIN_PROFILE_INTERVAL;

		if (parameters.stack_depth > B_DEBUG_STACK_TRACE_DEPTH)
			parameters.stack_depth = B_DEBUG_STACK_TRACE_DEPTH;
	}

	// quick check to see whether we do already have a profiler installed
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;
	locker.Unlock();

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(team, areaInfo,
		parameters);
	if (profiler == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	locker.Lock();
	if (sProfiler != NULL)
		return B_BUSY;

	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


status_t
_user_system_profiler_next_buffer(size_t bytesRead, uint64* _droppedEvents)
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	if (_droppedEvents != NULL && !IS_USER_ADDRESS(_droppedEvents))
		return B_BAD_ADDRESS;

	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	// get a reference to the profiler
	SystemProfiler* profiler = sProfiler;
	BReference<SystemProfiler> reference(profiler);
	locker.Unlock();

	uint64 droppedEvents = 0;
	status_t error = profiler->NextBuffer(bytesRead,
		_droppedEvents != NULL ? &droppedEvents : NULL);
	if (error == B_OK && _droppedEvents != NULL)
		user_memcpy(_droppedEvents, &droppedEvents, sizeof(droppedEvents));

	return error;
}


status_t
_user_system_profiler_stop()
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();

	return B_OK;
}


status_t
_user_system_profiler_recorded(system_profiler_parameters* userParameters)
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters))
		return B_BAD_ADDRESS;

#if SYSTEM_PROFILER
	if (sRecordedParameters == NULL)
		return B_ERROR;

	stop_system_profiler();

	// Transfer the area to the userland process

	void* address;
	area_id newArea = transfer_area(sRecordedParameters->buffer_area, &address,
		B_ANY_ADDRESS, team_get_current_team_id(), true);
	if (newArea < 0)
		return newArea;

	status_t status = set_area_protection(newArea, B_READ_AREA);
	if (status == B_OK) {
		sRecordedParameters->buffer_area = newArea;

		status = user_memcpy(userParameters, sRecordedParameters,
			sizeof(system_profiler_parameters));
	}
	if (status != B_OK)
		delete_area(newArea);

	delete sRecordedParameters;
	sRecordedParameters = NULL;

	return status;
#else
	return B_NOT_SUPPORTED;
#endif	// SYSTEM_PROFILER
}