/*
 * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <system_profiler.h>

#include <AutoDeleter.h>
#include <Referenceable.h>

#include <util/AutoLock.h>

#include <system_profiler_defs.h>

#include <cpu.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <listeners.h>
#include <Notifications.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <user_debugger.h>
#include <vm/vm.h>

#include <arch/debug.h>

#include "IOSchedulerRoster.h"


// This is the kernel-side implementation of the system profiling support.
// A userland team can register as system profiler, providing an area as
// buffer for events. Those events are team, thread, and image changes
// (added/removed), periodic sampling of the return address stack for each
// CPU, as well as scheduling and I/O scheduling events.
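//
// The shared area starts with a system_profiler_buffer_header holding the
// current ring buffer "start" offset and "size" in bytes; the event ring
// buffer itself follows. Each event consists of a
// system_profiler_event_header (event type, CPU index, payload size) plus a
// type-specific payload, padded to 4-byte alignment (cf. _AllocateBuffer()).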


class SystemProfiler;


// minimum/maximum size of the table used for wait object caching
#define MIN_WAIT_OBJECT_COUNT 128
#define MAX_WAIT_OBJECT_COUNT 1024


static spinlock sProfilerLock = B_SPINLOCK_INITIALIZER;
static SystemProfiler* sProfiler = NULL;
static struct system_profiler_parameters* sRecordedParameters = NULL;


class SystemProfiler : public Referenceable, private NotificationListener,
    private SchedulerListener, private WaitObjectListener {
public:
    SystemProfiler(team_id team, const area_info& userAreaInfo,
        const system_profiler_parameters& parameters);
    ~SystemProfiler();

    team_id Team() const { return fTeam; }

    status_t Init();
    status_t NextBuffer(size_t bytesRead, uint64* _droppedEvents);

private:
    virtual void EventOccurred(NotificationService& service,
        const KMessage* event);

    virtual void ThreadEnqueuedInRunQueue(struct thread* thread);
    virtual void ThreadRemovedFromRunQueue(struct thread* thread);
    virtual void ThreadScheduled(struct thread* oldThread,
        struct thread* newThread);

    virtual void SemaphoreCreated(sem_id id, const char* name);
    virtual void ConditionVariableInitialized(ConditionVariable* variable);
    virtual void MutexInitialized(mutex* lock);
    virtual void RWLockInitialized(rw_lock* lock);

    bool _TeamAdded(struct team* team);
    bool _TeamRemoved(struct team* team);
    bool _TeamExec(struct team* team);

    bool _ThreadAdded(struct thread* thread);
    bool _ThreadRemoved(struct thread* thread);

    bool _ImageAdded(struct image* image);
    bool _ImageRemoved(struct image* image);

    bool _IOSchedulerAdded(IOScheduler* scheduler);
    bool _IOSchedulerRemoved(IOScheduler* scheduler);
    bool _IORequestScheduled(IOScheduler* scheduler, IORequest* request);
    bool _IORequestFinished(IOScheduler* scheduler, IORequest* request);
    bool _IOOperationStarted(IOScheduler* scheduler, IORequest* request,
        IOOperation* operation);
    bool _IOOperationFinished(IOScheduler* scheduler, IORequest* request,
        IOOperation* operation);

    void _WaitObjectCreated(addr_t object, uint32 type);
    void _WaitObjectUsed(addr_t object, uint32 type);

    inline void _MaybeNotifyProfilerThreadLocked();
    inline void _MaybeNotifyProfilerThread();

    static bool _InitialTeamIterator(struct team* team, void* cookie);
    static bool _InitialThreadIterator(struct thread* thread, void* cookie);
    static bool _InitialImageIterator(struct image* image, void* cookie);

    void* _AllocateBuffer(size_t size, int event, int cpu, int count);

    static void _InitTimers(void* cookie, int cpu);
    static void _UninitTimers(void* cookie, int cpu);
    void _ScheduleTimer(int cpu);

    void _DoSample();

    static int32 _ProfilingEvent(struct timer* timer);

private:
    struct CPUProfileData {
        struct timer timer;
        bigtime_t timerEnd;
        bool timerScheduled;
        addr_t buffer[B_DEBUG_STACK_TRACE_DEPTH];
    };

    struct WaitObjectKey {
        addr_t object;
        uint32 type;
    };

    struct WaitObject : DoublyLinkedListLinkImpl<WaitObject>,
        WaitObjectKey {
        struct WaitObject* hash_link;
    };

    struct WaitObjectTableDefinition {
        typedef WaitObjectKey KeyType;
        typedef WaitObject ValueType;

        size_t HashKey(const WaitObjectKey& key) const
        {
            return (size_t)key.object ^ (size_t)key.type;
        }

        size_t Hash(const WaitObject* value) const
        {
            return HashKey(*value);
        }

        bool Compare(const WaitObjectKey& key, const WaitObject* value) const
        {
            return value->type == key.type
                && value->object == key.object;
        }

        WaitObject*& GetLink(WaitObject* value) const
        {
            return value->hash_link;
        }
    };

    typedef DoublyLinkedList<WaitObject> WaitObjectList;
    typedef BOpenHashTable<WaitObjectTableDefinition> WaitObjectTable;

private:
    spinlock fLock;
    team_id fTeam;
    area_id fUserArea;
    area_id fKernelArea;
    size_t fAreaSize;
    uint32 fFlags;
    uint32 fStackDepth;
    bigtime_t fInterval;
    system_profiler_buffer_header* fHeader;
    uint8* fBufferBase;
    size_t fBufferCapacity;
    size_t fBufferStart;
    size_t fBufferSize;
    uint64 fDroppedEvents;
    bool fTeamNotificationsRequested;
    bool fTeamNotificationsEnabled;
    bool fThreadNotificationsRequested;
    bool fThreadNotificationsEnabled;
    bool fImageNotificationsRequested;
    bool fImageNotificationsEnabled;
    bool fIONotificationsRequested;
    bool fIONotificationsEnabled;
    bool fSchedulerNotificationsRequested;
    bool fWaitObjectNotificationsRequested;
    struct thread* volatile fWaitingProfilerThread;
    bool fProfilingActive;
    bool fReentered[B_MAX_CPU_COUNT];
    CPUProfileData fCPUData[B_MAX_CPU_COUNT];
    struct thread** fRunningThreads;
    WaitObject* fWaitObjectBuffer;
    int32 fWaitObjectCount;
    WaitObjectList fUsedWaitObjects;
    WaitObjectList fFreeWaitObjects;
    WaitObjectTable fWaitObjectTable;
};
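
// Locking policy: fLock protects the event buffer and the wait object cache.
// The scheduler and wait object hooks are invoked with interrupts disabled
// and gThreadSpinlock held, so wherever both locks are needed,
// gThreadSpinlock must be acquired first (cf. _MaybeNotifyProfilerThread()).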

inline void
SystemProfiler::_MaybeNotifyProfilerThreadLocked()
{
    // If the buffer is full enough, notify the profiler.
    if (fWaitingProfilerThread != NULL && fBufferSize > fBufferCapacity / 2) {
        int cpu = smp_get_current_cpu();
        fReentered[cpu] = true;
        thread_unblock_locked(fWaitingProfilerThread, B_OK);
        fWaitingProfilerThread = NULL;
        fReentered[cpu] = false;
    }
}


inline void
SystemProfiler::_MaybeNotifyProfilerThread()
{
    if (fWaitingProfilerThread == NULL)
        return;

    InterruptsSpinLocker threadsLocker(gThreadSpinlock);
    SpinLocker locker(fLock);

    _MaybeNotifyProfilerThreadLocked();
}


SystemProfiler::SystemProfiler(team_id team, const area_info& userAreaInfo,
    const system_profiler_parameters& parameters)
    :
    fTeam(team),
    fUserArea(userAreaInfo.area),
    fKernelArea(-1),
    fAreaSize(userAreaInfo.size),
    fFlags(parameters.flags),
    fStackDepth(parameters.stack_depth),
    fInterval(parameters.interval),
    fHeader(NULL),
    fBufferBase(NULL),
    fBufferCapacity(0),
    fBufferStart(0),
    fBufferSize(0),
    fDroppedEvents(0),
    fTeamNotificationsRequested(false),
    fTeamNotificationsEnabled(false),
    fThreadNotificationsRequested(false),
    fThreadNotificationsEnabled(false),
    fImageNotificationsRequested(false),
    fImageNotificationsEnabled(false),
    fIONotificationsRequested(false),
    fIONotificationsEnabled(false),
    fSchedulerNotificationsRequested(false),
    fWaitObjectNotificationsRequested(false),
    fWaitingProfilerThread(NULL),
    fWaitObjectBuffer(NULL),
    fWaitObjectCount(0),
    fUsedWaitObjects(),
    fFreeWaitObjects(),
    fWaitObjectTable()
{
    B_INITIALIZE_SPINLOCK(&fLock);

    memset(fReentered, 0, sizeof(fReentered));

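    // The divisor below estimates the cost of one cached entry: the
    // WaitObject itself plus hash table overhead of roughly one and a half
    // pointers per object (the table is sized at fWaitObjectCount * 3 / 2
    // slots in Init()).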
    // compute the number of wait objects we want to cache
    if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
        fWaitObjectCount = parameters.locking_lookup_size
            / (sizeof(WaitObject) + (sizeof(void*) * 3 / 2));
        if (fWaitObjectCount < MIN_WAIT_OBJECT_COUNT)
            fWaitObjectCount = MIN_WAIT_OBJECT_COUNT;
        if (fWaitObjectCount > MAX_WAIT_OBJECT_COUNT)
            fWaitObjectCount = MAX_WAIT_OBJECT_COUNT;
    }
}


SystemProfiler::~SystemProfiler()
{
    // Wake up the user thread, if it is waiting, and mark profiling
    // inactive.
    InterruptsSpinLocker locker(fLock);
    if (fWaitingProfilerThread != NULL) {
        thread_unblock_locked(fWaitingProfilerThread, B_OK);
        fWaitingProfilerThread = NULL;
    }
    fProfilingActive = false;
    locker.Unlock();

    // stop scheduler listening
    if (fSchedulerNotificationsRequested) {
        InterruptsSpinLocker threadsLocker(gThreadSpinlock);
        scheduler_remove_listener(this);
    }

    // stop wait object listening
    if (fWaitObjectNotificationsRequested) {
        InterruptsSpinLocker locker(gWaitObjectListenerLock);
        remove_wait_object_listener(this);
    }

    // deactivate the profiling timers on all CPUs
    if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
        call_all_cpus(_UninitTimers, this);

    // cancel notifications
    NotificationManager& notificationManager
        = NotificationManager::Manager();

    // images
    if (fImageNotificationsRequested) {
        fImageNotificationsRequested = false;
        notificationManager.RemoveListener("images", NULL, *this);
    }

    // threads
    if (fThreadNotificationsRequested) {
        fThreadNotificationsRequested = false;
        notificationManager.RemoveListener("threads", NULL, *this);
    }

    // teams
    if (fTeamNotificationsRequested) {
        fTeamNotificationsRequested = false;
        notificationManager.RemoveListener("teams", NULL, *this);
    }

    // I/O
    if (fIONotificationsRequested) {
        fIONotificationsRequested = false;
        notificationManager.RemoveListener("I/O", NULL, *this);
    }

    // delete wait object related allocations
    fWaitObjectTable.Clear();
    delete[] fWaitObjectBuffer;

    // unlock the memory and delete the area
    if (fKernelArea >= 0) {
        unlock_memory(fHeader, fAreaSize, B_READ_DEVICE);
        delete_area(fKernelArea);
        fKernelArea = -1;
    }
}


status_t
SystemProfiler::Init()
{
    // clone the user area
    void* areaBase;
    fKernelArea = clone_area("profiling samples", &areaBase,
        B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA, fUserArea);
    if (fKernelArea < 0)
        return fKernelArea;

    // we need the memory locked
    status_t error = lock_memory(areaBase, fAreaSize, B_READ_DEVICE);
    if (error != B_OK) {
        delete_area(fKernelArea);
        fKernelArea = -1;
        return error;
    }

    // the buffer is ready for use
    fHeader = (system_profiler_buffer_header*)areaBase;
    fBufferBase = (uint8*)(fHeader + 1);
    fBufferCapacity = fAreaSize - (fBufferBase - (uint8*)areaBase);
    fHeader->start = 0;
    fHeader->size = 0;

    // allocate the wait object buffer and init the hash table
    if (fWaitObjectCount > 0) {
        fWaitObjectBuffer = new(std::nothrow) WaitObject[fWaitObjectCount];
        if (fWaitObjectBuffer == NULL)
            return B_NO_MEMORY;

        for (int32 i = 0; i < fWaitObjectCount; i++)
            fFreeWaitObjects.Add(fWaitObjectBuffer + i);

        error = fWaitObjectTable.Init(fWaitObjectCount * 3 / 2);
        if (error != B_OK)
            return error;
    }

    // start listening for notifications

    // teams
    NotificationManager& notificationManager
        = NotificationManager::Manager();
    if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
        error = notificationManager.AddListener("teams",
            TEAM_ADDED | TEAM_REMOVED | TEAM_EXEC, *this);
        if (error != B_OK)
            return error;
        fTeamNotificationsRequested = true;
    }

    // threads
    if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
        error = notificationManager.AddListener("threads",
            THREAD_ADDED | THREAD_REMOVED, *this);
        if (error != B_OK)
            return error;
        fThreadNotificationsRequested = true;
    }

    // images
    if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
        error = notificationManager.AddListener("images",
            IMAGE_ADDED | IMAGE_REMOVED, *this);
        if (error != B_OK)
            return error;
        fImageNotificationsRequested = true;
    }

    // I/O events
    if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
        error = notificationManager.AddListener("I/O",
            IO_SCHEDULER_ADDED | IO_SCHEDULER_REMOVED
                | IO_SCHEDULER_REQUEST_SCHEDULED
                | IO_SCHEDULER_REQUEST_FINISHED
                | IO_SCHEDULER_OPERATION_STARTED
                | IO_SCHEDULER_OPERATION_FINISHED,
            *this);
        if (error != B_OK)
            return error;
        fIONotificationsRequested = true;
    }

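    // Note: the listeners registered above are not active yet -- the
    // f*NotificationsEnabled flags are still false, so incoming events are
    // discarded until the initial state has been captured below. Each flag
    // is flipped while the respective subsystem lock is held, so no event
    // can be missed or recorded twice.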
    // We need to fill the buffer with the initial state of teams, threads,
    // and images.

    // teams
    if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
        InterruptsSpinLocker teamsLocker(gTeamSpinlock);
        if (team_iterate_through_teams(&_InitialTeamIterator, this) != NULL)
            return B_BUFFER_OVERFLOW;
        fTeamNotificationsEnabled = true;
        teamsLocker.Unlock();
    }

    // images
    if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
        if (image_iterate_through_images(&_InitialImageIterator, this) != NULL)
            return B_BUFFER_OVERFLOW;
    }

    // threads
    struct thread* runningThreads[B_MAX_CPU_COUNT];
    memset(runningThreads, 0, sizeof(runningThreads));
    fRunningThreads = runningThreads;

    InterruptsSpinLocker threadsLocker(gThreadSpinlock);
    if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0
        || (fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
        if (thread_iterate_through_threads(&_InitialThreadIterator, this)
                != NULL) {
            return B_BUFFER_OVERFLOW;
        }
        fThreadNotificationsEnabled
            = (fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0;
    }

    fProfilingActive = true;

    // start scheduler and wait object listening
    if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
        scheduler_add_listener(this);
        fSchedulerNotificationsRequested = true;

        SpinLocker waitObjectLocker(gWaitObjectListenerLock);
        add_wait_object_listener(this);
        fWaitObjectNotificationsRequested = true;
        waitObjectLocker.Unlock();

        // fake schedule events for the initially running threads
        int32 cpuCount = smp_get_num_cpus();
        for (int32 i = 0; i < cpuCount; i++) {
            if (runningThreads[i] != NULL)
                ThreadScheduled(runningThreads[i], runningThreads[i]);
        }
    }

    threadsLocker.Unlock();

    // I/O scheduling
    if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
        IOSchedulerRoster* roster = IOSchedulerRoster::Default();
        AutoLocker<IOSchedulerRoster> rosterLocker(roster);

        for (IOSchedulerList::ConstIterator it
                = roster->SchedulerList().GetIterator();
            IOScheduler* scheduler = it.Next();) {
            _IOSchedulerAdded(scheduler);
        }

        fIONotificationsEnabled = true;
    }

    // activate the profiling timers on all CPUs
    if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
        call_all_cpus(_InitTimers, this);

    return B_OK;
}


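// NextBuffer() is the consumer side of the ring buffer handshake: the
// profiler thread reports how many bytes it has processed, the read window
// is advanced accordingly, and the thread then blocks until the buffer is
// more than half full, a timeout expires (in which case any non-empty
// buffer is returned), or an error occurs.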
status_t
SystemProfiler::NextBuffer(size_t bytesRead, uint64* _droppedEvents)
{
    InterruptsSpinLocker locker(fLock);

    if (fWaitingProfilerThread != NULL || !fProfilingActive
        || bytesRead > fBufferSize) {
        return B_BAD_VALUE;
    }

    fBufferSize -= bytesRead;
    fBufferStart += bytesRead;
    if (fBufferStart > fBufferCapacity)
        fBufferStart -= fBufferCapacity;
    fHeader->size = fBufferSize;
    fHeader->start = fBufferStart;

    // already enough data in the buffer to return?
    if (fBufferSize > fBufferCapacity / 2)
        return B_OK;

    // Wait until the buffer gets too full or an error or a timeout occurs.
    while (true) {
        struct thread* thread = thread_get_current_thread();
        fWaitingProfilerThread = thread;

        thread_prepare_to_block(thread, B_CAN_INTERRUPT,
            THREAD_BLOCK_TYPE_OTHER, "system profiler buffer");

        locker.Unlock();

        status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT,
            1000000);

        locker.Lock();

        if (error == B_OK) {
            // the notifying thread has unset fWaitingProfilerThread for us
            break;
        }

        fWaitingProfilerThread = NULL;

        if (error != B_TIMED_OUT)
            return error;

        // just the timeout -- return if the buffer is not empty
        if (fBufferSize > 0)
            break;
    }

    if (_droppedEvents != NULL) {
        *_droppedEvents = fDroppedEvents;
        fDroppedEvents = 0;
    }

    return B_OK;
}


void
SystemProfiler::EventOccurred(NotificationService& service,
    const KMessage* event)
{
    int32 eventCode;
    if (event->FindInt32("event", &eventCode) != B_OK)
        return;

    if (strcmp(service.Name(), "teams") == 0) {
        if (!fTeamNotificationsEnabled)
            return;

        struct team* team = (struct team*)event->GetPointer("teamStruct",
            NULL);
        if (team == NULL)
            return;

        switch (eventCode) {
            case TEAM_ADDED:
                _TeamAdded(team);
                break;

            case TEAM_REMOVED:
                if (team->id == fTeam) {
                    // The profiling team is gone -- uninstall the profiler!
                    InterruptsSpinLocker locker(sProfilerLock);
                    if (sProfiler != this)
                        return;

                    sProfiler = NULL;
                    locker.Unlock();

                    RemoveReference();
                    return;
                }

                _TeamRemoved(team);
                break;

            case TEAM_EXEC:
                _TeamExec(team);
                break;
        }
    } else if (strcmp(service.Name(), "threads") == 0) {
        if (!fThreadNotificationsEnabled)
            return;

        struct thread* thread = (struct thread*)event->GetPointer(
            "threadStruct", NULL);
        if (thread == NULL)
            return;

        switch (eventCode) {
            case THREAD_ADDED:
                _ThreadAdded(thread);
                break;

            case THREAD_REMOVED:
                _ThreadRemoved(thread);
                break;
        }
    } else if (strcmp(service.Name(), "images") == 0) {
        if (!fImageNotificationsEnabled)
            return;

        struct image* image = (struct image*)event->GetPointer(
            "imageStruct", NULL);
        if (image == NULL)
            return;

        switch (eventCode) {
            case IMAGE_ADDED:
                _ImageAdded(image);
                break;

            case IMAGE_REMOVED:
                _ImageRemoved(image);
                break;
        }
    } else if (strcmp(service.Name(), "I/O") == 0) {
        if (!fIONotificationsEnabled)
            return;

        IOScheduler* scheduler = (IOScheduler*)event->GetPointer("scheduler",
            NULL);
        if (scheduler == NULL)
            return;

        IORequest* request = (IORequest*)event->GetPointer("request", NULL);
        IOOperation* operation = (IOOperation*)event->GetPointer("operation",
            NULL);

        switch (eventCode) {
            case IO_SCHEDULER_ADDED:
                _IOSchedulerAdded(scheduler);
                break;

            case IO_SCHEDULER_REMOVED:
                _IOSchedulerRemoved(scheduler);
                break;

            case IO_SCHEDULER_REQUEST_SCHEDULED:
                _IORequestScheduled(scheduler, request);
                break;

            case IO_SCHEDULER_REQUEST_FINISHED:
                _IORequestFinished(scheduler, request);
                break;

            case IO_SCHEDULER_OPERATION_STARTED:
                _IOOperationStarted(scheduler, request, operation);
                break;

            case IO_SCHEDULER_OPERATION_FINISHED:
                _IOOperationFinished(scheduler, request, operation);
                break;
        }
    }

    _MaybeNotifyProfilerThread();
}


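// The scheduler hooks below are invoked with interrupts disabled and
// gThreadSpinlock held. Waking the profiler thread from such a hook
// re-enters ThreadEnqueuedInRunQueue() on the same CPU while fLock is
// already held; the per-CPU fReentered flag makes the nested invocation
// skip the lock acquisition (see _MaybeNotifyProfilerThreadLocked()).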
void
SystemProfiler::ThreadEnqueuedInRunQueue(struct thread* thread)
{
    int cpu = smp_get_current_cpu();

    SpinLocker locker(fLock, false, !fReentered[cpu]);
        // When re-entering, we already hold the lock.

    system_profiler_thread_enqueued_in_run_queue* event
        = (system_profiler_thread_enqueued_in_run_queue*)_AllocateBuffer(
            sizeof(system_profiler_thread_enqueued_in_run_queue),
            B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE, cpu, 0);
    if (event == NULL)
        return;

    event->time = system_time_nsecs();
    event->thread = thread->id;
    event->priority = thread->priority;

    fHeader->size = fBufferSize;

    // Unblock the profiler thread, if necessary, but not if the unblocked
    // thread had been waiting on a condition variable, since then we'd
    // likely deadlock in ConditionVariable::NotifyOne(), as it acquires a
    // static spinlock.
    if (thread->wait.type != THREAD_BLOCK_TYPE_CONDITION_VARIABLE)
        _MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadRemovedFromRunQueue(struct thread* thread)
{
    int cpu = smp_get_current_cpu();

    SpinLocker locker(fLock, false, !fReentered[cpu]);
        // When re-entering, we already hold the lock.

    system_profiler_thread_removed_from_run_queue* event
        = (system_profiler_thread_removed_from_run_queue*)_AllocateBuffer(
            sizeof(system_profiler_thread_removed_from_run_queue),
            B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE, cpu, 0);
    if (event == NULL)
        return;

    event->time = system_time_nsecs();
    event->thread = thread->id;

    fHeader->size = fBufferSize;

    // unblock the profiler thread, if necessary
    _MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadScheduled(struct thread* oldThread,
    struct thread* newThread)
{
    int cpu = smp_get_current_cpu();

    SpinLocker locker(fLock, false, !fReentered[cpu]);
        // When re-entering, we already hold the lock.

    // If the old thread starts waiting, handle the wait object.
    if (oldThread->state == B_THREAD_WAITING)
        _WaitObjectUsed((addr_t)oldThread->wait.object, oldThread->wait.type);

    system_profiler_thread_scheduled* event
        = (system_profiler_thread_scheduled*)_AllocateBuffer(
            sizeof(system_profiler_thread_scheduled),
            B_SYSTEM_PROFILER_THREAD_SCHEDULED, cpu, 0);
    if (event == NULL)
        return;

    event->time = system_time_nsecs();
    event->thread = newThread->id;
    event->previous_thread = oldThread->id;
    event->previous_thread_state = oldThread->state;
    event->previous_thread_wait_object_type = oldThread->wait.type;
    event->previous_thread_wait_object = (addr_t)oldThread->wait.object;

    fHeader->size = fBufferSize;

    // unblock the profiler thread, if necessary
    _MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::SemaphoreCreated(sem_id id, const char* name)
{
    _WaitObjectCreated((addr_t)id, THREAD_BLOCK_TYPE_SEMAPHORE);
}


void
SystemProfiler::ConditionVariableInitialized(ConditionVariable* variable)
{
    _WaitObjectCreated((addr_t)variable, THREAD_BLOCK_TYPE_CONDITION_VARIABLE);
}


void
SystemProfiler::MutexInitialized(mutex* lock)
{
    _WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_MUTEX);
}


void
SystemProfiler::RWLockInitialized(rw_lock* lock)
{
    _WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_RW_LOCK);
}


bool
SystemProfiler::_TeamAdded(struct team* team)
{
    size_t nameLen = strlen(team->name);
    size_t argsLen = strlen(team->args);

    InterruptsSpinLocker locker(fLock);

    system_profiler_team_added* event = (system_profiler_team_added*)
        _AllocateBuffer(
            sizeof(system_profiler_team_added) + nameLen + 1 + argsLen,
            B_SYSTEM_PROFILER_TEAM_ADDED, 0, 0);
    if (event == NULL)
        return false;

    event->team = team->id;
    strcpy(event->name, team->name);
    event->args_offset = nameLen + 1;
    strcpy(event->name + nameLen + 1, team->args);

    fHeader->size = fBufferSize;

    return true;
}


bool
SystemProfiler::_TeamRemoved(struct team* team)
{
    InterruptsSpinLocker locker(fLock);

    system_profiler_team_removed* event = (system_profiler_team_removed*)
        _AllocateBuffer(sizeof(system_profiler_team_removed),
            B_SYSTEM_PROFILER_TEAM_REMOVED, 0, 0);
    if (event == NULL)
        return false;

    event->team = team->id;

    fHeader->size = fBufferSize;

    return true;
}


bool
SystemProfiler::_TeamExec(struct team* team)
{
    size_t argsLen = strlen(team->args);

    InterruptsSpinLocker locker(fLock);

    system_profiler_team_exec* event = (system_profiler_team_exec*)
        _AllocateBuffer(sizeof(system_profiler_team_exec) + argsLen,
            B_SYSTEM_PROFILER_TEAM_EXEC, 0, 0);
    if (event == NULL)
        return false;

    event->team = team->id;
    strlcpy(event->thread_name, team->main_thread->name,
        sizeof(event->thread_name));
    strcpy(event->args, team->args);

    fHeader->size = fBufferSize;

    return true;
}


bool
SystemProfiler::_ThreadAdded(struct thread* thread)
{
    InterruptsSpinLocker locker(fLock);

    system_profiler_thread_added* event = (system_profiler_thread_added*)
        _AllocateBuffer(sizeof(system_profiler_thread_added),
            B_SYSTEM_PROFILER_THREAD_ADDED, 0, 0);
    if (event == NULL)
        return false;

    event->team = thread->team->id;
    event->thread = thread->id;
    strlcpy(event->name, thread->name, sizeof(event->name));

    fHeader->size = fBufferSize;

    return true;
}


bool
SystemProfiler::_ThreadRemoved(struct thread* thread)
{
    InterruptsSpinLocker locker(fLock);

    system_profiler_thread_removed* event
        = (system_profiler_thread_removed*)
            _AllocateBuffer(sizeof(system_profiler_thread_removed),
                B_SYSTEM_PROFILER_THREAD_REMOVED, 0, 0);
    if (event == NULL)
        return false;

    event->team = thread->team->id;
    event->thread = thread->id;

    fHeader->size = fBufferSize;

    return true;
}


bool
SystemProfiler::_ImageAdded(struct image* image)
{
    InterruptsSpinLocker locker(fLock);

    system_profiler_image_added* event = (system_profiler_image_added*)
        _AllocateBuffer(sizeof(system_profiler_image_added),
            B_SYSTEM_PROFILER_IMAGE_ADDED, 0, 0);
    if (event == NULL)
        return false;

    event->team = image->team;
    event->info = image->info;

    fHeader->size = fBufferSize;

    return true;
}


bool
SystemProfiler::_ImageRemoved(struct image* image)
{
    InterruptsSpinLocker locker(fLock);

    system_profiler_image_removed* event = (system_profiler_image_removed*)
        _AllocateBuffer(sizeof(system_profiler_image_removed),
            B_SYSTEM_PROFILER_IMAGE_REMOVED, 0, 0);
    if (event == NULL)
        return false;

    event->team = image->team;
    event->image = image->info.id;

    fHeader->size = fBufferSize;

    return true;
}


bool
SystemProfiler::_IOSchedulerAdded(IOScheduler* scheduler)
{
    size_t nameLen = strlen(scheduler->Name());

    InterruptsSpinLocker locker(fLock);

    system_profiler_io_scheduler_added* event
        = (system_profiler_io_scheduler_added*)_AllocateBuffer(
            sizeof(system_profiler_io_scheduler_added) + nameLen,
            B_SYSTEM_PROFILER_IO_SCHEDULER_ADDED, 0, 0);
    if (event == NULL)
        return false;

    event->scheduler = scheduler->ID();
    strcpy(event->name, scheduler->Name());

    fHeader->size = fBufferSize;

    return true;
}


bool
SystemProfiler::_IOSchedulerRemoved(IOScheduler* scheduler)
{
    InterruptsSpinLocker locker(fLock);

    system_profiler_io_scheduler_removed* event
        = (system_profiler_io_scheduler_removed*)_AllocateBuffer(
            sizeof(system_profiler_io_scheduler_removed),
            B_SYSTEM_PROFILER_IO_SCHEDULER_REMOVED, 0, 0);
    if (event == NULL)
        return false;

    event->scheduler = scheduler->ID();

    fHeader->size = fBufferSize;

    return true;
}


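// For the request/operation events below, the kernel-side IORequest and
// IOOperation pointers are recorded in the events themselves. They serve as
// opaque identifiers that let userland correlate scheduled/started events
// with their finished counterparts.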
bool
SystemProfiler::_IORequestScheduled(IOScheduler* scheduler, IORequest* request)
{
    InterruptsSpinLocker locker(fLock);

    system_profiler_io_request_scheduled* event
        = (system_profiler_io_request_scheduled*)_AllocateBuffer(
            sizeof(system_profiler_io_request_scheduled),
            B_SYSTEM_PROFILER_IO_REQUEST_SCHEDULED, 0, 0);
    if (event == NULL)
        return false;

    IORequestOwner* owner = request->Owner();

    event->time = system_time_nsecs();
    event->scheduler = scheduler->ID();
    event->team = owner->team;
    event->thread = owner->thread;
    event->request = request;
    event->offset = request->Offset();
    event->length = request->Length();
    event->write = request->IsWrite();
    event->priority = owner->priority;

    fHeader->size = fBufferSize;

    return true;
}


bool
SystemProfiler::_IORequestFinished(IOScheduler* scheduler, IORequest* request)
{
    InterruptsSpinLocker locker(fLock);

    system_profiler_io_request_finished* event
        = (system_profiler_io_request_finished*)_AllocateBuffer(
            sizeof(system_profiler_io_request_finished),
            B_SYSTEM_PROFILER_IO_REQUEST_FINISHED, 0, 0);
    if (event == NULL)
        return false;

    event->time = system_time_nsecs();
    event->scheduler = scheduler->ID();
    event->request = request;
    event->status = request->Status();
    event->transferred = request->TransferredBytes();

    fHeader->size = fBufferSize;

    return true;
}


bool
SystemProfiler::_IOOperationStarted(IOScheduler* scheduler, IORequest* request,
    IOOperation* operation)
{
    InterruptsSpinLocker locker(fLock);

    system_profiler_io_operation_started* event
        = (system_profiler_io_operation_started*)_AllocateBuffer(
            sizeof(system_profiler_io_operation_started),
            B_SYSTEM_PROFILER_IO_OPERATION_STARTED, 0, 0);
    if (event == NULL)
        return false;

    event->time = system_time_nsecs();
    event->scheduler = scheduler->ID();
    event->request = request;
    event->operation = operation;
    event->offset = request->Offset();
    event->length = request->Length();
    event->write = request->IsWrite();

    fHeader->size = fBufferSize;

    return true;
}


bool
SystemProfiler::_IOOperationFinished(IOScheduler* scheduler, IORequest* request,
    IOOperation* operation)
{
    InterruptsSpinLocker locker(fLock);

    system_profiler_io_operation_finished* event
        = (system_profiler_io_operation_finished*)_AllocateBuffer(
            sizeof(system_profiler_io_operation_finished),
            B_SYSTEM_PROFILER_IO_OPERATION_FINISHED, 0, 0);
    if (event == NULL)
        return false;

    event->time = system_time_nsecs();
    event->scheduler = scheduler->ID();
    event->request = request;
    event->operation = operation;
    event->status = request->Status();
    event->transferred = request->TransferredBytes();

    fHeader->size = fBufferSize;

    return true;
}


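// Wait object cache: only objects that threads have actually been observed
// waiting on are tracked (lazily, in _WaitObjectUsed()). When an object is
// (re-)created, a cached entry for its address is necessarily stale and is
// recycled in _WaitObjectCreated(). The used list is kept in LRU order, so
// the least recently used entry can be stolen once the cache is full.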
void
SystemProfiler::_WaitObjectCreated(addr_t object, uint32 type)
{
    SpinLocker locker(fLock);

    // look up the object
    WaitObjectKey key;
    key.object = object;
    key.type = type;
    WaitObject* waitObject = fWaitObjectTable.Lookup(key);

    // If found, remove it and add it to the free list. This might sound
    // weird, but it makes sense, since we lazily track *used* wait objects
    // only. I.e. the object in the table is now guaranteed to be obsolete.
    if (waitObject) {
        fWaitObjectTable.RemoveUnchecked(waitObject);
        fUsedWaitObjects.Remove(waitObject);
        fFreeWaitObjects.Add(waitObject, false);
    }
}


void
SystemProfiler::_WaitObjectUsed(addr_t object, uint32 type)
{
    // look up the object
    WaitObjectKey key;
    key.object = object;
    key.type = type;
    WaitObject* waitObject = fWaitObjectTable.Lookup(key);

    // If already known, re-queue it as most recently used and be done.
    if (waitObject != NULL) {
        fUsedWaitObjects.Remove(waitObject);
        fUsedWaitObjects.Add(waitObject);
        return;
    }

    // not known yet -- get the info
    const char* name = NULL;
    const void* referencedObject = NULL;

    switch (type) {
        case THREAD_BLOCK_TYPE_SEMAPHORE:
        {
            name = sem_get_name_unsafe((sem_id)object);
            break;
        }

        case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
        {
            ConditionVariable* variable = (ConditionVariable*)object;
            name = variable->ObjectType();
            referencedObject = variable->Object();
            break;
        }

        case THREAD_BLOCK_TYPE_MUTEX:
        {
            mutex* lock = (mutex*)object;
            name = lock->name;
            break;
        }

        case THREAD_BLOCK_TYPE_RW_LOCK:
        {
            rw_lock* lock = (rw_lock*)object;
            name = lock->name;
            break;
        }

        case THREAD_BLOCK_TYPE_OTHER:
        {
            name = (const char*)(void*)object;
            break;
        }

        case THREAD_BLOCK_TYPE_SNOOZE:
        case THREAD_BLOCK_TYPE_SIGNAL:
        default:
            return;
    }

    // add the event
    size_t nameLen = name != NULL ? strlen(name) : 0;

    system_profiler_wait_object_info* event
        = (system_profiler_wait_object_info*)_AllocateBuffer(
            sizeof(system_profiler_wait_object_info) + nameLen,
            B_SYSTEM_PROFILER_WAIT_OBJECT_INFO, 0, 0);
    if (event == NULL)
        return;

    event->type = type;
    event->object = object;
    event->referenced_object = (addr_t)referencedObject;
    if (name != NULL)
        strcpy(event->name, name);
    else
        event->name[0] = '\0';

    fHeader->size = fBufferSize;

    // add the wait object

    // get a free one or steal the least recently used one
    waitObject = fFreeWaitObjects.RemoveHead();
    if (waitObject == NULL) {
        waitObject = fUsedWaitObjects.RemoveHead();
        fWaitObjectTable.RemoveUnchecked(waitObject);
    }

    waitObject->object = object;
    waitObject->type = type;
    fWaitObjectTable.InsertUnchecked(waitObject);
    fUsedWaitObjects.Add(waitObject);
}


/*static*/ bool
SystemProfiler::_InitialTeamIterator(struct team* team, void* cookie)
{
    SystemProfiler* self = (SystemProfiler*)cookie;
    return !self->_TeamAdded(team);
}


/*static*/ bool
SystemProfiler::_InitialThreadIterator(struct thread* thread, void* cookie)
{
    SystemProfiler* self = (SystemProfiler*)cookie;

    if ((self->fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0
        && thread->state == B_THREAD_RUNNING && thread->cpu != NULL) {
        self->fRunningThreads[thread->cpu->cpu_num] = thread;
    }

    return !self->_ThreadAdded(thread);
}


/*static*/ bool
SystemProfiler::_InitialImageIterator(struct image* image, void* cookie)
{
    SystemProfiler* self = (SystemProfiler*)cookie;
    self->fImageNotificationsEnabled = true;
        // Set that here, since the image lock is being held now.
    return !self->_ImageAdded(image);
}


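// _AllocateBuffer() reserves space for one event in the ring buffer. If the
// event does not fit contiguously before the end of the buffer, a
// B_SYSTEM_PROFILER_BUFFER_END marker is written at the current position and
// the event is placed at offset 0 instead; the skipped tail still counts
// toward fBufferSize, which keeps producer and consumer in sync. If the
// wrapped event would collide with the unread start of the buffer, it is
// dropped and fDroppedEvents is incremented.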
void*
SystemProfiler::_AllocateBuffer(size_t size, int event, int cpu, int count)
{
    size = (size + 3) / 4 * 4;
    size += sizeof(system_profiler_event_header);

    size_t end = fBufferStart + fBufferSize;
    if (end + size > fBufferCapacity) {
        // Buffer is wrapped or needs wrapping.
        if (end < fBufferCapacity) {
            // not wrapped yet, but needed
            system_profiler_event_header* header
                = (system_profiler_event_header*)(fBufferBase + end);
            header->event = B_SYSTEM_PROFILER_BUFFER_END;
            fBufferSize = fBufferCapacity - fBufferStart;
            end = 0;
        } else
            end -= fBufferCapacity;

        if (end + size > fBufferStart) {
            fDroppedEvents++;
            return NULL;
        }
    }

    system_profiler_event_header* header
        = (system_profiler_event_header*)(fBufferBase + end);
    header->event = event;
    header->cpu = cpu;
    header->size = size - sizeof(system_profiler_event_header);

    fBufferSize += size;

    return header + 1;
}


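// Sampling uses a one-shot timer per CPU: _ProfilingEvent() fires in
// interrupt context, records a stack trace of the thread running on that
// CPU via _DoSample(), and immediately re-arms the timer for the next
// interval.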
/*static*/ void
SystemProfiler::_InitTimers(void* cookie, int cpu)
{
    SystemProfiler* self = (SystemProfiler*)cookie;
    self->_ScheduleTimer(cpu);
}


/*static*/ void
SystemProfiler::_UninitTimers(void* cookie, int cpu)
{
    SystemProfiler* self = (SystemProfiler*)cookie;

    CPUProfileData& cpuData = self->fCPUData[cpu];
    cancel_timer(&cpuData.timer);
    cpuData.timerScheduled = false;
}


void
SystemProfiler::_ScheduleTimer(int cpu)
{
    CPUProfileData& cpuData = fCPUData[cpu];
    cpuData.timerEnd = system_time() + fInterval;
    cpuData.timer.user_data = this;
    add_timer(&cpuData.timer, &_ProfilingEvent, fInterval,
        B_ONE_SHOT_RELATIVE_TIMER);
    cpuData.timerScheduled = true;
}


void
SystemProfiler::_DoSample()
{
    struct thread* thread = thread_get_current_thread();
    int cpu = thread->cpu->cpu_num;
    CPUProfileData& cpuData = fCPUData[cpu];

    // get the samples
    int32 count = arch_debug_get_stack_trace(cpuData.buffer, fStackDepth, 1,
        0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

    InterruptsSpinLocker locker(fLock);

    system_profiler_samples* event = (system_profiler_samples*)
        _AllocateBuffer(sizeof(system_profiler_samples)
                + count * sizeof(addr_t),
            B_SYSTEM_PROFILER_SAMPLES, cpu, count);
    if (event == NULL)
        return;

    event->thread = thread->id;
    memcpy(event->samples, cpuData.buffer, count * sizeof(addr_t));

    fHeader->size = fBufferSize;
}


/*static*/ int32
SystemProfiler::_ProfilingEvent(struct timer* timer)
{
    SystemProfiler* self = (SystemProfiler*)timer->user_data;

    self->_DoSample();
    self->_ScheduleTimer(timer->cpu);

    return B_HANDLED_INTERRUPT;
}


// #pragma mark - private kernel API


status_t
start_system_profiler(size_t areaSize, uint32 stackDepth, bigtime_t interval)
{
    struct ParameterDeleter {
        ParameterDeleter(area_id area)
            :
            fArea(area),
            fDetached(false)
        {
        }

        ~ParameterDeleter()
        {
            if (!fDetached) {
                delete_area(fArea);
                delete sRecordedParameters;
                sRecordedParameters = NULL;
            }
        }

        void Detach()
        {
            fDetached = true;
        }

    private:
        area_id fArea;
        bool fDetached;
    };

    void* address;
    area_id area = create_area("kernel profile data", &address,
        B_ANY_KERNEL_ADDRESS, areaSize, B_FULL_LOCK,
        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
    if (area < 0)
        return area;

    ParameterDeleter parameterDeleter(area);

    sRecordedParameters = new(std::nothrow) system_profiler_parameters;
    if (sRecordedParameters == NULL)
        return B_NO_MEMORY;

    sRecordedParameters->buffer_area = area;
    sRecordedParameters->flags = B_SYSTEM_PROFILER_TEAM_EVENTS
        | B_SYSTEM_PROFILER_THREAD_EVENTS | B_SYSTEM_PROFILER_IMAGE_EVENTS
        | B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS
        | B_SYSTEM_PROFILER_SAMPLING_EVENTS;
    sRecordedParameters->locking_lookup_size = 4096;
    sRecordedParameters->interval = interval;
    sRecordedParameters->stack_depth = stackDepth;

    area_info areaInfo;
    get_area_info(area, &areaInfo);

    // initialize the profiler
    SystemProfiler* profiler = new(std::nothrow) SystemProfiler(B_SYSTEM_TEAM,
        areaInfo, *sRecordedParameters);
    if (profiler == NULL)
        return B_NO_MEMORY;

    ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

    status_t error = profiler->Init();
    if (error != B_OK)
        return error;

    // set the new profiler
    InterruptsSpinLocker locker(sProfilerLock);
    if (sProfiler != NULL)
        return B_BUSY;

    parameterDeleter.Detach();
    profilerDeleter.Detach();
    sProfiler = profiler;
    locker.Unlock();

    return B_OK;
}


void
stop_system_profiler()
{
    InterruptsSpinLocker locker(sProfilerLock);
    if (sProfiler == NULL)
        return;

    SystemProfiler* profiler = sProfiler;
    sProfiler = NULL;
    locker.Unlock();

    profiler->RemoveReference();
}


// #pragma mark - syscalls


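// A rough sketch of how a userland profiler would drive the syscalls below
// (hedged: the _kern_* names follow the usual convention of mirroring the
// _user_* functions here; the actual userland glue lives elsewhere):
//
//  area_id area = create_area("profiling buffer", &buffer, B_ANY_ADDRESS,
//      SIZE, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
//
//  system_profiler_parameters parameters;
//  parameters.buffer_area = area;
//  parameters.flags = B_SYSTEM_PROFILER_TEAM_EVENTS | ...;
//  ...
//  _kern_system_profiler_start(&parameters);
//
//  while (_kern_system_profiler_next_buffer(bytesProcessed,
//          &droppedEvents) == B_OK) {
//      // process the events between header->start and
//      // header->start + header->size, then report them as read
//  }
//
//  _kern_system_profiler_stop();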
status_t
_user_system_profiler_start(struct system_profiler_parameters* userParameters)
{
    // copy params to the kernel
    struct system_profiler_parameters parameters;
    if (userParameters == NULL || !IS_USER_ADDRESS(userParameters)
        || user_memcpy(&parameters, userParameters, sizeof(parameters))
            != B_OK) {
        return B_BAD_ADDRESS;
    }

    // check the parameters
    team_id team = thread_get_current_thread()->team->id;

    area_info areaInfo;
    status_t error = get_area_info(parameters.buffer_area, &areaInfo);
    if (error != B_OK)
        return error;

    if (areaInfo.team != team)
        return B_BAD_VALUE;

    if ((parameters.flags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0) {
        if (parameters.stack_depth < 1)
            return B_BAD_VALUE;

        if (parameters.interval < B_DEBUG_MIN_PROFILE_INTERVAL)
            parameters.interval = B_DEBUG_MIN_PROFILE_INTERVAL;

        if (parameters.stack_depth > B_DEBUG_STACK_TRACE_DEPTH)
            parameters.stack_depth = B_DEBUG_STACK_TRACE_DEPTH;
    }

    // quick check to see whether we already have a profiler installed
    InterruptsSpinLocker locker(sProfilerLock);
    if (sProfiler != NULL)
        return B_BUSY;
    locker.Unlock();

    // initialize the profiler
    SystemProfiler* profiler = new(std::nothrow) SystemProfiler(team, areaInfo,
        parameters);
    if (profiler == NULL)
        return B_NO_MEMORY;
    ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

    error = profiler->Init();
    if (error != B_OK)
        return error;

    // Set the new profiler. We need to check again under the lock -- another
    // profiler might have been installed in the meantime; the unlocked check
    // above was only an optimization.
    locker.Lock();
    if (sProfiler != NULL)
        return B_BUSY;

    profilerDeleter.Detach();
    sProfiler = profiler;
    locker.Unlock();

    return B_OK;
}


status_t
_user_system_profiler_next_buffer(size_t bytesRead, uint64* _droppedEvents)
{
    if (_droppedEvents != NULL && !IS_USER_ADDRESS(_droppedEvents))
        return B_BAD_ADDRESS;

    team_id team = thread_get_current_thread()->team->id;

    InterruptsSpinLocker locker(sProfilerLock);
    if (sProfiler == NULL || sProfiler->Team() != team)
        return B_BAD_VALUE;

    // get a reference to the profiler
    SystemProfiler* profiler = sProfiler;
    Reference<SystemProfiler> reference(profiler);
    locker.Unlock();

    uint64 droppedEvents;
    status_t error = profiler->NextBuffer(bytesRead,
        _droppedEvents != NULL ? &droppedEvents : NULL);
    if (error == B_OK && _droppedEvents != NULL)
        user_memcpy(_droppedEvents, &droppedEvents, sizeof(droppedEvents));

    return error;
}


status_t
_user_system_profiler_stop()
{
    team_id team = thread_get_current_thread()->team->id;

    InterruptsSpinLocker locker(sProfilerLock);
    if (sProfiler == NULL || sProfiler->Team() != team)
        return B_BAD_VALUE;

    SystemProfiler* profiler = sProfiler;
    sProfiler = NULL;
    locker.Unlock();

    profiler->RemoveReference();

    return B_OK;
}


status_t
_user_system_profiler_recorded(
    struct system_profiler_parameters* userParameters)
{
    if (userParameters == NULL || !IS_USER_ADDRESS(userParameters))
        return B_BAD_ADDRESS;
    if (sRecordedParameters == NULL)
        return B_ERROR;

    // transfer the area to the userland process
    void* address;
    area_id newArea = transfer_area(sRecordedParameters->buffer_area, &address,
        B_ANY_ADDRESS, team_get_current_team_id(), true);
    if (newArea < 0)
        return newArea;

    status_t status = set_area_protection(newArea, B_READ_AREA);
    if (status == B_OK) {
        sRecordedParameters->buffer_area = newArea;

        status = user_memcpy(userParameters, sRecordedParameters,
            sizeof(system_profiler_parameters));
    }
    if (status != B_OK)
        delete_area(newArea);

    delete sRecordedParameters;
    sRecordedParameters = NULL;

    return status;
}