/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <system_profiler.h>

#include <AutoDeleter.h>
#include <Referenceable.h>

#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>

#include <system_profiler_defs.h>

#include <cpu.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <listeners.h>
#include <Notifications.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <user_debugger.h>
#include <vm/vm.h>

#include <arch/debug.h>

#include "IOSchedulerRoster.h"


// This is the kernel-side implementation of the system profiling support.
// A userland team can register as the system profiler, providing an area to
// be used as a buffer for events. Those events are team, thread, and image
// changes (added/removed), periodic sampling of the return address stack for
// each CPU, as well as scheduling and I/O scheduling events.
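//
// The userland side of the protocol, implemented by the syscalls at the
// bottom of this file, roughly works like this (a sketch, not a normative
// description):
//   1. Create an area to serve as the event buffer and fill in a
//      system_profiler_parameters structure (flags, sampling interval,
//      stack depth).
//   2. Call _user_system_profiler_start() to install the profiler.
//   3. Repeatedly call _user_system_profiler_next_buffer() to wait for and
//      consume events from the shared buffer.
//   4. Call _user_system_profiler_stop() when done.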


class SystemProfiler;


// minimum/maximum size of the table used for wait object caching
#define MIN_WAIT_OBJECT_COUNT	128
#define MAX_WAIT_OBJECT_COUNT	1024


static spinlock sProfilerLock = B_SPINLOCK_INITIALIZER;
static SystemProfiler* sProfiler = NULL;

#if SYSTEM_PROFILER
static struct system_profiler_parameters* sRecordedParameters = NULL;
#endif


class SystemProfiler : public BReferenceable, private NotificationListener,
	private SchedulerListener, private WaitObjectListener {
public:
								SystemProfiler(team_id team,
									const area_info& userAreaInfo,
									const system_profiler_parameters&
										parameters);
								~SystemProfiler();

			team_id				TeamID() const	{ return fTeam; }

			status_t			Init();
			status_t			NextBuffer(size_t bytesRead,
									uint64* _droppedEvents);

private:
	virtual	void				EventOccurred(NotificationService& service,
									const KMessage* event);

	virtual	void				ThreadEnqueuedInRunQueue(Thread* thread);
	virtual	void				ThreadRemovedFromRunQueue(Thread* thread);
	virtual	void				ThreadScheduled(Thread* oldThread,
									Thread* newThread);

	virtual	void				SemaphoreCreated(sem_id id,
									const char* name);
	virtual	void				ConditionVariableInitialized(
									ConditionVariable* variable);
	virtual	void				MutexInitialized(mutex* lock);
	virtual	void				RWLockInitialized(rw_lock* lock);

			bool				_TeamAdded(Team* team);
			bool				_TeamRemoved(Team* team);
			bool				_TeamExec(Team* team);

			bool				_ThreadAdded(Thread* thread);
			bool				_ThreadRemoved(Thread* thread);

			bool				_ImageAdded(struct image* image);
			bool				_ImageRemoved(struct image* image);

			bool				_IOSchedulerAdded(IOScheduler* scheduler);
			bool				_IOSchedulerRemoved(IOScheduler* scheduler);
			bool				_IORequestScheduled(IOScheduler* scheduler,
									IORequest* request);
			bool				_IORequestFinished(IOScheduler* scheduler,
									IORequest* request);
			bool				_IOOperationStarted(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);
			bool				_IOOperationFinished(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);

			void				_WaitObjectCreated(addr_t object, uint32 type);
			void				_WaitObjectUsed(addr_t object, uint32 type);

	inline	void				_MaybeNotifyProfilerThreadLocked();
	inline	void				_MaybeNotifyProfilerThread();

	static	bool				_InitialImageIterator(struct image* image,
									void* cookie);

			void*				_AllocateBuffer(size_t size, int event, int cpu,
									int count);

	static	void				_InitTimers(void* cookie, int cpu);
	static	void				_UninitTimers(void* cookie, int cpu);
			void				_ScheduleTimer(int cpu);

			void				_DoSample();

	static	int32				_ProfilingEvent(struct timer* timer);

private:
			struct CPUProfileData {
				struct timer	timer;
				bigtime_t		timerEnd;
				bool			timerScheduled;
				addr_t			buffer[B_DEBUG_STACK_TRACE_DEPTH];
			};

			struct WaitObjectKey {
				addr_t	object;
				uint32	type;
			};

			struct WaitObject : DoublyLinkedListLinkImpl<WaitObject>,
					WaitObjectKey {
				struct WaitObject*	hash_link;
			};

			struct WaitObjectTableDefinition {
				typedef WaitObjectKey	KeyType;
				typedef	WaitObject		ValueType;

				size_t HashKey(const WaitObjectKey& key) const
				{
					return (size_t)key.object ^ (size_t)key.type;
				}

				size_t Hash(const WaitObject* value) const
				{
					return HashKey(*value);
				}

				bool Compare(const WaitObjectKey& key,
					const WaitObject* value) const
				{
					return value->type == key.type
						&& value->object == key.object;
				}

				WaitObject*& GetLink(WaitObject* value) const
				{
					return value->hash_link;
				}
			};

			typedef DoublyLinkedList<WaitObject> WaitObjectList;
			typedef BOpenHashTable<WaitObjectTableDefinition> WaitObjectTable;

private:
			spinlock			fLock;
			team_id				fTeam;
			area_id				fUserArea;
			area_id				fKernelArea;
			size_t				fAreaSize;
			uint32				fFlags;
			uint32				fStackDepth;
			bigtime_t			fInterval;
			bool				fProfileKernel;
			system_profiler_buffer_header* fHeader;
			uint8*				fBufferBase;
			size_t				fBufferCapacity;
			size_t				fBufferStart;
			size_t				fBufferSize;
			uint64				fDroppedEvents;
			int64				fLastTeamAddedSerialNumber;
			int64				fLastThreadAddedSerialNumber;
			bool				fTeamNotificationsRequested;
			bool				fTeamNotificationsEnabled;
			bool				fThreadNotificationsRequested;
			bool				fThreadNotificationsEnabled;
			bool				fImageNotificationsRequested;
			bool				fImageNotificationsEnabled;
			bool				fIONotificationsRequested;
			bool				fIONotificationsEnabled;
			bool				fSchedulerNotificationsRequested;
			bool				fWaitObjectNotificationsRequested;
			Thread* volatile	fWaitingProfilerThread;
			bool				fProfilingActive;
			bool				fReentered[SMP_MAX_CPUS];
			CPUProfileData		fCPUData[SMP_MAX_CPUS];
			WaitObject*			fWaitObjectBuffer;
			int32				fWaitObjectCount;
			WaitObjectList		fUsedWaitObjects;
			WaitObjectList		fFreeWaitObjects;
			WaitObjectTable		fWaitObjectTable;
};


/*!	Notifies the profiler thread when the profiling buffer is full enough.
	The caller must hold fLock.
*/
inline void
SystemProfiler::_MaybeNotifyProfilerThreadLocked()
{
	// If the buffer is full enough, notify the profiler.
	if (fWaitingProfilerThread != NULL && fBufferSize > fBufferCapacity / 2) {
		int cpu = smp_get_current_cpu();
		fReentered[cpu] = true;

		Thread* profilerThread = fWaitingProfilerThread;
		fWaitingProfilerThread = NULL;

		SpinLocker _(profilerThread->scheduler_lock);
		thread_unblock_locked(profilerThread, B_OK);

		fReentered[cpu] = false;
	}
}


inline void
SystemProfiler::_MaybeNotifyProfilerThread()
{
	if (fWaitingProfilerThread == NULL)
		return;

	InterruptsSpinLocker locker(fLock);

	_MaybeNotifyProfilerThreadLocked();
}


// #pragma mark - SystemProfiler public


SystemProfiler::SystemProfiler(team_id team, const area_info& userAreaInfo,
	const system_profiler_parameters& parameters)
	:
	fTeam(team),
	fUserArea(userAreaInfo.area),
	fKernelArea(-1),
	fAreaSize(userAreaInfo.size),
	fFlags(parameters.flags),
	fStackDepth(parameters.stack_depth),
	fInterval(parameters.interval),
	fProfileKernel(parameters.profile_kernel),
	fHeader(NULL),
	fBufferBase(NULL),
	fBufferCapacity(0),
	fBufferStart(0),
	fBufferSize(0),
	fDroppedEvents(0),
	fLastTeamAddedSerialNumber(0),
	fLastThreadAddedSerialNumber(0),
	fTeamNotificationsRequested(false),
	fTeamNotificationsEnabled(false),
	fThreadNotificationsRequested(false),
	fThreadNotificationsEnabled(false),
	fImageNotificationsRequested(false),
	fImageNotificationsEnabled(false),
	fIONotificationsRequested(false),
	fIONotificationsEnabled(false),
	fSchedulerNotificationsRequested(false),
	fWaitObjectNotificationsRequested(false),
	fWaitingProfilerThread(NULL),
	fWaitObjectBuffer(NULL),
	fWaitObjectCount(0),
	fUsedWaitObjects(),
	fFreeWaitObjects(),
	fWaitObjectTable()
{
	B_INITIALIZE_SPINLOCK(&fLock);

	memset(fReentered, 0, sizeof(fReentered));

	// compute the number of wait objects we want to cache
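	// Each cached object costs a WaitObject plus its share of the hash
	// table, which is sized at 3/2 the object count (i.e. 3/2 of a pointer
	// per object); the result is clamped to a sane range.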
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		fWaitObjectCount = parameters.locking_lookup_size
			/ (sizeof(WaitObject) + (sizeof(void*) * 3 / 2));
		if (fWaitObjectCount < MIN_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MIN_WAIT_OBJECT_COUNT;
		if (fWaitObjectCount > MAX_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MAX_WAIT_OBJECT_COUNT;
	}
}


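/*!	Shuts the profiler down: wakes a potentially waiting profiler thread,
	detaches all listeners and per-CPU timers, and releases the cloned
	buffer area.
*/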
SystemProfiler::~SystemProfiler()
{
	// Wake up the user thread, if it is waiting, and mark profiling
	// inactive.
	InterruptsSpinLocker locker(fLock);
	if (fWaitingProfilerThread != NULL) {
		thread_unblock(fWaitingProfilerThread, B_OK);
		fWaitingProfilerThread = NULL;
	}
	fProfilingActive = false;
	locker.Unlock();

	// stop scheduler listening
	if (fSchedulerNotificationsRequested)
		scheduler_remove_listener(this);

	// stop wait object listening
	if (fWaitObjectNotificationsRequested) {
		InterruptsSpinLocker locker(gWaitObjectListenerLock);
		remove_wait_object_listener(this);
	}

	// deactivate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_UninitTimers, this);

	// cancel notifications
	NotificationManager& notificationManager
		= NotificationManager::Manager();

	// images
	if (fImageNotificationsRequested) {
		fImageNotificationsRequested = false;
		notificationManager.RemoveListener("images", NULL, *this);
	}

	// threads
	if (fThreadNotificationsRequested) {
		fThreadNotificationsRequested = false;
		notificationManager.RemoveListener("threads", NULL, *this);
	}

	// teams
	if (fTeamNotificationsRequested) {
		fTeamNotificationsRequested = false;
		notificationManager.RemoveListener("teams", NULL, *this);
	}

	// I/O
	if (fIONotificationsRequested) {
		fIONotificationsRequested = false;
		notificationManager.RemoveListener("I/O", NULL, *this);
	}

	// delete wait object related allocations
	fWaitObjectTable.Clear();
	delete[] fWaitObjectBuffer;

	// unlock the memory and delete the area
	if (fKernelArea >= 0) {
		unlock_memory(fHeader, fAreaSize, B_READ_DEVICE);
		delete_area(fKernelArea);
		fKernelArea = -1;
	}
}


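/*!	Sets everything up: clones and locks the userland buffer area, registers
	for the requested notifications, records the initial team/thread/image
	state into the buffer, and arms the per-CPU sampling timers.
	Returns B_OK on success, B_BUFFER_OVERFLOW if the initial state does not
	fit into the buffer, or another error code.
*/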
status_t
SystemProfiler::Init()
{
	// clone the user area
	void* areaBase;
	fKernelArea = clone_area("profiling samples", &areaBase,
		B_ANY_KERNEL_ADDRESS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		fUserArea);
	if (fKernelArea < 0)
		return fKernelArea;

	// we need the memory locked
	status_t error = lock_memory(areaBase, fAreaSize, B_READ_DEVICE);
	if (error != B_OK) {
		delete_area(fKernelArea);
		fKernelArea = -1;
		return error;
	}

	// the buffer is ready for use
	fHeader = (system_profiler_buffer_header*)areaBase;
	fBufferBase = (uint8*)(fHeader + 1);
	fBufferCapacity = fAreaSize - (fBufferBase - (uint8*)areaBase);
	fHeader->start = 0;
	fHeader->size = 0;

	// allocate the wait object buffer and init the hash table
	if (fWaitObjectCount > 0) {
		fWaitObjectBuffer = new(std::nothrow) WaitObject[fWaitObjectCount];
		if (fWaitObjectBuffer == NULL)
			return B_NO_MEMORY;

		for (int32 i = 0; i < fWaitObjectCount; i++)
			fFreeWaitObjects.Add(fWaitObjectBuffer + i);

		error = fWaitObjectTable.Init(fWaitObjectCount * 3 / 2);
		if (error != B_OK)
			return error;
	}

	// start listening for notifications

	// teams
	NotificationManager& notificationManager
		= NotificationManager::Manager();
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		error = notificationManager.AddListener("teams",
			TEAM_ADDED | TEAM_REMOVED | TEAM_EXEC, *this);
		if (error != B_OK)
			return error;
		fTeamNotificationsRequested = true;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		error = notificationManager.AddListener("threads",
			THREAD_ADDED | THREAD_REMOVED, *this);
		if (error != B_OK)
			return error;
		fThreadNotificationsRequested = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		error = notificationManager.AddListener("images",
			IMAGE_ADDED | IMAGE_REMOVED, *this);
		if (error != B_OK)
			return error;
		fImageNotificationsRequested = true;
	}

	// I/O events
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		error = notificationManager.AddListener("I/O",
			IO_SCHEDULER_ADDED | IO_SCHEDULER_REMOVED
				| IO_SCHEDULER_REQUEST_SCHEDULED | IO_SCHEDULER_REQUEST_FINISHED
				| IO_SCHEDULER_OPERATION_STARTED
				| IO_SCHEDULER_OPERATION_FINISHED,
			*this);
		if (error != B_OK)
			return error;
		fIONotificationsRequested = true;
	}

	// We need to fill the buffer with the initial state of teams, threads,
	// and images.

	// teams
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		TeamListIterator iterator;
		while (Team* team = iterator.Next()) {
			locker.Unlock();

			bool added = _TeamAdded(team);

			// release the reference returned by the iterator
			team->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fTeamNotificationsEnabled = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		if (image_iterate_through_images(&_InitialImageIterator, this) != NULL)
			return B_BUFFER_OVERFLOW;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		ThreadListIterator iterator;
		while (Thread* thread = iterator.Next()) {
			locker.Unlock();

			bool added = _ThreadAdded(thread);

			// release the reference returned by the iterator
			thread->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fThreadNotificationsEnabled = true;
	}

	fProfilingActive = true;

	// start scheduler and wait object listening
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		scheduler_add_listener(this);
		fSchedulerNotificationsRequested = true;

		InterruptsSpinLocker waitObjectLocker(gWaitObjectListenerLock);
		add_wait_object_listener(this);
		fWaitObjectNotificationsRequested = true;
		waitObjectLocker.Unlock();

		// fake schedule events for the initially running threads
		int32 cpuCount = smp_get_num_cpus();
		for (int32 i = 0; i < cpuCount; i++) {
			Thread* thread = gCPU[i].running_thread;
			if (thread != NULL)
				ThreadScheduled(thread, thread);
		}
	}

	// I/O scheduling
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		IOSchedulerRoster* roster = IOSchedulerRoster::Default();
		AutoLocker<IOSchedulerRoster> rosterLocker(roster);

		for (IOSchedulerList::ConstIterator it
				= roster->SchedulerList().GetIterator();
			IOScheduler* scheduler = it.Next();) {
			_IOSchedulerAdded(scheduler);
		}

		fIONotificationsEnabled = true;
	}

	// activate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_InitTimers, this);

	return B_OK;
}


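/*!	Called on behalf of the profiler thread after it has consumed
	\a bytesRead bytes from the buffer. Advances the ring buffer start
	accordingly and, if less than half of the buffer is in use, blocks until
	enough new events have arrived, an error occurs, or a one second timeout
	expires with a non-empty buffer.
*/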
status_t
SystemProfiler::NextBuffer(size_t bytesRead, uint64* _droppedEvents)
{
	InterruptsSpinLocker locker(fLock);

	if (fWaitingProfilerThread != NULL || !fProfilingActive
		|| bytesRead > fBufferSize) {
		return B_BAD_VALUE;
	}

	fBufferSize -= bytesRead;
	fBufferStart += bytesRead;
	if (fBufferStart > fBufferCapacity)
		fBufferStart -= fBufferCapacity;
	fHeader->size = fBufferSize;
	fHeader->start = fBufferStart;

	// already enough data in the buffer to return?
	if (fBufferSize > fBufferCapacity / 2)
		return B_OK;

	// Wait until the buffer gets too full or an error or a timeout occurs.
	while (true) {
		Thread* thread = thread_get_current_thread();
		fWaitingProfilerThread = thread;

		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "system profiler buffer");

		locker.Unlock();

		status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT, 1000000);

		locker.Lock();

		if (error == B_OK) {
			// the caller has unset fWaitingProfilerThread for us
			break;
		}

		fWaitingProfilerThread = NULL;

		if (error != B_TIMED_OUT)
			return error;

		// just the timeout -- return, if the buffer is not empty
		if (fBufferSize > 0)
			break;
	}

	if (_droppedEvents != NULL) {
		*_droppedEvents = fDroppedEvents;
		fDroppedEvents = 0;
	}

	return B_OK;
}


// #pragma mark - NotificationListener interface


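/*!	Dispatches a notification from one of the registered services ("teams",
	"threads", "images", "I/O") to the matching event handler, then wakes
	the profiler thread if the buffer has become full enough.
*/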
void
SystemProfiler::EventOccurred(NotificationService& service,
	const KMessage* event)
{
	int32 eventCode;
	if (event->FindInt32("event", &eventCode) != B_OK)
		return;

	if (strcmp(service.Name(), "teams") == 0) {
		Team* team = (Team*)event->GetPointer("teamStruct", NULL);
		if (team == NULL)
			return;

		switch (eventCode) {
			case TEAM_ADDED:
				if (fTeamNotificationsEnabled)
					_TeamAdded(team);
				break;

			case TEAM_REMOVED:
				if (team->id == fTeam) {
					// The profiling team is gone -- uninstall the profiler!
					InterruptsSpinLocker locker(sProfilerLock);
					if (sProfiler != this)
						return;

					sProfiler = NULL;
					locker.Unlock();

					ReleaseReference();
					return;
				}

				// When we're still doing the initial team list scan, we are
				// also interested in removals that happened to teams we have
				// already seen.
				if (fTeamNotificationsEnabled
					|| team->serial_number <= fLastTeamAddedSerialNumber) {
					_TeamRemoved(team);
				}
				break;

			case TEAM_EXEC:
				if (fTeamNotificationsEnabled)
					_TeamExec(team);
				break;
		}
	} else if (strcmp(service.Name(), "threads") == 0) {
		Thread* thread = (Thread*)event->GetPointer("threadStruct", NULL);
		if (thread == NULL)
			return;

		switch (eventCode) {
			case THREAD_ADDED:
				if (fThreadNotificationsEnabled)
					_ThreadAdded(thread);
				break;

			case THREAD_REMOVED:
				// When we're still doing the initial thread list scan, we are
				// also interested in removals that happened to threads we have
				// already seen.
				if (fThreadNotificationsEnabled
					|| thread->serial_number <= fLastThreadAddedSerialNumber) {
					_ThreadRemoved(thread);
				}
				break;
		}
	} else if (strcmp(service.Name(), "images") == 0) {
		if (!fImageNotificationsEnabled)
			return;

		struct image* image = (struct image*)event->GetPointer(
			"imageStruct", NULL);
		if (image == NULL)
			return;

		switch (eventCode) {
			case IMAGE_ADDED:
				_ImageAdded(image);
				break;

			case IMAGE_REMOVED:
				_ImageRemoved(image);
				break;
		}
	} else if (strcmp(service.Name(), "I/O") == 0) {
		if (!fIONotificationsEnabled)
			return;

		IOScheduler* scheduler = (IOScheduler*)event->GetPointer("scheduler",
			NULL);
		if (scheduler == NULL)
			return;

		IORequest* request = (IORequest*)event->GetPointer("request", NULL);
		IOOperation* operation = (IOOperation*)event->GetPointer("operation",
			NULL);

		switch (eventCode) {
			case IO_SCHEDULER_ADDED:
				_IOSchedulerAdded(scheduler);
				break;

			case IO_SCHEDULER_REMOVED:
				_IOSchedulerRemoved(scheduler);
				break;

			case IO_SCHEDULER_REQUEST_SCHEDULED:
				_IORequestScheduled(scheduler, request);
				break;

			case IO_SCHEDULER_REQUEST_FINISHED:
				_IORequestFinished(scheduler, request);
				break;

			case IO_SCHEDULER_OPERATION_STARTED:
				_IOOperationStarted(scheduler, request, operation);
				break;

			case IO_SCHEDULER_OPERATION_FINISHED:
				_IOOperationFinished(scheduler, request, operation);
				break;
		}
	}

	_MaybeNotifyProfilerThread();
}


// #pragma mark - SchedulerListener interface


void
SystemProfiler::ThreadEnqueuedInRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_enqueued_in_run_queue* event
		= (system_profiler_thread_enqueued_in_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_enqueued_in_run_queue),
				B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;
	event->priority = thread->priority;

	fHeader->size = fBufferSize;

	// Unblock the profiler thread, if necessary, but don't unblock the thread,
	// if it had been waiting on a condition variable, since then we'd likely
	// deadlock in ConditionVariable::NotifyOne(), as it acquires a static
	// spinlock.
	if (thread->wait.type != THREAD_BLOCK_TYPE_CONDITION_VARIABLE)
		_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadRemovedFromRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_removed_from_run_queue* event
		= (system_profiler_thread_removed_from_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_removed_from_run_queue),
				B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadScheduled(Thread* oldThread, Thread* newThread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	// If the old thread starts waiting, handle the wait object.
	if (oldThread->state == B_THREAD_WAITING)
		_WaitObjectUsed((addr_t)oldThread->wait.object, oldThread->wait.type);

	system_profiler_thread_scheduled* event
		= (system_profiler_thread_scheduled*)
			_AllocateBuffer(sizeof(system_profiler_thread_scheduled),
				B_SYSTEM_PROFILER_THREAD_SCHEDULED, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = newThread->id;
	event->previous_thread = oldThread->id;
	event->previous_thread_state = oldThread->state;
	event->previous_thread_wait_object_type = oldThread->wait.type;
	event->previous_thread_wait_object = (addr_t)oldThread->wait.object;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


// #pragma mark - WaitObjectListener interface


void
SystemProfiler::SemaphoreCreated(sem_id id, const char* name)
{
	_WaitObjectCreated((addr_t)id, THREAD_BLOCK_TYPE_SEMAPHORE);
}


void
SystemProfiler::ConditionVariableInitialized(ConditionVariable* variable)
{
	_WaitObjectCreated((addr_t)variable, THREAD_BLOCK_TYPE_CONDITION_VARIABLE);
}


void
SystemProfiler::MutexInitialized(mutex* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_MUTEX);
}


void
SystemProfiler::RWLockInitialized(rw_lock* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_RW_LOCK);
}


// #pragma mark - SystemProfiler private


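/*!	Writes a team-added event for \a team into the buffer. The event is
	variable-sized: the team name is copied to event->name, followed by the
	team's arguments at args_offset within the same string storage. Returns
	false if the buffer was full and the event had to be dropped.
*/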
bool
SystemProfiler::_TeamAdded(Team* team)
{
	TeamLocker teamLocker(team);

	size_t nameLen = strlen(team->Name());
	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	// During the initial scan check whether the team is already gone again.
	// Later this cannot happen, since the team creator notifies us before
	// actually starting the team.
	if (!fTeamNotificationsEnabled && team->state >= TEAM_STATE_DEATH)
		return true;

	if (team->serial_number > fLastTeamAddedSerialNumber)
		fLastTeamAddedSerialNumber = team->serial_number;

	system_profiler_team_added* event = (system_profiler_team_added*)
		_AllocateBuffer(
			sizeof(system_profiler_team_added) + nameLen + 1 + argsLen,
			B_SYSTEM_PROFILER_TEAM_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strcpy(event->name, team->Name());
	event->args_offset = nameLen + 1;
	strcpy(event->name + nameLen + 1, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamRemoved(Team* team)
{
	// TODO: It is possible that we get remove notifications for teams that
	// had already been removed from the global team list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	TeamLocker teamLocker(team);
	InterruptsSpinLocker locker(fLock);

	system_profiler_team_removed* event = (system_profiler_team_removed*)
		_AllocateBuffer(sizeof(system_profiler_team_removed),
			B_SYSTEM_PROFILER_TEAM_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamExec(Team* team)
{
	TeamLocker teamLocker(team);

	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	system_profiler_team_exec* event = (system_profiler_team_exec*)
		_AllocateBuffer(sizeof(system_profiler_team_exec) + argsLen,
			B_SYSTEM_PROFILER_TEAM_EXEC, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strlcpy(event->thread_name, team->main_thread->name,
		sizeof(event->thread_name));
	strcpy(event->args, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadAdded(Thread* thread)
{
	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	// During the initial scan check whether the thread is already gone
	// again. Later this cannot happen, since the thread creator notifies us
	// before actually starting the thread.
	if (!fThreadNotificationsEnabled && !thread->IsAlive())
		return true;

	if (thread->serial_number > fLastThreadAddedSerialNumber)
		fLastThreadAddedSerialNumber = thread->serial_number;

	system_profiler_thread_added* event = (system_profiler_thread_added*)
		_AllocateBuffer(sizeof(system_profiler_thread_added),
			B_SYSTEM_PROFILER_THREAD_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;
	strlcpy(event->name, thread->name, sizeof(event->name));
	{
		SpinLocker timeLocker(thread->time_lock);
		event->cpu_time = thread->CPUTime(false);
	}

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadRemoved(Thread* thread)
{
	// TODO: It is possible that we get remove notifications for threads that
	// had already been removed from the global thread list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	system_profiler_thread_removed* event
		= (system_profiler_thread_removed*)
			_AllocateBuffer(sizeof(system_profiler_thread_removed),
				B_SYSTEM_PROFILER_THREAD_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;
	{
		SpinLocker timeLocker(thread->time_lock);
		event->cpu_time = thread->CPUTime(false);
	}

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageAdded(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_added* event = (system_profiler_image_added*)
		_AllocateBuffer(sizeof(system_profiler_image_added),
			B_SYSTEM_PROFILER_IMAGE_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->info = image->info.basic_info;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageRemoved(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_removed* event = (system_profiler_image_removed*)
		_AllocateBuffer(sizeof(system_profiler_image_removed),
			B_SYSTEM_PROFILER_IMAGE_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->image = image->info.basic_info.id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerAdded(IOScheduler* scheduler)
{
	size_t nameLen = strlen(scheduler->Name());

	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_added* event
		= (system_profiler_io_scheduler_added*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_added) + nameLen,
			B_SYSTEM_PROFILER_IO_SCHEDULER_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();
	strcpy(event->name, scheduler->Name());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerRemoved(IOScheduler* scheduler)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_removed* event
		= (system_profiler_io_scheduler_removed*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_removed),
			B_SYSTEM_PROFILER_IO_SCHEDULER_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestScheduled(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_scheduled* event
		= (system_profiler_io_request_scheduled*)_AllocateBuffer(
			sizeof(system_profiler_io_request_scheduled),
			B_SYSTEM_PROFILER_IO_REQUEST_SCHEDULED, 0, 0);
	if (event == NULL)
		return false;

	IORequestOwner* owner = request->Owner();

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->team = owner->team;
	event->thread = owner->thread;
	event->request = request;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();
	event->priority = owner->priority;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestFinished(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_finished* event
		= (system_profiler_io_request_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_request_finished),
			B_SYSTEM_PROFILER_IO_REQUEST_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationStarted(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_started* event
		= (system_profiler_io_operation_started*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_started),
			B_SYSTEM_PROFILER_IO_OPERATION_STARTED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationFinished(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_finished* event
		= (system_profiler_io_operation_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_finished),
			B_SYSTEM_PROFILER_IO_OPERATION_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


void
SystemProfiler::_WaitObjectCreated(addr_t object, uint32 type)
{
	SpinLocker locker(fLock);

	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If found, remove it and add it to the free list. This might sound
	// weird, but it makes sense, since we only track *used* wait objects
	// lazily; the entry in the table is now guaranteed to be obsolete.
	if (waitObject) {
		fWaitObjectTable.RemoveUnchecked(waitObject);
		fUsedWaitObjects.Remove(waitObject);
		fFreeWaitObjects.Add(waitObject, false);
	}
}


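/*!	Marks the given wait object as most recently used, emitting a wait
	object info event for it the first time it is seen. When the cache is
	exhausted, the least recently used entry is recycled. The caller must
	hold fLock.
*/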
void
SystemProfiler::_WaitObjectUsed(addr_t object, uint32 type)
{
	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If already known, re-queue it as most recently used and be done.
	if (waitObject != NULL) {
		fUsedWaitObjects.Remove(waitObject);
		fUsedWaitObjects.Add(waitObject);
		return;
	}

	// not known yet -- get the info
	const char* name = NULL;
	const void* referencedObject = NULL;

	switch (type) {
		case THREAD_BLOCK_TYPE_SEMAPHORE:
		{
			name = sem_get_name_unsafe((sem_id)object);
			break;
		}

		case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
		{
			ConditionVariable* variable = (ConditionVariable*)object;
			name = variable->ObjectType();
			referencedObject = variable->Object();
			break;
		}

		case THREAD_BLOCK_TYPE_MUTEX:
		{
			mutex* lock = (mutex*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_RW_LOCK:
		{
			rw_lock* lock = (rw_lock*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_OTHER:
		{
			name = (const char*)(void*)object;
			break;
		}

		case THREAD_BLOCK_TYPE_OTHER_OBJECT:
		case THREAD_BLOCK_TYPE_SNOOZE:
		case THREAD_BLOCK_TYPE_SIGNAL:
		default:
			return;
	}

	// add the event
	size_t nameLen = name != NULL ? strlen(name) : 0;

	system_profiler_wait_object_info* event
		= (system_profiler_wait_object_info*)
			_AllocateBuffer(sizeof(system_profiler_wait_object_info) + nameLen,
				B_SYSTEM_PROFILER_WAIT_OBJECT_INFO, 0, 0);
	if (event == NULL)
		return;

	event->type = type;
	event->object = object;
	event->referenced_object = (addr_t)referencedObject;
	if (name != NULL)
		strcpy(event->name, name);
	else
		event->name[0] = '\0';

	fHeader->size = fBufferSize;

	// add the wait object

	// get a free one or steal the least recently used one
	waitObject = fFreeWaitObjects.RemoveHead();
	if (waitObject == NULL) {
		waitObject = fUsedWaitObjects.RemoveHead();
		fWaitObjectTable.RemoveUnchecked(waitObject);
	}

	waitObject->object = object;
	waitObject->type = type;
	fWaitObjectTable.InsertUnchecked(waitObject);
	fUsedWaitObjects.Add(waitObject);
}


/*static*/ bool
SystemProfiler::_InitialImageIterator(struct image* image, void* cookie)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->fImageNotificationsEnabled = true;
		// Set that here, since the image lock is being held now.
	return !self->_ImageAdded(image);
}


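/*!	Reserves space for an event in the ring buffer and writes the event
	header. The payload size is rounded up to a multiple of 4 bytes; if the
	event would cross the end of the buffer, a B_SYSTEM_PROFILER_BUFFER_END
	marker is written and the event wraps to the start. Returns a pointer to
	the event payload, or NULL (counting a dropped event) if there is not
	enough free space. The caller must hold fLock.
*/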
void*
SystemProfiler::_AllocateBuffer(size_t size, int event, int cpu, int count)
{
	size = (size + 3) / 4 * 4;
	size += sizeof(system_profiler_event_header);

	size_t end = fBufferStart + fBufferSize;
	if (end + size > fBufferCapacity) {
		// Buffer is wrapped or needs wrapping.
		if (end < fBufferCapacity) {
			// not wrapped yet, but needed
			system_profiler_event_header* header
				= (system_profiler_event_header*)(fBufferBase + end);
			header->event = B_SYSTEM_PROFILER_BUFFER_END;
			fBufferSize = fBufferCapacity - fBufferStart;
			end = 0;
		} else
			end -= fBufferCapacity;

		if (end + size > fBufferStart) {
			fDroppedEvents++;
			return NULL;
		}
	}

	system_profiler_event_header* header
		= (system_profiler_event_header*)(fBufferBase + end);
	header->event = event;
	header->cpu = cpu;
	header->size = size - sizeof(system_profiler_event_header);

	fBufferSize += size;

	return header + 1;
}


/*static*/ void
SystemProfiler::_InitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->_ScheduleTimer(cpu);
}


/*static*/ void
SystemProfiler::_UninitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;

	CPUProfileData& cpuData = self->fCPUData[cpu];
	cancel_timer(&cpuData.timer);
	cpuData.timerScheduled = false;
}


void
SystemProfiler::_ScheduleTimer(int cpu)
{
	CPUProfileData& cpuData = fCPUData[cpu];
	cpuData.timerEnd = system_time() + fInterval;
	cpuData.timer.user_data = this;
	add_timer(&cpuData.timer, &_ProfilingEvent, fInterval,
		B_ONE_SHOT_RELATIVE_TIMER);
	cpuData.timerScheduled = true;
}


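/*!	Invoked from the per-CPU timer hook: captures a stack trace of the
	thread currently running on this CPU (userland frames, plus kernel
	frames if requested) and appends it to the buffer as a samples event.
*/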
void
SystemProfiler::_DoSample()
{
	Thread* thread = thread_get_current_thread();
	int cpu = thread->cpu->cpu_num;
	CPUProfileData& cpuData = fCPUData[cpu];

	// get the samples
	uint32 flags = STACK_TRACE_USER;
	int32 skipIFrames = 0;
	if (fProfileKernel) {
		flags |= STACK_TRACE_KERNEL;
		skipIFrames = 1;
	}
	int32 count = arch_debug_get_stack_trace(cpuData.buffer, fStackDepth,
		skipIFrames, 0, flags);

	InterruptsSpinLocker locker(fLock);

	system_profiler_samples* event = (system_profiler_samples*)
		_AllocateBuffer(sizeof(system_profiler_samples)
				+ count * sizeof(addr_t),
			B_SYSTEM_PROFILER_SAMPLES, cpu, count);
	if (event == NULL)
		return;

	event->thread = thread->id;
	memcpy(event->samples, cpuData.buffer, count * sizeof(addr_t));

	fHeader->size = fBufferSize;
}


/*static*/ int32
SystemProfiler::_ProfilingEvent(struct timer* timer)
{
	SystemProfiler* self = (SystemProfiler*)timer->user_data;

	self->_DoSample();
	self->_ScheduleTimer(timer->cpu);

	return B_HANDLED_INTERRUPT;
}


// #pragma mark - private kernel API


#if SYSTEM_PROFILER

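/*!	Starts a profiling session driven from within the kernel: creates the
	buffer area itself, records the chosen parameters in sRecordedParameters
	for later retrieval via _user_system_profiler_recorded(), and installs a
	profiler on behalf of the system team.
*/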
status_t
start_system_profiler(size_t areaSize, uint32 stackDepth, bigtime_t interval)
{
	struct ParameterDeleter {
		ParameterDeleter(area_id area)
			:
			fArea(area),
			fDetached(false)
		{
		}

		~ParameterDeleter()
		{
			if (!fDetached) {
				delete_area(fArea);
				delete sRecordedParameters;
				sRecordedParameters = NULL;
			}
		}

		void Detach()
		{
			fDetached = true;
		}

	private:
		area_id	fArea;
		bool	fDetached;
	};

	void* address;
	area_id area = create_area("kernel profile data", &address,
		B_ANY_KERNEL_ADDRESS, areaSize, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		return area;

	ParameterDeleter parameterDeleter(area);

	sRecordedParameters = new(std::nothrow) system_profiler_parameters;
	if (sRecordedParameters == NULL)
		return B_NO_MEMORY;

	sRecordedParameters->buffer_area = area;
	sRecordedParameters->flags = B_SYSTEM_PROFILER_TEAM_EVENTS
		| B_SYSTEM_PROFILER_THREAD_EVENTS;
	if (interval > 0 && stackDepth > 0) {
		sRecordedParameters->flags |= B_SYSTEM_PROFILER_SAMPLING_EVENTS
			| B_SYSTEM_PROFILER_IMAGE_EVENTS;
	}
	sRecordedParameters->locking_lookup_size = 4096;
	sRecordedParameters->interval = interval;
	sRecordedParameters->stack_depth = stackDepth;

#if SYSTEM_PROFILE_SCHEDULING
	sRecordedParameters->flags |= B_SYSTEM_PROFILER_SCHEDULING_EVENTS;
	sRecordedParameters->locking_lookup_size = 64 * 1024;
#endif

	area_info areaInfo;
	get_area_info(area, &areaInfo);

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(B_SYSTEM_TEAM,
		areaInfo, *sRecordedParameters);
	if (profiler == NULL)
		return B_NO_MEMORY;

	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	status_t error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;

	parameterDeleter.Detach();
	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


void
stop_system_profiler()
{
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL)
		return;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();
}

#endif	// SYSTEM_PROFILER


// #pragma mark - syscalls


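/*!	Syscall backend: installs the calling team as the system profiler, after
	validating the supplied parameters and checking that the buffer area
	belongs to the caller. Only the root user may profile. Fails with B_BUSY
	if a profiler is already installed.
*/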
status_t
_user_system_profiler_start(struct system_profiler_parameters* userParameters)
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	// copy params to the kernel
	struct system_profiler_parameters parameters;
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters)
		|| user_memcpy(&parameters, userParameters, sizeof(parameters))
			!= B_OK) {
		return B_BAD_ADDRESS;
	}

	// check the parameters
	team_id team = thread_get_current_thread()->team->id;

	area_info areaInfo;
	status_t error = get_area_info(parameters.buffer_area, &areaInfo);
	if (error != B_OK)
		return error;

	if (areaInfo.team != team)
		return B_BAD_VALUE;

	if ((parameters.flags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0) {
		if (parameters.stack_depth < 1)
			return B_BAD_VALUE;

		if (parameters.interval < B_DEBUG_MIN_PROFILE_INTERVAL)
			parameters.interval = B_DEBUG_MIN_PROFILE_INTERVAL;

		if (parameters.stack_depth > B_DEBUG_STACK_TRACE_DEPTH)
			parameters.stack_depth = B_DEBUG_STACK_TRACE_DEPTH;
	}

	// quick check to see whether we do already have a profiler installed
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;
	locker.Unlock();

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(team, areaInfo,
		parameters);
	if (profiler == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	locker.Lock();
	if (sProfiler != NULL)
		return B_BUSY;

	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


status_t
_user_system_profiler_next_buffer(size_t bytesRead, uint64* _droppedEvents)
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	if (_droppedEvents != NULL && !IS_USER_ADDRESS(_droppedEvents))
		return B_BAD_ADDRESS;

	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	// get a reference to the profiler
	SystemProfiler* profiler = sProfiler;
	BReference<SystemProfiler> reference(profiler);
	locker.Unlock();

	uint64 droppedEvents = 0;
	status_t error = profiler->NextBuffer(bytesRead,
		_droppedEvents != NULL ? &droppedEvents : NULL);
	if (error == B_OK && _droppedEvents != NULL)
		user_memcpy(_droppedEvents, &droppedEvents, sizeof(droppedEvents));

	return error;
}


status_t
_user_system_profiler_stop()
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();

	return B_OK;
}


status_t
_user_system_profiler_recorded(system_profiler_parameters* userParameters)
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters))
		return B_BAD_ADDRESS;

#if SYSTEM_PROFILER
	if (sRecordedParameters == NULL)
		return B_ERROR;

	stop_system_profiler();

	// Transfer the area to the userland process

	void* address;
	area_id newArea = transfer_area(sRecordedParameters->buffer_area, &address,
		B_ANY_ADDRESS, team_get_current_team_id(), true);
	if (newArea < 0)
		return newArea;

	status_t status = set_area_protection(newArea, B_READ_AREA);
	if (status == B_OK) {
		sRecordedParameters->buffer_area = newArea;

		status = user_memcpy(userParameters, sRecordedParameters,
			sizeof(system_profiler_parameters));
	}
	if (status != B_OK)
		delete_area(newArea);

	delete sRecordedParameters;
	sRecordedParameters = NULL;

	return status;
#else
	return B_NOT_SUPPORTED;
#endif	// SYSTEM_PROFILER
}