xref: /haiku/src/system/kernel/debug/system_profiler.cpp (revision b247f935d133a42c427cad8a759a1bf2f65bc290)
1 /*
2  * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include <system_profiler.h>
8 
9 #include <AutoDeleter.h>
10 #include <Referenceable.h>
11 
12 #include <util/AutoLock.h>
13 
14 #include <system_profiler_defs.h>
15 
16 #include <cpu.h>
17 #include <kernel.h>
18 #include <kimage.h>
19 #include <kscheduler.h>
20 #include <listeners.h>
21 #include <Notifications.h>
22 #include <sem.h>
23 #include <team.h>
24 #include <thread.h>
25 #include <user_debugger.h>
26 #include <vm/vm.h>
27 
28 #include <arch/debug.h>
29 
30 #include "IOSchedulerRoster.h"
31 
32 
33 // This is the kernel-side implementation of the system profiling support.
34 // A userland team can register as system profiler, providing an area as buffer
35 // for events. Those events are team, thread, and image changes (added/removed),
36 // periodic sampling of the return address stack for each CPU, as well as
37 // scheduling and I/O scheduling events.
38 
39 
40 class SystemProfiler;
41 
42 
// minimum/maximum size of the table used for wait object caching
#define MIN_WAIT_OBJECT_COUNT	128
#define MAX_WAIT_OBJECT_COUNT	1024


// sProfilerLock protects sProfiler (see e.g. the TEAM_REMOVED handling in
// SystemProfiler::EventOccurred()); at most one profiler is installed at a
// time.
static spinlock sProfilerLock = B_SPINLOCK_INITIALIZER;
static SystemProfiler* sProfiler = NULL;
// NOTE(review): not referenced in this part of the file -- presumably holds
// the parameters of a previously recorded (e.g. boot-time) profile; confirm
// against the rest of the file.
static struct system_profiler_parameters* sRecordedParameters = NULL;
51 
52 
// The kernel-side system profiler. At most one instance is installed at a
// time (sProfiler). Depending on the flags requested at construction it
// subscribes to team/thread/image/I-O notifications, scheduler events, and
// wait object events, and serializes them into a ring buffer shared with the
// profiling userland team.
class SystemProfiler : public BReferenceable, private NotificationListener,
	private SchedulerListener, private WaitObjectListener {
public:
								SystemProfiler(team_id team,
									const area_info& userAreaInfo,
									const system_profiler_parameters&
										parameters);
								~SystemProfiler();

			// ID of the userland team that installed the profiler
			team_id				TeamID() const	{ return fTeam; }

			status_t			Init();
			status_t			NextBuffer(size_t bytesRead,
									uint64* _droppedEvents);

private:
	// NotificationListener interface (team/thread/image/I-O notifications)
    virtual	void				EventOccurred(NotificationService& service,
									const KMessage* event);

	// SchedulerListener interface
	virtual	void				ThreadEnqueuedInRunQueue(Thread* thread);
	virtual	void				ThreadRemovedFromRunQueue(Thread* thread);
	virtual	void				ThreadScheduled(Thread* oldThread,
									Thread* newThread);

	// WaitObjectListener interface
	virtual	void				SemaphoreCreated(sem_id id,
									const char* name);
	virtual	void				ConditionVariableInitialized(
									ConditionVariable* variable);
	virtual	void				MutexInitialized(mutex* lock);
	virtual	void				RWLockInitialized(rw_lock* lock);

	// Event writers. Each returns false when the event had to be dropped
	// because _AllocateBuffer() found no room in the buffer.
			bool				_TeamAdded(Team* team);
			bool				_TeamRemoved(Team* team);
			bool				_TeamExec(Team* team);

			bool				_ThreadAdded(Thread* thread);
			bool				_ThreadRemoved(Thread* thread);

			bool				_ImageAdded(struct image* image);
			bool				_ImageRemoved(struct image* image);

			bool				_IOSchedulerAdded(IOScheduler* scheduler);
			bool				_IOSchedulerRemoved(IOScheduler* scheduler);
			bool				_IORequestScheduled(IOScheduler* scheduler,
									IORequest* request);
			bool				_IORequestFinished(IOScheduler* scheduler,
									IORequest* request);
			bool				_IOOperationStarted(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);
			bool				_IOOperationFinished(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);

			void				_WaitObjectCreated(addr_t object, uint32 type);
			void				_WaitObjectUsed(addr_t object, uint32 type);

	inline	void				_MaybeNotifyProfilerThreadLocked();
	inline	void				_MaybeNotifyProfilerThread();

	static	bool				_InitialImageIterator(struct image* image,
									void* cookie);

			void*				_AllocateBuffer(size_t size, int event, int cpu,
									int count);

	static	void				_InitTimers(void* cookie, int cpu);
	static	void				_UninitTimers(void* cookie, int cpu);
			void				_ScheduleTimer(int cpu);

			void				_DoSample();

	static	int32				_ProfilingEvent(struct timer* timer);

private:
			// per-CPU state for the periodic sampling timer and the stack
			// trace scratch buffer
			struct CPUProfileData {
				struct timer	timer;
				bigtime_t		timerEnd;
				bool			timerScheduled;
				addr_t			buffer[B_DEBUG_STACK_TRACE_DEPTH];
			};

			// hash key for the wait object cache: object address plus type
			struct WaitObjectKey {
				addr_t	object;
				uint32	type;
			};

			struct WaitObject : DoublyLinkedListLinkImpl<WaitObject>,
					WaitObjectKey {
				struct WaitObject* hash_link;
			};

			// BOpenHashTable definition for the wait object cache
			struct WaitObjectTableDefinition {
				typedef WaitObjectKey	KeyType;
				typedef	WaitObject		ValueType;

				size_t HashKey(const WaitObjectKey& key) const
				{
					return (size_t)key.object ^ (size_t)key.type;
				}

				size_t Hash(const WaitObject* value) const
				{
					return HashKey(*value);
				}

				bool Compare(const WaitObjectKey& key,
					const WaitObject* value) const
				{
					return value->type == key.type
						&& value->object == key.object;
				}

				WaitObject*& GetLink(WaitObject* value) const
				{
					return value->hash_link;
				}
			};

			typedef DoublyLinkedList<WaitObject> WaitObjectList;
			typedef BOpenHashTable<WaitObjectTableDefinition> WaitObjectTable;

private:
			spinlock			fLock;
				// protects the ring buffer state and the wait object cache
			team_id				fTeam;
			area_id				fUserArea;
				// buffer area owned by the userland team
			area_id				fKernelArea;
				// kernel clone of fUserArea (see Init())
			size_t				fAreaSize;
			uint32				fFlags;
				// requested B_SYSTEM_PROFILER_* event classes
			uint32				fStackDepth;
			bigtime_t			fInterval;
			system_profiler_buffer_header* fHeader;
			uint8*				fBufferBase;
			size_t				fBufferCapacity;
			size_t				fBufferStart;
			size_t				fBufferSize;
			uint64				fDroppedEvents;
			int64				fLastTeamAddedSerialNumber;
			int64				fLastThreadAddedSerialNumber;
			bool				fTeamNotificationsRequested;
			bool				fTeamNotificationsEnabled;
			bool				fThreadNotificationsRequested;
			bool				fThreadNotificationsEnabled;
			bool				fImageNotificationsRequested;
			bool				fImageNotificationsEnabled;
			bool				fIONotificationsRequested;
			bool				fIONotificationsEnabled;
			bool				fSchedulerNotificationsRequested;
			bool				fWaitObjectNotificationsRequested;
			Thread* volatile	fWaitingProfilerThread;
				// thread currently blocked in NextBuffer(), if any
			bool				fProfilingActive;
			bool				fReentered[SMP_MAX_CPUS];
				// per-CPU flag: true while that CPU already holds fLock, so
				// listener hooks invoked re-entrantly must not re-acquire it
			CPUProfileData		fCPUData[SMP_MAX_CPUS];
			WaitObject*			fWaitObjectBuffer;
				// preallocated wait object cache entries
			int32				fWaitObjectCount;
			WaitObjectList		fUsedWaitObjects;
			WaitObjectList		fFreeWaitObjects;
			WaitObjectTable		fWaitObjectTable;
};
210 
211 
/*!	Notifies the profiler thread when the profiling buffer is full enough.
	The caller must hold fLock.
*/
inline void
SystemProfiler::_MaybeNotifyProfilerThreadLocked()
{
	// If the buffer is full enough, notify the profiler.
	if (fWaitingProfilerThread != NULL && fBufferSize > fBufferCapacity / 2) {
		int cpu = smp_get_current_cpu();
		// Mark this CPU as re-entered: unblocking the thread may trigger
		// scheduler listener hooks on this CPU while we still hold fLock,
		// and those hooks must not try to acquire it again.
		fReentered[cpu] = true;

		SpinLocker _(fWaitingProfilerThread->scheduler_lock);
		thread_unblock_locked(fWaitingProfilerThread, B_OK);

		fWaitingProfilerThread = NULL;
		fReentered[cpu] = false;
	}
}
230 
231 
232 inline void
233 SystemProfiler::_MaybeNotifyProfilerThread()
234 {
235 	if (fWaitingProfilerThread == NULL)
236 		return;
237 
238 	InterruptsSpinLocker locker(fLock);
239 
240 	_MaybeNotifyProfilerThreadLocked();
241 }
242 
243 
244 // #pragma mark - SystemProfiler public
245 
246 
247 SystemProfiler::SystemProfiler(team_id team, const area_info& userAreaInfo,
248 	const system_profiler_parameters& parameters)
249 	:
250 	fTeam(team),
251 	fUserArea(userAreaInfo.area),
252 	fKernelArea(-1),
253 	fAreaSize(userAreaInfo.size),
254 	fFlags(parameters.flags),
255 	fStackDepth(parameters.stack_depth),
256 	fInterval(parameters.interval),
257 	fHeader(NULL),
258 	fBufferBase(NULL),
259 	fBufferCapacity(0),
260 	fBufferStart(0),
261 	fBufferSize(0),
262 	fDroppedEvents(0),
263 	fLastTeamAddedSerialNumber(0),
264 	fLastThreadAddedSerialNumber(0),
265 	fTeamNotificationsRequested(false),
266 	fTeamNotificationsEnabled(false),
267 	fThreadNotificationsRequested(false),
268 	fThreadNotificationsEnabled(false),
269 	fImageNotificationsRequested(false),
270 	fImageNotificationsEnabled(false),
271 	fIONotificationsRequested(false),
272 	fIONotificationsEnabled(false),
273 	fSchedulerNotificationsRequested(false),
274 	fWaitObjectNotificationsRequested(false),
275 	fWaitingProfilerThread(NULL),
276 	fWaitObjectBuffer(NULL),
277 	fWaitObjectCount(0),
278 	fUsedWaitObjects(),
279 	fFreeWaitObjects(),
280 	fWaitObjectTable()
281 {
282 	B_INITIALIZE_SPINLOCK(&fLock);
283 
284 	memset(fReentered, 0, sizeof(fReentered));
285 
286 	// compute the number wait objects we want to cache
287 	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
288 		fWaitObjectCount = parameters.locking_lookup_size
289 			/ (sizeof(WaitObject) + (sizeof(void*) * 3 / 2));
290 		if (fWaitObjectCount < MIN_WAIT_OBJECT_COUNT)
291 			fWaitObjectCount = MIN_WAIT_OBJECT_COUNT;
292 		if (fWaitObjectCount > MAX_WAIT_OBJECT_COUNT)
293 			fWaitObjectCount = MAX_WAIT_OBJECT_COUNT;
294 	}
295 }
296 
297 
/*!	Tears the profiler down: wakes a waiting profiler thread, deregisters
	all listeners that Init() managed to register (guarded by the
	*Requested flags, so a partially initialized object is cleaned up
	correctly), and releases the buffer resources.
*/
SystemProfiler::~SystemProfiler()
{
	// Wake up the user thread, if it is waiting, and mark profiling
	// inactive.
	InterruptsSpinLocker locker(fLock);
	if (fWaitingProfilerThread != NULL) {
		thread_unblock(fWaitingProfilerThread, B_OK);
		fWaitingProfilerThread = NULL;
	}
	fProfilingActive = false;
	locker.Unlock();

	// stop scheduler listening
	if (fSchedulerNotificationsRequested)
		scheduler_remove_listener(this);

	// stop wait object listening
	if (fWaitObjectNotificationsRequested) {
		InterruptsSpinLocker locker(gWaitObjectListenerLock);
		remove_wait_object_listener(this);
	}

	// deactivate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_UninitTimers, this);

	// cancel notifications
	NotificationManager& notificationManager
		= NotificationManager::Manager();

	// images
	if (fImageNotificationsRequested) {
		fImageNotificationsRequested = false;
		notificationManager.RemoveListener("images", NULL, *this);
	}

	// threads
	if (fThreadNotificationsRequested) {
		fThreadNotificationsRequested = false;
		notificationManager.RemoveListener("threads", NULL, *this);
	}

	// teams
	if (fTeamNotificationsRequested) {
		fTeamNotificationsRequested = false;
		notificationManager.RemoveListener("teams", NULL, *this);
	}

	// I/O
	if (fIONotificationsRequested) {
		fIONotificationsRequested = false;
		notificationManager.RemoveListener("I/O", NULL, *this);
	}

	// delete wait object related allocations
	fWaitObjectTable.Clear();
	delete[] fWaitObjectBuffer;

	// unlock the memory and delete the area
	if (fKernelArea >= 0) {
		unlock_memory(fHeader, fAreaSize, B_READ_DEVICE);
		delete_area(fKernelArea);
		fKernelArea = -1;
	}
}
363 
364 
/*!	Second-stage initialization: clones and wires up the user buffer,
	allocates the wait object cache, registers the requested notification
	listeners, writes the initial team/image/thread state into the buffer,
	and finally starts scheduler/wait-object listening and the per-CPU
	sampling timers.
	On error the method simply returns; undoing whatever was already set up
	(guarded by the *Requested flags and fKernelArea) is left to the
	destructor.
*/
status_t
SystemProfiler::Init()
{
	// clone the user area
	void* areaBase;
	fKernelArea = clone_area("profiling samples", &areaBase,
		B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA,
		fUserArea);
	if (fKernelArea < 0)
		return fKernelArea;

	// We need the memory locked: events are also written from contexts
	// that must not fault (e.g. under spinlocks).
	status_t error = lock_memory(areaBase, fAreaSize, B_READ_DEVICE);
	if (error != B_OK) {
		delete_area(fKernelArea);
		fKernelArea = -1;
		return error;
	}

	// The buffer is ready for use: the header sits at the start of the
	// area, the event ring buffer directly after it.
	fHeader = (system_profiler_buffer_header*)areaBase;
	fBufferBase = (uint8*)(fHeader + 1);
	fBufferCapacity = fAreaSize - (fBufferBase - (uint8*)areaBase);
	fHeader->start = 0;
	fHeader->size = 0;

	// allocate the wait object buffer and init the hash table
	if (fWaitObjectCount > 0) {
		fWaitObjectBuffer = new(std::nothrow) WaitObject[fWaitObjectCount];
		if (fWaitObjectBuffer == NULL)
			return B_NO_MEMORY;

		for (int32 i = 0; i < fWaitObjectCount; i++)
			fFreeWaitObjects.Add(fWaitObjectBuffer + i);

		error = fWaitObjectTable.Init(fWaitObjectCount * 3 / 2);
		if (error != B_OK)
			return error;
	}

	// start listening for notifications

	// teams
	NotificationManager& notificationManager
		= NotificationManager::Manager();
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		error = notificationManager.AddListener("teams",
			TEAM_ADDED | TEAM_REMOVED | TEAM_EXEC, *this);
		if (error != B_OK)
			return error;
		fTeamNotificationsRequested = true;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		error = notificationManager.AddListener("threads",
			THREAD_ADDED | THREAD_REMOVED, *this);
		if (error != B_OK)
			return error;
		fThreadNotificationsRequested = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		error = notificationManager.AddListener("images",
			IMAGE_ADDED | IMAGE_REMOVED, *this);
		if (error != B_OK)
			return error;
		fImageNotificationsRequested = true;
	}

	// I/O events
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		error = notificationManager.AddListener("I/O",
			IO_SCHEDULER_ADDED | IO_SCHEDULER_REMOVED
				| IO_SCHEDULER_REQUEST_SCHEDULED | IO_SCHEDULER_REQUEST_FINISHED
				| IO_SCHEDULER_OPERATION_STARTED
				| IO_SCHEDULER_OPERATION_FINISHED,
			*this);
		if (error != B_OK)
			return error;
		fIONotificationsRequested = true;
	}

	// We need to fill the buffer with the initial state of teams, threads,
	// and images.

	// teams
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		TeamListIterator iterator;
		while (Team* team = iterator.Next()) {
			// Drop fLock while writing the event -- _TeamAdded() acquires
			// it (and the team lock) itself.
			locker.Unlock();

			bool added = _TeamAdded(team);

			// release the reference returned by the iterator
			team->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		// From now on team notifications are processed as regular events;
		// removals of already-seen teams were handled via the serial
		// number check even before this point.
		fTeamNotificationsEnabled = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		if (image_iterate_through_images(&_InitialImageIterator, this) != NULL)
			return B_BUFFER_OVERFLOW;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		ThreadListIterator iterator;
		while (Thread* thread = iterator.Next()) {
			// Drop fLock while writing the event -- _ThreadAdded()
			// acquires it (and the thread lock) itself.
			locker.Unlock();

			bool added = _ThreadAdded(thread);

			// release the reference returned by the iterator
			thread->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fThreadNotificationsEnabled = true;
	}

	fProfilingActive = true;

	// start scheduler and wait object listening
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		scheduler_add_listener(this);
		fSchedulerNotificationsRequested = true;

		InterruptsSpinLocker waitObjectLocker(gWaitObjectListenerLock);
		add_wait_object_listener(this);
		fWaitObjectNotificationsRequested = true;
		waitObjectLocker.Unlock();

		// fake schedule events for the initially running threads
		int32 cpuCount = smp_get_num_cpus();
		for (int32 i = 0; i < cpuCount; i++) {
			Thread* thread = gCPU[i].running_thread;
			if (thread != NULL)
				ThreadScheduled(thread, thread);
		}
	}

	// I/O scheduling
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		IOSchedulerRoster* roster = IOSchedulerRoster::Default();
		AutoLocker<IOSchedulerRoster> rosterLocker(roster);

		// report the I/O schedulers that already exist
		for (IOSchedulerList::ConstIterator it
				= roster->SchedulerList().GetIterator();
			IOScheduler* scheduler = it.Next();) {
			_IOSchedulerAdded(scheduler);
		}

		fIONotificationsEnabled = true;
	}

	// activate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_InitTimers, this);

	return B_OK;
}
543 
544 
/*!	Called by the profiler thread to release \a bytesRead bytes of already
	consumed events and wait for the next batch.
	Blocks (interruptibly, with a 1 s timeout per round) until either the
	buffer is more than half full -- in which case the event writer unblocks
	us via _MaybeNotifyProfilerThreadLocked() -- or the timeout fires with a
	non-empty buffer. If \a _droppedEvents is not NULL, returns and resets
	the number of events dropped since the last call.
*/
status_t
SystemProfiler::NextBuffer(size_t bytesRead, uint64* _droppedEvents)
{
	InterruptsSpinLocker locker(fLock);

	// only one thread may wait, and it may not claim more than is buffered
	if (fWaitingProfilerThread != NULL || !fProfilingActive
		|| bytesRead > fBufferSize) {
		return B_BAD_VALUE;
	}

	// consume the read bytes and advance the ring buffer start
	fBufferSize -= bytesRead;
	fBufferStart += bytesRead;
	if (fBufferStart > fBufferCapacity)
		fBufferStart -= fBufferCapacity;
		// NOTE(review): ">=" would also wrap the start == capacity case;
		// presumably _AllocateBuffer() tolerates a start at capacity --
		// verify against its implementation.
	fHeader->size = fBufferSize;
	fHeader->start = fBufferStart;

	// already enough data in the buffer to return?
	if (fBufferSize > fBufferCapacity / 2)
		return B_OK;

	// Wait until the buffer gets too full or an error or a timeout occurs.
	while (true) {
		Thread* thread = thread_get_current_thread();
		fWaitingProfilerThread = thread;

		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "system profiler buffer");

		locker.Unlock();

		status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT, 1000000);

		locker.Lock();

		if (error == B_OK) {
			// the caller has unset fWaitingProfilerThread for us
			break;
		}

		fWaitingProfilerThread = NULL;

		if (error != B_TIMED_OUT)
			return error;

		// just the timeout -- return, if the buffer is not empty
		if (fBufferSize > 0)
			break;
	}

	if (_droppedEvents != NULL) {
		*_droppedEvents = fDroppedEvents;
		fDroppedEvents = 0;
	}

	return B_OK;
}
602 
603 
604 // #pragma mark - NotificationListener interface
605 
606 
/*!	NotificationListener hook: dispatches team/thread/image/I-O
	notifications to the respective event writer, then notifies a waiting
	profiler thread if the buffer filled up. The service is identified by
	name ("teams", "threads", "images", "I/O").
*/
void
SystemProfiler::EventOccurred(NotificationService& service,
	const KMessage* event)
{
	int32 eventCode;
	if (event->FindInt32("event", &eventCode) != B_OK)
		return;

	if (strcmp(service.Name(), "teams") == 0) {
		Team* team = (Team*)event->GetPointer("teamStruct", NULL);
		if (team == NULL)
			return;

		switch (eventCode) {
			case TEAM_ADDED:
				if (fTeamNotificationsEnabled)
					_TeamAdded(team);
				break;

			case TEAM_REMOVED:
				if (team->id == fTeam) {
					// The profiling team is gone -- uninstall the profiler!
					InterruptsSpinLocker locker(sProfilerLock);
					if (sProfiler != this)
						return;

					sProfiler = NULL;
					locker.Unlock();

					// drop the installation reference; this may delete us
					ReleaseReference();
					return;
				}

				// When we're still doing the initial team list scan, we are
				// also interested in removals that happened to teams we have
				// already seen.
				if (fTeamNotificationsEnabled
					|| team->serial_number <= fLastTeamAddedSerialNumber) {
					_TeamRemoved(team);
				}
				break;

			case TEAM_EXEC:
				if (fTeamNotificationsEnabled)
					_TeamExec(team);
				break;
		}
	} else if (strcmp(service.Name(), "threads") == 0) {
		Thread* thread = (Thread*)event->GetPointer("threadStruct", NULL);
		if (thread == NULL)
			return;

		switch (eventCode) {
			case THREAD_ADDED:
				if (fThreadNotificationsEnabled)
					_ThreadAdded(thread);
				break;

			case THREAD_REMOVED:
				// When we're still doing the initial thread list scan, we are
				// also interested in removals that happened to threads we have
				// already seen.
				if (fThreadNotificationsEnabled
					|| thread->serial_number <= fLastThreadAddedSerialNumber) {
					_ThreadRemoved(thread);
				}
				break;
		}
	} else if (strcmp(service.Name(), "images") == 0) {
		if (!fImageNotificationsEnabled)
			return;

		struct image* image = (struct image*)event->GetPointer(
			"imageStruct", NULL);
		if (image == NULL)
			return;

		switch (eventCode) {
			case IMAGE_ADDED:
				_ImageAdded(image);
				break;

			case IMAGE_REMOVED:
				_ImageRemoved(image);
				break;
		}
	} else if (strcmp(service.Name(), "I/O") == 0) {
		if (!fIONotificationsEnabled)
			return;

		IOScheduler* scheduler = (IOScheduler*)event->GetPointer("scheduler",
			NULL);
		if (scheduler == NULL)
			return;

		// request/operation may be NULL for scheduler add/remove events
		IORequest* request = (IORequest*)event->GetPointer("request", NULL);
		IOOperation* operation = (IOOperation*)event->GetPointer("operation",
			NULL);

		switch (eventCode) {
			case IO_SCHEDULER_ADDED:
				_IOSchedulerAdded(scheduler);
				break;

			case IO_SCHEDULER_REMOVED:
				_IOSchedulerRemoved(scheduler);
				break;

			case IO_SCHEDULER_REQUEST_SCHEDULED:
				_IORequestScheduled(scheduler, request);
				break;

			case IO_SCHEDULER_REQUEST_FINISHED:
				_IORequestFinished(scheduler, request);
				break;

			case IO_SCHEDULER_OPERATION_STARTED:
				_IOOperationStarted(scheduler, request, operation);
				break;

			case IO_SCHEDULER_OPERATION_FINISHED:
				_IOOperationFinished(scheduler, request, operation);
				break;
		}
	}

	// wake the profiler thread, if the buffer got full enough
	_MaybeNotifyProfilerThread();
}
735 
736 
737 // #pragma mark - SchedulerListener interface
738 
739 
/*!	SchedulerListener hook: records a "thread enqueued in run queue" event.
	May be invoked re-entrantly on a CPU that already holds fLock (see
	fReentered), in which case the lock is not acquired again.
*/
void
SystemProfiler::ThreadEnqueuedInRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_enqueued_in_run_queue* event
		= (system_profiler_thread_enqueued_in_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_enqueued_in_run_queue),
				B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;
	event->priority = thread->priority;

	// publish the new fill level to userland
	fHeader->size = fBufferSize;

	// Unblock the profiler thread, if necessary, but don't unblock the thread,
	// if it had been waiting on a condition variable, since then we'd likely
	// deadlock in ConditionVariable::NotifyOne(), as it acquires a static
	// spinlock.
	if (thread->wait.type != THREAD_BLOCK_TYPE_CONDITION_VARIABLE)
		_MaybeNotifyProfilerThreadLocked();
}
769 
770 
/*!	SchedulerListener hook: records a "thread removed from run queue"
	event. May be invoked re-entrantly on a CPU that already holds fLock
	(see fReentered), in which case the lock is not acquired again.
*/
void
SystemProfiler::ThreadRemovedFromRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_removed_from_run_queue* event
		= (system_profiler_thread_removed_from_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_removed_from_run_queue),
				B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;

	// publish the new fill level to userland
	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}
795 
796 
/*!	SchedulerListener hook: records a context switch from \a oldThread to
	\a newThread, including what the old thread is now waiting on. May be
	invoked re-entrantly on a CPU that already holds fLock (see fReentered).
*/
void
SystemProfiler::ThreadScheduled(Thread* oldThread, Thread* newThread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	// If the old thread starts waiting, handle the wait object.
	if (oldThread->state == B_THREAD_WAITING)
		_WaitObjectUsed((addr_t)oldThread->wait.object, oldThread->wait.type);

	system_profiler_thread_scheduled* event
		= (system_profiler_thread_scheduled*)
			_AllocateBuffer(sizeof(system_profiler_thread_scheduled),
				B_SYSTEM_PROFILER_THREAD_SCHEDULED, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = newThread->id;
	event->previous_thread = oldThread->id;
	event->previous_thread_state = oldThread->state;
	event->previous_thread_wait_object_type = oldThread->wait.type;
	event->previous_thread_wait_object = (addr_t)oldThread->wait.object;

	// publish the new fill level to userland
	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}
828 
829 
830 // #pragma mark - WaitObjectListener interface
831 
832 
/*!	WaitObjectListener hook: registers a newly created semaphore as wait
	object. The name parameter is unused; the sem ID serves as the object
	address.
*/
void
SystemProfiler::SemaphoreCreated(sem_id id, const char* name)
{
	_WaitObjectCreated((addr_t)id, THREAD_BLOCK_TYPE_SEMAPHORE);
}
838 
839 
/*!	WaitObjectListener hook: registers a newly initialized condition
	variable as wait object.
*/
void
SystemProfiler::ConditionVariableInitialized(ConditionVariable* variable)
{
	_WaitObjectCreated((addr_t)variable, THREAD_BLOCK_TYPE_CONDITION_VARIABLE);
}
845 
846 
/*!	WaitObjectListener hook: registers a newly initialized mutex as wait
	object.
*/
void
SystemProfiler::MutexInitialized(mutex* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_MUTEX);
}
852 
853 
/*!	WaitObjectListener hook: registers a newly initialized rw_lock as wait
	object.
*/
void
SystemProfiler::RWLockInitialized(rw_lock* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_RW_LOCK);
}
859 
860 
861 // #pragma mark - SystemProfiler private
862 
863 
/*!	Writes a "team added" event (ID, name, args) for \a team into the
	buffer and updates fLastTeamAddedSerialNumber. Returns \c false if the
	event had to be dropped for lack of buffer space.
*/
bool
SystemProfiler::_TeamAdded(Team* team)
{
	TeamLocker teamLocker(team);

	// measure the variable-length strings before taking fLock
	size_t nameLen = strlen(team->Name());
	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	// During the initial scan check whether the team is already gone again.
	// Later this cannot happen, since the team creator notifies us before
	// actually starting the team.
	if (!fTeamNotificationsEnabled && team->state >= TEAM_STATE_DEATH)
		return true;

	if (team->serial_number > fLastTeamAddedSerialNumber)
		fLastTeamAddedSerialNumber = team->serial_number;

	// event size: struct plus name, separator, and args appended to name[]
	system_profiler_team_added* event = (system_profiler_team_added*)
		_AllocateBuffer(
			sizeof(system_profiler_team_added) + nameLen + 1 + argsLen,
			B_SYSTEM_PROFILER_TEAM_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strcpy(event->name, team->Name());
	event->args_offset = nameLen + 1;
	strcpy(event->name + nameLen + 1, team->Args());

	// publish the new fill level to userland
	fHeader->size = fBufferSize;

	return true;
}
899 
900 
901 bool
902 SystemProfiler::_TeamRemoved(Team* team)
903 {
904 	// TODO: It is possible that we get remove notifications for teams that
905 	// had already been removed from the global team list when we did the
906 	// initial scan, but were still in the process of dying. ATM it is not
907 	// really possible to identify such a case.
908 
909 	TeamLocker teamLocker(team);
910 	InterruptsSpinLocker locker(fLock);
911 
912 	system_profiler_team_removed* event = (system_profiler_team_removed*)
913 		_AllocateBuffer(sizeof(system_profiler_team_removed),
914 			B_SYSTEM_PROFILER_TEAM_REMOVED, 0, 0);
915 	if (event == NULL)
916 		return false;
917 
918 	event->team = team->id;
919 
920 	fHeader->size = fBufferSize;
921 
922 	return true;
923 }
924 
925 
/*!	Writes a "team exec" event (main thread name plus new args) for
	\a team into the buffer. Returns \c false if the event had to be
	dropped for lack of buffer space.
*/
bool
SystemProfiler::_TeamExec(Team* team)
{
	TeamLocker teamLocker(team);

	// measure the variable-length args before taking fLock
	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	system_profiler_team_exec* event = (system_profiler_team_exec*)
		_AllocateBuffer(sizeof(system_profiler_team_exec) + argsLen,
			B_SYSTEM_PROFILER_TEAM_EXEC, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strlcpy(event->thread_name, team->main_thread->name,
		sizeof(event->thread_name));
	strcpy(event->args, team->Args());

	// publish the new fill level to userland
	fHeader->size = fBufferSize;

	return true;
}
950 
951 
/*!	Writes a "thread added" event for \a thread into the buffer and updates
	fLastThreadAddedSerialNumber. Returns \c false if the event had to be
	dropped for lack of buffer space.
*/
bool
SystemProfiler::_ThreadAdded(Thread* thread)
{
	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	// During the initial scan check whether the team is already gone again.
	// Later this cannot happen, since the team creator notifies us before
	// actually starting the thread.
	if (!fThreadNotificationsEnabled && !thread->IsAlive())
		return true;

	if (thread->serial_number > fLastThreadAddedSerialNumber)
		fLastThreadAddedSerialNumber = thread->serial_number;

	system_profiler_thread_added* event = (system_profiler_thread_added*)
		_AllocateBuffer(sizeof(system_profiler_thread_added),
			B_SYSTEM_PROFILER_THREAD_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;
	strlcpy(event->name, thread->name, sizeof(event->name));

	// publish the new fill level to userland
	fHeader->size = fBufferSize;

	return true;
}
981 
982 
/*!	Writes a "thread removed" event for \a thread into the buffer. Returns
	\c false if the event had to be dropped for lack of buffer space.
*/
bool
SystemProfiler::_ThreadRemoved(Thread* thread)
{
	// TODO: It is possible that we get remove notifications for threads that
	// had already been removed from the global thread list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	system_profiler_thread_removed* event
		= (system_profiler_thread_removed*)
			_AllocateBuffer(sizeof(system_profiler_thread_removed),
				B_SYSTEM_PROFILER_THREAD_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;

	// publish the new fill level to userland
	fHeader->size = fBufferSize;

	return true;
}
1008 
1009 
1010 bool
1011 SystemProfiler::_ImageAdded(struct image* image)
1012 {
1013 	InterruptsSpinLocker locker(fLock);
1014 
1015 	system_profiler_image_added* event = (system_profiler_image_added*)
1016 		_AllocateBuffer(sizeof(system_profiler_image_added),
1017 			B_SYSTEM_PROFILER_IMAGE_ADDED, 0, 0);
1018 	if (event == NULL)
1019 		return false;
1020 
1021 	event->team = image->team;
1022 	event->info = image->info.basic_info;
1023 
1024 	fHeader->size = fBufferSize;
1025 
1026 	return true;
1027 }
1028 
1029 
1030 bool
1031 SystemProfiler::_ImageRemoved(struct image* image)
1032 {
1033 	InterruptsSpinLocker locker(fLock);
1034 
1035 	system_profiler_image_removed* event = (system_profiler_image_removed*)
1036 		_AllocateBuffer(sizeof(system_profiler_image_removed),
1037 			B_SYSTEM_PROFILER_IMAGE_REMOVED, 0, 0);
1038 	if (event == NULL)
1039 		return false;
1040 
1041 	event->team = image->team;
1042 	event->image = image->info.basic_info.id;
1043 
1044 	fHeader->size = fBufferSize;
1045 
1046 	return true;
1047 }
1048 
1049 
/*!	Writes an "I/O scheduler added" event (ID plus name) for \a scheduler
	into the buffer. Returns \c false if the event had to be dropped for
	lack of buffer space.
*/
bool
SystemProfiler::_IOSchedulerAdded(IOScheduler* scheduler)
{
	// measure the variable-length name before taking fLock
	size_t nameLen = strlen(scheduler->Name());

	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_added* event
		= (system_profiler_io_scheduler_added*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_added) + nameLen,
			B_SYSTEM_PROFILER_IO_SCHEDULER_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();
	strcpy(event->name, scheduler->Name());

	// publish the new fill level to userland
	fHeader->size = fBufferSize;

	return true;
}
1071 
1072 
1073 bool
1074 SystemProfiler::_IOSchedulerRemoved(IOScheduler* scheduler)
1075 {
1076 	InterruptsSpinLocker locker(fLock);
1077 
1078 	system_profiler_io_scheduler_removed* event
1079 		= (system_profiler_io_scheduler_removed*)_AllocateBuffer(
1080 			sizeof(system_profiler_io_scheduler_removed),
1081 			B_SYSTEM_PROFILER_IO_SCHEDULER_REMOVED, 0, 0);
1082 	if (event == NULL)
1083 		return false;
1084 
1085 	event->scheduler = scheduler->ID();
1086 
1087 	fHeader->size = fBufferSize;
1088 
1089 	return true;
1090 }
1091 
1092 
1093 bool
1094 SystemProfiler::_IORequestScheduled(IOScheduler* scheduler, IORequest* request)
1095 {
1096 	InterruptsSpinLocker locker(fLock);
1097 
1098 	system_profiler_io_request_scheduled* event
1099 		= (system_profiler_io_request_scheduled*)_AllocateBuffer(
1100 			sizeof(system_profiler_io_request_scheduled),
1101 			B_SYSTEM_PROFILER_IO_REQUEST_SCHEDULED, 0, 0);
1102 	if (event == NULL)
1103 		return false;
1104 
1105 	IORequestOwner* owner = request->Owner();
1106 
1107 	event->time = system_time_nsecs();
1108 	event->scheduler = scheduler->ID();
1109 	event->team = owner->team;
1110 	event->thread = owner->thread;
1111 	event->request = request;
1112 	event->offset = request->Offset();
1113 	event->length = request->Length();
1114 	event->write = request->IsWrite();
1115 	event->priority = owner->priority;
1116 
1117 	fHeader->size = fBufferSize;
1118 
1119 	return true;
1120 }
1121 
1122 
1123 bool
1124 SystemProfiler::_IORequestFinished(IOScheduler* scheduler, IORequest* request)
1125 {
1126 	InterruptsSpinLocker locker(fLock);
1127 
1128 	system_profiler_io_request_finished* event
1129 		= (system_profiler_io_request_finished*)_AllocateBuffer(
1130 			sizeof(system_profiler_io_request_finished),
1131 			B_SYSTEM_PROFILER_IO_REQUEST_FINISHED, 0, 0);
1132 	if (event == NULL)
1133 		return false;
1134 
1135 	event->time = system_time_nsecs();
1136 	event->scheduler = scheduler->ID();
1137 	event->request = request;
1138 	event->status = request->Status();
1139 	event->transferred = request->TransferredBytes();
1140 
1141 	fHeader->size = fBufferSize;
1142 
1143 	return true;
1144 }
1145 
1146 
1147 bool
1148 SystemProfiler::_IOOperationStarted(IOScheduler* scheduler, IORequest* request,
1149 	IOOperation* operation)
1150 {
1151 	InterruptsSpinLocker locker(fLock);
1152 
1153 	system_profiler_io_operation_started* event
1154 		= (system_profiler_io_operation_started*)_AllocateBuffer(
1155 			sizeof(system_profiler_io_operation_started),
1156 			B_SYSTEM_PROFILER_IO_OPERATION_STARTED, 0, 0);
1157 	if (event == NULL)
1158 		return false;
1159 
1160 	event->time = system_time_nsecs();
1161 	event->scheduler = scheduler->ID();
1162 	event->request = request;
1163 	event->operation = operation;
1164 	event->offset = request->Offset();
1165 	event->length = request->Length();
1166 	event->write = request->IsWrite();
1167 
1168 	fHeader->size = fBufferSize;
1169 
1170 	return true;
1171 }
1172 
1173 
1174 bool
1175 SystemProfiler::_IOOperationFinished(IOScheduler* scheduler, IORequest* request,
1176 	IOOperation* operation)
1177 {
1178 	InterruptsSpinLocker locker(fLock);
1179 
1180 	system_profiler_io_operation_finished* event
1181 		= (system_profiler_io_operation_finished*)_AllocateBuffer(
1182 			sizeof(system_profiler_io_operation_finished),
1183 			B_SYSTEM_PROFILER_IO_OPERATION_FINISHED, 0, 0);
1184 	if (event == NULL)
1185 		return false;
1186 
1187 	event->time = system_time_nsecs();
1188 	event->scheduler = scheduler->ID();
1189 	event->request = request;
1190 	event->operation = operation;
1191 	event->status = request->Status();
1192 	event->transferred = request->TransferredBytes();
1193 
1194 	fHeader->size = fBufferSize;
1195 
1196 	return true;
1197 }
1198 
1199 
// Invalidates the cached info for a wait object that has been (re)created at
// the given address: any stale cache entry for that address is recycled.
// NOTE(review): uses SpinLocker rather than InterruptsSpinLocker, unlike the
// other event hooks -- presumably this listener is only invoked with
// interrupts already disabled; confirm at the WaitObjectListener call sites.
void
SystemProfiler::_WaitObjectCreated(addr_t object, uint32 type)
{
	SpinLocker locker(fLock);

	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If found, remove it and add it to the free list. This might sound weird,
	// but it makes sense, since we lazily track *used* wait objects only.
	// I.e. the object in the table is now guaranteedly obsolete.
	if (waitObject) {
		fWaitObjectTable.RemoveUnchecked(waitObject);
		fUsedWaitObjects.Remove(waitObject);
		fFreeWaitObjects.Add(waitObject, false);
	}
}
1220 
// Notes that a thread blocks on the given wait object: the first time an
// object is seen, a wait object info event (type, address, name) is written
// to the buffer, and the object is entered into the LRU cache of known wait
// objects so subsequent uses don't emit further events.
// NOTE(review): no lock is acquired here, although the cache and the buffer
// are accessed -- presumably the caller (scheduler listener) already holds
// fLock with interrupts disabled; confirm at the call sites.
void
SystemProfiler::_WaitObjectUsed(addr_t object, uint32 type)
{
	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If already known, re-queue it as most recently used and be done.
	if (waitObject != NULL) {
		fUsedWaitObjects.Remove(waitObject);
		fUsedWaitObjects.Add(waitObject);
		return;
	}

	// not known yet -- get the info
	const char* name = NULL;
	const void* referencedObject = NULL;

	switch (type) {
		case THREAD_BLOCK_TYPE_SEMAPHORE:
		{
			name = sem_get_name_unsafe((sem_id)object);
			break;
		}

		case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
		{
			ConditionVariable* variable = (ConditionVariable*)object;
			name = variable->ObjectType();
			referencedObject = variable->Object();
			break;
		}

		case THREAD_BLOCK_TYPE_MUTEX:
		{
			mutex* lock = (mutex*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_RW_LOCK:
		{
			rw_lock* lock = (rw_lock*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_OTHER:
		{
			// by convention the object address is the name string itself
			name = (const char*)(void*)object;
			break;
		}

		case THREAD_BLOCK_TYPE_SNOOZE:
		case THREAD_BLOCK_TYPE_SIGNAL:
		default:
			// nothing interesting to report for these
			return;
	}

	// add the event
	size_t nameLen = name != NULL ? strlen(name) : 0;

	system_profiler_wait_object_info* event
		= (system_profiler_wait_object_info*)
			_AllocateBuffer(sizeof(system_profiler_wait_object_info) + nameLen,
				B_SYSTEM_PROFILER_WAIT_OBJECT_INFO, 0, 0);
	if (event == NULL)
		return;

	event->type = type;
	event->object = object;
	event->referenced_object = (addr_t)referencedObject;
	if (name != NULL)
		strcpy(event->name, name);
	else
		event->name[0] = '\0';

	fHeader->size = fBufferSize;

	// add the wait object

	// get a free one or steal the least recently used one
	// NOTE(review): assumes at least one entry always exists in the free or
	// used list (i.e. the cache was pre-populated, cf. MIN_WAIT_OBJECT_COUNT)
	// -- RemoveHead() returning NULL here would crash; confirm in Init().
	waitObject = fFreeWaitObjects.RemoveHead();
	if (waitObject == NULL) {
		waitObject = fUsedWaitObjects.RemoveHead();
		fWaitObjectTable.RemoveUnchecked(waitObject);
	}

	waitObject->object = object;
	waitObject->type = type;
	fWaitObjectTable.InsertUnchecked(waitObject);
	fUsedWaitObjects.Add(waitObject);
}
1316 
1317 
// Iterator callback reporting an "image added" event for each image already
// loaded when profiling starts. Returns !_ImageAdded(image), i.e. a non-false
// value once the buffer is full -- presumably the iteration protocol stops on
// true; confirm against the image iteration API.
/*static*/ bool
SystemProfiler::_InitialImageIterator(struct image* image, void* cookie)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->fImageNotificationsEnabled = true;
		// Set that here, since the image lock is being held now.
	return !self->_ImageAdded(image);
}
1326 
1327 
// Reserves space for an event with a payload of the given size in the ring
// buffer, writes the event header (event code, cpu), and returns a pointer to
// the payload directly after the header. Returns NULL -- and counts the event
// as dropped -- if there isn't enough contiguous space left.
// NOTE(review): the count parameter is currently unused.
// NOTE(review): presumably the caller must hold fLock; all visible callers
// lock it first -- confirm for the remaining ones.
void*
SystemProfiler::_AllocateBuffer(size_t size, int event, int cpu, int count)
{
	// round the payload up to a multiple of 4 bytes and add the header size
	size = (size + 3) / 4 * 4;
	size += sizeof(system_profiler_event_header);

	// end: offset (relative to fBufferBase) right after the last event
	size_t end = fBufferStart + fBufferSize;
	if (end + size > fBufferCapacity) {
		// Buffer is wrapped or needs wrapping.
		if (end < fBufferCapacity) {
			// not wrapped yet, but needed
			system_profiler_event_header* header
				= (system_profiler_event_header*)(fBufferBase + end);
			header->event = B_SYSTEM_PROFILER_BUFFER_END;
				// marker telling the reader to continue at the buffer start
			fBufferSize = fBufferCapacity - fBufferStart;
			end = 0;
		} else
			end -= fBufferCapacity;

		if (end + size > fBufferStart) {
			// the event would overwrite not yet read data -- drop it
			fDroppedEvents++;
			return NULL;
		}
	}

	system_profiler_event_header* header
		= (system_profiler_event_header*)(fBufferBase + end);
	header->event = event;
	header->cpu = cpu;
	header->size = size - sizeof(system_profiler_event_header);
		// payload size only, excluding the header itself

	fBufferSize += size;

	return header + 1;
		// the payload starts right after the header
}
1363 
1364 
1365 /*static*/ void
1366 SystemProfiler::_InitTimers(void* cookie, int cpu)
1367 {
1368 	SystemProfiler* self = (SystemProfiler*)cookie;
1369 	self->_ScheduleTimer(cpu);
1370 }
1371 
1372 
1373 /*static*/ void
1374 SystemProfiler::_UninitTimers(void* cookie, int cpu)
1375 {
1376 	SystemProfiler* self = (SystemProfiler*)cookie;
1377 
1378 	CPUProfileData& cpuData = self->fCPUData[cpu];
1379 	cancel_timer(&cpuData.timer);
1380 	cpuData.timerScheduled = false;
1381 }
1382 
1383 
1384 void
1385 SystemProfiler::_ScheduleTimer(int cpu)
1386 {
1387 	CPUProfileData& cpuData = fCPUData[cpu];
1388 	cpuData.timerEnd = system_time() + fInterval;
1389 	cpuData.timer.user_data = this;
1390 	add_timer(&cpuData.timer, &_ProfilingEvent, fInterval,
1391 		B_ONE_SHOT_RELATIVE_TIMER);
1392 	cpuData.timerScheduled = true;
1393 }
1394 
1395 
1396 void
1397 SystemProfiler::_DoSample()
1398 {
1399 	Thread* thread = thread_get_current_thread();
1400 	int cpu = thread->cpu->cpu_num;
1401 	CPUProfileData& cpuData = fCPUData[cpu];
1402 
1403 	// get the samples
1404 	int32 count = arch_debug_get_stack_trace(cpuData.buffer, fStackDepth, 1,
1405 		0, STACK_TRACE_KERNEL | STACK_TRACE_USER);
1406 
1407 	InterruptsSpinLocker locker(fLock);
1408 
1409 	system_profiler_samples* event = (system_profiler_samples*)
1410 		_AllocateBuffer(sizeof(system_profiler_samples)
1411 				+ count * sizeof(addr_t),
1412 			B_SYSTEM_PROFILER_SAMPLES, cpu, count);
1413 	if (event == NULL)
1414 		return;
1415 
1416 	event->thread = thread->id;
1417 	memcpy(event->samples, cpuData.buffer, count * sizeof(addr_t));
1418 
1419 	fHeader->size = fBufferSize;
1420 }
1421 
1422 
1423 /*static*/ int32
1424 SystemProfiler::_ProfilingEvent(struct timer* timer)
1425 {
1426 	SystemProfiler* self = (SystemProfiler*)timer->user_data;
1427 
1428 	self->_DoSample();
1429 	self->_ScheduleTimer(timer->cpu);
1430 
1431 	return B_HANDLED_INTERRUPT;
1432 }
1433 
1434 
1435 // #pragma mark - private kernel API
1436 
1437 
1438 #if SYSTEM_PROFILER
1439 
// Starts kernel-driven system profiling into a newly created kernel area of
// the given size, recording the used parameters in sRecordedParameters so the
// session can later be handed to userland via _user_system_profiler_recorded().
status_t
start_system_profiler(size_t areaSize, uint32 stackDepth, bigtime_t interval)
{
	// Deletes the buffer area and the recorded parameters again -- unless
	// detached -- i.e. on every early error return below.
	struct ParameterDeleter {
		ParameterDeleter(area_id area)
			:
			fArea(area),
			fDetached(false)
		{
		}

		~ParameterDeleter()
		{
			if (!fDetached) {
				delete_area(fArea);
				delete sRecordedParameters;
				sRecordedParameters = NULL;
			}
		}

		void Detach()
		{
			fDetached = true;
		}

	private:
		area_id	fArea;
		bool	fDetached;
	};

	// create the buffer area
	void* address;
	area_id area = create_area("kernel profile data", &address,
		B_ANY_KERNEL_ADDRESS, areaSize, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		return area;

	ParameterDeleter parameterDeleter(area);

	// record the parameters, so they can be passed to userland later
	sRecordedParameters = new(std::nothrow) system_profiler_parameters;
	if (sRecordedParameters == NULL)
		return B_NO_MEMORY;

	sRecordedParameters->buffer_area = area;
	sRecordedParameters->flags = B_SYSTEM_PROFILER_TEAM_EVENTS
		| B_SYSTEM_PROFILER_THREAD_EVENTS | B_SYSTEM_PROFILER_IMAGE_EVENTS
		| B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS
		| B_SYSTEM_PROFILER_SAMPLING_EVENTS;
	sRecordedParameters->locking_lookup_size = 4096;
	sRecordedParameters->interval = interval;
	sRecordedParameters->stack_depth = stackDepth;

	// NOTE(review): get_area_info()'s return value is not checked -- it can
	// hardly fail for the area just created above, though.
	area_info areaInfo;
	get_area_info(area, &areaInfo);

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(B_SYSTEM_TEAM,
		areaInfo, *sRecordedParameters);
	if (profiler == NULL)
		return B_NO_MEMORY;

	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	status_t error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;
		// the lock is released before the deleters clean up (reverse
		// construction order), so no heavy work happens under the spinlock

	// success -- keep area, parameters, and profiler alive
	parameterDeleter.Detach();
	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}
1519 
1520 
1521 void
1522 stop_system_profiler()
1523 {
1524 	InterruptsSpinLocker locker(sProfilerLock);
1525 	if (sProfiler == NULL)
1526 		return;
1527 
1528 	SystemProfiler* profiler = sProfiler;
1529 	sProfiler = NULL;
1530 	locker.Unlock();
1531 
1532 	profiler->ReleaseReference();
1533 }
1534 
1535 #endif	// SYSTEM_PROFILER
1536 
1537 
1538 // #pragma mark - syscalls
1539 
1540 
// Syscall: installs a system profiler on behalf of the calling team, writing
// events into the team-owned buffer area named in the parameters.
// Returns B_BUSY if a profiler is already installed, B_BAD_ADDRESS/B_BAD_VALUE
// for invalid parameters.
status_t
_user_system_profiler_start(struct system_profiler_parameters* userParameters)
{
	// copy params to the kernel
	struct system_profiler_parameters parameters;
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters)
		|| user_memcpy(&parameters, userParameters, sizeof(parameters))
			!= B_OK) {
		return B_BAD_ADDRESS;
	}

	// check the parameters
	team_id team = thread_get_current_thread()->team->id;

	area_info areaInfo;
	status_t error = get_area_info(parameters.buffer_area, &areaInfo);
	if (error != B_OK)
		return error;

	// the buffer area must belong to the calling team
	if (areaInfo.team != team)
		return B_BAD_VALUE;

	if ((parameters.flags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0) {
		if (parameters.stack_depth < 1)
			return B_BAD_VALUE;

		// clamp interval and stack depth to supported ranges
		if (parameters.interval < B_DEBUG_MIN_PROFILE_INTERVAL)
			parameters.interval = B_DEBUG_MIN_PROFILE_INTERVAL;

		if (parameters.stack_depth > B_DEBUG_STACK_TRACE_DEPTH)
			parameters.stack_depth = B_DEBUG_STACK_TRACE_DEPTH;
	}

	// quick check to see whether we do already have a profiler installed
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;
	locker.Unlock();

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(team, areaInfo,
		parameters);
	if (profiler == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler -- re-check under the lock, since it wasn't held
	// during the initialization above
	locker.Lock();
	if (sProfiler != NULL)
		return B_BUSY;
		// profilerDeleter cleans up after the locker released the lock

	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}
1602 
1603 
1604 status_t
1605 _user_system_profiler_next_buffer(size_t bytesRead, uint64* _droppedEvents)
1606 {
1607 	if (_droppedEvents != NULL && !IS_USER_ADDRESS(_droppedEvents))
1608 		return B_BAD_ADDRESS;
1609 
1610 	team_id team = thread_get_current_thread()->team->id;
1611 
1612 	InterruptsSpinLocker locker(sProfilerLock);
1613 	if (sProfiler == NULL || sProfiler->TeamID() != team)
1614 		return B_BAD_VALUE;
1615 
1616 	// get a reference to the profiler
1617 	SystemProfiler* profiler = sProfiler;
1618 	BReference<SystemProfiler> reference(profiler);
1619 	locker.Unlock();
1620 
1621 	uint64 droppedEvents;
1622 	status_t error = profiler->NextBuffer(bytesRead,
1623 		_droppedEvents != NULL ? &droppedEvents : NULL);
1624 	if (error == B_OK && _droppedEvents != NULL)
1625 		user_memcpy(_droppedEvents, &droppedEvents, sizeof(droppedEvents));
1626 
1627 	return error;
1628 }
1629 
1630 
1631 status_t
1632 _user_system_profiler_stop()
1633 {
1634 	team_id team = thread_get_current_thread()->team->id;
1635 
1636 	InterruptsSpinLocker locker(sProfilerLock);
1637 	if (sProfiler == NULL || sProfiler->TeamID() != team)
1638 		return B_BAD_VALUE;
1639 
1640 	SystemProfiler* profiler = sProfiler;
1641 	sProfiler = NULL;
1642 	locker.Unlock();
1643 
1644 	profiler->ReleaseReference();
1645 
1646 	return B_OK;
1647 }
1648 
1649 
1650 status_t
1651 _user_system_profiler_recorded(system_profiler_parameters* userParameters)
1652 {
1653 	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters))
1654 		return B_BAD_ADDRESS;
1655 	if (sRecordedParameters == NULL)
1656 		return B_ERROR;
1657 
1658 #if SYSTEM_PROFILER
1659 	stop_system_profiler();
1660 
1661 	// Transfer the area to the userland process
1662 
1663 	void* address;
1664 	area_id newArea = transfer_area(sRecordedParameters->buffer_area, &address,
1665 		B_ANY_ADDRESS, team_get_current_team_id(), true);
1666 	if (newArea < 0)
1667 		return newArea;
1668 
1669 	status_t status = set_area_protection(newArea, B_READ_AREA);
1670 	if (status == B_OK) {
1671 		sRecordedParameters->buffer_area = newArea;
1672 
1673 		status = user_memcpy(userParameters, sRecordedParameters,
1674 			sizeof(system_profiler_parameters));
1675 	}
1676 	if (status != B_OK)
1677 		delete_area(newArea);
1678 
1679 	delete sRecordedParameters;
1680 	sRecordedParameters = NULL;
1681 
1682 	return status;
1683 #else
1684 	return B_NOT_SUPPORTED;
1685 #endif // SYSTEM_PROFILER
1686 }
1687