// xref: /haiku/src/system/kernel/debug/system_profiler.cpp (revision 02354704729d38c3b078c696adc1bbbd33cbcf72)
/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <system_profiler.h>

#include <AutoDeleter.h>
#include <Referenceable.h>

#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>

#include <system_profiler_defs.h>

#include <cpu.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <listeners.h>
#include <Notifications.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <user_debugger.h>
#include <vm/vm.h>

#include <arch/debug.h>

#include "IOSchedulerRoster.h"


// This is the kernel-side implementation of the system profiling support.
// A userland team can register as system profiler, providing an area as a
// buffer for events. Those events are team, thread, and image changes
// (added/removed), periodic sampling of the return address stack for each
// CPU, as well as scheduling and I/O scheduling events.
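//
// A rough sketch of the userland side of the protocol (illustrative only;
// it assumes the _kern_*() wrappers that correspond to the _user_*()
// syscalls at the end of this file and omits all error handling):
//
//	system_profiler_parameters parameters;
//	void* address;
//	parameters.buffer_area = create_area("profiling buffer", &address,
//		B_ANY_ADDRESS, 16 * B_PAGE_SIZE, B_NO_LOCK,
//		B_READ_AREA | B_WRITE_AREA);
//	parameters.flags = B_SYSTEM_PROFILER_TEAM_EVENTS
//		| B_SYSTEM_PROFILER_SAMPLING_EVENTS;
//	parameters.locking_lookup_size = 4096;
//	parameters.interval = 1000;
//	parameters.stack_depth = 5;
//	_kern_system_profiler_start(&parameters);
//
//	uint64 droppedEvents;
//	size_t bytesRead = 0;
//	while (_kern_system_profiler_next_buffer(bytesRead, &droppedEvents)
//			== B_OK) {
//		// The area starts with a system_profiler_buffer_header; parse the
//		// system_profiler_event_header stream at offset "start", "size"
//		// bytes long, and set bytesRead to what was consumed.
//	}
//	_kern_system_profiler_stop();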


class SystemProfiler;


// minimum/maximum size of the table used for wait object caching
#define MIN_WAIT_OBJECT_COUNT	128
#define MAX_WAIT_OBJECT_COUNT	1024


static spinlock sProfilerLock = B_SPINLOCK_INITIALIZER;
static SystemProfiler* sProfiler = NULL;
static struct system_profiler_parameters* sRecordedParameters = NULL;


class SystemProfiler : public BReferenceable, private NotificationListener,
	private SchedulerListener, private WaitObjectListener {
public:
								SystemProfiler(team_id team,
									const area_info& userAreaInfo,
									const system_profiler_parameters&
										parameters);
								~SystemProfiler();

			team_id				TeamID() const	{ return fTeam; }

			status_t			Init();
			status_t			NextBuffer(size_t bytesRead,
									uint64* _droppedEvents);

private:
	virtual	void				EventOccurred(NotificationService& service,
									const KMessage* event);

	virtual	void				ThreadEnqueuedInRunQueue(Thread* thread);
	virtual	void				ThreadRemovedFromRunQueue(Thread* thread);
	virtual	void				ThreadScheduled(Thread* oldThread,
									Thread* newThread);

	virtual	void				SemaphoreCreated(sem_id id,
									const char* name);
	virtual	void				ConditionVariableInitialized(
									ConditionVariable* variable);
	virtual	void				MutexInitialized(mutex* lock);
	virtual	void				RWLockInitialized(rw_lock* lock);

			bool				_TeamAdded(Team* team);
			bool				_TeamRemoved(Team* team);
			bool				_TeamExec(Team* team);

			bool				_ThreadAdded(Thread* thread);
			bool				_ThreadRemoved(Thread* thread);

			bool				_ImageAdded(struct image* image);
			bool				_ImageRemoved(struct image* image);

			bool				_IOSchedulerAdded(IOScheduler* scheduler);
			bool				_IOSchedulerRemoved(IOScheduler* scheduler);
			bool				_IORequestScheduled(IOScheduler* scheduler,
									IORequest* request);
			bool				_IORequestFinished(IOScheduler* scheduler,
									IORequest* request);
			bool				_IOOperationStarted(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);
			bool				_IOOperationFinished(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);

			void				_WaitObjectCreated(addr_t object, uint32 type);
			void				_WaitObjectUsed(addr_t object, uint32 type);

	inline	void				_MaybeNotifyProfilerThreadLocked();
	inline	void				_MaybeNotifyProfilerThread();

	static	bool				_InitialImageIterator(struct image* image,
									void* cookie);

			void*				_AllocateBuffer(size_t size, int event, int cpu,
									int count);

	static	void				_InitTimers(void* cookie, int cpu);
	static	void				_UninitTimers(void* cookie, int cpu);
			void				_ScheduleTimer(int cpu);

			void				_DoSample();

	static	int32				_ProfilingEvent(struct timer* timer);

private:
			struct CPUProfileData {
				struct timer	timer;
				bigtime_t		timerEnd;
				bool			timerScheduled;
				addr_t			buffer[B_DEBUG_STACK_TRACE_DEPTH];
			};

			struct WaitObjectKey {
				addr_t	object;
				uint32	type;
			};

			struct WaitObject : DoublyLinkedListLinkImpl<WaitObject>,
					WaitObjectKey {
				struct WaitObject* hash_link;
			};

			struct WaitObjectTableDefinition {
				typedef WaitObjectKey	KeyType;
				typedef	WaitObject		ValueType;

				size_t HashKey(const WaitObjectKey& key) const
				{
					return (size_t)key.object ^ (size_t)key.type;
				}

				size_t Hash(const WaitObject* value) const
				{
					return HashKey(*value);
				}

				bool Compare(const WaitObjectKey& key,
					const WaitObject* value) const
				{
					return value->type == key.type
						&& value->object == key.object;
				}

				WaitObject*& GetLink(WaitObject* value) const
				{
					return value->hash_link;
				}
			};

			typedef DoublyLinkedList<WaitObject> WaitObjectList;
			typedef BOpenHashTable<WaitObjectTableDefinition> WaitObjectTable;

private:
			spinlock			fLock;
			team_id				fTeam;
			area_id				fUserArea;
			area_id				fKernelArea;
			size_t				fAreaSize;
			uint32				fFlags;
			uint32				fStackDepth;
			bigtime_t			fInterval;
			system_profiler_buffer_header* fHeader;
			uint8*				fBufferBase;
			size_t				fBufferCapacity;
			size_t				fBufferStart;
			size_t				fBufferSize;
			uint64				fDroppedEvents;
			int64				fLastTeamAddedSerialNumber;
			int64				fLastThreadAddedSerialNumber;
			bool				fTeamNotificationsRequested;
			bool				fTeamNotificationsEnabled;
			bool				fThreadNotificationsRequested;
			bool				fThreadNotificationsEnabled;
			bool				fImageNotificationsRequested;
			bool				fImageNotificationsEnabled;
			bool				fIONotificationsRequested;
			bool				fIONotificationsEnabled;
			bool				fSchedulerNotificationsRequested;
			bool				fWaitObjectNotificationsRequested;
			Thread* volatile	fWaitingProfilerThread;
			bool				fProfilingActive;
			bool				fReentered[SMP_MAX_CPUS];
			CPUProfileData		fCPUData[SMP_MAX_CPUS];
			WaitObject*			fWaitObjectBuffer;
			int32				fWaitObjectCount;
			WaitObjectList		fUsedWaitObjects;
			WaitObjectList		fFreeWaitObjects;
			WaitObjectTable		fWaitObjectTable;
};


/*!	Notifies the profiler thread when the profiling buffer is full enough.
	The caller must hold fLock.
*/
inline void
SystemProfiler::_MaybeNotifyProfilerThreadLocked()
{
	// If the buffer is full enough, notify the profiler.
	if (fWaitingProfilerThread != NULL && fBufferSize > fBufferCapacity / 2) {
		int cpu = smp_get_current_cpu();
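		// While we unblock the profiler thread below, the scheduler may call
		// back into our listener hooks on this CPU; fReentered tells them
		// that fLock is already held (see ThreadEnqueuedInRunQueue() etc.).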
		fReentered[cpu] = true;

		Thread* profilerThread = fWaitingProfilerThread;
		fWaitingProfilerThread = NULL;

		SpinLocker _(profilerThread->scheduler_lock);
		thread_unblock_locked(profilerThread, B_OK);

		fReentered[cpu] = false;
	}
}


inline void
SystemProfiler::_MaybeNotifyProfilerThread()
{
	if (fWaitingProfilerThread == NULL)
		return;

	InterruptsSpinLocker locker(fLock);

	_MaybeNotifyProfilerThreadLocked();
}


// #pragma mark - SystemProfiler public


SystemProfiler::SystemProfiler(team_id team, const area_info& userAreaInfo,
	const system_profiler_parameters& parameters)
	:
	fTeam(team),
	fUserArea(userAreaInfo.area),
	fKernelArea(-1),
	fAreaSize(userAreaInfo.size),
	fFlags(parameters.flags),
	fStackDepth(parameters.stack_depth),
	fInterval(parameters.interval),
	fHeader(NULL),
	fBufferBase(NULL),
	fBufferCapacity(0),
	fBufferStart(0),
	fBufferSize(0),
	fDroppedEvents(0),
	fLastTeamAddedSerialNumber(0),
	fLastThreadAddedSerialNumber(0),
	fTeamNotificationsRequested(false),
	fTeamNotificationsEnabled(false),
	fThreadNotificationsRequested(false),
	fThreadNotificationsEnabled(false),
	fImageNotificationsRequested(false),
	fImageNotificationsEnabled(false),
	fIONotificationsRequested(false),
	fIONotificationsEnabled(false),
	fSchedulerNotificationsRequested(false),
	fWaitObjectNotificationsRequested(false),
	fWaitingProfilerThread(NULL),
	fWaitObjectBuffer(NULL),
	fWaitObjectCount(0),
	fUsedWaitObjects(),
	fFreeWaitObjects(),
	fWaitObjectTable()
{
	B_INITIALIZE_SPINLOCK(&fLock);

	memset(fReentered, 0, sizeof(fReentered));

	// compute the number of wait objects we want to cache
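	// Each cached entry needs a WaitObject plus, on average, 1.5 hash table
	// slots (Init() sizes the table at fWaitObjectCount * 3 / 2), which is
	// what the divisor below accounts for.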
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		fWaitObjectCount = parameters.locking_lookup_size
			/ (sizeof(WaitObject) + (sizeof(void*) * 3 / 2));
		if (fWaitObjectCount < MIN_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MIN_WAIT_OBJECT_COUNT;
		if (fWaitObjectCount > MAX_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MAX_WAIT_OBJECT_COUNT;
	}
}


SystemProfiler::~SystemProfiler()
{
	// Wake up the user thread, if it is waiting, and mark profiling
	// inactive.
	InterruptsSpinLocker locker(fLock);
	if (fWaitingProfilerThread != NULL) {
		thread_unblock(fWaitingProfilerThread, B_OK);
		fWaitingProfilerThread = NULL;
	}
	fProfilingActive = false;
	locker.Unlock();

	// stop scheduler listening
	if (fSchedulerNotificationsRequested)
		scheduler_remove_listener(this);

	// stop wait object listening
	if (fWaitObjectNotificationsRequested) {
		InterruptsSpinLocker locker(gWaitObjectListenerLock);
		remove_wait_object_listener(this);
	}

	// deactivate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_UninitTimers, this);

	// cancel notifications
	NotificationManager& notificationManager
		= NotificationManager::Manager();

	// images
	if (fImageNotificationsRequested) {
		fImageNotificationsRequested = false;
		notificationManager.RemoveListener("images", NULL, *this);
	}

	// threads
	if (fThreadNotificationsRequested) {
		fThreadNotificationsRequested = false;
		notificationManager.RemoveListener("threads", NULL, *this);
	}

	// teams
	if (fTeamNotificationsRequested) {
		fTeamNotificationsRequested = false;
		notificationManager.RemoveListener("teams", NULL, *this);
	}

	// I/O
	if (fIONotificationsRequested) {
		fIONotificationsRequested = false;
		notificationManager.RemoveListener("I/O", NULL, *this);
	}

	// delete wait object related allocations
	fWaitObjectTable.Clear();
	delete[] fWaitObjectBuffer;

	// unlock the memory and delete the area
	if (fKernelArea >= 0) {
		unlock_memory(fHeader, fAreaSize, B_READ_DEVICE);
		delete_area(fKernelArea);
		fKernelArea = -1;
	}
}


status_t
SystemProfiler::Init()
{
	// clone the user area
	void* areaBase;
	fKernelArea = clone_area("profiling samples", &areaBase,
		B_ANY_KERNEL_ADDRESS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		fUserArea);
	if (fKernelArea < 0)
		return fKernelArea;

	// we need the memory locked
	status_t error = lock_memory(areaBase, fAreaSize, B_READ_DEVICE);
	if (error != B_OK) {
		delete_area(fKernelArea);
		fKernelArea = -1;
		return error;
	}

	// the buffer is ready for use
	fHeader = (system_profiler_buffer_header*)areaBase;
	fBufferBase = (uint8*)(fHeader + 1);
	fBufferCapacity = fAreaSize - (fBufferBase - (uint8*)areaBase);
	fHeader->start = 0;
	fHeader->size = 0;

	// allocate the wait object buffer and init the hash table
	if (fWaitObjectCount > 0) {
		fWaitObjectBuffer = new(std::nothrow) WaitObject[fWaitObjectCount];
		if (fWaitObjectBuffer == NULL)
			return B_NO_MEMORY;

		for (int32 i = 0; i < fWaitObjectCount; i++)
			fFreeWaitObjects.Add(fWaitObjectBuffer + i);

		error = fWaitObjectTable.Init(fWaitObjectCount * 3 / 2);
		if (error != B_OK)
			return error;
	}

	// start listening for notifications

	// teams
	NotificationManager& notificationManager
		= NotificationManager::Manager();
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		error = notificationManager.AddListener("teams",
			TEAM_ADDED | TEAM_REMOVED | TEAM_EXEC, *this);
		if (error != B_OK)
			return error;
		fTeamNotificationsRequested = true;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		error = notificationManager.AddListener("threads",
			THREAD_ADDED | THREAD_REMOVED, *this);
		if (error != B_OK)
			return error;
		fThreadNotificationsRequested = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		error = notificationManager.AddListener("images",
			IMAGE_ADDED | IMAGE_REMOVED, *this);
		if (error != B_OK)
			return error;
		fImageNotificationsRequested = true;
	}

	// I/O events
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		error = notificationManager.AddListener("I/O",
			IO_SCHEDULER_ADDED | IO_SCHEDULER_REMOVED
				| IO_SCHEDULER_REQUEST_SCHEDULED | IO_SCHEDULER_REQUEST_FINISHED
				| IO_SCHEDULER_OPERATION_STARTED
				| IO_SCHEDULER_OPERATION_FINISHED,
			*this);
		if (error != B_OK)
			return error;
		fIONotificationsRequested = true;
	}

	// We need to fill the buffer with the initial state of teams, threads,
	// and images.

	// teams
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		TeamListIterator iterator;
		while (Team* team = iterator.Next()) {
			locker.Unlock();

			bool added = _TeamAdded(team);

			// release the reference returned by the iterator
			team->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fTeamNotificationsEnabled = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		if (image_iterate_through_images(&_InitialImageIterator, this) != NULL)
			return B_BUFFER_OVERFLOW;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		ThreadListIterator iterator;
		while (Thread* thread = iterator.Next()) {
			locker.Unlock();

			bool added = _ThreadAdded(thread);

			// release the reference returned by the iterator
			thread->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fThreadNotificationsEnabled = true;
	}

	fProfilingActive = true;

	// start scheduler and wait object listening
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		scheduler_add_listener(this);
		fSchedulerNotificationsRequested = true;

		InterruptsSpinLocker waitObjectLocker(gWaitObjectListenerLock);
		add_wait_object_listener(this);
		fWaitObjectNotificationsRequested = true;
		waitObjectLocker.Unlock();

		// fake schedule events for the initially running threads
		int32 cpuCount = smp_get_num_cpus();
		for (int32 i = 0; i < cpuCount; i++) {
			Thread* thread = gCPU[i].running_thread;
			if (thread != NULL)
				ThreadScheduled(thread, thread);
		}
	}

	// I/O scheduling
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		IOSchedulerRoster* roster = IOSchedulerRoster::Default();
		AutoLocker<IOSchedulerRoster> rosterLocker(roster);

		for (IOSchedulerList::ConstIterator it
				= roster->SchedulerList().GetIterator();
			IOScheduler* scheduler = it.Next();) {
			_IOSchedulerAdded(scheduler);
		}

		fIONotificationsEnabled = true;
	}

	// activate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_InitTimers, this);

	return B_OK;
}


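/*!	Passes the next portion of the profiling buffer to the profiler thread.
	\a bytesRead is the number of bytes the caller has consumed from the
	previous portion. If less than half of the buffer is used, the call
	blocks interruptibly (in 1 s intervals) until enough data have
	accumulated, or, after a timeout, until the buffer is at least non-empty.
*/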
status_t
SystemProfiler::NextBuffer(size_t bytesRead, uint64* _droppedEvents)
{
	InterruptsSpinLocker locker(fLock);

	if (fWaitingProfilerThread != NULL || !fProfilingActive
		|| bytesRead > fBufferSize) {
		return B_BAD_VALUE;
	}

	fBufferSize -= bytesRead;
	fBufferStart += bytesRead;
	if (fBufferStart > fBufferCapacity)
		fBufferStart -= fBufferCapacity;
	fHeader->size = fBufferSize;
	fHeader->start = fBufferStart;

	// already enough data in the buffer to return?
	if (fBufferSize > fBufferCapacity / 2)
		return B_OK;

	// Wait until the buffer gets too full or an error or a timeout occurs.
	while (true) {
		Thread* thread = thread_get_current_thread();
		fWaitingProfilerThread = thread;

		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "system profiler buffer");

		locker.Unlock();

		status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT, 1000000);

		locker.Lock();

		if (error == B_OK) {
			// the caller has unset fWaitingProfilerThread for us
			break;
		}

		fWaitingProfilerThread = NULL;

		if (error != B_TIMED_OUT)
			return error;

		// just a timeout -- return if the buffer is not empty
		if (fBufferSize > 0)
			break;
	}

	if (_droppedEvents != NULL) {
		*_droppedEvents = fDroppedEvents;
		fDroppedEvents = 0;
	}

	return B_OK;
}


// #pragma mark - NotificationListener interface


void
SystemProfiler::EventOccurred(NotificationService& service,
	const KMessage* event)
{
	int32 eventCode;
	if (event->FindInt32("event", &eventCode) != B_OK)
		return;

	if (strcmp(service.Name(), "teams") == 0) {
		Team* team = (Team*)event->GetPointer("teamStruct", NULL);
		if (team == NULL)
			return;

		switch (eventCode) {
			case TEAM_ADDED:
				if (fTeamNotificationsEnabled)
					_TeamAdded(team);
				break;

			case TEAM_REMOVED:
				if (team->id == fTeam) {
					// The profiling team is gone -- uninstall the profiler!
					InterruptsSpinLocker locker(sProfilerLock);
					if (sProfiler != this)
						return;

					sProfiler = NULL;
					locker.Unlock();

					ReleaseReference();
					return;
				}

				// When we're still doing the initial team list scan, we are
				// also interested in removals that happened to teams we have
				// already seen.
				if (fTeamNotificationsEnabled
					|| team->serial_number <= fLastTeamAddedSerialNumber) {
					_TeamRemoved(team);
				}
				break;

			case TEAM_EXEC:
				if (fTeamNotificationsEnabled)
					_TeamExec(team);
				break;
		}
	} else if (strcmp(service.Name(), "threads") == 0) {
		Thread* thread = (Thread*)event->GetPointer("threadStruct", NULL);
		if (thread == NULL)
			return;

		switch (eventCode) {
			case THREAD_ADDED:
				if (fThreadNotificationsEnabled)
					_ThreadAdded(thread);
				break;

			case THREAD_REMOVED:
				// When we're still doing the initial thread list scan, we are
				// also interested in removals that happened to threads we have
				// already seen.
				if (fThreadNotificationsEnabled
					|| thread->serial_number <= fLastThreadAddedSerialNumber) {
					_ThreadRemoved(thread);
				}
				break;
		}
	} else if (strcmp(service.Name(), "images") == 0) {
		if (!fImageNotificationsEnabled)
			return;

		struct image* image = (struct image*)event->GetPointer(
			"imageStruct", NULL);
		if (image == NULL)
			return;

		switch (eventCode) {
			case IMAGE_ADDED:
				_ImageAdded(image);
				break;

			case IMAGE_REMOVED:
				_ImageRemoved(image);
				break;
		}
	} else if (strcmp(service.Name(), "I/O") == 0) {
		if (!fIONotificationsEnabled)
			return;

		IOScheduler* scheduler = (IOScheduler*)event->GetPointer("scheduler",
			NULL);
		if (scheduler == NULL)
			return;

		IORequest* request = (IORequest*)event->GetPointer("request", NULL);
		IOOperation* operation = (IOOperation*)event->GetPointer("operation",
			NULL);

		switch (eventCode) {
			case IO_SCHEDULER_ADDED:
				_IOSchedulerAdded(scheduler);
				break;

			case IO_SCHEDULER_REMOVED:
				_IOSchedulerRemoved(scheduler);
				break;

			case IO_SCHEDULER_REQUEST_SCHEDULED:
				_IORequestScheduled(scheduler, request);
				break;

			case IO_SCHEDULER_REQUEST_FINISHED:
				_IORequestFinished(scheduler, request);
				break;

			case IO_SCHEDULER_OPERATION_STARTED:
				_IOOperationStarted(scheduler, request, operation);
				break;

			case IO_SCHEDULER_OPERATION_FINISHED:
				_IOOperationFinished(scheduler, request, operation);
				break;
		}
	}

	_MaybeNotifyProfilerThread();
}


// #pragma mark - SchedulerListener interface


void
SystemProfiler::ThreadEnqueuedInRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_enqueued_in_run_queue* event
		= (system_profiler_thread_enqueued_in_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_enqueued_in_run_queue),
				B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;
	event->priority = thread->priority;

	fHeader->size = fBufferSize;

	// Unblock the profiler thread if necessary. Skip that, however, if the
	// enqueued thread had been waiting on a condition variable, since then
	// we'd likely deadlock in ConditionVariable::NotifyOne(), as it acquires
	// a static spinlock.
	if (thread->wait.type != THREAD_BLOCK_TYPE_CONDITION_VARIABLE)
		_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadRemovedFromRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_removed_from_run_queue* event
		= (system_profiler_thread_removed_from_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_removed_from_run_queue),
				B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadScheduled(Thread* oldThread, Thread* newThread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	// If the old thread starts waiting, handle the wait object.
	if (oldThread->state == B_THREAD_WAITING)
		_WaitObjectUsed((addr_t)oldThread->wait.object, oldThread->wait.type);

	system_profiler_thread_scheduled* event
		= (system_profiler_thread_scheduled*)
			_AllocateBuffer(sizeof(system_profiler_thread_scheduled),
				B_SYSTEM_PROFILER_THREAD_SCHEDULED, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = newThread->id;
	event->previous_thread = oldThread->id;
	event->previous_thread_state = oldThread->state;
	event->previous_thread_wait_object_type = oldThread->wait.type;
	event->previous_thread_wait_object = (addr_t)oldThread->wait.object;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


// #pragma mark - WaitObjectListener interface


void
SystemProfiler::SemaphoreCreated(sem_id id, const char* name)
{
	_WaitObjectCreated((addr_t)id, THREAD_BLOCK_TYPE_SEMAPHORE);
}


void
SystemProfiler::ConditionVariableInitialized(ConditionVariable* variable)
{
	_WaitObjectCreated((addr_t)variable, THREAD_BLOCK_TYPE_CONDITION_VARIABLE);
}


void
SystemProfiler::MutexInitialized(mutex* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_MUTEX);
}


void
SystemProfiler::RWLockInitialized(rw_lock* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_RW_LOCK);
}


// #pragma mark - SystemProfiler private


bool
SystemProfiler::_TeamAdded(Team* team)
{
	TeamLocker teamLocker(team);

	size_t nameLen = strlen(team->Name());
	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	// During the initial scan, check whether the team is already gone again.
	// Later this cannot happen, since the team creator notifies us before
	// actually starting the team.
	if (!fTeamNotificationsEnabled && team->state >= TEAM_STATE_DEATH)
		return true;

	if (team->serial_number > fLastTeamAddedSerialNumber)
		fLastTeamAddedSerialNumber = team->serial_number;

	system_profiler_team_added* event = (system_profiler_team_added*)
		_AllocateBuffer(
			sizeof(system_profiler_team_added) + nameLen + 1 + argsLen,
			B_SYSTEM_PROFILER_TEAM_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strcpy(event->name, team->Name());
	event->args_offset = nameLen + 1;
	strcpy(event->name + nameLen + 1, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamRemoved(Team* team)
{
	// TODO: It is possible that we get remove notifications for teams that
	// had already been removed from the global team list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	TeamLocker teamLocker(team);
	InterruptsSpinLocker locker(fLock);

	system_profiler_team_removed* event = (system_profiler_team_removed*)
		_AllocateBuffer(sizeof(system_profiler_team_removed),
			B_SYSTEM_PROFILER_TEAM_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamExec(Team* team)
{
	TeamLocker teamLocker(team);

	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	system_profiler_team_exec* event = (system_profiler_team_exec*)
		_AllocateBuffer(sizeof(system_profiler_team_exec) + argsLen,
			B_SYSTEM_PROFILER_TEAM_EXEC, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strlcpy(event->thread_name, team->main_thread->name,
		sizeof(event->thread_name));
	strcpy(event->args, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadAdded(Thread* thread)
{
	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	// During the initial scan, check whether the thread is already gone
	// again. Later this cannot happen, since the thread creator notifies us
	// before actually starting the thread.
	if (!fThreadNotificationsEnabled && !thread->IsAlive())
		return true;

	if (thread->serial_number > fLastThreadAddedSerialNumber)
		fLastThreadAddedSerialNumber = thread->serial_number;

	system_profiler_thread_added* event = (system_profiler_thread_added*)
		_AllocateBuffer(sizeof(system_profiler_thread_added),
			B_SYSTEM_PROFILER_THREAD_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;
	strlcpy(event->name, thread->name, sizeof(event->name));

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadRemoved(Thread* thread)
{
	// TODO: It is possible that we get remove notifications for threads that
	// had already been removed from the global thread list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	system_profiler_thread_removed* event
		= (system_profiler_thread_removed*)
			_AllocateBuffer(sizeof(system_profiler_thread_removed),
				B_SYSTEM_PROFILER_THREAD_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageAdded(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_added* event = (system_profiler_image_added*)
		_AllocateBuffer(sizeof(system_profiler_image_added),
			B_SYSTEM_PROFILER_IMAGE_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->info = image->info.basic_info;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageRemoved(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_removed* event = (system_profiler_image_removed*)
		_AllocateBuffer(sizeof(system_profiler_image_removed),
			B_SYSTEM_PROFILER_IMAGE_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->image = image->info.basic_info.id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerAdded(IOScheduler* scheduler)
{
	size_t nameLen = strlen(scheduler->Name());

	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_added* event
		= (system_profiler_io_scheduler_added*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_added) + nameLen,
			B_SYSTEM_PROFILER_IO_SCHEDULER_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();
	strcpy(event->name, scheduler->Name());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerRemoved(IOScheduler* scheduler)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_removed* event
		= (system_profiler_io_scheduler_removed*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_removed),
			B_SYSTEM_PROFILER_IO_SCHEDULER_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestScheduled(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_scheduled* event
		= (system_profiler_io_request_scheduled*)_AllocateBuffer(
			sizeof(system_profiler_io_request_scheduled),
			B_SYSTEM_PROFILER_IO_REQUEST_SCHEDULED, 0, 0);
	if (event == NULL)
		return false;

	IORequestOwner* owner = request->Owner();

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->team = owner->team;
	event->thread = owner->thread;
	event->request = request;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();
	event->priority = owner->priority;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestFinished(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_finished* event
		= (system_profiler_io_request_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_request_finished),
			B_SYSTEM_PROFILER_IO_REQUEST_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationStarted(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_started* event
		= (system_profiler_io_operation_started*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_started),
			B_SYSTEM_PROFILER_IO_OPERATION_STARTED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationFinished(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_finished* event
		= (system_profiler_io_operation_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_finished),
			B_SYSTEM_PROFILER_IO_OPERATION_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


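/*!	Invoked when a wait object (semaphore, condition variable, mutex, or
	rw_lock) is newly created or initialized. Since wait objects are only
	cached lazily when a thread actually waits on them, a new object at an
	already cached address simply invalidates the stale cache entry.
*/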
void
SystemProfiler::_WaitObjectCreated(addr_t object, uint32 type)
{
	SpinLocker locker(fLock);

	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If found, remove it and add it to the free list. This might sound
	// weird, but it makes sense, since we only track *used* wait objects
	// lazily -- the entry in the table is now guaranteed to be obsolete.
	if (waitObject != NULL) {
		fWaitObjectTable.RemoveUnchecked(waitObject);
		fUsedWaitObjects.Remove(waitObject);
		fFreeWaitObjects.Add(waitObject, false);
	}
}


void
SystemProfiler::_WaitObjectUsed(addr_t object, uint32 type)
{
	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If already known, re-queue it as most recently used and be done.
	if (waitObject != NULL) {
		fUsedWaitObjects.Remove(waitObject);
		fUsedWaitObjects.Add(waitObject);
		return;
	}

	// not known yet -- get the info
	const char* name = NULL;
	const void* referencedObject = NULL;

	switch (type) {
		case THREAD_BLOCK_TYPE_SEMAPHORE:
		{
			name = sem_get_name_unsafe((sem_id)object);
			break;
		}

		case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
		{
			ConditionVariable* variable = (ConditionVariable*)object;
			name = variable->ObjectType();
			referencedObject = variable->Object();
			break;
		}

		case THREAD_BLOCK_TYPE_MUTEX:
		{
			mutex* lock = (mutex*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_RW_LOCK:
		{
			rw_lock* lock = (rw_lock*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_OTHER:
		{
			name = (const char*)(void*)object;
			break;
		}

		case THREAD_BLOCK_TYPE_SNOOZE:
		case THREAD_BLOCK_TYPE_SIGNAL:
		default:
			return;
	}

	// add the event
	size_t nameLen = name != NULL ? strlen(name) : 0;

	system_profiler_wait_object_info* event
		= (system_profiler_wait_object_info*)
			_AllocateBuffer(sizeof(system_profiler_wait_object_info) + nameLen,
				B_SYSTEM_PROFILER_WAIT_OBJECT_INFO, 0, 0);
	if (event == NULL)
		return;

	event->type = type;
	event->object = object;
	event->referenced_object = (addr_t)referencedObject;
	if (name != NULL)
		strcpy(event->name, name);
	else
		event->name[0] = '\0';

	fHeader->size = fBufferSize;

	// add the wait object

	// get a free one or steal the least recently used one
	waitObject = fFreeWaitObjects.RemoveHead();
	if (waitObject == NULL) {
		waitObject = fUsedWaitObjects.RemoveHead();
		fWaitObjectTable.RemoveUnchecked(waitObject);
	}

	waitObject->object = object;
	waitObject->type = type;
	fWaitObjectTable.InsertUnchecked(waitObject);
	fUsedWaitObjects.Add(waitObject);
}


/*static*/ bool
SystemProfiler::_InitialImageIterator(struct image* image, void* cookie)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->fImageNotificationsEnabled = true;
		// Set that here, since the image lock is being held now.
	return !self->_ImageAdded(image);
}


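/*!	Allocates space for an event with a payload of \a size bytes in the ring
	buffer and initializes its system_profiler_event_header. If the event
	does not fit into the tail end of the buffer, a
	B_SYSTEM_PROFILER_BUFFER_END marker is written there and the allocation
	wraps around to the buffer start. Returns NULL (and counts the event as
	dropped) when the buffer is full. The caller must hold fLock.
*/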
void*
SystemProfiler::_AllocateBuffer(size_t size, int event, int cpu, int count)
{
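	// Round the payload size up to a multiple of 4 and add the event header,
	// so that every header in the event stream stays 4-byte aligned.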
	size = (size + 3) / 4 * 4;
	size += sizeof(system_profiler_event_header);

	size_t end = fBufferStart + fBufferSize;
	if (end + size > fBufferCapacity) {
		// Buffer is wrapped or needs wrapping.
		if (end < fBufferCapacity) {
			// not wrapped yet, but needed
			system_profiler_event_header* header
				= (system_profiler_event_header*)(fBufferBase + end);
			header->event = B_SYSTEM_PROFILER_BUFFER_END;
			fBufferSize = fBufferCapacity - fBufferStart;
			end = 0;
		} else
			end -= fBufferCapacity;

		if (end + size > fBufferStart) {
			fDroppedEvents++;
			return NULL;
		}
	}

	system_profiler_event_header* header
		= (system_profiler_event_header*)(fBufferBase + end);
	header->event = event;
	header->cpu = cpu;
	header->size = size - sizeof(system_profiler_event_header);

	fBufferSize += size;

	return header + 1;
}


/*static*/ void
SystemProfiler::_InitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->_ScheduleTimer(cpu);
}


/*static*/ void
SystemProfiler::_UninitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;

	CPUProfileData& cpuData = self->fCPUData[cpu];
	cancel_timer(&cpuData.timer);
	cpuData.timerScheduled = false;
}


void
SystemProfiler::_ScheduleTimer(int cpu)
{
	CPUProfileData& cpuData = fCPUData[cpu];
	cpuData.timerEnd = system_time() + fInterval;
	cpuData.timer.user_data = this;
	add_timer(&cpuData.timer, &_ProfilingEvent, fInterval,
		B_ONE_SHOT_RELATIVE_TIMER);
	cpuData.timerScheduled = true;
}


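/*!	Invoked from the per-CPU profiling timer: records a stack trace (kernel
	and userland return addresses, at most fStackDepth entries) for the
	thread currently running on this CPU and appends it as a samples event.
*/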
void
SystemProfiler::_DoSample()
{
	Thread* thread = thread_get_current_thread();
	int cpu = thread->cpu->cpu_num;
	CPUProfileData& cpuData = fCPUData[cpu];

	// get the samples
	int32 count = arch_debug_get_stack_trace(cpuData.buffer, fStackDepth, 1,
		0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

	InterruptsSpinLocker locker(fLock);

	system_profiler_samples* event = (system_profiler_samples*)
		_AllocateBuffer(sizeof(system_profiler_samples)
				+ count * sizeof(addr_t),
			B_SYSTEM_PROFILER_SAMPLES, cpu, count);
	if (event == NULL)
		return;

	event->thread = thread->id;
	memcpy(event->samples, cpuData.buffer, count * sizeof(addr_t));

	fHeader->size = fBufferSize;
}


/*static*/ int32
SystemProfiler::_ProfilingEvent(struct timer* timer)
{
	SystemProfiler* self = (SystemProfiler*)timer->user_data;

	self->_DoSample();
	self->_ScheduleTimer(timer->cpu);

	return B_HANDLED_INTERRUPT;
}


// #pragma mark - private kernel API


#if SYSTEM_PROFILER

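/*!	Starts profiling on behalf of the kernel itself (the profiling team is
	B_SYSTEM_TEAM). The chosen parameters are kept in sRecordedParameters,
	so that a userland team can take over the recorded buffer later via
	_user_system_profiler_recorded().
*/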
status_t
start_system_profiler(size_t areaSize, uint32 stackDepth, bigtime_t interval)
{
	struct ParameterDeleter {
		ParameterDeleter(area_id area)
			:
			fArea(area),
			fDetached(false)
		{
		}

		~ParameterDeleter()
		{
			if (!fDetached) {
				delete_area(fArea);
				delete sRecordedParameters;
				sRecordedParameters = NULL;
			}
		}

		void Detach()
		{
			fDetached = true;
		}

	private:
		area_id	fArea;
		bool	fDetached;
	};

	void* address;
	area_id area = create_area("kernel profile data", &address,
		B_ANY_KERNEL_ADDRESS, areaSize, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		return area;

	ParameterDeleter parameterDeleter(area);

	sRecordedParameters = new(std::nothrow) system_profiler_parameters;
	if (sRecordedParameters == NULL)
		return B_NO_MEMORY;

	sRecordedParameters->buffer_area = area;
	sRecordedParameters->flags = B_SYSTEM_PROFILER_TEAM_EVENTS
		| B_SYSTEM_PROFILER_THREAD_EVENTS | B_SYSTEM_PROFILER_IMAGE_EVENTS
		| B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS
		| B_SYSTEM_PROFILER_SAMPLING_EVENTS;
	sRecordedParameters->locking_lookup_size = 4096;
	sRecordedParameters->interval = interval;
	sRecordedParameters->stack_depth = stackDepth;

	area_info areaInfo;
	get_area_info(area, &areaInfo);

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(B_SYSTEM_TEAM,
		areaInfo, *sRecordedParameters);
	if (profiler == NULL)
		return B_NO_MEMORY;

	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	status_t error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;

	parameterDeleter.Detach();
	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


void
stop_system_profiler()
{
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL)
		return;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();
}

#endif	// SYSTEM_PROFILER


// #pragma mark - syscalls


status_t
_user_system_profiler_start(struct system_profiler_parameters* userParameters)
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	// copy params to the kernel
	struct system_profiler_parameters parameters;
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters)
		|| user_memcpy(&parameters, userParameters, sizeof(parameters))
			!= B_OK) {
		return B_BAD_ADDRESS;
	}

	// check the parameters
	team_id team = thread_get_current_thread()->team->id;

	area_info areaInfo;
	status_t error = get_area_info(parameters.buffer_area, &areaInfo);
	if (error != B_OK)
		return error;

	if (areaInfo.team != team)
		return B_BAD_VALUE;

	if ((parameters.flags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0) {
		if (parameters.stack_depth < 1)
			return B_BAD_VALUE;

		if (parameters.interval < B_DEBUG_MIN_PROFILE_INTERVAL)
			parameters.interval = B_DEBUG_MIN_PROFILE_INTERVAL;

		if (parameters.stack_depth > B_DEBUG_STACK_TRACE_DEPTH)
			parameters.stack_depth = B_DEBUG_STACK_TRACE_DEPTH;
	}

	// quick check to see whether we already have a profiler installed
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;
	locker.Unlock();

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(team, areaInfo,
		parameters);
	if (profiler == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	locker.Lock();
	if (sProfiler != NULL)
		return B_BUSY;

	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


status_t
_user_system_profiler_next_buffer(size_t bytesRead, uint64* _droppedEvents)
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	if (_droppedEvents != NULL && !IS_USER_ADDRESS(_droppedEvents))
		return B_BAD_ADDRESS;

	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	// get a reference to the profiler
	SystemProfiler* profiler = sProfiler;
	BReference<SystemProfiler> reference(profiler);
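		// The reference keeps the profiler alive while we potentially block
		// in NextBuffer() below, even if profiling is stopped concurrently
		// by another thread.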
	locker.Unlock();

	uint64 droppedEvents = 0;
	status_t error = profiler->NextBuffer(bytesRead,
		_droppedEvents != NULL ? &droppedEvents : NULL);
	if (error == B_OK && _droppedEvents != NULL)
		user_memcpy(_droppedEvents, &droppedEvents, sizeof(droppedEvents));

	return error;
}


status_t
_user_system_profiler_stop()
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();

	return B_OK;
}


status_t
_user_system_profiler_recorded(system_profiler_parameters* userParameters)
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters))
		return B_BAD_ADDRESS;
	if (sRecordedParameters == NULL)
		return B_ERROR;

#if SYSTEM_PROFILER
	stop_system_profiler();

	// Transfer the area to the userland process

	void* address;
	area_id newArea = transfer_area(sRecordedParameters->buffer_area, &address,
		B_ANY_ADDRESS, team_get_current_team_id(), true);
	if (newArea < 0)
		return newArea;

	status_t status = set_area_protection(newArea, B_READ_AREA);
	if (status == B_OK) {
		sRecordedParameters->buffer_area = newArea;

		status = user_memcpy(userParameters, sRecordedParameters,
			sizeof(system_profiler_parameters));
	}
	if (status != B_OK)
		delete_area(newArea);

	delete sRecordedParameters;
	sRecordedParameters = NULL;

	return status;
#else
	return B_NOT_SUPPORTED;
#endif // SYSTEM_PROFILER
}