// xref: /haiku/src/system/kernel/debug/system_profiler.cpp (revision 342a1b221b5bb385410f758df2c625b70cafdd03)
/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <system_profiler.h>

#include <AutoDeleter.h>
#include <Referenceable.h>

#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>

#include <system_profiler_defs.h>

#include <cpu.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <listeners.h>
#include <Notifications.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <user_debugger.h>
#include <vm/vm.h>

#include <arch/debug.h>

#include "IOSchedulerRoster.h"


// This is the kernel-side implementation of the system profiling support.
// A userland team can register as system profiler, providing an area as buffer
// for events. Those events are team, thread, and image changes (added/removed),
// periodic sampling of the return address stack for each CPU, as well as
// scheduling and I/O scheduling events.
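//
// A rough sketch of the userland side (assuming the private
// _kern_system_profiler_*() syscall wrappers that correspond to the syscalls
// at the end of this file):
//
//	area_id area = create_area("profiling buffer", &address, B_ANY_ADDRESS,
//		areaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
//
//	system_profiler_parameters parameters;
//	parameters.buffer_area = area;
//	parameters.flags = B_SYSTEM_PROFILER_TEAM_EVENTS
//		| B_SYSTEM_PROFILER_SAMPLING_EVENTS /* | ... */;
//	parameters.locking_lookup_size = 4096;
//	parameters.interval = 1000;
//	parameters.stack_depth = 5;
//
//	if (_kern_system_profiler_start(&parameters) == B_OK) {
//		size_t bytesProcessed = 0;
//		uint64 droppedEvents = 0;
//		while (_kern_system_profiler_next_buffer(bytesProcessed,
//				&droppedEvents) == B_OK) {
//			// process the system_profiler_event_header sequence in the
//			// buffer, then report how much was consumed
//			bytesProcessed = ...;
//		}
//		_kern_system_profiler_stop();
//	}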


class SystemProfiler;


// minimum/maximum number of wait objects we want to cache
#define MIN_WAIT_OBJECT_COUNT	128
#define MAX_WAIT_OBJECT_COUNT	1024


static spinlock sProfilerLock = B_SPINLOCK_INITIALIZER;
static SystemProfiler* sProfiler = NULL;
static struct system_profiler_parameters* sRecordedParameters = NULL;


class SystemProfiler : public BReferenceable, private NotificationListener,
	private SchedulerListener, private WaitObjectListener {
public:
								SystemProfiler(team_id team,
									const area_info& userAreaInfo,
									const system_profiler_parameters&
										parameters);
								~SystemProfiler();

			team_id				TeamID() const	{ return fTeam; }

			status_t			Init();
			status_t			NextBuffer(size_t bytesRead,
									uint64* _droppedEvents);

private:
	virtual	void				EventOccurred(NotificationService& service,
									const KMessage* event);

	virtual	void				ThreadEnqueuedInRunQueue(Thread* thread);
	virtual	void				ThreadRemovedFromRunQueue(Thread* thread);
	virtual	void				ThreadScheduled(Thread* oldThread,
									Thread* newThread);

	virtual	void				SemaphoreCreated(sem_id id,
									const char* name);
	virtual	void				ConditionVariableInitialized(
									ConditionVariable* variable);
	virtual	void				MutexInitialized(mutex* lock);
	virtual	void				RWLockInitialized(rw_lock* lock);

			bool				_TeamAdded(Team* team);
			bool				_TeamRemoved(Team* team);
			bool				_TeamExec(Team* team);

			bool				_ThreadAdded(Thread* thread);
			bool				_ThreadRemoved(Thread* thread);

			bool				_ImageAdded(struct image* image);
			bool				_ImageRemoved(struct image* image);

			bool				_IOSchedulerAdded(IOScheduler* scheduler);
			bool				_IOSchedulerRemoved(IOScheduler* scheduler);
			bool				_IORequestScheduled(IOScheduler* scheduler,
									IORequest* request);
			bool				_IORequestFinished(IOScheduler* scheduler,
									IORequest* request);
			bool				_IOOperationStarted(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);
			bool				_IOOperationFinished(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);

			void				_WaitObjectCreated(addr_t object, uint32 type);
			void				_WaitObjectUsed(addr_t object, uint32 type);

	inline	void				_MaybeNotifyProfilerThreadLocked();
	inline	void				_MaybeNotifyProfilerThread();

	static	bool				_InitialImageIterator(struct image* image,
									void* cookie);

			void*				_AllocateBuffer(size_t size, int event, int cpu,
									int count);

	static	void				_InitTimers(void* cookie, int cpu);
	static	void				_UninitTimers(void* cookie, int cpu);
			void				_ScheduleTimer(int cpu);

			void				_DoSample();

	static	int32				_ProfilingEvent(struct timer* timer);

private:
			struct CPUProfileData {
				struct timer	timer;
				bigtime_t		timerEnd;
				bool			timerScheduled;
				addr_t			buffer[B_DEBUG_STACK_TRACE_DEPTH];
			};

			struct WaitObjectKey {
				addr_t	object;
				uint32	type;
			};

			struct WaitObject : DoublyLinkedListLinkImpl<WaitObject>,
					WaitObjectKey {
				struct WaitObject* hash_link;
			};

			struct WaitObjectTableDefinition {
				typedef WaitObjectKey	KeyType;
				typedef	WaitObject		ValueType;

				size_t HashKey(const WaitObjectKey& key) const
				{
					return (size_t)key.object ^ (size_t)key.type;
				}

				size_t Hash(const WaitObject* value) const
				{
					return HashKey(*value);
				}

				bool Compare(const WaitObjectKey& key,
					const WaitObject* value) const
				{
					return value->type == key.type
						&& value->object == key.object;
				}

				WaitObject*& GetLink(WaitObject* value) const
				{
					return value->hash_link;
				}
			};

			typedef DoublyLinkedList<WaitObject> WaitObjectList;
			typedef BOpenHashTable<WaitObjectTableDefinition> WaitObjectTable;

private:
			spinlock			fLock;
			team_id				fTeam;
			area_id				fUserArea;
			area_id				fKernelArea;
			size_t				fAreaSize;
			uint32				fFlags;
			uint32				fStackDepth;
			bigtime_t			fInterval;
			system_profiler_buffer_header* fHeader;
			uint8*				fBufferBase;
			size_t				fBufferCapacity;
			size_t				fBufferStart;
			size_t				fBufferSize;
			uint64				fDroppedEvents;
			int64				fLastTeamAddedSerialNumber;
			int64				fLastThreadAddedSerialNumber;
			bool				fTeamNotificationsRequested;
			bool				fTeamNotificationsEnabled;
			bool				fThreadNotificationsRequested;
			bool				fThreadNotificationsEnabled;
			bool				fImageNotificationsRequested;
			bool				fImageNotificationsEnabled;
			bool				fIONotificationsRequested;
			bool				fIONotificationsEnabled;
			bool				fSchedulerNotificationsRequested;
			bool				fWaitObjectNotificationsRequested;
			Thread* volatile	fWaitingProfilerThread;
			bool				fProfilingActive;
			bool				fReentered[SMP_MAX_CPUS];
			CPUProfileData		fCPUData[SMP_MAX_CPUS];
			WaitObject*			fWaitObjectBuffer;
			int32				fWaitObjectCount;
			WaitObjectList		fUsedWaitObjects;
			WaitObjectList		fFreeWaitObjects;
			WaitObjectTable		fWaitObjectTable;
};


/*!	Notifies the profiler thread when the profiling buffer is full enough.
	The caller must hold fLock.
*/
inline void
SystemProfiler::_MaybeNotifyProfilerThreadLocked()
{
	// If the buffer is full enough, notify the profiler.
	if (fWaitingProfilerThread != NULL && fBufferSize > fBufferCapacity / 2) {
		int cpu = smp_get_current_cpu();
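		// Mark this CPU as re-entering the profiler: unblocking the profiler
		// thread below re-triggers our scheduler callbacks, and fReentered
		// tells them that fLock is already held (cf.
		// ThreadEnqueuedInRunQueue()).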
		fReentered[cpu] = true;

		Thread* profilerThread = fWaitingProfilerThread;
		fWaitingProfilerThread = NULL;

		SpinLocker _(profilerThread->scheduler_lock);
		thread_unblock_locked(profilerThread, B_OK);

		fReentered[cpu] = false;
	}
}


inline void
SystemProfiler::_MaybeNotifyProfilerThread()
{
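	// unsynchronized pre-check -- a stale read is harmless here, since
	// _MaybeNotifyProfilerThreadLocked() repeats the check with fLock held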
	if (fWaitingProfilerThread == NULL)
		return;

	InterruptsSpinLocker locker(fLock);

	_MaybeNotifyProfilerThreadLocked();
}


// #pragma mark - SystemProfiler public


SystemProfiler::SystemProfiler(team_id team, const area_info& userAreaInfo,
	const system_profiler_parameters& parameters)
	:
	fTeam(team),
	fUserArea(userAreaInfo.area),
	fKernelArea(-1),
	fAreaSize(userAreaInfo.size),
	fFlags(parameters.flags),
	fStackDepth(parameters.stack_depth),
	fInterval(parameters.interval),
	fHeader(NULL),
	fBufferBase(NULL),
	fBufferCapacity(0),
	fBufferStart(0),
	fBufferSize(0),
	fDroppedEvents(0),
	fLastTeamAddedSerialNumber(0),
	fLastThreadAddedSerialNumber(0),
	fTeamNotificationsRequested(false),
	fTeamNotificationsEnabled(false),
	fThreadNotificationsRequested(false),
	fThreadNotificationsEnabled(false),
	fImageNotificationsRequested(false),
	fImageNotificationsEnabled(false),
	fIONotificationsRequested(false),
	fIONotificationsEnabled(false),
	fSchedulerNotificationsRequested(false),
	fWaitObjectNotificationsRequested(false),
	fWaitingProfilerThread(NULL),
	fWaitObjectBuffer(NULL),
	fWaitObjectCount(0),
	fUsedWaitObjects(),
	fFreeWaitObjects(),
	fWaitObjectTable()
{
	B_INITIALIZE_SPINLOCK(&fLock);

	memset(fReentered, 0, sizeof(fReentered));

	// compute the number of wait objects we want to cache; each one needs
	// room for its entry plus 3/2 hash table slots (cf. the table size
	// chosen in Init())
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		fWaitObjectCount = parameters.locking_lookup_size
			/ (sizeof(WaitObject) + (sizeof(void*) * 3 / 2));
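		// (e.g. with the 4096 byte default from start_system_profiler() and
		// a sizeof(WaitObject) of roughly 40 bytes on a 64 bit system this
		// yields about 78 objects, which the clamping below raises to
		// MIN_WAIT_OBJECT_COUNT)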
		if (fWaitObjectCount < MIN_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MIN_WAIT_OBJECT_COUNT;
		if (fWaitObjectCount > MAX_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MAX_WAIT_OBJECT_COUNT;
	}
}


SystemProfiler::~SystemProfiler()
{
	// Wake up the user thread, if it is waiting, and mark profiling
	// inactive.
	InterruptsSpinLocker locker(fLock);
	if (fWaitingProfilerThread != NULL) {
		thread_unblock(fWaitingProfilerThread, B_OK);
		fWaitingProfilerThread = NULL;
	}
	fProfilingActive = false;
	locker.Unlock();

	// stop scheduler listening
	if (fSchedulerNotificationsRequested)
		scheduler_remove_listener(this);

	// stop wait object listening
	if (fWaitObjectNotificationsRequested) {
		InterruptsSpinLocker locker(gWaitObjectListenerLock);
		remove_wait_object_listener(this);
	}

	// deactivate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_UninitTimers, this);

	// cancel notifications
	NotificationManager& notificationManager
		= NotificationManager::Manager();

	// images
	if (fImageNotificationsRequested) {
		fImageNotificationsRequested = false;
		notificationManager.RemoveListener("images", NULL, *this);
	}

	// threads
	if (fThreadNotificationsRequested) {
		fThreadNotificationsRequested = false;
		notificationManager.RemoveListener("threads", NULL, *this);
	}

	// teams
	if (fTeamNotificationsRequested) {
		fTeamNotificationsRequested = false;
		notificationManager.RemoveListener("teams", NULL, *this);
	}

	// I/O
	if (fIONotificationsRequested) {
		fIONotificationsRequested = false;
		notificationManager.RemoveListener("I/O", NULL, *this);
	}

	// delete wait object related allocations
	fWaitObjectTable.Clear();
	delete[] fWaitObjectBuffer;

	// unlock the memory and delete the area
	if (fKernelArea >= 0) {
		unlock_memory(fHeader, fAreaSize, B_READ_DEVICE);
		delete_area(fKernelArea);
		fKernelArea = -1;
	}
}


status_t
SystemProfiler::Init()
{
	// clone the user area
	void* areaBase;
	fKernelArea = clone_area("profiling samples", &areaBase,
		B_ANY_KERNEL_ADDRESS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		fUserArea);
	if (fKernelArea < 0)
		return fKernelArea;

	// we need the memory locked
	status_t error = lock_memory(areaBase, fAreaSize, B_READ_DEVICE);
	if (error != B_OK) {
		delete_area(fKernelArea);
		fKernelArea = -1;
		return error;
	}

	// the buffer is ready for use
	fHeader = (system_profiler_buffer_header*)areaBase;
	fBufferBase = (uint8*)(fHeader + 1);
	fBufferCapacity = fAreaSize - (fBufferBase - (uint8*)areaBase);
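	// fHeader->start/size mirror fBufferStart/fBufferSize after every change,
	// so that the userland profiler can locate the valid part of the ring
	// buffer.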
	fHeader->start = 0;
	fHeader->size = 0;

	// allocate the wait object buffer and init the hash table
	if (fWaitObjectCount > 0) {
		fWaitObjectBuffer = new(std::nothrow) WaitObject[fWaitObjectCount];
		if (fWaitObjectBuffer == NULL)
			return B_NO_MEMORY;

		for (int32 i = 0; i < fWaitObjectCount; i++)
			fFreeWaitObjects.Add(fWaitObjectBuffer + i);

		error = fWaitObjectTable.Init(fWaitObjectCount * 3 / 2);
		if (error != B_OK)
			return error;
	}

	// start listening for notifications

	// teams
	NotificationManager& notificationManager
		= NotificationManager::Manager();
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		error = notificationManager.AddListener("teams",
			TEAM_ADDED | TEAM_REMOVED | TEAM_EXEC, *this);
		if (error != B_OK)
			return error;
		fTeamNotificationsRequested = true;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		error = notificationManager.AddListener("threads",
			THREAD_ADDED | THREAD_REMOVED, *this);
		if (error != B_OK)
			return error;
		fThreadNotificationsRequested = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		error = notificationManager.AddListener("images",
			IMAGE_ADDED | IMAGE_REMOVED, *this);
		if (error != B_OK)
			return error;
		fImageNotificationsRequested = true;
	}

	// I/O events
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		error = notificationManager.AddListener("I/O",
			IO_SCHEDULER_ADDED | IO_SCHEDULER_REMOVED
				| IO_SCHEDULER_REQUEST_SCHEDULED | IO_SCHEDULER_REQUEST_FINISHED
				| IO_SCHEDULER_OPERATION_STARTED
				| IO_SCHEDULER_OPERATION_FINISHED,
			*this);
		if (error != B_OK)
			return error;
		fIONotificationsRequested = true;
	}

	// We need to fill the buffer with the initial state of teams, threads,
	// and images.

	// teams
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		TeamListIterator iterator;
		while (Team* team = iterator.Next()) {
			locker.Unlock();

			bool added = _TeamAdded(team);

			// release the reference returned by the iterator
			team->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fTeamNotificationsEnabled = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		if (image_iterate_through_images(&_InitialImageIterator, this) != NULL)
			return B_BUFFER_OVERFLOW;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		ThreadListIterator iterator;
		while (Thread* thread = iterator.Next()) {
			locker.Unlock();

			bool added = _ThreadAdded(thread);

			// release the reference returned by the iterator
			thread->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fThreadNotificationsEnabled = true;
	}

	fProfilingActive = true;

	// start scheduler and wait object listening
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		scheduler_add_listener(this);
		fSchedulerNotificationsRequested = true;

		InterruptsSpinLocker waitObjectLocker(gWaitObjectListenerLock);
		add_wait_object_listener(this);
		fWaitObjectNotificationsRequested = true;
		waitObjectLocker.Unlock();

		// fake schedule events for the initially running threads
		int32 cpuCount = smp_get_num_cpus();
		for (int32 i = 0; i < cpuCount; i++) {
			Thread* thread = gCPU[i].running_thread;
			if (thread != NULL)
				ThreadScheduled(thread, thread);
		}
	}

	// I/O scheduling
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		IOSchedulerRoster* roster = IOSchedulerRoster::Default();
		AutoLocker<IOSchedulerRoster> rosterLocker(roster);

		for (IOSchedulerList::ConstIterator it
				= roster->SchedulerList().GetIterator();
			IOScheduler* scheduler = it.Next();) {
			_IOSchedulerAdded(scheduler);
		}

		fIONotificationsEnabled = true;
	}

	// activate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_InitTimers, this);

	return B_OK;
}


status_t
SystemProfiler::NextBuffer(size_t bytesRead, uint64* _droppedEvents)
{
	InterruptsSpinLocker locker(fLock);

	if (fWaitingProfilerThread != NULL || !fProfilingActive
		|| bytesRead > fBufferSize) {
		return B_BAD_VALUE;
	}

	fBufferSize -= bytesRead;
	fBufferStart += bytesRead;
	if (fBufferStart > fBufferCapacity)
		fBufferStart -= fBufferCapacity;
	fHeader->size = fBufferSize;
	fHeader->start = fBufferStart;

	// already enough data in the buffer to return?
	if (fBufferSize > fBufferCapacity / 2)
		return B_OK;

	// Wait until the buffer gets too full or an error or a timeout occurs.
	while (true) {
		Thread* thread = thread_get_current_thread();
		fWaitingProfilerThread = thread;

		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "system profiler buffer");

		locker.Unlock();

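		// Block, but wake up at least once a second, so that a slowly
		// filling buffer is still delivered in a timely fashion (see the
		// timeout handling below).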
		status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT, 1000000);

		locker.Lock();

		if (error == B_OK) {
			// the caller has unset fWaitingProfilerThread for us
			break;
		}

		fWaitingProfilerThread = NULL;

		if (error != B_TIMED_OUT)
			return error;

		// just a timeout -- return if the buffer is not empty
		if (fBufferSize > 0)
			break;
	}

	if (_droppedEvents != NULL) {
		*_droppedEvents = fDroppedEvents;
		fDroppedEvents = 0;
	}

	return B_OK;
}


// #pragma mark - NotificationListener interface


void
SystemProfiler::EventOccurred(NotificationService& service,
	const KMessage* event)
{
	int32 eventCode;
	if (event->FindInt32("event", &eventCode) != B_OK)
		return;

	if (strcmp(service.Name(), "teams") == 0) {
		Team* team = (Team*)event->GetPointer("teamStruct", NULL);
		if (team == NULL)
			return;

		switch (eventCode) {
			case TEAM_ADDED:
				if (fTeamNotificationsEnabled)
					_TeamAdded(team);
				break;

			case TEAM_REMOVED:
				if (team->id == fTeam) {
					// The profiling team is gone -- uninstall the profiler!
					InterruptsSpinLocker locker(sProfilerLock);
					if (sProfiler != this)
						return;

					sProfiler = NULL;
					locker.Unlock();

					ReleaseReference();
					return;
				}

				// When we're still doing the initial team list scan, we are
				// also interested in removals that happened to teams we have
				// already seen.
				if (fTeamNotificationsEnabled
					|| team->serial_number <= fLastTeamAddedSerialNumber) {
					_TeamRemoved(team);
				}
				break;

			case TEAM_EXEC:
				if (fTeamNotificationsEnabled)
					_TeamExec(team);
				break;
		}
	} else if (strcmp(service.Name(), "threads") == 0) {
		Thread* thread = (Thread*)event->GetPointer("threadStruct", NULL);
		if (thread == NULL)
			return;

		switch (eventCode) {
			case THREAD_ADDED:
				if (fThreadNotificationsEnabled)
					_ThreadAdded(thread);
				break;

			case THREAD_REMOVED:
				// When we're still doing the initial thread list scan, we are
				// also interested in removals that happened to threads we have
				// already seen.
				if (fThreadNotificationsEnabled
					|| thread->serial_number <= fLastThreadAddedSerialNumber) {
					_ThreadRemoved(thread);
				}
				break;
		}
	} else if (strcmp(service.Name(), "images") == 0) {
		if (!fImageNotificationsEnabled)
			return;

		struct image* image = (struct image*)event->GetPointer(
			"imageStruct", NULL);
		if (image == NULL)
			return;

		switch (eventCode) {
			case IMAGE_ADDED:
				_ImageAdded(image);
				break;

			case IMAGE_REMOVED:
				_ImageRemoved(image);
				break;
		}
	} else if (strcmp(service.Name(), "I/O") == 0) {
		if (!fIONotificationsEnabled)
			return;

		IOScheduler* scheduler = (IOScheduler*)event->GetPointer("scheduler",
			NULL);
		if (scheduler == NULL)
			return;

		IORequest* request = (IORequest*)event->GetPointer("request", NULL);
		IOOperation* operation = (IOOperation*)event->GetPointer("operation",
			NULL);

		switch (eventCode) {
			case IO_SCHEDULER_ADDED:
				_IOSchedulerAdded(scheduler);
				break;

			case IO_SCHEDULER_REMOVED:
				_IOSchedulerRemoved(scheduler);
				break;

			case IO_SCHEDULER_REQUEST_SCHEDULED:
				_IORequestScheduled(scheduler, request);
				break;

			case IO_SCHEDULER_REQUEST_FINISHED:
				_IORequestFinished(scheduler, request);
				break;

			case IO_SCHEDULER_OPERATION_STARTED:
				_IOOperationStarted(scheduler, request, operation);
				break;

			case IO_SCHEDULER_OPERATION_FINISHED:
				_IOOperationFinished(scheduler, request, operation);
				break;
		}
	}

	_MaybeNotifyProfilerThread();
}


// #pragma mark - SchedulerListener interface


void
SystemProfiler::ThreadEnqueuedInRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_enqueued_in_run_queue* event
		= (system_profiler_thread_enqueued_in_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_enqueued_in_run_queue),
				B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;
	event->priority = thread->priority;

	fHeader->size = fBufferSize;

	// Unblock the profiler thread, if necessary, but don't do so if the
	// enqueued thread had been waiting on a condition variable, since then
	// we'd likely deadlock in ConditionVariable::NotifyOne(), as it acquires
	// a static spinlock.
	if (thread->wait.type != THREAD_BLOCK_TYPE_CONDITION_VARIABLE)
		_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadRemovedFromRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_removed_from_run_queue* event
		= (system_profiler_thread_removed_from_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_removed_from_run_queue),
				B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadScheduled(Thread* oldThread, Thread* newThread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	// If the old thread starts waiting, handle the wait object.
	if (oldThread->state == B_THREAD_WAITING)
		_WaitObjectUsed((addr_t)oldThread->wait.object, oldThread->wait.type);

	system_profiler_thread_scheduled* event
		= (system_profiler_thread_scheduled*)
			_AllocateBuffer(sizeof(system_profiler_thread_scheduled),
				B_SYSTEM_PROFILER_THREAD_SCHEDULED, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = newThread->id;
	event->previous_thread = oldThread->id;
	event->previous_thread_state = oldThread->state;
	event->previous_thread_wait_object_type = oldThread->wait.type;
	event->previous_thread_wait_object = (addr_t)oldThread->wait.object;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


// #pragma mark - WaitObjectListener interface


void
SystemProfiler::SemaphoreCreated(sem_id id, const char* name)
{
	_WaitObjectCreated((addr_t)id, THREAD_BLOCK_TYPE_SEMAPHORE);
}


void
SystemProfiler::ConditionVariableInitialized(ConditionVariable* variable)
{
	_WaitObjectCreated((addr_t)variable, THREAD_BLOCK_TYPE_CONDITION_VARIABLE);
}


void
SystemProfiler::MutexInitialized(mutex* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_MUTEX);
}


void
SystemProfiler::RWLockInitialized(rw_lock* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_RW_LOCK);
}


// #pragma mark - SystemProfiler private


bool
SystemProfiler::_TeamAdded(Team* team)
{
	TeamLocker teamLocker(team);

	size_t nameLen = strlen(team->Name());
	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	// During the initial scan check whether the team is already gone again.
	// Later this cannot happen, since the team creator notifies us before
	// actually starting the team.
	if (!fTeamNotificationsEnabled && team->state >= TEAM_STATE_DEATH)
		return true;

	if (team->serial_number > fLastTeamAddedSerialNumber)
		fLastTeamAddedSerialNumber = team->serial_number;

	system_profiler_team_added* event = (system_profiler_team_added*)
		_AllocateBuffer(
			sizeof(system_profiler_team_added) + nameLen + 1 + argsLen,
			B_SYSTEM_PROFILER_TEAM_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strcpy(event->name, team->Name());
	event->args_offset = nameLen + 1;
	strcpy(event->name + nameLen + 1, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamRemoved(Team* team)
{
	// TODO: It is possible that we get remove notifications for teams that
	// had already been removed from the global team list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	TeamLocker teamLocker(team);
	InterruptsSpinLocker locker(fLock);

	system_profiler_team_removed* event = (system_profiler_team_removed*)
		_AllocateBuffer(sizeof(system_profiler_team_removed),
			B_SYSTEM_PROFILER_TEAM_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamExec(Team* team)
{
	TeamLocker teamLocker(team);

	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	system_profiler_team_exec* event = (system_profiler_team_exec*)
		_AllocateBuffer(sizeof(system_profiler_team_exec) + argsLen,
			B_SYSTEM_PROFILER_TEAM_EXEC, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strlcpy(event->thread_name, team->main_thread->name,
		sizeof(event->thread_name));
	strcpy(event->args, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadAdded(Thread* thread)
{
	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	// During the initial scan check whether the thread is already gone again.
	// Later this cannot happen, since the thread creator notifies us before
	// actually starting the thread.
	if (!fThreadNotificationsEnabled && !thread->IsAlive())
		return true;

	if (thread->serial_number > fLastThreadAddedSerialNumber)
		fLastThreadAddedSerialNumber = thread->serial_number;

	system_profiler_thread_added* event = (system_profiler_thread_added*)
		_AllocateBuffer(sizeof(system_profiler_thread_added),
			B_SYSTEM_PROFILER_THREAD_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;
	strlcpy(event->name, thread->name, sizeof(event->name));
	{
		SpinLocker timeLocker(thread->time_lock);
		event->cpu_time = thread->CPUTime(false);
	}

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadRemoved(Thread* thread)
{
	// TODO: It is possible that we get remove notifications for threads that
	// had already been removed from the global thread list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	system_profiler_thread_removed* event
		= (system_profiler_thread_removed*)
			_AllocateBuffer(sizeof(system_profiler_thread_removed),
				B_SYSTEM_PROFILER_THREAD_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;
	{
		SpinLocker timeLocker(thread->time_lock);
		event->cpu_time = thread->CPUTime(false);
	}

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageAdded(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_added* event = (system_profiler_image_added*)
		_AllocateBuffer(sizeof(system_profiler_image_added),
			B_SYSTEM_PROFILER_IMAGE_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->info = image->info.basic_info;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageRemoved(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_removed* event = (system_profiler_image_removed*)
		_AllocateBuffer(sizeof(system_profiler_image_removed),
			B_SYSTEM_PROFILER_IMAGE_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->image = image->info.basic_info.id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerAdded(IOScheduler* scheduler)
{
	size_t nameLen = strlen(scheduler->Name());

	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_added* event
		= (system_profiler_io_scheduler_added*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_added) + nameLen,
			B_SYSTEM_PROFILER_IO_SCHEDULER_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();
	strcpy(event->name, scheduler->Name());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerRemoved(IOScheduler* scheduler)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_removed* event
		= (system_profiler_io_scheduler_removed*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_removed),
			B_SYSTEM_PROFILER_IO_SCHEDULER_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestScheduled(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_scheduled* event
		= (system_profiler_io_request_scheduled*)_AllocateBuffer(
			sizeof(system_profiler_io_request_scheduled),
			B_SYSTEM_PROFILER_IO_REQUEST_SCHEDULED, 0, 0);
	if (event == NULL)
		return false;

	IORequestOwner* owner = request->Owner();

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->team = owner->team;
	event->thread = owner->thread;
	event->request = request;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();
	event->priority = owner->priority;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestFinished(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_finished* event
		= (system_profiler_io_request_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_request_finished),
			B_SYSTEM_PROFILER_IO_REQUEST_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationStarted(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_started* event
		= (system_profiler_io_operation_started*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_started),
			B_SYSTEM_PROFILER_IO_OPERATION_STARTED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationFinished(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_finished* event
		= (system_profiler_io_operation_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_finished),
			B_SYSTEM_PROFILER_IO_OPERATION_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


void
SystemProfiler::_WaitObjectCreated(addr_t object, uint32 type)
{
	SpinLocker locker(fLock);

	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If found, remove it and add it to the free list. This might sound weird,
	// but it makes sense, since we lazily track *used* wait objects only.
	// I.e. the object in the table is now guaranteed to be obsolete.
	if (waitObject) {
		fWaitObjectTable.RemoveUnchecked(waitObject);
		fUsedWaitObjects.Remove(waitObject);
		fFreeWaitObjects.Add(waitObject, false);
	}
}
void
SystemProfiler::_WaitObjectUsed(addr_t object, uint32 type)
{
	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If already known, re-queue it as most recently used and be done.
	if (waitObject != NULL) {
		fUsedWaitObjects.Remove(waitObject);
		fUsedWaitObjects.Add(waitObject);
		return;
	}

	// not known yet -- get the info
	const char* name = NULL;
	const void* referencedObject = NULL;

	switch (type) {
		case THREAD_BLOCK_TYPE_SEMAPHORE:
		{
			name = sem_get_name_unsafe((sem_id)object);
			break;
		}

		case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
		{
			ConditionVariable* variable = (ConditionVariable*)object;
			name = variable->ObjectType();
			referencedObject = variable->Object();
			break;
		}

		case THREAD_BLOCK_TYPE_MUTEX:
		{
			mutex* lock = (mutex*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_RW_LOCK:
		{
			rw_lock* lock = (rw_lock*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_OTHER:
		{
			name = (const char*)(void*)object;
			break;
		}

		case THREAD_BLOCK_TYPE_OTHER_OBJECT:
		case THREAD_BLOCK_TYPE_SNOOZE:
		case THREAD_BLOCK_TYPE_SIGNAL:
		default:
			return;
	}

	// add the event
	size_t nameLen = name != NULL ? strlen(name) : 0;

	system_profiler_wait_object_info* event
		= (system_profiler_wait_object_info*)
			_AllocateBuffer(sizeof(system_profiler_wait_object_info) + nameLen,
				B_SYSTEM_PROFILER_WAIT_OBJECT_INFO, 0, 0);
	if (event == NULL)
		return;

	event->type = type;
	event->object = object;
	event->referenced_object = (addr_t)referencedObject;
	if (name != NULL)
		strcpy(event->name, name);
	else
		event->name[0] = '\0';

	fHeader->size = fBufferSize;

	// add the wait object

	// get a free one or steal the least recently used one
	waitObject = fFreeWaitObjects.RemoveHead();
	if (waitObject == NULL) {
		waitObject = fUsedWaitObjects.RemoveHead();
		fWaitObjectTable.RemoveUnchecked(waitObject);
	}

	waitObject->object = object;
	waitObject->type = type;
	fWaitObjectTable.InsertUnchecked(waitObject);
	fUsedWaitObjects.Add(waitObject);
}


/*static*/ bool
SystemProfiler::_InitialImageIterator(struct image* image, void* cookie)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->fImageNotificationsEnabled = true;
		// Set that here, since the image lock is being held now.
	return !self->_ImageAdded(image);
}


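/*!	Allocates space for an event with the given payload \a size in the ring
	buffer and writes the event header (\a event code and \a cpu; \a count is
	currently unused). Returns a pointer to the payload, or NULL if there is
	not enough space left, in which case the event counts as dropped.
	The caller must hold fLock.
*/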
void*
SystemProfiler::_AllocateBuffer(size_t size, int event, int cpu, int count)
{
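	// round the payload up to a multiple of 4 bytes, so that all events stay
	// 4 byte aligned, and add room for the event header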
	size = (size + 3) / 4 * 4;
	size += sizeof(system_profiler_event_header);

	size_t end = fBufferStart + fBufferSize;
	if (end + size > fBufferCapacity) {
		// Buffer is wrapped or needs wrapping.
		if (end < fBufferCapacity) {
			// not wrapped yet, but needed
			system_profiler_event_header* header
				= (system_profiler_event_header*)(fBufferBase + end);
			header->event = B_SYSTEM_PROFILER_BUFFER_END;
			fBufferSize = fBufferCapacity - fBufferStart;
			end = 0;
		} else
			end -= fBufferCapacity;

		if (end + size > fBufferStart) {
			fDroppedEvents++;
			return NULL;
		}
	}

	system_profiler_event_header* header
		= (system_profiler_event_header*)(fBufferBase + end);
	header->event = event;
	header->cpu = cpu;
	header->size = size - sizeof(system_profiler_event_header);

	fBufferSize += size;

	return header + 1;
}


/*static*/ void
SystemProfiler::_InitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->_ScheduleTimer(cpu);
}


/*static*/ void
SystemProfiler::_UninitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;

	CPUProfileData& cpuData = self->fCPUData[cpu];
	cancel_timer(&cpuData.timer);
	cpuData.timerScheduled = false;
}


void
SystemProfiler::_ScheduleTimer(int cpu)
{
	CPUProfileData& cpuData = fCPUData[cpu];
	cpuData.timerEnd = system_time() + fInterval;
	cpuData.timer.user_data = this;
	add_timer(&cpuData.timer, &_ProfilingEvent, fInterval,
		B_ONE_SHOT_RELATIVE_TIMER);
	cpuData.timerScheduled = true;
}


void
SystemProfiler::_DoSample()
{
	Thread* thread = thread_get_current_thread();
	int cpu = thread->cpu->cpu_num;
	CPUProfileData& cpuData = fCPUData[cpu];

	// get the samples (skipping the timer interrupt frame we are running in)
	int32 count = arch_debug_get_stack_trace(cpuData.buffer, fStackDepth, 1,
		0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

	InterruptsSpinLocker locker(fLock);

	system_profiler_samples* event = (system_profiler_samples*)
		_AllocateBuffer(sizeof(system_profiler_samples)
				+ count * sizeof(addr_t),
			B_SYSTEM_PROFILER_SAMPLES, cpu, count);
	if (event == NULL)
		return;

	event->thread = thread->id;
	memcpy(event->samples, cpuData.buffer, count * sizeof(addr_t));

	fHeader->size = fBufferSize;
}


/*static*/ int32
SystemProfiler::_ProfilingEvent(struct timer* timer)
{
	SystemProfiler* self = (SystemProfiler*)timer->user_data;

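	// take the sample and re-arm the one-shot timer for the next interval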
	self->_DoSample();
	self->_ScheduleTimer(timer->cpu);

	return B_HANDLED_INTERRUPT;
}


// #pragma mark - private kernel API


#if SYSTEM_PROFILER

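/*!	Starts the built-in kernel profiler (only available in kernels built with
	SYSTEM_PROFILER), e.g. to profile the boot process before any userland
	profiler can attach. The parameters used are recorded in
	sRecordedParameters, so that a userland tool can later pick up both them
	and the buffer via _user_system_profiler_recorded().
*/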
status_t
start_system_profiler(size_t areaSize, uint32 stackDepth, bigtime_t interval)
{
	struct ParameterDeleter {
		ParameterDeleter(area_id area)
			:
			fArea(area),
			fDetached(false)
		{
		}

		~ParameterDeleter()
		{
			if (!fDetached) {
				delete_area(fArea);
				delete sRecordedParameters;
				sRecordedParameters = NULL;
			}
		}

		void Detach()
		{
			fDetached = true;
		}

	private:
		area_id	fArea;
		bool	fDetached;
	};

	void* address;
	area_id area = create_area("kernel profile data", &address,
		B_ANY_KERNEL_ADDRESS, areaSize, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		return area;

	ParameterDeleter parameterDeleter(area);

	sRecordedParameters = new(std::nothrow) system_profiler_parameters;
	if (sRecordedParameters == NULL)
		return B_NO_MEMORY;

	sRecordedParameters->buffer_area = area;
	sRecordedParameters->flags = B_SYSTEM_PROFILER_TEAM_EVENTS
		| B_SYSTEM_PROFILER_THREAD_EVENTS | B_SYSTEM_PROFILER_IMAGE_EVENTS
		| B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS
		| B_SYSTEM_PROFILER_SAMPLING_EVENTS;
	sRecordedParameters->locking_lookup_size = 4096;
	sRecordedParameters->interval = interval;
	sRecordedParameters->stack_depth = stackDepth;

	area_info areaInfo;
	get_area_info(area, &areaInfo);

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(B_SYSTEM_TEAM,
		areaInfo, *sRecordedParameters);
	if (profiler == NULL)
		return B_NO_MEMORY;

	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	status_t error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;

	parameterDeleter.Detach();
	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


void
stop_system_profiler()
{
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL)
		return;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();
}

#endif	// SYSTEM_PROFILER


// #pragma mark - syscalls


status_t
_user_system_profiler_start(struct system_profiler_parameters* userParameters)
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	// copy the parameters to the kernel
	struct system_profiler_parameters parameters;
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters)
		|| user_memcpy(&parameters, userParameters, sizeof(parameters))
			!= B_OK) {
		return B_BAD_ADDRESS;
	}

	// check the parameters
	team_id team = thread_get_current_thread()->team->id;

	area_info areaInfo;
	status_t error = get_area_info(parameters.buffer_area, &areaInfo);
	if (error != B_OK)
		return error;

	if (areaInfo.team != team)
		return B_BAD_VALUE;

	if ((parameters.flags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0) {
		if (parameters.stack_depth < 1)
			return B_BAD_VALUE;

		if (parameters.interval < B_DEBUG_MIN_PROFILE_INTERVAL)
			parameters.interval = B_DEBUG_MIN_PROFILE_INTERVAL;

		if (parameters.stack_depth > B_DEBUG_STACK_TRACE_DEPTH)
			parameters.stack_depth = B_DEBUG_STACK_TRACE_DEPTH;
	}

	// quick check to see whether we already have a profiler installed
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;
	locker.Unlock();

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(team, areaInfo,
		parameters);
	if (profiler == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	locker.Lock();
	if (sProfiler != NULL)
		return B_BUSY;

	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


status_t
_user_system_profiler_next_buffer(size_t bytesRead, uint64* _droppedEvents)
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	if (_droppedEvents != NULL && !IS_USER_ADDRESS(_droppedEvents))
		return B_BAD_ADDRESS;

	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	// get a reference to the profiler
	SystemProfiler* profiler = sProfiler;
	BReference<SystemProfiler> reference(profiler);
	locker.Unlock();

	uint64 droppedEvents = 0;
	status_t error = profiler->NextBuffer(bytesRead,
		_droppedEvents != NULL ? &droppedEvents : NULL);
	if (error == B_OK && _droppedEvents != NULL)
		user_memcpy(_droppedEvents, &droppedEvents, sizeof(droppedEvents));

	return error;
}


status_t
_user_system_profiler_stop()
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();

	return B_OK;
}


status_t
_user_system_profiler_recorded(system_profiler_parameters* userParameters)
{
	if (geteuid() != 0)
		return B_PERMISSION_DENIED;

	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters))
		return B_BAD_ADDRESS;
	if (sRecordedParameters == NULL)
		return B_ERROR;

#if SYSTEM_PROFILER
	stop_system_profiler();

	// Transfer the area to the userland process

	void* address;
	area_id newArea = transfer_area(sRecordedParameters->buffer_area, &address,
		B_ANY_ADDRESS, team_get_current_team_id(), true);
	if (newArea < 0)
		return newArea;

	status_t status = set_area_protection(newArea, B_READ_AREA);
	if (status == B_OK) {
		sRecordedParameters->buffer_area = newArea;

		status = user_memcpy(userParameters, sRecordedParameters,
			sizeof(system_profiler_parameters));
	}
	if (status != B_OK)
		delete_area(newArea);

	delete sRecordedParameters;
	sRecordedParameters = NULL;

	return status;
#else
	return B_NOT_SUPPORTED;
#endif // SYSTEM_PROFILER
}