// xref: /haiku/src/system/kernel/debug/system_profiler.cpp
// (revision 1e60bdeab63fa7a57bc9a55b032052e95a18bd2c)
/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <system_profiler.h>

#include <AutoDeleter.h>
#include <Referenceable.h>

#include <util/AutoLock.h>

#include <system_profiler_defs.h>

#include <cpu.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <listeners.h>
#include <Notifications.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <user_debugger.h>
#include <vm/vm.h>

#include <arch/debug.h>

#include "IOSchedulerRoster.h"


// This is the kernel-side implementation of the system profiling support.
// A userland team can register as the system profiler, providing an area to
// be used as the event buffer. The events are team, thread, and image changes
// (added/removed), periodic sampling of the return address stack for each
// CPU, as well as scheduling and I/O scheduling events.
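//
// As a rough sketch of the userland side (an illustration only -- the
// _kern_*() syscall wrappers and the error handling are assumed here, not
// defined in this file), a profiler would do something like:
//
//	system_profiler_parameters parameters;
//	parameters.buffer_area = create_area("profiling buffer", &address,
//		B_ANY_ADDRESS, 16 * B_PAGE_SIZE, B_NO_LOCK,
//		B_READ_AREA | B_WRITE_AREA);
//	parameters.flags = B_SYSTEM_PROFILER_TEAM_EVENTS
//		| B_SYSTEM_PROFILER_SAMPLING_EVENTS;
//	parameters.interval = 1000;		// microseconds between samples
//	parameters.stack_depth = 5;
//	parameters.locking_lookup_size = 4096;
//	_kern_system_profiler_start(&parameters);
//	while (_kern_system_profiler_next_buffer(bytesRead, &dropped) == B_OK) {
//		// process the events now available in the buffer area and update
//		// bytesRead accordingly
//	}
//	_kern_system_profiler_stop();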


class SystemProfiler;


// minimum/maximum size of the table used for wait object caching
#define MIN_WAIT_OBJECT_COUNT	128
#define MAX_WAIT_OBJECT_COUNT	1024


static spinlock sProfilerLock = B_SPINLOCK_INITIALIZER;
static SystemProfiler* sProfiler = NULL;
static struct system_profiler_parameters* sRecordedParameters = NULL;


class SystemProfiler : public BReferenceable, private NotificationListener,
	private SchedulerListener, private WaitObjectListener {
public:
								SystemProfiler(team_id team,
									const area_info& userAreaInfo,
									const system_profiler_parameters&
										parameters);
								~SystemProfiler();

			team_id				TeamID() const	{ return fTeam; }

			status_t			Init();
			status_t			NextBuffer(size_t bytesRead,
									uint64* _droppedEvents);

private:
	virtual	void				EventOccurred(NotificationService& service,
									const KMessage* event);

	virtual	void				ThreadEnqueuedInRunQueue(Thread* thread);
	virtual	void				ThreadRemovedFromRunQueue(Thread* thread);
	virtual	void				ThreadScheduled(Thread* oldThread,
									Thread* newThread);

	virtual	void				SemaphoreCreated(sem_id id,
									const char* name);
	virtual	void				ConditionVariableInitialized(
									ConditionVariable* variable);
	virtual	void				MutexInitialized(mutex* lock);
	virtual	void				RWLockInitialized(rw_lock* lock);

			bool				_TeamAdded(Team* team);
			bool				_TeamRemoved(Team* team);
			bool				_TeamExec(Team* team);

			bool				_ThreadAdded(Thread* thread);
			bool				_ThreadRemoved(Thread* thread);

			bool				_ImageAdded(struct image* image);
			bool				_ImageRemoved(struct image* image);

			bool				_IOSchedulerAdded(IOScheduler* scheduler);
			bool				_IOSchedulerRemoved(IOScheduler* scheduler);
			bool				_IORequestScheduled(IOScheduler* scheduler,
									IORequest* request);
			bool				_IORequestFinished(IOScheduler* scheduler,
									IORequest* request);
			bool				_IOOperationStarted(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);
			bool				_IOOperationFinished(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);

			void				_WaitObjectCreated(addr_t object, uint32 type);
			void				_WaitObjectUsed(addr_t object, uint32 type);

	inline	void				_MaybeNotifyProfilerThreadLocked();
	inline	void				_MaybeNotifyProfilerThread();

	static	bool				_InitialImageIterator(struct image* image,
									void* cookie);

			void*				_AllocateBuffer(size_t size, int event, int cpu,
									int count);

	static	void				_InitTimers(void* cookie, int cpu);
	static	void				_UninitTimers(void* cookie, int cpu);
			void				_ScheduleTimer(int cpu);

			void				_DoSample();

	static	int32				_ProfilingEvent(struct timer* timer);

private:
			struct CPUProfileData {
				struct timer	timer;
				bigtime_t		timerEnd;
				bool			timerScheduled;
				addr_t			buffer[B_DEBUG_STACK_TRACE_DEPTH];
			};

			struct WaitObjectKey {
				addr_t	object;
				uint32	type;
			};

			struct WaitObject : DoublyLinkedListLinkImpl<WaitObject>,
					WaitObjectKey {
				struct WaitObject* hash_link;
			};

			struct WaitObjectTableDefinition {
				typedef WaitObjectKey	KeyType;
				typedef	WaitObject		ValueType;

				size_t HashKey(const WaitObjectKey& key) const
				{
					return (size_t)key.object ^ (size_t)key.type;
				}

				size_t Hash(const WaitObject* value) const
				{
					return HashKey(*value);
				}

				bool Compare(const WaitObjectKey& key,
					const WaitObject* value) const
				{
					return value->type == key.type
						&& value->object == key.object;
				}

				WaitObject*& GetLink(WaitObject* value) const
				{
					return value->hash_link;
				}
			};

			typedef DoublyLinkedList<WaitObject> WaitObjectList;
			typedef BOpenHashTable<WaitObjectTableDefinition> WaitObjectTable;

private:
			spinlock			fLock;
			team_id				fTeam;
			area_id				fUserArea;
			area_id				fKernelArea;
			size_t				fAreaSize;
			uint32				fFlags;
			uint32				fStackDepth;
			bigtime_t			fInterval;
			system_profiler_buffer_header* fHeader;
			uint8*				fBufferBase;
			size_t				fBufferCapacity;
			size_t				fBufferStart;
			size_t				fBufferSize;
			uint64				fDroppedEvents;
			int64				fLastTeamAddedSerialNumber;
			int64				fLastThreadAddedSerialNumber;
			bool				fTeamNotificationsRequested;
			bool				fTeamNotificationsEnabled;
			bool				fThreadNotificationsRequested;
			bool				fThreadNotificationsEnabled;
			bool				fImageNotificationsRequested;
			bool				fImageNotificationsEnabled;
			bool				fIONotificationsRequested;
			bool				fIONotificationsEnabled;
			bool				fSchedulerNotificationsRequested;
			bool				fWaitObjectNotificationsRequested;
			Thread* volatile	fWaitingProfilerThread;
			bool				fProfilingActive;
			bool				fReentered[SMP_MAX_CPUS];
			CPUProfileData		fCPUData[SMP_MAX_CPUS];
			WaitObject*			fWaitObjectBuffer;
			int32				fWaitObjectCount;
			WaitObjectList		fUsedWaitObjects;
			WaitObjectList		fFreeWaitObjects;
			WaitObjectTable		fWaitObjectTable;
};


/*!	Notifies the profiler thread when the profiling buffer is more than half
	full. The caller must hold fLock. fReentered[cpu] is set for the duration
	of the wake-up, so the scheduler callbacks this triggers on the current
	CPU don't try to re-acquire the already held fLock.
*/
inline void
SystemProfiler::_MaybeNotifyProfilerThreadLocked()
{
	// If the buffer is full enough, notify the profiler.
	if (fWaitingProfilerThread != NULL && fBufferSize > fBufferCapacity / 2) {
		int cpu = smp_get_current_cpu();
		fReentered[cpu] = true;

		Thread* profilerThread = fWaitingProfilerThread;
		fWaitingProfilerThread = NULL;

		SpinLocker _(profilerThread->scheduler_lock);
		thread_unblock_locked(profilerThread, B_OK);

		fReentered[cpu] = false;
	}
}


inline void
SystemProfiler::_MaybeNotifyProfilerThread()
{
	if (fWaitingProfilerThread == NULL)
		return;

	InterruptsSpinLocker locker(fLock);

	_MaybeNotifyProfilerThreadLocked();
}


// #pragma mark - SystemProfiler public


SystemProfiler::SystemProfiler(team_id team, const area_info& userAreaInfo,
	const system_profiler_parameters& parameters)
	:
	fTeam(team),
	fUserArea(userAreaInfo.area),
	fKernelArea(-1),
	fAreaSize(userAreaInfo.size),
	fFlags(parameters.flags),
	fStackDepth(parameters.stack_depth),
	fInterval(parameters.interval),
	fHeader(NULL),
	fBufferBase(NULL),
	fBufferCapacity(0),
	fBufferStart(0),
	fBufferSize(0),
	fDroppedEvents(0),
	fLastTeamAddedSerialNumber(0),
	fLastThreadAddedSerialNumber(0),
	fTeamNotificationsRequested(false),
	fTeamNotificationsEnabled(false),
	fThreadNotificationsRequested(false),
	fThreadNotificationsEnabled(false),
	fImageNotificationsRequested(false),
	fImageNotificationsEnabled(false),
	fIONotificationsRequested(false),
	fIONotificationsEnabled(false),
	fSchedulerNotificationsRequested(false),
	fWaitObjectNotificationsRequested(false),
	fWaitingProfilerThread(NULL),
	fProfilingActive(false),
	fWaitObjectBuffer(NULL),
	fWaitObjectCount(0),
	fUsedWaitObjects(),
	fFreeWaitObjects(),
	fWaitObjectTable()
{
	B_INITIALIZE_SPINLOCK(&fLock);

	memset(fReentered, 0, sizeof(fReentered));

	// Compute the number of wait objects we want to cache. The divisor
	// approximates the memory footprint per cached object: the object itself
	// plus its share of the hash table, which Init() sizes to 3/2 of the
	// object count.
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		fWaitObjectCount = parameters.locking_lookup_size
			/ (sizeof(WaitObject) + (sizeof(void*) * 3 / 2));
		if (fWaitObjectCount < MIN_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MIN_WAIT_OBJECT_COUNT;
		if (fWaitObjectCount > MAX_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MAX_WAIT_OBJECT_COUNT;
	}
}


SystemProfiler::~SystemProfiler()
{
	// Wake up the user thread, if it is waiting, and mark profiling
	// inactive.
	InterruptsSpinLocker locker(fLock);
	if (fWaitingProfilerThread != NULL) {
		thread_unblock(fWaitingProfilerThread, B_OK);
		fWaitingProfilerThread = NULL;
	}
	fProfilingActive = false;
	locker.Unlock();

	// stop scheduler listening
	if (fSchedulerNotificationsRequested)
		scheduler_remove_listener(this);

	// stop wait object listening
	if (fWaitObjectNotificationsRequested) {
		InterruptsSpinLocker locker(gWaitObjectListenerLock);
		remove_wait_object_listener(this);
	}

	// deactivate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_UninitTimers, this);

	// cancel notifications
	NotificationManager& notificationManager
		= NotificationManager::Manager();

	// images
	if (fImageNotificationsRequested) {
		fImageNotificationsRequested = false;
		notificationManager.RemoveListener("images", NULL, *this);
	}

	// threads
	if (fThreadNotificationsRequested) {
		fThreadNotificationsRequested = false;
		notificationManager.RemoveListener("threads", NULL, *this);
	}

	// teams
	if (fTeamNotificationsRequested) {
		fTeamNotificationsRequested = false;
		notificationManager.RemoveListener("teams", NULL, *this);
	}

	// I/O
	if (fIONotificationsRequested) {
		fIONotificationsRequested = false;
		notificationManager.RemoveListener("I/O", NULL, *this);
	}

	// delete wait object related allocations
	fWaitObjectTable.Clear();
	delete[] fWaitObjectBuffer;

	// unlock the memory and delete the area
	if (fKernelArea >= 0) {
		unlock_memory(fHeader, fAreaSize, B_READ_DEVICE);
		delete_area(fKernelArea);
		fKernelArea = -1;
	}
}


status_t
SystemProfiler::Init()
{
	// clone the user area
	void* areaBase;
	fKernelArea = clone_area("profiling samples", &areaBase,
		B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA,
		fUserArea);
	if (fKernelArea < 0)
		return fKernelArea;

	// we need the memory locked
	status_t error = lock_memory(areaBase, fAreaSize, B_READ_DEVICE);
	if (error != B_OK) {
		delete_area(fKernelArea);
		fKernelArea = -1;
		return error;
	}

	// the buffer is ready for use
	fHeader = (system_profiler_buffer_header*)areaBase;
	fBufferBase = (uint8*)(fHeader + 1);
	fBufferCapacity = fAreaSize - (fBufferBase - (uint8*)areaBase);
	fHeader->start = 0;
	fHeader->size = 0;

	// allocate the wait object buffer and init the hash table
	if (fWaitObjectCount > 0) {
		fWaitObjectBuffer = new(std::nothrow) WaitObject[fWaitObjectCount];
		if (fWaitObjectBuffer == NULL)
			return B_NO_MEMORY;

		for (int32 i = 0; i < fWaitObjectCount; i++)
			fFreeWaitObjects.Add(fWaitObjectBuffer + i);

		error = fWaitObjectTable.Init(fWaitObjectCount * 3 / 2);
		if (error != B_OK)
			return error;
	}

	// start listening for notifications

	// teams
	NotificationManager& notificationManager
		= NotificationManager::Manager();
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		error = notificationManager.AddListener("teams",
			TEAM_ADDED | TEAM_REMOVED | TEAM_EXEC, *this);
		if (error != B_OK)
			return error;
		fTeamNotificationsRequested = true;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		error = notificationManager.AddListener("threads",
			THREAD_ADDED | THREAD_REMOVED, *this);
		if (error != B_OK)
			return error;
		fThreadNotificationsRequested = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		error = notificationManager.AddListener("images",
			IMAGE_ADDED | IMAGE_REMOVED, *this);
		if (error != B_OK)
			return error;
		fImageNotificationsRequested = true;
	}

	// I/O events
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		error = notificationManager.AddListener("I/O",
			IO_SCHEDULER_ADDED | IO_SCHEDULER_REMOVED
				| IO_SCHEDULER_REQUEST_SCHEDULED | IO_SCHEDULER_REQUEST_FINISHED
				| IO_SCHEDULER_OPERATION_STARTED
				| IO_SCHEDULER_OPERATION_FINISHED,
			*this);
		if (error != B_OK)
			return error;
		fIONotificationsRequested = true;
	}

	// We need to fill the buffer with the initial state of teams, threads,
	// and images.

	// teams
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		TeamListIterator iterator;
		while (Team* team = iterator.Next()) {
			locker.Unlock();

			bool added = _TeamAdded(team);

			// release the reference returned by the iterator
			team->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fTeamNotificationsEnabled = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		if (image_iterate_through_images(&_InitialImageIterator, this) != NULL)
			return B_BUFFER_OVERFLOW;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		ThreadListIterator iterator;
		while (Thread* thread = iterator.Next()) {
			locker.Unlock();

			bool added = _ThreadAdded(thread);

			// release the reference returned by the iterator
			thread->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fThreadNotificationsEnabled = true;
	}

	fProfilingActive = true;

	// start scheduler and wait object listening
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		scheduler_add_listener(this);
		fSchedulerNotificationsRequested = true;

		InterruptsSpinLocker waitObjectLocker(gWaitObjectListenerLock);
		add_wait_object_listener(this);
		fWaitObjectNotificationsRequested = true;
		waitObjectLocker.Unlock();

		// fake schedule events for the initially running threads
		int32 cpuCount = smp_get_num_cpus();
		for (int32 i = 0; i < cpuCount; i++) {
			Thread* thread = gCPU[i].running_thread;
			if (thread != NULL)
				ThreadScheduled(thread, thread);
		}
	}

	// I/O scheduling
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		IOSchedulerRoster* roster = IOSchedulerRoster::Default();
		AutoLocker<IOSchedulerRoster> rosterLocker(roster);

		for (IOSchedulerList::ConstIterator it
				= roster->SchedulerList().GetIterator();
			IOScheduler* scheduler = it.Next();) {
			_IOSchedulerAdded(scheduler);
		}

		fIONotificationsEnabled = true;
	}

	// activate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_InitTimers, this);

	return B_OK;
}


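/*!	Hands back buffer space and waits for the next portion of the buffer to
	be ready for the profiler thread. The \a bytesRead bytes at the front of
	the buffer are considered consumed and their space is recycled. If more
	than half of the buffer is full already, the call returns right away;
	otherwise it blocks interruptibly, retrying on 1 s timeouts while the
	buffer is empty, until enough events have accumulated.
*/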
status_t
SystemProfiler::NextBuffer(size_t bytesRead, uint64* _droppedEvents)
{
	InterruptsSpinLocker locker(fLock);

	if (fWaitingProfilerThread != NULL || !fProfilingActive
		|| bytesRead > fBufferSize) {
		return B_BAD_VALUE;
	}

	fBufferSize -= bytesRead;
	fBufferStart += bytesRead;
	if (fBufferStart > fBufferCapacity)
		fBufferStart -= fBufferCapacity;
	fHeader->size = fBufferSize;
	fHeader->start = fBufferStart;

	// already enough data in the buffer to return?
	if (fBufferSize > fBufferCapacity / 2)
		return B_OK;

	// Wait until the buffer gets too full or an error or a timeout occurs.
	while (true) {
		Thread* thread = thread_get_current_thread();
		fWaitingProfilerThread = thread;

		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "system profiler buffer");

		locker.Unlock();

		status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT, 1000000);

		locker.Lock();

		if (error == B_OK) {
			// the unblocking thread has unset fWaitingProfilerThread for us
			break;
		}

		fWaitingProfilerThread = NULL;

		if (error != B_TIMED_OUT)
			return error;

		// just the timeout -- return if the buffer is not empty
		if (fBufferSize > 0)
			break;
	}

	if (_droppedEvents != NULL) {
		*_droppedEvents = fDroppedEvents;
		fDroppedEvents = 0;
	}

	return B_OK;
}


// #pragma mark - NotificationListener interface


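/*!	Dispatches team, thread, image, and I/O notifications to the private
	event writers. Note the asymmetry for removals: while the initial
	team/thread list scan is still in progress, removal events are forwarded
	anyway if the object's serial number shows that its creation has already
	been reported -- otherwise the consumer could see a team or thread vanish
	that it was never told about.
*/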
void
SystemProfiler::EventOccurred(NotificationService& service,
	const KMessage* event)
{
	int32 eventCode;
	if (event->FindInt32("event", &eventCode) != B_OK)
		return;

	if (strcmp(service.Name(), "teams") == 0) {
		Team* team = (Team*)event->GetPointer("teamStruct", NULL);
		if (team == NULL)
			return;

		switch (eventCode) {
			case TEAM_ADDED:
				if (fTeamNotificationsEnabled)
					_TeamAdded(team);
				break;

			case TEAM_REMOVED:
				if (team->id == fTeam) {
					// The profiling team is gone -- uninstall the profiler!
					InterruptsSpinLocker locker(sProfilerLock);
					if (sProfiler != this)
						return;

					sProfiler = NULL;
					locker.Unlock();

					ReleaseReference();
					return;
				}

				// When we're still doing the initial team list scan, we are
				// also interested in removals that happened to teams we have
				// already seen.
				if (fTeamNotificationsEnabled
					|| team->serial_number <= fLastTeamAddedSerialNumber) {
					_TeamRemoved(team);
				}
				break;

			case TEAM_EXEC:
				if (fTeamNotificationsEnabled)
					_TeamExec(team);
				break;
		}
	} else if (strcmp(service.Name(), "threads") == 0) {
		Thread* thread = (Thread*)event->GetPointer("threadStruct", NULL);
		if (thread == NULL)
			return;

		switch (eventCode) {
			case THREAD_ADDED:
				if (fThreadNotificationsEnabled)
					_ThreadAdded(thread);
				break;

			case THREAD_REMOVED:
				// When we're still doing the initial thread list scan, we are
				// also interested in removals that happened to threads we have
				// already seen.
				if (fThreadNotificationsEnabled
					|| thread->serial_number <= fLastThreadAddedSerialNumber) {
					_ThreadRemoved(thread);
				}
				break;
		}
	} else if (strcmp(service.Name(), "images") == 0) {
		if (!fImageNotificationsEnabled)
			return;

		struct image* image = (struct image*)event->GetPointer(
			"imageStruct", NULL);
		if (image == NULL)
			return;

		switch (eventCode) {
			case IMAGE_ADDED:
				_ImageAdded(image);
				break;

			case IMAGE_REMOVED:
				_ImageRemoved(image);
				break;
		}
	} else if (strcmp(service.Name(), "I/O") == 0) {
		if (!fIONotificationsEnabled)
			return;

		IOScheduler* scheduler = (IOScheduler*)event->GetPointer("scheduler",
			NULL);
		if (scheduler == NULL)
			return;

		IORequest* request = (IORequest*)event->GetPointer("request", NULL);
		IOOperation* operation = (IOOperation*)event->GetPointer("operation",
			NULL);

		switch (eventCode) {
			case IO_SCHEDULER_ADDED:
				_IOSchedulerAdded(scheduler);
				break;

			case IO_SCHEDULER_REMOVED:
				_IOSchedulerRemoved(scheduler);
				break;

			case IO_SCHEDULER_REQUEST_SCHEDULED:
				_IORequestScheduled(scheduler, request);
				break;

			case IO_SCHEDULER_REQUEST_FINISHED:
				_IORequestFinished(scheduler, request);
				break;

			case IO_SCHEDULER_OPERATION_STARTED:
				_IOOperationStarted(scheduler, request, operation);
				break;

			case IO_SCHEDULER_OPERATION_FINISHED:
				_IOOperationFinished(scheduler, request, operation);
				break;
		}
	}

	_MaybeNotifyProfilerThread();
}


// #pragma mark - SchedulerListener interface


void
SystemProfiler::ThreadEnqueuedInRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_enqueued_in_run_queue* event
		= (system_profiler_thread_enqueued_in_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_enqueued_in_run_queue),
				B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;
	event->priority = thread->priority;

	fHeader->size = fBufferSize;

	// Unblock the profiler thread if necessary, but not if the enqueued
	// thread had been waiting on a condition variable, since then we'd likely
	// deadlock in ConditionVariable::NotifyOne(), as it acquires a static
	// spinlock.
	if (thread->wait.type != THREAD_BLOCK_TYPE_CONDITION_VARIABLE)
		_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadRemovedFromRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_removed_from_run_queue* event
		= (system_profiler_thread_removed_from_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_removed_from_run_queue),
				B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadScheduled(Thread* oldThread, Thread* newThread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	// If the old thread starts waiting, handle the wait object.
	if (oldThread->state == B_THREAD_WAITING)
		_WaitObjectUsed((addr_t)oldThread->wait.object, oldThread->wait.type);

	system_profiler_thread_scheduled* event
		= (system_profiler_thread_scheduled*)
			_AllocateBuffer(sizeof(system_profiler_thread_scheduled),
				B_SYSTEM_PROFILER_THREAD_SCHEDULED, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = newThread->id;
	event->previous_thread = oldThread->id;
	event->previous_thread_state = oldThread->state;
	event->previous_thread_wait_object_type = oldThread->wait.type;
	event->previous_thread_wait_object = (addr_t)oldThread->wait.object;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


// #pragma mark - WaitObjectListener interface


void
SystemProfiler::SemaphoreCreated(sem_id id, const char* name)
{
	_WaitObjectCreated((addr_t)id, THREAD_BLOCK_TYPE_SEMAPHORE);
}


void
SystemProfiler::ConditionVariableInitialized(ConditionVariable* variable)
{
	_WaitObjectCreated((addr_t)variable, THREAD_BLOCK_TYPE_CONDITION_VARIABLE);
}


void
SystemProfiler::MutexInitialized(mutex* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_MUTEX);
}


void
SystemProfiler::RWLockInitialized(rw_lock* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_RW_LOCK);
}


// #pragma mark - SystemProfiler private


bool
SystemProfiler::_TeamAdded(Team* team)
{
	TeamLocker teamLocker(team);

	size_t nameLen = strlen(team->Name());
	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	// During the initial scan check whether the team is already gone again.
	// Later this cannot happen, since the team creator notifies us before
	// actually starting the team.
	if (!fTeamNotificationsEnabled && team->state >= TEAM_STATE_DEATH)
		return true;

	if (team->serial_number > fLastTeamAddedSerialNumber)
		fLastTeamAddedSerialNumber = team->serial_number;

	system_profiler_team_added* event = (system_profiler_team_added*)
		_AllocateBuffer(
			sizeof(system_profiler_team_added) + nameLen + 1 + argsLen,
			B_SYSTEM_PROFILER_TEAM_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strcpy(event->name, team->Name());
	event->args_offset = nameLen + 1;
	strcpy(event->name + nameLen + 1, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamRemoved(Team* team)
{
	// TODO: It is possible that we get remove notifications for teams that
	// had already been removed from the global team list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	TeamLocker teamLocker(team);
	InterruptsSpinLocker locker(fLock);

	system_profiler_team_removed* event = (system_profiler_team_removed*)
		_AllocateBuffer(sizeof(system_profiler_team_removed),
			B_SYSTEM_PROFILER_TEAM_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamExec(Team* team)
{
	TeamLocker teamLocker(team);

	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	system_profiler_team_exec* event = (system_profiler_team_exec*)
		_AllocateBuffer(sizeof(system_profiler_team_exec) + argsLen,
			B_SYSTEM_PROFILER_TEAM_EXEC, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strlcpy(event->thread_name, team->main_thread->name,
		sizeof(event->thread_name));
	strcpy(event->args, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadAdded(Thread* thread)
{
	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	// During the initial scan check whether the thread is already gone again.
	// Later this cannot happen, since the thread creator notifies us before
	// actually starting the thread.
	if (!fThreadNotificationsEnabled && !thread->IsAlive())
		return true;

	if (thread->serial_number > fLastThreadAddedSerialNumber)
		fLastThreadAddedSerialNumber = thread->serial_number;

	system_profiler_thread_added* event = (system_profiler_thread_added*)
		_AllocateBuffer(sizeof(system_profiler_thread_added),
			B_SYSTEM_PROFILER_THREAD_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;
	strlcpy(event->name, thread->name, sizeof(event->name));

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadRemoved(Thread* thread)
{
	// TODO: It is possible that we get remove notifications for threads that
	// had already been removed from the global thread list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	system_profiler_thread_removed* event
		= (system_profiler_thread_removed*)
			_AllocateBuffer(sizeof(system_profiler_thread_removed),
				B_SYSTEM_PROFILER_THREAD_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageAdded(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_added* event = (system_profiler_image_added*)
		_AllocateBuffer(sizeof(system_profiler_image_added),
			B_SYSTEM_PROFILER_IMAGE_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->info = image->info.basic_info;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageRemoved(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_removed* event = (system_profiler_image_removed*)
		_AllocateBuffer(sizeof(system_profiler_image_removed),
			B_SYSTEM_PROFILER_IMAGE_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->image = image->info.basic_info.id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerAdded(IOScheduler* scheduler)
{
	size_t nameLen = strlen(scheduler->Name());

	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_added* event
		= (system_profiler_io_scheduler_added*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_added) + nameLen,
			B_SYSTEM_PROFILER_IO_SCHEDULER_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();
	strcpy(event->name, scheduler->Name());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerRemoved(IOScheduler* scheduler)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_removed* event
		= (system_profiler_io_scheduler_removed*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_removed),
			B_SYSTEM_PROFILER_IO_SCHEDULER_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestScheduled(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_scheduled* event
		= (system_profiler_io_request_scheduled*)_AllocateBuffer(
			sizeof(system_profiler_io_request_scheduled),
			B_SYSTEM_PROFILER_IO_REQUEST_SCHEDULED, 0, 0);
	if (event == NULL)
		return false;

	IORequestOwner* owner = request->Owner();

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->team = owner->team;
	event->thread = owner->thread;
	event->request = request;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();
	event->priority = owner->priority;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestFinished(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_finished* event
		= (system_profiler_io_request_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_request_finished),
			B_SYSTEM_PROFILER_IO_REQUEST_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationStarted(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_started* event
		= (system_profiler_io_operation_started*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_started),
			B_SYSTEM_PROFILER_IO_OPERATION_STARTED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationFinished(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_finished* event
		= (system_profiler_io_operation_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_finished),
			B_SYSTEM_PROFILER_IO_OPERATION_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


void
SystemProfiler::_WaitObjectCreated(addr_t object, uint32 type)
{
	SpinLocker locker(fLock);

	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If found, remove it and add it to the free list. This might sound weird,
	// but it makes sense, since we lazily track *used* wait objects only.
	// I.e. the object in the table is now guaranteed to be obsolete.
	if (waitObject != NULL) {
		fWaitObjectTable.RemoveUnchecked(waitObject);
		fUsedWaitObjects.Remove(waitObject);
		fFreeWaitObjects.Add(waitObject, false);
	}
}

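
/*!	Records that a wait object is being waited on, emitting a
	B_SYSTEM_PROFILER_WAIT_OBJECT_INFO event the first time the object is
	seen. Known objects are kept in an LRU list backed by a fixed-size pool:
	a lookup hit merely re-queues the object as most recently used, a miss
	takes an entry from the free list or recycles the least recently used one.
*/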
void
SystemProfiler::_WaitObjectUsed(addr_t object, uint32 type)
{
	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If already known, re-queue it as most recently used and be done.
	if (waitObject != NULL) {
		fUsedWaitObjects.Remove(waitObject);
		fUsedWaitObjects.Add(waitObject);
		return;
	}

	// not known yet -- get the info
	const char* name = NULL;
	const void* referencedObject = NULL;

	switch (type) {
		case THREAD_BLOCK_TYPE_SEMAPHORE:
		{
			name = sem_get_name_unsafe((sem_id)object);
			break;
		}

		case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
		{
			ConditionVariable* variable = (ConditionVariable*)object;
			name = variable->ObjectType();
			referencedObject = variable->Object();
			break;
		}

		case THREAD_BLOCK_TYPE_MUTEX:
		{
			mutex* lock = (mutex*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_RW_LOCK:
		{
			rw_lock* lock = (rw_lock*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_OTHER:
		{
			name = (const char*)(void*)object;
			break;
		}

		case THREAD_BLOCK_TYPE_SNOOZE:
		case THREAD_BLOCK_TYPE_SIGNAL:
		default:
			return;
	}

	// add the event
	size_t nameLen = name != NULL ? strlen(name) : 0;

	system_profiler_wait_object_info* event
		= (system_profiler_wait_object_info*)
			_AllocateBuffer(sizeof(system_profiler_wait_object_info) + nameLen,
				B_SYSTEM_PROFILER_WAIT_OBJECT_INFO, 0, 0);
	if (event == NULL)
		return;

	event->type = type;
	event->object = object;
	event->referenced_object = (addr_t)referencedObject;
	if (name != NULL)
		strcpy(event->name, name);
	else
		event->name[0] = '\0';

	fHeader->size = fBufferSize;

	// add the wait object

	// get a free one or steal the least recently used one
	waitObject = fFreeWaitObjects.RemoveHead();
	if (waitObject == NULL) {
		waitObject = fUsedWaitObjects.RemoveHead();
		fWaitObjectTable.RemoveUnchecked(waitObject);
	}

	waitObject->object = object;
	waitObject->type = type;
	fWaitObjectTable.InsertUnchecked(waitObject);
	fUsedWaitObjects.Add(waitObject);
}


/*static*/ bool
SystemProfiler::_InitialImageIterator(struct image* image, void* cookie)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->fImageNotificationsEnabled = true;
		// Set that here, since the image lock is being held now.
	return !self->_ImageAdded(image);
}


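/*!	Allocates space for an event in the ring buffer and fills in the event
	header. The payload size is rounded up to a multiple of 4 bytes. If the
	event does not fit into the space up to the buffer's end, a
	B_SYSTEM_PROFILER_BUFFER_END marker is written there and the allocation
	wraps around to offset 0. E.g. with capacity 1024, start 900, and size
	100, an 80 byte event does not fit into the remaining 24 bytes, so
	[1000, 1024) is marked unused and the event is placed at offset 0. If even
	the wrapped allocation would overwrite unread data, the event is dropped
	and fDroppedEvents is incremented.
*/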
void*
SystemProfiler::_AllocateBuffer(size_t size, int event, int cpu, int count)
{
	size = (size + 3) / 4 * 4;
	size += sizeof(system_profiler_event_header);

	size_t end = fBufferStart + fBufferSize;
	if (end + size > fBufferCapacity) {
		// Buffer is wrapped or needs wrapping.
		if (end < fBufferCapacity) {
			// not wrapped yet, but needed
			system_profiler_event_header* header
				= (system_profiler_event_header*)(fBufferBase + end);
			header->event = B_SYSTEM_PROFILER_BUFFER_END;
			fBufferSize = fBufferCapacity - fBufferStart;
			end = 0;
		} else
			end -= fBufferCapacity;

		if (end + size > fBufferStart) {
			fDroppedEvents++;
			return NULL;
		}
	}

	system_profiler_event_header* header
		= (system_profiler_event_header*)(fBufferBase + end);
	header->event = event;
	header->cpu = cpu;
	header->size = size - sizeof(system_profiler_event_header);

	fBufferSize += size;

	return header + 1;
}


/*static*/ void
SystemProfiler::_InitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->_ScheduleTimer(cpu);
}


/*static*/ void
SystemProfiler::_UninitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;

	CPUProfileData& cpuData = self->fCPUData[cpu];
	cancel_timer(&cpuData.timer);
	cpuData.timerScheduled = false;
}


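/*!	Arms the one-shot profiling timer for \a cpu. The timer hook
	(_ProfilingEvent()) re-arms the timer after taking a sample, so each CPU
	keeps sampling every fInterval microseconds without a periodic timer.
*/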
void
SystemProfiler::_ScheduleTimer(int cpu)
{
	CPUProfileData& cpuData = fCPUData[cpu];
	cpuData.timerEnd = system_time() + fInterval;
	cpuData.timer.user_data = this;
	add_timer(&cpuData.timer, &_ProfilingEvent, fInterval,
		B_ONE_SHOT_RELATIVE_TIMER);
	cpuData.timerScheduled = true;
}


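/*!	Takes a stack trace sample for the current CPU and appends it to the
	buffer as a B_SYSTEM_PROFILER_SAMPLES event. Up to fStackDepth return
	addresses (kernel and userland) are captured; the number of addresses
	actually sampled determines the size of the event's payload.
*/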
void
SystemProfiler::_DoSample()
{
	Thread* thread = thread_get_current_thread();
	int cpu = thread->cpu->cpu_num;
	CPUProfileData& cpuData = fCPUData[cpu];

	// get the samples
	int32 count = arch_debug_get_stack_trace(cpuData.buffer, fStackDepth, 1,
		0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

	InterruptsSpinLocker locker(fLock);

	system_profiler_samples* event = (system_profiler_samples*)
		_AllocateBuffer(sizeof(system_profiler_samples)
				+ count * sizeof(addr_t),
			B_SYSTEM_PROFILER_SAMPLES, cpu, count);
	if (event == NULL)
		return;

	event->thread = thread->id;
	memcpy(event->samples, cpuData.buffer, count * sizeof(addr_t));

	fHeader->size = fBufferSize;
}


/*static*/ int32
SystemProfiler::_ProfilingEvent(struct timer* timer)
{
	SystemProfiler* self = (SystemProfiler*)timer->user_data;

	self->_DoSample();
	self->_ScheduleTimer(timer->cpu);

	return B_HANDLED_INTERRUPT;
}


// #pragma mark - private kernel API


#if SYSTEM_PROFILER

status_t
start_system_profiler(size_t areaSize, uint32 stackDepth, bigtime_t interval)
{
	struct ParameterDeleter {
		ParameterDeleter(area_id area)
			:
			fArea(area),
			fDetached(false)
		{
		}

		~ParameterDeleter()
		{
			if (!fDetached) {
				delete_area(fArea);
				delete sRecordedParameters;
				sRecordedParameters = NULL;
			}
		}

		void Detach()
		{
			fDetached = true;
		}

	private:
		area_id	fArea;
		bool	fDetached;
	};

	void* address;
	area_id area = create_area("kernel profile data", &address,
		B_ANY_KERNEL_ADDRESS, areaSize, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		return area;

	ParameterDeleter parameterDeleter(area);

	sRecordedParameters = new(std::nothrow) system_profiler_parameters;
	if (sRecordedParameters == NULL)
		return B_NO_MEMORY;

	sRecordedParameters->buffer_area = area;
	sRecordedParameters->flags = B_SYSTEM_PROFILER_TEAM_EVENTS
		| B_SYSTEM_PROFILER_THREAD_EVENTS | B_SYSTEM_PROFILER_IMAGE_EVENTS
		| B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS
		| B_SYSTEM_PROFILER_SAMPLING_EVENTS;
	sRecordedParameters->locking_lookup_size = 4096;
	sRecordedParameters->interval = interval;
	sRecordedParameters->stack_depth = stackDepth;

	area_info areaInfo;
	get_area_info(area, &areaInfo);

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(B_SYSTEM_TEAM,
		areaInfo, *sRecordedParameters);
	if (profiler == NULL)
		return B_NO_MEMORY;

	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	status_t error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;

	parameterDeleter.Detach();
	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


void
stop_system_profiler()
{
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL)
		return;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();
}

#endif	// SYSTEM_PROFILER


// #pragma mark - syscalls


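/*!	Syscall backend for starting system profiling. The parameters are copied
	in from userland and checked: the buffer area must belong to the calling
	team, and, for sampling, the interval is raised to at least
	B_DEBUG_MIN_PROFILE_INTERVAL and the stack depth is capped at
	B_DEBUG_STACK_TRACE_DEPTH. Only one system profiler can be installed at a
	time; B_BUSY is returned if one is active already.
*/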
status_t
_user_system_profiler_start(struct system_profiler_parameters* userParameters)
{
	// copy params to the kernel
	struct system_profiler_parameters parameters;
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters)
		|| user_memcpy(&parameters, userParameters, sizeof(parameters))
			!= B_OK) {
		return B_BAD_ADDRESS;
	}

	// check the parameters
	team_id team = thread_get_current_thread()->team->id;

	area_info areaInfo;
	status_t error = get_area_info(parameters.buffer_area, &areaInfo);
	if (error != B_OK)
		return error;

	if (areaInfo.team != team)
		return B_BAD_VALUE;

	if ((parameters.flags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0) {
		if (parameters.stack_depth < 1)
			return B_BAD_VALUE;

		if (parameters.interval < B_DEBUG_MIN_PROFILE_INTERVAL)
			parameters.interval = B_DEBUG_MIN_PROFILE_INTERVAL;

		if (parameters.stack_depth > B_DEBUG_STACK_TRACE_DEPTH)
			parameters.stack_depth = B_DEBUG_STACK_TRACE_DEPTH;
	}

	// quick check to see whether we do already have a profiler installed
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;
	locker.Unlock();

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(team, areaInfo,
		parameters);
	if (profiler == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	locker.Lock();
	if (sProfiler != NULL)
		return B_BUSY;

	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


status_t
_user_system_profiler_next_buffer(size_t bytesRead, uint64* _droppedEvents)
{
	if (_droppedEvents != NULL && !IS_USER_ADDRESS(_droppedEvents))
		return B_BAD_ADDRESS;

	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	// get a reference to the profiler
	SystemProfiler* profiler = sProfiler;
	BReference<SystemProfiler> reference(profiler);
	locker.Unlock();

	uint64 droppedEvents;
	status_t error = profiler->NextBuffer(bytesRead,
		_droppedEvents != NULL ? &droppedEvents : NULL);
	if (error == B_OK && _droppedEvents != NULL)
		user_memcpy(_droppedEvents, &droppedEvents, sizeof(droppedEvents));

	return error;
}


status_t
_user_system_profiler_stop()
{
	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();

	return B_OK;
}


status_t
_user_system_profiler_recorded(system_profiler_parameters* userParameters)
{
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters))
		return B_BAD_ADDRESS;

#if SYSTEM_PROFILER
	if (sRecordedParameters == NULL)
		return B_ERROR;

	stop_system_profiler();

	// Transfer the area to the userland process

	void* address;
	area_id newArea = transfer_area(sRecordedParameters->buffer_area, &address,
		B_ANY_ADDRESS, team_get_current_team_id(), true);
	if (newArea < 0)
		return newArea;

	status_t status = set_area_protection(newArea, B_READ_AREA);
	if (status == B_OK) {
		sRecordedParameters->buffer_area = newArea;

		status = user_memcpy(userParameters, sRecordedParameters,
			sizeof(system_profiler_parameters));
	}
	if (status != B_OK)
		delete_area(newArea);

	delete sRecordedParameters;
	sRecordedParameters = NULL;

	return status;
#else
	return B_NOT_SUPPORTED;
#endif // SYSTEM_PROFILER
}
1689