/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <system_profiler.h>

#include <AutoDeleter.h>
#include <Referenceable.h>

#include <util/AutoLock.h>

#include <system_profiler_defs.h>

#include <cpu.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <listeners.h>
#include <Notifications.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <user_debugger.h>
#include <vm/vm.h>

#include <arch/debug.h>

#include "IOSchedulerRoster.h"


// This is the kernel-side implementation of the system profiling support.
// A userland team can register as the system profiler, providing an area as
// a buffer for events. Those events are team, thread, and image changes
// (added/removed), periodic sampling of the return address stack for each
// CPU, as well as scheduling and I/O scheduling events.
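//
// A minimal usage sketch (illustrative only; it assumes the userland syscall
// wrappers mirror the _user_*() functions at the bottom of this file):
//
//	void* address;
//	area_id buffer = create_area("profiling buffer", &address, B_ANY_ADDRESS,
//		1024 * 1024, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
//
//	system_profiler_parameters parameters;
//	parameters.buffer_area = buffer;
//	parameters.flags = B_SYSTEM_PROFILER_TEAM_EVENTS
//		| B_SYSTEM_PROFILER_THREAD_EVENTS | B_SYSTEM_PROFILER_SAMPLING_EVENTS;
//	parameters.locking_lookup_size = 4096;
//	parameters.interval = 1000;
//	parameters.stack_depth = 5;
//
//	if (_kern_system_profiler_start(&parameters) == B_OK) {
//		// ... read events from the buffer area, then:
//		_kern_system_profiler_stop();
//	}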


class SystemProfiler;


// minimum/maximum size of the table used for wait object caching
#define MIN_WAIT_OBJECT_COUNT	128
#define MAX_WAIT_OBJECT_COUNT	1024


static spinlock sProfilerLock = B_SPINLOCK_INITIALIZER;
static SystemProfiler* sProfiler = NULL;
static struct system_profiler_parameters* sRecordedParameters = NULL;


class SystemProfiler : public BReferenceable, private NotificationListener,
	private SchedulerListener, private WaitObjectListener {
public:
								SystemProfiler(team_id team,
									const area_info& userAreaInfo,
									const system_profiler_parameters&
										parameters);
								~SystemProfiler();

			team_id				TeamID() const	{ return fTeam; }

			status_t			Init();
			status_t			NextBuffer(size_t bytesRead,
									uint64* _droppedEvents);

private:
	virtual	void				EventOccurred(NotificationService& service,
									const KMessage* event);

	virtual	void				ThreadEnqueuedInRunQueue(Thread* thread);
	virtual	void				ThreadRemovedFromRunQueue(Thread* thread);
	virtual	void				ThreadScheduled(Thread* oldThread,
									Thread* newThread);

	virtual	void				SemaphoreCreated(sem_id id,
									const char* name);
	virtual	void				ConditionVariableInitialized(
									ConditionVariable* variable);
	virtual	void				MutexInitialized(mutex* lock);
	virtual	void				RWLockInitialized(rw_lock* lock);

			bool				_TeamAdded(Team* team);
			bool				_TeamRemoved(Team* team);
			bool				_TeamExec(Team* team);

			bool				_ThreadAdded(Thread* thread);
			bool				_ThreadRemoved(Thread* thread);

			bool				_ImageAdded(struct image* image);
			bool				_ImageRemoved(struct image* image);

			bool				_IOSchedulerAdded(IOScheduler* scheduler);
			bool				_IOSchedulerRemoved(IOScheduler* scheduler);
			bool				_IORequestScheduled(IOScheduler* scheduler,
									IORequest* request);
			bool				_IORequestFinished(IOScheduler* scheduler,
									IORequest* request);
			bool				_IOOperationStarted(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);
			bool				_IOOperationFinished(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);

			void				_WaitObjectCreated(addr_t object, uint32 type);
			void				_WaitObjectUsed(addr_t object, uint32 type);

	inline	void				_MaybeNotifyProfilerThreadLocked();
	inline	void				_MaybeNotifyProfilerThread();

	static	bool				_InitialImageIterator(struct image* image,
									void* cookie);

			void*				_AllocateBuffer(size_t size, int event, int cpu,
									int count);

	static	void				_InitTimers(void* cookie, int cpu);
	static	void				_UninitTimers(void* cookie, int cpu);
			void				_ScheduleTimer(int cpu);

			void				_DoSample();

	static	int32				_ProfilingEvent(struct timer* timer);

private:
			struct CPUProfileData {
				struct timer	timer;
				bigtime_t		timerEnd;
				bool			timerScheduled;
				addr_t			buffer[B_DEBUG_STACK_TRACE_DEPTH];
			};

			struct WaitObjectKey {
				addr_t	object;
				uint32	type;
			};

			struct WaitObject : DoublyLinkedListLinkImpl<WaitObject>,
					WaitObjectKey {
				struct WaitObject* hash_link;
			};

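			// The definition interface expected by BOpenHashTable:
			// HashKey()/Hash() map keys and values to hash values, Compare()
			// matches a key against a value, and GetLink() exposes the
			// intrusive hash_link.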
			struct WaitObjectTableDefinition {
				typedef WaitObjectKey	KeyType;
				typedef	WaitObject		ValueType;

				size_t HashKey(const WaitObjectKey& key) const
				{
					return (size_t)key.object ^ (size_t)key.type;
				}

				size_t Hash(const WaitObject* value) const
				{
					return HashKey(*value);
				}

				bool Compare(const WaitObjectKey& key,
					const WaitObject* value) const
				{
					return value->type == key.type
						&& value->object == key.object;
				}

				WaitObject*& GetLink(WaitObject* value) const
				{
					return value->hash_link;
				}
			};

			typedef DoublyLinkedList<WaitObject> WaitObjectList;
			typedef BOpenHashTable<WaitObjectTableDefinition> WaitObjectTable;

private:
			spinlock			fLock;
			team_id				fTeam;
			area_id				fUserArea;
			area_id				fKernelArea;
			size_t				fAreaSize;
			uint32				fFlags;
			uint32				fStackDepth;
			bigtime_t			fInterval;
			system_profiler_buffer_header* fHeader;
			uint8*				fBufferBase;
			size_t				fBufferCapacity;
			size_t				fBufferStart;
			size_t				fBufferSize;
			uint64				fDroppedEvents;
			int64				fLastTeamAddedSerialNumber;
			int64				fLastThreadAddedSerialNumber;
			bool				fTeamNotificationsRequested;
			bool				fTeamNotificationsEnabled;
			bool				fThreadNotificationsRequested;
			bool				fThreadNotificationsEnabled;
			bool				fImageNotificationsRequested;
			bool				fImageNotificationsEnabled;
			bool				fIONotificationsRequested;
			bool				fIONotificationsEnabled;
			bool				fSchedulerNotificationsRequested;
			bool				fWaitObjectNotificationsRequested;
			Thread* volatile	fWaitingProfilerThread;
			bool				fProfilingActive;
			bool				fReentered[SMP_MAX_CPUS];
			CPUProfileData		fCPUData[SMP_MAX_CPUS];
			WaitObject*			fWaitObjectBuffer;
			int32				fWaitObjectCount;
			WaitObjectList		fUsedWaitObjects;
			WaitObjectList		fFreeWaitObjects;
			WaitObjectTable		fWaitObjectTable;
};


/*!	Notifies the profiler thread when the profiling buffer is full enough.
	The caller must hold fLock.
*/
inline void
SystemProfiler::_MaybeNotifyProfilerThreadLocked()
{
	// If the buffer is full enough, notify the profiler.
	if (fWaitingProfilerThread != NULL && fBufferSize > fBufferCapacity / 2) {
		int cpu = smp_get_current_cpu();
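		// Unblocking the profiler thread below re-enters our scheduler
		// listener hooks on this CPU; flag the re-entry so that they know
		// fLock is already held.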
		fReentered[cpu] = true;

		InterruptsSpinLocker _(fWaitingProfilerThread->scheduler_lock);
		thread_unblock_locked(fWaitingProfilerThread, B_OK);

		fWaitingProfilerThread = NULL;
		fReentered[cpu] = false;
	}
}


inline void
SystemProfiler::_MaybeNotifyProfilerThread()
{
	if (fWaitingProfilerThread == NULL)
		return;

	SpinLocker locker(fLock);

	_MaybeNotifyProfilerThreadLocked();
}


SystemProfiler::SystemProfiler(team_id team, const area_info& userAreaInfo,
	const system_profiler_parameters& parameters)
	:
	fTeam(team),
	fUserArea(userAreaInfo.area),
	fKernelArea(-1),
	fAreaSize(userAreaInfo.size),
	fFlags(parameters.flags),
	fStackDepth(parameters.stack_depth),
	fInterval(parameters.interval),
	fHeader(NULL),
	fBufferBase(NULL),
	fBufferCapacity(0),
	fBufferStart(0),
	fBufferSize(0),
	fDroppedEvents(0),
	fLastTeamAddedSerialNumber(0),
	fLastThreadAddedSerialNumber(0),
	fTeamNotificationsRequested(false),
	fTeamNotificationsEnabled(false),
	fThreadNotificationsRequested(false),
	fThreadNotificationsEnabled(false),
	fImageNotificationsRequested(false),
	fImageNotificationsEnabled(false),
	fIONotificationsRequested(false),
	fIONotificationsEnabled(false),
	fSchedulerNotificationsRequested(false),
	fWaitObjectNotificationsRequested(false),
	fWaitingProfilerThread(NULL),
	fProfilingActive(false),
	fWaitObjectBuffer(NULL),
	fWaitObjectCount(0),
	fUsedWaitObjects(),
	fFreeWaitObjects(),
	fWaitObjectTable()
{
	B_INITIALIZE_SPINLOCK(&fLock);

	memset(fReentered, 0, sizeof(fReentered));

	// compute the number of wait objects we want to cache
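	// The cache budget is parameters.locking_lookup_size bytes; each cached
	// entry costs one WaitObject plus, on average, 3/2 hash table slots,
	// since Init() sizes the table to 3/2 the entry count.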
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		fWaitObjectCount = parameters.locking_lookup_size
			/ (sizeof(WaitObject) + (sizeof(void*) * 3 / 2));
		if (fWaitObjectCount < MIN_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MIN_WAIT_OBJECT_COUNT;
		if (fWaitObjectCount > MAX_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MAX_WAIT_OBJECT_COUNT;
	}
}


SystemProfiler::~SystemProfiler()
{
	// Wake up the user thread, if it is waiting, and mark profiling
	// inactive.
	InterruptsSpinLocker locker(fLock);
	if (fWaitingProfilerThread != NULL) {
		thread_unblock(fWaitingProfilerThread, B_OK);
		fWaitingProfilerThread = NULL;
	}
	fProfilingActive = false;
	locker.Unlock();

	// stop scheduler listening
	if (fSchedulerNotificationsRequested)
		scheduler_remove_listener(this);

	// stop wait object listening
	if (fWaitObjectNotificationsRequested) {
		InterruptsSpinLocker locker(gWaitObjectListenerLock);
		remove_wait_object_listener(this);
	}

	// deactivate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_UninitTimers, this);

	// cancel notifications
	NotificationManager& notificationManager
		= NotificationManager::Manager();

	// images
	if (fImageNotificationsRequested) {
		fImageNotificationsRequested = false;
		notificationManager.RemoveListener("images", NULL, *this);
	}

	// threads
	if (fThreadNotificationsRequested) {
		fThreadNotificationsRequested = false;
		notificationManager.RemoveListener("threads", NULL, *this);
	}

	// teams
	if (fTeamNotificationsRequested) {
		fTeamNotificationsRequested = false;
		notificationManager.RemoveListener("teams", NULL, *this);
	}

	// I/O
	if (fIONotificationsRequested) {
		fIONotificationsRequested = false;
		notificationManager.RemoveListener("I/O", NULL, *this);
	}

	// delete wait object related allocations
	fWaitObjectTable.Clear();
	delete[] fWaitObjectBuffer;

	// unlock the memory and delete the area
	if (fKernelArea >= 0) {
		unlock_memory(fHeader, fAreaSize, B_READ_DEVICE);
		delete_area(fKernelArea);
		fKernelArea = -1;
	}
}


status_t
SystemProfiler::Init()
{
	// clone the user area
	void* areaBase;
	fKernelArea = clone_area("profiling samples", &areaBase,
		B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA,
		fUserArea);
	if (fKernelArea < 0)
		return fKernelArea;

	// we need the memory locked
	status_t error = lock_memory(areaBase, fAreaSize, B_READ_DEVICE);
	if (error != B_OK) {
		delete_area(fKernelArea);
		fKernelArea = -1;
		return error;
	}

	// the buffer is ready for use
	fHeader = (system_profiler_buffer_header*)areaBase;
	fBufferBase = (uint8*)(fHeader + 1);
	fBufferCapacity = fAreaSize - (fBufferBase - (uint8*)areaBase);
	fHeader->start = 0;
	fHeader->size = 0;

	// allocate the wait object buffer and init the hash table
	if (fWaitObjectCount > 0) {
		fWaitObjectBuffer = new(std::nothrow) WaitObject[fWaitObjectCount];
		if (fWaitObjectBuffer == NULL)
			return B_NO_MEMORY;

		for (int32 i = 0; i < fWaitObjectCount; i++)
			fFreeWaitObjects.Add(fWaitObjectBuffer + i);

		error = fWaitObjectTable.Init(fWaitObjectCount * 3 / 2);
		if (error != B_OK)
			return error;
	}

	// start listening for notifications

	// teams
	NotificationManager& notificationManager
		= NotificationManager::Manager();
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		error = notificationManager.AddListener("teams",
			TEAM_ADDED | TEAM_REMOVED | TEAM_EXEC, *this);
		if (error != B_OK)
			return error;
		fTeamNotificationsRequested = true;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		error = notificationManager.AddListener("threads",
			THREAD_ADDED | THREAD_REMOVED, *this);
		if (error != B_OK)
			return error;
		fThreadNotificationsRequested = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		error = notificationManager.AddListener("images",
			IMAGE_ADDED | IMAGE_REMOVED, *this);
		if (error != B_OK)
			return error;
		fImageNotificationsRequested = true;
	}

	// I/O events
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		error = notificationManager.AddListener("I/O",
			IO_SCHEDULER_ADDED | IO_SCHEDULER_REMOVED
				| IO_SCHEDULER_REQUEST_SCHEDULED | IO_SCHEDULER_REQUEST_FINISHED
				| IO_SCHEDULER_OPERATION_STARTED
				| IO_SCHEDULER_OPERATION_FINISHED,
			*this);
		if (error != B_OK)
			return error;
		fIONotificationsRequested = true;
	}

	// We need to fill the buffer with the initial state of teams, threads,
	// and images.

	// teams
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		TeamListIterator iterator;
		while (Team* team = iterator.Next()) {
			locker.Unlock();

			bool added = _TeamAdded(team);

			// release the reference returned by the iterator
			team->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fTeamNotificationsEnabled = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		if (image_iterate_through_images(&_InitialImageIterator, this) != NULL)
			return B_BUFFER_OVERFLOW;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		ThreadListIterator iterator;
		while (Thread* thread = iterator.Next()) {
			locker.Unlock();

			bool added = _ThreadAdded(thread);

			// release the reference returned by the iterator
			thread->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fThreadNotificationsEnabled = true;
	}

	fProfilingActive = true;

	// start scheduler and wait object listening
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		scheduler_add_listener(this);
		fSchedulerNotificationsRequested = true;

		SpinLocker waitObjectLocker(gWaitObjectListenerLock);
		add_wait_object_listener(this);
		fWaitObjectNotificationsRequested = true;
		waitObjectLocker.Unlock();

		// fake schedule events for the initially running threads
		int32 cpuCount = smp_get_num_cpus();
		for (int32 i = 0; i < cpuCount; i++) {
			Thread* thread = gCPU[i].running_thread;
			if (thread != NULL)
				ThreadScheduled(thread, thread);
		}
	}

	// I/O scheduling
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		IOSchedulerRoster* roster = IOSchedulerRoster::Default();
		AutoLocker<IOSchedulerRoster> rosterLocker(roster);

		for (IOSchedulerList::ConstIterator it
				= roster->SchedulerList().GetIterator();
			IOScheduler* scheduler = it.Next();) {
			_IOSchedulerAdded(scheduler);
		}

		fIONotificationsEnabled = true;
	}

	// activate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_InitTimers, this);

	return B_OK;
}


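/*!	Advances the ring buffer by the \a bytesRead bytes the profiler thread has
	consumed, then blocks until the buffer is at least half full again, an
	error occurs, or -- provided the buffer is not empty -- the one second
	timeout expires.
	A rough sketch of the matching userland consumer loop (the _kern_*
	wrapper name and process_events() are assumptions for illustration):

		size_t bytesRead = 0;
		uint64 droppedEvents;
		while (_kern_system_profiler_next_buffer(bytesRead,
				&droppedEvents) == B_OK) {
			// consume header->size bytes starting at header->start
			bytesRead = process_events(header);
		}
*/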
status_t
SystemProfiler::NextBuffer(size_t bytesRead, uint64* _droppedEvents)
{
	InterruptsSpinLocker locker(fLock);

	if (fWaitingProfilerThread != NULL || !fProfilingActive
		|| bytesRead > fBufferSize) {
		return B_BAD_VALUE;
	}

	fBufferSize -= bytesRead;
	fBufferStart += bytesRead;
	if (fBufferStart > fBufferCapacity)
		fBufferStart -= fBufferCapacity;
	fHeader->size = fBufferSize;
	fHeader->start = fBufferStart;

	// already enough data in the buffer to return?
	if (fBufferSize > fBufferCapacity / 2)
		return B_OK;

	// Wait until the buffer gets too full or an error or a timeout occurs.
	while (true) {
		Thread* thread = thread_get_current_thread();
		fWaitingProfilerThread = thread;

		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "system profiler buffer");

		locker.Unlock();

		status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT, 1000000);

		locker.Lock();

		if (error == B_OK) {
			// whoever unblocked us has unset fWaitingProfilerThread for us
			break;
		}

		fWaitingProfilerThread = NULL;

		if (error != B_TIMED_OUT)
			return error;

		// just a timeout -- return if the buffer is not empty
		if (fBufferSize > 0)
			break;
	}

	if (_droppedEvents != NULL) {
		*_droppedEvents = fDroppedEvents;
		fDroppedEvents = 0;
	}

	return B_OK;
}


void
SystemProfiler::EventOccurred(NotificationService& service,
	const KMessage* event)
{
	int32 eventCode;
	if (event->FindInt32("event", &eventCode) != B_OK)
		return;

	if (strcmp(service.Name(), "teams") == 0) {
		Team* team = (Team*)event->GetPointer("teamStruct", NULL);
		if (team == NULL)
			return;

		switch (eventCode) {
			case TEAM_ADDED:
				if (fTeamNotificationsEnabled)
					_TeamAdded(team);
				break;

			case TEAM_REMOVED:
				if (team->id == fTeam) {
					// The profiling team is gone -- uninstall the profiler!
					InterruptsSpinLocker locker(sProfilerLock);
					if (sProfiler != this)
						return;

					sProfiler = NULL;
					locker.Unlock();

					ReleaseReference();
					return;
				}

				// When we're still doing the initial team list scan, we are
				// also interested in removals that happened to teams we have
				// already seen.
				if (fTeamNotificationsEnabled
					|| team->serial_number <= fLastTeamAddedSerialNumber) {
					_TeamRemoved(team);
				}
				break;

			case TEAM_EXEC:
				if (fTeamNotificationsEnabled)
					_TeamExec(team);
				break;
		}
	} else if (strcmp(service.Name(), "threads") == 0) {
		Thread* thread = (Thread*)event->GetPointer("threadStruct", NULL);
		if (thread == NULL)
			return;

		switch (eventCode) {
			case THREAD_ADDED:
				if (fThreadNotificationsEnabled)
					_ThreadAdded(thread);
				break;

			case THREAD_REMOVED:
				// When we're still doing the initial thread list scan, we are
				// also interested in removals that happened to threads we have
				// already seen.
				if (fThreadNotificationsEnabled
					|| thread->serial_number <= fLastThreadAddedSerialNumber) {
					_ThreadRemoved(thread);
				}
				break;
		}
	} else if (strcmp(service.Name(), "images") == 0) {
		if (!fImageNotificationsEnabled)
			return;

		struct image* image = (struct image*)event->GetPointer(
			"imageStruct", NULL);
		if (image == NULL)
			return;

		switch (eventCode) {
			case IMAGE_ADDED:
				_ImageAdded(image);
				break;

			case IMAGE_REMOVED:
				_ImageRemoved(image);
				break;
		}
	} else if (strcmp(service.Name(), "I/O") == 0) {
		if (!fIONotificationsEnabled)
			return;

		IOScheduler* scheduler = (IOScheduler*)event->GetPointer("scheduler",
			NULL);
		if (scheduler == NULL)
			return;

		IORequest* request = (IORequest*)event->GetPointer("request", NULL);
		IOOperation* operation = (IOOperation*)event->GetPointer("operation",
			NULL);

		switch (eventCode) {
			case IO_SCHEDULER_ADDED:
				_IOSchedulerAdded(scheduler);
				break;

			case IO_SCHEDULER_REMOVED:
				_IOSchedulerRemoved(scheduler);
				break;

			case IO_SCHEDULER_REQUEST_SCHEDULED:
				_IORequestScheduled(scheduler, request);
				break;

			case IO_SCHEDULER_REQUEST_FINISHED:
				_IORequestFinished(scheduler, request);
				break;

			case IO_SCHEDULER_OPERATION_STARTED:
				_IOOperationStarted(scheduler, request, operation);
				break;

			case IO_SCHEDULER_OPERATION_FINISHED:
				_IOOperationFinished(scheduler, request, operation);
				break;
		}
	}

	_MaybeNotifyProfilerThread();
}


void
SystemProfiler::ThreadEnqueuedInRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	SpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_enqueued_in_run_queue* event
		= (system_profiler_thread_enqueued_in_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_enqueued_in_run_queue),
				B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;
	event->priority = thread->priority;

	fHeader->size = fBufferSize;

	// Unblock the profiler thread, if necessary, but don't unblock the thread,
	// if it had been waiting on a condition variable, since then we'd likely
	// deadlock in ConditionVariable::NotifyOne(), as it acquires a static
	// spinlock.
	if (thread->wait.type != THREAD_BLOCK_TYPE_CONDITION_VARIABLE)
		_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadRemovedFromRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	SpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_removed_from_run_queue* event
		= (system_profiler_thread_removed_from_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_removed_from_run_queue),
				B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadScheduled(Thread* oldThread, Thread* newThread)
{
	int cpu = smp_get_current_cpu();

	SpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	// If the old thread starts waiting, handle the wait object.
	if (oldThread->state == B_THREAD_WAITING)
		_WaitObjectUsed((addr_t)oldThread->wait.object, oldThread->wait.type);

	system_profiler_thread_scheduled* event
		= (system_profiler_thread_scheduled*)
			_AllocateBuffer(sizeof(system_profiler_thread_scheduled),
				B_SYSTEM_PROFILER_THREAD_SCHEDULED, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = newThread->id;
	event->previous_thread = oldThread->id;
	event->previous_thread_state = oldThread->state;
	event->previous_thread_wait_object_type = oldThread->wait.type;
	event->previous_thread_wait_object = (addr_t)oldThread->wait.object;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::SemaphoreCreated(sem_id id, const char* name)
{
	_WaitObjectCreated((addr_t)id, THREAD_BLOCK_TYPE_SEMAPHORE);
}


void
SystemProfiler::ConditionVariableInitialized(ConditionVariable* variable)
{
	_WaitObjectCreated((addr_t)variable, THREAD_BLOCK_TYPE_CONDITION_VARIABLE);
}


void
SystemProfiler::MutexInitialized(mutex* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_MUTEX);
}


void
SystemProfiler::RWLockInitialized(rw_lock* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_RW_LOCK);
}


bool
SystemProfiler::_TeamAdded(Team* team)
{
	TeamLocker teamLocker(team);

	size_t nameLen = strlen(team->Name());
	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	// During the initial scan, check whether the team is already gone again.
	// Later this cannot happen, since the team creator notifies us before
	// actually starting the team.
	if (!fTeamNotificationsEnabled && team->state >= TEAM_STATE_DEATH)
		return true;

	if (team->serial_number > fLastTeamAddedSerialNumber)
		fLastTeamAddedSerialNumber = team->serial_number;

	system_profiler_team_added* event = (system_profiler_team_added*)
		_AllocateBuffer(
			sizeof(system_profiler_team_added) + nameLen + 1 + argsLen,
			B_SYSTEM_PROFILER_TEAM_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strcpy(event->name, team->Name());
	event->args_offset = nameLen + 1;
	strcpy(event->name + nameLen + 1, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamRemoved(Team* team)
{
	// TODO: It is possible that we get remove notifications for teams that
	// had already been removed from the global team list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	TeamLocker teamLocker(team);
	InterruptsSpinLocker locker(fLock);

	system_profiler_team_removed* event = (system_profiler_team_removed*)
		_AllocateBuffer(sizeof(system_profiler_team_removed),
			B_SYSTEM_PROFILER_TEAM_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamExec(Team* team)
{
	TeamLocker teamLocker(team);

	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	system_profiler_team_exec* event = (system_profiler_team_exec*)
		_AllocateBuffer(sizeof(system_profiler_team_exec) + argsLen,
			B_SYSTEM_PROFILER_TEAM_EXEC, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strlcpy(event->thread_name, team->main_thread->name,
		sizeof(event->thread_name));
	strcpy(event->args, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadAdded(Thread* thread)
{
	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	// During the initial scan, check whether the thread is already gone
	// again. Later this cannot happen, since the thread creator notifies us
	// before actually starting the thread.
	if (!fThreadNotificationsEnabled && !thread->IsAlive())
		return true;

	if (thread->serial_number > fLastThreadAddedSerialNumber)
		fLastThreadAddedSerialNumber = thread->serial_number;

	system_profiler_thread_added* event = (system_profiler_thread_added*)
		_AllocateBuffer(sizeof(system_profiler_thread_added),
			B_SYSTEM_PROFILER_THREAD_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;
	strlcpy(event->name, thread->name, sizeof(event->name));

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadRemoved(Thread* thread)
{
	// TODO: It is possible that we get remove notifications for threads that
	// had already been removed from the global thread list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	system_profiler_thread_removed* event
		= (system_profiler_thread_removed*)
			_AllocateBuffer(sizeof(system_profiler_thread_removed),
				B_SYSTEM_PROFILER_THREAD_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageAdded(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_added* event = (system_profiler_image_added*)
		_AllocateBuffer(sizeof(system_profiler_image_added),
			B_SYSTEM_PROFILER_IMAGE_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->info = image->info;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageRemoved(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_removed* event = (system_profiler_image_removed*)
		_AllocateBuffer(sizeof(system_profiler_image_removed),
			B_SYSTEM_PROFILER_IMAGE_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->image = image->info.id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerAdded(IOScheduler* scheduler)
{
	size_t nameLen = strlen(scheduler->Name());

	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_added* event
		= (system_profiler_io_scheduler_added*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_added) + nameLen,
			B_SYSTEM_PROFILER_IO_SCHEDULER_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();
	strcpy(event->name, scheduler->Name());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerRemoved(IOScheduler* scheduler)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_removed* event
		= (system_profiler_io_scheduler_removed*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_removed),
			B_SYSTEM_PROFILER_IO_SCHEDULER_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestScheduled(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_scheduled* event
		= (system_profiler_io_request_scheduled*)_AllocateBuffer(
			sizeof(system_profiler_io_request_scheduled),
			B_SYSTEM_PROFILER_IO_REQUEST_SCHEDULED, 0, 0);
	if (event == NULL)
		return false;

	IORequestOwner* owner = request->Owner();

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->team = owner->team;
	event->thread = owner->thread;
	event->request = request;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();
	event->priority = owner->priority;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestFinished(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_finished* event
		= (system_profiler_io_request_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_request_finished),
			B_SYSTEM_PROFILER_IO_REQUEST_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationStarted(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_started* event
		= (system_profiler_io_operation_started*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_started),
			B_SYSTEM_PROFILER_IO_OPERATION_STARTED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationFinished(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_finished* event
		= (system_profiler_io_operation_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_finished),
			B_SYSTEM_PROFILER_IO_OPERATION_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


void
SystemProfiler::_WaitObjectCreated(addr_t object, uint32 type)
{
	SpinLocker locker(fLock);

	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If found, remove it and add it to the free list. This might sound
	// weird, but it makes sense, since we lazily track *used* wait objects
	// only. I.e. the object in the table is now guaranteed to be obsolete.
	if (waitObject) {
		fWaitObjectTable.RemoveUnchecked(waitObject);
		fUsedWaitObjects.Remove(waitObject);
		fFreeWaitObjects.Add(waitObject, false);
	}
}


void
SystemProfiler::_WaitObjectUsed(addr_t object, uint32 type)
{
	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If already known, re-queue it as most recently used and be done.
	if (waitObject != NULL) {
		fUsedWaitObjects.Remove(waitObject);
		fUsedWaitObjects.Add(waitObject);
		return;
	}

	// not known yet -- get the info
	const char* name = NULL;
	const void* referencedObject = NULL;

	switch (type) {
		case THREAD_BLOCK_TYPE_SEMAPHORE:
		{
			name = sem_get_name_unsafe((sem_id)object);
			break;
		}

		case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
		{
			ConditionVariable* variable = (ConditionVariable*)object;
			name = variable->ObjectType();
			referencedObject = variable->Object();
			break;
		}

		case THREAD_BLOCK_TYPE_MUTEX:
		{
			mutex* lock = (mutex*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_RW_LOCK:
		{
			rw_lock* lock = (rw_lock*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_OTHER:
		{
			name = (const char*)(void*)object;
			break;
		}

		case THREAD_BLOCK_TYPE_SNOOZE:
		case THREAD_BLOCK_TYPE_SIGNAL:
		default:
			return;
	}

	// add the event
	size_t nameLen = name != NULL ? strlen(name) : 0;

	system_profiler_wait_object_info* event
		= (system_profiler_wait_object_info*)
			_AllocateBuffer(sizeof(system_profiler_wait_object_info) + nameLen,
				B_SYSTEM_PROFILER_WAIT_OBJECT_INFO, 0, 0);
	if (event == NULL)
		return;

	event->type = type;
	event->object = object;
	event->referenced_object = (addr_t)referencedObject;
	if (name != NULL)
		strcpy(event->name, name);
	else
		event->name[0] = '\0';

	fHeader->size = fBufferSize;

	// add the wait object

	// get a free one or steal the least recently used one
	waitObject = fFreeWaitObjects.RemoveHead();
	if (waitObject == NULL) {
		waitObject = fUsedWaitObjects.RemoveHead();
		fWaitObjectTable.RemoveUnchecked(waitObject);
	}

	waitObject->object = object;
	waitObject->type = type;
	fWaitObjectTable.InsertUnchecked(waitObject);
	fUsedWaitObjects.Add(waitObject);
}


/*static*/ bool
SystemProfiler::_InitialImageIterator(struct image* image, void* cookie)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->fImageNotificationsEnabled = true;
		// Set that here, since the image lock is being held now.
	return !self->_ImageAdded(image);
}


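/*!	Allocates space in the ring buffer for an event with a \a size byte
	payload (rounded up to a multiple of four) plus the event header, and
	fills in the header. When the event does not fit into the space before
	the physical buffer end, a B_SYSTEM_PROFILER_BUFFER_END marker is written
	there and the allocation wraps to the buffer start; if it would then
	overwrite unread data, the event is dropped and counted in
	fDroppedEvents.
	Returns a pointer to the event payload right after the header, or NULL
	if the event was dropped. The caller must hold fLock.
*/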
void*
SystemProfiler::_AllocateBuffer(size_t size, int event, int cpu, int count)
{
	size = (size + 3) / 4 * 4;
	size += sizeof(system_profiler_event_header);

	size_t end = fBufferStart + fBufferSize;
	if (end + size > fBufferCapacity) {
		// Buffer is wrapped or needs wrapping.
		if (end < fBufferCapacity) {
			// not wrapped yet, but needed
			system_profiler_event_header* header
				= (system_profiler_event_header*)(fBufferBase + end);
			header->event = B_SYSTEM_PROFILER_BUFFER_END;
			fBufferSize = fBufferCapacity - fBufferStart;
			end = 0;
		} else
			end -= fBufferCapacity;

		if (end + size > fBufferStart) {
			fDroppedEvents++;
			return NULL;
		}
	}

	system_profiler_event_header* header
		= (system_profiler_event_header*)(fBufferBase + end);
	header->event = event;
	header->cpu = cpu;
	header->size = size - sizeof(system_profiler_event_header);

	fBufferSize += size;

	return header + 1;
}


/*static*/ void
SystemProfiler::_InitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->_ScheduleTimer(cpu);
}


/*static*/ void
SystemProfiler::_UninitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;

	CPUProfileData& cpuData = self->fCPUData[cpu];
	cancel_timer(&cpuData.timer);
	cpuData.timerScheduled = false;
}


void
SystemProfiler::_ScheduleTimer(int cpu)
{
	CPUProfileData& cpuData = fCPUData[cpu];
	cpuData.timerEnd = system_time() + fInterval;
	cpuData.timer.user_data = this;
	add_timer(&cpuData.timer, &_ProfilingEvent, fInterval,
		B_ONE_SHOT_RELATIVE_TIMER);
	cpuData.timerScheduled = true;
}


void
SystemProfiler::_DoSample()
{
	Thread* thread = thread_get_current_thread();
	int cpu = thread->cpu->cpu_num;
	CPUProfileData& cpuData = fCPUData[cpu];

	// get the samples
	int32 count = arch_debug_get_stack_trace(cpuData.buffer, fStackDepth, 1,
		0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

	InterruptsSpinLocker locker(fLock);

	system_profiler_samples* event = (system_profiler_samples*)
		_AllocateBuffer(sizeof(system_profiler_samples)
				+ count * sizeof(addr_t),
			B_SYSTEM_PROFILER_SAMPLES, cpu, count);
	if (event == NULL)
		return;

	event->thread = thread->id;
	memcpy(event->samples, cpuData.buffer, count * sizeof(addr_t));

	fHeader->size = fBufferSize;
}


/*static*/ int32
SystemProfiler::_ProfilingEvent(struct timer* timer)
{
	SystemProfiler* self = (SystemProfiler*)timer->user_data;

	self->_DoSample();
	self->_ScheduleTimer(timer->cpu);

	return B_HANDLED_INTERRUPT;
}


// #pragma mark - private kernel API


status_t
start_system_profiler(size_t areaSize, uint32 stackDepth, bigtime_t interval)
{
	struct ParameterDeleter {
		ParameterDeleter(area_id area)
			:
			fArea(area),
			fDetached(false)
		{
		}

		~ParameterDeleter()
		{
			if (!fDetached) {
				delete_area(fArea);
				delete sRecordedParameters;
				sRecordedParameters = NULL;
			}
		}

		void Detach()
		{
			fDetached = true;
		}

	private:
		area_id	fArea;
		bool	fDetached;
	};
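
	// On any early-return error path below, the deleter frees the area and
	// the recorded parameters; Detach() is called only once the profiler has
	// taken ownership of them.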

	void* address;
	area_id area = create_area("kernel profile data", &address,
		B_ANY_KERNEL_ADDRESS, areaSize, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		return area;

	ParameterDeleter parameterDeleter(area);

	sRecordedParameters = new(std::nothrow) system_profiler_parameters;
	if (sRecordedParameters == NULL)
		return B_NO_MEMORY;

	sRecordedParameters->buffer_area = area;
	sRecordedParameters->flags = B_SYSTEM_PROFILER_TEAM_EVENTS
		| B_SYSTEM_PROFILER_THREAD_EVENTS | B_SYSTEM_PROFILER_IMAGE_EVENTS
		| B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS
		| B_SYSTEM_PROFILER_SAMPLING_EVENTS;
	sRecordedParameters->locking_lookup_size = 4096;
	sRecordedParameters->interval = interval;
	sRecordedParameters->stack_depth = stackDepth;

	area_info areaInfo;
	get_area_info(area, &areaInfo);

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(B_SYSTEM_TEAM,
		areaInfo, *sRecordedParameters);
	if (profiler == NULL)
		return B_NO_MEMORY;

	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	status_t error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;

	parameterDeleter.Detach();
	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


void
stop_system_profiler()
{
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL)
		return;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();
}


// #pragma mark - syscalls


status_t
_user_system_profiler_start(struct system_profiler_parameters* userParameters)
{
	// copy params to the kernel
	struct system_profiler_parameters parameters;
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters)
		|| user_memcpy(&parameters, userParameters, sizeof(parameters))
			!= B_OK) {
		return B_BAD_ADDRESS;
	}

	// check the parameters
	team_id team = thread_get_current_thread()->team->id;

	area_info areaInfo;
	status_t error = get_area_info(parameters.buffer_area, &areaInfo);
	if (error != B_OK)
		return error;

	if (areaInfo.team != team)
		return B_BAD_VALUE;

	if ((parameters.flags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0) {
		if (parameters.stack_depth < 1)
			return B_BAD_VALUE;

		if (parameters.interval < B_DEBUG_MIN_PROFILE_INTERVAL)
			parameters.interval = B_DEBUG_MIN_PROFILE_INTERVAL;

		if (parameters.stack_depth > B_DEBUG_STACK_TRACE_DEPTH)
			parameters.stack_depth = B_DEBUG_STACK_TRACE_DEPTH;
	}

	// quick check to see whether we do already have a profiler installed
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;
	locker.Unlock();

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(team, areaInfo,
		parameters);
	if (profiler == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	locker.Lock();
	if (sProfiler != NULL)
		return B_BUSY;

	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


status_t
_user_system_profiler_next_buffer(size_t bytesRead, uint64* _droppedEvents)
{
	if (_droppedEvents != NULL && !IS_USER_ADDRESS(_droppedEvents))
		return B_BAD_ADDRESS;

	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	// get a reference to the profiler
	SystemProfiler* profiler = sProfiler;
	BReference<SystemProfiler> reference(profiler);
	locker.Unlock();

	uint64 droppedEvents;
	status_t error = profiler->NextBuffer(bytesRead,
		_droppedEvents != NULL ? &droppedEvents : NULL);
	if (error == B_OK && _droppedEvents != NULL)
		user_memcpy(_droppedEvents, &droppedEvents, sizeof(droppedEvents));

	return error;
}


status_t
_user_system_profiler_stop()
{
	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();

	return B_OK;
}


status_t
_user_system_profiler_recorded(struct system_profiler_parameters* userParameters)
{
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters))
		return B_BAD_ADDRESS;
	if (sRecordedParameters == NULL)
		return B_ERROR;

	// Transfer the area to the userland process

	void* address;
	area_id newArea = transfer_area(sRecordedParameters->buffer_area, &address,
		B_ANY_ADDRESS, team_get_current_team_id(), true);
	if (newArea < 0)
		return newArea;

	status_t status = set_area_protection(newArea, B_READ_AREA);
	if (status == B_OK) {
		sRecordedParameters->buffer_area = newArea;

		status = user_memcpy(userParameters, sRecordedParameters,
			sizeof(system_profiler_parameters));
	}
	if (status != B_OK)
		delete_area(newArea);

	delete sRecordedParameters;
	sRecordedParameters = NULL;

	return status;
}
1662