/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */

#include <scheduling_analysis.h>

#include <elf.h>
#include <kernel.h>
#include <scheduler_defs.h>
#include <tracing.h>
#include <util/AutoLock.h>

#include "scheduler_tracing.h"


#if SCHEDULER_TRACING

namespace SchedulingAnalysis {

using namespace SchedulerTracing;

#if SCHEDULING_ANALYSIS_TRACING
using namespace SchedulingAnalysisTracing;
#endif

struct ThreadWaitObject;

struct HashObjectKey {
	virtual ~HashObjectKey()
	{
	}

	virtual uint32 HashKey() const = 0;
};


struct HashObject {
	HashObject*	next;

	virtual ~HashObject()
	{
	}

	virtual uint32 HashKey() const = 0;
	virtual bool Equals(const HashObjectKey* key) const = 0;
};
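
// All records below (Thread, WaitObject, ThreadWaitObject) derive from
// HashObject and share one open hash table with intrusive chaining: each
// object is its own bucket-list node via `next`. A minimal sketch of the
// bucket walk, assuming a table of `bucketCount` slots (illustrative only,
// not part of this file):
//
//	HashObject* object = table[key.HashKey() % bucketCount];
//	while (object != NULL && !object->Equals(&key))
//		object = object->next;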


struct ThreadKey : HashObjectKey {
	thread_id	id;

	ThreadKey(thread_id id)
		:
		id(id)
	{
	}

	virtual uint32 HashKey() const
	{
		return id;
	}
};


struct Thread : HashObject, scheduling_analysis_thread {
	ScheduleState state;
	bigtime_t lastTime;

	ThreadWaitObject* waitObject;

	Thread(thread_id id)
		:
		state(UNKNOWN),
		lastTime(0),
		waitObject(NULL)
	{
		this->id = id;
		name[0] = '\0';

		runs = 0;
		total_run_time = 0;
		min_run_time = -1;
		max_run_time = -1;

		latencies = 0;
		total_latency = 0;
		min_latency = -1;
		max_latency = -1;

		reruns = 0;
		total_rerun_time = 0;
		min_rerun_time = -1;
		max_rerun_time = -1;

		unspecified_wait_time = 0;

		preemptions = 0;

		wait_objects = NULL;
	}

	virtual uint32 HashKey() const
	{
		return id;
	}

	virtual bool Equals(const HashObjectKey* _key) const
	{
		const ThreadKey* key = dynamic_cast<const ThreadKey*>(_key);
		if (key == NULL)
			return false;
		return key->id == id;
	}
};
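
// `state` and `lastTime` drive the per-thread state machine in
// analyze_scheduling() below: `lastTime` records when the thread entered its
// current state, so on each transition the elapsed interval can be booked as
// run time, latency, rerun time, or wait time. `waitObject` remembers what
// the thread is currently blocked on.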


struct WaitObjectKey : HashObjectKey {
	uint32	type;
	void*	object;

	WaitObjectKey(uint32 type, void* object)
		:
		type(type),
		object(object)
	{
	}

	virtual uint32 HashKey() const
	{
		return type ^ (uint32)(addr_t)object;
	}
};


struct WaitObject : HashObject, scheduling_analysis_wait_object {
	WaitObject(uint32 type, void* object)
	{
		this->type = type;
		this->object = object;
		name[0] = '\0';
		referenced_object = NULL;
	}

	virtual uint32 HashKey() const
	{
		return type ^ (uint32)(addr_t)object;
	}

	virtual bool Equals(const HashObjectKey* _key) const
	{
		const WaitObjectKey* key = dynamic_cast<const WaitObjectKey*>(_key);
		if (key == NULL)
			return false;
		return key->type == type && key->object == object;
	}
};


struct ThreadWaitObjectKey : HashObjectKey {
	thread_id				thread;
	uint32					type;
	void*					object;

	ThreadWaitObjectKey(thread_id thread, uint32 type, void* object)
		:
		thread(thread),
		type(type),
		object(object)
	{
	}

	virtual uint32 HashKey() const
	{
		return thread ^ type ^ (uint32)(addr_t)object;
	}
};


struct ThreadWaitObject : HashObject, scheduling_analysis_thread_wait_object {
	ThreadWaitObject(thread_id thread, WaitObject* waitObject)
	{
		this->thread = thread;
		wait_object = waitObject;
		wait_time = 0;
		waits = 0;
		next_in_list = NULL;
	}

	virtual uint32 HashKey() const
	{
		return thread ^ wait_object->type ^ (uint32)(addr_t)wait_object->object;
	}

	virtual bool Equals(const HashObjectKey* _key) const
	{
		const ThreadWaitObjectKey* key
			= dynamic_cast<const ThreadWaitObjectKey*>(_key);
		if (key == NULL)
			return false;
		return key->thread == thread && key->type == wait_object->type
			&& key->object == wait_object->object;
	}
};


class SchedulingAnalysisManager {
public:
	SchedulingAnalysisManager(void* buffer, size_t size)
		:
		fBuffer(buffer),
		fSize(size),
		fHashTable(),
		fHashTableSize(0)
	{
		fAnalysis.thread_count = 0;
		fAnalysis.threads = NULL;
		fAnalysis.wait_object_count = 0;
		fAnalysis.thread_wait_object_count = 0;

		size_t maxObjectSize = max_c(max_c(sizeof(Thread), sizeof(WaitObject)),
			sizeof(ThreadWaitObject));
		fHashTableSize = size / (maxObjectSize + sizeof(HashObject*));
		fHashTable = (HashObject**)((uint8*)fBuffer + fSize) - fHashTableSize;
		fNextAllocation = (uint8*)fBuffer;
		fRemainingBytes = (addr_t)fHashTable - (addr_t)fBuffer;

		// The table lives in the caller's buffer and thus starts out with
		// garbage; clear it so the bucket lists terminate properly.
		memset(fHashTable, 0, fHashTableSize * sizeof(HashObject*));

		image_info info;
		if (elf_get_image_info_for_address((addr_t)&scheduler_init, &info)
				== B_OK) {
			fKernelStart = (addr_t)info.text;
			fKernelEnd = (addr_t)info.data + info.data_size;
		} else {
			fKernelStart = 0;
			fKernelEnd = 0;
		}
	}
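
	// Layout of the user-supplied buffer (a sketch):
	//
	//	fBuffer            fHashTable             fBuffer + fSize
	//	| allocations ---> | bucket pointer array |
	//
	// Objects are carved off the bottom of the buffer via Allocate(); the
	// bucket array of fHashTableSize slots sits at the top.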

	const scheduling_analysis* Analysis() const
	{
		return &fAnalysis;
	}

	void* Allocate(size_t size)
	{
		size = (size + 7) & ~(size_t)7;

		if (size > fRemainingBytes)
			return NULL;

		void* address = fNextAllocation;
		fNextAllocation += size;
		fRemainingBytes -= size;
		return address;
	}
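
	// Example: the expression `(size + 7) & ~(size_t)7` rounds a request up
	// to the next multiple of 8, so Allocate(13) consumes 16 bytes. Together
	// with the buffer alignment fixup in _user_analyze_scheduling() this
	// keeps every returned address 8-byte aligned.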

	void Insert(HashObject* object)
	{
		uint32 index = object->HashKey() % fHashTableSize;
		object->next = fHashTable[index];
		fHashTable[index] = object;
	}

	void Remove(HashObject* object)
	{
		uint32 index = object->HashKey() % fHashTableSize;
		HashObject** slot = &fHashTable[index];
		while (*slot != object)
			slot = &(*slot)->next;

		*slot = object->next;
	}

	HashObject* Lookup(const HashObjectKey& key) const
	{
		uint32 index = key.HashKey() % fHashTableSize;
		HashObject* object = fHashTable[index];
		while (object != NULL && !object->Equals(&key))
			object = object->next;
		return object;
	}

	Thread* ThreadFor(thread_id id) const
	{
		return dynamic_cast<Thread*>(Lookup(ThreadKey(id)));
	}

	WaitObject* WaitObjectFor(uint32 type, void* object) const
	{
		return dynamic_cast<WaitObject*>(Lookup(WaitObjectKey(type, object)));
	}

	ThreadWaitObject* ThreadWaitObjectFor(thread_id thread, uint32 type,
		void* object) const
	{
		return dynamic_cast<ThreadWaitObject*>(
			Lookup(ThreadWaitObjectKey(thread, type, object)));
	}

	status_t AddThread(thread_id id, const char* name)
	{
		Thread* thread = ThreadFor(id);
		if (thread == NULL) {
			void* memory = Allocate(sizeof(Thread));
			if (memory == NULL)
				return B_NO_MEMORY;

			thread = new(memory) Thread(id);
			Insert(thread);
			fAnalysis.thread_count++;
		}

		if (name != NULL && thread->name[0] == '\0')
			strlcpy(thread->name, name, sizeof(thread->name));

		return B_OK;
	}

	status_t AddWaitObject(uint32 type, void* object,
		WaitObject** _waitObject = NULL)
	{
		if (WaitObjectFor(type, object) != NULL)
			return B_OK;

		void* memory = Allocate(sizeof(WaitObject));
		if (memory == NULL)
			return B_NO_MEMORY;

		WaitObject* waitObject = new(memory) WaitObject(type, object);
		Insert(waitObject);
		fAnalysis.wait_object_count++;

		// Set a dummy name for snooze() and waiting for signals, so we don't
		// try to update them later on.
		if (type == THREAD_BLOCK_TYPE_SNOOZE
			|| type == THREAD_BLOCK_TYPE_SIGNAL) {
			strcpy(waitObject->name, "?");
		}

		if (_waitObject != NULL)
			*_waitObject = waitObject;

		return B_OK;
	}

	status_t UpdateWaitObject(uint32 type, void* object, const char* name,
		void* referencedObject)
	{
		WaitObject* waitObject = WaitObjectFor(type, object);
		if (waitObject == NULL)
			return B_OK;

		if (waitObject->name[0] != '\0') {
			// This is a new object at the same address. Replace the old one.
			Remove(waitObject);
			status_t error = AddWaitObject(type, object, &waitObject);
			if (error != B_OK)
				return error;
		}

		if (name == NULL)
			name = "?";

		strlcpy(waitObject->name, name, sizeof(waitObject->name));
		waitObject->referenced_object = referencedObject;

		return B_OK;
	}

	bool UpdateWaitObjectDontAdd(uint32 type, void* object, const char* name,
		void* referencedObject)
	{
		WaitObject* waitObject = WaitObjectFor(type, object);
		if (waitObject == NULL || waitObject->name[0] != '\0')
			return false;

		if (name == NULL)
			name = "?";

		strlcpy(waitObject->name, name, sizeof(waitObject->name));
		waitObject->referenced_object = referencedObject;

		return true;
	}

	status_t AddThreadWaitObject(Thread* thread, uint32 type, void* object)
	{
		WaitObject* waitObject = WaitObjectFor(type, object);
		if (waitObject == NULL) {
			// The algorithm should prevent this case.
			return B_ERROR;
		}

		ThreadWaitObject* threadWaitObject = ThreadWaitObjectFor(thread->id,
			type, object);
		if (threadWaitObject == NULL
			|| threadWaitObject->wait_object != waitObject) {
			if (threadWaitObject != NULL)
				Remove(threadWaitObject);

			void* memory = Allocate(sizeof(ThreadWaitObject));
			if (memory == NULL)
				return B_NO_MEMORY;

			threadWaitObject = new(memory) ThreadWaitObject(thread->id,
				waitObject);
			Insert(threadWaitObject);
			fAnalysis.thread_wait_object_count++;

			threadWaitObject->next_in_list = thread->wait_objects;
			thread->wait_objects = threadWaitObject;
		}

		thread->waitObject = threadWaitObject;

		return B_OK;
	}

	int32 MissingWaitObjects() const
	{
		// Iterate through the hash table and count the wait objects that don't
		// have a name yet.
		int32 count = 0;
		for (uint32 i = 0; i < fHashTableSize; i++) {
			HashObject* object = fHashTable[i];
			while (object != NULL) {
				WaitObject* waitObject = dynamic_cast<WaitObject*>(object);
				if (waitObject != NULL && waitObject->name[0] == '\0')
					count++;

				object = object->next;
			}
		}

		return count;
	}

	status_t FinishAnalysis()
	{
		// allocate the thread array
		scheduling_analysis_thread** threads
			= (scheduling_analysis_thread**)Allocate(
				sizeof(Thread*) * fAnalysis.thread_count);
		if (threads == NULL)
			return B_NO_MEMORY;

		// Iterate through the hash table and collect all threads. Also polish
		// all wait objects that haven't been updated yet.
		int32 index = 0;
		for (uint32 i = 0; i < fHashTableSize; i++) {
			HashObject* object = fHashTable[i];
			while (object != NULL) {
				Thread* thread = dynamic_cast<Thread*>(object);
				if (thread != NULL) {
					threads[index++] = thread;
				} else if (WaitObject* waitObject
						= dynamic_cast<WaitObject*>(object)) {
					_PolishWaitObject(waitObject);
				}

				object = object->next;
			}
		}

		fAnalysis.threads = threads;
		dprintf("scheduling analysis: free bytes: %lu/%lu\n", fRemainingBytes,
			fSize);
		return B_OK;
	}

private:
	void _PolishWaitObject(WaitObject* waitObject)
	{
		if (waitObject->name[0] != '\0')
			return;

		switch (waitObject->type) {
			case THREAD_BLOCK_TYPE_SEMAPHORE:
			{
				sem_info info;
				if (get_sem_info((sem_id)(addr_t)waitObject->object, &info)
						== B_OK) {
					strlcpy(waitObject->name, info.name,
						sizeof(waitObject->name));
				}
				break;
			}
			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
			{
				// If the condition variable object is in the kernel image,
				// assume it is still initialized.
				ConditionVariable* variable
					= (ConditionVariable*)waitObject->object;
				if (!_IsInKernelImage(variable))
					break;

				waitObject->referenced_object = (void*)variable->Object();
				strlcpy(waitObject->name, variable->ObjectType(),
					sizeof(waitObject->name));
				break;
			}

			case THREAD_BLOCK_TYPE_MUTEX:
			{
				// If the mutex object is in the kernel image, assume it is
				// still initialized.
				mutex* lock = (mutex*)waitObject->object;
				if (!_IsInKernelImage(lock))
					break;

				strlcpy(waitObject->name, lock->name, sizeof(waitObject->name));
				break;
			}

			case THREAD_BLOCK_TYPE_RW_LOCK:
			{
				// If the rw_lock object is in the kernel image, assume it is
				// still initialized.
				rw_lock* lock = (rw_lock*)waitObject->object;
				if (!_IsInKernelImage(lock))
					break;

				strlcpy(waitObject->name, lock->name, sizeof(waitObject->name));
				break;
			}

			case THREAD_BLOCK_TYPE_OTHER:
			{
				const char* name = (const char*)waitObject->object;
				if (name == NULL || _IsInKernelImage(name))
					return;

				strlcpy(waitObject->name, name, sizeof(waitObject->name));
			}

			case THREAD_BLOCK_TYPE_SNOOZE:
			case THREAD_BLOCK_TYPE_SIGNAL:
			default:
				break;
		}

		if (waitObject->name[0] != '\0')
			return;

		strcpy(waitObject->name, "?");
	}

	bool _IsInKernelImage(const void* _address)
	{
		addr_t address = (addr_t)_address;
		return address >= fKernelStart && address < fKernelEnd;
	}

private:
	scheduling_analysis	fAnalysis;
	void*				fBuffer;
	size_t				fSize;
	HashObject**		fHashTable;
	uint32				fHashTableSize;
	uint8*				fNextAllocation;
	size_t				fRemainingBytes;
	addr_t				fKernelStart;
	addr_t				fKernelEnd;
};
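
// Illustrative driving sequence (a sketch with assumed placeholder values
// threadID, threadName, and semaphoreAddress; the real driver is
// analyze_scheduling() below):
//
//	SchedulingAnalysisManager manager(buffer, size);
//	manager.AddThread(threadID, threadName);
//	manager.AddWaitObject(THREAD_BLOCK_TYPE_SEMAPHORE, semaphoreAddress);
//	// ... replay the trace entries ...
//	manager.FinishAnalysis();
//	const scheduling_analysis* result = manager.Analysis();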


static status_t
analyze_scheduling(bigtime_t from, bigtime_t until,
	SchedulingAnalysisManager& manager)
{
	// analyze how many threads and locking primitives we're talking about
	TraceEntryIterator iterator;
	iterator.MoveTo(INT_MAX);
	while (TraceEntry* _entry = iterator.Previous()) {
		SchedulerTraceEntry* baseEntry
			= dynamic_cast<SchedulerTraceEntry*>(_entry);
		if (baseEntry == NULL || baseEntry->Time() >= until)
			continue;
		if (baseEntry->Time() < from)
			break;

		status_t error = manager.AddThread(baseEntry->ThreadID(),
			baseEntry->Name());
		if (error != B_OK)
			return error;

		if (ScheduleThread* entry = dynamic_cast<ScheduleThread*>(_entry)) {
			error = manager.AddThread(entry->PreviousThreadID(), NULL);
			if (error != B_OK)
				return error;

			if (entry->PreviousState() == B_THREAD_WAITING) {
				void* waitObject = (void*)entry->PreviousWaitObject();
				switch (entry->PreviousWaitObjectType()) {
					case THREAD_BLOCK_TYPE_SNOOZE:
					case THREAD_BLOCK_TYPE_SIGNAL:
						waitObject = NULL;
						break;
					case THREAD_BLOCK_TYPE_SEMAPHORE:
					case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
					case THREAD_BLOCK_TYPE_MUTEX:
					case THREAD_BLOCK_TYPE_RW_LOCK:
					case THREAD_BLOCK_TYPE_OTHER:
					default:
						break;
				}

				error = manager.AddWaitObject(entry->PreviousWaitObjectType(),
					waitObject);
				if (error != B_OK)
					return error;
			}
		}
	}

#if SCHEDULING_ANALYSIS_TRACING
	int32 startEntryIndex = iterator.Index();
#endif

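	// Second pass: walk forward through the requested interval and feed each
	// entry into the per-thread state machine, accumulating run times,
	// latencies, rerun times, and wait times.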
	while (TraceEntry* _entry = iterator.Next()) {
#if SCHEDULING_ANALYSIS_TRACING
		// might be info on a wait object
		if (WaitObjectTraceEntry* waitObjectEntry
				= dynamic_cast<WaitObjectTraceEntry*>(_entry)) {
			status_t error = manager.UpdateWaitObject(waitObjectEntry->Type(),
				waitObjectEntry->Object(), waitObjectEntry->Name(),
				waitObjectEntry->ReferencedObject());
			if (error != B_OK)
				return error;
			continue;
		}
#endif

		SchedulerTraceEntry* baseEntry
			= dynamic_cast<SchedulerTraceEntry*>(_entry);
		if (baseEntry == NULL)
			continue;
		if (baseEntry->Time() >= until)
			break;

		if (ScheduleThread* entry = dynamic_cast<ScheduleThread*>(_entry)) {
			// scheduled thread
			Thread* thread = manager.ThreadFor(entry->ThreadID());

			bigtime_t diffTime = entry->Time() - thread->lastTime;

			if (thread->state == READY) {
				// thread scheduled after having been woken up
				thread->latencies++;
				thread->total_latency += diffTime;
				if (thread->min_latency < 0 || diffTime < thread->min_latency)
					thread->min_latency = diffTime;
				if (diffTime > thread->max_latency)
					thread->max_latency = diffTime;
			} else if (thread->state == PREEMPTED) {
				// thread scheduled after having been preempted before
				thread->reruns++;
				thread->total_rerun_time += diffTime;
				if (thread->min_rerun_time < 0
						|| diffTime < thread->min_rerun_time) {
					thread->min_rerun_time = diffTime;
				}
				if (diffTime > thread->max_rerun_time)
					thread->max_rerun_time = diffTime;
			}

			if (thread->state == STILL_RUNNING) {
				// Thread was running and continues to run.
				thread->state = RUNNING;
			}

			if (thread->state != RUNNING) {
				thread->lastTime = entry->Time();
				thread->state = RUNNING;
			}

			// unscheduled thread

			if (entry->ThreadID() == entry->PreviousThreadID())
				continue;

			thread = manager.ThreadFor(entry->PreviousThreadID());

			diffTime = entry->Time() - thread->lastTime;

			if (thread->state == STILL_RUNNING) {
				// thread preempted
				thread->runs++;
				thread->preemptions++;
				thread->total_run_time += diffTime;
				if (thread->min_run_time < 0 || diffTime < thread->min_run_time)
					thread->min_run_time = diffTime;
				if (diffTime > thread->max_run_time)
					thread->max_run_time = diffTime;

				thread->lastTime = entry->Time();
				thread->state = PREEMPTED;
			} else if (thread->state == RUNNING) {
				// thread starts waiting (it hadn't been added to the run
				// queue before being unscheduled)
				thread->runs++;
				thread->total_run_time += diffTime;
				if (thread->min_run_time < 0 || diffTime < thread->min_run_time)
					thread->min_run_time = diffTime;
				if (diffTime > thread->max_run_time)
					thread->max_run_time = diffTime;

				if (entry->PreviousState() == B_THREAD_WAITING) {
					void* waitObject = (void*)entry->PreviousWaitObject();
					switch (entry->PreviousWaitObjectType()) {
						case THREAD_BLOCK_TYPE_SNOOZE:
						case THREAD_BLOCK_TYPE_SIGNAL:
							waitObject = NULL;
							break;
						case THREAD_BLOCK_TYPE_SEMAPHORE:
						case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
						case THREAD_BLOCK_TYPE_MUTEX:
						case THREAD_BLOCK_TYPE_RW_LOCK:
						case THREAD_BLOCK_TYPE_OTHER:
						default:
							break;
					}

					status_t error = manager.AddThreadWaitObject(thread,
						entry->PreviousWaitObjectType(), waitObject);
					if (error != B_OK)
						return error;
				}

				thread->lastTime = entry->Time();
				thread->state = WAITING;
			} else if (thread->state == UNKNOWN) {
				uint32 threadState = entry->PreviousState();
				if (threadState == B_THREAD_WAITING
					|| threadState == B_THREAD_SUSPENDED) {
					thread->lastTime = entry->Time();
					thread->state = WAITING;
				} else if (threadState == B_THREAD_READY) {
					thread->lastTime = entry->Time();
					thread->state = PREEMPTED;
				}
			}
		} else if (EnqueueThread* entry
				= dynamic_cast<EnqueueThread*>(_entry)) {
			// thread enqueued in run queue

			Thread* thread = manager.ThreadFor(entry->ThreadID());

			if (thread->state == RUNNING || thread->state == STILL_RUNNING) {
				// Thread was running and is reentered into the run queue. This
				// is done by the scheduler if the thread remains ready.
				thread->state = STILL_RUNNING;
			} else {
				// Thread was waiting and is ready now.
				bigtime_t diffTime = entry->Time() - thread->lastTime;
				if (thread->waitObject != NULL) {
					thread->waitObject->wait_time += diffTime;
					thread->waitObject->waits++;
					thread->waitObject = NULL;
				} else if (thread->state != UNKNOWN)
					thread->unspecified_wait_time += diffTime;

				thread->lastTime = entry->Time();
				thread->state = READY;
			}
		} else if (RemoveThread* entry = dynamic_cast<RemoveThread*>(_entry)) {
			// thread removed from run queue

			Thread* thread = manager.ThreadFor(entry->ThreadID());

			// This really only happens when the thread priority is changed
			// while the thread is ready.

			bigtime_t diffTime = entry->Time() - thread->lastTime;
			if (thread->state == RUNNING) {
				// This should never happen.
				thread->runs++;
				thread->total_run_time += diffTime;
				if (thread->min_run_time < 0 || diffTime < thread->min_run_time)
					thread->min_run_time = diffTime;
				if (diffTime > thread->max_run_time)
					thread->max_run_time = diffTime;
			} else if (thread->state == READY || thread->state == PREEMPTED) {
				// Not really correct, but the case is rare and we keep it
				// simple.
				thread->unspecified_wait_time += diffTime;
			}

			thread->lastTime = entry->Time();
			thread->state = WAITING;
		}
	}

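	// Wait objects created before the analysis interval never had their
	// creation entries visited above. Walk backwards from the interval start
	// to recover their names (only possible with scheduling analysis
	// tracing).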
#if SCHEDULING_ANALYSIS_TRACING
	int32 missingWaitObjects = manager.MissingWaitObjects();
	if (missingWaitObjects > 0) {
		iterator.MoveTo(startEntryIndex + 1);
		while (TraceEntry* _entry = iterator.Previous()) {
			if (WaitObjectTraceEntry* waitObjectEntry
					= dynamic_cast<WaitObjectTraceEntry*>(_entry)) {
				if (manager.UpdateWaitObjectDontAdd(
						waitObjectEntry->Type(), waitObjectEntry->Object(),
						waitObjectEntry->Name(),
						waitObjectEntry->ReferencedObject())) {
					if (--missingWaitObjects == 0)
						break;
				}
			}
		}
	}
#endif

	return B_OK;
}

}	// namespace SchedulingAnalysis

#endif	// SCHEDULER_TRACING


status_t
_user_analyze_scheduling(bigtime_t from, bigtime_t until, void* buffer,
	size_t size, scheduling_analysis* analysis)
{
#if SCHEDULER_TRACING
	using namespace SchedulingAnalysis;

	if ((addr_t)buffer & 0x7) {
		addr_t diff = (addr_t)buffer & 0x7;
		buffer = (void*)((addr_t)buffer + 8 - diff);
		size -= 8 - diff;
	}
	size &= ~(size_t)0x7;
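	// Worked example (illustrative): for buffer == 0x1003 and size == 100,
	// diff == 3, so buffer becomes 0x1008 and size 95; the final mask rounds
	// size down to 88. Both the start address and the usable length are then
	// 8-byte aligned.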

	if (buffer == NULL || !IS_USER_ADDRESS(buffer) || size == 0)
		return B_BAD_VALUE;

	status_t error = lock_memory(buffer, size, B_READ_DEVICE);
	if (error != B_OK)
		return error;

	SchedulingAnalysisManager manager(buffer, size);

	InterruptsLocker locker;
	lock_tracing_buffer();

	error = analyze_scheduling(from, until, manager);

	unlock_tracing_buffer();
	locker.Unlock();

	if (error == B_OK)
		error = manager.FinishAnalysis();

	unlock_memory(buffer, size, B_READ_DEVICE);

	if (error == B_OK) {
		error = user_memcpy(analysis, manager.Analysis(),
			sizeof(scheduling_analysis));
	}

	return error;
#else
	return B_BAD_VALUE;
#endif
}