/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */

#include <scheduling_analysis.h>

#include <new>

#include <elf.h>
#include <kernel.h>
#include <scheduler_defs.h>
#include <tracing.h>
#include <util/AutoLock.h>

#include "scheduler_tracing.h"


#if SCHEDULER_TRACING

namespace SchedulingAnalysis {

using namespace SchedulerTracing;

#if SCHEDULING_ANALYSIS_TRACING
using namespace SchedulingAnalysisTracing;
#endif

struct ThreadWaitObject;

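// The analysis data is kept in a minimal intrusive chained hash table:
// every stored object derives from HashObject and carries its own "next"
// link, so the table needs no per-node allocations. Lookups are keyed by
// HashObjectKey subclasses, one per stored object type.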
struct HashObjectKey {
	virtual ~HashObjectKey()
	{
	}

	virtual uint32 HashKey() const = 0;
};


struct HashObject {
	HashObject* next;

	virtual ~HashObject()
	{
	}

	virtual uint32 HashKey() const = 0;
	virtual bool Equals(const HashObjectKey* key) const = 0;
};


struct ThreadKey : HashObjectKey {
	thread_id id;

	ThreadKey(thread_id id)
		:
		id(id)
	{
	}

	virtual uint32 HashKey() const
	{
		return id;
	}
};


struct Thread : HashObject, scheduling_analysis_thread {
	ScheduleState state;
	bigtime_t lastTime;

	ThreadWaitObject* waitObject;

	Thread(thread_id id)
		:
		state(UNKNOWN),
		lastTime(0),
		waitObject(NULL)
	{
		this->id = id;
		name[0] = '\0';

		runs = 0;
		total_run_time = 0;
		min_run_time = -1;
		max_run_time = -1;

		latencies = 0;
		total_latency = 0;
		min_latency = -1;
		max_latency = -1;

		reruns = 0;
		total_rerun_time = 0;
		min_rerun_time = -1;
		max_rerun_time = -1;

		unspecified_wait_time = 0;

		preemptions = 0;

		wait_objects = NULL;
	}

	virtual uint32 HashKey() const
	{
		return id;
	}

	virtual bool Equals(const HashObjectKey* _key) const
	{
		const ThreadKey* key = dynamic_cast<const ThreadKey*>(_key);
		if (key == NULL)
			return false;
		return key->id == id;
	}
};


struct WaitObjectKey : HashObjectKey {
	uint32 type;
	void* object;

	WaitObjectKey(uint32 type, void* object)
		:
		type(type),
		object(object)
	{
	}

	virtual uint32 HashKey() const
	{
		return type ^ (uint32)(addr_t)object;
	}
};


struct WaitObject : HashObject, scheduling_analysis_wait_object {
	WaitObject(uint32 type, void* object)
	{
		this->type = type;
		this->object = object;
		name[0] = '\0';
		referenced_object = NULL;
	}

	virtual uint32 HashKey() const
	{
		return type ^ (uint32)(addr_t)object;
	}

	virtual bool Equals(const HashObjectKey* _key) const
	{
		const WaitObjectKey* key = dynamic_cast<const WaitObjectKey*>(_key);
		if (key == NULL)
			return false;
		return key->type == type && key->object == object;
	}
};


struct ThreadWaitObjectKey : HashObjectKey {
	thread_id thread;
	uint32 type;
	void* object;

	ThreadWaitObjectKey(thread_id thread, uint32 type, void* object)
		:
		thread(thread),
		type(type),
		object(object)
	{
	}

	virtual uint32 HashKey() const
	{
		return thread ^ type ^ (uint32)(addr_t)object;
	}
};


struct ThreadWaitObject : HashObject, scheduling_analysis_thread_wait_object {
	ThreadWaitObject(thread_id thread, WaitObject* waitObject)
	{
		this->thread = thread;
		wait_object = waitObject;
		wait_time = 0;
		waits = 0;
		next_in_list = NULL;
	}

	virtual uint32 HashKey() const
	{
		return thread ^ wait_object->type
			^ (uint32)(addr_t)wait_object->object;
	}

	virtual bool Equals(const HashObjectKey* _key) const
	{
		const ThreadWaitObjectKey* key
			= dynamic_cast<const ThreadWaitObjectKey*>(_key);
		if (key == NULL)
			return false;
		return key->thread == thread && key->type == wait_object->type
			&& key->object == wait_object->object;
	}
};


class SchedulingAnalysisManager {
public:
	SchedulingAnalysisManager(void* buffer, size_t size)
		:
		fBuffer(buffer),
		fSize(size),
		fHashTable(),
		fHashTableSize(0)
	{
		fAnalysis.thread_count = 0;
		fAnalysis.threads = NULL;
		fAnalysis.wait_object_count = 0;
		fAnalysis.thread_wait_object_count = 0;

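		// Carve up the caller-supplied buffer: object allocations grow
		// upwards from the start, while the hash table slot array sits at
		// the very end. The slot count is sized so that, in the worst case,
		// each slot can be paired with one object of the largest type.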
		size_t maxObjectSize = max_c(max_c(sizeof(Thread), sizeof(WaitObject)),
			sizeof(ThreadWaitObject));
		fHashTableSize = size / (maxObjectSize + sizeof(HashObject*));
		fHashTable = (HashObject**)((uint8*)fBuffer + fSize) - fHashTableSize;
		fNextAllocation = (uint8*)fBuffer;
		fRemainingBytes = (addr_t)fHashTable - (addr_t)fBuffer;

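		// Record the kernel image's address range. _IsInKernelImage() uses
		// it to decide whether pointers recorded in old trace entries may
		// still be dereferenced safely when polishing wait objects.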
		image_info info;
		if (elf_get_image_info_for_address((addr_t)&scheduler_init, &info)
				== B_OK) {
			fKernelStart = (addr_t)info.text;
			fKernelEnd = (addr_t)info.data + info.data_size;
		} else {
			fKernelStart = 0;
			fKernelEnd = 0;
		}
	}

	const scheduling_analysis* Analysis() const
	{
		return &fAnalysis;
	}

	void* Allocate(size_t size)
	{
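		// Simple bump allocator over the analysis buffer: round each request
		// up to a multiple of 8 so all objects stay 8-byte aligned.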
		size = (size + 7) & ~(size_t)7;

		if (size > fRemainingBytes)
			return NULL;

		void* address = fNextAllocation;
		fNextAllocation += size;
		fRemainingBytes -= size;
		return address;
	}

	void Insert(HashObject* object)
	{
		uint32 index = object->HashKey() % fHashTableSize;
		object->next = fHashTable[index];
		fHashTable[index] = object;
	}

	void Remove(HashObject* object)
	{
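		// The object is expected to be in the table; otherwise the walk
		// below would run off the end of the chain.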
		uint32 index = object->HashKey() % fHashTableSize;
		HashObject** slot = &fHashTable[index];
		while (*slot != object)
			slot = &(*slot)->next;

		*slot = object->next;
	}

	HashObject* Lookup(const HashObjectKey& key) const
	{
		uint32 index = key.HashKey() % fHashTableSize;
		HashObject* object = fHashTable[index];
		while (object != NULL && !object->Equals(&key))
			object = object->next;
		return object;
	}

	Thread* ThreadFor(thread_id id) const
	{
		return dynamic_cast<Thread*>(Lookup(ThreadKey(id)));
	}

	WaitObject* WaitObjectFor(uint32 type, void* object) const
	{
		return dynamic_cast<WaitObject*>(Lookup(WaitObjectKey(type, object)));
	}

	ThreadWaitObject* ThreadWaitObjectFor(thread_id thread, uint32 type,
		void* object) const
	{
		return dynamic_cast<ThreadWaitObject*>(
			Lookup(ThreadWaitObjectKey(thread, type, object)));
	}

	status_t AddThread(thread_id id, const char* name)
	{
		Thread* thread = ThreadFor(id);
		if (thread == NULL) {
			void* memory = Allocate(sizeof(Thread));
			if (memory == NULL)
				return B_NO_MEMORY;

			thread = new(memory) Thread(id);
			Insert(thread);
			fAnalysis.thread_count++;
		}

		if (name != NULL && thread->name[0] == '\0')
			strlcpy(thread->name, name, sizeof(thread->name));

		return B_OK;
	}

	status_t AddWaitObject(uint32 type, void* object,
		WaitObject** _waitObject = NULL)
	{
		if (WaitObjectFor(type, object) != NULL)
			return B_OK;

		void* memory = Allocate(sizeof(WaitObject));
		if (memory == NULL)
			return B_NO_MEMORY;

		WaitObject* waitObject = new(memory) WaitObject(type, object);
		Insert(waitObject);
		fAnalysis.wait_object_count++;

		// Set a dummy name for snooze() and waiting for signals, so we don't
		// try to update them later on.
		if (type == THREAD_BLOCK_TYPE_SNOOZE
			|| type == THREAD_BLOCK_TYPE_SIGNAL) {
			strcpy(waitObject->name, "?");
		}

		if (_waitObject != NULL)
			*_waitObject = waitObject;

		return B_OK;
	}

	status_t UpdateWaitObject(uint32 type, void* object, const char* name,
		void* referencedObject)
	{
		WaitObject* waitObject = WaitObjectFor(type, object);
		if (waitObject == NULL)
			return B_OK;

		if (waitObject->name[0] != '\0') {
			// This is a new object at the same address. Replace the old one.
			Remove(waitObject);
			status_t error = AddWaitObject(type, object, &waitObject);
			if (error != B_OK)
				return error;
		}

		if (name == NULL)
			name = "?";

		strlcpy(waitObject->name, name, sizeof(waitObject->name));
		waitObject->referenced_object = referencedObject;

		return B_OK;
	}

	bool UpdateWaitObjectDontAdd(uint32 type, void* object, const char* name,
		void* referencedObject)
	{
		WaitObject* waitObject = WaitObjectFor(type, object);
		if (waitObject == NULL || waitObject->name[0] != '\0')
			return false;

		if (name == NULL)
			name = "?";

		strlcpy(waitObject->name, name, sizeof(waitObject->name));
		waitObject->referenced_object = referencedObject;

		return true;
	}

	status_t AddThreadWaitObject(Thread* thread, uint32 type, void* object)
	{
		WaitObject* waitObject = WaitObjectFor(type, object);
		if (waitObject == NULL) {
			// The algorithm should prevent this case.
			return B_ERROR;
		}

		ThreadWaitObject* threadWaitObject = ThreadWaitObjectFor(thread->id,
			type, object);
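		// Create a fresh ThreadWaitObject if the thread has none for this
		// wait object yet, or if its record still points at a WaitObject
		// that has since been replaced by a new object at the same address.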
		if (threadWaitObject == NULL
			|| threadWaitObject->wait_object != waitObject) {
			if (threadWaitObject != NULL)
				Remove(threadWaitObject);

			void* memory = Allocate(sizeof(ThreadWaitObject));
			if (memory == NULL)
				return B_NO_MEMORY;

			threadWaitObject = new(memory) ThreadWaitObject(thread->id,
				waitObject);
			Insert(threadWaitObject);
			fAnalysis.thread_wait_object_count++;

			threadWaitObject->next_in_list = thread->wait_objects;
			thread->wait_objects = threadWaitObject;
		}

		thread->waitObject = threadWaitObject;

		return B_OK;
	}

	int32 MissingWaitObjects() const
	{
		// Iterate through the hash table and count the wait objects that
		// don't have a name yet.
		int32 count = 0;
		for (uint32 i = 0; i < fHashTableSize; i++) {
			HashObject* object = fHashTable[i];
			while (object != NULL) {
				WaitObject* waitObject = dynamic_cast<WaitObject*>(object);
				if (waitObject != NULL && waitObject->name[0] == '\0')
					count++;

				object = object->next;
			}
		}

		return count;
	}

	status_t FinishAnalysis()
	{
		// allocate the thread array
		scheduling_analysis_thread** threads
			= (scheduling_analysis_thread**)Allocate(
				sizeof(Thread*) * fAnalysis.thread_count);
		if (threads == NULL)
			return B_NO_MEMORY;

		// Iterate through the hash table and collect all threads. Also polish
		// all wait objects that haven't been updated yet.
		int32 index = 0;
		for (uint32 i = 0; i < fHashTableSize; i++) {
			HashObject* object = fHashTable[i];
			while (object != NULL) {
				Thread* thread = dynamic_cast<Thread*>(object);
				if (thread != NULL) {
					threads[index++] = thread;
				} else if (WaitObject* waitObject
						= dynamic_cast<WaitObject*>(object)) {
					_PolishWaitObject(waitObject);
				}

				object = object->next;
			}
		}

		fAnalysis.threads = threads;
		dprintf("scheduling analysis: free bytes: %lu/%lu\n", fRemainingBytes,
			fSize);
		return B_OK;
	}

private:
	void _PolishWaitObject(WaitObject* waitObject)
	{
		if (waitObject->name[0] != '\0')
			return;

		switch (waitObject->type) {
			case THREAD_BLOCK_TYPE_SEMAPHORE:
			{
				sem_info info;
				if (get_sem_info((sem_id)(addr_t)waitObject->object, &info)
						== B_OK) {
					strlcpy(waitObject->name, info.name,
						sizeof(waitObject->name));
				}
				break;
			}
			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
			{
				// If the condition variable object is in the kernel image,
				// assume it is still initialized.
				ConditionVariable* variable
					= (ConditionVariable*)waitObject->object;
				if (!_IsInKernelImage(variable))
					break;

				waitObject->referenced_object = (void*)variable->Object();
				strlcpy(waitObject->name, variable->ObjectType(),
					sizeof(waitObject->name));
				break;
			}

			case THREAD_BLOCK_TYPE_MUTEX:
			{
				// If the mutex object is in the kernel image, assume it is
				// still initialized.
				mutex* lock = (mutex*)waitObject->object;
				if (!_IsInKernelImage(lock))
					break;

				strlcpy(waitObject->name, lock->name,
					sizeof(waitObject->name));
				break;
			}

			case THREAD_BLOCK_TYPE_RW_LOCK:
			{
				// If the rw_lock object is in the kernel image, assume it is
				// still initialized.
				rw_lock* lock = (rw_lock*)waitObject->object;
				if (!_IsInKernelImage(lock))
					break;

				strlcpy(waitObject->name, lock->name,
					sizeof(waitObject->name));
				break;
			}

			case THREAD_BLOCK_TYPE_OTHER:
			{
				const char* name = (const char*)waitObject->object;
				if (name == NULL || _IsInKernelImage(name))
					return;

				strlcpy(waitObject->name, name, sizeof(waitObject->name));
				break;
			}

			case THREAD_BLOCK_TYPE_OTHER_OBJECT:
			case THREAD_BLOCK_TYPE_SNOOZE:
			case THREAD_BLOCK_TYPE_SIGNAL:
			default:
				break;
		}

		if (waitObject->name[0] != '\0')
			return;

		strcpy(waitObject->name, "?");
	}

	bool _IsInKernelImage(const void* _address)
	{
		addr_t address = (addr_t)_address;
		return address >= fKernelStart && address < fKernelEnd;
	}

private:
	scheduling_analysis	fAnalysis;
	void*				fBuffer;
	size_t				fSize;
	HashObject**		fHashTable;
	uint32				fHashTableSize;
	uint8*				fNextAllocation;
	size_t				fRemainingBytes;
	addr_t				fKernelStart;
	addr_t				fKernelEnd;
};


static status_t
analyze_scheduling(bigtime_t from, bigtime_t until,
	SchedulingAnalysisManager& manager)
{
	// analyze how many threads and locking primitives we're talking about
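	// We iterate backwards from the newest entry until we pass "from", so
	// that all threads and wait objects are registered up front and the
	// iterator ends up positioned at the start of the interval for the
	// forward pass below.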
	TraceEntryIterator iterator;
	iterator.MoveTo(INT_MAX);
	while (TraceEntry* _entry = iterator.Previous()) {
		SchedulerTraceEntry* baseEntry
			= dynamic_cast<SchedulerTraceEntry*>(_entry);
		if (baseEntry == NULL || baseEntry->Time() >= until)
			continue;
		if (baseEntry->Time() < from)
			break;

		status_t error = manager.AddThread(baseEntry->ThreadID(),
			baseEntry->Name());
		if (error != B_OK)
			return error;

		if (ScheduleThread* entry = dynamic_cast<ScheduleThread*>(_entry)) {
			error = manager.AddThread(entry->PreviousThreadID(), NULL);
			if (error != B_OK)
				return error;

			if (entry->PreviousState() == B_THREAD_WAITING) {
				void* waitObject = (void*)entry->PreviousWaitObject();
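				// snooze() and signal waits carry no meaningful wait object
				// pointer; collapse it to NULL so each of these block types
				// maps to a single WaitObject.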
				switch (entry->PreviousWaitObjectType()) {
					case THREAD_BLOCK_TYPE_SNOOZE:
					case THREAD_BLOCK_TYPE_SIGNAL:
						waitObject = NULL;
						break;
					case THREAD_BLOCK_TYPE_SEMAPHORE:
					case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
					case THREAD_BLOCK_TYPE_MUTEX:
					case THREAD_BLOCK_TYPE_RW_LOCK:
					case THREAD_BLOCK_TYPE_OTHER:
					default:
						break;
				}

				error = manager.AddWaitObject(entry->PreviousWaitObjectType(),
					waitObject);
				if (error != B_OK)
					return error;
			}
		}
	}

#if SCHEDULING_ANALYSIS_TRACING
	int32 startEntryIndex = iterator.Index();
#endif

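	// Second pass: replay the interval forward, treating each thread as a
	// small state machine (UNKNOWN, RUNNING, STILL_RUNNING, PREEMPTED,
	// READY, WAITING) and attributing the time between consecutive events
	// to run time, latency, rerun time, or wait time accordingly.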
	while (TraceEntry* _entry = iterator.Next()) {
#if SCHEDULING_ANALYSIS_TRACING
		// might be info on a wait object
		if (WaitObjectTraceEntry* waitObjectEntry
				= dynamic_cast<WaitObjectTraceEntry*>(_entry)) {
			status_t error = manager.UpdateWaitObject(waitObjectEntry->Type(),
				waitObjectEntry->Object(), waitObjectEntry->Name(),
				waitObjectEntry->ReferencedObject());
			if (error != B_OK)
				return error;
			continue;
		}
#endif

		SchedulerTraceEntry* baseEntry
			= dynamic_cast<SchedulerTraceEntry*>(_entry);
		if (baseEntry == NULL)
			continue;
		if (baseEntry->Time() >= until)
			break;

		if (ScheduleThread* entry = dynamic_cast<ScheduleThread*>(_entry)) {
			// scheduled thread
			Thread* thread = manager.ThreadFor(entry->ThreadID());

			bigtime_t diffTime = entry->Time() - thread->lastTime;

			if (thread->state == READY) {
				// thread scheduled after having been woken up
				thread->latencies++;
				thread->total_latency += diffTime;
				if (thread->min_latency < 0 || diffTime < thread->min_latency)
					thread->min_latency = diffTime;
				if (diffTime > thread->max_latency)
					thread->max_latency = diffTime;
			} else if (thread->state == PREEMPTED) {
				// thread scheduled after having been preempted before
				thread->reruns++;
				thread->total_rerun_time += diffTime;
				if (thread->min_rerun_time < 0
						|| diffTime < thread->min_rerun_time) {
					thread->min_rerun_time = diffTime;
				}
				if (diffTime > thread->max_rerun_time)
					thread->max_rerun_time = diffTime;
			}

			if (thread->state == STILL_RUNNING) {
				// Thread was running and continues to run.
				thread->state = RUNNING;
			}

			if (thread->state != RUNNING) {
				thread->lastTime = entry->Time();
				thread->state = RUNNING;
			}

			// unscheduled thread

			if (entry->ThreadID() == entry->PreviousThreadID())
				continue;

			thread = manager.ThreadFor(entry->PreviousThreadID());

			diffTime = entry->Time() - thread->lastTime;

			if (thread->state == STILL_RUNNING) {
				// thread preempted
				thread->runs++;
				thread->preemptions++;
				thread->total_run_time += diffTime;
				if (thread->min_run_time < 0
						|| diffTime < thread->min_run_time) {
					thread->min_run_time = diffTime;
				}
				if (diffTime > thread->max_run_time)
					thread->max_run_time = diffTime;

				thread->lastTime = entry->Time();
				thread->state = PREEMPTED;
			} else if (thread->state == RUNNING) {
				// thread starts waiting (it hadn't been added to the run
				// queue before being unscheduled)
				thread->runs++;
				thread->total_run_time += diffTime;
				if (thread->min_run_time < 0
						|| diffTime < thread->min_run_time) {
					thread->min_run_time = diffTime;
				}
				if (diffTime > thread->max_run_time)
					thread->max_run_time = diffTime;

				if (entry->PreviousState() == B_THREAD_WAITING) {
					void* waitObject = (void*)entry->PreviousWaitObject();
					switch (entry->PreviousWaitObjectType()) {
						case THREAD_BLOCK_TYPE_SNOOZE:
						case THREAD_BLOCK_TYPE_SIGNAL:
							waitObject = NULL;
							break;
						case THREAD_BLOCK_TYPE_SEMAPHORE:
						case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
						case THREAD_BLOCK_TYPE_MUTEX:
						case THREAD_BLOCK_TYPE_RW_LOCK:
						case THREAD_BLOCK_TYPE_OTHER:
						default:
							break;
					}

					status_t error = manager.AddThreadWaitObject(thread,
						entry->PreviousWaitObjectType(), waitObject);
					if (error != B_OK)
						return error;
				}

				thread->lastTime = entry->Time();
				thread->state = WAITING;
			} else if (thread->state == UNKNOWN) {
				uint32 threadState = entry->PreviousState();
				if (threadState == B_THREAD_WAITING
						|| threadState == B_THREAD_SUSPENDED) {
					thread->lastTime = entry->Time();
					thread->state = WAITING;
				} else if (threadState == B_THREAD_READY) {
					thread->lastTime = entry->Time();
					thread->state = PREEMPTED;
				}
			}
		} else if (EnqueueThread* entry
				= dynamic_cast<EnqueueThread*>(_entry)) {
			// thread enqueued in run queue

			Thread* thread = manager.ThreadFor(entry->ThreadID());

			if (thread->state == RUNNING || thread->state == STILL_RUNNING) {
				// Thread was running and is reentered into the run queue.
				// This is done by the scheduler, if the thread remains ready.
				thread->state = STILL_RUNNING;
			} else {
				// Thread was waiting and is ready now.
				bigtime_t diffTime = entry->Time() - thread->lastTime;
				if (thread->waitObject != NULL) {
					thread->waitObject->wait_time += diffTime;
					thread->waitObject->waits++;
					thread->waitObject = NULL;
				} else if (thread->state != UNKNOWN)
					thread->unspecified_wait_time += diffTime;

				thread->lastTime = entry->Time();
				thread->state = READY;
			}
		} else if (RemoveThread* entry = dynamic_cast<RemoveThread*>(_entry)) {
			// thread removed from run queue

			Thread* thread = manager.ThreadFor(entry->ThreadID());

			// This really only happens when the thread priority is changed
			// while the thread is ready.

			bigtime_t diffTime = entry->Time() - thread->lastTime;
			if (thread->state == RUNNING) {
				// This should never happen.
				thread->runs++;
				thread->total_run_time += diffTime;
				if (thread->min_run_time < 0
						|| diffTime < thread->min_run_time) {
					thread->min_run_time = diffTime;
				}
				if (diffTime > thread->max_run_time)
					thread->max_run_time = diffTime;
			} else if (thread->state == READY || thread->state == PREEMPTED) {
				// Not really correct, but the case is rare and we keep it
				// simple.
				thread->unspecified_wait_time += diffTime;
			}

			thread->lastTime = entry->Time();
			thread->state = WAITING;
		}
	}


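	// If wait objects are still missing names, search the trace backwards
	// from the start of the interval: the entries naming those objects may
	// predate "from".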
#if SCHEDULING_ANALYSIS_TRACING
	int32 missingWaitObjects = manager.MissingWaitObjects();
	if (missingWaitObjects > 0) {
		iterator.MoveTo(startEntryIndex + 1);
		while (TraceEntry* _entry = iterator.Previous()) {
			if (WaitObjectTraceEntry* waitObjectEntry
					= dynamic_cast<WaitObjectTraceEntry*>(_entry)) {
				if (manager.UpdateWaitObjectDontAdd(
						waitObjectEntry->Type(), waitObjectEntry->Object(),
						waitObjectEntry->Name(),
						waitObjectEntry->ReferencedObject())) {
					if (--missingWaitObjects == 0)
						break;
				}
			}
		}
	}
#endif

	return B_OK;
}

}	// namespace SchedulingAnalysis

#endif	// SCHEDULER_TRACING


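// Syscall backend for userland scheduling analysis. A minimal userland
// usage sketch (assuming the matching _kern_analyze_scheduling() syscall
// stub; names and sizes are illustrative only):
//
//	static uint8 buffer[10 * 1024 * 1024];
//	scheduling_analysis analysis;
//	bigtime_t now = system_time();
//	status_t error = _kern_analyze_scheduling(now - 10000000, now, buffer,
//		sizeof(buffer), &analysis);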
status_t
_user_analyze_scheduling(bigtime_t from, bigtime_t until, void* buffer,
	size_t size, scheduling_analysis* analysis)
{
#if SCHEDULER_TRACING
	using namespace SchedulingAnalysis;

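	// Round the buffer start up to 8-byte alignment and trim the size to a
	// multiple of 8; the manager's allocations assume aligned storage.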
	if ((addr_t)buffer & 0x7) {
		addr_t diff = (addr_t)buffer & 0x7;
		buffer = (void*)((addr_t)buffer + 8 - diff);
		size -= 8 - diff;
	}
	size &= ~(size_t)0x7;

	if (buffer == NULL || !IS_USER_ADDRESS(buffer) || size == 0)
		return B_BAD_VALUE;

	status_t error = lock_memory(buffer, size, B_READ_DEVICE);
	if (error != B_OK)
		return error;

	SchedulingAnalysisManager manager(buffer, size);

	InterruptsLocker locker;
	lock_tracing_buffer();

	error = analyze_scheduling(from, until, manager);

	unlock_tracing_buffer();
	locker.Unlock();

	if (error == B_OK)
		error = manager.FinishAnalysis();

	unlock_memory(buffer, size, B_READ_DEVICE);

	if (error == B_OK) {
		error = user_memcpy(analysis, manager.Analysis(),
			sizeof(scheduling_analysis));
	}

	return error;
#else
	return B_BAD_VALUE;
#endif
}