xref: /haiku/src/system/kernel/scheduler/scheduler_tracing.cpp (revision cbe0a0c436162d78cc3f92a305b64918c839d079)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 #include "scheduler_tracing.h"
8 
9 #include <debug.h>
10 
11 
12 #if SCHEDULER_TRACING
13 
14 namespace SchedulerTracing {
15 
16 // #pragma mark - EnqueueThread
17 
18 
void
EnqueueThread::AddDump(TraceOutput& out)
{
	// One-line dump for the debugger: thread ID and name, plus both the
	// effective and the real priority recorded when the thread was enqueued.
	out.Print("scheduler enqueue %" B_PRId32 " \"%s\", effective priority %"
		B_PRId32 ", real priority %" B_PRId32, fID, fName, fEffectivePriority,
		fPriority);
}
26 
27 
const char*
EnqueueThread::Name() const
{
	// The thread name recorded when the entry was created.
	return fName;
}
33 
34 
35 // #pragma mark - RemoveThread
36 
37 
38 void
39 RemoveThread::AddDump(TraceOutput& out)
40 {
41 	out.Print("scheduler remove %" B_PRId32 ", priority %" B_PRId32, fID,
42 		fPriority);
43 }
44 
const char*
RemoveThread::Name() const
{
	// Remove entries don't record the thread name, so there is nothing to
	// return here.
	return NULL;
}
50 
51 
52 // #pragma mark - ScheduleThread
53 
54 
void
ScheduleThread::AddDump(TraceOutput& out)
{
	// Dump the scheduled thread (ID, name, priority), the CPU it was
	// scheduled on, and the previously running thread. The previous thread's
	// state is appended in parentheses by the Print() calls below.
	out.Print("schedule %" B_PRId32 " \"%s\", priority %" B_PRId32 ", CPU %"
		B_PRId32 ", previous thread: %" B_PRId32 " (", fID, fName, fPriority,
		fCPU, fPreviousID);
	if (fPreviousState == B_THREAD_WAITING) {
		// The previous thread went to sleep -- describe what it waits on.
		switch (fPreviousWaitObjectType) {
			case THREAD_BLOCK_TYPE_SEMAPHORE:
				// The wait object stores the sem_id cast to a pointer.
				out.Print("sem %" B_PRId32,
					(sem_id)(addr_t)fPreviousWaitObject);
				break;
			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
				out.Print("cvar %p", fPreviousWaitObject);
				break;
			case THREAD_BLOCK_TYPE_SNOOZE:
				out.Print("snooze()");
				break;
			case THREAD_BLOCK_TYPE_SIGNAL:
				out.Print("signal");
				break;
			case THREAD_BLOCK_TYPE_MUTEX:
				out.Print("mutex %p", fPreviousWaitObject);
				break;
			case THREAD_BLOCK_TYPE_RW_LOCK:
				out.Print("rwlock %p", fPreviousWaitObject);
				break;
			case THREAD_BLOCK_TYPE_USER:
				out.Print("_user_block_thread()");
				break;
			case THREAD_BLOCK_TYPE_OTHER:
				out.Print("other (%p)", fPreviousWaitObject);
					// We could print the string, but it might come from a
					// kernel module that has already been unloaded.
				break;
			default:
				out.Print("unknown (%p)", fPreviousWaitObject);
				break;
		}
#if SCHEDULER_TRACING >= 2
	// At tracing level >= 2 the program counter of ready threads is
	// recorded, too; this branch extends the if/else chain below.
	} else if (fPreviousState == B_THREAD_READY) {
		out.Print("ready at %p", fPreviousPC);
#endif
	} else
		out.Print("%s", thread_state_to_text(NULL, fPreviousState));

	out.Print(")");
}
103 
104 
const char*
ScheduleThread::Name() const
{
	// The name of the thread that was scheduled (not the previous one).
	return fName;
}
110 
111 }	// namespace SchedulerTracing
112 
113 
114 // #pragma mark -
115 
116 
117 int
118 cmd_scheduler(int argc, char** argv)
119 {
120 	using namespace SchedulerTracing;
121 
122 	int64 threadID;
123 	if (argc != 2
124 		|| !evaluate_debug_expression(argv[1], (uint64*)&threadID, true)) {
125 		print_debugger_command_usage(argv[0]);
126 		return 0;
127 	}
128 
129 	if (threadID <= 0) {
130 		kprintf("Invalid thread ID: %" B_PRId64 "\n", threadID);
131 		return 0;
132 	}
133 
134 	ScheduleState state = UNKNOWN;
135 	bigtime_t lastTime = 0;
136 
137 	int64 runs = 0;
138 	bigtime_t totalRunTime = 0;
139 	bigtime_t minRunTime = -1;
140 	bigtime_t maxRunTime = -1;
141 
142 	int64 latencies = 0;
143 	bigtime_t totalLatency = 0;
144 	bigtime_t minLatency = -1;
145 	bigtime_t maxLatency = -1;
146 	int32 maxLatencyEntry = -1;
147 
148 	int64 reruns = 0;
149 	bigtime_t totalRerunTime = 0;
150 	bigtime_t minRerunTime = -1;
151 	bigtime_t maxRerunTime = -1;
152 	int32 maxRerunEntry = -1;
153 
154 	int64 preemptions = 0;
155 
156 	TraceEntryIterator iterator;
157 	while (TraceEntry* _entry = iterator.Next()) {
158 		if (dynamic_cast<SchedulerTraceEntry*>(_entry) == NULL)
159 			continue;
160 
161 		if (ScheduleThread* entry = dynamic_cast<ScheduleThread*>(_entry)) {
162 			if (entry->ThreadID() == threadID) {
163 				// thread scheduled
164 				bigtime_t diffTime = entry->Time() - lastTime;
165 
166 				if (state == READY) {
167 					// thread scheduled after having been woken up
168 					latencies++;
169 					totalLatency += diffTime;
170 					if (minLatency < 0 || diffTime < minLatency)
171 						minLatency = diffTime;
172 					if (diffTime > maxLatency) {
173 						maxLatency = diffTime;
174 						maxLatencyEntry = iterator.Index();
175 					}
176 				} else if (state == PREEMPTED) {
177 					// thread scheduled after having been preempted before
178 					reruns++;
179 					totalRerunTime += diffTime;
180 					if (minRerunTime < 0 || diffTime < minRerunTime)
181 						minRerunTime = diffTime;
182 					if (diffTime > maxRerunTime) {
183 						maxRerunTime = diffTime;
184 						maxRerunEntry = iterator.Index();
185 					}
186 				}
187 
188 				if (state == STILL_RUNNING) {
189 					// Thread was running and continues to run.
190 					state = RUNNING;
191 				}
192 
193 				if (state != RUNNING) {
194 					lastTime = entry->Time();
195 					state = RUNNING;
196 				}
197 			} else if (entry->PreviousThreadID() == threadID) {
198 				// thread unscheduled
199 				bigtime_t diffTime = entry->Time() - lastTime;
200 
201 				if (state == STILL_RUNNING) {
202 					// thread preempted
203 					runs++;
204 					preemptions++;
205 					totalRunTime += diffTime;
206 					if (minRunTime < 0 || diffTime < minRunTime)
207 						minRunTime = diffTime;
208 					if (diffTime > maxRunTime)
209 						maxRunTime = diffTime;
210 
211 					state = PREEMPTED;
212 					lastTime = entry->Time();
213 				} else if (state == RUNNING) {
214 					// thread starts waiting (it hadn't been added to the run
215 					// queue before being unscheduled)
216 					bigtime_t diffTime = entry->Time() - lastTime;
217 					runs++;
218 					totalRunTime += diffTime;
219 					if (minRunTime < 0 || diffTime < minRunTime)
220 						minRunTime = diffTime;
221 					if (diffTime > maxRunTime)
222 						maxRunTime = diffTime;
223 
224 					state = WAITING;
225 					lastTime = entry->Time();
226 				}
227 			}
228 		} else if (EnqueueThread* entry
229 				= dynamic_cast<EnqueueThread*>(_entry)) {
230 			if (entry->ThreadID() != threadID)
231 				continue;
232 
233 			// thread enqueued in run queue
234 
235 			if (state == RUNNING || state == STILL_RUNNING) {
236 				// Thread was running and is reentered into the run queue. This
237 				// is done by the scheduler, if the thread remains ready.
238 				state = STILL_RUNNING;
239 			} else {
240 				// Thread was waiting and is ready now.
241 				lastTime = entry->Time();
242 				state = READY;
243 			}
244 		} else if (RemoveThread* entry = dynamic_cast<RemoveThread*>(_entry)) {
245 			if (entry->ThreadID() != threadID)
246 				continue;
247 
248 			// thread removed from run queue
249 
250 			// This really only happens when the thread priority is changed
251 			// while the thread is ready.
252 
253 			if (state == RUNNING) {
254 				// This should never happen.
255 				bigtime_t diffTime = entry->Time() - lastTime;
256 				runs++;
257 				totalRunTime += diffTime;
258 				if (minRunTime < 0 || diffTime < minRunTime)
259 					minRunTime = diffTime;
260 				if (diffTime > maxRunTime)
261 					maxRunTime = diffTime;
262 			}
263 
264 			state = WAITING;
265 		}
266 	}
267 
268 	// print results
269 	if (runs == 0) {
270 		kprintf("thread %" B_PRId64 " never ran.\n", threadID);
271 		return 0;
272 	}
273 
274 	kprintf("scheduling statistics for thread %" B_PRId64 ":\n", threadID);
275 	kprintf("runs:\n");
276 	kprintf("  total #: %" B_PRId64 "\n", runs);
277 	kprintf("  total:   %" B_PRIdBIGTIME " us\n", totalRunTime);
278 	kprintf("  average: %#.2f us\n", (double)totalRunTime / runs);
279 	kprintf("  min:     %" B_PRIdBIGTIME " us\n", minRunTime);
280 	kprintf("  max:     %" B_PRIdBIGTIME " us\n", maxRunTime);
281 
282 	if (latencies > 0) {
283 		kprintf("scheduling latency after wake up:\n");
284 		kprintf("  total #: %" B_PRIdBIGTIME "\n", latencies);
285 		kprintf("  total:   %" B_PRIdBIGTIME " us\n", totalLatency);
286 		kprintf("  average: %#.2f us\n", (double)totalLatency / latencies);
287 		kprintf("  min:     %" B_PRIdBIGTIME " us\n", minLatency);
288 		kprintf("  max:     %" B_PRIdBIGTIME " us\n", maxLatency);
289 		kprintf("  max:     %" B_PRIdBIGTIME " us (at tracing entry %" B_PRId32
290 			")\n", maxLatency, maxLatencyEntry);
291 	} else
292 		kprintf("thread was never run after having been woken up\n");
293 
294 	if (reruns > 0) {
295 		kprintf("scheduling latency after preemption:\n");
296 		kprintf("  total #: %" B_PRId64 "\n", reruns);
297 		kprintf("  total:   %" B_PRIdBIGTIME " us\n", totalRerunTime);
298 		kprintf("  average: %#.2f us\n", (double)totalRerunTime / reruns);
299 		kprintf("  min:     %" B_PRIdBIGTIME " us\n", minRerunTime);
300 		kprintf("  max:     %" B_PRIdBIGTIME " us (at tracing entry %" B_PRId32
301 			")\n", maxRerunTime, maxRerunEntry);
302 	} else
303 		kprintf("thread was never rerun after preemption\n");
304 
305 	if (preemptions > 0)
306 		kprintf("thread was preempted %" B_PRId64 " times\n", preemptions);
307 	else
308 		kprintf("thread was never preempted\n");
309 
310 	return 0;
311 }
312 
313 #endif	// SCHEDULER_TRACING
314