xref: /haiku/src/system/kernel/scheduler/scheduler_tracing.cpp (revision a629567a9001547736cfe892cdf992be16868fed)
/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */

#include "scheduler_tracing.h"

#include <debug.h>


#if SCHEDULER_TRACING

namespace SchedulerTracing {

// #pragma mark - EnqueueThread


void
EnqueueThread::AddDump(TraceOutput& out)
{
	out.Print("scheduler enqueue %ld \"%s\", effective priority %d, "
		"real priority %d", fID, fName, fEffectivePriority, fPriority);
}


const char*
EnqueueThread::Name() const
{
	return fName;
}


// #pragma mark - RemoveThread


void
RemoveThread::AddDump(TraceOutput& out)
{
	out.Print("scheduler remove %ld, priority %d", fID, fPriority);
}


const char*
RemoveThread::Name() const
{
	return NULL;
}


// #pragma mark - ScheduleThread


void
ScheduleThread::AddDump(TraceOutput& out)
{
	out.Print("schedule %ld \"%s\", priority %d, CPU %ld, "
		"previous thread: %ld (", fID, fName, fPriority, fCPU, fPreviousID);
	if (fPreviousState == B_THREAD_WAITING) {
		switch (fPreviousWaitObjectType) {
			case THREAD_BLOCK_TYPE_SEMAPHORE:
				out.Print("sem %ld", (sem_id)(addr_t)fPreviousWaitObject);
				break;
			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
				out.Print("cvar %p", fPreviousWaitObject);
				break;
			case THREAD_BLOCK_TYPE_SNOOZE:
				out.Print("snooze()");
				break;
			case THREAD_BLOCK_TYPE_SIGNAL:
				out.Print("signal");
				break;
			case THREAD_BLOCK_TYPE_MUTEX:
				out.Print("mutex %p", fPreviousWaitObject);
				break;
			case THREAD_BLOCK_TYPE_RW_LOCK:
				out.Print("rwlock %p", fPreviousWaitObject);
				break;
			case THREAD_BLOCK_TYPE_OTHER:
				out.Print("other (%p)", fPreviousWaitObject);
					// We could print the string, but it might come from a
					// kernel module that has already been unloaded.
				break;
			default:
				out.Print("unknown (%p)", fPreviousWaitObject);
				break;
		}
#if SCHEDULER_TRACING >= 2
	} else if (fPreviousState == B_THREAD_READY) {
		out.Print("ready at %p", fPreviousPC);
#endif
	} else
		out.Print("%s", thread_state_to_text(NULL, fPreviousState));

	out.Print(")");
}


const char*
ScheduleThread::Name() const
{
	return fName;
}

}	// namespace SchedulerTracing


// #pragma mark -


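/*!	Kernel debugger command: replays the recorded scheduler tracing entries
	for the thread whose ID is passed as the sole argument and prints
	statistics about its runs, its scheduling latencies after wake-up and
	after preemption, and how often it was preempted.
*/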
int
cmd_scheduler(int argc, char** argv)
{
	using namespace SchedulerTracing;

	int64 threadID;
	if (argc != 2
		|| !evaluate_debug_expression(argv[1], (uint64*)&threadID, true)) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (threadID <= 0) {
		kprintf("Invalid thread ID: %lld\n", threadID);
		return 0;
	}

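	// State of the thread as reconstructed from the trace, and the statistics
	// accumulated along the way.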
	ScheduleState state = UNKNOWN;
	bigtime_t lastTime = 0;

	int64 runs = 0;
	bigtime_t totalRunTime = 0;
	bigtime_t minRunTime = -1;
	bigtime_t maxRunTime = -1;

	int64 latencies = 0;
	bigtime_t totalLatency = 0;
	bigtime_t minLatency = -1;
	bigtime_t maxLatency = -1;
	int32 maxLatencyEntry = -1;

	int64 reruns = 0;
	bigtime_t totalRerunTime = 0;
	bigtime_t minRerunTime = -1;
	bigtime_t maxRerunTime = -1;
	int32 maxRerunEntry = -1;

	int64 preemptions = 0;

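	// Walk all tracing entries and track the thread's scheduling state
	// (ready, running, still running, preempted, waiting), measuring the
	// time spent between the relevant transitions.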
	TraceEntryIterator iterator;
	while (TraceEntry* _entry = iterator.Next()) {
		if (dynamic_cast<SchedulerTraceEntry*>(_entry) == NULL)
			continue;

		if (ScheduleThread* entry = dynamic_cast<ScheduleThread*>(_entry)) {
			if (entry->ThreadID() == threadID) {
				// thread scheduled
				bigtime_t diffTime = entry->Time() - lastTime;

				if (state == READY) {
					// thread scheduled after having been woken up
					latencies++;
					totalLatency += diffTime;
					if (minLatency < 0 || diffTime < minLatency)
						minLatency = diffTime;
					if (diffTime > maxLatency) {
						maxLatency = diffTime;
						maxLatencyEntry = iterator.Index();
					}
				} else if (state == PREEMPTED) {
					// thread scheduled after having been preempted before
					reruns++;
					totalRerunTime += diffTime;
					if (minRerunTime < 0 || diffTime < minRerunTime)
						minRerunTime = diffTime;
					if (diffTime > maxRerunTime) {
						maxRerunTime = diffTime;
						maxRerunEntry = iterator.Index();
					}
				}

				if (state == STILL_RUNNING) {
					// Thread was running and continues to run.
					state = RUNNING;
				}

				if (state != RUNNING) {
					lastTime = entry->Time();
					state = RUNNING;
				}
			} else if (entry->PreviousThreadID() == threadID) {
				// thread unscheduled
				bigtime_t diffTime = entry->Time() - lastTime;

				if (state == STILL_RUNNING) {
					// thread preempted
					runs++;
					preemptions++;
					totalRunTime += diffTime;
					if (minRunTime < 0 || diffTime < minRunTime)
						minRunTime = diffTime;
					if (diffTime > maxRunTime)
						maxRunTime = diffTime;

					state = PREEMPTED;
					lastTime = entry->Time();
				} else if (state == RUNNING) {
					// thread starts waiting (it hadn't been added to the run
					// queue before being unscheduled)
					runs++;
					totalRunTime += diffTime;
					if (minRunTime < 0 || diffTime < minRunTime)
						minRunTime = diffTime;
					if (diffTime > maxRunTime)
						maxRunTime = diffTime;

					state = WAITING;
					lastTime = entry->Time();
				}
			}
		} else if (EnqueueThread* entry
				= dynamic_cast<EnqueueThread*>(_entry)) {
			if (entry->ThreadID() != threadID)
				continue;

			// thread enqueued in run queue

			if (state == RUNNING || state == STILL_RUNNING) {
				// Thread was running and is reentered into the run queue. This
				// is done by the scheduler, if the thread remains ready.
				state = STILL_RUNNING;
			} else {
				// Thread was waiting and is ready now.
				lastTime = entry->Time();
				state = READY;
			}
		} else if (RemoveThread* entry = dynamic_cast<RemoveThread*>(_entry)) {
			if (entry->ThreadID() != threadID)
				continue;

			// thread removed from run queue

			// This really only happens when the thread priority is changed
			// while the thread is ready.

			if (state == RUNNING) {
				// This should never happen.
				bigtime_t diffTime = entry->Time() - lastTime;
				runs++;
				totalRunTime += diffTime;
				if (minRunTime < 0 || diffTime < minRunTime)
					minRunTime = diffTime;
				if (diffTime > maxRunTime)
					maxRunTime = diffTime;
			}

			state = WAITING;
		}
	}

	// print results
	if (runs == 0) {
		kprintf("thread %lld never ran.\n", threadID);
		return 0;
	}

	kprintf("scheduling statistics for thread %lld:\n", threadID);
	kprintf("runs:\n");
	kprintf("  total #: %lld\n", runs);
	kprintf("  total:   %lld us\n", totalRunTime);
	kprintf("  average: %#.2f us\n", (double)totalRunTime / runs);
	kprintf("  min:     %lld us\n", minRunTime);
	kprintf("  max:     %lld us\n", maxRunTime);

	if (latencies > 0) {
		kprintf("scheduling latency after wake up:\n");
		kprintf("  total #: %lld\n", latencies);
		kprintf("  total:   %lld us\n", totalLatency);
		kprintf("  average: %#.2f us\n", (double)totalLatency / latencies);
		kprintf("  min:     %lld us\n", minLatency);
		kprintf("  max:     %lld us (at tracing entry %ld)\n", maxLatency,
			maxLatencyEntry);
	} else
		kprintf("thread was never run after having been woken up\n");

	if (reruns > 0) {
		kprintf("scheduling latency after preemption:\n");
		kprintf("  total #: %lld\n", reruns);
		kprintf("  total:   %lld us\n", totalRerunTime);
		kprintf("  average: %#.2f us\n", (double)totalRerunTime / reruns);
		kprintf("  min:     %lld us\n", minRerunTime);
		kprintf("  max:     %lld us (at tracing entry %ld)\n", maxRerunTime,
			maxRerunEntry);
	} else
		kprintf("thread was never rerun after preemption\n");

	if (preemptions > 0)
		kprintf("thread was preempted %lld times\n", preemptions);
	else
		kprintf("thread was never preempted\n");

	return 0;
}

#endif	// SCHEDULER_TRACING