xref: /haiku/src/system/kernel/scheduler/scheduler_profiler.cpp (revision d0f2d8282f3f59a1af7fe2d340d2af0cb36a9b20)
1 /*
2  * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 #include "scheduler_profiler.h"
7 
8 #include <debug.h>
9 #include <util/AutoLock.h>
10 
11 #include <algorithm>
12 
13 
14 #ifdef SCHEDULER_PROFILING
15 
16 
17 using namespace Scheduler;
18 using namespace Scheduler::Profiling;
19 
20 
21 static Profiler* sProfiler;
22 
23 static int dump_profiler(int argc, char** argv);
24 
25 
/*!	Allocates the global function table and one per-CPU function call
	stack for each CPU in the system.

	On any allocation failure fStatus is set to B_NO_MEMORY and
	construction stops early; callers must check GetStatus() before
	using the object (Initialize() panics on failure).
	NOTE(review): buffers allocated before a failure are presumably
	released by the destructor or simply leaked on the panic path --
	not visible from this file; confirm against the header.
*/
Profiler::Profiler()
	:
	kMaxFunctionEntries(1024),
	kMaxFunctionStackEntries(512),
	fFunctionData(new(std::nothrow) FunctionData[kMaxFunctionEntries]),
	fStatus(B_OK)
{
	B_INITIALIZE_SPINLOCK(&fFunctionLock);

	if (fFunctionData == NULL) {
		fStatus = B_NO_MEMORY;
		return;
	}
	// Zeroed entries mark unused slots (fFunction == NULL), which both
	// _FunctionCount() and _FindFunction() rely on.
	memset(fFunctionData, 0, sizeof(FunctionData) * kMaxFunctionEntries);

	for (int32 i = 0; i < smp_get_num_cpus(); i++) {
		fFunctionStacks[i]
			= new(std::nothrow) FunctionEntry[kMaxFunctionStackEntries];
		if (fFunctionStacks[i] == NULL) {
			fStatus = B_NO_MEMORY;
			return;
		}
		memset(fFunctionStacks[i], 0,
			sizeof(FunctionEntry) * kMaxFunctionStackEntries);
	}
	// All per-CPU call stacks start out empty.
	memset(fFunctionStackPointers, 0, sizeof(int32) * smp_get_num_cpus());
}
53 
54 
55 void
EnterFunction(int32 cpu,const char * functionName)56 Profiler::EnterFunction(int32 cpu, const char* functionName)
57 {
58 	nanotime_t start = system_time_nsecs();
59 
60 	FunctionData* function = _FindFunction(functionName);
61 	if (function == NULL)
62 		return;
63 	atomic_add((int32*)&function->fCalled, 1);
64 
65 	FunctionEntry* stackEntry
66 		= &fFunctionStacks[cpu][fFunctionStackPointers[cpu]];
67 	fFunctionStackPointers[cpu]++;
68 
69 	ASSERT(fFunctionStackPointers[cpu] < kMaxFunctionStackEntries);
70 
71 	stackEntry->fFunction = function;
72 	stackEntry->fEntryTime = start;
73 	stackEntry->fOthersTime = 0;
74 
75 	nanotime_t stop = system_time_nsecs();
76 	stackEntry->fProfilerTime = stop - start;
77 }
78 
79 
80 void
ExitFunction(int32 cpu,const char * functionName)81 Profiler::ExitFunction(int32 cpu, const char* functionName)
82 {
83 	nanotime_t start = system_time_nsecs();
84 
85 	ASSERT(fFunctionStackPointers[cpu] > 0);
86 	fFunctionStackPointers[cpu]--;
87 	FunctionEntry* stackEntry
88 		= &fFunctionStacks[cpu][fFunctionStackPointers[cpu]];
89 
90 	nanotime_t timeSpent = start - stackEntry->fEntryTime;
91 	timeSpent -= stackEntry->fProfilerTime;
92 
93 	atomic_add64(&stackEntry->fFunction->fTimeInclusive, timeSpent);
94 	atomic_add64(&stackEntry->fFunction->fTimeExclusive,
95 		timeSpent - stackEntry->fOthersTime);
96 
97 	nanotime_t profilerTime = stackEntry->fProfilerTime;
98 	if (fFunctionStackPointers[cpu] > 0) {
99 		stackEntry = &fFunctionStacks[cpu][fFunctionStackPointers[cpu] - 1];
100 		stackEntry->fOthersTime += timeSpent;
101 		stackEntry->fProfilerTime += profilerTime;
102 
103 		nanotime_t stop = system_time_nsecs();
104 		stackEntry->fProfilerTime += stop - start;
105 	}
106 }
107 
108 
109 void
DumpCalled(uint32 maxCount)110 Profiler::DumpCalled(uint32 maxCount)
111 {
112 	uint32 count = _FunctionCount();
113 
114 	qsort(fFunctionData, count, sizeof(FunctionData),
115 		&_CompareFunctions<uint32, &FunctionData::fCalled>);
116 
117 	if (maxCount > 0)
118 		count = std::min(count, maxCount);
119 	_Dump(count);
120 }
121 
122 
123 void
DumpTimeInclusive(uint32 maxCount)124 Profiler::DumpTimeInclusive(uint32 maxCount)
125 {
126 	uint32 count = _FunctionCount();
127 
128 	qsort(fFunctionData, count, sizeof(FunctionData),
129 		&_CompareFunctions<nanotime_t, &FunctionData::fTimeInclusive>);
130 
131 	if (maxCount > 0)
132 		count = std::min(count, maxCount);
133 	_Dump(count);
134 }
135 
136 
137 void
DumpTimeExclusive(uint32 maxCount)138 Profiler::DumpTimeExclusive(uint32 maxCount)
139 {
140 	uint32 count = _FunctionCount();
141 
142 	qsort(fFunctionData, count, sizeof(FunctionData),
143 		&_CompareFunctions<nanotime_t, &FunctionData::fTimeExclusive>);
144 
145 	if (maxCount > 0)
146 		count = std::min(count, maxCount);
147 	_Dump(count);
148 }
149 
150 
151 void
DumpTimeInclusivePerCall(uint32 maxCount)152 Profiler::DumpTimeInclusivePerCall(uint32 maxCount)
153 {
154 	uint32 count = _FunctionCount();
155 
156 	qsort(fFunctionData, count, sizeof(FunctionData),
157 		&_CompareFunctionsPerCall<nanotime_t, &FunctionData::fTimeInclusive>);
158 
159 	if (maxCount > 0)
160 		count = std::min(count, maxCount);
161 	_Dump(count);
162 }
163 
164 
165 void
DumpTimeExclusivePerCall(uint32 maxCount)166 Profiler::DumpTimeExclusivePerCall(uint32 maxCount)
167 {
168 	uint32 count = _FunctionCount();
169 
170 	qsort(fFunctionData, count, sizeof(FunctionData),
171 		&_CompareFunctionsPerCall<nanotime_t, &FunctionData::fTimeExclusive>);
172 
173 	if (maxCount > 0)
174 		count = std::min(count, maxCount);
175 	_Dump(count);
176 }
177 
178 
179 /* static */ Profiler*
Get()180 Profiler::Get()
181 {
182 	return sProfiler;
183 }
184 
185 
186 /* static */ void
Initialize()187 Profiler::Initialize()
188 {
189 	sProfiler = new(std::nothrow) Profiler;
190 	if (sProfiler == NULL || sProfiler->GetStatus() != B_OK)
191 		panic("Scheduler::Profiling::Profiler: could not initialize profiler");
192 
193 	add_debugger_command_etc("scheduler_profiler", &dump_profiler,
194 		"Show data collected by scheduler profiler",
195 		"[ <field> [ <count> ] ]\n"
196 		"Shows data collected by scheduler profiler\n"
197 		"  <field>   - Field used to sort functions. Available: called,"
198 			" time-inclusive, time-inclusive-per-call, time-exclusive,"
199 			" time-exclusive-per-call.\n"
200 		"              (defaults to \"called\")\n"
201 		"  <count>   - Maximum number of showed functions.\n", 0);
202 }
203 
204 
205 uint32
_FunctionCount() const206 Profiler::_FunctionCount() const
207 {
208 	uint32 count;
209 	for (count = 0; count < kMaxFunctionEntries; count++) {
210 		if (fFunctionData[count].fFunction == NULL)
211 			break;
212 	}
213 	return count;
214 }
215 
216 
217 void
_Dump(uint32 count)218 Profiler::_Dump(uint32 count)
219 {
220 	kprintf("Function calls (%" B_PRId32 " functions):\n", count);
221 	kprintf("    called time-inclusive per-call time-exclusive per-call "
222 		"function\n");
223 	for (uint32 i = 0; i < count; i++) {
224 		FunctionData* function = &fFunctionData[i];
225 		kprintf("%10" B_PRId32 " %14" B_PRId64 " %8" B_PRId64 " %14" B_PRId64
226 			" %8" B_PRId64 " %s\n", function->fCalled,
227 			function->fTimeInclusive,
228 			function->fTimeInclusive / function->fCalled,
229 			function->fTimeExclusive,
230 			function->fTimeExclusive / function->fCalled, function->fFunction);
231 	}
232 }
233 
234 
235 Profiler::FunctionData*
_FindFunction(const char * function)236 Profiler::_FindFunction(const char* function)
237 {
238 	for (uint32 i = 0; i < kMaxFunctionEntries; i++) {
239 		if (fFunctionData[i].fFunction == NULL)
240 			break;
241 		if (!strcmp(fFunctionData[i].fFunction, function))
242 			return fFunctionData + i;
243 	}
244 
245 	SpinLocker _(fFunctionLock);
246 	for (uint32 i = 0; i < kMaxFunctionEntries; i++) {
247 		if (fFunctionData[i].fFunction == NULL) {
248 			fFunctionData[i].fFunction = function;
249 			return fFunctionData + i;
250 		}
251 		if (!strcmp(fFunctionData[i].fFunction, function))
252 			return fFunctionData + i;
253 	}
254 
255 	return NULL;
256 }
257 
258 
259 template<typename Type, Type Profiler::FunctionData::*Member>
260 /* static */ int
_CompareFunctions(const void * _a,const void * _b)261 Profiler::_CompareFunctions(const void* _a, const void* _b)
262 {
263 	const FunctionData* a = static_cast<const FunctionData*>(_a);
264 	const FunctionData* b = static_cast<const FunctionData*>(_b);
265 
266 	if (b->*Member > a->*Member)
267 		return 1;
268 	if (b->*Member < a->*Member)
269 		return -1;
270 	return 0;
271 }
272 
273 
274 template<typename Type, Type Profiler::FunctionData::*Member>
275 /* static */ int
_CompareFunctionsPerCall(const void * _a,const void * _b)276 Profiler::_CompareFunctionsPerCall(const void* _a, const void* _b)
277 {
278 	const FunctionData* a = static_cast<const FunctionData*>(_a);
279 	const FunctionData* b = static_cast<const FunctionData*>(_b);
280 
281 	Type valueA = a->*Member / a->fCalled;
282 	Type valueB = b->*Member / b->fCalled;
283 
284 	if (valueB > valueA)
285 		return 1;
286 	if (valueB < valueA)
287 		return -1;
288 	return 0;
289 }
290 
291 
292 static int
dump_profiler(int argc,char ** argv)293 dump_profiler(int argc, char** argv)
294 {
295 	if (argc < 2) {
296 		Profiler::Get()->DumpCalled(0);
297 		return 0;
298 	}
299 
300 	int32 count = 0;
301 	if (argc >= 3)
302 		count = parse_expression(argv[2]);
303 	count = std::max(count, int32(0));
304 
305 	if (!strcmp(argv[1], "called"))
306 		Profiler::Get()->DumpCalled(count);
307 	else if (!strcmp(argv[1], "time-inclusive"))
308 		Profiler::Get()->DumpTimeInclusive(count);
309 	else if (!strcmp(argv[1], "time-inclusive-per-call"))
310 		Profiler::Get()->DumpTimeInclusivePerCall(count);
311 	else if (!strcmp(argv[1], "time-exclusive"))
312 		Profiler::Get()->DumpTimeExclusive(count);
313 	else if (!strcmp(argv[1], "time-exclusive-per-call"))
314 		Profiler::Get()->DumpTimeExclusivePerCall(count);
315 	else
316 		print_debugger_command_usage(argv[0]);
317 
318 	return 0;
319 }
320 
321 
322 #endif	// SCHEDULER_PROFILING
323 
324