xref: /haiku/src/system/kernel/debug/tracing.cpp (revision 8a04709a1d4086fecf352cc079c97e394f1a4ba3)
/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include <tracing.h>

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <debug.h>
#include <kernel.h>
#include <util/AutoLock.h>


#if ENABLE_TRACING

//#define TRACE_TRACING
#ifdef TRACE_TRACING
#	define TRACE(x) dprintf_no_syslog x
#else
#	define TRACE(x) ;
#endif


enum {
	WRAP_ENTRY			= 0x01,
	ENTRY_INITIALIZED	= 0x02,
	BUFFER_ENTRY		= 0x04
};

static const size_t kBufferSize = MAX_TRACE_SIZE / 4 - 1;

static trace_entry* sBuffer;
static trace_entry* sFirstEntry;
static trace_entry* sAfterLastEntry;
static uint32 sEntries;
static uint32 sWritten;
static spinlock sLock;


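// Returns the entry following the given one in the ring buffer, following a
// WRAP_ENTRY back to the start of the buffer, or NULL once the end of the
// recorded entries (sAfterLastEntry) is reached.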
static trace_entry*
next_entry(trace_entry* entry)
{
	entry += entry->size >> 2;
	if ((entry->flags & WRAP_ENTRY) != 0)
		entry = sBuffer;

	if (entry == sAfterLastEntry)
		return NULL;

	return entry;
}


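// Releases the oldest entry in the buffer: buffer entries are simply
// skipped, fully initialized TraceEntries are destroyed. Returns false if
// the first entry is a not yet initialized TraceEntry, which must not be
// freed yet.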
static bool
free_first_entry()
{
	TRACE(("  skip start %p, %u bytes\n", sFirstEntry, sFirstEntry->size));

	trace_entry* newFirst = next_entry(sFirstEntry);

	if (sFirstEntry->flags & BUFFER_ENTRY) {
		// a buffer entry -- just skip it
	} else if (sFirstEntry->flags & ENTRY_INITIALIZED) {
		// fully initialized TraceEntry -- destroy it
		((TraceEntry*)sFirstEntry)->~TraceEntry();
		sEntries--;
	} else {
		// Not fully initialized TraceEntry. We can't free it, since
		// its constructor might still write into the memory and
		// overwrite data of the entry we're going to allocate.
		// We can't do anything until this entry can be discarded.
		return false;
	}

	if (newFirst == NULL) {
		// everything is freed -- practically this can't happen, if the
		// buffer is large enough to hold three max-sized entries
		sFirstEntry = sAfterLastEntry = sBuffer;
		TRACE(("free_first_entry(): all entries freed!\n"));
	} else
		sFirstEntry = newFirst;

	return true;
}


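// Ensures that at least "needed" bytes (plus room for a wrap entry) are
// available at sAfterLastEntry, freeing the oldest entries and wrapping
// around to the buffer start as necessary. Returns false if a not yet
// initialized entry blocks the way.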
static bool
make_space(size_t needed)
{
	// we need space for sAfterLastEntry, too (in case we need to wrap around
	// later)
	needed += 4;

	// If there's not enough space (free or occupied) after sAfterLastEntry,
	// we free all entries in that region and wrap around.
	if (sAfterLastEntry + needed / 4 > sBuffer + kBufferSize) {
		TRACE(("make_space(%lu), wrapping around: after last: %p\n", needed,
			sAfterLastEntry));

		// Free all entries after sAfterLastEntry and one more at the beginning
		// of the buffer.
		do {
			if (!free_first_entry())
				return false;
		} while (sFirstEntry > sAfterLastEntry);

		// just in case free_first_entry() freed the very last existing entry
		if (sAfterLastEntry == sBuffer)
			return true;

		// mark as wrap entry and actually wrap around
		sAfterLastEntry->size = 0;
		sAfterLastEntry->flags = WRAP_ENTRY;
		sAfterLastEntry = sBuffer;
	}

	if (sFirstEntry <= sAfterLastEntry) {
		// buffer is empty or the space after sAfterLastEntry is unoccupied
		return true;
	}

	// free the first entries, until there's enough space
	size_t space = (sFirstEntry - sAfterLastEntry) * 4;

	if (space < needed) {
		TRACE(("make_space(%lu), left %ld\n", needed, space));
	}

	while (space < needed) {
		space += sFirstEntry->size;

		if (!free_first_entry())
			return false;
	}

	TRACE(("  out: start %p, entries %ld\n", sFirstEntry, sEntries));

	return true;
}


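// Allocates a "size" byte entry (rounded up to a multiple of 4) from the
// ring buffer while holding sLock, evicting older entries via make_space()
// as needed. Returns NULL if tracing is disabled, the size is out of range,
// or space cannot be made.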
static trace_entry*
allocate_entry(size_t size, uint16 flags)
{
	if (sBuffer == NULL || size == 0 || size >= 65532)
		return NULL;

	InterruptsSpinLocker _(sLock);

	size = (size + 3) & ~3;

	TRACE(("allocate_entry(%lu), start %p, end %p, buffer %p\n", size,
		sFirstEntry, sAfterLastEntry, sBuffer));

	if (!make_space(size))
		return NULL;

	trace_entry* entry = sAfterLastEntry;
	entry->size = size;
	entry->flags = flags;
	sAfterLastEntry += size >> 2;

	if (!(flags & BUFFER_ENTRY))
		sEntries++;

	TRACE(("  entry: %p, end %p, start %p, entries %ld\n", entry,
		sAfterLastEntry, sFirstEntry, sEntries));

	return entry;
}


#endif	// ENABLE_TRACING


// #pragma mark -


TraceOutput::TraceOutput(char* buffer, size_t bufferSize)
	: fBuffer(buffer),
	  fCapacity(bufferSize)
{
	Clear();
}


void
TraceOutput::Clear()
{
	if (fCapacity > 0)
		fBuffer[0] = '\0';
	fSize = 0;
}


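// Appends printf()-like formatted text to the output buffer; output is
// silently truncated once the buffer is full.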
void
TraceOutput::Print(const char* format, ...)
{
	if (IsFull())
		return;

	va_list args;
	va_start(args, format);
	fSize += vsnprintf(fBuffer + fSize, fCapacity - fSize, format, args);
	va_end(args);
}


//	#pragma mark -


TraceEntry::TraceEntry()
{
}


TraceEntry::~TraceEntry()
{
}


void
TraceEntry::Dump(TraceOutput& out)
{
#if ENABLE_TRACING
	// to be overridden by subclasses
	out.Print("ENTRY %p", this);
#endif
}


void
TraceEntry::Initialized()
{
#if ENABLE_TRACING
	flags |= ENTRY_INITIALIZED;
	sWritten++;
#endif
}


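// TraceEntry objects live in the tracing ring buffer rather than on the
// heap, so new(std::nothrow) may return NULL and entries are eventually
// overwritten when the buffer wraps around.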
void*
TraceEntry::operator new(size_t size, const std::nothrow_t&) throw()
{
#if ENABLE_TRACING
	return allocate_entry(size, 0);
#else
	return NULL;
#endif
}


//	#pragma mark -


AbstractTraceEntry::~AbstractTraceEntry()
{
}


void
AbstractTraceEntry::Dump(TraceOutput& out)
{
	out.Print("[%6ld] %Ld: ", fThread, fTime);
	AddDump(out);
}


void
AbstractTraceEntry::AddDump(TraceOutput& out)
{
}
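

// Typical usage (illustrative sketch only, not part of this file): a
// subsystem derives from AbstractTraceEntry, stores its parameters in the
// constructor, calls Initialized() once the entry is complete, and overrides
// AddDump() to format them. Class name and fields below are hypothetical:
//
//	class ReadTraceEntry : public AbstractTraceEntry {
//	public:
//		ReadTraceEntry(int fd, size_t size)
//			: fFD(fd), fSize(size)
//		{
//			Initialized();
//		}
//
//		virtual void AddDump(TraceOutput& out)
//		{
//			out.Print("read: fd %d, size %lu", fFD, fSize);
//		}
//
//	private:
//		int		fFD;
//		size_t	fSize;
//	};
//
//	// recording an entry; the result may be NULL if no space could be made
//	new(std::nothrow) ReadTraceEntry(fd, size);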


//	#pragma mark - trace filters


class LazyTraceOutput : public TraceOutput {
public:
	LazyTraceOutput(char* buffer, size_t bufferSize)
		: TraceOutput(buffer, bufferSize)
	{
	}

	const char* DumpEntry(const TraceEntry* entry)
	{
		if (Size() == 0) {
			const_cast<TraceEntry*>(entry)->Dump(*this);
				// Dump() should probably be const
		}

		return Buffer();
	}
};


class TraceFilter {
public:
	virtual ~TraceFilter()
	{
	}

	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return false;
	}

public:
	union {
		thread_id	fThread;
		const char*	fString;
		struct {
			TraceFilter*	first;
			TraceFilter*	second;
		} fSubFilters;
	};
};


class ThreadTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
	{
		const AbstractTraceEntry* entry
			= dynamic_cast<const AbstractTraceEntry*>(_entry);
		return (entry != NULL && entry->Thread() == fThread);
	}
};


class PatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return strstr(out.DumpEntry(entry), fString) != NULL;
	}
};


class NotTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return !fSubFilters.first->Filter(entry, out);
	}
};


class AndTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			&& fSubFilters.second->Filter(entry, out);
	}
};


class OrTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			|| fSubFilters.second->Filter(entry, out);
	}
};


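// Parses and evaluates filter expressions for the "traced" command. The
// grammar is prefix style:
//
//	<expr> := "#"<pattern> | "thread" <id> | "not" <expr>
//		| "and" <expr> <expr> | "or" <expr> <expr>
//
// Filters are constructed via placement new into a fixed TraceFilter array;
// this works because none of the filter subclasses add data members of
// their own.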
class TraceFilterParser {
public:
	static TraceFilterParser* Default()
	{
		return &sParser;
	}

	bool Parse(int argc, const char* const* argv)
	{
		fTokens = argv;
		fTokenCount = argc;
		fTokenIndex = 0;
		fFilterCount = 0;

		TraceFilter* filter = _ParseExpression();
		return fTokenIndex == fTokenCount && filter != NULL;
	}

	bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fFilters[0].Filter(entry, out);
	}

private:
	TraceFilter* _ParseExpression()
	{
		const char* token = _NextToken();
		if (!token) {
			// unexpected end of expression
			return NULL;
		}

		if (fFilterCount == MAX_FILTERS) {
			// too many filters
			return NULL;
		}

		if (token[0] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				PatternTraceFilter;
			filter->fString = token + 1;
			return filter;
		} else if (strcmp(token, "not") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) NotTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL)
				return filter;
			return NULL;
		} else if (strcmp(token, "and") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) AndTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "or") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) OrTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "thread") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				ThreadTraceFilter;
			filter->fThread = strtol(arg, NULL, 0);
			return filter;
		} else {
			// invalid token
			return NULL;
		}
	}

	const char* _CurrentToken() const
	{
		if (fTokenIndex >= 1 && fTokenIndex <= fTokenCount)
			return fTokens[fTokenIndex - 1];
		return NULL;
	}

	const char* _NextToken()
	{
		if (fTokenIndex >= fTokenCount)
			return NULL;
		return fTokens[fTokenIndex++];
	}

private:
	enum { MAX_FILTERS = 32 };

	const char* const*			fTokens;
	int							fTokenCount;
	int							fTokenIndex;
	TraceFilter					fFilters[MAX_FILTERS];
	int							fFilterCount;

	static TraceFilterParser	sParser;
};


TraceFilterParser TraceFilterParser::sParser;


//	#pragma mark -


#if ENABLE_TRACING


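// Implementation of the "traced" debugger command. Arguments are
// [ "forward" | "backward" ] or [ <start> [ <count> ] ] [ filter spec ];
// "forward"/"backward" continue the previous invocation, using the
// _tracingStart, _tracingCount and _tracingFilter debug variables.
//
// Illustrative invocations (not an exhaustive list):
//	traced					- print the last 30 entries
//	traced 100 20				- print entries 100 to 119
//	traced filter and thread 42 #vnode	- thread 42 entries containing "vnode"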
int
dump_tracing(int argc, char** argv)
{
	int argi = 1;

	// Note: start and index are Pascal-like indices (i.e. in [1, sEntries]).
	int32 count = 30;
	int32 start = 0;	// special index: print the last count entries
	int32 cont = 0;

	bool hasFilter = false;

	if (argi < argc) {
		if (strcmp(argv[argi], "forward") == 0) {
			cont = 1;
			argi++;
		} else if (strcmp(argv[argi], "backward") == 0) {
			cont = -1;
			argi++;
		}
	}

	if (cont != 0 && argi < argc) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	// start
	if (argi < argc) {
		if (strcmp(argv[argi], "filter") == 0) {
			hasFilter = true;
			argi++;
		} else if (argv[argi][0] == '#') {
			hasFilter = true;
		} else {
			start = parse_expression(argv[argi]);
			argi++;
		}
	}

	// count
	if (!hasFilter && argi < argc) {
		if (strcmp(argv[argi], "filter") == 0) {
			hasFilter = true;
			argi++;
		} else if (argv[argi][0] == '#') {
			hasFilter = true;
		} else {
			count = parse_expression(argv[argi]);
			argi++;
		}
	}

	// filter specification
	if (argi < argc) {
		hasFilter = true;
		if (strcmp(argv[argi], "filter") == 0)
			argi++;

		if (!TraceFilterParser::Default()->Parse(argc - argi, argv + argi)) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	if (cont != 0) {
		start = get_debug_variable("_tracingStart", start);
		count = get_debug_variable("_tracingCount", count);
		start = max_c(1, start + count * cont);
		hasFilter = get_debug_variable("_tracingFilter", hasFilter);
	}

	if ((uint32)count > sEntries)
		count = sEntries;
	if (start <= 0)
		start = max_c(1, sEntries - count + 1);
	if (uint32(start + count) > sEntries)
		count = sEntries - start + 1;

	int32 index = 1;
	int32 dumped = 0;

	for (trace_entry* current = sFirstEntry; current != NULL;
			current = next_entry(current), index++) {
		if ((current->flags & BUFFER_ENTRY) != 0) {
			// skip buffer entries
			index--;
			continue;
		}
		if (index < start)
			continue;
		if (index >= start + count)
			break;

		if ((current->flags & ENTRY_INITIALIZED) != 0) {
			char buffer[256];
			LazyTraceOutput out(buffer, sizeof(buffer));

			TraceEntry* entry = (TraceEntry*)current;
			if (hasFilter && !TraceFilterParser::Default()->Filter(entry, out))
				continue;

			kprintf("%5ld. %s\n", index, out.DumpEntry(entry));
		} else
			kprintf("%5ld. ** uninitialized entry **\n", index);

		dumped++;
	}

	kprintf("entries %ld to %ld (%ld of %ld). %ld entries written\n", start,
		start + count - 1, dumped, sEntries, sWritten);

	set_debug_variable("_tracingStart", start);
	set_debug_variable("_tracingCount", count);
	set_debug_variable("_tracingFilter", hasFilter);

	return cont != 0 ? B_KDEBUG_CONT : 0;
}


#endif	// ENABLE_TRACING


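// Allocates "size" bytes of raw storage from the tracing buffer (as a
// BUFFER_ENTRY) and returns a pointer to the usable memory, or NULL if
// tracing is disabled or the allocation fails. The memory is reclaimed
// automatically once the ring buffer wraps around to it.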
extern "C" uint8*
alloc_tracing_buffer(size_t size)
{
#if	ENABLE_TRACING
	trace_entry* entry = allocate_entry(size + sizeof(trace_entry),
		BUFFER_ENTRY);
	if (entry == NULL)
		return NULL;

	return (uint8*)(entry + 1);
#else
	return NULL;
#endif
}


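// Copies "size" bytes from "source" into a newly allocated tracing buffer.
// If "user" is true, "source" is validated and copied as a userland address.
// Returns the buffer, or NULL on failure.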
uint8*
alloc_tracing_buffer_memcpy(const void* source, size_t size, bool user)
{
	if (user && !IS_USER_ADDRESS(source))
		return NULL;

	uint8* buffer = alloc_tracing_buffer(size);
	if (buffer == NULL)
		return NULL;

	if (user) {
		if (user_memcpy(buffer, source, size) != B_OK)
			return NULL;
	} else
		memcpy(buffer, source, size);

	return buffer;
}


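// Copies the null-terminated string "source" (at most "maxSize" bytes,
// including the terminating null) into a newly allocated tracing buffer.
// If "user" is true, "source" is treated as a userland address. Returns the
// copy, or NULL on failure.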
char*
alloc_tracing_buffer_strcpy(const char* source, size_t maxSize, bool user)
{
	if (source == NULL || maxSize == 0)
		return NULL;

	if (user && !IS_USER_ADDRESS(source))
		return NULL;

	// limit maxSize to the actual source string length
	if (user) {
		ssize_t size = user_strlcpy(NULL, source, 0);
			// there's no user_strnlen()
		if (size < 0)
			return NULL;
		maxSize = min_c(maxSize, (size_t)size + 1);
	} else
		maxSize = strnlen(source, maxSize - 1) + 1;

	char* buffer = (char*)alloc_tracing_buffer(maxSize);
	if (buffer == NULL)
		return NULL;

	if (user) {
		if (user_strlcpy(buffer, source, maxSize) < B_OK)
			return NULL;
	} else
		strlcpy(buffer, source, maxSize);

	return buffer;
}


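// Creates the tracing buffer area and registers the "traced" debugger
// command; meant to be called once during kernel initialization.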
extern "C" status_t
tracing_init(void)
{
#if	ENABLE_TRACING
	area_id area = create_area("tracing log", (void**)&sBuffer,
		B_ANY_KERNEL_ADDRESS, MAX_TRACE_SIZE, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK)
		return area;

	sFirstEntry = sBuffer;
	sAfterLastEntry = sBuffer;

	add_debugger_command_etc("traced", &dump_tracing,
		"Dump recorded trace entries",
		"(\"forward\" | \"backward\") | ([ <start> [ <count> ] ] "
			"[ #<pattern> | (\"filter\" <filter>) ])\n"
		"Prints recorded trace entries. If \"backward\" or \"forward\" is\n"
		"specified, the command continues where the previous invocation left\n"
		"off, i.e. printing the previous or next entries, respectively (as\n"
		"many as printed before). In this case the command is continuable:\n"
		"afterwards entering an empty line in the debugger will reinvoke it.\n"
		"  <start>    - The index of the first entry to print. The index of\n"
		"               the first recorded entry is 1. If 0 is specified, the\n"
		"               last <count> recorded entries are printed. Defaults\n"
		"               to 0.\n"
		"  <count>    - The number of entries to be printed. Defaults to 30.\n"
		"  <pattern>  - If specified only entries containing this string are\n"
		"               printed.\n"
		"  <filter>   - If specified only entries matching this filter\n"
		"               expression are printed. The expression can consist of\n"
		"               prefix operators \"not\", \"and\", \"or\", filters of\n"
		"               the kind \"'thread' <thread>\" (matching entries\n"
		"               with the given thread ID), or filters of the kind\n"
		"               \"#<pattern>\" (matching entries containing the given\n"
		"               string).\n", 0);
#endif	// ENABLE_TRACING
	return B_OK;
}
718 
719