xref: /haiku/src/system/kernel/debug/tracing.cpp (revision 746cac055adc6ac3308c7bc2d29040fb95689cc9)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 
8 #include <tracing.h>
9 
10 #include <stdarg.h>
11 #include <stdlib.h>
12 
13 #include <arch/debug.h>
14 #include <debug.h>
15 #include <elf.h>
16 #include <int.h>
17 #include <kernel.h>
18 #include <team.h>
19 #include <thread.h>
20 #include <util/AutoLock.h>
21 
22 
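// A captured kernel stack trace. return_addresses[0] is the old C-style
// flexible array member idiom: the struct is always allocated with
// sizeof(tracing_stack_trace) + maxCount * sizeof(addr_t) bytes, see
// capture_tracing_stack_trace() at the end of this file.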
23 struct tracing_stack_trace {
24 	int32	depth;
25 	addr_t	return_addresses[0];
26 };
27 
28 
29 #if ENABLE_TRACING
30 
31 //#define TRACE_TRACING
32 #ifdef TRACE_TRACING
33 #	define TRACE(x) dprintf_no_syslog x
34 #else
35 #	define TRACE(x) ;
36 #endif
37 
38 
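// Flags stored in trace_entry::flags: WRAP_ENTRY marks the dummy entry that
// sends iteration back to the start of the ring buffer, ENTRY_INITIALIZED is
// set once a TraceEntry's constructor has run completely, BUFFER_ENTRY marks
// raw allocations made via alloc_tracing_buffer(), and FILTER_MATCH is used by
// the "traced" debugger command to remember which entries matched a filter.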
39 enum {
40 	WRAP_ENTRY			= 0x01,
41 	ENTRY_INITIALIZED	= 0x02,
42 	BUFFER_ENTRY		= 0x04,
43 	FILTER_MATCH		= 0x08
44 };
45 
46 
47 static const size_t kTraceOutputBufferSize = 10240;
48 static const size_t kBufferSize = MAX_TRACE_SIZE / 4;
49 
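// The tracing ring buffer consists of 4-byte trace_entry slots. sFirstEntry
// points to the oldest entry still in the buffer, sAfterLastEntry to the slot
// behind the newest one. sEntries counts the TraceEntry objects currently in
// the buffer (buffer entries excluded), sWritten counts all entries ever
// written. sLock protects all of this state.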
50 static trace_entry* sBuffer;
51 static trace_entry* sFirstEntry;
52 static trace_entry* sAfterLastEntry;
53 static uint32 sEntries;
54 static uint32 sWritten;
55 static spinlock sLock;
56 static char* sTraceOutputBuffer;
57 
58 
59 static trace_entry*
60 next_entry(trace_entry* entry)
61 {
62 	entry += entry->size;
63 	if ((entry->flags & WRAP_ENTRY) != 0)
64 		entry = sBuffer;
65 
66 	if (entry == sAfterLastEntry)
67 		return NULL;
68 
69 	return entry;
70 }
71 
72 
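// Steps back by previous_size slots; returns NULL when we are already at
// sFirstEntry. At the very beginning of the buffer the previous entry is
// located via the wrap entry at the end, which is skipped transparently.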
73 static trace_entry*
74 previous_entry(trace_entry* entry)
75 {
76 	if (entry == sFirstEntry)
77 		return NULL;
78 
79 	if (entry == sBuffer) {
80 		// beginning of buffer -- previous entry is a wrap entry
81 		entry = sBuffer + kBufferSize - entry->previous_size;
82 	}
83 
84 	return entry - entry->previous_size;
85 }
86 
87 
88 static bool
89 free_first_entry()
90 {
91 	TRACE(("  skip start %p, %lu*4 bytes\n", sFirstEntry, sFirstEntry->size));
92 
93 	trace_entry* newFirst = next_entry(sFirstEntry);
94 
95 	if (sFirstEntry->flags & BUFFER_ENTRY) {
96 		// a buffer entry -- just skip it
97 	} else if (sFirstEntry->flags & ENTRY_INITIALIZED) {
98 		// fully initialized TraceEntry -- destroy it
99 		TraceEntry::FromTraceEntry(sFirstEntry)->~TraceEntry();
100 		sEntries--;
101 	} else {
102 		// Not a fully initialized TraceEntry. We can't free it, since its
103 		// constructor might still write into the memory and overwrite data
104 		// of the entry we're going to allocate.
105 		// We can't do anything until this entry can be discarded.
106 		return false;
107 	}
108 
109 	if (newFirst == NULL) {
110 		// everything is freed -- practically this can't happen if the
111 		// buffer is large enough to hold three max-sized entries
112 		sFirstEntry = sAfterLastEntry = sBuffer;
113 		TRACE(("free_first_entry(): all entries freed!\n"));
114 	} else
115 		sFirstEntry = newFirst;
116 
117 	return true;
118 }
119 
120 
121 /*!	Makes sure that \a needed * 4 bytes of memory are available at
122 	sAfterLastEntry. Returns \c false if unable to free that much.
123 */
124 static bool
125 make_space(size_t needed)
126 {
127 	// we need space for sAfterLastEntry, too (in case we need to wrap around
128 	// later)
129 	needed++;
130 
131 	// If there's not enough space (free or occupied) after sAfterLastEntry,
132 	// we free all entries in that region and wrap around.
133 	if (sAfterLastEntry + needed > sBuffer + kBufferSize) {
134 		TRACE(("make_space(%lu), wrapping around: after last: %p\n", needed,
135 			sAfterLastEntry));
136 
137 		// Free all entries after sAfterLastEntry and one more at the beginning
138 		// of the buffer.
139 		while (sFirstEntry > sAfterLastEntry) {
140 			if (!free_first_entry())
141 				return false;
142 		}
143 		if (sAfterLastEntry != sBuffer && !free_first_entry())
144 			return false;
145 
146 		// just in case free_first_entry() freed the very last existing entry
147 		if (sAfterLastEntry == sBuffer)
148 			return true;
149 
150 		// mark as wrap entry and actually wrap around
151 		trace_entry* wrapEntry = sAfterLastEntry;
152 		wrapEntry->size = 0;
153 		wrapEntry->flags = WRAP_ENTRY;
154 		sAfterLastEntry = sBuffer;
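		// The first slot's previous_size records the distance (in 4-byte
		// units) back to the wrap entry, so that previous_entry() can step
		// from the buffer start back to the end again.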
155 		sAfterLastEntry->previous_size = sBuffer + kBufferSize - wrapEntry;
156 	}
157 
158 	if (sFirstEntry <= sAfterLastEntry) {
159 		// buffer is empty or the space after sAfterLastEntry is unoccupied
160 		return true;
161 	}
162 
163 	// free the first entries, until there's enough space
164 	size_t space = sFirstEntry - sAfterLastEntry;
165 
166 	if (space < needed) {
167 		TRACE(("make_space(%lu), left %ld\n", needed, space));
168 	}
169 
170 	while (space < needed) {
171 		space += sFirstEntry->size;
172 
173 		if (!free_first_entry())
174 			return false;
175 	}
176 
177 	TRACE(("  out: start %p, entries %ld\n", sFirstEntry, sEntries));
178 
179 	return true;
180 }
181 
182 
183 static trace_entry*
184 allocate_entry(size_t size, uint16 flags)
185 {
186 	if (sAfterLastEntry == NULL || size == 0 || size >= 65532)
187 		return NULL;
188 
189 	InterruptsSpinLocker _(sLock);
190 
191 	size = (size + 3) >> 2;
192 		// 4-byte aligned, don't store the lower 2 bits
193 
194 	TRACE(("allocate_entry(%lu), start %p, end %p, buffer %p\n", size * 4,
195 		sFirstEntry, sAfterLastEntry, sBuffer));
196 
197 	if (!make_space(size))
198 		return NULL;
199 
200 	trace_entry* entry = sAfterLastEntry;
201 	entry->size = size;
202 	entry->flags = flags;
203 	sAfterLastEntry += size;
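	// Also store the size as previous_size of the (not yet allocated) slot
	// behind the new entry, so that previous_entry() and the wrap-around code
	// can walk backwards from there; make_space() reserved that extra slot.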
204 	sAfterLastEntry->previous_size = size;
205 
206 	if (!(flags & BUFFER_ENTRY))
207 		sEntries++;
208 
209 	TRACE(("  entry: %p, end %p, start %p, entries %ld\n", entry,
210 		sAfterLastEntry, sFirstEntry, sEntries));
211 
212 	return entry;
213 }
214 
215 
216 #endif	// ENABLE_TRACING
217 
218 
219 // #pragma mark -
220 
221 
222 TraceOutput::TraceOutput(char* buffer, size_t bufferSize, uint32 flags)
223 	: fBuffer(buffer),
224 	  fCapacity(bufferSize),
225 	  fFlags(flags)
226 {
227 	Clear();
228 }
229 
230 
231 void
232 TraceOutput::Clear()
233 {
234 	if (fCapacity > 0)
235 		fBuffer[0] = '\0';
236 	fSize = 0;
237 }
238 
239 
240 void
241 TraceOutput::Print(const char* format,...)
242 {
243 #if ENABLE_TRACING
244 	if (IsFull())
245 		return;
246 
247 	va_list args;
248 	va_start(args, format);
249 	fSize += vsnprintf(fBuffer + fSize, fCapacity - fSize, format, args);
250 	va_end(args);
251 #endif
252 }
253 
254 
255 void
256 TraceOutput::PrintStackTrace(tracing_stack_trace* stackTrace)
257 {
258 #if ENABLE_TRACING
259 	if (stackTrace == NULL || stackTrace->depth <= 0)
260 		return;
261 
262 	for (int32 i = 0; i < stackTrace->depth; i++) {
263 		addr_t address = stackTrace->return_addresses[i];
264 
265 		const char* symbol;
266 		const char* imageName;
267 		bool exactMatch;
268 		addr_t baseAddress;
269 
270 		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
271 				&imageName, &exactMatch) == B_OK) {
272 			Print("  %p  %s + 0x%lx (%s)%s\n", (void*)address, symbol,
273 				address - baseAddress, imageName,
274 				exactMatch ? "" : " (nearest)");
275 		} else
276 			Print("  %p\n", (void*)address);
277 	}
278 #endif
279 }
280 
281 
282 void
283 TraceOutput::SetLastEntryTime(bigtime_t time)
284 {
285 	fLastEntryTime = time;
286 }
287 
288 
289 bigtime_t
290 TraceOutput::LastEntryTime() const
291 {
292 	return fLastEntryTime;
293 }
294 
295 
296 //	#pragma mark -
297 
298 
299 TraceEntry::TraceEntry()
300 {
301 }
302 
303 
304 TraceEntry::~TraceEntry()
305 {
306 }
307 
308 
309 void
310 TraceEntry::Dump(TraceOutput& out)
311 {
312 #if ENABLE_TRACING
313 	// to be overridden by subclasses
314 	out.Print("ENTRY %p", this);
315 #endif
316 }
317 
318 
319 void
320 TraceEntry::DumpStackTrace(TraceOutput& out)
321 {
322 }
323 
324 
325 void
326 TraceEntry::Initialized()
327 {
328 #if ENABLE_TRACING
329 	ToTraceEntry()->flags |= ENTRY_INITIALIZED;
330 	sWritten++;
331 #endif
332 }
333 
334 
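// TraceEntry objects live directly in the tracing ring buffer: operator new
// carves out a trace_entry header plus the object itself via allocate_entry()
// and returns the memory behind the header. The allocation can fail, which is
// why entries are created with new(std::nothrow) throughout this file.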
335 void*
336 TraceEntry::operator new(size_t size, const std::nothrow_t&) throw()
337 {
338 #if ENABLE_TRACING
339 	trace_entry* entry = allocate_entry(size + sizeof(trace_entry), 0);
340 	return entry != NULL ? entry + 1 : NULL;
341 #endif
342 	return NULL;
343 }
344 
345 
346 //	#pragma mark -
347 
348 
349 AbstractTraceEntry::AbstractTraceEntry()
350 {
351 	struct thread* thread = thread_get_current_thread();
352 	if (thread != NULL) {
353 		fThread = thread->id;
354 		if (thread->team)
355 			fTeam = thread->team->id;
356 	}
357 	fTime = system_time();
358 }
359 
360 AbstractTraceEntry::~AbstractTraceEntry()
361 {
362 }
363 
364 
365 void
366 AbstractTraceEntry::Dump(TraceOutput& out)
367 {
368 	bigtime_t time = (out.Flags() & TRACE_OUTPUT_DIFF_TIME)
369 		? fTime - out.LastEntryTime()
370 		: fTime;
371 
372 	if (out.Flags() & TRACE_OUTPUT_TEAM_ID)
373 		out.Print("[%6ld:%6ld] %10Ld: ", fThread, fTeam, time);
374 	else
375 		out.Print("[%6ld] %10Ld: ", fThread, time);
376 
377 	AddDump(out);
378 
379 	out.SetLastEntryTime(fTime);
380 }
381 
382 
383 void
384 AbstractTraceEntry::AddDump(TraceOutput& out)
385 {
386 }
387 
388 
389 //	#pragma mark -
390 
391 
392 #if ENABLE_TRACING
393 
394 class KernelTraceEntry : public AbstractTraceEntry {
395 	public:
396 		KernelTraceEntry(const char* message)
397 		{
398 			fMessage = alloc_tracing_buffer_strcpy(message, 256, false);
399 
400 #if KTRACE_PRINTF_STACK_TRACE
401 			fStackTrace = capture_tracing_stack_trace(
402 				KTRACE_PRINTF_STACK_TRACE, 1, false);
403 #endif
404 			Initialized();
405 		}
406 
407 		virtual void AddDump(TraceOutput& out)
408 		{
409 			out.Print("kern: %s", fMessage);
410 		}
411 
412 #if KTRACE_PRINTF_STACK_TRACE
413 		virtual void DumpStackTrace(TraceOutput& out)
414 		{
415 			out.PrintStackTrace(fStackTrace);
416 		}
417 #endif
418 
419 	private:
420 		char*	fMessage;
421 #if KTRACE_PRINTF_STACK_TRACE
422 		tracing_stack_trace* fStackTrace;
423 #endif
424 };
425 
426 
427 class UserTraceEntry : public AbstractTraceEntry {
428 	public:
429 		UserTraceEntry(const char* message)
430 		{
431 			fMessage = alloc_tracing_buffer_strcpy(message, 256, true);
432 
433 			Initialized();
434 		}
435 
436 		virtual void AddDump(TraceOutput& out)
437 		{
438 			out.Print("user: %s", fMessage);
439 		}
440 
441 	private:
442 		char*	fMessage;
443 };
444 
445 #endif	// ENABLE_TRACING
446 
447 
448 //	#pragma mark - trace filters
449 
450 
451 TraceFilter::~TraceFilter()
452 {
453 }
454 
455 
456 bool
457 TraceFilter::Filter(const TraceEntry* entry, LazyTraceOutput& out)
458 {
459 	return false;
460 }
461 
462 
463 
464 class ThreadTraceFilter : public TraceFilter {
465 public:
466 	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
467 	{
468 		const AbstractTraceEntry* entry
469 			= dynamic_cast<const AbstractTraceEntry*>(_entry);
470 		return (entry != NULL && entry->Thread() == fThread);
471 	}
472 };
473 
474 
475 class TeamTraceFilter : public TraceFilter {
476 public:
477 	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
478 	{
479 		const AbstractTraceEntry* entry
480 			= dynamic_cast<const AbstractTraceEntry*>(_entry);
481 		return (entry != NULL && entry->Team() == fTeam);
482 	}
483 };
484 
485 
486 class PatternTraceFilter : public TraceFilter {
487 public:
488 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
489 	{
490 		return strstr(out.DumpEntry(entry), fString) != NULL;
491 	}
492 };
493 
494 
495 class DecimalPatternTraceFilter : public TraceFilter {
496 public:
497 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
498 	{
499 		// TODO: this is *very* slow
500 		char buffer[64];
501 		snprintf(buffer, sizeof(buffer), "%Ld", fValue);
502 		return strstr(out.DumpEntry(entry), buffer) != NULL;
503 	}
504 };
505 
506 class HexPatternTraceFilter : public TraceFilter {
507 public:
508 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
509 	{
510 		// TODO: this is *very* slow
511 		char buffer[64];
512 		snprintf(buffer, sizeof(buffer), "%Lx", fValue);
513 		return strstr(out.DumpEntry(entry), buffer) != NULL;
514 	}
515 };
516 
517 class StringPatternTraceFilter : public TraceFilter {
518 public:
519 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
520 	{
521 		if (IS_KERNEL_ADDRESS(fValue))
522 			return strstr(out.DumpEntry(entry), (const char*)fValue) != NULL;
523 
524 		// TODO: this is *very* slow
525 		char buffer[64];
526 		user_strlcpy(buffer, (const char*)fValue, sizeof(buffer));
527 		return strstr(out.DumpEntry(entry), buffer) != NULL;
528 	}
529 };
530 
531 class NotTraceFilter : public TraceFilter {
532 public:
533 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
534 	{
535 		return !fSubFilters.first->Filter(entry, out);
536 	}
537 };
538 
539 
540 class AndTraceFilter : public TraceFilter {
541 public:
542 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
543 	{
544 		return fSubFilters.first->Filter(entry, out)
545 			&& fSubFilters.second->Filter(entry, out);
546 	}
547 };
548 
549 
550 class OrTraceFilter : public TraceFilter {
551 public:
552 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
553 	{
554 		return fSubFilters.first->Filter(entry, out)
555 			|| fSubFilters.second->Filter(entry, out);
556 	}
557 };
558 
559 
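// TraceFilterParser turns the token list given to the "traced" command into a
// small tree of TraceFilter objects. For example, the (hypothetical) command
// line
//
//   traced 0 10 filter and thread 42 #vfs
//
// parses into AndTraceFilter(ThreadTraceFilter(42), PatternTraceFilter("vfs")).
// The filter nodes are placement-new'ed into the fixed fFilters array, so an
// expression may contain at most MAX_FILTERS nodes; the root is always
// fFilters[0], which Filter() returns.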
560 class TraceFilterParser {
561 public:
562 	static TraceFilterParser* Default()
563 	{
564 		return &sParser;
565 	}
566 
567 	bool Parse(int argc, const char* const* argv)
568 	{
569 		fTokens = argv;
570 		fTokenCount = argc;
571 		fTokenIndex = 0;
572 		fFilterCount = 0;
573 
574 		TraceFilter* filter = _ParseExpression();
575 		return fTokenIndex == fTokenCount && filter != NULL;
576 	}
577 
578 	TraceFilter* Filter()
579 	{
580 		return &fFilters[0];
581 	}
582 
583 private:
584 	TraceFilter* _ParseExpression()
585 	{
586 		const char* token = _NextToken();
587 		if (!token) {
588 			// unexpected end of expression
589 			return NULL;
590 		}
591 
592 		if (fFilterCount == MAX_FILTERS) {
593 			// too many filters
594 			return NULL;
595 		}
596 
597 		if (token[0] == '#') {
598 			TraceFilter* filter = new(&fFilters[fFilterCount++])
599 				PatternTraceFilter;
600 			filter->fString = token + 1;
601 			return filter;
602 		} else if (token[0] == 'd' && token[1] == '#') {
603 			TraceFilter* filter = new(&fFilters[fFilterCount++])
604 				DecimalPatternTraceFilter;
605 			filter->fValue = parse_expression(token + 2);
606 			return filter;
607 		} else if (token[0] == 'x' && token[1] == '#') {
608 			TraceFilter* filter = new(&fFilters[fFilterCount++])
609 				HexPatternTraceFilter;
610 			filter->fValue = parse_expression(token + 2);
611 			return filter;
612 		} else if (token[0] == 's' && token[1] == '#') {
613 			TraceFilter* filter = new(&fFilters[fFilterCount++])
614 				StringPatternTraceFilter;
615 			filter->fValue = parse_expression(token + 2);
616 			return filter;
617 		} else if (strcmp(token, "not") == 0) {
618 			TraceFilter* filter = new(&fFilters[fFilterCount++]) NotTraceFilter;
619 			if ((filter->fSubFilters.first = _ParseExpression()) != NULL)
620 				return filter;
621 			return NULL;
622 		} else if (strcmp(token, "and") == 0) {
623 			TraceFilter* filter = new(&fFilters[fFilterCount++]) AndTraceFilter;
624 			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
625 				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
626 				return filter;
627 			}
628 			return NULL;
629 		} else if (strcmp(token, "or") == 0) {
630 			TraceFilter* filter = new(&fFilters[fFilterCount++]) OrTraceFilter;
631 			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
632 				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
633 				return filter;
634 			}
635 			return NULL;
636 		} else if (strcmp(token, "thread") == 0) {
637 			const char* arg = _NextToken();
638 			if (arg == NULL) {
639 				// unexpected end of expression
640 				return NULL;
641 			}
642 
643 			TraceFilter* filter = new(&fFilters[fFilterCount++])
644 				ThreadTraceFilter;
645 			filter->fThread = strtol(arg, NULL, 0);
646 			return filter;
647 		} else if (strcmp(token, "team") == 0) {
648 			const char* arg = _NextToken();
649 			if (arg == NULL) {
650 				// unexpected end of expression
651 				return NULL;
652 			}
653 
654 			TraceFilter* filter = new(&fFilters[fFilterCount++])
655 				TeamTraceFilter;
656 			filter->fTeam = strtol(arg, NULL, 0);
657 			return filter;
658 		} else {
659 			// invalid token
660 			return NULL;
661 		}
662 	}
663 
664 	const char* _CurrentToken() const
665 	{
666 		if (fTokenIndex >= 1 && fTokenIndex <= fTokenCount)
667 			return fTokens[fTokenIndex - 1];
668 		return NULL;
669 	}
670 
671 	const char* _NextToken()
672 	{
673 		if (fTokenIndex >= fTokenCount)
674 			return NULL;
675 		return fTokens[fTokenIndex++];
676 	}
677 
678 private:
679 	enum { MAX_FILTERS = 32 };
680 
681 	const char* const*			fTokens;
682 	int							fTokenCount;
683 	int							fTokenIndex;
684 	TraceFilter					fFilters[MAX_FILTERS];
685 	int							fFilterCount;
686 
687 	static TraceFilterParser	sParser;
688 };
689 
690 
691 TraceFilterParser TraceFilterParser::sParser;
692 
693 
694 //	#pragma mark -
695 
696 
697 #if ENABLE_TRACING
698 
699 
700 TraceEntry*
701 TraceEntryIterator::Next()
702 {
703 	if (fIndex == 0) {
704 		fEntry = _NextNonBufferEntry(sFirstEntry);
705 		fIndex = 1;
706 	} else if (fEntry != NULL) {
707 		fEntry = _NextNonBufferEntry(next_entry(fEntry));
708 		fIndex++;
709 	}
710 
711 	return Current();
712 }
713 
714 
715 TraceEntry*
716 TraceEntryIterator::Previous()
717 {
718 	if (fIndex == (int32)sEntries + 1)
719 		fEntry = sAfterLastEntry;
720 
721 	if (fEntry != NULL) {
722 		fEntry = _PreviousNonBufferEntry(previous_entry(fEntry));
723 		fIndex--;
724 	}
725 
726 	return Current();
727 }
728 
729 
730 TraceEntry*
731 TraceEntryIterator::MoveTo(int32 index)
732 {
733 	if (index == fIndex)
734 		return Current();
735 
736 	if (index <= 0 || index > (int32)sEntries) {
737 		fIndex = (index <= 0 ? 0 : sEntries + 1);
738 		fEntry = NULL;
739 		return NULL;
740 	}
741 
742 	// get the shortest iteration path
743 	int32 distance = index - fIndex;
744 	int32 direction = distance < 0 ? -1 : 1;
745 	distance *= direction;
746 
747 	if (index < distance) {
748 		distance = index;
749 		direction = 1;
750 		fEntry = NULL;
751 		fIndex = 0;
752 	}
753 	if ((int32)sEntries + 1 - fIndex < distance) {
754 		distance = sEntries + 1 - fIndex;
755 		direction = -1;
756 		fEntry = NULL;
757 		fIndex = sEntries + 1;
758 	}
759 
760 	// iterate to the index
761 	if (direction < 0) {
762 		while (fIndex != index)
763 			Previous();
764 	} else {
765 		while (fIndex != index)
766 			Next();
767 	}
768 
769 	return Current();
770 }
771 
772 
773 trace_entry*
774 TraceEntryIterator::_NextNonBufferEntry(trace_entry* entry)
775 {
776 	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
777 		entry = next_entry(entry);
778 
779 	return entry;
780 }
781 
782 
783 trace_entry*
784 TraceEntryIterator::_PreviousNonBufferEntry(trace_entry* entry)
785 {
786 	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
787 		entry = previous_entry(entry);
788 
789 	return entry;
790 }
791 
792 
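// Implements the "traced" debugger command (the usage text is registered in
// tracing_init()). If wrapperFilter is non-NULL, it is initialized with the
// parsed filter expression and used in its place -- this is how dump_tracing()
// lets callers layer their own filtering on top of the command line filter.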
793 int
794 dump_tracing_internal(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
795 {
796 	int argi = 1;
797 
798 	// variables in which we store our state so that the command is continuable
799 	static int32 _previousCount = 0;
800 	static bool _previousHasFilter = false;
801 	static bool _previousPrintStackTrace = false;
802 	static int32 _previousMaxToCheck = 0;
803 	static int32 _previousFirstChecked = 1;
804 	static int32 _previousLastChecked = -1;
805 	static int32 _previousDirection = 1;
806 	static uint32 _previousWritten = 0;
807 	static uint32 _previousEntries = 0;
808 	static uint32 _previousOutputFlags = 0;
809 	static TraceEntryIterator iterator;
810 
811 
812 	// Note: start and index are Pascal-like indices (i.e. in [1, sEntries]).
813 	int32 start = 0;	// special index: print the last count entries
814 	int32 count = 0;
815 	int32 maxToCheck = 0;
816 	int32 cont = 0;
817 
818 	bool hasFilter = false;
819 	bool printStackTrace = false;
820 
821 	uint32 outputFlags = 0;
822 	while (argi < argc) {
823 		if (strcmp(argv[argi], "--difftime") == 0) {
824 			outputFlags |= TRACE_OUTPUT_DIFF_TIME;
825 			argi++;
826 		} else if (strcmp(argv[argi], "--printteam") == 0) {
827 			outputFlags |= TRACE_OUTPUT_TEAM_ID;
828 			argi++;
829 		} else if (strcmp(argv[argi], "--stacktrace") == 0) {
830 			printStackTrace = true;
831 			argi++;
832 		} else
833 			break;
834 	}
835 
836 	if (argi < argc) {
837 		if (strcmp(argv[argi], "forward") == 0) {
838 			cont = 1;
839 			argi++;
840 		} else if (strcmp(argv[argi], "backward") == 0) {
841 			cont = -1;
842 			argi++;
843 		}
844 	} else
845 		cont = _previousDirection;
846 
847 	if (cont != 0) {
848 		if (argi < argc) {
849 			print_debugger_command_usage(argv[0]);
850 			return 0;
851 		}
852 		if (sWritten == 0 || sWritten != _previousWritten
853 			|| sEntries != _previousEntries) {
854 			kprintf("Can't continue iteration. \"%s\" has not been invoked "
855 				"before, or there were new entries written since the last "
856 				"invocation.\n", argv[0]);
857 			return 0;
858 		}
859 	}
860 
861 	// get start, count, maxToCheck
862 	int32* params[3] = { &start, &count, &maxToCheck };
863 	for (int i = 0; i < 3 && !hasFilter && argi < argc; i++) {
864 		if (strcmp(argv[argi], "filter") == 0) {
865 			hasFilter = true;
866 			argi++;
867 		} else if (argv[argi][0] == '#') {
868 			hasFilter = true;
869 		} else {
870 			*params[i] = parse_expression(argv[argi]);
871 			argi++;
872 		}
873 	}
874 
875 	// filter specification
876 	if (argi < argc) {
877 		hasFilter = true;
878 		if (strcmp(argv[argi], "filter") == 0)
879 			argi++;
880 
881 		if (!TraceFilterParser::Default()->Parse(argc - argi, argv + argi)) {
882 			print_debugger_command_usage(argv[0]);
883 			return 0;
884 		}
885 	}
886 
887 	int32 direction;
888 	int32 firstToCheck;
889 	int32 lastToCheck;
890 
891 	if (cont != 0) {
892 		// get values from the previous iteration
893 		direction = cont;
894 		count = _previousCount;
895 		maxToCheck = _previousMaxToCheck;
896 		hasFilter = _previousHasFilter;
897 		outputFlags = _previousOutputFlags;
898 		printStackTrace = _previousPrintStackTrace;
899 
900 		if (direction < 0)
901 			start = _previousFirstChecked - 1;
902 		else
903 			start = _previousLastChecked + 1;
904 	} else {
905 		// defaults for count and maxToCheck
906 		if (count == 0)
907 			count = 30;
908 		if (maxToCheck == 0 || !hasFilter)
909 			maxToCheck = count;
910 		else if (maxToCheck < 0)
911 			maxToCheck = sEntries;
912 
913 		// determine iteration direction
914 		direction = (start <= 0 || count < 0 ? -1 : 1);
915 
916 		// validate count and maxToCheck
917 		if (count < 0)
918 			count = -count;
919 		if (maxToCheck < 0)
920 			maxToCheck = -maxToCheck;
921 		if (maxToCheck > (int32)sEntries)
922 			maxToCheck = sEntries;
923 		if (count > maxToCheck)
924 			count = maxToCheck;
925 
926 		// validate start
927 		if (start <= 0 || start > (int32)sEntries)
928 			start = max_c(1, sEntries);
929 	}
930 
931 	if (direction < 0) {
932 		firstToCheck = max_c(1, start - maxToCheck + 1);
933 		lastToCheck = start;
934 	} else {
935 		firstToCheck = start;
936 		lastToCheck = min_c((int32)sEntries, start + maxToCheck - 1);
937 	}
938 
939 	// reset the iterator, if something changed in the meantime
940 	if (sWritten == 0 || sWritten != _previousWritten
941 		|| sEntries != _previousEntries) {
942 		iterator.Reset();
943 	}
944 
945 	LazyTraceOutput out(sTraceOutputBuffer, kTraceOutputBufferSize,
946 		outputFlags);
947 
948 	bool markedMatching = false;
949 	int32 firstToDump = firstToCheck;
950 	int32 lastToDump = lastToCheck;
951 
952 	TraceFilter* filter = NULL;
953 	if (hasFilter)
954 		filter = TraceFilterParser::Default()->Filter();
955 
956 	if (wrapperFilter != NULL) {
957 		wrapperFilter->Init(filter, direction, cont != 0);
958 		filter = wrapperFilter;
959 	}
960 
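	// When iterating backwards with a filter we cannot know in advance which
	// entries will end up being printed. So we make a first pass from
	// lastToCheck downwards, marking up to <count> matching entries with
	// FILTER_MATCH, and only then dump the marked entries in forward order
	// below.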
961 	if (direction < 0 && filter && lastToCheck - firstToCheck >= count) {
962 		// iteration direction is backwards
963 		markedMatching = true;
964 
965 		// From the last entry to check iterate backwards to check filter
966 		// matches.
967 		int32 matching = 0;
968 
969 		// move to the entry after the last entry to check
970 		iterator.MoveTo(lastToCheck + 1);
971 
972 		// iterate backwards
973 		firstToDump = -1;
974 		lastToDump = -1;
975 		while (iterator.Index() > firstToCheck) {
976 			TraceEntry* entry = iterator.Previous();
977 			if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
978 				out.Clear();
979 				if (filter->Filter(entry, out)) {
980 					entry->ToTraceEntry()->flags |= FILTER_MATCH;
981 					if (lastToDump == -1)
982 						lastToDump = iterator.Index();
983 					firstToDump = iterator.Index();
984 
985 					matching++;
986 					if (matching >= count)
987 						break;
988 				} else
989 					entry->ToTraceEntry()->flags &= ~FILTER_MATCH;
990 			}
991 		}
992 
993 		firstToCheck = iterator.Index();
994 
995 		// iterate to the previous entry, so that the next loop starts at the
996 		// right one
997 		iterator.Previous();
998 	}
999 
1000 	out.SetLastEntryTime(0);
1001 
1002 	// set the iterator to the entry before the first one to dump
1003 	iterator.MoveTo(firstToDump - 1);
1004 
1005 	// dump the entries matching the filter in the range
1006 	// [firstToDump, lastToDump]
1007 	int32 dumped = 0;
1008 
1009 	while (TraceEntry* entry = iterator.Next()) {
1010 		int32 index = iterator.Index();
1011 		if (index < firstToDump)
1012 			continue;
1013 		if (index > lastToDump || dumped >= count) {
1014 			if (direction > 0)
1015 				lastToCheck = index - 1;
1016 			break;
1017 		}
1018 
1019 		if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
1020 			out.Clear();
1021 			if (filter && (markedMatching
1022 					? (entry->Flags() & FILTER_MATCH) == 0
1023 					: !filter->Filter(entry, out))) {
1024 				continue;
1025 			}
1026 
1027 			// don't print trailing new line
1028 			const char* dump = out.DumpEntry(entry);
1029 			int len = strlen(dump);
1030 			if (len > 0 && dump[len - 1] == '\n')
1031 				len--;
1032 
1033 			kprintf("%5ld. %.*s\n", index, len, dump);
1034 
1035 			if (printStackTrace) {
1036 				out.Clear();
1037 				entry->DumpStackTrace(out);
1038 				if (out.Size() > 0)
1039 					kputs(out.Buffer());
1040 			}
1041 		} else if (!filter)
1042 			kprintf("%5ld. ** uninitialized entry **\n", index);
1043 
1044 		dumped++;
1045 	}
1046 
1047 	kprintf("printed %ld entries within range %ld to %ld (%ld of %ld total, "
1048 		"%ld ever)\n", dumped, firstToCheck, lastToCheck,
1049 		lastToCheck - firstToCheck + 1, sEntries, sWritten);
1050 
1051 	// store iteration state
1052 	_previousCount = count;
1053 	_previousMaxToCheck = maxToCheck;
1054 	_previousHasFilter = hasFilter;
1055 	_previousPrintStackTrace = printStackTrace;
1056 	_previousFirstChecked = firstToCheck;
1057 	_previousLastChecked = lastToCheck;
1058 	_previousDirection = direction;
1059 	_previousWritten = sWritten;
1060 	_previousEntries = sEntries;
1061 	_previousOutputFlags = outputFlags;
1062 
1063 	return cont != 0 ? B_KDEBUG_CONT : 0;
1064 }
1065 
1066 
1067 static int
1068 dump_tracing_command(int argc, char** argv)
1069 {
1070 	return dump_tracing_internal(argc, argv, NULL);
1071 }
1072 
1073 
1074 #endif	// ENABLE_TRACING
1075 
1076 
1077 extern "C" uint8*
1078 alloc_tracing_buffer(size_t size)
1079 {
1080 #if	ENABLE_TRACING
1081 	trace_entry* entry = allocate_entry(size + sizeof(trace_entry),
1082 		BUFFER_ENTRY);
1083 	if (entry == NULL)
1084 		return NULL;
1085 
1086 	return (uint8*)(entry + 1);
1087 #else
1088 	return NULL;
1089 #endif
1090 }
1091 
1092 
1093 uint8*
1094 alloc_tracing_buffer_memcpy(const void* source, size_t size, bool user)
1095 {
1096 	if (user && !IS_USER_ADDRESS(source))
1097 		return NULL;
1098 
1099 	uint8* buffer = alloc_tracing_buffer(size);
1100 	if (buffer == NULL)
1101 		return NULL;
1102 
1103 	if (user) {
1104 		if (user_memcpy(buffer, source, size) != B_OK)
1105 			return NULL;
1106 	} else
1107 		memcpy(buffer, source, size);
1108 
1109 	return buffer;
1110 }
1111 
1112 
1113 char*
1114 alloc_tracing_buffer_strcpy(const char* source, size_t maxSize, bool user)
1115 {
1116 	if (source == NULL || maxSize == 0)
1117 		return NULL;
1118 
1119 	if (user && !IS_USER_ADDRESS(source))
1120 		return NULL;
1121 
1122 	// limit maxSize to the actual source string len
1123 	if (user) {
1124 		ssize_t size = user_strlcpy(NULL, source, 0);
1125 			// there's no user_strnlen()
1126 		if (size < 0)
1127 			return NULL;
1128 		maxSize = min_c(maxSize, (size_t)size + 1);
1129 	} else
1130 		maxSize = strnlen(source, maxSize - 1) + 1;
1131 
1132 	char* buffer = (char*)alloc_tracing_buffer(maxSize);
1133 	if (buffer == NULL)
1134 		return NULL;
1135 
1136 	if (user) {
1137 		if (user_strlcpy(buffer, source, maxSize) < B_OK)
1138 			return NULL;
1139 	} else
1140 		strlcpy(buffer, source, maxSize);
1141 
1142 	return buffer;
1143 }
1144 
1145 
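// Captures up to maxCount return addresses of the current thread's stack,
// skipping the first skipFrames frames. The returned object is allocated from
// the tracing buffer itself (as a BUFFER_ENTRY), so the caller must not free
// it; it is reclaimed together with the surrounding entries when the ring
// buffer wraps. Returns NULL if interrupts are disabled or if the allocation
// fails.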
1146 tracing_stack_trace*
1147 capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool userOnly)
1148 {
1149 #if	ENABLE_TRACING
1150 	// TODO: page_fault_exception() doesn't allow us to gracefully handle
1151 	// a bad address in the stack trace, if interrupts are disabled.
1152 	if (!are_interrupts_enabled())
1153 		return NULL;
1154 
1155 	tracing_stack_trace* stackTrace
1156 		= (tracing_stack_trace*)alloc_tracing_buffer(
1157 			sizeof(tracing_stack_trace) + maxCount * sizeof(addr_t));
1158 
1159 	if (stackTrace != NULL) {
1160 		stackTrace->depth = arch_debug_get_stack_trace(
1161 			stackTrace->return_addresses, maxCount, skipFrames + 1, userOnly);
1162 	}
1163 
1164 	return stackTrace;
1165 #else
1166 	return NULL;
1167 #endif
1168 }
1169 
1170 
1171 int
1172 dump_tracing(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
1173 {
1174 #if	ENABLE_TRACING
1175 	return dump_tracing_internal(argc, argv, wrapperFilter);
1176 #else
1177 	return 0;
1178 #endif
1179 }
1180 
1181 
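// lock_tracing_buffer()/unlock_tracing_buffer() expose the tracing spinlock so
// that callers can keep new entries from being written while they inspect the
// buffer. As with any spinlock, the critical section must be kept short.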
1182 void
1183 lock_tracing_buffer()
1184 {
1185 #if ENABLE_TRACING
1186 	acquire_spinlock(&sLock);
1187 #endif
1188 }
1189 
1190 
1191 void
1192 unlock_tracing_buffer()
1193 {
1194 #if ENABLE_TRACING
1195 	release_spinlock(&sLock);
1196 #endif
1197 }
1198 
1199 
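// Creates a single kernel area that holds both the text output buffer used by
// LazyTraceOutput when dumping entries (kTraceOutputBufferSize bytes) and,
// directly behind it, the MAX_TRACE_SIZE byte ring buffer for the trace
// entries, and registers the "traced" debugger command.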
1200 extern "C" status_t
1201 tracing_init(void)
1202 {
1203 #if	ENABLE_TRACING
1204 	area_id area = create_area("tracing log", (void**)&sTraceOutputBuffer,
1205 		B_ANY_KERNEL_ADDRESS, MAX_TRACE_SIZE + kTraceOutputBufferSize,
1206 		B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
1207 	if (area < B_OK)
1208 		return area;
1209 
1210 	sBuffer = (trace_entry*)(sTraceOutputBuffer + kTraceOutputBufferSize);
1211 	sFirstEntry = sBuffer;
1212 	sAfterLastEntry = sBuffer;
1213 
1214 	add_debugger_command_etc("traced", &dump_tracing_command,
1215 		"Dump recorded trace entries",
1216 		"[ \"--printteam\" ] [ \"--difftime\" ] [ \"--stacktrace\" ] (\"forward\" | \"backward\") "
1217 			"| ([ <start> [ <count> [ <range> ] ] ] "
1218 				"[ #<pattern> | (\"filter\" <filter>) ])\n"
1219 		"Prints recorded trace entries. If \"backward\" or \"forward\" is\n"
1220 		"specified, the command continues where the previous invocation left\n"
1221 		"off, i.e. printing the previous respectively next entries (as many\n"
1222 		"as printed before). In this case the command is continuable, that is\n"
1223 		"afterwards entering an empty line in the debugger will reinvoke it.\n"
1224 		"If no arguments are given, the command continues in the direction\n"
1225 		"of the last invocation.\n"
1226 		"\"--printteam\"  enables printing the entries' team IDs.\n"
1227 		"\"--difftime\"   print difference times for all but the first entry.\n"
1228 		"\"--stacktrace\" print stack traces for entries that captured one.\n"
1229 		"  <start>    - The base index of the entries to print. Depending on\n"
1230 		"               whether the iteration direction is forward or\n"
1231 		"               backward this will be the first or last entry printed\n"
1232 		"               (potentially, if a filter is specified). The index of\n"
1233 		"               the first entry in the trace buffer is 1. If 0 is\n"
1234 		"               specified, the last <count> recorded entries are\n"
1235 		"               printed (iteration direction is backward). Defaults \n"
1236 		"               printed (iteration direction is backward). Defaults\n"
1237 		"  <count>    - The number of entries to be printed. Defaults to 30.\n"
1238 		"               If negative, the -<count> entries before and\n"
1239 		"               including <start> will be printed.\n"
1240 		"  <range>    - Only relevant if a filter is specified. Specifies the\n"
1241 		"               number of entries to be filtered -- depending on the\n"
1242 		"               iteration direction the entries before or after\n"
1243 		"               <start>. If more than <count> entries match the\n"
1244 		"               filter, only the first (forward) or last (backward)\n"
1245 		"               <count> matching entries will be printed. If 0 is\n"
1246 		"               specified <range> will be set to <count>. If -1,\n"
1247 		"               <range> will be set to the number of recorded\n"
1248 		"               entries.\n"
1249 		"  <pattern>  - If specified only entries containing this string are\n"
1250 		"               printed.\n"
1251 		"  <filter>   - If specified only entries matching this filter\n"
1252 		"               expression are printed. The expression can consist of\n"
1253 		"               prefix operators \"not\", \"and\", \"or\", and\n"
1254 		"               filters \"'thread' <thread>\" (matching entries\n"
1255 		"               with the given thread ID), \"'team' <team>\"\n"
1256 		"               (matching entries with the given team ID), and\n"
1257 		"               \"#<pattern>\" (matching entries containing the given\n"
1258 		"               string).\n", 0);
1259 #endif	// ENABLE_TRACING
1260 	return B_OK;
1261 }
1262 
1263 
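// ktrace_printf() is the printf-style front end for kernel code that wants to
// drop a plain text message into the trace buffer; a (hypothetical) call site
// could look like this:
//
//   ktrace_printf("vm: mapping page %p at %#lx", page, virtualAddress);
//
// The formatted text is truncated to 256 bytes and recorded as a
// KernelTraceEntry.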
1264 void
1265 ktrace_printf(const char *format, ...)
1266 {
1267 #if	ENABLE_TRACING
1268 	va_list list;
1269 	va_start(list, format);
1270 
1271 	char buffer[256];
1272 	vsnprintf(buffer, sizeof(buffer), format, list);
1273 
1274 	va_end(list);
1275 
1276 	new(nothrow) KernelTraceEntry(buffer);
1277 #endif	// ENABLE_TRACING
1278 }
1279 
1280 
1281 void
1282 _user_ktrace_output(const char *message)
1283 {
1284 #if	ENABLE_TRACING
1285 	new(nothrow) UserTraceEntry(message);
1286 #endif	// ENABLE_TRACING
1287 }
1288 
1289