xref: /haiku/src/system/kernel/debug/tracing.cpp (revision b30304acc8c37e678a1bf66976d15bdab103f931)
/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include <tracing.h>

#include <stdarg.h>
#include <stdlib.h>

#include <arch/debug.h>
#include <debug.h>
#include <elf.h>
#include <int.h>
#include <kernel.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>


struct tracing_stack_trace {
	int32	depth;
	addr_t	return_addresses[0];
};


#if ENABLE_TRACING

//#define TRACE_TRACING
#ifdef TRACE_TRACING
#	define TRACE(x) dprintf_no_syslog x
#else
#	define TRACE(x) ;
#endif

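// The trace log lives in a ring buffer of kBufferSize 4-byte units (sBuffer).
// Each entry begins with a trace_entry header (see <tracing.h>) whose "size"
// and "previous_size" fields are counted in those units, so the log can be
// walked forwards (next_entry()) as well as backwards (previous_entry()).
// A dummy entry flagged WRAP_ENTRY tells the iteration code to continue at
// sBuffer; entries flagged BUFFER_ENTRY are raw buffers allocated via
// alloc_tracing_buffer() and are skipped when iterating over TraceEntry
// objects. ENTRY_INITIALIZED marks entries whose constructor has completed,
// and FILTER_MATCH is used by the "traced" command to remember filter
// matches. sFirstEntry/sAfterLastEntry delimit the used region, sEntries
// counts the live (non-buffer) entries, sWritten all entries ever written.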
enum {
	WRAP_ENTRY			= 0x01,
	ENTRY_INITIALIZED	= 0x02,
	BUFFER_ENTRY		= 0x04,
	FILTER_MATCH		= 0x08
};


static const size_t kTraceOutputBufferSize = 10240;
static const size_t kBufferSize = MAX_TRACE_SIZE / 4;

static trace_entry* sBuffer;
static trace_entry* sFirstEntry;
static trace_entry* sAfterLastEntry;
static uint32 sEntries;
static uint32 sWritten;
static spinlock sLock;
static char* sTraceOutputBuffer;


static trace_entry*
next_entry(trace_entry* entry)
{
	entry += entry->size;
	if ((entry->flags & WRAP_ENTRY) != 0)
		entry = sBuffer;

	if (entry == sAfterLastEntry)
		return NULL;

	return entry;
}


static trace_entry*
previous_entry(trace_entry* entry)
{
	if (entry == sFirstEntry)
		return NULL;

	if (entry == sBuffer) {
		// beginning of buffer -- previous entry is a wrap entry
		entry = sBuffer + kBufferSize - entry->previous_size;
	}

	return entry - entry->previous_size;
}

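// Discards the oldest entry in the buffer to make room for new ones. Returns
// false if the oldest entry is a TraceEntry whose constructor has not
// finished yet (i.e. ENTRY_INITIALIZED is not set), since its memory must not
// be reused yet.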
static bool
free_first_entry()
{
	TRACE(("  skip start %p, %lu*4 bytes\n", sFirstEntry, sFirstEntry->size));

	trace_entry* newFirst = next_entry(sFirstEntry);

	if (sFirstEntry->flags & BUFFER_ENTRY) {
		// a buffer entry -- just skip it
	} else if (sFirstEntry->flags & ENTRY_INITIALIZED) {
		// fully initialized TraceEntry -- destroy it
		TraceEntry::FromTraceEntry(sFirstEntry)->~TraceEntry();
		sEntries--;
	} else {
		// Not fully initialized TraceEntry. We can't free it, since
		// its constructor might still write into the memory and
		// overwrite data of the entry we're going to allocate.
		// We can't do anything until this entry can be discarded.
		return false;
	}

	if (newFirst == NULL) {
		// everything is freed -- which practically can't happen, as long
		// as the buffer is large enough to hold three max-sized entries
		sFirstEntry = sAfterLastEntry = sBuffer;
		TRACE(("free_first_entry(): all entries freed!\n"));
	} else
		sFirstEntry = newFirst;

	return true;
}

/*!	Makes sure we have at least \a needed * 4 bytes of memory at
	sAfterLastEntry. Returns \c false, if unable to free that much.
*/
static bool
make_space(size_t needed)
{
	// we need space for sAfterLastEntry, too (in case we need to wrap around
	// later)
	needed++;

	// If there's not enough space (free or occupied) after sAfterLastEntry,
	// we free all entries in that region and wrap around.
	if (sAfterLastEntry + needed > sBuffer + kBufferSize) {
		TRACE(("make_space(%lu), wrapping around: after last: %p\n", needed,
			sAfterLastEntry));

		// Free all entries after sAfterLastEntry and one more at the beginning
		// of the buffer.
		while (sFirstEntry > sAfterLastEntry) {
			if (!free_first_entry())
				return false;
		}
		if (sAfterLastEntry != sBuffer && !free_first_entry())
			return false;

		// just in case free_first_entry() freed the very last existing entry
		if (sAfterLastEntry == sBuffer)
			return true;

		// mark as wrap entry and actually wrap around
		trace_entry* wrapEntry = sAfterLastEntry;
		wrapEntry->size = 0;
		wrapEntry->flags = WRAP_ENTRY;
		sAfterLastEntry = sBuffer;
		sAfterLastEntry->previous_size = sBuffer + kBufferSize - wrapEntry;
	}

	if (sFirstEntry <= sAfterLastEntry) {
		// buffer is empty or the space after sAfterLastEntry is unoccupied
		return true;
	}

	// free the first entries, until there's enough space
	size_t space = sFirstEntry - sAfterLastEntry;

	if (space < needed) {
		TRACE(("make_space(%lu), left %ld\n", needed, space));
	}

	while (space < needed) {
		space += sFirstEntry->size;

		if (!free_first_entry())
			return false;
	}

	TRACE(("  out: start %p, entries %ld\n", sFirstEntry, sEntries));

	return true;
}

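// Allocates an entry of "size" bytes (the caller includes the trace_entry
// header; the size is rounded up to 4-byte units) in the ring buffer,
// discarding the oldest entries via make_space() if necessary. Returns NULL
// if tracing is not initialized, the request is too large, or the space
// cannot be freed. The buffer is protected by sLock.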
static trace_entry*
allocate_entry(size_t size, uint16 flags)
{
	if (sAfterLastEntry == NULL || size == 0 || size >= 65532)
		return NULL;

	InterruptsSpinLocker _(sLock);

	size = (size + 3) >> 2;
		// 4 byte aligned, don't store the lower 2 bits

	TRACE(("allocate_entry(%lu), start %p, end %p, buffer %p\n", size * 4,
		sFirstEntry, sAfterLastEntry, sBuffer));

	if (!make_space(size))
		return NULL;

	trace_entry* entry = sAfterLastEntry;
	entry->size = size;
	entry->flags = flags;
	sAfterLastEntry += size;
	sAfterLastEntry->previous_size = size;

	if (!(flags & BUFFER_ENTRY))
		sEntries++;

	TRACE(("  entry: %p, end %p, start %p, entries %ld\n", entry,
		sAfterLastEntry, sFirstEntry, sEntries));

	return entry;
}


#endif	// ENABLE_TRACING


// #pragma mark -


TraceOutput::TraceOutput(char* buffer, size_t bufferSize, uint32 flags)
	: fBuffer(buffer),
	  fCapacity(bufferSize),
	  fFlags(flags)
{
	Clear();
}


void
TraceOutput::Clear()
{
	if (fCapacity > 0)
		fBuffer[0] = '\0';
	fSize = 0;
}


void
TraceOutput::Print(const char* format,...)
{
#if ENABLE_TRACING
	if (IsFull())
		return;

	va_list args;
	va_start(args, format);
	fSize += vsnprintf(fBuffer + fSize, fCapacity - fSize, format, args);
	va_end(args);
#endif
}


void
TraceOutput::PrintStackTrace(tracing_stack_trace* stackTrace)
{
#if ENABLE_TRACING
	if (stackTrace == NULL || stackTrace->depth <= 0)
		return;

	for (int32 i = 0; i < stackTrace->depth; i++) {
		addr_t address = stackTrace->return_addresses[i];

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			Print("  %p  %s + 0x%lx (%s)%s\n", (void*)address, symbol,
				address - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			Print("  %p\n", (void*)address);
	}
#endif
}


void
TraceOutput::SetLastEntryTime(bigtime_t time)
{
	fLastEntryTime = time;
}


bigtime_t
TraceOutput::LastEntryTime() const
{
	return fLastEntryTime;
}


//	#pragma mark -


TraceEntry::TraceEntry()
{
}


TraceEntry::~TraceEntry()
{
}


void
TraceEntry::Dump(TraceOutput& out)
{
#if ENABLE_TRACING
	// to be overridden by subclasses
	out.Print("ENTRY %p", this);
#endif
}


void
TraceEntry::DumpStackTrace(TraceOutput& out)
{
}


void
TraceEntry::Initialized()
{
#if ENABLE_TRACING
	ToTraceEntry()->flags |= ENTRY_INITIALIZED;
	sWritten++;
#endif
}


void*
TraceEntry::operator new(size_t size, const std::nothrow_t&) throw()
{
#if ENABLE_TRACING
	trace_entry* entry = allocate_entry(size + sizeof(trace_entry), 0);
	return entry != NULL ? entry + 1 : NULL;
#endif
	return NULL;
}


//	#pragma mark -


AbstractTraceEntry::AbstractTraceEntry()
{
	struct thread* thread = thread_get_current_thread();
	if (thread != NULL) {
		fThread = thread->id;
		if (thread->team)
			fTeam = thread->team->id;
	}
	fTime = system_time();
}

AbstractTraceEntry::~AbstractTraceEntry()
{
}


void
AbstractTraceEntry::Dump(TraceOutput& out)
{
	bigtime_t time = (out.Flags() & TRACE_OUTPUT_DIFF_TIME)
		? fTime - out.LastEntryTime()
		: fTime;

	if (out.Flags() & TRACE_OUTPUT_TEAM_ID)
		out.Print("[%6ld:%6ld] %10Ld: ", fThread, fTeam, time);
	else
		out.Print("[%6ld] %10Ld: ", fThread, time);

	AddDump(out);

	out.SetLastEntryTime(fTime);
}


void
AbstractTraceEntry::AddDump(TraceOutput& out)
{
}


//	#pragma mark -


#if ENABLE_TRACING

class KernelTraceEntry : public AbstractTraceEntry {
	public:
		KernelTraceEntry(const char* message)
		{
			fMessage = alloc_tracing_buffer_strcpy(message, 256, false);

#if KTRACE_PRINTF_STACK_TRACE
			fStackTrace = capture_tracing_stack_trace(
				KTRACE_PRINTF_STACK_TRACE, 1, false);
#endif
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("kern: %s", fMessage);
		}

#if KTRACE_PRINTF_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

	private:
		char*	fMessage;
#if KTRACE_PRINTF_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};


class UserTraceEntry : public AbstractTraceEntry {
	public:
		UserTraceEntry(const char* message)
		{
			fMessage = alloc_tracing_buffer_strcpy(message, 256, true);

			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("user: %s", fMessage);
		}

	private:
		char*	fMessage;
};

#endif	// ENABLE_TRACING


//	#pragma mark - trace filters


TraceFilter::~TraceFilter()
{
}


bool
TraceFilter::Filter(const TraceEntry* entry, LazyTraceOutput& out)
{
	return false;
}



class ThreadTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
	{
		const AbstractTraceEntry* entry
			= dynamic_cast<const AbstractTraceEntry*>(_entry);
		return (entry != NULL && entry->Thread() == fThread);
	}
};


class TeamTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
	{
		const AbstractTraceEntry* entry
			= dynamic_cast<const AbstractTraceEntry*>(_entry);
		return (entry != NULL && entry->Team() == fTeam);
	}
};


class PatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return strstr(out.DumpEntry(entry), fString) != NULL;
	}
};


class DecimalPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%Ld", fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};

class HexPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%Lx", fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};

class StringPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		if (IS_KERNEL_ADDRESS(fValue))
			return strstr(out.DumpEntry(entry), (const char*)fValue) != NULL;

		// TODO: this is *very* slow
		char buffer[64];
		user_strlcpy(buffer, (const char*)fValue, sizeof(buffer));
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};

class NotTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return !fSubFilters.first->Filter(entry, out);
	}
};


class AndTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			&& fSubFilters.second->Filter(entry, out);
	}
};


class OrTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			|| fSubFilters.second->Filter(entry, out);
	}
};

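// Parses the filter expression syntax of the "traced" debugger command into a
// tree of TraceFilter objects, all placement-allocated in the fixed fFilters
// array (at most MAX_FILTERS nodes). For example, the illustrative input
//
//	and thread 42 #vm
//
// yields an AndTraceFilter whose sub-filters are a ThreadTraceFilter (thread
// ID 42) and a PatternTraceFilter (entry text containing "vm").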
class TraceFilterParser {
public:
	static TraceFilterParser* Default()
	{
		return &sParser;
	}

	bool Parse(int argc, const char* const* argv)
	{
		fTokens = argv;
		fTokenCount = argc;
		fTokenIndex = 0;
		fFilterCount = 0;

		TraceFilter* filter = _ParseExpression();
		return fTokenIndex == fTokenCount && filter != NULL;
	}

	TraceFilter* Filter()
	{
		return &fFilters[0];
	}

private:
	TraceFilter* _ParseExpression()
	{
		const char* token = _NextToken();
		if (!token) {
			// unexpected end of expression
			return NULL;
		}

		if (fFilterCount == MAX_FILTERS) {
			// too many filters
			return NULL;
		}

		if (token[0] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				PatternTraceFilter;
			filter->fString = token + 1;
			return filter;
		} else if (token[0] == 'd' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				DecimalPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 'x' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				HexPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 's' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				StringPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (strcmp(token, "not") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) NotTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL)
				return filter;
			return NULL;
		} else if (strcmp(token, "and") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) AndTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "or") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) OrTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "thread") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				ThreadTraceFilter;
			filter->fThread = strtol(arg, NULL, 0);
			return filter;
		} else if (strcmp(token, "team") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				TeamTraceFilter;
			filter->fTeam = strtol(arg, NULL, 0);
			return filter;
		} else {
			// invalid token
			return NULL;
		}
	}

	const char* _CurrentToken() const
	{
		if (fTokenIndex >= 1 && fTokenIndex <= fTokenCount)
			return fTokens[fTokenIndex - 1];
		return NULL;
	}

	const char* _NextToken()
	{
		if (fTokenIndex >= fTokenCount)
			return NULL;
		return fTokens[fTokenIndex++];
	}

private:
	enum { MAX_FILTERS = 32 };

	const char* const*			fTokens;
	int							fTokenCount;
	int							fTokenIndex;
	TraceFilter					fFilters[MAX_FILTERS];
	int							fFilterCount;

	static TraceFilterParser	sParser;
};


TraceFilterParser TraceFilterParser::sParser;


//	#pragma mark -


#if ENABLE_TRACING

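// Iterates over the (non-buffer) entries in the trace log. Indices are
// 1-based, matching the numbering printed by the "traced" command; index 0
// means "before the first entry", sEntries + 1 "after the last one". MoveTo()
// walks from the current position, the start, or the end of the log,
// whichever is closest to the requested index.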
class TraceEntryIterator {
public:
	TraceEntryIterator()
		:
		fEntry(NULL),
		fIndex(0)
	{
	}

	void Reset()
	{
		fEntry = NULL;
		fIndex = 0;
	}

	int32 Index() const
	{
		return fIndex;
	}

	TraceEntry* Current() const
	{
		return TraceEntry::FromTraceEntry(fEntry);
	}

	TraceEntry* Next()
	{
		if (fIndex == 0) {
			fEntry = _NextNonBufferEntry(sFirstEntry);
			fIndex = 1;
		} else if (fEntry != NULL) {
			fEntry = _NextNonBufferEntry(next_entry(fEntry));
			fIndex++;
		}

		return Current();
	}

	TraceEntry* Previous()
	{
		if (fIndex == (int32)sEntries + 1)
			fEntry = sAfterLastEntry;

		if (fEntry != NULL) {
			fEntry = _PreviousNonBufferEntry(previous_entry(fEntry));
			fIndex--;
		}

		return Current();
	}

	TraceEntry* MoveTo(int32 index)
	{
		if (index == fIndex)
			return Current();

		if (index <= 0 || index > (int32)sEntries) {
			fIndex = (index <= 0 ? 0 : sEntries + 1);
			fEntry = NULL;
			return NULL;
		}

		// get the shortest iteration path
		int32 distance = index - fIndex;
		int32 direction = distance < 0 ? -1 : 1;
		distance *= direction;

		if (index < distance) {
			distance = index;
			direction = 1;
			fEntry = NULL;
			fIndex = 0;
		}
		if ((int32)sEntries + 1 - fIndex < distance) {
			distance = sEntries + 1 - fIndex;
			direction = -1;
			fEntry = NULL;
			fIndex = sEntries + 1;
		}

		// iterate to the index
		if (direction < 0) {
			while (fIndex != index)
				Previous();
		} else {
			while (fIndex != index)
				Next();
		}

		return Current();
	}

private:
	trace_entry* _NextNonBufferEntry(trace_entry* entry)
	{
		while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
			entry = next_entry(entry);

		return entry;
	}

	trace_entry* _PreviousNonBufferEntry(trace_entry* entry)
	{
		while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
			entry = previous_entry(entry);

		return entry;
	}

private:
	trace_entry*	fEntry;
	int32			fIndex;
};

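// Implements the "traced" debugger command. It is also called by
// dump_tracing(), which allows a caller to pass a WrapperTraceFilter that is
// combined with the user-specified filter. See the usage text registered in
// tracing_init() for the accepted syntax.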
int
dump_tracing_internal(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
	int argi = 1;

	// static variables in which we store our state, so that the command is
	// continuable
	static int32 _previousCount = 0;
	static bool _previousHasFilter = false;
	static bool _previousPrintStackTrace = false;
	static int32 _previousMaxToCheck = 0;
	static int32 _previousFirstChecked = 1;
	static int32 _previousLastChecked = -1;
	static int32 _previousDirection = 1;
	static uint32 _previousWritten = 0;
	static uint32 _previousEntries = 0;
	static uint32 _previousOutputFlags = 0;
	static TraceEntryIterator iterator;


	// Note: start and index are Pascal-like indices (i.e. in [1, sEntries]).
	int32 start = 0;	// special index: print the last count entries
	int32 count = 0;
	int32 maxToCheck = 0;
	int32 cont = 0;

	bool hasFilter = false;
	bool printStackTrace = false;

	uint32 outputFlags = 0;
	while (argi < argc) {
		if (strcmp(argv[argi], "--difftime") == 0) {
			outputFlags |= TRACE_OUTPUT_DIFF_TIME;
			argi++;
		} else if (strcmp(argv[argi], "--printteam") == 0) {
			outputFlags |= TRACE_OUTPUT_TEAM_ID;
			argi++;
		} else if (strcmp(argv[argi], "--stacktrace") == 0) {
			printStackTrace = true;
			argi++;
		} else
			break;
	}

	if (argi < argc) {
		if (strcmp(argv[argi], "forward") == 0) {
			cont = 1;
			argi++;
		} else if (strcmp(argv[argi], "backward") == 0) {
			cont = -1;
			argi++;
		}
	} else
		cont = _previousDirection;

	if (cont != 0) {
		if (argi < argc) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
		if (sWritten == 0 || sWritten != _previousWritten
			|| sEntries != _previousEntries) {
			kprintf("Can't continue iteration. \"%s\" has not been invoked "
				"before, or there were new entries written since the last "
				"invocation.\n", argv[0]);
			return 0;
		}
	}

	// get start, count, maxToCheck
	int32* params[3] = { &start, &count, &maxToCheck };
	for (int i = 0; i < 3 && !hasFilter && argi < argc; i++) {
		if (strcmp(argv[argi], "filter") == 0) {
			hasFilter = true;
			argi++;
		} else if (argv[argi][0] == '#') {
			hasFilter = true;
		} else {
			*params[i] = parse_expression(argv[argi]);
			argi++;
		}
	}

	// filter specification
	if (argi < argc) {
		hasFilter = true;
		if (strcmp(argv[argi], "filter") == 0)
			argi++;

		if (!TraceFilterParser::Default()->Parse(argc - argi, argv + argi)) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	int32 direction;
	int32 firstToCheck;
	int32 lastToCheck;

	if (cont != 0) {
		// get values from the previous iteration
		direction = cont;
		count = _previousCount;
		maxToCheck = _previousMaxToCheck;
		hasFilter = _previousHasFilter;
		outputFlags = _previousOutputFlags;
		printStackTrace = _previousPrintStackTrace;

		if (direction < 0)
			start = _previousFirstChecked - 1;
		else
			start = _previousLastChecked + 1;
	} else {
		// defaults for count and maxToCheck
		if (count == 0)
			count = 30;
		if (maxToCheck == 0 || !hasFilter)
			maxToCheck = count;
		else if (maxToCheck < 0)
			maxToCheck = sEntries;

		// determine iteration direction
		direction = (start <= 0 || count < 0 ? -1 : 1);

		// validate count and maxToCheck
		if (count < 0)
			count = -count;
		if (maxToCheck < 0)
			maxToCheck = -maxToCheck;
		if (maxToCheck > (int32)sEntries)
			maxToCheck = sEntries;
		if (count > maxToCheck)
			count = maxToCheck;

		// validate start
		if (start <= 0 || start > (int32)sEntries)
			start = max_c(1, sEntries);
	}

	if (direction < 0) {
		firstToCheck = max_c(1, start - maxToCheck + 1);
		lastToCheck = start;
	} else {
		firstToCheck = start;
		lastToCheck = min_c((int32)sEntries, start + maxToCheck - 1);
	}

	// reset the iterator, if something changed in the meantime
	if (sWritten == 0 || sWritten != _previousWritten
		|| sEntries != _previousEntries) {
		iterator.Reset();
	}

	LazyTraceOutput out(sTraceOutputBuffer, kTraceOutputBufferSize,
		outputFlags);

	bool markedMatching = false;
	int32 firstToDump = firstToCheck;
	int32 lastToDump = lastToCheck;

	TraceFilter* filter = NULL;
	if (hasFilter)
		filter = TraceFilterParser::Default()->Filter();

	if (wrapperFilter != NULL) {
		wrapperFilter->Init(filter, direction, cont != 0);
		filter = wrapperFilter;
	}

	if (direction < 0 && filter && lastToCheck - firstToCheck >= count) {
		// iteration direction is backwards
		markedMatching = true;

		// From the last entry to check iterate backwards to check filter
		// matches.
		int32 matching = 0;

		// move to the entry after the last entry to check
		iterator.MoveTo(lastToCheck + 1);

		// iterate backwards
		firstToDump = -1;
		lastToDump = -1;
		while (iterator.Index() > firstToCheck) {
			TraceEntry* entry = iterator.Previous();
			if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
				out.Clear();
				if (filter->Filter(entry, out)) {
					entry->ToTraceEntry()->flags |= FILTER_MATCH;
					if (lastToDump == -1)
						lastToDump = iterator.Index();
					firstToDump = iterator.Index();

					matching++;
					if (matching >= count)
						break;
				} else
					entry->ToTraceEntry()->flags &= ~FILTER_MATCH;
			}
		}

		firstToCheck = iterator.Index();

		// iterate to the previous entry, so that the next loop starts at the
		// right one
		iterator.Previous();
	}

	out.SetLastEntryTime(0);

	// set the iterator to the entry before the first one to dump
	iterator.MoveTo(firstToDump - 1);

	// dump the entries matching the filter in the range
	// [firstToDump, lastToDump]
	int32 dumped = 0;

	while (TraceEntry* entry = iterator.Next()) {
		int32 index = iterator.Index();
		if (index < firstToDump)
			continue;
		if (index > lastToDump || dumped >= count) {
			if (direction > 0)
				lastToCheck = index - 1;
			break;
		}

		if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
			out.Clear();
			if (filter && (markedMatching
					? (entry->Flags() & FILTER_MATCH) == 0
					: !filter->Filter(entry, out))) {
				continue;
			}

			// don't print the trailing newline
			const char* dump = out.DumpEntry(entry);
			int len = strlen(dump);
			if (len > 0 && dump[len - 1] == '\n')
				len--;

			kprintf("%5ld. %.*s\n", index, len, dump);

			if (printStackTrace) {
				out.Clear();
				entry->DumpStackTrace(out);
				if (out.Size() > 0)
					kputs(out.Buffer());
			}
		} else if (!filter)
			kprintf("%5ld. ** uninitialized entry **\n", index);

		dumped++;
	}

	kprintf("printed %ld entries within range %ld to %ld (%ld of %ld total, "
		"%ld ever)\n", dumped, firstToCheck, lastToCheck,
		lastToCheck - firstToCheck + 1, sEntries, sWritten);

	// store iteration state
	_previousCount = count;
	_previousMaxToCheck = maxToCheck;
	_previousHasFilter = hasFilter;
	_previousPrintStackTrace = printStackTrace;
	_previousFirstChecked = firstToCheck;
	_previousLastChecked = lastToCheck;
	_previousDirection = direction;
	_previousWritten = sWritten;
	_previousEntries = sEntries;
	_previousOutputFlags = outputFlags;

	return cont != 0 ? B_KDEBUG_CONT : 0;
}


static int
dump_tracing_command(int argc, char** argv)
{
	return dump_tracing_internal(argc, argv, NULL);
}


#endif	// ENABLE_TRACING

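// Allocates a "size" byte buffer in the trace log. The buffer is tagged as a
// BUFFER_ENTRY, i.e. it is skipped when iterating over trace entries; it is
// used, for instance, to store strings and stack traces referenced by
// TraceEntry objects. Returns NULL if tracing is disabled or no space could
// be made available.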
extern "C" uint8*
alloc_tracing_buffer(size_t size)
{
#if	ENABLE_TRACING
	trace_entry* entry = allocate_entry(size + sizeof(trace_entry),
		BUFFER_ENTRY);
	if (entry == NULL)
		return NULL;

	return (uint8*)(entry + 1);
#else
	return NULL;
#endif
}


uint8*
alloc_tracing_buffer_memcpy(const void* source, size_t size, bool user)
{
	if (user && !IS_USER_ADDRESS(source))
		return NULL;

	uint8* buffer = alloc_tracing_buffer(size);
	if (buffer == NULL)
		return NULL;

	if (user) {
		if (user_memcpy(buffer, source, size) != B_OK)
			return NULL;
	} else
		memcpy(buffer, source, size);

	return buffer;
}


char*
alloc_tracing_buffer_strcpy(const char* source, size_t maxSize, bool user)
{
	if (source == NULL || maxSize == 0)
		return NULL;

	if (user && !IS_USER_ADDRESS(source))
		return NULL;

	// limit maxSize to the actual length of the source string
	if (user) {
		ssize_t size = user_strlcpy(NULL, source, 0);
			// there's no user_strnlen()
		if (size < 0)
			return 0;
		maxSize = min_c(maxSize, (size_t)size + 1);
	} else
		maxSize = strnlen(source, maxSize - 1) + 1;

	char* buffer = (char*)alloc_tracing_buffer(maxSize);
	if (buffer == NULL)
		return NULL;

	if (user) {
		if (user_strlcpy(buffer, source, maxSize) < B_OK)
			return NULL;
	} else
		strlcpy(buffer, source, maxSize);

	return buffer;
}

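// Captures a stack trace of at most maxCount return addresses into a buffer
// allocated from the trace log (see arch_debug_get_stack_trace() for the
// meaning of userOnly). skipFrames is incremented by one so that this
// function's own frame is not included. Returns NULL when tracing is disabled
// or while interrupts are disabled (see the TODO in the function).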
tracing_stack_trace*
capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool userOnly)
{
#if	ENABLE_TRACING
	// TODO: page_fault_exception() doesn't allow us to gracefully handle
	// a bad address in the stack trace, if interrupts are disabled.
	if (!are_interrupts_enabled())
		return NULL;

	tracing_stack_trace* stackTrace
		= (tracing_stack_trace*)alloc_tracing_buffer(
			sizeof(tracing_stack_trace) + maxCount * sizeof(addr_t));

	if (stackTrace != NULL) {
		stackTrace->depth = arch_debug_get_stack_trace(
			stackTrace->return_addresses, maxCount, skipFrames + 1, userOnly);
	}

	return stackTrace;
#else
	return NULL;
#endif
}


int
dump_tracing(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
#if	ENABLE_TRACING
	return dump_tracing_internal(argc, argv, wrapperFilter);
#else
	return 0;
#endif
}


extern "C" status_t
tracing_init(void)
{
#if	ENABLE_TRACING
	area_id area = create_area("tracing log", (void**)&sTraceOutputBuffer,
		B_ANY_KERNEL_ADDRESS, MAX_TRACE_SIZE + kTraceOutputBufferSize,
		B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK)
		return area;

	sBuffer = (trace_entry*)(sTraceOutputBuffer + kTraceOutputBufferSize);
	sFirstEntry = sBuffer;
	sAfterLastEntry = sBuffer;

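	// A few illustrative invocations of the command registered below (these
	// examples are not part of the original documentation):
	//   traced 0 10                  -- print the 10 most recent entries
	//   traced --stacktrace 100 20   -- print entries 100..119, with stack
	//                                   traces where captured
	//   traced 0 30 -1 filter and thread 42 #vm
	//                                -- the last 30 entries of thread 42 whose
	//                                   text contains "vm"
	//   traced forward               -- continue where the last dump stopped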
	add_debugger_command_etc("traced", &dump_tracing_command,
		"Dump recorded trace entries",
		"[ \"--printteam\" ] [ \"--difftime\" ] (\"forward\" | \"backward\") "
			"| ([ <start> [ <count> [ <range> ] ] ] "
				"[ #<pattern> | (\"filter\" <filter>) ])\n"
		"Prints recorded trace entries. If \"backward\" or \"forward\" is\n"
		"specified, the command continues where the previous invocation left\n"
		"off, i.e. printing the previous or next entries, respectively (as\n"
		"many as printed before). In this case the command is continuable,\n"
		"that is, afterwards entering an empty line in the debugger will\n"
		"reinvoke it.\n"
		"If no arguments are given, the command continues in the direction\n"
		"of the last invocation.\n"
		"\"--printteam\"  enables printing the entries' team IDs.\n"
		"\"--difftime\"   print difference times for all but the first entry.\n"
		"\"--stacktrace\" print stack traces for entries that captured one.\n"
		"  <start>    - The base index of the entries to print. Depending on\n"
		"               whether the iteration direction is forward or\n"
		"               backward this will be the first or last entry printed\n"
		"               (potentially, if a filter is specified). The index of\n"
		"               the first entry in the trace buffer is 1. If 0 is\n"
		"               specified, the last <count> recorded entries are\n"
		"               printed (iteration direction is backward). Defaults\n"
		"               to 0.\n"
		"  <count>    - The number of entries to be printed. Defaults to 30.\n"
		"               If negative, the -<count> entries before and\n"
		"               including <start> will be printed.\n"
		"  <range>    - Only relevant if a filter is specified. Specifies the\n"
		"               number of entries to be filtered -- depending on the\n"
		"               iteration direction the entries before or after\n"
		"               <start>. If more than <count> entries match the\n"
		"               filter, only the first (forward) or last (backward)\n"
		"               <count> matching entries will be printed. If 0 is\n"
		"               specified <range> will be set to <count>. If -1,\n"
		"               <range> will be set to the number of recorded\n"
		"               entries.\n"
		"  <pattern>  - If specified only entries containing this string are\n"
		"               printed.\n"
		"  <filter>   - If specified only entries matching this filter\n"
		"               expression are printed. The expression can consist of\n"
		"               prefix operators \"not\", \"and\", \"or\", and\n"
		"               filters \"'thread' <thread>\" (matching entries\n"
		"               with the given thread ID), \"'team' <team>\"\n"
		"               (matching entries with the given team ID), and\n"
		"               \"#<pattern>\" (matching entries containing the given\n"
		"               string).\n", 0);
#endif	// ENABLE_TRACING
	return B_OK;
}

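// Formats the message into a 256 byte buffer (longer messages are truncated)
// and records it as a KernelTraceEntry. An illustrative use (not taken from
// the original source):
//
//	ktrace_printf("page fault at %p, ip %p", (void*)address, (void*)ip);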
void
ktrace_printf(const char *format, ...)
{
#if	ENABLE_TRACING
	va_list list;
	va_start(list, format);

	char buffer[256];
	vsnprintf(buffer, sizeof(buffer), format, list);

	va_end(list);

	new(nothrow) KernelTraceEntry(buffer);
#endif	// ENABLE_TRACING
}


void
_user_ktrace_output(const char *message)
{
#if	ENABLE_TRACING
	new(nothrow) UserTraceEntry(message);
#endif	// ENABLE_TRACING
}
