xref: /haiku/src/system/kernel/debug/tracing.cpp (revision 3be9edf8da228afd9fec0390f408c964766122aa)
1 /*
2  * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2008-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 
8 #include <tracing.h>
9 
10 #include <stdarg.h>
11 #include <stdlib.h>
12 
13 #include <arch/debug.h>
14 #include <debug.h>
15 #include <elf.h>
16 #include <int.h>
17 #include <kernel.h>
18 #include <team.h>
19 #include <thread.h>
20 #include <util/AutoLock.h>
21 #include <vm.h>
22 
23 
// A captured kernel stack trace attached to a trace entry.
// The return addresses are stored inline right after the struct; the
// zero-length array is the (pre-C99 flexible array member) idiom for a
// variable-length tail allocated together with the header.
struct tracing_stack_trace {
	int32	depth;						// number of valid return addresses
	addr_t	return_addresses[0];		// 'depth' frames, innermost first
};
28 
29 
30 #if ENABLE_TRACING
31 
32 //#define TRACE_TRACING
33 #ifdef TRACE_TRACING
34 #	define TRACE(x) dprintf_no_syslog x
35 #else
36 #	define TRACE(x) ;
37 #endif
38 
39 
// Flag bits stored in trace_entry::flags.
enum {
	WRAP_ENTRY			= 0x01,	// marks the dummy entry at the buffer end that
								// redirects iteration back to the start
	ENTRY_INITIALIZED	= 0x02,	// the TraceEntry constructor has completed
	BUFFER_ENTRY		= 0x04,	// raw buffer allocation, not a TraceEntry
	FILTER_MATCH		= 0x08,	// entry matched the current dump filter
	INVALID_ENTRY		= 0x10,	// entry failed validation after recovery
	CHECK_ENTRY			= 0x20,	// recovered entry, still needs validation
};
48 
49 
// Size of the scratch buffer used for formatting entry output.
static const size_t kTraceOutputBufferSize = 10240;
// Capacity of the ring buffer, in trace_entry units (not bytes).
static const size_t kBufferSize = MAX_TRACE_SIZE / sizeof(trace_entry);

// Give up recovering a previous session's buffer after this many repairs.
static const uint32 kMaxRecoveringErrorCount	= 100;
// Physical address range probed for a previous session's metadata area.
static const addr_t kMetaDataBaseAddress		= 32 * 1024 * 1024;
static const addr_t kMetaDataBaseEndAddress		= 128 * 1024 * 1024;
static const addr_t kMetaDataAddressIncrement	= 8 * 1024 * 1024;
// Magic values ("ValidTracing") identifying an initialized metadata area.
static const uint32 kMetaDataMagic1 = 'Vali';
static const uint32 kMetaDataMagic2 = 'dTra';
static const uint32 kMetaDataMagic3 = 'cing';

// the maximum we can address with the trace_entry::[previous_]size fields
static const size_t kMaxTracingEntryByteSize
	= ((1 << 13) - 1) * sizeof(trace_entry);
64 
65 
// Bookkeeping for the tracing ring buffer. The instance lives in a dedicated
// kernel area placed at a known physical address range so that Create() can
// re-discover it after a reboot (via the magic values) and recover the
// previous session's trace entries.
class TracingMetaData {
public:
	static	status_t			Create(TracingMetaData*& _metaData);

	inline	bool				Lock();
	inline	void				Unlock();

	inline	trace_entry*		FirstEntry() const;
	inline	trace_entry*		AfterLastEntry() const;

	inline	uint32				Entries() const;
	inline	uint32				EntriesEver() const;

	inline	void				IncrementEntriesEver();

	inline	char*				TraceOutputBuffer() const;

			trace_entry*		NextEntry(trace_entry* entry);
			trace_entry*		PreviousEntry(trace_entry* entry);

			trace_entry*		AllocateEntry(size_t size, uint16 flags);

private:
			bool				_FreeFirstEntry();
			bool				_MakeSpace(size_t needed);

	static	status_t			_CreateMetaDataArea(bool findPrevious,
									area_id& _area,
									TracingMetaData*& _metaData);
			bool				_InitPreviousTracingData();

private:
			uint32				fMagic1;			// kMetaDataMagic1 if valid
			trace_entry*		fBuffer;			// start of the ring buffer
			trace_entry*		fFirstEntry;		// oldest entry
			trace_entry*		fAfterLastEntry;	// one past the newest entry
			uint32				fEntries;			// non-buffer entry count
			uint32				fMagic2;			// kMetaDataMagic2 if valid
			uint32				fEntriesEver;		// total entries ever created
			spinlock			fLock;				// guards buffer mutation
			char*				fTraceOutputBuffer;	// formatting scratch buffer
			addr_t				fPhysicalAddress;	// of fTraceOutputBuffer
			uint32				fMagic3;			// kMetaDataMagic3 if valid
};
110 
// Fallback object so sTracingMetaData is always dereferenceable, even before
// tracing_init() has run (AllocateEntry() then bails out on NULL members).
static TracingMetaData sDummyTracingMetaData;
static TracingMetaData* sTracingMetaData = &sDummyTracingMetaData;
// Set when a previous session's buffer was recovered; its entries still carry
// CHECK_ENTRY and must be validated before use.
static bool sTracingDataRecovered = false;
114 
115 
116 // #pragma mark -
117 
118 
119 // #pragma mark - TracingMetaData
120 
121 
// Acquires the buffer spinlock. Always returns true (signature kept for use
// in contexts expecting a try-lock-style interface).
bool
TracingMetaData::Lock()
{
	acquire_spinlock(&fLock);
	return true;
}


// Releases the buffer spinlock.
void
TracingMetaData::Unlock()
{
	release_spinlock(&fLock);
}


// Returns the oldest entry in the ring buffer.
trace_entry*
TracingMetaData::FirstEntry() const
{
	return fFirstEntry;
}


// Returns the position one past the newest entry.
trace_entry*
TracingMetaData::AfterLastEntry() const
{
	return fAfterLastEntry;
}


// Returns the number of (non-buffer) entries currently in the buffer.
uint32
TracingMetaData::Entries() const
{
	return fEntries;
}


// Returns the total number of entries ever created (including discarded ones).
uint32
TracingMetaData::EntriesEver() const
{
	return fEntriesEver;
}


void
TracingMetaData::IncrementEntriesEver()
{
	fEntriesEver++;
		// NOTE: Race condition on SMP machines! We should use atomic_add(),
		// though that costs some performance and the information is for
		// informational purpose anyway.
}


// Returns the scratch buffer used for formatting entry output.
char*
TracingMetaData::TraceOutputBuffer() const
{
	return fTraceOutputBuffer;
}
180 
181 
182 trace_entry*
183 TracingMetaData::NextEntry(trace_entry* entry)
184 {
185 	entry += entry->size;
186 	if ((entry->flags & WRAP_ENTRY) != 0)
187 		entry = fBuffer;
188 
189 	if (entry == fAfterLastEntry)
190 		return NULL;
191 
192 	return entry;
193 }
194 
195 
196 trace_entry*
197 TracingMetaData::PreviousEntry(trace_entry* entry)
198 {
199 	if (entry == fFirstEntry)
200 		return NULL;
201 
202 	if (entry == fBuffer) {
203 		// beginning of buffer -- previous entry is a wrap entry
204 		entry = fBuffer + kBufferSize - entry->previous_size;
205 	}
206 
207 	return entry - entry->previous_size;
208 }
209 
210 
/*!	Allocates \a size bytes (plus header) in the ring buffer, freeing the
	oldest entries if necessary. \a flags is stored in the new entry's flags
	field (e.g. BUFFER_ENTRY). Returns \c NULL if tracing is not initialized,
	\a size is 0 or too large, or space could not be made.
*/
trace_entry*
TracingMetaData::AllocateEntry(size_t size, uint16 flags)
{
	// fAfterLastEntry is NULL before tracing_init() has set up the buffer
	if (fAfterLastEntry == NULL || size == 0
		|| size >= kMaxTracingEntryByteSize) {
		return NULL;
	}

	InterruptsSpinLocker _(fLock);

	size = (size + 3) >> 2;
		// 4 byte aligned, don't store the lower 2 bits

	TRACE(("AllocateEntry(%lu), start %p, end %p, buffer %p\n", size * 4,
		fFirstEntry, fAfterLastEntry, fBuffer));

	if (!_MakeSpace(size))
		return NULL;

	trace_entry* entry = fAfterLastEntry;
	entry->size = size;
	entry->flags = flags;
	fAfterLastEntry += size;
	// store our size in the next slot so PreviousEntry() can walk backwards
	fAfterLastEntry->previous_size = size;

	// buffer entries are raw allocations, not counted as trace entries
	if (!(flags & BUFFER_ENTRY))
		fEntries++;

	TRACE(("  entry: %p, end %p, start %p, entries %ld\n", entry,
		fAfterLastEntry, fFirstEntry, fEntries));

	return entry;
}
244 
245 
/*!	Discards the oldest entry in the buffer, advancing fFirstEntry.
	Returns \c false if the entry is a not-yet-initialized TraceEntry, which
	must not be reused while its constructor may still be running.
	Caller must hold fLock.
*/
bool
TracingMetaData::_FreeFirstEntry()
{
	TRACE(("  skip start %p, %lu*4 bytes\n", fFirstEntry, fFirstEntry->size));

	trace_entry* newFirst = NextEntry(fFirstEntry);

	if (fFirstEntry->flags & BUFFER_ENTRY) {
		// a buffer entry -- just skip it
	} else if (fFirstEntry->flags & ENTRY_INITIALIZED) {
		// Fully initialized TraceEntry: We could destroy it, but don't do so
		// for sake of robustness. The destructors of tracing entry classes
		// should be empty anyway.
		fEntries--;
	} else {
		// Not fully initialized TraceEntry. We can't free it, since
		// then its constructor might still write into the memory and
		// overwrite data of the entry we're going to allocate.
		// We can't do anything until this entry can be discarded.
		return false;
	}

	if (newFirst == NULL) {
		// everything is freed -- practically this can't happen, if
		// the buffer is large enough to hold three max-sized entries
		fFirstEntry = fAfterLastEntry = fBuffer;
		TRACE(("_FreeFirstEntry(): all entries freed!\n"));
	} else
		fFirstEntry = newFirst;

	return true;
}
278 
279 
/*!	Makes sure we have needed * 4 bytes of memory at fAfterLastEntry.
	Frees the oldest entries, wrapping around the ring buffer if necessary.
	Returns \c false, if unable to free that much.
	Caller must hold fLock.
*/
bool
TracingMetaData::_MakeSpace(size_t needed)
{
	// we need space for fAfterLastEntry, too (in case we need to wrap around
	// later)
	needed++;

	// If there's not enough space (free or occupied) after fAfterLastEntry,
	// we free all entries in that region and wrap around.
	if (fAfterLastEntry + needed > fBuffer + kBufferSize) {
		TRACE(("_MakeSpace(%lu), wrapping around: after last: %p\n", needed,
			fAfterLastEntry));

		// Free all entries after fAfterLastEntry and one more at the beginning
		// of the buffer.
		while (fFirstEntry > fAfterLastEntry) {
			if (!_FreeFirstEntry())
				return false;
		}
		if (fAfterLastEntry != fBuffer && !_FreeFirstEntry())
			return false;

		// just in case _FreeFirstEntry() freed the very last existing entry
		if (fAfterLastEntry == fBuffer)
			return true;

		// mark as wrap entry and actually wrap around
		trace_entry* wrapEntry = fAfterLastEntry;
		wrapEntry->size = 0;
		wrapEntry->flags = WRAP_ENTRY;
		fAfterLastEntry = fBuffer;
		// record the distance back to the wrap entry, so PreviousEntry()
		// can find it from the buffer start
		fAfterLastEntry->previous_size = fBuffer + kBufferSize - wrapEntry;
	}

	if (fFirstEntry <= fAfterLastEntry) {
		// buffer is empty or the space after fAfterLastEntry is unoccupied
		return true;
	}

	// free the first entries, until there's enough space
	size_t space = fFirstEntry - fAfterLastEntry;

	if (space < needed) {
		TRACE(("_MakeSpace(%lu), left %ld\n", needed, space));
	}

	while (space < needed) {
		space += fFirstEntry->size;

		if (!_FreeFirstEntry())
			return false;
	}

	TRACE(("  out: start %p, entries %ld\n", fFirstEntry, fEntries));

	return true;
}
340 
341 
342 /*static*/ status_t
343 TracingMetaData::Create(TracingMetaData*& _metaData)
344 {
345 	// search meta data in memory (from previous session)
346 	area_id area;
347 	TracingMetaData* metaData;
348 	status_t error = _CreateMetaDataArea(true, area, metaData);
349 	if (error == B_OK) {
350 		if (metaData->_InitPreviousTracingData()) {
351 			_metaData = metaData;
352 			return B_OK;
353 		}
354 
355 		dprintf("Found previous tracing meta data, but failed to init.\n");
356 
357 		// invalidate the meta data
358 		metaData->fMagic1 = 0;
359 		metaData->fMagic2 = 0;
360 		metaData->fMagic3 = 0;
361 		delete_area(area);
362 	} else
363 		dprintf("No previous tracing meta data found.\n");
364 
365 	// no previous tracng data found -- create new one
366 	error = _CreateMetaDataArea(false, area, metaData);
367 	if (error != B_OK)
368 		return error;
369 
370 	area = create_area("tracing log",
371 		(void**)&metaData->fTraceOutputBuffer, B_ANY_KERNEL_ADDRESS,
372 		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
373 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
374 	if (area < 0)
375 		return area;
376 
377 	// get the physical address
378 	physical_entry physicalEntry;
379 	if (get_memory_map(metaData->fTraceOutputBuffer, B_PAGE_SIZE,
380 			&physicalEntry, 1) == B_OK) {
381 		metaData->fPhysicalAddress = (addr_t)physicalEntry.address;
382 	} else {
383 		dprintf("TracingMetaData::Create(): failed to get physical address "
384 			"of tracing buffer\n");
385 		metaData->fPhysicalAddress = 0;
386 	}
387 
388 	metaData->fBuffer = (trace_entry*)(metaData->fTraceOutputBuffer
389 		+ kTraceOutputBufferSize);
390 	metaData->fFirstEntry = metaData->fBuffer;
391 	metaData->fAfterLastEntry = metaData->fBuffer;
392 
393 	metaData->fEntries = 0;
394 	metaData->fEntriesEver = 0;
395 	B_INITIALIZE_SPINLOCK(&metaData->fLock);
396 
397 	metaData->fMagic1 = kMetaDataMagic1;
398 	metaData->fMagic2 = kMetaDataMagic2;
399 	metaData->fMagic3 = kMetaDataMagic3;
400 
401 	_metaData = metaData;
402 	return B_OK;
403 }
404 
405 
/*!	Creates the metadata area at one of the fixed candidate physical
	addresses. If \a findPrevious is \c true, only an area whose contents
	carry the three magic values (i.e. an intact previous session) is
	accepted; otherwise the first successfully created area is returned.
	Returns \c B_ENTRY_NOT_FOUND if no candidate address worked out.
*/
/*static*/ status_t
TracingMetaData::_CreateMetaDataArea(bool findPrevious, area_id& _area,
	TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	TracingMetaData* metaData;
	addr_t metaDataAddress = kMetaDataBaseAddress;
	for (; metaDataAddress <= kMetaDataBaseEndAddress;
			metaDataAddress += kMetaDataAddressIncrement) {
		// CREATE_AREA_DONT_CLEAR keeps a previous session's contents intact
		area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata",
			(void**)&metaData, B_ANY_KERNEL_ADDRESS, B_PAGE_SIZE,
			B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			metaDataAddress, CREATE_AREA_DONT_CLEAR);
		if (area < 0)
			continue;

		if (!findPrevious) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		// accept only a previously initialized metadata block
		if (metaData->fMagic1 == kMetaDataMagic1
			&& metaData->fMagic2 == kMetaDataMagic2
			&& metaData->fMagic3 == kMetaDataMagic3) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		delete_area(area);
	}

	return B_ENTRY_NOT_FOUND;
}
441 
442 
443 bool
444 TracingMetaData::_InitPreviousTracingData()
445 {
446 	addr_t bufferStart
447 		= (addr_t)fTraceOutputBuffer + kTraceOutputBufferSize;
448 	addr_t bufferEnd = bufferStart + MAX_TRACE_SIZE;
449 
450 	if (bufferStart > bufferEnd || (addr_t)fBuffer != bufferStart
451 		|| (addr_t)fFirstEntry % sizeof(trace_entry) != 0
452 		|| (addr_t)fFirstEntry < bufferStart
453 		|| (addr_t)fFirstEntry + sizeof(trace_entry) >= bufferEnd
454 		|| (addr_t)fAfterLastEntry % sizeof(trace_entry) != 0
455 		|| (addr_t)fAfterLastEntry < bufferStart
456 		|| (addr_t)fAfterLastEntry > bufferEnd
457 		|| fPhysicalAddress == 0) {
458 		dprintf("Failed to init tracing meta data: Sanity checks "
459 			"failed.\n");
460 		return false;
461 	}
462 
463 	// re-map the previous tracing buffer
464 	void* buffer = fTraceOutputBuffer;
465 	area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
466 		&buffer, B_EXACT_ADDRESS, kTraceOutputBufferSize + MAX_TRACE_SIZE,
467 		B_CONTIGUOUS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
468 		fPhysicalAddress, CREATE_AREA_DONT_CLEAR);
469 	if (area < 0) {
470 		dprintf("Failed to init tracing meta data: Mapping tracing log "
471 			"buffer failed: %s\n", strerror(area));
472 		return false;
473 	}
474 
475 	// verify/repair the tracing entry list
476 	uint32 errorCount = 0;
477 	uint32 entryCount = 0;
478 	uint32 nonBufferEntryCount = 0;
479 	uint32 previousEntrySize = 0;
480 	trace_entry* entry = fFirstEntry;
481 	while (errorCount <= kMaxRecoveringErrorCount) {
482 		// check previous entry size
483 		if (entry->previous_size != previousEntrySize) {
484 			if (entry != fFirstEntry) {
485 				dprintf("ktrace recovering: entry %p: fixing previous_size "
486 					"size: %lu (should be %lu)\n", entry, entry->previous_size,
487 					previousEntrySize);
488 			}
489 			entry->previous_size = previousEntrySize;
490 		}
491 
492 		if (entry == fAfterLastEntry)
493 			break;
494 
495 		// check size field
496 		if ((entry->flags & WRAP_ENTRY) == 0 && entry->size == 0) {
497 			dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
498 				entry);
499 			fAfterLastEntry = entry;
500 			break;
501 		}
502 
503 		if (entry->size > uint32(fBuffer + kBufferSize - entry)) {
504 			dprintf("ktrace recovering: entry %p: size too big: %lu\n", entry,
505 				entry->size);
506 			fAfterLastEntry = entry;
507 			break;
508 		}
509 
510 		if (entry < fAfterLastEntry && entry + entry->size > fAfterLastEntry) {
511 			dprintf("ktrace recovering: entry %p: entry crosses "
512 				"fAfterLastEntry (%p)\n", entry, fAfterLastEntry);
513 			fAfterLastEntry = entry;
514 			break;
515 		}
516 
517 		// check for wrap entry
518 		if ((entry->flags & WRAP_ENTRY) != 0) {
519 			if ((uint32)(fBuffer + kBufferSize - entry)
520 					> kMaxTracingEntryByteSize / sizeof(trace_entry)) {
521 				dprintf("ktrace recovering: entry %p: wrap entry at invalid "
522 					"buffer location\n", entry);
523 			}
524 
525 			if (entry->size != 0) {
526 				dprintf("ktrace recovering: entry %p: invalid wrap entry "
527 					"size: %lu\n", entry, entry->size);
528 				entry->size = 0;
529 			}
530 
531 			previousEntrySize = fBuffer + kBufferSize - entry;
532 			entry = fBuffer;
533 			continue;
534 		}
535 
536 		if ((entry->flags & BUFFER_ENTRY) == 0) {
537 			entry->flags |= CHECK_ENTRY;
538 			nonBufferEntryCount++;
539 		}
540 
541 		entryCount++;
542 		previousEntrySize = entry->size;
543 
544 		entry += entry->size;
545 	}
546 
547 	if (errorCount > kMaxRecoveringErrorCount) {
548 		dprintf("ktrace recovering: Too many errors.\n");
549 		fAfterLastEntry = entry;
550 		fAfterLastEntry->previous_size = previousEntrySize;
551 	}
552 
553 	dprintf("ktrace recovering: Recovered %lu entries + %lu buffer entries "
554 		"from previous session. Expected %lu entries.\n", nonBufferEntryCount,
555 		entryCount - nonBufferEntryCount, fEntries);
556 	fEntries = nonBufferEntryCount;
557 
558 	B_INITIALIZE_SPINLOCK(&fLock);
559 
560 	// TODO: Actually check the entries! Do that when first accessing the
561 	// tracing buffer from the kernel debugger (when sTracingDataRecovered is
562 	// true).
563 	sTracingDataRecovered = true;
564 	return true;
565 }
566 
567 
568 #endif	// ENABLE_TRACING
569 
570 
571 // #pragma mark -
572 
573 
// Sets up output into the given \a buffer of \a bufferSize bytes; \a flags
// are TRACE_OUTPUT_* options controlling how entries are formatted.
TraceOutput::TraceOutput(char* buffer, size_t bufferSize, uint32 flags)
	: fBuffer(buffer),
	  fCapacity(bufferSize),
	  fFlags(flags)
{
	Clear();
}
581 
582 
583 void
584 TraceOutput::Clear()
585 {
586 	if (fCapacity > 0)
587 		fBuffer[0] = '\0';
588 	fSize = 0;
589 }
590 
591 
592 void
593 TraceOutput::Print(const char* format,...)
594 {
595 #if ENABLE_TRACING
596 	if (IsFull())
597 		return;
598 
599 	va_list args;
600 	va_start(args, format);
601 	fSize += vsnprintf(fBuffer + fSize, fCapacity - fSize, format, args);
602 	va_end(args);
603 #endif
604 }
605 
606 
// Appends a formatted stack trace, one frame per line, resolving each return
// address to a kernel symbol where possible.
void
TraceOutput::PrintStackTrace(tracing_stack_trace* stackTrace)
{
#if ENABLE_TRACING
	if (stackTrace == NULL || stackTrace->depth <= 0)
		return;

	for (int32 i = 0; i < stackTrace->depth; i++) {
		addr_t address = stackTrace->return_addresses[i];

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			Print("  %p  %s + 0x%lx (%s)%s\n", (void*)address, symbol,
				address - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			Print("  %p\n", (void*)address);
	}
#endif
}
632 
633 
// Remembers the time of the entry just printed; used by the
// TRACE_OUTPUT_DIFF_TIME mode to print per-entry time deltas.
void
TraceOutput::SetLastEntryTime(bigtime_t time)
{
	fLastEntryTime = time;
}


// Returns the time of the previously printed entry.
bigtime_t
TraceOutput::LastEntryTime() const
{
	return fLastEntryTime;
}
646 
647 
648 //	#pragma mark -
649 
650 
// Base class construction/destruction is intentionally empty -- entries live
// inside the tracing ring buffer and are never destroyed explicitly.
TraceEntry::TraceEntry()
{
}


TraceEntry::~TraceEntry()
{
}
659 
660 
// Default dump: prints only the entry's address.
void
TraceEntry::Dump(TraceOutput& out)
{
#if ENABLE_TRACING
	// to be overridden by subclasses
	out.Print("ENTRY %p", this);
#endif
}
669 
670 
// Default: no stack trace; subclasses that capture one override this.
void
TraceEntry::DumpStackTrace(TraceOutput& out)
{
}
675 
676 
// Must be called at the end of a subclass constructor: marks the entry as
// fully constructed (so the buffer may reclaim it) and bumps the global
// entry counter.
void
TraceEntry::Initialized()
{
#if ENABLE_TRACING
	ToTraceEntry()->flags |= ENTRY_INITIALIZED;
	sTracingMetaData->IncrementEntriesEver();
#endif
}
685 
686 
// Allocates the entry inside the tracing ring buffer (never on the heap).
// The returned pointer skips the trace_entry header that precedes the
// object. Returns NULL when tracing is disabled or the buffer has no room.
void*
TraceEntry::operator new(size_t size, const std::nothrow_t&) throw()
{
#if ENABLE_TRACING
	trace_entry* entry = sTracingMetaData->AllocateEntry(
		size + sizeof(trace_entry), 0);
	return entry != NULL ? entry + 1 : NULL;
#endif
	return NULL;
}
697 
698 
699 //	#pragma mark -
700 
701 
702 AbstractTraceEntry::AbstractTraceEntry()
703 {
704 	struct thread* thread = thread_get_current_thread();
705 	if (thread != NULL) {
706 		fThread = thread->id;
707 		if (thread->team)
708 			fTeam = thread->team->id;
709 	}
710 	fTime = system_time();
711 }
712 
713 AbstractTraceEntry::~AbstractTraceEntry()
714 {
715 }
716 
717 
// Prints the common "[thread(:team)] time: " prefix, then delegates the
// entry-specific part to AddDump().
void
AbstractTraceEntry::Dump(TraceOutput& out)
{
	// in diff mode, print the delta to the previously dumped entry
	bigtime_t time = (out.Flags() & TRACE_OUTPUT_DIFF_TIME)
		? fTime - out.LastEntryTime()
		: fTime;

	if (out.Flags() & TRACE_OUTPUT_TEAM_ID)
		out.Print("[%6ld:%6ld] %10Ld: ", fThread, fTeam, time);
	else
		out.Print("[%6ld] %10Ld: ", fThread, time);

	AddDump(out);

	out.SetLastEntryTime(fTime);
}
734 
735 
// Hook for subclasses to print the entry-specific part; default is empty.
void
AbstractTraceEntry::AddDump(TraceOutput& out)
{
}
740 
741 
742 //	#pragma mark -
743 
744 
745 #if ENABLE_TRACING
746 
// Entry created by ktrace_printf(): a free-form message originating in the
// kernel, optionally with a captured stack trace.
class KernelTraceEntry : public AbstractTraceEntry {
	public:
		KernelTraceEntry(const char* message)
		{
			// copy up to 256 bytes of the message into the tracing buffer
			// (false: source is a kernel string)
			fMessage = alloc_tracing_buffer_strcpy(message, 256, false);

#if KTRACE_PRINTF_STACK_TRACE
			// skip 1 frame (this constructor), kernel frames only
			fStackTrace = capture_tracing_stack_trace(
				KTRACE_PRINTF_STACK_TRACE, 1, false);
#endif
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("kern: %s", fMessage);
		}

#if KTRACE_PRINTF_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

	private:
		char*	fMessage;				// lives in the tracing buffer
#if KTRACE_PRINTF_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};
778 
779 
// Entry created by _user_ktrace_output(): a free-form message originating in
// userland, optionally with a captured (kernel) stack trace.
class UserTraceEntry : public AbstractTraceEntry {
	public:
		UserTraceEntry(const char* message)
		{
			// copy up to 256 bytes of the message into the tracing buffer
			// (true: source is a userland string, copied safely)
			fMessage = alloc_tracing_buffer_strcpy(message, 256, true);

#if KTRACE_PRINTF_STACK_TRACE
			// skip 1 frame (this constructor), kernel frames only
			fStackTrace = capture_tracing_stack_trace(
				KTRACE_PRINTF_STACK_TRACE, 1, false);
#endif
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("user: %s", fMessage);
		}

#if KTRACE_PRINTF_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

	private:
		char*	fMessage;				// lives in the tracing buffer
#if KTRACE_PRINTF_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};
811 
812 
// Marker entry written when the tracing log is (re)started, so session
// boundaries are visible in the dump.
class TracingLogStartEntry : public AbstractTraceEntry {
	public:
		TracingLogStartEntry()
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("ktrace start");
		}
};
825 
826 #endif	// ENABLE_TRACING
827 
828 
829 //	#pragma mark - trace filters
830 
831 
TraceFilter::~TraceFilter()
{
}


// Base implementation matches nothing; concrete filters override this.
bool
TraceFilter::Filter(const TraceEntry* entry, LazyTraceOutput& out)
{
	return false;
}
842 
843 
844 
845 class ThreadTraceFilter : public TraceFilter {
846 public:
847 	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
848 	{
849 		const AbstractTraceEntry* entry
850 			= dynamic_cast<const AbstractTraceEntry*>(_entry);
851 		return (entry != NULL && entry->Thread() == fThread);
852 	}
853 };
854 
855 
856 class TeamTraceFilter : public TraceFilter {
857 public:
858 	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
859 	{
860 		const AbstractTraceEntry* entry
861 			= dynamic_cast<const AbstractTraceEntry*>(_entry);
862 		return (entry != NULL && entry->Team() == fTeam);
863 	}
864 };
865 
866 
// Matches entries whose dumped text contains the substring fString.
class PatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// DumpEntry() formats the entry lazily and caches the result
		return strstr(out.DumpEntry(entry), fString) != NULL;
	}
};
874 
875 
// Matches entries whose dumped text contains fValue printed in decimal.
class DecimalPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%Ld", fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};

// Matches entries whose dumped text contains fValue printed in hex.
class HexPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%Lx", fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};
897 
// Matches entries whose dumped text contains the string fValue points to;
// fValue may be a kernel or a userland address.
class StringPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		if (IS_KERNEL_ADDRESS(fValue))
			return strstr(out.DumpEntry(entry), (const char*)fValue) != NULL;

		// userland address -- copy the string safely first
		// TODO: this is *very* slow
		char buffer[64];
		user_strlcpy(buffer, (const char*)fValue, sizeof(buffer));
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};
911 
912 class NotTraceFilter : public TraceFilter {
913 public:
914 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
915 	{
916 		return !fSubFilters.first->Filter(entry, out);
917 	}
918 };
919 
920 
921 class AndTraceFilter : public TraceFilter {
922 public:
923 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
924 	{
925 		return fSubFilters.first->Filter(entry, out)
926 			&& fSubFilters.second->Filter(entry, out);
927 	}
928 };
929 
930 
931 class OrTraceFilter : public TraceFilter {
932 public:
933 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
934 	{
935 		return fSubFilters.first->Filter(entry, out)
936 			|| fSubFilters.second->Filter(entry, out);
937 	}
938 };
939 
940 
/*!	Recursive-descent parser for the trace-dump filter expression language:
		#pattern  d#expr  x#expr  s#expr
		not <f> | and <f> <f> | or <f> <f> | thread <id> | team <id>
	Filters are placement-new'd into a fixed-size pool; the root filter is
	always fFilters[0]. A single static instance is used (kernel debugger
	context, no concurrency).
*/
class TraceFilterParser {
public:
	static TraceFilterParser* Default()
	{
		return &sParser;
	}

	// Parses the token list; returns true if it forms exactly one valid
	// filter expression (no trailing tokens).
	bool Parse(int argc, const char* const* argv)
	{
		fTokens = argv;
		fTokenCount = argc;
		fTokenIndex = 0;
		fFilterCount = 0;

		TraceFilter* filter = _ParseExpression();
		return fTokenIndex == fTokenCount && filter != NULL;
	}

	// Returns the root of the last successfully parsed filter tree.
	TraceFilter* Filter()
	{
		return &fFilters[0];
	}

private:
	// Parses one (possibly nested) filter expression; returns NULL on a
	// syntax error or when the filter pool is exhausted.
	TraceFilter* _ParseExpression()
	{
		const char* token = _NextToken();
		if (!token) {
			// unexpected end of expression
			return NULL;
		}

		if (fFilterCount == MAX_FILTERS) {
			// too many filters
			return NULL;
		}

		if (token[0] == '#') {
			// "#text": plain substring match on the dumped entry
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				PatternTraceFilter;
			filter->fString = token + 1;
			return filter;
		} else if (token[0] == 'd' && token[1] == '#') {
			// "d#expr": match the decimal representation of the value
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				DecimalPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 'x' && token[1] == '#') {
			// "x#expr": match the hex representation of the value
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				HexPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 's' && token[1] == '#') {
			// "s#expr": expression evaluates to a string address to match
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				StringPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (strcmp(token, "not") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) NotTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL)
				return filter;
			return NULL;
		} else if (strcmp(token, "and") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) AndTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "or") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) OrTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "thread") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				ThreadTraceFilter;
			filter->fThread = strtol(arg, NULL, 0);
			return filter;
		} else if (strcmp(token, "team") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				TeamTraceFilter;
			filter->fTeam = strtol(arg, NULL, 0);
			return filter;
		} else {
			// invalid token
			return NULL;
		}
	}

	// Returns the most recently consumed token, or NULL if none.
	const char* _CurrentToken() const
	{
		if (fTokenIndex >= 1 && fTokenIndex <= fTokenCount)
			return fTokens[fTokenIndex - 1];
		return NULL;
	}

	// Consumes and returns the next token, or NULL at the end of input.
	const char* _NextToken()
	{
		if (fTokenIndex >= fTokenCount)
			return NULL;
		return fTokens[fTokenIndex++];
	}

private:
	enum { MAX_FILTERS = 32 };

	const char* const*			fTokens;		// token list being parsed
	int							fTokenCount;
	int							fTokenIndex;	// next token to consume
	TraceFilter					fFilters[MAX_FILTERS];	// filter node pool
	int							fFilterCount;	// nodes used in fFilters

	static TraceFilterParser	sParser;
};
1070 
1071 
// The single shared parser instance returned by Default().
TraceFilterParser TraceFilterParser::sParser;
1073 
1074 
1075 //	#pragma mark -
1076 
1077 
1078 #if ENABLE_TRACING
1079 
1080 
1081 TraceEntry*
1082 TraceEntryIterator::Next()
1083 {
1084 	if (fIndex == 0) {
1085 		fEntry = _NextNonBufferEntry(sTracingMetaData->FirstEntry());
1086 		fIndex = 1;
1087 	} else if (fEntry != NULL) {
1088 		fEntry = _NextNonBufferEntry(sTracingMetaData->NextEntry(fEntry));
1089 		fIndex++;
1090 	}
1091 
1092 	return Current();
1093 }
1094 
1095 
1096 TraceEntry*
1097 TraceEntryIterator::Previous()
1098 {
1099 	if (fIndex == (int32)sTracingMetaData->Entries() + 1)
1100 		fEntry = sTracingMetaData->AfterLastEntry();
1101 
1102 	if (fEntry != NULL) {
1103 		fEntry = _PreviousNonBufferEntry(
1104 			sTracingMetaData->PreviousEntry(fEntry));
1105 		fIndex--;
1106 	}
1107 
1108 	return Current();
1109 }
1110 
1111 
// Positions the iterator on the 1-based \a index, iterating there via the
// cheapest of three starting points: the current position, the beginning, or
// the end of the entry list. Index 0 / Entries()+1 are the off-the-ends
// positions (Current() returns NULL there).
TraceEntry*
TraceEntryIterator::MoveTo(int32 index)
{
	if (index == fIndex)
		return Current();

	if (index <= 0 || index > (int32)sTracingMetaData->Entries()) {
		fIndex = (index <= 0 ? 0 : sTracingMetaData->Entries() + 1);
		fEntry = NULL;
		return NULL;
	}

	// get the shortest iteration path
	int32 distance = index - fIndex;
	int32 direction = distance < 0 ? -1 : 1;
	distance *= direction;

	// starting from the beginning is cheaper than iterating backwards
	if (index < distance) {
		distance = index;
		direction = 1;
		fEntry = NULL;
		fIndex = 0;
	}
	// starting from the end is cheaper than iterating forwards
	if ((int32)sTracingMetaData->Entries() + 1 - fIndex < distance) {
		distance = sTracingMetaData->Entries() + 1 - fIndex;
		direction = -1;
		fEntry = NULL;
		fIndex = sTracingMetaData->Entries() + 1;
	}

	// iterate to the index
	if (direction < 0) {
		while (fIndex != index)
			Previous();
	} else {
		while (fIndex != index)
			Next();
	}

	return Current();
}
1153 
1154 
1155 trace_entry*
1156 TraceEntryIterator::_NextNonBufferEntry(trace_entry* entry)
1157 {
1158 	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
1159 		entry = sTracingMetaData->NextEntry(entry);
1160 
1161 	return entry;
1162 }
1163 
1164 
1165 trace_entry*
1166 TraceEntryIterator::_PreviousNonBufferEntry(trace_entry* entry)
1167 {
1168 	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
1169 		entry = sTracingMetaData->PreviousEntry(entry);
1170 
1171 	return entry;
1172 }
1173 
1174 
/*!	Implementation of the "traced" debugger command (and, via dump_tracing(),
	of wrapper commands): parses the arguments, determines the range of
	recorded trace entries to check, and prints those matching the optional
	filter.
	The static locals preserve the state of the previous invocation, so that
	"forward"/"backward" (or an empty command line) can continue where the
	previous run left off -- provided no new entries have been recorded in
	the meantime.
	\param wrapperFilter If not \c NULL, a filter supplied by a wrapping
		debugger command; it is initialized with the user-specified filter
		and used in its stead.
	\return \c B_KDEBUG_CONT if the command is continuable (entering an empty
		line in the debugger re-invokes it), 0 otherwise.
*/
int
dump_tracing_internal(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
	int argi = 1;

	// variables in which we store our state to be continuable
	static int32 _previousCount = 0;
	static bool _previousHasFilter = false;
	static bool _previousPrintStackTrace = false;
	static int32 _previousMaxToCheck = 0;
	static int32 _previousFirstChecked = 1;
	static int32 _previousLastChecked = -1;
	static int32 _previousDirection = 1;
	static uint32 _previousEntriesEver = 0;
	static uint32 _previousEntries = 0;
	static uint32 _previousOutputFlags = 0;
	static TraceEntryIterator iterator;

	uint32 entriesEver = sTracingMetaData->EntriesEver();

	// Note: start and index are Pascal-like indices (i.e. in [1, Entries()]).
	int32 start = 0;	// special index: print the last count entries
	int32 count = 0;
	int32 maxToCheck = 0;
	int32 cont = 0;		// -1/+1: continue the previous iteration backward/forward

	bool hasFilter = false;
	bool printStackTrace = false;

	// parse the leading "--" output options
	uint32 outputFlags = 0;
	while (argi < argc) {
		if (strcmp(argv[argi], "--difftime") == 0) {
			outputFlags |= TRACE_OUTPUT_DIFF_TIME;
			argi++;
		} else if (strcmp(argv[argi], "--printteam") == 0) {
			outputFlags |= TRACE_OUTPUT_TEAM_ID;
			argi++;
		} else if (strcmp(argv[argi], "--stacktrace") == 0) {
			printStackTrace = true;
			argi++;
		} else
			break;
	}

	// a "forward"/"backward" argument -- or no arguments at all -- continues
	// the previous iteration
	if (argi < argc) {
		if (strcmp(argv[argi], "forward") == 0) {
			cont = 1;
			argi++;
		} else if (strcmp(argv[argi], "backward") == 0) {
			cont = -1;
			argi++;
		}
	} else
		cont = _previousDirection;

	if (cont != 0) {
		if (argi < argc) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
		// continuing is only valid while the buffer contents are unchanged
		// since the previous invocation
		if (entriesEver == 0 || entriesEver != _previousEntriesEver
			|| sTracingMetaData->Entries() != _previousEntries) {
			kprintf("Can't continue iteration. \"%s\" has not been invoked "
				"before, or there were new entries written since the last "
				"invocation.\n", argv[0]);
			return 0;
		}
	}

	// get start, count, maxToCheck
	int32* params[3] = { &start, &count, &maxToCheck };
	for (int i = 0; i < 3 && !hasFilter && argi < argc; i++) {
		if (strcmp(argv[argi], "filter") == 0) {
			hasFilter = true;
			argi++;
		} else if (argv[argi][0] == '#') {
			hasFilter = true;
		} else {
			*params[i] = parse_expression(argv[argi]);
			argi++;
		}
	}

	// filter specification
	if (argi < argc) {
		hasFilter = true;
		if (strcmp(argv[argi], "filter") == 0)
			argi++;

		if (!TraceFilterParser::Default()->Parse(argc - argi, argv + argi)) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	int32 direction;
	int32 firstToCheck;
	int32 lastToCheck;

	if (cont != 0) {
		// get values from the previous iteration
		direction = cont;
		count = _previousCount;
		maxToCheck = _previousMaxToCheck;
		hasFilter = _previousHasFilter;
		outputFlags = _previousOutputFlags;
		printStackTrace = _previousPrintStackTrace;

		if (direction < 0)
			start = _previousFirstChecked - 1;
		else
			start = _previousLastChecked + 1;
	} else {
		// defaults for count and maxToCheck
		if (count == 0)
			count = 30;
		if (maxToCheck == 0 || !hasFilter)
			maxToCheck = count;
		else if (maxToCheck < 0)
			maxToCheck = sTracingMetaData->Entries();

		// determine iteration direction
		direction = (start <= 0 || count < 0 ? -1 : 1);

		// validate count and maxToCheck
		if (count < 0)
			count = -count;
		if (maxToCheck < 0)
			maxToCheck = -maxToCheck;
		if (maxToCheck > (int32)sTracingMetaData->Entries())
			maxToCheck = sTracingMetaData->Entries();
		if (count > maxToCheck)
			count = maxToCheck;

		// validate start
		if (start <= 0 || start > (int32)sTracingMetaData->Entries())
			start = max_c(1, sTracingMetaData->Entries());
	}

	// compute the [firstToCheck, lastToCheck] index window around start
	if (direction < 0) {
		firstToCheck = max_c(1, start - maxToCheck + 1);
		lastToCheck = start;
	} else {
		firstToCheck = start;
		lastToCheck = min_c((int32)sTracingMetaData->Entries(),
			start + maxToCheck - 1);
	}

	// reset the iterator, if something changed in the meantime
	if (entriesEver == 0 || entriesEver != _previousEntriesEver
		|| sTracingMetaData->Entries() != _previousEntries) {
		iterator.Reset();
	}

	LazyTraceOutput out(sTracingMetaData->TraceOutputBuffer(),
		kTraceOutputBufferSize, outputFlags);

	bool markedMatching = false;
	int32 firstToDump = firstToCheck;
	int32 lastToDump = lastToCheck;

	TraceFilter* filter = NULL;
	if (hasFilter)
		filter = TraceFilterParser::Default()->Filter();

	if (wrapperFilter != NULL) {
		wrapperFilter->Init(filter, direction, cont != 0);
		filter = wrapperFilter;
	}

	// When iterating backward with a filter, first scan backward and mark the
	// matching entries (FILTER_MATCH flag), so the dump loop below can run
	// forward over exactly the last <count> matches.
	if (direction < 0 && filter && lastToCheck - firstToCheck >= count) {
		// iteration direction is backwards
		markedMatching = true;

		// From the last entry to check iterate backwards to check filter
		// matches.
		int32 matching = 0;

		// move to the entry after the last entry to check
		iterator.MoveTo(lastToCheck + 1);

		// iterate backwards
		firstToDump = -1;
		lastToDump = -1;
		while (iterator.Index() > firstToCheck) {
			TraceEntry* entry = iterator.Previous();
			if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
				out.Clear();
				if (filter->Filter(entry, out)) {
					entry->ToTraceEntry()->flags |= FILTER_MATCH;
					if (lastToDump == -1)
						lastToDump = iterator.Index();
					firstToDump = iterator.Index();

					matching++;
					if (matching >= count)
						break;
				} else
					entry->ToTraceEntry()->flags &= ~FILTER_MATCH;
			}
		}

		firstToCheck = iterator.Index();

		// iterate to the previous entry, so that the next loop starts at the
		// right one
		iterator.Previous();
	}

	out.SetLastEntryTime(0);

	// set the iterator to the entry before the first one to dump
	iterator.MoveTo(firstToDump - 1);

	// dump the entries matching the filter in the range
	// [firstToDump, lastToDump]
	int32 dumped = 0;

	while (TraceEntry* entry = iterator.Next()) {
		int32 index = iterator.Index();
		if (index < firstToDump)
			continue;
		if (index > lastToDump || dumped >= count) {
			if (direction > 0)
				lastToCheck = index - 1;
			break;
		}

		if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
			out.Clear();
			// in marked mode the backward pre-scan already decided which
			// entries match; otherwise evaluate the filter now
			if (filter &&  (markedMatching
					? (entry->Flags() & FILTER_MATCH) == 0
					: !filter->Filter(entry, out))) {
				continue;
			}

			// don't print trailing new line
			const char* dump = out.DumpEntry(entry);
			int len = strlen(dump);
			if (len > 0 && dump[len - 1] == '\n')
				len--;

			kprintf("%5ld. %.*s\n", index, len, dump);

			if (printStackTrace) {
				out.Clear();
				entry->DumpStackTrace(out);
				if (out.Size() > 0)
					kputs(out.Buffer());
			}
		} else if (!filter)
			kprintf("%5ld. ** uninitialized entry **\n", index);

		dumped++;
	}

	kprintf("printed %ld entries within range %ld to %ld (%ld of %ld total, "
		"%ld ever)\n", dumped, firstToCheck, lastToCheck,
		lastToCheck - firstToCheck + 1, sTracingMetaData->Entries(),
		entriesEver);

	// store iteration state
	_previousCount = count;
	_previousMaxToCheck = maxToCheck;
	_previousHasFilter = hasFilter;
	_previousPrintStackTrace = printStackTrace;
	_previousFirstChecked = firstToCheck;
	_previousLastChecked = lastToCheck;
	_previousDirection = direction;
	_previousEntriesEver = entriesEver;
	_previousEntries = sTracingMetaData->Entries();
	_previousOutputFlags = outputFlags;

	return cont != 0 ? B_KDEBUG_CONT : 0;
}
1450 
1451 
1452 static int
1453 dump_tracing_command(int argc, char** argv)
1454 {
1455 	return dump_tracing_internal(argc, argv, NULL);
1456 }
1457 
1458 
1459 #endif	// ENABLE_TRACING
1460 
1461 
1462 extern "C" uint8*
1463 alloc_tracing_buffer(size_t size)
1464 {
1465 #if	ENABLE_TRACING
1466 	trace_entry* entry = sTracingMetaData->AllocateEntry(
1467 		size + sizeof(trace_entry), BUFFER_ENTRY);
1468 	if (entry == NULL)
1469 		return NULL;
1470 
1471 	return (uint8*)(entry + 1);
1472 #else
1473 	return NULL;
1474 #endif
1475 }
1476 
1477 
1478 uint8*
1479 alloc_tracing_buffer_memcpy(const void* source, size_t size, bool user)
1480 {
1481 	if (user && !IS_USER_ADDRESS(source))
1482 		return NULL;
1483 
1484 	uint8* buffer = alloc_tracing_buffer(size);
1485 	if (buffer == NULL)
1486 		return NULL;
1487 
1488 	if (user) {
1489 		if (user_memcpy(buffer, source, size) != B_OK)
1490 			return NULL;
1491 	} else
1492 		memcpy(buffer, source, size);
1493 
1494 	return buffer;
1495 }
1496 
1497 
1498 char*
1499 alloc_tracing_buffer_strcpy(const char* source, size_t maxSize, bool user)
1500 {
1501 	if (source == NULL || maxSize == 0)
1502 		return NULL;
1503 
1504 	if (user && !IS_USER_ADDRESS(source))
1505 		return NULL;
1506 
1507 	// limit maxSize to the actual source string len
1508 	if (user) {
1509 		ssize_t size = user_strlcpy(NULL, source, 0);
1510 			// there's no user_strnlen()
1511 		if (size < 0)
1512 			return 0;
1513 		maxSize = min_c(maxSize, (size_t)size + 1);
1514 	} else
1515 		maxSize = strnlen(source, maxSize - 1) + 1;
1516 
1517 	char* buffer = (char*)alloc_tracing_buffer(maxSize);
1518 	if (buffer == NULL)
1519 		return NULL;
1520 
1521 	if (user) {
1522 		if (user_strlcpy(buffer, source, maxSize) < B_OK)
1523 			return NULL;
1524 	} else
1525 		strlcpy(buffer, source, maxSize);
1526 
1527 	return buffer;
1528 }
1529 
1530 
1531 tracing_stack_trace*
1532 capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool userOnly)
1533 {
1534 #if	ENABLE_TRACING
1535 	// TODO: page_fault_exception() doesn't allow us to gracefully handle
1536 	// a bad address in the stack trace, if interrupts are disabled.
1537 	if (!are_interrupts_enabled())
1538 		return NULL;
1539 
1540 	tracing_stack_trace* stackTrace
1541 		= (tracing_stack_trace*)alloc_tracing_buffer(
1542 			sizeof(tracing_stack_trace) + maxCount * sizeof(addr_t));
1543 
1544 	if (stackTrace != NULL) {
1545 		stackTrace->depth = arch_debug_get_stack_trace(
1546 			stackTrace->return_addresses, maxCount, 0, skipFrames + 1,
1547 			userOnly);
1548 	}
1549 
1550 	return stackTrace;
1551 #else
1552 	return NULL;
1553 #endif
1554 }
1555 
1556 
/*!	Public kernel debugger API: dumps recorded trace entries like the
	"traced" command, using the additional \a wrapperFilter supplied by the
	calling debugger command. Returns 0 when tracing is disabled.
*/
int
dump_tracing(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
#if	ENABLE_TRACING
	return dump_tracing_internal(argc, argv, wrapperFilter);
#else
	return 0;
#endif
}
1566 
1567 
/*!	Locks the trace buffer, e.g. for iterating over entries without
	concurrent writers. Must be balanced by unlock_tracing_buffer().
	No-op when tracing is disabled.
*/
void
lock_tracing_buffer()
{
#if ENABLE_TRACING
	sTracingMetaData->Lock();
#endif
}
1575 
1576 
/*!	Releases the trace buffer lock acquired by lock_tracing_buffer().
	No-op when tracing is disabled.
*/
void
unlock_tracing_buffer()
{
#if ENABLE_TRACING
	sTracingMetaData->Unlock();
#endif
}
1584 
1585 
1586 extern "C" status_t
1587 tracing_init(void)
1588 {
1589 #if	ENABLE_TRACING
1590 	status_t result = TracingMetaData::Create(sTracingMetaData);
1591 	if (result != B_OK) {
1592 		sTracingMetaData = &sDummyTracingMetaData;
1593 		return result;
1594 	}
1595 
1596 	new(nothrow) TracingLogStartEntry();
1597 
1598 	add_debugger_command_etc("traced", &dump_tracing_command,
1599 		"Dump recorded trace entries",
1600 		"[ --printteam ] [ --difftime ] [ --stacktrace ] "
1601 			"(\"forward\" | \"backward\") "
1602 			"| ([ <start> [ <count> [ <range> ] ] ] "
1603 			"[ #<pattern> | (\"filter\" <filter>) ])\n"
1604 		"Prints recorded trace entries. If \"backward\" or \"forward\" is\n"
1605 		"specified, the command continues where the previous invocation left\n"
1606 		"off, i.e. printing the previous respectively next entries (as many\n"
1607 		"as printed before). In this case the command is continuable, that is\n"
1608 		"afterwards entering an empty line in the debugger will reinvoke it.\n"
1609 		"If no arguments are given, the command continues in the direction\n"
1610 		"of the last invocation.\n"
1611 		"--printteam  - enables printing the entries' team IDs.\n"
1612 		"--difftime   - print difference times for all but the first entry.\n"
1613 		"--stacktrace - print stack traces for entries that captured one.\n"
1614 		"  <start>    - The base index of the entries to print. Depending on\n"
1615 		"               whether the iteration direction is forward or\n"
1616 		"               backward this will be the first or last entry printed\n"
1617 		"               (potentially, if a filter is specified). The index of\n"
1618 		"               the first entry in the trace buffer is 1. If 0 is\n"
1619 		"               specified, the last <count> recorded entries are\n"
1620 		"               printed (iteration direction is backward). Defaults \n"
1621 		"               to 0.\n"
1622 		"  <count>    - The number of entries to be printed. Defaults to 30.\n"
1623 		"               If negative, the -<count> entries before and\n"
1624 		"               including <start> will be printed.\n"
1625 		"  <range>    - Only relevant if a filter is specified. Specifies the\n"
1626 		"               number of entries to be filtered -- depending on the\n"
1627 		"               iteration direction the entries before or after\n"
1628 		"               <start>. If more than <count> entries match the\n"
1629 		"               filter, only the first (forward) or last (backward)\n"
1630 		"               <count> matching entries will be printed. If 0 is\n"
1631 		"               specified <range> will be set to <count>. If -1,\n"
1632 		"               <range> will be set to the number of recorded\n"
1633 		"               entries.\n"
1634 		"  <pattern>  - If specified only entries containing this string are\n"
1635 		"               printed.\n"
1636 		"  <filter>   - If specified only entries matching this filter\n"
1637 		"               expression are printed. The expression can consist of\n"
1638 		"               prefix operators \"not\", \"and\", \"or\", and\n"
1639 		"               filters \"'thread' <thread>\" (matching entries\n"
1640 		"               with the given thread ID), \"'team' <team>\"\n"
1641 						"(matching entries with the given team ID), and\n"
1642 		"               \"#<pattern>\" (matching entries containing the given\n"
1643 		"               string).\n", 0);
1644 #endif	// ENABLE_TRACING
1645 	return B_OK;
1646 }
1647 
1648 
/*!	printf()-style convenience function adding a kernel trace entry with the
	formatted message. The message is truncated to 255 characters. No-op
	when tracing is disabled.
*/
void
ktrace_printf(const char *format, ...)
{
#if	ENABLE_TRACING
	va_list list;
	va_start(list, format);

	// format into a stack buffer; KernelTraceEntry copies the result
	char buffer[256];
	vsnprintf(buffer, sizeof(buffer), format, list);

	va_end(list);

	new(nothrow) KernelTraceEntry(buffer);
#endif	// ENABLE_TRACING
}
1664 
1665 
/*!	Syscall backend: records a user-defined trace entry containing
	\a message.
	NOTE(review): \a message is a userland pointer passed straight through;
	presumably UserTraceEntry copies it with a checked user copy (cf.
	alloc_tracing_buffer_strcpy()) -- verify in its constructor.
*/
void
_user_ktrace_output(const char *message)
{
#if	ENABLE_TRACING
	new(nothrow) UserTraceEntry(message);
#endif	// ENABLE_TRACING
}
1673 
1674