xref: /haiku/src/system/kernel/debug/tracing.cpp (revision 1c09002cbee8e797a0f8bbfc5678dfadd39ee1a7)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2008-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 
8 #include <tracing.h>
9 
10 #include <stdarg.h>
11 #include <stdlib.h>
12 
13 #include <algorithm>
14 
15 #include <arch/debug.h>
16 #include <debug.h>
17 #include <elf.h>
18 #include <int.h>
19 #include <kernel.h>
20 #include <team.h>
21 #include <thread.h>
22 #include <util/AutoLock.h>
23 #include <vm/vm.h>
24 
25 
// Variable-length stack trace record: exactly `depth` return addresses
// follow the header. `return_addresses[0]` is the pre-C99 zero-length-array
// idiom (a GCC extension in C++) for a trailing flexible array.
struct tracing_stack_trace {
	int32	depth;
	addr_t	return_addresses[0];
};
30 
31 
32 #if ENABLE_TRACING
33 
34 //#define TRACE_TRACING
35 #ifdef TRACE_TRACING
36 #	define TRACE(x) dprintf_no_syslog x
37 #else
38 #	define TRACE(x) ;
39 #endif
40 
41 
// Flag bits stored in trace_entry::flags.
enum {
	WRAP_ENTRY			= 0x01,	// dummy entry marking the wrap-around at the
								// end of the ring buffer (size == 0)
	ENTRY_INITIALIZED	= 0x02,	// TraceEntry constructor has fully run
								// (set via TraceEntry::Initialized())
	BUFFER_ENTRY		= 0x04,	// raw buffer allocation, not a TraceEntry
	FILTER_MATCH		= 0x08,	// presumably set while dumping with a
								// filter -- used outside this chunk
	INVALID_ENTRY		= 0x10,	// entry failed validation -- used outside
								// this chunk
	CHECK_ENTRY			= 0x20,	// recovered entry still needs validation
								// (see _InitPreviousTracingData())
};
50 
51 
// Size of the scratch buffer used to render entries for output.
static const size_t kTraceOutputBufferSize = 10240;
// Capacity of the trace ring buffer, in trace_entry units.
static const size_t kBufferSize = MAX_TRACE_SIZE / sizeof(trace_entry);

// Recovery of a previous session's buffer gives up after this many
// inconsistencies (see _InitPreviousTracingData()).
static const uint32 kMaxRecoveringErrorCount	= 100;
// Physical address range probed (in kMetaDataAddressIncrement steps) for
// meta data surviving from a previous session (see _CreateMetaDataArea()).
static const addr_t kMetaDataBaseAddress		= 32 * 1024 * 1024;
static const addr_t kMetaDataBaseEndAddress		= 128 * 1024 * 1024;
static const addr_t kMetaDataAddressIncrement	= 8 * 1024 * 1024;
// The three magics spell "ValidTracing"; they bracket the meta data fields
// and identify a valid meta data page across reboots.
static const uint32 kMetaDataMagic1 = 'Vali';
static const uint32 kMetaDataMagic2 = 'dTra';
static const uint32 kMetaDataMagic3 = 'cing';

// the maximum we can address with the trace_entry::[previous_]size fields
static const size_t kMaxTracingEntryByteSize
	= ((1 << 13) - 1) * sizeof(trace_entry);
66 
67 
// Bookkeeping for the tracing ring buffer. The object lives in a dedicated
// physical memory area (see _CreateMetaDataArea()) so that it -- and the
// buffer it manages -- can be found again after a reboot. The three magic
// members bracket the payload fields and validate such a recovery.
class TracingMetaData {
public:
	static	status_t			Create(TracingMetaData*& _metaData);

	inline	bool				Lock();
	inline	void				Unlock();

	inline	trace_entry*		FirstEntry() const;
	inline	trace_entry*		AfterLastEntry() const;

	inline	uint32				Entries() const;
	inline	uint32				EntriesEver() const;

	inline	void				IncrementEntriesEver();

	inline	char*				TraceOutputBuffer() const;

			trace_entry*		NextEntry(trace_entry* entry);
			trace_entry*		PreviousEntry(trace_entry* entry);

			trace_entry*		AllocateEntry(size_t size, uint16 flags);

private:
			bool				_FreeFirstEntry();
			bool				_MakeSpace(size_t needed);

	static	status_t			_CreateMetaDataArea(bool findPrevious,
									area_id& _area,
									TracingMetaData*& _metaData);
			bool				_InitPreviousTracingData();

private:
			uint32				fMagic1;		// kMetaDataMagic1 when valid
			trace_entry*		fBuffer;		// start of the entry buffer
			trace_entry*		fFirstEntry;	// oldest entry
			trace_entry*		fAfterLastEntry;	// one past the newest entry
			uint32				fEntries;		// current non-buffer entries
			uint32				fMagic2;		// kMetaDataMagic2 when valid
			uint32				fEntriesEver;	// total entries ever created
			spinlock			fLock;
			char*				fTraceOutputBuffer;	// dump scratch buffer
			phys_addr_t			fPhysicalAddress;	// of the buffer area
			uint32				fMagic3;		// kMetaDataMagic3 when valid
};
112 
// Fallback object used until Create() has set up the real meta data; keeps
// sTracingMetaData dereferenceable at all times (its zero-initialized
// fAfterLastEntry makes AllocateEntry() bail out early).
static TracingMetaData sDummyTracingMetaData;
static TracingMetaData* sTracingMetaData = &sDummyTracingMetaData;
// Set when a previous session's buffer was re-attached; the recovered
// entries still need validation (cf. CHECK_ENTRY).
static bool sTracingDataRecovered = false;
116 
117 
118 // #pragma mark -
119 
120 
121 // #pragma mark - TracingMetaData
122 
123 
// Acquires the meta data spinlock. Always returns true. Callers are
// expected to have interrupts disabled (cf. the InterruptsSpinLocker used
// in AllocateEntry()) -- TODO confirm for external callers.
bool
TracingMetaData::Lock()
{
	acquire_spinlock(&fLock);
	return true;
}


void
TracingMetaData::Unlock()
{
	release_spinlock(&fLock);
}


// Oldest entry still in the ring buffer.
trace_entry*
TracingMetaData::FirstEntry() const
{
	return fFirstEntry;
}


// One past the newest entry (i.e. where the next entry will be allocated).
trace_entry*
TracingMetaData::AfterLastEntry() const
{
	return fAfterLastEntry;
}


// Number of non-buffer entries currently in the ring buffer.
uint32
TracingMetaData::Entries() const
{
	return fEntries;
}


// Total number of entries ever created, including already evicted ones.
uint32
TracingMetaData::EntriesEver() const
{
	return fEntriesEver;
}


void
TracingMetaData::IncrementEntriesEver()
{
	fEntriesEver++;
		// NOTE: Race condition on SMP machines! We should use atomic_add(),
		// though that costs some performance and the information is for
		// informational purpose anyway.
}


// Scratch buffer (kTraceOutputBufferSize bytes) used to render entries for
// output; it directly precedes the entry buffer in the same area.
char*
TracingMetaData::TraceOutputBuffer() const
{
	return fTraceOutputBuffer;
}
182 
183 
/*!	Returns the entry following \a entry, transparently skipping the wrap
	entry at the end of the buffer. Returns \c NULL when fAfterLastEntry
	(the end of the used buffer) is reached.
*/
trace_entry*
TracingMetaData::NextEntry(trace_entry* entry)
{
	entry += entry->size;
	// a wrap entry means the next real entry is at the buffer start
	if ((entry->flags & WRAP_ENTRY) != 0)
		entry = fBuffer;

	if (entry == fAfterLastEntry)
		return NULL;

	return entry;
}
196 
197 
/*!	Returns the entry preceding \a entry, transparently skipping the wrap
	entry, or \c NULL if \a entry is the oldest entry.
*/
trace_entry*
TracingMetaData::PreviousEntry(trace_entry* entry)
{
	if (entry == fFirstEntry)
		return NULL;

	if (entry == fBuffer) {
		// beginning of buffer -- previous entry is a wrap entry
		// (the buffer-start entry's previous_size is the distance back to
		// the wrap entry, see _MakeSpace())
		entry = fBuffer + kBufferSize - entry->previous_size;
	}

	// step over the (possibly wrap) entry to the real previous entry
	return entry - entry->previous_size;
}
211 
212 
/*!	Allocates a \a size byte entry (header included) in the ring buffer,
	evicting older entries as needed, and returns it with \a flags stored
	verbatim. Returns \c NULL if tracing isn't initialized yet (dummy meta
	data has a NULL fAfterLastEntry), \a size is 0, or \a size exceeds what
	the 13 bit trace_entry size field can encode.
*/
trace_entry*
TracingMetaData::AllocateEntry(size_t size, uint16 flags)
{
	if (fAfterLastEntry == NULL || size == 0
		|| size >= kMaxTracingEntryByteSize) {
		return NULL;
	}

	InterruptsSpinLocker _(fLock);

	size = (size + 3) >> 2;
		// 4 byte aligned, don't store the lower 2 bits

	TRACE(("AllocateEntry(%lu), start %p, end %p, buffer %p\n", size * 4,
		fFirstEntry, fAfterLastEntry, fBuffer));

	// evict old entries until `size` (+ 1 slot, see _MakeSpace()) is free
	if (!_MakeSpace(size))
		return NULL;

	trace_entry* entry = fAfterLastEntry;
	entry->size = size;
	entry->flags = flags;
	fAfterLastEntry += size;
	// Writing the next header's previous_size is safe: _MakeSpace()
	// reserved one extra trace_entry beyond the requested size.
	fAfterLastEntry->previous_size = size;

	// buffer entries are raw payload and don't count as logical entries
	if (!(flags & BUFFER_ENTRY))
		fEntries++;

	TRACE(("  entry: %p, end %p, start %p, entries %ld\n", entry,
		fAfterLastEntry, fFirstEntry, fEntries));

	return entry;
}
246 
247 
/*!	Evicts the oldest entry from the ring buffer. Returns \c false if the
	entry is still being constructed (not ENTRY_INITIALIZED) and thus must
	not be reused yet.
*/
bool
TracingMetaData::_FreeFirstEntry()
{
	TRACE(("  skip start %p, %lu*4 bytes\n", fFirstEntry, fFirstEntry->size));

	trace_entry* newFirst = NextEntry(fFirstEntry);

	if (fFirstEntry->flags & BUFFER_ENTRY) {
		// a buffer entry -- just skip it
	} else if (fFirstEntry->flags & ENTRY_INITIALIZED) {
		// Fully initialized TraceEntry: We could destroy it, but don't do so
		// for sake of robustness. The destructors of tracing entry classes
		// should be empty anyway.
		fEntries--;
	} else {
		// Not fully initialized TraceEntry. We can't free it, since
		// then it's constructor might still write into the memory and
		// overwrite data of the entry we're going to allocate.
		// We can't do anything until this entry can be discarded.
		return false;
	}

	if (newFirst == NULL) {
		// everything is freed -- practically this can't happen, if
		// the buffer is large enough to hold three max-sized entries
		fFirstEntry = fAfterLastEntry = fBuffer;
		TRACE(("_FreeFirstEntry(): all entries freed!\n"));
	} else
		fFirstEntry = newFirst;

	return true;
}
280 
281 
/*!	Makes sure we have needed * 4 bytes of memory at fAfterLastEntry.
	Returns \c false, if unable to free that much.
	Must be called with the meta data lock held (cf. AllocateEntry()).
*/
bool
TracingMetaData::_MakeSpace(size_t needed)
{
	// we need space for fAfterLastEntry, too (in case we need to wrap around
	// later)
	needed++;

	// If there's not enough space (free or occupied) after fAfterLastEntry,
	// we free all entries in that region and wrap around.
	if (fAfterLastEntry + needed > fBuffer + kBufferSize) {
		TRACE(("_MakeSpace(%lu), wrapping around: after last: %p\n", needed,
			fAfterLastEntry));

		// Free all entries after fAfterLastEntry and one more at the beginning
		// of the buffer.
		while (fFirstEntry > fAfterLastEntry) {
			if (!_FreeFirstEntry())
				return false;
		}
		if (fAfterLastEntry != fBuffer && !_FreeFirstEntry())
			return false;

		// just in case _FreeFirstEntry() freed the very last existing entry
		if (fAfterLastEntry == fBuffer)
			return true;

		// mark as wrap entry and actually wrap around
		trace_entry* wrapEntry = fAfterLastEntry;
		wrapEntry->size = 0;
		wrapEntry->flags = WRAP_ENTRY;
		fAfterLastEntry = fBuffer;
		// the buffer-start entry's previous_size records the distance back
		// to the wrap entry (in trace_entry units, see PreviousEntry())
		fAfterLastEntry->previous_size = fBuffer + kBufferSize - wrapEntry;
	}

	if (fFirstEntry <= fAfterLastEntry) {
		// buffer is empty or the space after fAfterLastEntry is unoccupied
		return true;
	}

	// free the first entries, until there's enough space
	size_t space = fFirstEntry - fAfterLastEntry;

	if (space < needed) {
		TRACE(("_MakeSpace(%lu), left %ld\n", needed, space));
	}

	while (space < needed) {
		space += fFirstEntry->size;

		if (!_FreeFirstEntry())
			return false;
	}

	TRACE(("  out: start %p, entries %ld\n", fFirstEntry, fEntries));

	return true;
}
342 
343 
344 /*static*/ status_t
345 TracingMetaData::Create(TracingMetaData*& _metaData)
346 {
347 	// search meta data in memory (from previous session)
348 	area_id area;
349 	TracingMetaData* metaData;
350 	status_t error = _CreateMetaDataArea(true, area, metaData);
351 	if (error == B_OK) {
352 		if (metaData->_InitPreviousTracingData()) {
353 			_metaData = metaData;
354 			return B_OK;
355 		}
356 
357 		dprintf("Found previous tracing meta data, but failed to init.\n");
358 
359 		// invalidate the meta data
360 		metaData->fMagic1 = 0;
361 		metaData->fMagic2 = 0;
362 		metaData->fMagic3 = 0;
363 		delete_area(area);
364 	} else
365 		dprintf("No previous tracing meta data found.\n");
366 
367 	// no previous tracing data found -- create new one
368 	error = _CreateMetaDataArea(false, area, metaData);
369 	if (error != B_OK)
370 		return error;
371 
372 	virtual_address_restrictions virtualRestrictions = {};
373 	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
374 	physical_address_restrictions physicalRestrictions = {};
375 	area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
376 		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
377 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
378 		&virtualRestrictions, &physicalRestrictions,
379 		(void**)&metaData->fTraceOutputBuffer);
380 	if (area < 0)
381 		return area;
382 
383 	// get the physical address
384 	physical_entry physicalEntry;
385 	if (get_memory_map(metaData->fTraceOutputBuffer, B_PAGE_SIZE,
386 			&physicalEntry, 1) == B_OK) {
387 		metaData->fPhysicalAddress = physicalEntry.address;
388 	} else {
389 		dprintf("TracingMetaData::Create(): failed to get physical address "
390 			"of tracing buffer\n");
391 		metaData->fPhysicalAddress = 0;
392 	}
393 
394 	metaData->fBuffer = (trace_entry*)(metaData->fTraceOutputBuffer
395 		+ kTraceOutputBufferSize);
396 	metaData->fFirstEntry = metaData->fBuffer;
397 	metaData->fAfterLastEntry = metaData->fBuffer;
398 
399 	metaData->fEntries = 0;
400 	metaData->fEntriesEver = 0;
401 	B_INITIALIZE_SPINLOCK(&metaData->fLock);
402 
403 	metaData->fMagic1 = kMetaDataMagic1;
404 	metaData->fMagic2 = kMetaDataMagic2;
405 	metaData->fMagic3 = kMetaDataMagic3;
406 
407 	_metaData = metaData;
408 	return B_OK;
409 }
410 
411 
/*!	Creates (or, with \a findPrevious, re-attaches) the physical memory area
	holding the TracingMetaData object. Probes a range of fixed physical
	addresses so the same page can be found again after a reboot; the area is
	created with CREATE_AREA_DONT_CLEAR so a previous session's contents
	survive.
	\param findPrevious If \c true, only accept a page whose magic values
		identify valid previous-session meta data.
	\param _area Set to the created area on success.
	\param _metaData Set to the mapped meta data object on success.
	\return \c B_OK on success, \c B_ENTRY_NOT_FOUND if no address in the
		probed range worked.
*/
/*static*/ status_t
TracingMetaData::_CreateMetaDataArea(bool findPrevious, area_id& _area,
	TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	TracingMetaData* metaData;
	phys_addr_t metaDataAddress = kMetaDataBaseAddress;
	for (; metaDataAddress <= kMetaDataBaseEndAddress;
			metaDataAddress += kMetaDataAddressIncrement) {
		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		physicalRestrictions.low_address = metaDataAddress;
		physicalRestrictions.high_address = metaDataAddress + B_PAGE_SIZE;
		area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata",
			B_PAGE_SIZE, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			CREATE_AREA_DONT_CLEAR, &virtualRestrictions, &physicalRestrictions,
			(void**)&metaData);
		if (area < 0)
			continue;

		if (!findPrevious) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		// only accept the page if it carries valid meta data magics
		if (metaData->fMagic1 == kMetaDataMagic1
			&& metaData->fMagic2 == kMetaDataMagic2
			&& metaData->fMagic3 == kMetaDataMagic3) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		delete_area(area);
	}

	return B_ENTRY_NOT_FOUND;
}
452 
453 
/*!	Attempts to re-attach and sanity-check the tracing buffer of a previous
	session. Returns \c true on success (setting sTracingDataRecovered).
	Currently disabled -- see the TODO and the early return below.
*/
bool
TracingMetaData::_InitPreviousTracingData()
{
	// TODO: ATM re-attaching the previous tracing buffer doesn't work very
	// well. The entries should checked more thoroughly for validity -- e.g. the
	// pointers to the entries' vtable pointers could be invalid, which can
	// make the "traced" command quite unusable. The validity of the entries
	// could be checked in a safe environment (i.e. with a fault handler) with
	// typeid() and call of a virtual function.
	return false;

	// NOTE: Everything below is dead code due to the early return above;
	// it is kept for when recovery is re-enabled.

	addr_t bufferStart
		= (addr_t)fTraceOutputBuffer + kTraceOutputBufferSize;
	addr_t bufferEnd = bufferStart + MAX_TRACE_SIZE;

	// sanity-check the recovered pointers before dereferencing anything
	if (bufferStart > bufferEnd || (addr_t)fBuffer != bufferStart
		|| (addr_t)fFirstEntry % sizeof(trace_entry) != 0
		|| (addr_t)fFirstEntry < bufferStart
		|| (addr_t)fFirstEntry + sizeof(trace_entry) >= bufferEnd
		|| (addr_t)fAfterLastEntry % sizeof(trace_entry) != 0
		|| (addr_t)fAfterLastEntry < bufferStart
		|| (addr_t)fAfterLastEntry > bufferEnd
		|| fPhysicalAddress == 0) {
		dprintf("Failed to init tracing meta data: Sanity checks "
			"failed.\n");
		return false;
	}

	// re-map the previous tracing buffer at its old virtual address
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address = fTraceOutputBuffer;
	virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fPhysicalAddress;
	physicalRestrictions.high_address = fPhysicalAddress
		+ ROUNDUP(kTraceOutputBufferSize + MAX_TRACE_SIZE, B_PAGE_SIZE);
	area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_CLEAR,
		&virtualRestrictions, &physicalRestrictions, NULL);
	if (area < 0) {
		dprintf("Failed to init tracing meta data: Mapping tracing log "
			"buffer failed: %s\n", strerror(area));
		return false;
	}

	dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n",
		fTraceOutputBuffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);

	// Verify/repair the tracing entry list by walking it from fFirstEntry,
	// fixing previous_size links and truncating at the first fatal
	// inconsistency. Gives up after kMaxRecoveringErrorCount errors.
	uint32 errorCount = 0;
	uint32 entryCount = 0;
	uint32 nonBufferEntryCount = 0;
	uint32 previousEntrySize = 0;
	trace_entry* entry = fFirstEntry;
	while (errorCount <= kMaxRecoveringErrorCount) {
		// check previous entry size
		if (entry->previous_size != previousEntrySize) {
			if (entry != fFirstEntry) {
				dprintf("ktrace recovering: entry %p: fixing previous_size "
					"size: %lu (should be %lu)\n", entry, entry->previous_size,
					previousEntrySize);
				errorCount++;
			}
			entry->previous_size = previousEntrySize;
		}

		if (entry == fAfterLastEntry)
			break;

		// check size field
		if ((entry->flags & WRAP_ENTRY) == 0 && entry->size == 0) {
			dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
				entry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry->size > uint32(fBuffer + kBufferSize - entry)) {
			dprintf("ktrace recovering: entry %p: size too big: %lu\n", entry,
				entry->size);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry < fAfterLastEntry && entry + entry->size > fAfterLastEntry) {
			dprintf("ktrace recovering: entry %p: entry crosses "
				"fAfterLastEntry (%p)\n", entry, fAfterLastEntry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		// check for wrap entry
		if ((entry->flags & WRAP_ENTRY) != 0) {
			if ((uint32)(fBuffer + kBufferSize - entry)
					> kMaxTracingEntryByteSize / sizeof(trace_entry)) {
				dprintf("ktrace recovering: entry %p: wrap entry at invalid "
					"buffer location\n", entry);
				errorCount++;
			}

			if (entry->size != 0) {
				dprintf("ktrace recovering: entry %p: invalid wrap entry "
					"size: %lu\n", entry, entry->size);
				errorCount++;
				entry->size = 0;
			}

			// continue at the buffer start, remembering the distance back
			// to the wrap entry for the previous_size check
			previousEntrySize = fBuffer + kBufferSize - entry;
			entry = fBuffer;
			continue;
		}

		// mark real entries for later validation (cf. CHECK_ENTRY)
		if ((entry->flags & BUFFER_ENTRY) == 0) {
			entry->flags |= CHECK_ENTRY;
			nonBufferEntryCount++;
		}

		entryCount++;
		previousEntrySize = entry->size;

		entry += entry->size;
	}

	if (errorCount > kMaxRecoveringErrorCount) {
		dprintf("ktrace recovering: Too many errors.\n");
		fAfterLastEntry = entry;
		fAfterLastEntry->previous_size = previousEntrySize;
	}

	dprintf("ktrace recovering: Recovered %lu entries + %lu buffer entries "
		"from previous session. Expected %lu entries.\n", nonBufferEntryCount,
		entryCount - nonBufferEntryCount, fEntries);
	fEntries = nonBufferEntryCount;

	B_INITIALIZE_SPINLOCK(&fLock);

	// TODO: Actually check the entries! Do that when first accessing the
	// tracing buffer from the kernel debugger (when sTracingDataRecovered is
	// true).
	sTracingDataRecovered = true;
	return true;
}
600 
601 
602 #endif	// ENABLE_TRACING
603 
604 
605 // #pragma mark -
606 
607 
// Binds the output object to a caller-provided buffer of \a bufferSize
// bytes; does not take ownership. The buffer is cleared immediately.
// \a flags are TRACE_OUTPUT_* bits (cf. Flags() uses elsewhere).
TraceOutput::TraceOutput(char* buffer, size_t bufferSize, uint32 flags)
	: fBuffer(buffer),
	  fCapacity(bufferSize),
	  fFlags(flags)
{
	Clear();
}
615 
616 
617 void
618 TraceOutput::Clear()
619 {
620 	if (fCapacity > 0)
621 		fBuffer[0] = '\0';
622 	fSize = 0;
623 }
624 
625 
626 void
627 TraceOutput::Print(const char* format,...)
628 {
629 #if ENABLE_TRACING
630 	if (IsFull())
631 		return;
632 
633 	if (fSize < fCapacity) {
634 		va_list args;
635 		va_start(args, format);
636 		size_t length = vsnprintf(fBuffer + fSize, fCapacity - fSize, format,
637 			args);
638 		fSize += std::min(length, fCapacity - fSize - 1);
639 		va_end(args);
640 	}
641 #endif
642 }
643 
644 
/*!	Appends a formatted stack trace, one frame per line, resolving each
	return address to symbol + offset and image name where possible.
	A NULL or empty \a stackTrace is silently ignored.
*/
void
TraceOutput::PrintStackTrace(tracing_stack_trace* stackTrace)
{
#if ENABLE_TRACING
	if (stackTrace == NULL || stackTrace->depth <= 0)
		return;

	for (int32 i = 0; i < stackTrace->depth; i++) {
		addr_t address = stackTrace->return_addresses[i];

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			Print("  %p  %s + 0x%lx (%s)%s\n", (void*)address, symbol,
				address - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else {
			// no symbol found -- print the raw address
			Print("  %p\n", (void*)address);
		}
	}
#endif
}
670 
671 
// Remembers the timestamp of the entry dumped last; used for
// TRACE_OUTPUT_DIFF_TIME output (cf. AbstractTraceEntry::Dump()).
void
TraceOutput::SetLastEntryTime(bigtime_t time)
{
	fLastEntryTime = time;
}


bigtime_t
TraceOutput::LastEntryTime() const
{
	return fLastEntryTime;
}
684 
685 
686 //	#pragma mark -
687 
688 
// Intentionally empty -- entries live in the tracing ring buffer and are
// never destroyed individually (cf. _FreeFirstEntry()).
TraceEntry::TraceEntry()
{
}


TraceEntry::~TraceEntry()
{
}
697 
698 
// Default dump: just the entry's address; subclasses override this.
void
TraceEntry::Dump(TraceOutput& out)
{
#if ENABLE_TRACING
	// to be overridden by subclasses
	out.Print("ENTRY %p", this);
#endif
}


// Default: no stack trace available; overridden by entries that capture one.
void
TraceEntry::DumpStackTrace(TraceOutput& out)
{
}


// Must be called at the end of a subclass constructor: marks the entry as
// fully constructed (so _FreeFirstEntry() may evict it) and bumps the
// global entry counter.
void
TraceEntry::Initialized()
{
#if ENABLE_TRACING
	ToTraceEntry()->flags |= ENTRY_INITIALIZED;
	sTracingMetaData->IncrementEntriesEver();
#endif
}
723 
724 
/*!	Allocates the entry inside the tracing ring buffer (never the heap).
	Returns the memory directly after the trace_entry header, or \c NULL if
	the buffer can't satisfy the request (or tracing is disabled).
*/
void*
TraceEntry::operator new(size_t size, const std::nothrow_t&) throw()
{
#if ENABLE_TRACING
	trace_entry* entry = sTracingMetaData->AllocateEntry(
		size + sizeof(trace_entry), 0);
	return entry != NULL ? entry + 1 : NULL;
#endif
	// only reached when tracing is disabled
	return NULL;
}
735 
736 
737 //	#pragma mark -
738 
739 
740 AbstractTraceEntry::AbstractTraceEntry()
741 {
742 	Thread* thread = thread_get_current_thread();
743 	if (thread != NULL) {
744 		fThread = thread->id;
745 		if (thread->team)
746 			fTeam = thread->team->id;
747 	}
748 	fTime = system_time();
749 }
750 
751 AbstractTraceEntry::~AbstractTraceEntry()
752 {
753 }
754 
755 
/*!	Prints the standard "[thread(:team)] time: " prefix, then delegates the
	payload to the subclass' AddDump(). With TRACE_OUTPUT_DIFF_TIME set the
	time is printed relative to the previously dumped entry.
*/
void
AbstractTraceEntry::Dump(TraceOutput& out)
{
	bigtime_t time = (out.Flags() & TRACE_OUTPUT_DIFF_TIME)
		? fTime - out.LastEntryTime()
		: fTime;

	if (out.Flags() & TRACE_OUTPUT_TEAM_ID)
		out.Print("[%6ld:%6ld] %10Ld: ", fThread, fTeam, time);
	else
		out.Print("[%6ld] %10Ld: ", fThread, time);

	AddDump(out);

	// remember our timestamp for the next entry's diff-time output
	out.SetLastEntryTime(fTime);
}


// Default payload dump: nothing; subclasses override this.
void
AbstractTraceEntry::AddDump(TraceOutput& out)
{
}
778 
779 
780 //	#pragma mark -
781 
782 
783 #if ENABLE_TRACING
784 
// Trace entry recording a kernel-side message string. The message is copied
// into the tracing buffer (presumably truncated at 256 bytes -- confirm in
// alloc_tracing_buffer_strcpy()); the last argument marks it as a kernel,
// not user, source buffer.
class KernelTraceEntry : public AbstractTraceEntry {
	public:
		KernelTraceEntry(const char* message)
		{
			fMessage = alloc_tracing_buffer_strcpy(message, 256, false);

#if KTRACE_PRINTF_STACK_TRACE
			// 1 == frames to skip (this constructor)
			fStackTrace = capture_tracing_stack_trace(
				KTRACE_PRINTF_STACK_TRACE, 1, false);
#endif
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("kern: %s", fMessage);
		}

#if KTRACE_PRINTF_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

	private:
		char*	fMessage;
#if KTRACE_PRINTF_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};
816 
817 
// Trace entry recording a message originating from userland. Identical to
// KernelTraceEntry except that the message is copied from a user buffer
// (third argument \c true) and dumped with a "user:" prefix.
class UserTraceEntry : public AbstractTraceEntry {
	public:
		UserTraceEntry(const char* message)
		{
			fMessage = alloc_tracing_buffer_strcpy(message, 256, true);

#if KTRACE_PRINTF_STACK_TRACE
			// 1 == frames to skip (this constructor)
			fStackTrace = capture_tracing_stack_trace(
				KTRACE_PRINTF_STACK_TRACE, 1, false);
#endif
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("user: %s", fMessage);
		}

#if KTRACE_PRINTF_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

	private:
		char*	fMessage;
#if KTRACE_PRINTF_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};
849 
850 
// Marker entry written when the tracing log starts; carries no payload
// beyond the standard thread/team/time stamp.
class TracingLogStartEntry : public AbstractTraceEntry {
	public:
		TracingLogStartEntry()
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("ktrace start");
		}
};
863 
864 #endif	// ENABLE_TRACING
865 
866 
867 //	#pragma mark - trace filters
868 
869 
TraceFilter::~TraceFilter()
{
}


// Base implementation: matches nothing; subclasses override this.
bool
TraceFilter::Filter(const TraceEntry* entry, LazyTraceOutput& out)
{
	return false;
}
880 
881 
882 
883 class ThreadTraceFilter : public TraceFilter {
884 public:
885 	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
886 	{
887 		const AbstractTraceEntry* entry
888 			= dynamic_cast<const AbstractTraceEntry*>(_entry);
889 		return (entry != NULL && entry->ThreadID() == fThread);
890 	}
891 };
892 
893 
894 class TeamTraceFilter : public TraceFilter {
895 public:
896 	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
897 	{
898 		const AbstractTraceEntry* entry
899 			= dynamic_cast<const AbstractTraceEntry*>(_entry);
900 		return (entry != NULL && entry->TeamID() == fTeam);
901 	}
902 };
903 
904 
// Matches entries whose dumped text contains the substring fString.
class PatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return strstr(out.DumpEntry(entry), fString) != NULL;
	}
};
912 
913 
// Matches entries whose dumped text contains fValue formatted as a decimal
// number.
class DecimalPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%Ld", fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};
924 
// Matches entries whose dumped text contains fValue formatted as a hex
// number (without "0x" prefix).
class HexPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%Lx", fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};
935 
// Matches entries whose dumped text contains the string fValue points to.
// fValue may be a userland address (entered in the debugger), in which case
// the string is copied to a kernel buffer first.
class StringPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		if (IS_KERNEL_ADDRESS(fValue))
			return strstr(out.DumpEntry(entry), (const char*)fValue) != NULL;

		// TODO: this is *very* slow
		char buffer[64];
		user_strlcpy(buffer, (const char*)fValue, sizeof(buffer));
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};
949 
// Logical NOT of a single sub-filter (fSubFilters.first, set by the
// parser).
class NotTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return !fSubFilters.first->Filter(entry, out);
	}
};


// Logical AND of two sub-filters; short-circuits like &&.
class AndTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			&& fSubFilters.second->Filter(entry, out);
	}
};


// Logical OR of two sub-filters; short-circuits like ||.
class OrTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			|| fSubFilters.second->Filter(entry, out);
	}
};
977 
978 
// Parses the filter argument syntax of the tracing debugger commands into a
// tree of TraceFilter objects. Prefix grammar:
//   expr := "#<pattern>" | "d#<number>" | "x#<number>" | "s#<string>"
//         | "not" expr | "and" expr expr | "or" expr expr
//         | "thread" <id> | "team" <id>
// Filters are placement-new'ed into the fixed fFilters array of *base*
// class objects; this relies on all TraceFilter subclasses having the same
// size as the base class (they only override virtuals and use the base
// class' data members) -- fragile, keep subclasses data-member free.
class TraceFilterParser {
public:
	static TraceFilterParser* Default()
	{
		return &sParser;
	}

	// Parses \a argv into the filter array; returns whether the whole input
	// formed exactly one valid expression. Objects from a previous Parse()
	// are simply overwritten (TraceFilter instances own no resources).
	bool Parse(int argc, const char* const* argv)
	{
		fTokens = argv;
		fTokenCount = argc;
		fTokenIndex = 0;
		fFilterCount = 0;

		TraceFilter* filter = _ParseExpression();
		return fTokenIndex == fTokenCount && filter != NULL;
	}

	// Root of the last successfully parsed filter tree -- the root is
	// always the first filter allocated, i.e. fFilters[0].
	TraceFilter* Filter()
	{
		return &fFilters[0];
	}

private:
	// Recursive-descent parser for one expression; returns NULL on syntax
	// error, unexpected end of input, or filter-array overflow.
	TraceFilter* _ParseExpression()
	{
		const char* token = _NextToken();
		if (!token) {
			// unexpected end of expression
			return NULL;
		}

		if (fFilterCount == MAX_FILTERS) {
			// too many filters
			return NULL;
		}

		if (token[0] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				PatternTraceFilter;
			filter->fString = token + 1;
			return filter;
		} else if (token[0] == 'd' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				DecimalPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 'x' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				HexPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 's' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				StringPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (strcmp(token, "not") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) NotTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL)
				return filter;
			return NULL;
		} else if (strcmp(token, "and") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) AndTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "or") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) OrTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "thread") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				ThreadTraceFilter;
			filter->fThread = strtol(arg, NULL, 0);
			return filter;
		} else if (strcmp(token, "team") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				TeamTraceFilter;
			filter->fTeam = strtol(arg, NULL, 0);
			return filter;
		} else {
			// invalid token
			return NULL;
		}
	}

	// Token most recently returned by _NextToken(), or NULL.
	const char* _CurrentToken() const
	{
		if (fTokenIndex >= 1 && fTokenIndex <= fTokenCount)
			return fTokens[fTokenIndex - 1];
		return NULL;
	}

	// Consumes and returns the next token, or NULL at end of input.
	const char* _NextToken()
	{
		if (fTokenIndex >= fTokenCount)
			return NULL;
		return fTokens[fTokenIndex++];
	}

private:
	enum { MAX_FILTERS = 32 };

	const char* const*			fTokens;
	int							fTokenCount;
	int							fTokenIndex;
	TraceFilter					fFilters[MAX_FILTERS];
	int							fFilterCount;

	static TraceFilterParser	sParser;
};
1108 
1109 
1110 TraceFilterParser TraceFilterParser::sParser;
1111 
1112 
1113 //	#pragma mark -
1114 
1115 
1116 #if ENABLE_TRACING
1117 
1118 
/*!	Advances to the next non-buffer entry and returns it (NULL at the end).
	fIndex is a 1-based position; 0 means "before the first entry", so the
	first call starts the iteration.
*/
TraceEntry*
TraceEntryIterator::Next()
{
	if (fIndex == 0) {
		// not started yet -- begin at the oldest entry
		fEntry = _NextNonBufferEntry(sTracingMetaData->FirstEntry());
		fIndex = 1;
	} else if (fEntry != NULL) {
		fEntry = _NextNonBufferEntry(sTracingMetaData->NextEntry(fEntry));
		fIndex++;
	}

	return Current();
}
1132 
1133 
/*!	Steps back to the previous non-buffer entry and returns it (NULL at the
	beginning). Index Entries() + 1 means "after the last entry"; stepping
	back from there starts at the newest entry.
*/
TraceEntry*
TraceEntryIterator::Previous()
{
	if (fIndex == (int32)sTracingMetaData->Entries() + 1)
		fEntry = sTracingMetaData->AfterLastEntry();

	if (fEntry != NULL) {
		fEntry = _PreviousNonBufferEntry(
			sTracingMetaData->PreviousEntry(fEntry));
		fIndex--;
	}

	return Current();
}
1148 
1149 
/*!	Positions the iterator at the 1-based \a index and returns the entry
	there. Out-of-range indices clamp to the before-first/after-last
	positions and return \c NULL. Chooses the cheapest of three starting
	points (current position, buffer start, buffer end) since stepping is
	linear.
*/
TraceEntry*
TraceEntryIterator::MoveTo(int32 index)
{
	if (index == fIndex)
		return Current();

	if (index <= 0 || index > (int32)sTracingMetaData->Entries()) {
		fIndex = (index <= 0 ? 0 : sTracingMetaData->Entries() + 1);
		fEntry = NULL;
		return NULL;
	}

	// get the shortest iteration path
	int32 distance = index - fIndex;
	int32 direction = distance < 0 ? -1 : 1;
	distance *= direction;

	// starting over from the front is cheaper?
	if (index < distance) {
		distance = index;
		direction = 1;
		fEntry = NULL;
		fIndex = 0;
	}
	// starting over from the back is cheaper?
	if ((int32)sTracingMetaData->Entries() + 1 - fIndex < distance) {
		distance = sTracingMetaData->Entries() + 1 - fIndex;
		direction = -1;
		fEntry = NULL;
		fIndex = sTracingMetaData->Entries() + 1;
	}

	// iterate to the index
	if (direction < 0) {
		while (fIndex != index)
			Previous();
	} else {
		while (fIndex != index)
			Next();
	}

	return Current();
}
1191 
1192 
1193 trace_entry*
1194 TraceEntryIterator::_NextNonBufferEntry(trace_entry* entry)
1195 {
1196 	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
1197 		entry = sTracingMetaData->NextEntry(entry);
1198 
1199 	return entry;
1200 }
1201 
1202 
1203 trace_entry*
1204 TraceEntryIterator::_PreviousNonBufferEntry(trace_entry* entry)
1205 {
1206 	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
1207 		entry = sTracingMetaData->PreviousEntry(entry);
1208 
1209 	return entry;
1210 }
1211 
1212 
/*!	Implements the "traced" debugger command. Parses the command line
	(output flags, continuation keywords, start/count/range, filter
	specification), determines the entry range to check, and prints the
	matching trace entries via kprintf().

	The command is continuable: "forward"/"backward" (or no arguments at
	all) resumes relative to the previous invocation, which is why the
	iteration state lives in function-local statics. Continuation is
	refused when the recorded entries changed since the last invocation.

	\param argc, argv The debugger command line (argv[0] is the command
		name, used in messages).
	\param wrapperFilter Optional filter applied on top of the
		user-specified one (may be NULL).
	\return B_KDEBUG_CONT when invoked in continuation mode (so an empty
		debugger command line reinvokes the command), 0 otherwise.
*/
int
dump_tracing_internal(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
	int argi = 1;

	// variables in which we store our state to be continuable
	static int32 _previousCount = 0;
	static bool _previousHasFilter = false;
	static bool _previousPrintStackTrace = false;
	static int32 _previousMaxToCheck = 0;
	static int32 _previousFirstChecked = 1;
	static int32 _previousLastChecked = -1;
	static int32 _previousDirection = 1;
	static uint32 _previousEntriesEver = 0;
	static uint32 _previousEntries = 0;
	static uint32 _previousOutputFlags = 0;
	static TraceEntryIterator iterator;

	uint32 entriesEver = sTracingMetaData->EntriesEver();

	// Note: start and index are Pascal-like indices (i.e. in [1, Entries()]).
	int32 start = 0;	// special index: print the last count entries
	int32 count = 0;
	int32 maxToCheck = 0;
	int32 cont = 0;		// continuation direction: -1 backward, 1 forward

	bool hasFilter = false;
	bool printStackTrace = false;

	// consume leading "--" output options
	uint32 outputFlags = 0;
	while (argi < argc) {
		if (strcmp(argv[argi], "--difftime") == 0) {
			outputFlags |= TRACE_OUTPUT_DIFF_TIME;
			argi++;
		} else if (strcmp(argv[argi], "--printteam") == 0) {
			outputFlags |= TRACE_OUTPUT_TEAM_ID;
			argi++;
		} else if (strcmp(argv[argi], "--stacktrace") == 0) {
			printStackTrace = true;
			argi++;
		} else
			break;
	}

	// explicit continuation keyword, or implicit continuation when no
	// arguments are given at all
	if (argi < argc) {
		if (strcmp(argv[argi], "forward") == 0) {
			cont = 1;
			argi++;
		} else if (strcmp(argv[argi], "backward") == 0) {
			cont = -1;
			argi++;
		}
	} else
		cont = _previousDirection;

	if (cont != 0) {
		// continuation accepts no further arguments
		if (argi < argc) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
		// refuse to continue when the buffer contents changed meanwhile
		if (entriesEver == 0 || entriesEver != _previousEntriesEver
			|| sTracingMetaData->Entries() != _previousEntries) {
			kprintf("Can't continue iteration. \"%s\" has not been invoked "
				"before, or there were new entries written since the last "
				"invocation.\n", argv[0]);
			return 0;
		}
	}

	// get start, count, maxToCheck
	int32* params[3] = { &start, &count, &maxToCheck };
	for (int i = 0; i < 3 && !hasFilter && argi < argc; i++) {
		if (strcmp(argv[argi], "filter") == 0) {
			hasFilter = true;
			argi++;
		} else if (argv[argi][0] == '#') {
			// a "#pattern" argument starts the filter; don't consume it here
			hasFilter = true;
		} else {
			*params[i] = parse_expression(argv[argi]);
			argi++;
		}
	}

	// filter specification
	if (argi < argc) {
		hasFilter = true;
		if (strcmp(argv[argi], "filter") == 0)
			argi++;

		if (!TraceFilterParser::Default()->Parse(argc - argi, argv + argi)) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	int32 direction;
	int32 firstToCheck;
	int32 lastToCheck;

	if (cont != 0) {
		// get values from the previous iteration
		direction = cont;
		count = _previousCount;
		maxToCheck = _previousMaxToCheck;
		hasFilter = _previousHasFilter;
		outputFlags = _previousOutputFlags;
		printStackTrace = _previousPrintStackTrace;

		if (direction < 0)
			start = _previousFirstChecked - 1;
		else
			start = _previousLastChecked + 1;
	} else {
		// defaults for count and maxToCheck
		if (count == 0)
			count = 30;
		if (maxToCheck == 0 || !hasFilter)
			maxToCheck = count;
		else if (maxToCheck < 0)
			maxToCheck = sTracingMetaData->Entries();

		// determine iteration direction
		direction = (start <= 0 || count < 0 ? -1 : 1);

		// validate count and maxToCheck
		if (count < 0)
			count = -count;
		if (maxToCheck < 0)
			maxToCheck = -maxToCheck;
		if (maxToCheck > (int32)sTracingMetaData->Entries())
			maxToCheck = sTracingMetaData->Entries();
		if (count > maxToCheck)
			count = maxToCheck;

		// validate start
		if (start <= 0 || start > (int32)sTracingMetaData->Entries())
			start = max_c(1, sTracingMetaData->Entries());
	}

	// translate start/maxToCheck into the inclusive index range to inspect
	if (direction < 0) {
		firstToCheck = max_c(1, start - maxToCheck + 1);
		lastToCheck = start;
	} else {
		firstToCheck = start;
		lastToCheck = min_c((int32)sTracingMetaData->Entries(),
			start + maxToCheck - 1);
	}

	// reset the iterator, if something changed in the meantime
	if (entriesEver == 0 || entriesEver != _previousEntriesEver
		|| sTracingMetaData->Entries() != _previousEntries) {
		iterator.Reset();
	}

	LazyTraceOutput out(sTracingMetaData->TraceOutputBuffer(),
		kTraceOutputBufferSize, outputFlags);

	bool markedMatching = false;
	int32 firstToDump = firstToCheck;
	int32 lastToDump = lastToCheck;

	TraceFilter* filter = NULL;
	if (hasFilter)
		filter = TraceFilterParser::Default()->Filter();

	if (wrapperFilter != NULL) {
		wrapperFilter->Init(filter, direction, cont != 0);
		filter = wrapperFilter;
	}

	// When filtering backwards we must first find the last <count> matches
	// in the check range; entries are marked with FILTER_MATCH so the dump
	// loop below doesn't have to run the filter again.
	if (direction < 0 && filter && lastToCheck - firstToCheck >= count) {
		// iteration direction is backwards
		markedMatching = true;

		// From the last entry to check iterate backwards to check filter
		// matches.
		int32 matching = 0;

		// move to the entry after the last entry to check
		iterator.MoveTo(lastToCheck + 1);

		// iterate backwards
		firstToDump = -1;
		lastToDump = -1;
		while (iterator.Index() > firstToCheck) {
			// NOTE(review): Previous() can in principle return NULL;
			// presumably that cannot happen while Index() > firstToCheck >= 1
			// -- verify against TraceEntryIterator's invariants.
			TraceEntry* entry = iterator.Previous();
			if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
				out.Clear();
				if (filter->Filter(entry, out)) {
					entry->ToTraceEntry()->flags |= FILTER_MATCH;
					if (lastToDump == -1)
						lastToDump = iterator.Index();
					firstToDump = iterator.Index();

					matching++;
					if (matching >= count)
						break;
				} else
					entry->ToTraceEntry()->flags &= ~FILTER_MATCH;
			}
		}

		firstToCheck = iterator.Index();

		// iterate to the previous entry, so that the next loop starts at the
		// right one
		iterator.Previous();
	}

	out.SetLastEntryTime(0);

	// set the iterator to the entry before the first one to dump
	iterator.MoveTo(firstToDump - 1);

	// dump the entries matching the filter in the range
	// [firstToDump, lastToDump]
	int32 dumped = 0;

	while (TraceEntry* entry = iterator.Next()) {
		int32 index = iterator.Index();
		if (index < firstToDump)
			continue;
		if (index > lastToDump || dumped >= count) {
			if (direction > 0)
				lastToCheck = index - 1;
			break;
		}

		if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
			out.Clear();
			// with markedMatching the filter result was precomputed above
			if (filter &&  (markedMatching
					? (entry->Flags() & FILTER_MATCH) == 0
					: !filter->Filter(entry, out))) {
				continue;
			}

			// don't print trailing new line
			const char* dump = out.DumpEntry(entry);
			int len = strlen(dump);
			if (len > 0 && dump[len - 1] == '\n')
				len--;

			kprintf("%5ld. %.*s\n", index, len, dump);

			if (printStackTrace) {
				out.Clear();
				entry->DumpStackTrace(out);
				if (out.Size() > 0)
					kputs(out.Buffer());
			}
		} else if (!filter)
			kprintf("%5ld. ** uninitialized entry **\n", index);

		dumped++;
	}

	kprintf("printed %ld entries within range %ld to %ld (%ld of %ld total, "
		"%ld ever)\n", dumped, firstToCheck, lastToCheck,
		lastToCheck - firstToCheck + 1, sTracingMetaData->Entries(),
		entriesEver);

	// store iteration state
	_previousCount = count;
	_previousMaxToCheck = maxToCheck;
	_previousHasFilter = hasFilter;
	_previousPrintStackTrace = printStackTrace;
	_previousFirstChecked = firstToCheck;
	_previousLastChecked = lastToCheck;
	_previousDirection = direction;
	_previousEntriesEver = entriesEver;
	_previousEntries = sTracingMetaData->Entries();
	_previousOutputFlags = outputFlags;

	return cont != 0 ? B_KDEBUG_CONT : 0;
}
1488 
1489 
1490 static int
1491 dump_tracing_command(int argc, char** argv)
1492 {
1493 	return dump_tracing_internal(argc, argv, NULL);
1494 }
1495 
1496 
1497 #endif	// ENABLE_TRACING
1498 
1499 
/*!	Allocates \a size bytes of raw storage inside the tracing buffer and
	returns a pointer to it. The storage is carved out as a BUFFER_ENTRY,
	i.e. it is skipped by the entry iteration and lives until the ring
	buffer wraps around and reclaims it.
	\return A pointer to the usable memory (directly after the entry
		header), or NULL if tracing is disabled or the allocation failed.
*/
extern "C" uint8*
alloc_tracing_buffer(size_t size)
{
#if	ENABLE_TRACING
	// the entry header precedes the caller-usable memory
	trace_entry* entry = sTracingMetaData->AllocateEntry(
		size + sizeof(trace_entry), BUFFER_ENTRY);
	if (entry == NULL)
		return NULL;

	return (uint8*)(entry + 1);
#else
	return NULL;
#endif
}
1514 
1515 
1516 uint8*
1517 alloc_tracing_buffer_memcpy(const void* source, size_t size, bool user)
1518 {
1519 	if (user && !IS_USER_ADDRESS(source))
1520 		return NULL;
1521 
1522 	uint8* buffer = alloc_tracing_buffer(size);
1523 	if (buffer == NULL)
1524 		return NULL;
1525 
1526 	if (user) {
1527 		if (user_memcpy(buffer, source, size) != B_OK)
1528 			return NULL;
1529 	} else
1530 		memcpy(buffer, source, size);
1531 
1532 	return buffer;
1533 }
1534 
1535 
1536 char*
1537 alloc_tracing_buffer_strcpy(const char* source, size_t maxSize, bool user)
1538 {
1539 	if (source == NULL || maxSize == 0)
1540 		return NULL;
1541 
1542 	if (user && !IS_USER_ADDRESS(source))
1543 		return NULL;
1544 
1545 	// limit maxSize to the actual source string len
1546 	if (user) {
1547 		ssize_t size = user_strlcpy(NULL, source, 0);
1548 			// there's no user_strnlen()
1549 		if (size < 0)
1550 			return 0;
1551 		maxSize = min_c(maxSize, (size_t)size + 1);
1552 	} else
1553 		maxSize = strnlen(source, maxSize - 1) + 1;
1554 
1555 	char* buffer = (char*)alloc_tracing_buffer(maxSize);
1556 	if (buffer == NULL)
1557 		return NULL;
1558 
1559 	if (user) {
1560 		if (user_strlcpy(buffer, source, maxSize) < B_OK)
1561 			return NULL;
1562 	} else
1563 		strlcpy(buffer, source, maxSize);
1564 
1565 	return buffer;
1566 }
1567 
1568 
/*!	Captures a stack trace of the current thread and stores it in a
	tracing buffer allocation (a tracing_stack_trace header followed by up
	to \a maxCount return addresses).
	\param maxCount Maximum number of return addresses to record.
	\param skipFrames Number of innermost frames to skip (this function
		adds one more for its own frame).
	\param kernelOnly If true, only kernel frames are captured; forced to
		true when interrupts are disabled (see comment below).
	\return The captured stack trace, or NULL if tracing is disabled or
		the buffer allocation failed.
*/
tracing_stack_trace*
capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool kernelOnly)
{
#if	ENABLE_TRACING
	// page_fault_exception() doesn't allow us to gracefully handle a bad
	// address in the stack trace, if interrupts are disabled, so we always
	// restrict the stack traces to the kernel only in this case. A bad address
	// in the kernel stack trace would still cause a panic(), but this is
	// probably even desired.
	if (!are_interrupts_enabled())
		kernelOnly = true;

	// tracing_stack_trace ends in a flexible array member, so the return
	// addresses are allocated directly behind the struct
	tracing_stack_trace* stackTrace
		= (tracing_stack_trace*)alloc_tracing_buffer(
			sizeof(tracing_stack_trace) + maxCount * sizeof(addr_t));

	if (stackTrace != NULL) {
		stackTrace->depth = arch_debug_get_stack_trace(
			stackTrace->return_addresses, maxCount, 0, skipFrames + 1,
			STACK_TRACE_KERNEL | (kernelOnly ? 0 : STACK_TRACE_USER));
	}

	return stackTrace;
#else
	return NULL;
#endif
}
1596 
1597 
/*!	Public entry point for other debugger commands that want to dump trace
	entries with an additional \a wrapperFilter applied. Forwards to
	dump_tracing_internal(); returns 0 when tracing is compiled out.
*/
int
dump_tracing(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
#if	ENABLE_TRACING
	return dump_tracing_internal(argc, argv, wrapperFilter);
#else
	return 0;
#endif
}
1607 
1608 
/*!	Acquires the tracing metadata lock. Must be balanced with a call to
	unlock_tracing_buffer(). No-op when tracing is compiled out.
*/
void
lock_tracing_buffer()
{
#if ENABLE_TRACING
	sTracingMetaData->Lock();
#endif
}
1616 
1617 
/*!	Releases the tracing metadata lock acquired by lock_tracing_buffer().
	No-op when tracing is compiled out.
*/
void
unlock_tracing_buffer()
{
#if ENABLE_TRACING
	sTracingMetaData->Unlock();
#endif
}
1625 
1626 
1627 extern "C" status_t
1628 tracing_init(void)
1629 {
1630 #if	ENABLE_TRACING
1631 	status_t result = TracingMetaData::Create(sTracingMetaData);
1632 	if (result != B_OK) {
1633 		sTracingMetaData = &sDummyTracingMetaData;
1634 		return result;
1635 	}
1636 
1637 	new(nothrow) TracingLogStartEntry();
1638 
1639 	add_debugger_command_etc("traced", &dump_tracing_command,
1640 		"Dump recorded trace entries",
1641 		"[ --printteam ] [ --difftime ] [ --stacktrace ] "
1642 			"(\"forward\" | \"backward\") "
1643 			"| ([ <start> [ <count> [ <range> ] ] ] "
1644 			"[ #<pattern> | (\"filter\" <filter>) ])\n"
1645 		"Prints recorded trace entries. If \"backward\" or \"forward\" is\n"
1646 		"specified, the command continues where the previous invocation left\n"
1647 		"off, i.e. printing the previous respectively next entries (as many\n"
1648 		"as printed before). In this case the command is continuable, that is\n"
1649 		"afterwards entering an empty line in the debugger will reinvoke it.\n"
1650 		"If no arguments are given, the command continues in the direction\n"
1651 		"of the last invocation.\n"
1652 		"--printteam  - enables printing the entries' team IDs.\n"
1653 		"--difftime   - print difference times for all but the first entry.\n"
1654 		"--stacktrace - print stack traces for entries that captured one.\n"
1655 		"  <start>    - The base index of the entries to print. Depending on\n"
1656 		"               whether the iteration direction is forward or\n"
1657 		"               backward this will be the first or last entry printed\n"
1658 		"               (potentially, if a filter is specified). The index of\n"
1659 		"               the first entry in the trace buffer is 1. If 0 is\n"
1660 		"               specified, the last <count> recorded entries are\n"
1661 		"               printed (iteration direction is backward). Defaults \n"
1662 		"               to 0.\n"
1663 		"  <count>    - The number of entries to be printed. Defaults to 30.\n"
1664 		"               If negative, the -<count> entries before and\n"
1665 		"               including <start> will be printed.\n"
1666 		"  <range>    - Only relevant if a filter is specified. Specifies the\n"
1667 		"               number of entries to be filtered -- depending on the\n"
1668 		"               iteration direction the entries before or after\n"
1669 		"               <start>. If more than <count> entries match the\n"
1670 		"               filter, only the first (forward) or last (backward)\n"
1671 		"               <count> matching entries will be printed. If 0 is\n"
1672 		"               specified <range> will be set to <count>. If -1,\n"
1673 		"               <range> will be set to the number of recorded\n"
1674 		"               entries.\n"
1675 		"  <pattern>  - If specified only entries containing this string are\n"
1676 		"               printed.\n"
1677 		"  <filter>   - If specified only entries matching this filter\n"
1678 		"               expression are printed. The expression can consist of\n"
1679 		"               prefix operators \"not\", \"and\", \"or\", and\n"
1680 		"               filters \"'thread' <thread>\" (matching entries\n"
1681 		"               with the given thread ID), \"'team' <team>\"\n"
1682 						"(matching entries with the given team ID), and\n"
1683 		"               \"#<pattern>\" (matching entries containing the given\n"
1684 		"               string).\n", 0);
1685 #endif	// ENABLE_TRACING
1686 	return B_OK;
1687 }
1688 
1689 
/*!	printf()-style convenience for creating a KernelTraceEntry. The
	formatted message is truncated to 255 characters. No-op when tracing
	is compiled out.
*/
void
ktrace_printf(const char *format, ...)
{
#if	ENABLE_TRACING
	char buffer[256];

	va_list args;
	va_start(args, format);
	vsnprintf(buffer, sizeof(buffer), format, args);
	va_end(args);

	new(nothrow) KernelTraceEntry(buffer);
#endif	// ENABLE_TRACING
}
1705 
1706 
/*!	Syscall backend: records \a message as a UserTraceEntry. No-op when
	tracing is compiled out.
*/
void
_user_ktrace_output(const char *message)
{
#if	ENABLE_TRACING
	// NOTE(review): message is a userland pointer; presumably
	// UserTraceEntry copies it with the user-safe helpers (cf.
	// alloc_tracing_buffer_strcpy()) -- verify in its constructor.
	new(nothrow) UserTraceEntry(message);
#endif	// ENABLE_TRACING
}
1714 
1715