xref: /haiku/src/system/kernel/debug/tracing.cpp (revision 7a74a5df454197933bc6e80a542102362ee98703)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2008-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Copyright 2012, Rene Gollent, rene@gollent.com.
5  * Distributed under the terms of the MIT License.
6  */
7 
8 
9 #include <tracing.h>
10 
11 #include <stdlib.h>
12 
13 #include <algorithm>
14 
15 #include <arch/debug.h>
16 #include <debug.h>
17 #include <debug_heap.h>
18 #include <elf.h>
19 #include <int.h>
20 #include <kernel.h>
21 #include <team.h>
22 #include <thread.h>
23 #include <util/AutoLock.h>
24 #include <vm/vm.h>
25 
26 
27 #if ENABLE_TRACING
28 
29 //#define TRACE_TRACING
30 #ifdef TRACE_TRACING
31 #	define TRACE(x) dprintf_no_syslog x
32 #else
33 #	define TRACE(x) ;
34 #endif
35 
36 
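// Flags stored in trace_entry::flags -- a summary of how this file uses them:
// WRAP_ENTRY marks the pseudo entry written at the buffer end when allocation
// wraps around; ENTRY_INITIALIZED is set by TraceEntry::Initialized() once an
// entry is fully constructed; BUFFER_ENTRY marks raw payload blocks allocated
// via alloc_tracing_buffer(); FILTER_MATCH is used by the "traced" command to
// remember filter matches when iterating backwards; CHECK_ENTRY flags
// recovered entries that still await validation (cf.
// _InitPreviousTracingData()). INVALID_ENTRY is not used in this file.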
37 enum {
38 	WRAP_ENTRY			= 0x01,
39 	ENTRY_INITIALIZED	= 0x02,
40 	BUFFER_ENTRY		= 0x04,
41 	FILTER_MATCH		= 0x08,
42 	INVALID_ENTRY		= 0x10,
43 	CHECK_ENTRY			= 0x20,
44 };
45 
46 
47 static const size_t kTraceOutputBufferSize = 10240;
48 static const size_t kBufferSize = MAX_TRACE_SIZE / sizeof(trace_entry);
49 
50 static const uint32 kMaxRecoveringErrorCount	= 100;
51 static const addr_t kMetaDataBaseAddress		= 32 * 1024 * 1024;
52 static const addr_t kMetaDataBaseEndAddress		= 128 * 1024 * 1024;
53 static const addr_t kMetaDataAddressIncrement	= 8 * 1024 * 1024;
54 static const uint32 kMetaDataMagic1 = 'Vali';
55 static const uint32 kMetaDataMagic2 = 'dTra';
56 static const uint32 kMetaDataMagic3 = 'cing';
57 
58 // the maximum we can address with the trace_entry::[previous_]size fields
59 static const size_t kMaxTracingEntryByteSize
60 	= ((1 << 13) - 1) * sizeof(trace_entry);
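	// (each size field is a 13-bit count of 4-byte trace_entry units -- cf.
	// the trace_entry definition in <tracing.h> -- hence the (1 << 13) - 1)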
61 
62 
63 struct TraceOutputPrint {
64 	TraceOutputPrint(TraceOutput& output)
65 		:
66 		fOutput(output)
67 	{
68 	}
69 
70 	void operator()(const char* format, ...) const
71 	{
72 		va_list args;
73 		va_start(args, format);
74 		fOutput.PrintArgs(format, args);
75 		va_end(args);
76 	}
77 
78 private:
79 	TraceOutput&	fOutput;
80 };
81 
82 
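// TracingMetaData manages the tracing ring buffer and is laid out so that it
// can survive a reboot: it lives in a page found at a well-known physical
// address range (cf. _CreateMetaDataArea()) and is guarded by three magic
// values -- together spelling "ValidTracing" -- so that the next session can
// recognize it, re-map the buffer via fPhysicalAddress, and recover the
// entries.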
83 class TracingMetaData {
84 public:
85 	static	status_t			Create(TracingMetaData*& _metaData);
86 
87 	inline	bool				Lock();
88 	inline	void				Unlock();
89 
90 	inline	trace_entry*		FirstEntry() const;
91 	inline	trace_entry*		AfterLastEntry() const;
92 
93 	inline	uint32				Entries() const;
94 	inline	uint32				EntriesEver() const;
95 
96 	inline	void				IncrementEntriesEver();
97 
98 	inline	char*				TraceOutputBuffer() const;
99 
100 			trace_entry*		NextEntry(trace_entry* entry);
101 			trace_entry*		PreviousEntry(trace_entry* entry);
102 
103 			trace_entry*		AllocateEntry(size_t size, uint16 flags);
104 
105 			bool				IsInBuffer(void* address, size_t size);
106 
107 private:
108 			bool				_FreeFirstEntry();
109 			bool				_MakeSpace(size_t needed);
110 
111 	static	status_t			_CreateMetaDataArea(bool findPrevious,
112 									area_id& _area,
113 									TracingMetaData*& _metaData);
114 			bool				_InitPreviousTracingData();
115 
116 private:
117 			uint32				fMagic1;
118 			trace_entry*		fBuffer;
119 			trace_entry*		fFirstEntry;
120 			trace_entry*		fAfterLastEntry;
121 			uint32				fEntries;
122 			uint32				fMagic2;
123 			uint32				fEntriesEver;
124 			spinlock			fLock;
125 			char*				fTraceOutputBuffer;
126 			phys_addr_t			fPhysicalAddress;
127 			uint32				fMagic3;
128 };
129 
130 static TracingMetaData sDummyTracingMetaData;
131 static TracingMetaData* sTracingMetaData = &sDummyTracingMetaData;
132 static bool sTracingDataRecovered = false;
133 
134 
135 // #pragma mark -
136 
137 
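/*!	Prints \a stackTrace via the printf-like functor \a print (kprintf() or
	TraceOutputPrint in this file). Each return address is resolved with
	elf_debug_lookup_symbol_address() and, if a temporary buffer could be
	allocated from the debug heap, demangled.
*/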
138 template<typename Print>
139 static void
140 print_stack_trace(struct tracing_stack_trace* stackTrace,
141 	const Print& print)
142 {
143 	if (stackTrace == NULL || stackTrace->depth <= 0)
144 		return;
145 
146 	static const size_t kBufferSize = 256;
147 	char* buffer = (char*)debug_malloc(kBufferSize);
148 
149 	for (int32 i = 0; i < stackTrace->depth; i++) {
150 		addr_t address = stackTrace->return_addresses[i];
151 
152 		const char* symbol;
153 		const char* demangledName = NULL;
154 		const char* imageName;
155 		bool exactMatch;
156 		addr_t baseAddress;
157 
158 		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
159 				&imageName, &exactMatch) == B_OK) {
160 
161 			if (buffer != NULL) {
162 				bool isObjectMethod;
163 				demangledName = debug_demangle_symbol(symbol, buffer,
164 					kBufferSize, &isObjectMethod);
165 			}
166 
167 			print("  %p  %s + 0x%lx (%s)%s\n", (void*)address,
168 				demangledName != NULL ? demangledName : symbol,
169 				address - baseAddress, imageName,
170 				exactMatch ? "" : " (nearest)");
171 		} else
172 			print("  %p\n", (void*)address);
173 	}
174 
175 	if (buffer != NULL)
176 		debug_free(buffer);
177 }
178 
179 
180 // #pragma mark - TracingMetaData
181 
182 
183 bool
184 TracingMetaData::Lock()
185 {
186 	acquire_spinlock(&fLock);
187 	return true;
188 }
189 
190 
191 void
192 TracingMetaData::Unlock()
193 {
194 	release_spinlock(&fLock);
195 }
196 
197 
198 trace_entry*
199 TracingMetaData::FirstEntry() const
200 {
201 	return fFirstEntry;
202 }
203 
204 
205 trace_entry*
206 TracingMetaData::AfterLastEntry() const
207 {
208 	return fAfterLastEntry;
209 }
210 
211 
212 uint32
213 TracingMetaData::Entries() const
214 {
215 	return fEntries;
216 }
217 
218 
219 uint32
220 TracingMetaData::EntriesEver() const
221 {
222 	return fEntriesEver;
223 }
224 
225 
226 void
227 TracingMetaData::IncrementEntriesEver()
228 {
229 	fEntriesEver++;
230 		// NOTE: Race condition on SMP machines! We should use atomic_add(),
231 		// though that costs some performance and the value is only
232 		// informational anyway.
233 }
234 
235 
236 char*
237 TracingMetaData::TraceOutputBuffer() const
238 {
239 	return fTraceOutputBuffer;
240 }
241 
242 
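/*!	Returns the entry following \a entry, or \c NULL if \a entry is the last
	one. The size fields count 4-byte trace_entry units; a WRAP_ENTRY at the
	buffer end redirects the walk back to fBuffer.
*/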
243 trace_entry*
244 TracingMetaData::NextEntry(trace_entry* entry)
245 {
246 	entry += entry->size;
247 	if ((entry->flags & WRAP_ENTRY) != 0)
248 		entry = fBuffer;
249 
250 	if (entry == fAfterLastEntry)
251 		return NULL;
252 
253 	return entry;
254 }
255 
256 
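/*!	Returns the entry preceding \a entry, or \c NULL if \a entry is the
	first one. At the buffer start the walk locates the wrap entry at the
	buffer end (its offset is recorded in fBuffer's previous_size) and steps
	over it.
*/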
257 trace_entry*
258 TracingMetaData::PreviousEntry(trace_entry* entry)
259 {
260 	if (entry == fFirstEntry)
261 		return NULL;
262 
263 	if (entry == fBuffer) {
264 		// beginning of buffer -- previous entry is a wrap entry
265 		entry = fBuffer + kBufferSize - entry->previous_size;
266 	}
267 
268 	return entry - entry->previous_size;
269 }
270 
271 
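/*!	Allocates \a size bytes (rounded up to 4-byte trace_entry units) in the
	ring buffer, evicting the oldest entries as needed. The returned header
	is immediately followed by the caller's payload; both
	TraceEntry::operator new and alloc_tracing_buffer() build on this.
*/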
272 trace_entry*
273 TracingMetaData::AllocateEntry(size_t size, uint16 flags)
274 {
275 	if (fAfterLastEntry == NULL || size == 0
276 		|| size >= kMaxTracingEntryByteSize) {
277 		return NULL;
278 	}
279 
280 	InterruptsSpinLocker _(fLock);
281 
282 	size = (size + 3) >> 2;
283 		// round up to 4-byte trace_entry units; the size fields count units
284 
285 	TRACE(("AllocateEntry(%lu), start %p, end %p, buffer %p\n", size * 4,
286 		fFirstEntry, fAfterLastEntry, fBuffer));
287 
288 	if (!_MakeSpace(size))
289 		return NULL;
290 
291 	trace_entry* entry = fAfterLastEntry;
292 	entry->size = size;
293 	entry->flags = flags;
294 	fAfterLastEntry += size;
295 	fAfterLastEntry->previous_size = size;
296 
297 	if (!(flags & BUFFER_ENTRY))
298 		fEntries++;
299 
300 	TRACE(("  entry: %p, end %p, start %p, entries %ld\n", entry,
301 		fAfterLastEntry, fFirstEntry, fEntries));
302 
303 	return entry;
304 }
305 
306 
307 bool
308 TracingMetaData::IsInBuffer(void* address, size_t size)
309 {
310 	if (fEntries == 0)
311 		return false;
312 
313 	addr_t start = (addr_t)address;
314 	addr_t end = start + size;
315 
316 	if (start < (addr_t)fBuffer || end > (addr_t)(fBuffer + kBufferSize))
317 		return false;
318 
319 	if (fFirstEntry > fAfterLastEntry)
320 		return start >= (addr_t)fFirstEntry || end <= (addr_t)fAfterLastEntry;
321 
322 	return start >= (addr_t)fFirstEntry && end <= (addr_t)fAfterLastEntry;
323 }
324 
325 
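/*!	Discards the oldest entry so that its space can be reused. Buffer
	entries are simply skipped, initialized TraceEntries are dropped without
	running their destructor, and a not yet fully initialized entry blocks
	eviction, since its constructor may still be writing to the memory.
*/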
326 bool
327 TracingMetaData::_FreeFirstEntry()
328 {
329 	TRACE(("  skip start %p, %lu*4 bytes\n", fFirstEntry, fFirstEntry->size));
330 
331 	trace_entry* newFirst = NextEntry(fFirstEntry);
332 
333 	if (fFirstEntry->flags & BUFFER_ENTRY) {
334 		// a buffer entry -- just skip it
335 	} else if (fFirstEntry->flags & ENTRY_INITIALIZED) {
336 		// Fully initialized TraceEntry: We could destroy it, but don't do
337 		// so for the sake of robustness. The destructors of tracing entry
338 		// classes should be empty anyway.
339 		fEntries--;
340 	} else {
341 		// Not fully initialized TraceEntry. We can't free it, since its
342 		// constructor might still be writing to the memory and would
343 		// overwrite data of the entry we're going to allocate. We can't
344 		// do anything until this entry can be discarded.
345 		return false;
346 	}
347 
348 	if (newFirst == NULL) {
349 		// everything is freed -- in practice this can't happen, provided
350 		// the buffer is large enough to hold three max-sized entries
351 		fFirstEntry = fAfterLastEntry = fBuffer;
352 		TRACE(("_FreeFirstEntry(): all entries freed!\n"));
353 	} else
354 		fFirstEntry = newFirst;
355 
356 	return true;
357 }
358 
359 
360 /*!	Ensures \a needed * 4 bytes of memory are free at fAfterLastEntry,
361 	evicting old entries as necessary. Returns \c false if unable to do so.
362 */
363 bool
364 TracingMetaData::_MakeSpace(size_t needed)
365 {
366 	// we need space for fAfterLastEntry, too (in case we need to wrap around
367 	// later)
368 	needed++;
369 
370 	// If there's not enough space (free or occupied) after fAfterLastEntry,
371 	// we free all entries in that region and wrap around.
372 	if (fAfterLastEntry + needed > fBuffer + kBufferSize) {
373 		TRACE(("_MakeSpace(%lu), wrapping around: after last: %p\n", needed,
374 			fAfterLastEntry));
375 
376 		// Free all entries after fAfterLastEntry and one more at the beginning
377 		// of the buffer.
378 		while (fFirstEntry > fAfterLastEntry) {
379 			if (!_FreeFirstEntry())
380 				return false;
381 		}
382 		if (fAfterLastEntry != fBuffer && !_FreeFirstEntry())
383 			return false;
384 
385 		// just in case _FreeFirstEntry() freed the very last existing entry
386 		if (fAfterLastEntry == fBuffer)
387 			return true;
388 
389 		// mark as wrap entry and actually wrap around
390 		trace_entry* wrapEntry = fAfterLastEntry;
391 		wrapEntry->size = 0;
392 		wrapEntry->flags = WRAP_ENTRY;
393 		fAfterLastEntry = fBuffer;
394 		fAfterLastEntry->previous_size = fBuffer + kBufferSize - wrapEntry;
395 	}
396 
397 	if (fFirstEntry <= fAfterLastEntry) {
398 		// buffer is empty or the space after fAfterLastEntry is unoccupied
399 		return true;
400 	}
401 
402 	// free the first entries, until there's enough space
403 	size_t space = fFirstEntry - fAfterLastEntry;
404 
405 	if (space < needed) {
406 		TRACE(("_MakeSpace(%lu), left %ld\n", needed, space));
407 	}
408 
409 	while (space < needed) {
410 		space += fFirstEntry->size;
411 
412 		if (!_FreeFirstEntry())
413 			return false;
414 	}
415 
416 	TRACE(("  out: start %p, entries %ld\n", fFirstEntry, fEntries));
417 
418 	return true;
419 }
420 
421 
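/*!	Sets up the tracing meta data and the log buffer. First an attempt is
	made to recover the data of a previous session; failing that, a fresh
	B_CONTIGUOUS area is created for output buffer and log, and its physical
	address is recorded so that a later session can re-map it.
*/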
422 /*static*/ status_t
423 TracingMetaData::Create(TracingMetaData*& _metaData)
424 {
425 	// search meta data in memory (from previous session)
426 	area_id area;
427 	TracingMetaData* metaData;
428 	status_t error = _CreateMetaDataArea(true, area, metaData);
429 	if (error == B_OK) {
430 		if (metaData->_InitPreviousTracingData()) {
431 			_metaData = metaData;
432 			return B_OK;
433 		}
434 
435 		dprintf("Found previous tracing meta data, but failed to init.\n");
436 
437 		// invalidate the meta data
438 		metaData->fMagic1 = 0;
439 		metaData->fMagic2 = 0;
440 		metaData->fMagic3 = 0;
441 		delete_area(area);
442 	} else
443 		dprintf("No previous tracing meta data found.\n");
444 
445 	// no previous tracing data found -- create new one
446 	error = _CreateMetaDataArea(false, area, metaData);
447 	if (error != B_OK)
448 		return error;
449 
450 	virtual_address_restrictions virtualRestrictions = {};
451 	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
452 	physical_address_restrictions physicalRestrictions = {};
453 	area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
454 		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
455 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
456 		&virtualRestrictions, &physicalRestrictions,
457 		(void**)&metaData->fTraceOutputBuffer);
458 	if (area < 0)
459 		return area;
460 
461 	// get the physical address
462 	physical_entry physicalEntry;
463 	if (get_memory_map(metaData->fTraceOutputBuffer, B_PAGE_SIZE,
464 			&physicalEntry, 1) == B_OK) {
465 		metaData->fPhysicalAddress = physicalEntry.address;
466 	} else {
467 		dprintf("TracingMetaData::Create(): failed to get physical address "
468 			"of tracing buffer\n");
469 		metaData->fPhysicalAddress = 0;
470 	}
471 
472 	metaData->fBuffer = (trace_entry*)(metaData->fTraceOutputBuffer
473 		+ kTraceOutputBufferSize);
474 	metaData->fFirstEntry = metaData->fBuffer;
475 	metaData->fAfterLastEntry = metaData->fBuffer;
476 
477 	metaData->fEntries = 0;
478 	metaData->fEntriesEver = 0;
479 	B_INITIALIZE_SPINLOCK(&metaData->fLock);
480 
481 	metaData->fMagic1 = kMetaDataMagic1;
482 	metaData->fMagic2 = kMetaDataMagic2;
483 	metaData->fMagic3 = kMetaDataMagic3;
484 
485 	_metaData = metaData;
486 	return B_OK;
487 }
488 
489 
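/*!	Creates the one-page meta data area. Candidate pages are probed at fixed
	physical addresses (kMetaDataBaseAddress through kMetaDataBaseEndAddress
	in kMetaDataAddressIncrement steps) and mapped with
	CREATE_AREA_DONT_CLEAR, so that with \a findPrevious the meta data of a
	previous session can be recognized by its magic values.
*/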
490 /*static*/ status_t
491 TracingMetaData::_CreateMetaDataArea(bool findPrevious, area_id& _area,
492 	TracingMetaData*& _metaData)
493 {
494 	// search meta data in memory (from previous session)
495 	TracingMetaData* metaData;
496 	phys_addr_t metaDataAddress = kMetaDataBaseAddress;
497 	for (; metaDataAddress <= kMetaDataBaseEndAddress;
498 			metaDataAddress += kMetaDataAddressIncrement) {
499 		virtual_address_restrictions virtualRestrictions = {};
500 		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
501 		physical_address_restrictions physicalRestrictions = {};
502 		physicalRestrictions.low_address = metaDataAddress;
503 		physicalRestrictions.high_address = metaDataAddress + B_PAGE_SIZE;
504 		area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata",
505 			B_PAGE_SIZE, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
506 			CREATE_AREA_DONT_CLEAR, &virtualRestrictions, &physicalRestrictions,
507 			(void**)&metaData);
508 		if (area < 0)
509 			continue;
510 
511 		if (!findPrevious) {
512 			_area = area;
513 			_metaData = metaData;
514 			return B_OK;
515 		}
516 
517 		if (metaData->fMagic1 == kMetaDataMagic1
518 			&& metaData->fMagic2 == kMetaDataMagic2
519 			&& metaData->fMagic3 == kMetaDataMagic3) {
520 			_area = area;
521 			_metaData = metaData;
522 			return B_OK;
523 		}
524 
525 		delete_area(area);
526 	}
527 
528 	return B_ENTRY_NOT_FOUND;
529 }
530 
531 
532 bool
533 TracingMetaData::_InitPreviousTracingData()
534 {
535 	// TODO: ATM re-attaching the previous tracing buffer doesn't work very
536 	// well. The entries should be checked more thoroughly for validity -- e.g.
537 	// the pointers to the entries' vtable pointers could be invalid, which can
538 	// make the "traced" command quite unusable. The validity of the entries
539 	// could be checked in a safe environment (i.e. with a fault handler) with
540 	// typeid() and call of a virtual function.
541 	return false;
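		// NOTE: The early return above makes the recovery code below
		// unreachable for now; see the TODO at the top of this function.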
542 
543 	addr_t bufferStart
544 		= (addr_t)fTraceOutputBuffer + kTraceOutputBufferSize;
545 	addr_t bufferEnd = bufferStart + MAX_TRACE_SIZE;
546 
547 	if (bufferStart > bufferEnd || (addr_t)fBuffer != bufferStart
548 		|| (addr_t)fFirstEntry % sizeof(trace_entry) != 0
549 		|| (addr_t)fFirstEntry < bufferStart
550 		|| (addr_t)fFirstEntry + sizeof(trace_entry) >= bufferEnd
551 		|| (addr_t)fAfterLastEntry % sizeof(trace_entry) != 0
552 		|| (addr_t)fAfterLastEntry < bufferStart
553 		|| (addr_t)fAfterLastEntry > bufferEnd
554 		|| fPhysicalAddress == 0) {
555 		dprintf("Failed to init tracing meta data: Sanity checks "
556 			"failed.\n");
557 		return false;
558 	}
559 
560 	// re-map the previous tracing buffer
561 	virtual_address_restrictions virtualRestrictions = {};
562 	virtualRestrictions.address = fTraceOutputBuffer;
563 	virtualRestrictions.address_specification = B_EXACT_ADDRESS;
564 	physical_address_restrictions physicalRestrictions = {};
565 	physicalRestrictions.low_address = fPhysicalAddress;
566 	physicalRestrictions.high_address = fPhysicalAddress
567 		+ ROUNDUP(kTraceOutputBufferSize + MAX_TRACE_SIZE, B_PAGE_SIZE);
568 	area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
569 		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
570 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_CLEAR,
571 		&virtualRestrictions, &physicalRestrictions, NULL);
572 	if (area < 0) {
573 		dprintf("Failed to init tracing meta data: Mapping tracing log "
574 			"buffer failed: %s\n", strerror(area));
575 		return false;
576 	}
577 
578 	dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n",
579 		fTraceOutputBuffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);
580 
581 	// verify/repair the tracing entry list
582 	uint32 errorCount = 0;
583 	uint32 entryCount = 0;
584 	uint32 nonBufferEntryCount = 0;
585 	uint32 previousEntrySize = 0;
586 	trace_entry* entry = fFirstEntry;
587 	while (errorCount <= kMaxRecoveringErrorCount) {
588 		// check previous entry size
589 		if (entry->previous_size != previousEntrySize) {
590 			if (entry != fFirstEntry) {
591 				dprintf("ktrace recovering: entry %p: fixing previous_size: "
592 					"%lu (should be %lu)\n", entry, entry->previous_size,
593 					previousEntrySize);
594 				errorCount++;
595 			}
596 			entry->previous_size = previousEntrySize;
597 		}
598 
599 		if (entry == fAfterLastEntry)
600 			break;
601 
602 		// check size field
603 		if ((entry->flags & WRAP_ENTRY) == 0 && entry->size == 0) {
604 			dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
605 				entry);
606 			errorCount++;
607 			fAfterLastEntry = entry;
608 			break;
609 		}
610 
611 		if (entry->size > uint32(fBuffer + kBufferSize - entry)) {
612 			dprintf("ktrace recovering: entry %p: size too big: %lu\n", entry,
613 				entry->size);
614 			errorCount++;
615 			fAfterLastEntry = entry;
616 			break;
617 		}
618 
619 		if (entry < fAfterLastEntry && entry + entry->size > fAfterLastEntry) {
620 			dprintf("ktrace recovering: entry %p: entry crosses "
621 				"fAfterLastEntry (%p)\n", entry, fAfterLastEntry);
622 			errorCount++;
623 			fAfterLastEntry = entry;
624 			break;
625 		}
626 
627 		// check for wrap entry
628 		if ((entry->flags & WRAP_ENTRY) != 0) {
629 			if ((uint32)(fBuffer + kBufferSize - entry)
630 					> kMaxTracingEntryByteSize / sizeof(trace_entry)) {
631 				dprintf("ktrace recovering: entry %p: wrap entry at invalid "
632 					"buffer location\n", entry);
633 				errorCount++;
634 			}
635 
636 			if (entry->size != 0) {
637 				dprintf("ktrace recovering: entry %p: invalid wrap entry "
638 					"size: %lu\n", entry, entry->size);
639 				errorCount++;
640 				entry->size = 0;
641 			}
642 
643 			previousEntrySize = fBuffer + kBufferSize - entry;
644 			entry = fBuffer;
645 			continue;
646 		}
647 
648 		if ((entry->flags & BUFFER_ENTRY) == 0) {
649 			entry->flags |= CHECK_ENTRY;
650 			nonBufferEntryCount++;
651 		}
652 
653 		entryCount++;
654 		previousEntrySize = entry->size;
655 
656 		entry += entry->size;
657 	}
658 
659 	if (errorCount > kMaxRecoveringErrorCount) {
660 		dprintf("ktrace recovering: Too many errors.\n");
661 		fAfterLastEntry = entry;
662 		fAfterLastEntry->previous_size = previousEntrySize;
663 	}
664 
665 	dprintf("ktrace recovering: Recovered %lu entries + %lu buffer entries "
666 		"from previous session. Expected %lu entries.\n", nonBufferEntryCount,
667 		entryCount - nonBufferEntryCount, fEntries);
668 	fEntries = nonBufferEntryCount;
669 
670 	B_INITIALIZE_SPINLOCK(&fLock);
671 
672 	// TODO: Actually check the entries! Do that when first accessing the
673 	// tracing buffer from the kernel debugger (when sTracingDataRecovered is
674 	// true).
675 	sTracingDataRecovered = true;
676 	return true;
677 }
678 
679 
680 #endif	// ENABLE_TRACING
681 
682 
683 // #pragma mark -
684 
685 
686 TraceOutput::TraceOutput(char* buffer, size_t bufferSize, uint32 flags)
687 	: fBuffer(buffer),
688 	  fCapacity(bufferSize),
689 	  fFlags(flags)
690 {
691 	Clear();
692 }
693 
694 
695 void
696 TraceOutput::Clear()
697 {
698 	if (fCapacity > 0)
699 		fBuffer[0] = '\0';
700 	fSize = 0;
701 }
702 
703 
704 void
705 TraceOutput::PrintArgs(const char* format, va_list args)
706 {
707 #if ENABLE_TRACING
708 	if (IsFull())
709 		return;
710 
711 	size_t length = vsnprintf(fBuffer + fSize, fCapacity - fSize, format, args);
712 	fSize += std::min(length, fCapacity - fSize - 1);
713 #endif
714 }
715 
716 
717 void
718 TraceOutput::PrintStackTrace(tracing_stack_trace* stackTrace)
719 {
720 #if ENABLE_TRACING
721 	print_stack_trace(stackTrace, TraceOutputPrint(*this));
741 #endif
742 }
743 
744 
745 void
746 TraceOutput::SetLastEntryTime(bigtime_t time)
747 {
748 	fLastEntryTime = time;
749 }
750 
751 
752 bigtime_t
753 TraceOutput::LastEntryTime() const
754 {
755 	return fLastEntryTime;
756 }
757 
758 
759 //	#pragma mark -
760 
761 
762 TraceEntry::TraceEntry()
763 {
764 }
765 
766 
767 TraceEntry::~TraceEntry()
768 {
769 }
770 
771 
772 void
773 TraceEntry::Dump(TraceOutput& out)
774 {
775 #if ENABLE_TRACING
776 	// to be overridden by subclasses
777 	out.Print("ENTRY %p", this);
778 #endif
779 }
780 
781 
782 void
783 TraceEntry::DumpStackTrace(TraceOutput& out)
784 {
785 }
786 
787 
788 void
789 TraceEntry::Initialized()
790 {
791 #if ENABLE_TRACING
792 	ToTraceEntry()->flags |= ENTRY_INITIALIZED;
793 	sTracingMetaData->IncrementEntriesEver();
794 #endif
795 }
796 
797 
798 void*
799 TraceEntry::operator new(size_t size, const std::nothrow_t&) throw()
800 {
801 #if ENABLE_TRACING
802 	trace_entry* entry = sTracingMetaData->AllocateEntry(
803 		size + sizeof(trace_entry), 0);
804 	return entry != NULL ? entry + 1 : NULL;
805 #endif
806 	return NULL;
807 }
808 
809 
810 //	#pragma mark -
811 
812 
813 AbstractTraceEntry::~AbstractTraceEntry()
814 {
815 }
816 
817 
818 void
819 AbstractTraceEntry::Dump(TraceOutput& out)
820 {
821 	bigtime_t time = (out.Flags() & TRACE_OUTPUT_DIFF_TIME)
822 		? fTime - out.LastEntryTime()
823 		: fTime;
824 
825 	if (out.Flags() & TRACE_OUTPUT_TEAM_ID)
826 		out.Print("[%6ld:%6ld] %10Ld: ", fThread, fTeam, time);
827 	else
828 		out.Print("[%6ld] %10Ld: ", fThread, time);
829 
830 	AddDump(out);
831 
832 	out.SetLastEntryTime(fTime);
833 }
834 
835 
836 void
837 AbstractTraceEntry::AddDump(TraceOutput& out)
838 {
839 }
840 
841 
842 void
843 AbstractTraceEntry::_Init()
844 {
845 	Thread* thread = thread_get_current_thread();
846 	if (thread != NULL) {
847 		fThread = thread->id;
848 		if (thread->team)
849 			fTeam = thread->team->id;
850 	}
851 	fTime = system_time();
852 }
853 
854 
855 //	#pragma mark - AbstractTraceEntryWithStackTrace
856 
857 
859 AbstractTraceEntryWithStackTrace::AbstractTraceEntryWithStackTrace(
860 	size_t stackTraceDepth, size_t skipFrames, bool kernelOnly)
861 {
862 	fStackTrace = capture_tracing_stack_trace(stackTraceDepth, skipFrames + 1,
863 		kernelOnly);
864 }
865 
866 
867 void
868 AbstractTraceEntryWithStackTrace::DumpStackTrace(TraceOutput& out)
869 {
870 	out.PrintStackTrace(fStackTrace);
871 }
872 
873 
874 //	#pragma mark -
875 
876 
877 #if ENABLE_TRACING
878 
879 class KernelTraceEntry : public AbstractTraceEntry {
880 	public:
881 		KernelTraceEntry(const char* message)
882 		{
883 			fMessage = alloc_tracing_buffer_strcpy(message, 256, false);
884 
885 #if KTRACE_PRINTF_STACK_TRACE
886 			fStackTrace = capture_tracing_stack_trace(
887 				KTRACE_PRINTF_STACK_TRACE, 1, false);
888 #endif
889 			Initialized();
890 		}
891 
892 		virtual void AddDump(TraceOutput& out)
893 		{
894 			out.Print("kern: %s", fMessage);
895 		}
896 
897 #if KTRACE_PRINTF_STACK_TRACE
898 		virtual void DumpStackTrace(TraceOutput& out)
899 		{
900 			out.PrintStackTrace(fStackTrace);
901 		}
902 #endif
903 
904 	private:
905 		char*	fMessage;
906 #if KTRACE_PRINTF_STACK_TRACE
907 		tracing_stack_trace* fStackTrace;
908 #endif
909 };
910 
911 
912 class UserTraceEntry : public AbstractTraceEntry {
913 	public:
914 		UserTraceEntry(const char* message)
915 		{
916 			fMessage = alloc_tracing_buffer_strcpy(message, 256, true);
917 
918 #if KTRACE_PRINTF_STACK_TRACE
919 			fStackTrace = capture_tracing_stack_trace(
920 				KTRACE_PRINTF_STACK_TRACE, 1, false);
921 #endif
922 			Initialized();
923 		}
924 
925 		virtual void AddDump(TraceOutput& out)
926 		{
927 			out.Print("user: %s", fMessage);
928 		}
929 
930 #if KTRACE_PRINTF_STACK_TRACE
931 		virtual void DumpStackTrace(TraceOutput& out)
932 		{
933 			out.PrintStackTrace(fStackTrace);
934 		}
935 #endif
936 
937 	private:
938 		char*	fMessage;
939 #if KTRACE_PRINTF_STACK_TRACE
940 		tracing_stack_trace* fStackTrace;
941 #endif
942 };
943 
944 
945 class TracingLogStartEntry : public AbstractTraceEntry {
946 	public:
947 		TracingLogStartEntry()
948 		{
949 			Initialized();
950 		}
951 
952 		virtual void AddDump(TraceOutput& out)
953 		{
954 			out.Print("ktrace start");
955 		}
956 };
957 
958 #endif	// ENABLE_TRACING
959 
960 
961 //	#pragma mark - trace filters
962 
963 
964 TraceFilter::~TraceFilter()
965 {
966 }
967 
968 
969 bool
970 TraceFilter::Filter(const TraceEntry* entry, LazyTraceOutput& out)
971 {
972 	return false;
973 }
974 
975 
977 class ThreadTraceFilter : public TraceFilter {
978 public:
979 	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
980 	{
981 		const AbstractTraceEntry* entry
982 			= dynamic_cast<const AbstractTraceEntry*>(_entry);
983 		return (entry != NULL && entry->ThreadID() == fThread);
984 	}
985 };
986 
987 
988 class TeamTraceFilter : public TraceFilter {
989 public:
990 	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
991 	{
992 		const AbstractTraceEntry* entry
993 			= dynamic_cast<const AbstractTraceEntry*>(_entry);
994 		return (entry != NULL && entry->TeamID() == fTeam);
995 	}
996 };
997 
998 
999 class PatternTraceFilter : public TraceFilter {
1000 public:
1001 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1002 	{
1003 		return strstr(out.DumpEntry(entry), fString) != NULL;
1004 	}
1005 };
1006 
1007 
1008 class DecimalPatternTraceFilter : public TraceFilter {
1009 public:
1010 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1011 	{
1012 		// TODO: this is *very* slow
1013 		char buffer[64];
1014 		snprintf(buffer, sizeof(buffer), "%Ld", fValue);
1015 		return strstr(out.DumpEntry(entry), buffer) != NULL;
1016 	}
1017 };
1018 
1019 class HexPatternTraceFilter : public TraceFilter {
1020 public:
1021 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1022 	{
1023 		// TODO: this is *very* slow
1024 		char buffer[64];
1025 		snprintf(buffer, sizeof(buffer), "%Lx", fValue);
1026 		return strstr(out.DumpEntry(entry), buffer) != NULL;
1027 	}
1028 };
1029 
1030 class StringPatternTraceFilter : public TraceFilter {
1031 public:
1032 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1033 	{
1034 		if (IS_KERNEL_ADDRESS(fValue))
1035 			return strstr(out.DumpEntry(entry), (const char*)fValue) != NULL;
1036 
1037 		// TODO: this is *very* slow
1038 		char buffer[64];
1039 		user_strlcpy(buffer, (const char*)fValue, sizeof(buffer));
1040 		return strstr(out.DumpEntry(entry), buffer) != NULL;
1041 	}
1042 };
1043 
1044 class NotTraceFilter : public TraceFilter {
1045 public:
1046 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1047 	{
1048 		return !fSubFilters.first->Filter(entry, out);
1049 	}
1050 };
1051 
1052 
1053 class AndTraceFilter : public TraceFilter {
1054 public:
1055 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1056 	{
1057 		return fSubFilters.first->Filter(entry, out)
1058 			&& fSubFilters.second->Filter(entry, out);
1059 	}
1060 };
1061 
1062 
1063 class OrTraceFilter : public TraceFilter {
1064 public:
1065 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1066 	{
1067 		return fSubFilters.first->Filter(entry, out)
1068 			|| fSubFilters.second->Filter(entry, out);
1069 	}
1070 };
1071 
1072 
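// TraceFilterParser turns the argument list of the "traced" command into a
// filter tree by recursive descent: "not"/"and"/"or" are prefix operators,
// "thread <id>"/"team <id>" match by ID, and "#<string>", "d#<value>",
// "x#<value>", and "s#<address>" match against the dumped entry text. All
// nodes are constructed via placement new in the fixed fFilters array.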
1073 class TraceFilterParser {
1074 public:
1075 	static TraceFilterParser* Default()
1076 	{
1077 		return &sParser;
1078 	}
1079 
1080 	bool Parse(int argc, const char* const* argv)
1081 	{
1082 		fTokens = argv;
1083 		fTokenCount = argc;
1084 		fTokenIndex = 0;
1085 		fFilterCount = 0;
1086 
1087 		TraceFilter* filter = _ParseExpression();
1088 		return fTokenIndex == fTokenCount && filter != NULL;
1089 	}
1090 
1091 	TraceFilter* Filter()
1092 	{
1093 		return &fFilters[0];
1094 	}
1095 
1096 private:
1097 	TraceFilter* _ParseExpression()
1098 	{
1099 		const char* token = _NextToken();
1100 		if (!token) {
1101 			// unexpected end of expression
1102 			return NULL;
1103 		}
1104 
1105 		if (fFilterCount == MAX_FILTERS) {
1106 			// too many filters
1107 			return NULL;
1108 		}
1109 
1110 		if (token[0] == '#') {
1111 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1112 				PatternTraceFilter;
1113 			filter->fString = token + 1;
1114 			return filter;
1115 		} else if (token[0] == 'd' && token[1] == '#') {
1116 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1117 				DecimalPatternTraceFilter;
1118 			filter->fValue = parse_expression(token + 2);
1119 			return filter;
1120 		} else if (token[0] == 'x' && token[1] == '#') {
1121 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1122 				HexPatternTraceFilter;
1123 			filter->fValue = parse_expression(token + 2);
1124 			return filter;
1125 		} else if (token[0] == 's' && token[1] == '#') {
1126 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1127 				StringPatternTraceFilter;
1128 			filter->fValue = parse_expression(token + 2);
1129 			return filter;
1130 		} else if (strcmp(token, "not") == 0) {
1131 			TraceFilter* filter = new(&fFilters[fFilterCount++]) NotTraceFilter;
1132 			if ((filter->fSubFilters.first = _ParseExpression()) != NULL)
1133 				return filter;
1134 			return NULL;
1135 		} else if (strcmp(token, "and") == 0) {
1136 			TraceFilter* filter = new(&fFilters[fFilterCount++]) AndTraceFilter;
1137 			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
1138 				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
1139 				return filter;
1140 			}
1141 			return NULL;
1142 		} else if (strcmp(token, "or") == 0) {
1143 			TraceFilter* filter = new(&fFilters[fFilterCount++]) OrTraceFilter;
1144 			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
1145 				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
1146 				return filter;
1147 			}
1148 			return NULL;
1149 		} else if (strcmp(token, "thread") == 0) {
1150 			const char* arg = _NextToken();
1151 			if (arg == NULL) {
1152 				// unexpected end of expression
1153 				return NULL;
1154 			}
1155 
1156 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1157 				ThreadTraceFilter;
1158 			filter->fThread = strtol(arg, NULL, 0);
1159 			return filter;
1160 		} else if (strcmp(token, "team") == 0) {
1161 			const char* arg = _NextToken();
1162 			if (arg == NULL) {
1163 				// unexpected end of expression
1164 				return NULL;
1165 			}
1166 
1167 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1168 				TeamTraceFilter;
1169 			filter->fTeam = strtol(arg, NULL, 0);
1170 			return filter;
1171 		} else {
1172 			// invalid token
1173 			return NULL;
1174 		}
1175 	}
1176 
1177 	const char* _CurrentToken() const
1178 	{
1179 		if (fTokenIndex >= 1 && fTokenIndex <= fTokenCount)
1180 			return fTokens[fTokenIndex - 1];
1181 		return NULL;
1182 	}
1183 
1184 	const char* _NextToken()
1185 	{
1186 		if (fTokenIndex >= fTokenCount)
1187 			return NULL;
1188 		return fTokens[fTokenIndex++];
1189 	}
1190 
1191 private:
1192 	enum { MAX_FILTERS = 32 };
1193 
1194 	const char* const*			fTokens;
1195 	int							fTokenCount;
1196 	int							fTokenIndex;
1197 	TraceFilter					fFilters[MAX_FILTERS];
1198 	int							fFilterCount;
1199 
1200 	static TraceFilterParser	sParser;
1201 };
1202 
1203 
1204 TraceFilterParser TraceFilterParser::sParser;
1205 
1206 
1207 //	#pragma mark -
1208 
1209 
1210 #if ENABLE_TRACING
1211 
1212 
1213 TraceEntry*
1214 TraceEntryIterator::Next()
1215 {
1216 	if (fIndex == 0) {
1217 		fEntry = _NextNonBufferEntry(sTracingMetaData->FirstEntry());
1218 		fIndex = 1;
1219 	} else if (fEntry != NULL) {
1220 		fEntry = _NextNonBufferEntry(sTracingMetaData->NextEntry(fEntry));
1221 		fIndex++;
1222 	}
1223 
1224 	return Current();
1225 }
1226 
1227 
1228 TraceEntry*
1229 TraceEntryIterator::Previous()
1230 {
1231 	if (fIndex == (int32)sTracingMetaData->Entries() + 1)
1232 		fEntry = sTracingMetaData->AfterLastEntry();
1233 
1234 	if (fEntry != NULL) {
1235 		fEntry = _PreviousNonBufferEntry(
1236 			sTracingMetaData->PreviousEntry(fEntry));
1237 		fIndex--;
1238 	}
1239 
1240 	return Current();
1241 }
1242 
1243 
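/*!	Moves the iterator to the 1-based \a index; 0 and Entries() + 1 act as
	the virtual before-first/after-last positions. The cheapest of three
	paths is chosen: stepping from the current position, from the start, or
	from the end.
*/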
1244 TraceEntry*
1245 TraceEntryIterator::MoveTo(int32 index)
1246 {
1247 	if (index == fIndex)
1248 		return Current();
1249 
1250 	if (index <= 0 || index > (int32)sTracingMetaData->Entries()) {
1251 		fIndex = (index <= 0 ? 0 : sTracingMetaData->Entries() + 1);
1252 		fEntry = NULL;
1253 		return NULL;
1254 	}
1255 
1256 	// get the shortest iteration path
1257 	int32 distance = index - fIndex;
1258 	int32 direction = distance < 0 ? -1 : 1;
1259 	distance *= direction;
1260 
1261 	if (index < distance) {
1262 		distance = index;
1263 		direction = 1;
1264 		fEntry = NULL;
1265 		fIndex = 0;
1266 	}
1267 	if ((int32)sTracingMetaData->Entries() + 1 - fIndex < distance) {
1268 		distance = sTracingMetaData->Entries() + 1 - fIndex;
1269 		direction = -1;
1270 		fEntry = NULL;
1271 		fIndex = sTracingMetaData->Entries() + 1;
1272 	}
1273 
1274 	// iterate to the index
1275 	if (direction < 0) {
1276 		while (fIndex != index)
1277 			Previous();
1278 	} else {
1279 		while (fIndex != index)
1280 			Next();
1281 	}
1282 
1283 	return Current();
1284 }
1285 
1286 
1287 trace_entry*
1288 TraceEntryIterator::_NextNonBufferEntry(trace_entry* entry)
1289 {
1290 	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
1291 		entry = sTracingMetaData->NextEntry(entry);
1292 
1293 	return entry;
1294 }
1295 
1296 
1297 trace_entry*
1298 TraceEntryIterator::_PreviousNonBufferEntry(trace_entry* entry)
1299 {
1300 	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
1301 		entry = sTracingMetaData->PreviousEntry(entry);
1302 
1303 	return entry;
1304 }
1305 
1306 
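/*!	Implements the "traced" KDL command (see tracing_init() for the full
	usage text). The static locals below persist the iteration state, which
	is what makes "traced forward"/"traced backward" continuable. Some
	illustrative invocations (examples only): "traced" prints the last 30
	entries, "traced 100 10" the 10 entries starting at index 100, and
	"traced 0 5 -1 filter thread 42" the last 5 entries of thread 42,
	searching all recorded entries.
*/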
1307 int
1308 dump_tracing_internal(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
1309 {
1310 	int argi = 1;
1311 
1312 	// variables in which we store our state to be continuable
1313 	static int32 _previousCount = 0;
1314 	static bool _previousHasFilter = false;
1315 	static bool _previousPrintStackTrace = false;
1316 	static int32 _previousMaxToCheck = 0;
1317 	static int32 _previousFirstChecked = 1;
1318 	static int32 _previousLastChecked = -1;
1319 	static int32 _previousDirection = 1;
1320 	static uint32 _previousEntriesEver = 0;
1321 	static uint32 _previousEntries = 0;
1322 	static uint32 _previousOutputFlags = 0;
1323 	static TraceEntryIterator iterator;
1324 
1325 	uint32 entriesEver = sTracingMetaData->EntriesEver();
1326 
1327 	// Note: start and index are Pascal-like indices (i.e. in [1, Entries()]).
1328 	int32 start = 0;	// special index: print the last count entries
1329 	int32 count = 0;
1330 	int32 maxToCheck = 0;
1331 	int32 cont = 0;
1332 
1333 	bool hasFilter = false;
1334 	bool printStackTrace = false;
1335 
1336 	uint32 outputFlags = 0;
1337 	while (argi < argc) {
1338 		if (strcmp(argv[argi], "--difftime") == 0) {
1339 			outputFlags |= TRACE_OUTPUT_DIFF_TIME;
1340 			argi++;
1341 		} else if (strcmp(argv[argi], "--printteam") == 0) {
1342 			outputFlags |= TRACE_OUTPUT_TEAM_ID;
1343 			argi++;
1344 		} else if (strcmp(argv[argi], "--stacktrace") == 0) {
1345 			printStackTrace = true;
1346 			argi++;
1347 		} else
1348 			break;
1349 	}
1350 
1351 	if (argi < argc) {
1352 		if (strcmp(argv[argi], "forward") == 0) {
1353 			cont = 1;
1354 			argi++;
1355 		} else if (strcmp(argv[argi], "backward") == 0) {
1356 			cont = -1;
1357 			argi++;
1358 		}
1359 	} else
1360 		cont = _previousDirection;
1361 
1362 	if (cont != 0) {
1363 		if (argi < argc) {
1364 			print_debugger_command_usage(argv[0]);
1365 			return 0;
1366 		}
1367 		if (entriesEver == 0 || entriesEver != _previousEntriesEver
1368 			|| sTracingMetaData->Entries() != _previousEntries) {
1369 			kprintf("Can't continue iteration. \"%s\" has not been invoked "
1370 				"before, or there were new entries written since the last "
1371 				"invocation.\n", argv[0]);
1372 			return 0;
1373 		}
1374 	}
1375 
1376 	// get start, count, maxToCheck
1377 	int32* params[3] = { &start, &count, &maxToCheck };
1378 	for (int i = 0; i < 3 && !hasFilter && argi < argc; i++) {
1379 		if (strcmp(argv[argi], "filter") == 0) {
1380 			hasFilter = true;
1381 			argi++;
1382 		} else if (argv[argi][0] == '#') {
1383 			hasFilter = true;
1384 		} else {
1385 			*params[i] = parse_expression(argv[argi]);
1386 			argi++;
1387 		}
1388 	}
1389 
1390 	// filter specification
1391 	if (argi < argc) {
1392 		hasFilter = true;
1393 		if (strcmp(argv[argi], "filter") == 0)
1394 			argi++;
1395 
1396 		if (!TraceFilterParser::Default()->Parse(argc - argi, argv + argi)) {
1397 			print_debugger_command_usage(argv[0]);
1398 			return 0;
1399 		}
1400 	}
1401 
1402 	int32 direction;
1403 	int32 firstToCheck;
1404 	int32 lastToCheck;
1405 
1406 	if (cont != 0) {
1407 		// get values from the previous iteration
1408 		direction = cont;
1409 		count = _previousCount;
1410 		maxToCheck = _previousMaxToCheck;
1411 		hasFilter = _previousHasFilter;
1412 		outputFlags = _previousOutputFlags;
1413 		printStackTrace = _previousPrintStackTrace;
1414 
1415 		if (direction < 0)
1416 			start = _previousFirstChecked - 1;
1417 		else
1418 			start = _previousLastChecked + 1;
1419 	} else {
1420 		// defaults for count and maxToCheck
1421 		if (count == 0)
1422 			count = 30;
1423 		if (maxToCheck == 0 || !hasFilter)
1424 			maxToCheck = count;
1425 		else if (maxToCheck < 0)
1426 			maxToCheck = sTracingMetaData->Entries();
1427 
1428 		// determine iteration direction
1429 		direction = (start <= 0 || count < 0 ? -1 : 1);
1430 
1431 		// validate count and maxToCheck
1432 		if (count < 0)
1433 			count = -count;
1434 		if (maxToCheck < 0)
1435 			maxToCheck = -maxToCheck;
1436 		if (maxToCheck > (int32)sTracingMetaData->Entries())
1437 			maxToCheck = sTracingMetaData->Entries();
1438 		if (count > maxToCheck)
1439 			count = maxToCheck;
1440 
1441 		// validate start
1442 		if (start <= 0 || start > (int32)sTracingMetaData->Entries())
1443 			start = max_c(1, sTracingMetaData->Entries());
1444 	}
1445 
1446 	if (direction < 0) {
1447 		firstToCheck = max_c(1, start - maxToCheck + 1);
1448 		lastToCheck = start;
1449 	} else {
1450 		firstToCheck = start;
1451 		lastToCheck = min_c((int32)sTracingMetaData->Entries(),
1452 			start + maxToCheck - 1);
1453 	}
1454 
1455 	// reset the iterator, if something changed in the meantime
1456 	if (entriesEver == 0 || entriesEver != _previousEntriesEver
1457 		|| sTracingMetaData->Entries() != _previousEntries) {
1458 		iterator.Reset();
1459 	}
1460 
1461 	LazyTraceOutput out(sTracingMetaData->TraceOutputBuffer(),
1462 		kTraceOutputBufferSize, outputFlags);
1463 
1464 	bool markedMatching = false;
1465 	int32 firstToDump = firstToCheck;
1466 	int32 lastToDump = lastToCheck;
1467 
1468 	TraceFilter* filter = NULL;
1469 	if (hasFilter)
1470 		filter = TraceFilterParser::Default()->Filter();
1471 
1472 	if (wrapperFilter != NULL) {
1473 		wrapperFilter->Init(filter, direction, cont != 0);
1474 		filter = wrapperFilter;
1475 	}
1476 
1477 	if (direction < 0 && filter && lastToCheck - firstToCheck >= count) {
1478 		// iteration direction is backwards
1479 		markedMatching = true;
1480 
1481 		// From the last entry to check iterate backwards to check filter
1482 		// matches.
1483 		int32 matching = 0;
1484 
1485 		// move to the entry after the last entry to check
1486 		iterator.MoveTo(lastToCheck + 1);
1487 
1488 		// iterate backwards
1489 		firstToDump = -1;
1490 		lastToDump = -1;
1491 		while (iterator.Index() > firstToCheck) {
1492 			TraceEntry* entry = iterator.Previous();
1493 			if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
1494 				out.Clear();
1495 				if (filter->Filter(entry, out)) {
1496 					entry->ToTraceEntry()->flags |= FILTER_MATCH;
1497 					if (lastToDump == -1)
1498 						lastToDump = iterator.Index();
1499 					firstToDump = iterator.Index();
1500 
1501 					matching++;
1502 					if (matching >= count)
1503 						break;
1504 				} else
1505 					entry->ToTraceEntry()->flags &= ~FILTER_MATCH;
1506 			}
1507 		}
1508 
1509 		firstToCheck = iterator.Index();
1510 
1511 		// iterate to the previous entry, so that the next loop starts at the
1512 		// right one
1513 		iterator.Previous();
1514 	}
1515 
1516 	out.SetLastEntryTime(0);
1517 
1518 	// set the iterator to the entry before the first one to dump
1519 	iterator.MoveTo(firstToDump - 1);
1520 
1521 	// dump the entries matching the filter in the range
1522 	// [firstToDump, lastToDump]
1523 	int32 dumped = 0;
1524 
1525 	while (TraceEntry* entry = iterator.Next()) {
1526 		int32 index = iterator.Index();
1527 		if (index < firstToDump)
1528 			continue;
1529 		if (index > lastToDump || dumped >= count) {
1530 			if (direction > 0)
1531 				lastToCheck = index - 1;
1532 			break;
1533 		}
1534 
1535 		if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
1536 			out.Clear();
1537 			if (filter && (markedMatching
1538 					? (entry->Flags() & FILTER_MATCH) == 0
1539 					: !filter->Filter(entry, out))) {
1540 				continue;
1541 			}
1542 
1543 			// don't print trailing new line
1544 			const char* dump = out.DumpEntry(entry);
1545 			int len = strlen(dump);
1546 			if (len > 0 && dump[len - 1] == '\n')
1547 				len--;
1548 
1549 			kprintf("%5ld. %.*s\n", index, len, dump);
1550 
1551 			if (printStackTrace) {
1552 				out.Clear();
1553 				entry->DumpStackTrace(out);
1554 				if (out.Size() > 0)
1555 					kputs(out.Buffer());
1556 			}
1557 		} else if (!filter)
1558 			kprintf("%5ld. ** uninitialized entry **\n", index);
1559 
1560 		dumped++;
1561 	}
1562 
1563 	kprintf("printed %ld entries within range %ld to %ld (%ld of %ld total, "
1564 		"%ld ever)\n", dumped, firstToCheck, lastToCheck,
1565 		lastToCheck - firstToCheck + 1, sTracingMetaData->Entries(),
1566 		entriesEver);
1567 
1568 	// store iteration state
1569 	_previousCount = count;
1570 	_previousMaxToCheck = maxToCheck;
1571 	_previousHasFilter = hasFilter;
1572 	_previousPrintStackTrace = printStackTrace;
1573 	_previousFirstChecked = firstToCheck;
1574 	_previousLastChecked = lastToCheck;
1575 	_previousDirection = direction;
1576 	_previousEntriesEver = entriesEver;
1577 	_previousEntries = sTracingMetaData->Entries();
1578 	_previousOutputFlags = outputFlags;
1579 
1580 	return cont != 0 ? B_KDEBUG_CONT : 0;
1581 }
1582 
1583 
1584 static int
1585 dump_tracing_command(int argc, char** argv)
1586 {
1587 	return dump_tracing_internal(argc, argv, NULL);
1588 }
1589 
1590 
1591 #endif	// ENABLE_TRACING
1592 
1593 
1594 extern "C" uint8*
1595 alloc_tracing_buffer(size_t size)
1596 {
1597 #if	ENABLE_TRACING
1598 	trace_entry* entry = sTracingMetaData->AllocateEntry(
1599 		size + sizeof(trace_entry), BUFFER_ENTRY);
1600 	if (entry == NULL)
1601 		return NULL;
1602 
1603 	return (uint8*)(entry + 1);
1604 #else
1605 	return NULL;
1606 #endif
1607 }
1608 
1609 
1610 uint8*
1611 alloc_tracing_buffer_memcpy(const void* source, size_t size, bool user)
1612 {
1613 	if (user && !IS_USER_ADDRESS(source))
1614 		return NULL;
1615 
1616 	uint8* buffer = alloc_tracing_buffer(size);
1617 	if (buffer == NULL)
1618 		return NULL;
1619 
1620 	if (user) {
1621 		if (user_memcpy(buffer, source, size) != B_OK)
1622 			return NULL;
1623 	} else
1624 		memcpy(buffer, source, size);
1625 
1626 	return buffer;
1627 }
1628 
1629 
1630 char*
1631 alloc_tracing_buffer_strcpy(const char* source, size_t maxSize, bool user)
1632 {
1633 	if (source == NULL || maxSize == 0)
1634 		return NULL;
1635 
1636 	if (user && !IS_USER_ADDRESS(source))
1637 		return NULL;
1638 
1639 	// limit maxSize to the actual source string length
1640 	if (user) {
1641 		ssize_t size = user_strlcpy(NULL, source, 0);
1642 			// there's no user_strnlen()
1643 		if (size < 0)
1644 			return NULL;
1645 		maxSize = min_c(maxSize, (size_t)size + 1);
1646 	} else
1647 		maxSize = strnlen(source, maxSize - 1) + 1;
1648 
1649 	char* buffer = (char*)alloc_tracing_buffer(maxSize);
1650 	if (buffer == NULL)
1651 		return NULL;
1652 
1653 	if (user) {
1654 		if (user_strlcpy(buffer, source, maxSize) < B_OK)
1655 			return NULL;
1656 	} else
1657 		strlcpy(buffer, source, maxSize);
1658 
1659 	return buffer;
1660 }
1661 
1662 
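/*!	Captures a stack trace into a tracing buffer entry. \a skipFrames is
	increased by one to skip this function's own frame; with interrupts
	disabled the trace is restricted to kernel frames (see the comment
	below).
*/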
1663 tracing_stack_trace*
1664 capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool kernelOnly)
1665 {
1666 #if	ENABLE_TRACING
1667 	// page_fault_exception() doesn't allow us to gracefully handle a bad
1668 	// address in the stack trace if interrupts are disabled, so we always
1669 	// restrict the stack traces to the kernel in this case. A bad address
1670 	// in the kernel stack trace would still cause a panic(), but this is
1671 	// probably even desired.
1672 	if (!are_interrupts_enabled())
1673 		kernelOnly = true;
1674 
1675 	tracing_stack_trace* stackTrace
1676 		= (tracing_stack_trace*)alloc_tracing_buffer(
1677 			sizeof(tracing_stack_trace) + maxCount * sizeof(addr_t));
1678 
1679 	if (stackTrace != NULL) {
1680 		stackTrace->depth = arch_debug_get_stack_trace(
1681 			stackTrace->return_addresses, maxCount, 0, skipFrames + 1,
1682 			STACK_TRACE_KERNEL | (kernelOnly ? 0 : STACK_TRACE_USER));
1683 	}
1684 
1685 	return stackTrace;
1686 #else
1687 	return NULL;
1688 #endif
1689 }
1690 
1691 
1692 addr_t
1693 tracing_find_caller_in_stack_trace(struct tracing_stack_trace* stackTrace,
1694 	const addr_t excludeRanges[], uint32 excludeRangeCount)
1695 {
1696 	for (int32 i = 0; i < stackTrace->depth; i++) {
1697 		addr_t returnAddress = stackTrace->return_addresses[i];
1698 
1699 		bool inRange = false;
1700 		for (uint32 j = 0; j < excludeRangeCount; j++) {
1701 			if (returnAddress >= excludeRanges[j * 2 + 0]
1702 				&& returnAddress < excludeRanges[j * 2 + 1]) {
1703 				inRange = true;
1704 				break;
1705 			}
1706 		}
1707 
1708 		if (!inRange)
1709 			return returnAddress;
1710 	}
1711 
1712 	return 0;
1713 }
1714 
1715 
1716 void
1717 tracing_print_stack_trace(struct tracing_stack_trace* stackTrace)
1718 {
1719 #if ENABLE_TRACING
1720 	print_stack_trace(stackTrace, kprintf);
1721 #endif
1722 }
1723 
1724 
1725 int
1726 dump_tracing(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
1727 {
1728 #if	ENABLE_TRACING
1729 	return dump_tracing_internal(argc, argv, wrapperFilter);
1730 #else
1731 	return 0;
1732 #endif
1733 }
1734 
1735 
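/*!	Checks whether \a candidate is (still) a live entry in the trace buffer.
	For a non-negative \a entryTime the oldest AbstractTraceEntry in the
	buffer is consulted: if it is newer than \a entryTime, \a candidate must
	have been overwritten in the meantime; otherwise the candidate's time
	stamp has to match \a entryTime exactly.
*/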
1736 bool
1737 tracing_is_entry_valid(AbstractTraceEntry* candidate, bigtime_t entryTime)
1738 {
1739 #if ENABLE_TRACING
1740 	if (!sTracingMetaData->IsInBuffer(candidate, sizeof(*candidate)))
1741 		return false;
1742 
1743 	if (entryTime < 0)
1744 		return true;
1745 
1746 	TraceEntryIterator iterator;
1747 	while (TraceEntry* entry = iterator.Next()) {
1748 		AbstractTraceEntry* abstract = dynamic_cast<AbstractTraceEntry*>(entry);
1749 		if (abstract == NULL)
1750 			continue;
1751 
1752 		if (abstract != candidate && abstract->Time() > entryTime)
1753 			return false;
1754 
1755 		return candidate->Time() == entryTime;
1756 	}
1757 #endif
1758 
1759 	return false;
1760 }
1761 
1762 
1763 void
1764 lock_tracing_buffer()
1765 {
1766 #if ENABLE_TRACING
1767 	sTracingMetaData->Lock();
1768 #endif
1769 }
1770 
1771 
1772 void
1773 unlock_tracing_buffer()
1774 {
1775 #if ENABLE_TRACING
1776 	sTracingMetaData->Unlock();
1777 #endif
1778 }
1779 
1780 
1781 extern "C" status_t
1782 tracing_init(void)
1783 {
1784 #if	ENABLE_TRACING
1785 	status_t result = TracingMetaData::Create(sTracingMetaData);
1786 	if (result != B_OK) {
1787 		sTracingMetaData = &sDummyTracingMetaData;
1788 		return result;
1789 	}
1790 
1791 	new(nothrow) TracingLogStartEntry();
1792 
1793 	add_debugger_command_etc("traced", &dump_tracing_command,
1794 		"Dump recorded trace entries",
1795 		"[ --printteam ] [ --difftime ] [ --stacktrace ] "
1796 			"(\"forward\" | \"backward\") "
1797 			"| ([ <start> [ <count> [ <range> ] ] ] "
1798 			"[ #<pattern> | (\"filter\" <filter>) ])\n"
1799 		"Prints recorded trace entries. If \"backward\" or \"forward\" is\n"
1800 		"specified, the command continues where the previous invocation left\n"
1801 		"off, i.e. printing the previous or next entries, respectively (as\n"
1802 		"many as printed before). In this case the command is continuable:\n"
1803 		"afterwards entering an empty line in the debugger will reinvoke it.\n"
1804 		"If no arguments are given, the command continues in the direction\n"
1805 		"of the last invocation.\n"
1806 		"--printteam  - enables printing the entries' team IDs.\n"
1807 		"--difftime   - print difference times for all but the first entry.\n"
1808 		"--stacktrace - print stack traces for entries that captured one.\n"
1809 		"  <start>    - The base index of the entries to print. Depending on\n"
1810 		"               whether the iteration direction is forward or\n"
1811 		"               backward this will be the first or last entry printed\n"
1812 		"               (potentially, if a filter is specified). The index of\n"
1813 		"               the first entry in the trace buffer is 1. If 0 is\n"
1814 		"               specified, the last <count> recorded entries are\n"
1815 		"               printed (iteration direction is backward). Defaults\n"
1816 		"               to 0.\n"
1817 		"  <count>    - The number of entries to be printed. Defaults to 30.\n"
1818 		"               If negative, the -<count> entries before and\n"
1819 		"               including <start> will be printed.\n"
1820 		"  <range>    - Only relevant if a filter is specified. Specifies the\n"
1821 		"               number of entries to be filtered -- depending on the\n"
1822 		"               iteration direction the entries before or after\n"
1823 		"               <start>. If more than <count> entries match the\n"
1824 		"               filter, only the first (forward) or last (backward)\n"
1825 		"               <count> matching entries will be printed. If 0 is\n"
1826 		"               specified <range> will be set to <count>. If -1,\n"
1827 		"               <range> will be set to the number of recorded\n"
1828 		"               entries.\n"
1829 		"  <pattern>  - If specified only entries containing this string are\n"
1830 		"               printed.\n"
1831 		"  <filter>   - If specified only entries matching this filter\n"
1832 		"               expression are printed. The expression can consist of\n"
1833 		"               prefix operators \"not\", \"and\", \"or\", and\n"
1834 		"               filters \"'thread' <thread>\" (matching entries\n"
1835 		"               with the given thread ID), \"'team' <team>\"\n"
1836 		"               (matching entries with the given team ID), and\n"
1837 		"               \"#<pattern>\" (matching entries containing the given\n"
1838 		"               string).\n", 0);
1839 #endif	// ENABLE_TRACING
1840 	return B_OK;
1841 }
1842 
1843 
1844 void
1845 ktrace_printf(const char *format, ...)
1846 {
1847 #if	ENABLE_TRACING
1848 	va_list list;
1849 	va_start(list, format);
1850 
1851 	char buffer[256];
1852 	vsnprintf(buffer, sizeof(buffer), format, list);
1853 
1854 	va_end(list);
1855 
1856 	new(nothrow) KernelTraceEntry(buffer);
1857 #endif	// ENABLE_TRACING
1858 }
1859 
1860 
1861 void
1862 _user_ktrace_output(const char *message)
1863 {
1864 #if	ENABLE_TRACING
1865 	new(nothrow) UserTraceEntry(message);
1866 #endif	// ENABLE_TRACING
1867 }
1868 
1869