xref: /haiku/src/system/kernel/debug/tracing.cpp (revision 040a81419dda83d1014e9dc94936a4cb3f027303)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2008-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Copyright 2012, Rene Gollent, rene@gollent.com.
5  * Distributed under the terms of the MIT License.
6  */
7 
8 
9 #include <tracing.h>
10 
11 #include <stdlib.h>
12 
13 #include <algorithm>
14 
15 #include <arch/debug.h>
16 #include <debug.h>
17 #include <debug_heap.h>
18 #include <elf.h>
19 #include <int.h>
20 #include <kernel.h>
21 #include <team.h>
22 #include <thread.h>
23 #include <util/AutoLock.h>
24 #include <vm/vm.h>
25 
26 
27 #if ENABLE_TRACING
28 
29 //#define TRACE_TRACING
30 #ifdef TRACE_TRACING
31 #	define TRACE(x) dprintf_no_syslog x
32 #else
33 #	define TRACE(x) ;
34 #endif
35 
36 
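// Flags stored in trace_entry::flags:
//	WRAP_ENTRY        - padding entry at the physical end of the ring buffer;
//	                    iteration wraps back to the buffer start here
//	ENTRY_INITIALIZED - set by TraceEntry::Initialized() once the entry is
//	                    fully constructed and safe to dump
//	BUFFER_ENTRY      - raw data allocated via alloc_tracing_buffer(), not a
//	                    TraceEntry object
//	FILTER_MATCH      - scratch flag used by dump_tracing_internal() to mark
//	                    entries matching the current filter
//	CHECK_ENTRY       - set on entries recovered from a previous session by
//	                    _InitPreviousTracingData(), pending validation
//	INVALID_ENTRY     - presumably marks entries failing that validation (not
//	                    set in this file)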
37 enum {
38 	WRAP_ENTRY			= 0x01,
39 	ENTRY_INITIALIZED	= 0x02,
40 	BUFFER_ENTRY		= 0x04,
41 	FILTER_MATCH		= 0x08,
42 	INVALID_ENTRY		= 0x10,
43 	CHECK_ENTRY			= 0x20,
44 };
45 
46 
47 static const size_t kTraceOutputBufferSize = 10240;
48 static const size_t kBufferSize = MAX_TRACE_SIZE / sizeof(trace_entry);
49 
50 static const uint32 kMaxRecoveringErrorCount	= 100;
51 static const addr_t kMetaDataBaseAddress		= 32 * 1024 * 1024;
52 static const addr_t kMetaDataBaseEndAddress		= 128 * 1024 * 1024;
53 static const addr_t kMetaDataAddressIncrement	= 8 * 1024 * 1024;
54 static const uint32 kMetaDataMagic1 = 'Vali';
55 static const uint32 kMetaDataMagic2 = 'dTra';
56 static const uint32 kMetaDataMagic3 = 'cing';
57 
58 // the maximum we can address with the trace_entry::[previous_]size fields
59 static const size_t kMaxTracingEntryByteSize
60 	= ((1 << 13) - 1) * sizeof(trace_entry);
61 
62 
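// Adapts a TraceOutput to the printf-like functor interface expected by the
// print_stack_trace() template below, which is also instantiated with plain
// kprintf in tracing_print_stack_trace(), e.g.:
//
//	print_stack_trace(stackTrace, TraceOutputPrint(output));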
63 struct TraceOutputPrint {
64 	TraceOutputPrint(TraceOutput& output)
65 		:
66 		fOutput(output)
67 	{
68 	}
69 
70 	void operator()(const char* format,...) const
71 	{
72 		va_list args;
73 		va_start(args, format);
74 		fOutput.PrintArgs(format, args);
75 		va_end(args);
76 	}
77 
78 private:
79 	TraceOutput&	fOutput;
80 };
81 
82 
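// The magic fields are interleaved with the actual data; all three being
// intact is taken as evidence that a previous session's meta data (and with
// it the trace buffer) survived a warm reboot. _CreateMetaDataArea() scans
// physical memory between kMetaDataBaseAddress and kMetaDataBaseEndAddress
// for them.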
83 class TracingMetaData {
84 public:
85 	static	status_t			Create(TracingMetaData*& _metaData);
86 
87 	inline	bool				Lock();
88 	inline	void				Unlock();
89 
90 	inline	trace_entry*		FirstEntry() const;
91 	inline	trace_entry*		AfterLastEntry() const;
92 
93 	inline	uint32				Entries() const;
94 	inline	uint32				EntriesEver() const;
95 
96 	inline	void				IncrementEntriesEver();
97 
98 	inline	char*				TraceOutputBuffer() const;
99 
100 			trace_entry*		NextEntry(trace_entry* entry);
101 			trace_entry*		PreviousEntry(trace_entry* entry);
102 
103 			trace_entry*		AllocateEntry(size_t size, uint16 flags);
104 
105 			bool				IsInBuffer(void* address, size_t size);
106 
107 private:
108 			bool				_FreeFirstEntry();
109 			bool				_MakeSpace(size_t needed);
110 
111 	static	status_t			_CreateMetaDataArea(bool findPrevious,
112 									area_id& _area,
113 									TracingMetaData*& _metaData);
114 			bool				_InitPreviousTracingData();
115 
116 private:
117 			uint32				fMagic1;
118 			trace_entry*		fBuffer;
119 			trace_entry*		fFirstEntry;
120 			trace_entry*		fAfterLastEntry;
121 			uint32				fEntries;
122 			uint32				fMagic2;
123 			uint32				fEntriesEver;
124 			spinlock			fLock;
125 			char*				fTraceOutputBuffer;
126 			phys_addr_t			fPhysicalAddress;
127 			uint32				fMagic3;
128 };
129 
130 static TracingMetaData sFallbackTracingMetaData;
131 static TracingMetaData* sTracingMetaData = &sFallbackTracingMetaData;
132 static bool sTracingDataRecovered = false;
133 
134 
135 // #pragma mark -
136 
137 
138 template<typename Print>
139 static void
140 print_stack_trace(struct tracing_stack_trace* stackTrace,
141 	const Print& print)
142 {
143 	if (stackTrace == NULL || stackTrace->depth <= 0)
144 		return;
145 
146 	static const size_t kBufferSize = 256;
147 	char* buffer = (char*)debug_malloc(kBufferSize);
148 
149 	for (int32 i = 0; i < stackTrace->depth; i++) {
150 		addr_t address = stackTrace->return_addresses[i];
151 
152 		const char* symbol;
153 		const char* demangledName = NULL;
154 		const char* imageName;
155 		bool exactMatch;
156 		addr_t baseAddress;
157 
158 		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
159 				&imageName, &exactMatch) == B_OK) {
160 
161 			if (buffer != NULL) {
162 				bool isObjectMethod;
163 				demangledName = debug_demangle_symbol(symbol, buffer,
164 					kBufferSize, &isObjectMethod);
165 			}
166 
167 			print("  %p  %s + 0x%lx (%s)%s\n", (void*)address,
168 				demangledName != NULL ? demangledName : symbol,
169 				address - baseAddress, imageName,
170 				exactMatch ? "" : " (nearest)");
171 		} else
172 			print("  %p\n", (void*)address);
173 	}
174 
175 	if (buffer != NULL)
176 		debug_free(buffer);
177 }
178 
179 
180 // #pragma mark - TracingMetaData
181 
182 
183 bool
184 TracingMetaData::Lock()
185 {
186 	acquire_spinlock(&fLock);
187 	return true;
188 }
189 
190 
191 void
192 TracingMetaData::Unlock()
193 {
194 	release_spinlock(&fLock);
195 }
196 
197 
198 trace_entry*
199 TracingMetaData::FirstEntry() const
200 {
201 	return fFirstEntry;
202 }
203 
204 
205 trace_entry*
206 TracingMetaData::AfterLastEntry() const
207 {
208 	return fAfterLastEntry;
209 }
210 
211 
212 uint32
213 TracingMetaData::Entries() const
214 {
215 	return fEntries;
216 }
217 
218 
219 uint32
220 TracingMetaData::EntriesEver() const
221 {
222 	return fEntriesEver;
223 }
224 
225 
226 void
227 TracingMetaData::IncrementEntriesEver()
228 {
229 	fEntriesEver++;
230 		// NOTE: Race condition on SMP machines! We should use atomic_add(),
231 		// though that costs some performance and the value is purely
232 		// informational anyway.
233 }
234 
235 
236 char*
237 TracingMetaData::TraceOutputBuffer() const
238 {
239 	return fTraceOutputBuffer;
240 }
241 
242 
243 trace_entry*
244 TracingMetaData::NextEntry(trace_entry* entry)
245 {
246 	entry += entry->size;
247 	if ((entry->flags & WRAP_ENTRY) != 0)
248 		entry = fBuffer;
249 
250 	if (entry == fAfterLastEntry)
251 		return NULL;
252 
253 	return entry;
254 }
255 
256 
257 trace_entry*
258 TracingMetaData::PreviousEntry(trace_entry* entry)
259 {
260 	if (entry == fFirstEntry)
261 		return NULL;
262 
263 	if (entry == fBuffer) {
264 		// beginning of buffer -- previous entry is a wrap entry
265 		entry = fBuffer + kBufferSize - entry->previous_size;
266 	}
267 
268 	return entry - entry->previous_size;
269 }
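
// Entries are stored back to back in the ring buffer; trace_entry::size and
// trace_entry::previous_size are measured in trace_entry units (4 bytes, see
// AllocateEntry()), which is what makes this bidirectional iteration
// possible.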
270 
271 
272 trace_entry*
273 TracingMetaData::AllocateEntry(size_t size, uint16 flags)
274 {
275 	if (fAfterLastEntry == NULL || size == 0
276 		|| size >= kMaxTracingEntryByteSize) {
277 		return NULL;
278 	}
279 
280 	InterruptsSpinLocker _(fLock);
281 
282 	size = (size + 3) >> 2;
283 		// 4 byte aligned, don't store the lower 2 bits
284 
285 	TRACE(("AllocateEntry(%lu), start %p, end %p, buffer %p\n", size * 4,
286 		fFirstEntry, fAfterLastEntry, fBuffer));
287 
288 	if (!_MakeSpace(size))
289 		return NULL;
290 
291 	trace_entry* entry = fAfterLastEntry;
292 	entry->size = size;
293 	entry->flags = flags;
294 	fAfterLastEntry += size;
295 	fAfterLastEntry->previous_size = size;
296 
297 	if (!(flags & BUFFER_ENTRY))
298 		fEntries++;
299 
300 	TRACE(("  entry: %p, end %p, start %p, entries %ld\n", entry,
301 		fAfterLastEntry, fFirstEntry, fEntries));
302 
303 	return entry;
304 }
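
// Callers reserve the leading trace_entry as header and use the remainder as
// payload; cf. TraceEntry::operator new and alloc_tracing_buffer(), which
// both follow this pattern (payloadSize standing in for the caller's data
// size):
//
//	trace_entry* entry = sTracingMetaData->AllocateEntry(
//		payloadSize + sizeof(trace_entry), flags);
//	void* payload = entry != NULL ? entry + 1 : NULL;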
305 
306 
307 bool
308 TracingMetaData::IsInBuffer(void* address, size_t size)
309 {
310 	if (fEntries == 0)
311 		return false;
312 
313 	addr_t start = (addr_t)address;
314 	addr_t end = start + size;
315 
316 	if (start < (addr_t)fBuffer || end > (addr_t)(fBuffer + kBufferSize))
317 		return false;
318 
319 	if (fFirstEntry > fAfterLastEntry)
320 		return start >= (addr_t)fFirstEntry || end <= (addr_t)fAfterLastEntry;
321 
322 	return start >= (addr_t)fFirstEntry && end <= (addr_t)fAfterLastEntry;
323 }
324 
325 
326 bool
327 TracingMetaData::_FreeFirstEntry()
328 {
329 	TRACE(("  skip start %p, %lu*4 bytes\n", fFirstEntry, fFirstEntry->size));
330 
331 	trace_entry* newFirst = NextEntry(fFirstEntry);
332 
333 	if (fFirstEntry->flags & BUFFER_ENTRY) {
334 		// a buffer entry -- just skip it
335 	} else if (fFirstEntry->flags & ENTRY_INITIALIZED) {
336 		// Fully initialized TraceEntry: We could destroy it, but don't do so
337 		// for sake of robustness. The destructors of tracing entry classes
338 		// should be empty anyway.
339 		fEntries--;
340 	} else {
341 		// Not fully initialized TraceEntry. We can't free it, since
342 		// its constructor might still write into the memory and
343 		// overwrite data of the entry we're going to allocate.
344 		// We can't do anything until this entry can be discarded.
345 		return false;
346 	}
347 
348 	if (newFirst == NULL) {
349 		// everything is freed -- practically this can't happen if the
350 		// buffer is large enough to hold three max-sized entries
351 		fFirstEntry = fAfterLastEntry = fBuffer;
352 		TRACE(("_FreeFirstEntry(): all entries freed!\n"));
353 	} else
354 		fFirstEntry = newFirst;
355 
356 	return true;
357 }
358 
359 
360 /*!	Makes sure we have needed * 4 bytes of memory at fAfterLastEntry.
361 	Returns \c false, if unable to free that much.
362 */
363 bool
364 TracingMetaData::_MakeSpace(size_t needed)
365 {
366 	// we need space for fAfterLastEntry, too (in case we need to wrap around
367 	// later)
368 	needed++;
369 
370 	// If there's not enough space (free or occupied) after fAfterLastEntry,
371 	// we free all entries in that region and wrap around.
372 	if (fAfterLastEntry + needed > fBuffer + kBufferSize) {
373 		TRACE(("_MakeSpace(%lu), wrapping around: after last: %p\n", needed,
374 			fAfterLastEntry));
375 
376 		// Free all entries after fAfterLastEntry and one more at the beginning
377 		// of the buffer.
378 		while (fFirstEntry > fAfterLastEntry) {
379 			if (!_FreeFirstEntry())
380 				return false;
381 		}
382 		if (fAfterLastEntry != fBuffer && !_FreeFirstEntry())
383 			return false;
384 
385 		// just in case _FreeFirstEntry() freed the very last existing entry
386 		if (fAfterLastEntry == fBuffer)
387 			return true;
388 
389 		// mark as wrap entry and actually wrap around
390 		trace_entry* wrapEntry = fAfterLastEntry;
391 		wrapEntry->size = 0;
392 		wrapEntry->flags = WRAP_ENTRY;
393 		fAfterLastEntry = fBuffer;
394 		fAfterLastEntry->previous_size = fBuffer + kBufferSize - wrapEntry;
395 	}
396 
397 	if (fFirstEntry <= fAfterLastEntry) {
398 		// buffer is empty or the space after fAfterLastEntry is unoccupied
399 		return true;
400 	}
401 
402 	// free the first entries, until there's enough space
403 	size_t space = fFirstEntry - fAfterLastEntry;
404 
405 	if (space < needed) {
406 		TRACE(("_MakeSpace(%lu), left %ld\n", needed, space));
407 	}
408 
409 	while (space < needed) {
410 		space += fFirstEntry->size;
411 
412 		if (!_FreeFirstEntry())
413 			return false;
414 	}
415 
416 	TRACE(("  out: start %p, entries %ld\n", fFirstEntry, fEntries));
417 
418 	return true;
419 }
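
// Note: "needed" and "space" are in trace_entry units as well; the extra
// slot added to "needed" at the top keeps room for the entry at
// fAfterLastEntry itself, which becomes the wrap entry when the buffer wraps
// around.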
420 
421 
422 /*static*/ status_t
423 TracingMetaData::Create(TracingMetaData*& _metaData)
424 {
425 	// search meta data in memory (from previous session)
426 	area_id area;
427 	TracingMetaData* metaData;
428 	status_t error = _CreateMetaDataArea(true, area, metaData);
429 	if (error == B_OK) {
430 		if (metaData->_InitPreviousTracingData()) {
431 			_metaData = metaData;
432 			return B_OK;
433 		}
434 
435 		dprintf("Found previous tracing meta data, but failed to init.\n");
436 
437 		// invalidate the meta data
438 		metaData->fMagic1 = 0;
439 		metaData->fMagic2 = 0;
440 		metaData->fMagic3 = 0;
441 		delete_area(area);
442 	} else
443 		dprintf("No previous tracing meta data found.\n");
444 
445 	// no previous tracing data found -- create new one
446 	error = _CreateMetaDataArea(false, area, metaData);
447 	if (error != B_OK)
448 		return error;
449 
450 	virtual_address_restrictions virtualRestrictions = {};
451 	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
452 	physical_address_restrictions physicalRestrictions = {};
453 	area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
454 		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
455 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
456 		&virtualRestrictions, &physicalRestrictions,
457 		(void**)&metaData->fTraceOutputBuffer);
458 	if (area < 0)
459 		return area;
460 
461 	// get the physical address
462 	physical_entry physicalEntry;
463 	if (get_memory_map(metaData->fTraceOutputBuffer, B_PAGE_SIZE,
464 			&physicalEntry, 1) == B_OK) {
465 		metaData->fPhysicalAddress = physicalEntry.address;
466 	} else {
467 		dprintf("TracingMetaData::Create(): failed to get physical address "
468 			"of tracing buffer\n");
469 		metaData->fPhysicalAddress = 0;
470 	}
471 
472 	metaData->fBuffer = (trace_entry*)(metaData->fTraceOutputBuffer
473 		+ kTraceOutputBufferSize);
474 	metaData->fFirstEntry = metaData->fBuffer;
475 	metaData->fAfterLastEntry = metaData->fBuffer;
476 
477 	metaData->fEntries = 0;
478 	metaData->fEntriesEver = 0;
479 	B_INITIALIZE_SPINLOCK(&metaData->fLock);
480 
481 	metaData->fMagic1 = kMetaDataMagic1;
482 	metaData->fMagic2 = kMetaDataMagic2;
483 	metaData->fMagic3 = kMetaDataMagic3;
484 
485 	_metaData = metaData;
486 	return B_OK;
487 }
488 
489 
490 /*static*/ status_t
491 TracingMetaData::_CreateMetaDataArea(bool findPrevious, area_id& _area,
492 	TracingMetaData*& _metaData)
493 {
494 	// search meta data in memory (from previous session)
495 	TracingMetaData* metaData;
496 	phys_addr_t metaDataAddress = kMetaDataBaseAddress;
497 	for (; metaDataAddress <= kMetaDataBaseEndAddress;
498 			metaDataAddress += kMetaDataAddressIncrement) {
499 		virtual_address_restrictions virtualRestrictions = {};
500 		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
501 		physical_address_restrictions physicalRestrictions = {};
502 		physicalRestrictions.low_address = metaDataAddress;
503 		physicalRestrictions.high_address = metaDataAddress + B_PAGE_SIZE;
504 		area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata",
505 			B_PAGE_SIZE, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
506 			CREATE_AREA_DONT_CLEAR, 0, &virtualRestrictions,
507 			&physicalRestrictions, (void**)&metaData);
508 		if (area < 0)
509 			continue;
510 
511 		if (!findPrevious) {
512 			_area = area;
513 			_metaData = metaData;
514 			return B_OK;
515 		}
516 
517 		if (metaData->fMagic1 == kMetaDataMagic1
518 			&& metaData->fMagic2 == kMetaDataMagic2
519 			&& metaData->fMagic3 == kMetaDataMagic3) {
520 			_area = area;
521 			_metaData = metaData;
522 			return B_OK;
523 		}
524 
525 		delete_area(area);
526 	}
527 
528 	if (findPrevious)
529 		return B_ENTRY_NOT_FOUND;
530 
531 	// We could not allocate at any of the standard locations. Instead of
532 	// failing entirely, we use the static meta data. The tracing buffer
533 	// won't be reattachable in the next session, but at least we can use
534 	// it in this session.
535 	_metaData = &sFallbackTracingMetaData;
536 	return B_OK;
537 }
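
// Note that the scan above maps each candidate page with
// CREATE_AREA_DONT_CLEAR, i.e. with its previous contents intact; without
// that, the magic fields from the last session could never survive to be
// checked.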
538 
539 
540 bool
541 TracingMetaData::_InitPreviousTracingData()
542 {
543 	// TODO: ATM re-attaching the previous tracing buffer doesn't work very
544 	// well. The entries should be checked more thoroughly for validity -- e.g.
545 	// the entries' vtable pointers could be invalid, which can
546 	// make the "traced" command quite unusable. The validity of the entries
547 	// could be checked in a safe environment (i.e. with a fault handler) with
548 	// typeid() and call of a virtual function.
549 	return false;
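	// The code below is currently unreachable due to the early return above;
	// it is kept for when the TODO is addressed and entry validation is
	// implemented.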
550 
551 	addr_t bufferStart
552 		= (addr_t)fTraceOutputBuffer + kTraceOutputBufferSize;
553 	addr_t bufferEnd = bufferStart + MAX_TRACE_SIZE;
554 
555 	if (bufferStart > bufferEnd || (addr_t)fBuffer != bufferStart
556 		|| (addr_t)fFirstEntry % sizeof(trace_entry) != 0
557 		|| (addr_t)fFirstEntry < bufferStart
558 		|| (addr_t)fFirstEntry + sizeof(trace_entry) >= bufferEnd
559 		|| (addr_t)fAfterLastEntry % sizeof(trace_entry) != 0
560 		|| (addr_t)fAfterLastEntry < bufferStart
561 		|| (addr_t)fAfterLastEntry > bufferEnd
562 		|| fPhysicalAddress == 0) {
563 		dprintf("Failed to init tracing meta data: Sanity checks "
564 			"failed.\n");
565 		return false;
566 	}
567 
568 	// re-map the previous tracing buffer
569 	virtual_address_restrictions virtualRestrictions = {};
570 	virtualRestrictions.address = fTraceOutputBuffer;
571 	virtualRestrictions.address_specification = B_EXACT_ADDRESS;
572 	physical_address_restrictions physicalRestrictions = {};
573 	physicalRestrictions.low_address = fPhysicalAddress;
574 	physicalRestrictions.high_address = fPhysicalAddress
575 		+ ROUNDUP(kTraceOutputBufferSize + MAX_TRACE_SIZE, B_PAGE_SIZE);
576 	area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
577 		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
578 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_CLEAR, 0,
579 		&virtualRestrictions, &physicalRestrictions, NULL);
580 	if (area < 0) {
581 		dprintf("Failed to init tracing meta data: Mapping tracing log "
582 			"buffer failed: %s\n", strerror(area));
583 		return false;
584 	}
585 
586 	dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n",
587 		fTraceOutputBuffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);
588 
589 	// verify/repair the tracing entry list
590 	uint32 errorCount = 0;
591 	uint32 entryCount = 0;
592 	uint32 nonBufferEntryCount = 0;
593 	uint32 previousEntrySize = 0;
594 	trace_entry* entry = fFirstEntry;
595 	while (errorCount <= kMaxRecoveringErrorCount) {
596 		// check previous entry size
597 		if (entry->previous_size != previousEntrySize) {
598 			if (entry != fFirstEntry) {
599 				dprintf("ktrace recovering: entry %p: fixing previous_size: "
600 					"%" B_PRIu32 " (should be %" B_PRIu32 ")\n", entry,
601 					entry->previous_size, previousEntrySize);
602 				errorCount++;
603 			}
604 			entry->previous_size = previousEntrySize;
605 		}
606 
607 		if (entry == fAfterLastEntry)
608 			break;
609 
610 		// check size field
611 		if ((entry->flags & WRAP_ENTRY) == 0 && entry->size == 0) {
612 			dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
613 				entry);
614 			errorCount++;
615 			fAfterLastEntry = entry;
616 			break;
617 		}
618 
619 		if (entry->size > uint32(fBuffer + kBufferSize - entry)) {
620 			dprintf("ktrace recovering: entry %p: size too big: %" B_PRIu32 "\n",
621 				entry, entry->size);
622 			errorCount++;
623 			fAfterLastEntry = entry;
624 			break;
625 		}
626 
627 		if (entry < fAfterLastEntry && entry + entry->size > fAfterLastEntry) {
628 			dprintf("ktrace recovering: entry %p: entry crosses "
629 				"fAfterLastEntry (%p)\n", entry, fAfterLastEntry);
630 			errorCount++;
631 			fAfterLastEntry = entry;
632 			break;
633 		}
634 
635 		// check for wrap entry
636 		if ((entry->flags & WRAP_ENTRY) != 0) {
637 			if ((uint32)(fBuffer + kBufferSize - entry)
638 					> kMaxTracingEntryByteSize / sizeof(trace_entry)) {
639 				dprintf("ktrace recovering: entry %p: wrap entry at invalid "
640 					"buffer location\n", entry);
641 				errorCount++;
642 			}
643 
644 			if (entry->size != 0) {
645 				dprintf("ktrace recovering: entry %p: invalid wrap entry "
646 					"size: %" B_PRIu32 "\n", entry, entry->size);
647 				errorCount++;
648 				entry->size = 0;
649 			}
650 
651 			previousEntrySize = fBuffer + kBufferSize - entry;
652 			entry = fBuffer;
653 			continue;
654 		}
655 
656 		if ((entry->flags & BUFFER_ENTRY) == 0) {
657 			entry->flags |= CHECK_ENTRY;
658 			nonBufferEntryCount++;
659 		}
660 
661 		entryCount++;
662 		previousEntrySize = entry->size;
663 
664 		entry += entry->size;
665 	}
666 
667 	if (errorCount > kMaxRecoveringErrorCount) {
668 		dprintf("ktrace recovering: Too many errors.\n");
669 		fAfterLastEntry = entry;
670 		fAfterLastEntry->previous_size = previousEntrySize;
671 	}
672 
673 	dprintf("ktrace recovering: Recovered %" B_PRIu32 " entries + %" B_PRIu32
674 		" buffer entries from previous session. Expected %" B_PRIu32
675 		" entries.\n", nonBufferEntryCount, entryCount - nonBufferEntryCount,
676 		fEntries);
677 	fEntries = nonBufferEntryCount;
678 
679 	B_INITIALIZE_SPINLOCK(&fLock);
680 
681 	// TODO: Actually check the entries! Do that when first accessing the
682 	// tracing buffer from the kernel debugger (when sTracingDataRecovered is
683 	// true).
684 	sTracingDataRecovered = true;
685 	return true;
686 }
687 
688 
689 #endif	// ENABLE_TRACING
690 
691 
692 // #pragma mark -
693 
694 
695 TraceOutput::TraceOutput(char* buffer, size_t bufferSize, uint32 flags)
696 	: fBuffer(buffer),
697 	  fCapacity(bufferSize),
698 	  fFlags(flags)
699 {
700 	Clear();
701 }
702 
703 
704 void
705 TraceOutput::Clear()
706 {
707 	if (fCapacity > 0)
708 		fBuffer[0] = '\0';
709 	fSize = 0;
710 }
711 
712 
713 void
714 TraceOutput::PrintArgs(const char* format, va_list args)
715 {
716 #if ENABLE_TRACING
717 	if (IsFull())
718 		return;
719 
720 	size_t length = vsnprintf(fBuffer + fSize, fCapacity - fSize, format, args);
721 	fSize += std::min(length, fCapacity - fSize - 1);
722 #endif
723 }
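
// Note: vsnprintf() returns the length the formatted string would have had,
// had it fit, so the std::min() caps fSize at fCapacity - 1, leaving room
// for the terminating NUL; once the buffer is full, IsFull() short-circuits
// subsequent calls.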
724 
725 
726 void
727 TraceOutput::PrintStackTrace(tracing_stack_trace* stackTrace)
728 {
729 #if ENABLE_TRACING
730 	print_stack_trace(stackTrace, TraceOutputPrint(*this));
731 		// print_stack_trace() handles NULL and empty stack traces itself and
732 		// does the symbol lookup and demangling for each frame.
750 #endif
751 }
752 
753 
754 void
755 TraceOutput::SetLastEntryTime(bigtime_t time)
756 {
757 	fLastEntryTime = time;
758 }
759 
760 
761 bigtime_t
762 TraceOutput::LastEntryTime() const
763 {
764 	return fLastEntryTime;
765 }
766 
767 
768 //	#pragma mark -
769 
770 
771 TraceEntry::TraceEntry()
772 {
773 }
774 
775 
776 TraceEntry::~TraceEntry()
777 {
778 }
779 
780 
781 void
782 TraceEntry::Dump(TraceOutput& out)
783 {
784 #if ENABLE_TRACING
785 	// to be overridden by subclasses
786 	out.Print("ENTRY %p", this);
787 #endif
788 }
789 
790 
791 void
792 TraceEntry::DumpStackTrace(TraceOutput& out)
793 {
794 }
795 
796 
797 void
798 TraceEntry::Initialized()
799 {
800 #if ENABLE_TRACING
801 	ToTraceEntry()->flags |= ENTRY_INITIALIZED;
802 	sTracingMetaData->IncrementEntriesEver();
803 #endif
804 }
805 
806 
807 void*
808 TraceEntry::operator new(size_t size, const std::nothrow_t&) throw()
809 {
810 #if ENABLE_TRACING
811 	trace_entry* entry = sTracingMetaData->AllocateEntry(
812 		size + sizeof(trace_entry), 0);
813 	return entry != NULL ? entry + 1 : NULL;
814 #endif
815 	return NULL;
816 }
817 
818 
819 //	#pragma mark -
820 
821 
822 AbstractTraceEntry::~AbstractTraceEntry()
823 {
824 }
825 
826 
827 void
828 AbstractTraceEntry::Dump(TraceOutput& out)
829 {
830 	bigtime_t time = (out.Flags() & TRACE_OUTPUT_DIFF_TIME)
831 		? fTime - out.LastEntryTime()
832 		: fTime;
833 
834 	if (out.Flags() & TRACE_OUTPUT_TEAM_ID) {
835 		out.Print("[%6" B_PRId32 ":%6" B_PRId32 "] %10" B_PRId64 ": ", fThread,
836 			fTeam, time);
837 	} else
838 		out.Print("[%6" B_PRId32 "] %10" B_PRId64 ": ", fThread, time);
839 
840 	AddDump(out);
841 
842 	out.SetLastEntryTime(fTime);
843 }
844 
845 
846 void
847 AbstractTraceEntry::AddDump(TraceOutput& out)
848 {
849 }
850 
851 
852 void
853 AbstractTraceEntry::_Init()
854 {
855 	Thread* thread = thread_get_current_thread();
856 	if (thread != NULL) {
857 		fThread = thread->id;
858 		if (thread->team)
859 			fTeam = thread->team->id;
860 	}
861 	fTime = system_time();
862 }
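
// A typical trace entry subclasses AbstractTraceEntry along the lines of
// this hypothetical sketch (compare KernelTraceEntry below): the constructor
// captures its arguments and calls Initialized(); AddDump() formats them
// later, on demand.
//
//	class MyTraceEntry : public AbstractTraceEntry {
//	public:
//		MyTraceEntry(int32 value)
//			:
//			fValue(value)
//		{
//			Initialized();
//		}
//
//		virtual void AddDump(TraceOutput& out)
//		{
//			out.Print("my event: %" B_PRId32, fValue);
//		}
//
//	private:
//		int32	fValue;
//	};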
863 
864 
865 //	#pragma mark - AbstractTraceEntryWithStackTrace
866 
867 
869 AbstractTraceEntryWithStackTrace::AbstractTraceEntryWithStackTrace(
870 	size_t stackTraceDepth, size_t skipFrames, bool kernelOnly)
871 {
872 	fStackTrace = capture_tracing_stack_trace(stackTraceDepth, skipFrames + 1,
873 		kernelOnly);
874 }
875 
876 
877 void
878 AbstractTraceEntryWithStackTrace::DumpStackTrace(TraceOutput& out)
879 {
880 	out.PrintStackTrace(fStackTrace);
881 }
882 
883 
884 //	#pragma mark -
885 
886 
887 #if ENABLE_TRACING
888 
889 class KernelTraceEntry : public AbstractTraceEntry {
890 	public:
891 		KernelTraceEntry(const char* message)
892 		{
893 			fMessage = alloc_tracing_buffer_strcpy(message, 256, false);
894 
895 #if KTRACE_PRINTF_STACK_TRACE
896 			fStackTrace = capture_tracing_stack_trace(
897 				KTRACE_PRINTF_STACK_TRACE, 1, false);
898 #endif
899 			Initialized();
900 		}
901 
902 		virtual void AddDump(TraceOutput& out)
903 		{
904 			out.Print("kern: %s", fMessage);
905 		}
906 
907 #if KTRACE_PRINTF_STACK_TRACE
908 		virtual void DumpStackTrace(TraceOutput& out)
909 		{
910 			out.PrintStackTrace(fStackTrace);
911 		}
912 #endif
913 
914 	private:
915 		char*	fMessage;
916 #if KTRACE_PRINTF_STACK_TRACE
917 		tracing_stack_trace* fStackTrace;
918 #endif
919 };
920 
921 
922 class UserTraceEntry : public AbstractTraceEntry {
923 	public:
924 		UserTraceEntry(const char* message)
925 		{
926 			fMessage = alloc_tracing_buffer_strcpy(message, 256, true);
927 
928 #if KTRACE_PRINTF_STACK_TRACE
929 			fStackTrace = capture_tracing_stack_trace(
930 				KTRACE_PRINTF_STACK_TRACE, 1, false);
931 #endif
932 			Initialized();
933 		}
934 
935 		virtual void AddDump(TraceOutput& out)
936 		{
937 			out.Print("user: %s", fMessage);
938 		}
939 
940 #if KTRACE_PRINTF_STACK_TRACE
941 		virtual void DumpStackTrace(TraceOutput& out)
942 		{
943 			out.PrintStackTrace(fStackTrace);
944 		}
945 #endif
946 
947 	private:
948 		char*	fMessage;
949 #if KTRACE_PRINTF_STACK_TRACE
950 		tracing_stack_trace* fStackTrace;
951 #endif
952 };
953 
954 
955 class TracingLogStartEntry : public AbstractTraceEntry {
956 	public:
957 		TracingLogStartEntry()
958 		{
959 			Initialized();
960 		}
961 
962 		virtual void AddDump(TraceOutput& out)
963 		{
964 			out.Print("ktrace start");
965 		}
966 };
967 
968 #endif	// ENABLE_TRACING
969 
970 
971 //	#pragma mark - trace filters
972 
973 
974 TraceFilter::~TraceFilter()
975 {
976 }
977 
978 
979 bool
980 TraceFilter::Filter(const TraceEntry* entry, LazyTraceOutput& out)
981 {
982 	return false;
983 }
984 
985 
987 class ThreadTraceFilter : public TraceFilter {
988 public:
989 	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
990 	{
991 		const AbstractTraceEntry* entry
992 			= dynamic_cast<const AbstractTraceEntry*>(_entry);
993 		return (entry != NULL && entry->ThreadID() == fThread);
994 	}
995 };
996 
997 
998 class TeamTraceFilter : public TraceFilter {
999 public:
1000 	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
1001 	{
1002 		const AbstractTraceEntry* entry
1003 			= dynamic_cast<const AbstractTraceEntry*>(_entry);
1004 		return (entry != NULL && entry->TeamID() == fTeam);
1005 	}
1006 };
1007 
1008 
1009 class PatternTraceFilter : public TraceFilter {
1010 public:
1011 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1012 	{
1013 		return strstr(out.DumpEntry(entry), fString) != NULL;
1014 	}
1015 };
1016 
1017 
1018 class DecimalPatternTraceFilter : public TraceFilter {
1019 public:
1020 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1021 	{
1022 		// TODO: this is *very* slow
1023 		char buffer[64];
1024 		snprintf(buffer, sizeof(buffer), "%" B_PRId64, fValue);
1025 		return strstr(out.DumpEntry(entry), buffer) != NULL;
1026 	}
1027 };
1028 
1029 class HexPatternTraceFilter : public TraceFilter {
1030 public:
1031 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1032 	{
1033 		// TODO: this is *very* slow
1034 		char buffer[64];
1035 		snprintf(buffer, sizeof(buffer), "%" B_PRIx64, fValue);
1036 		return strstr(out.DumpEntry(entry), buffer) != NULL;
1037 	}
1038 };
1039 
1040 class StringPatternTraceFilter : public TraceFilter {
1041 public:
1042 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1043 	{
1044 		if (IS_KERNEL_ADDRESS(fValue))
1045 			return strstr(out.DumpEntry(entry), (const char*)fValue) != NULL;
1046 
1047 		// TODO: this is *very* slow
1048 		char buffer[64];
1049 		user_strlcpy(buffer, (const char*)fValue, sizeof(buffer));
1050 		return strstr(out.DumpEntry(entry), buffer) != NULL;
1051 	}
1052 };
1053 
1054 class NotTraceFilter : public TraceFilter {
1055 public:
1056 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1057 	{
1058 		return !fSubFilters.first->Filter(entry, out);
1059 	}
1060 };
1061 
1062 
1063 class AndTraceFilter : public TraceFilter {
1064 public:
1065 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1066 	{
1067 		return fSubFilters.first->Filter(entry, out)
1068 			&& fSubFilters.second->Filter(entry, out);
1069 	}
1070 };
1071 
1072 
1073 class OrTraceFilter : public TraceFilter {
1074 public:
1075 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1076 	{
1077 		return fSubFilters.first->Filter(entry, out)
1078 			|| fSubFilters.second->Filter(entry, out);
1079 	}
1080 };
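
// Filter expressions use prefix notation. For example, the hypothetical
// invocation
//
//	traced 0 10 filter and thread 42 #vm_page
//
// yields and(thread(42), pattern("vm_page")), i.e. matches entries of thread
// 42 whose dumped text contains "vm_page".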
1081 
1082 
1083 class TraceFilterParser {
1084 public:
1085 	static TraceFilterParser* Default()
1086 	{
1087 		return &sParser;
1088 	}
1089 
1090 	bool Parse(int argc, const char* const* argv)
1091 	{
1092 		fTokens = argv;
1093 		fTokenCount = argc;
1094 		fTokenIndex = 0;
1095 		fFilterCount = 0;
1096 
1097 		TraceFilter* filter = _ParseExpression();
1098 		return fTokenIndex == fTokenCount && filter != NULL;
1099 	}
1100 
1101 	TraceFilter* Filter()
1102 	{
1103 		return &fFilters[0];
1104 	}
1105 
1106 private:
1107 	TraceFilter* _ParseExpression()
1108 	{
1109 		const char* token = _NextToken();
1110 		if (!token) {
1111 			// unexpected end of expression
1112 			return NULL;
1113 		}
1114 
1115 		if (fFilterCount == MAX_FILTERS) {
1116 			// too many filters
1117 			return NULL;
1118 		}
1119 
1120 		if (token[0] == '#') {
1121 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1122 				PatternTraceFilter;
1123 			filter->fString = token + 1;
1124 			return filter;
1125 		} else if (token[0] == 'd' && token[1] == '#') {
1126 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1127 				DecimalPatternTraceFilter;
1128 			filter->fValue = parse_expression(token + 2);
1129 			return filter;
1130 		} else if (token[0] == 'x' && token[1] == '#') {
1131 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1132 				HexPatternTraceFilter;
1133 			filter->fValue = parse_expression(token + 2);
1134 			return filter;
1135 		} else if (token[0] == 's' && token[1] == '#') {
1136 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1137 				StringPatternTraceFilter;
1138 			filter->fValue = parse_expression(token + 2);
1139 			return filter;
1140 		} else if (strcmp(token, "not") == 0) {
1141 			TraceFilter* filter = new(&fFilters[fFilterCount++]) NotTraceFilter;
1142 			if ((filter->fSubFilters.first = _ParseExpression()) != NULL)
1143 				return filter;
1144 			return NULL;
1145 		} else if (strcmp(token, "and") == 0) {
1146 			TraceFilter* filter = new(&fFilters[fFilterCount++]) AndTraceFilter;
1147 			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
1148 				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
1149 				return filter;
1150 			}
1151 			return NULL;
1152 		} else if (strcmp(token, "or") == 0) {
1153 			TraceFilter* filter = new(&fFilters[fFilterCount++]) OrTraceFilter;
1154 			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
1155 				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
1156 				return filter;
1157 			}
1158 			return NULL;
1159 		} else if (strcmp(token, "thread") == 0) {
1160 			const char* arg = _NextToken();
1161 			if (arg == NULL) {
1162 				// unexpected end of expression
1163 				return NULL;
1164 			}
1165 
1166 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1167 				ThreadTraceFilter;
1168 			filter->fThread = strtol(arg, NULL, 0);
1169 			return filter;
1170 		} else if (strcmp(token, "team") == 0) {
1171 			const char* arg = _NextToken();
1172 			if (arg == NULL) {
1173 				// unexpected end of expression
1174 				return NULL;
1175 			}
1176 
1177 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1178 				TeamTraceFilter;
1179 			filter->fTeam = strtol(arg, NULL, 0);
1180 			return filter;
1181 		} else {
1182 			// invalid token
1183 			return NULL;
1184 		}
1185 	}
1186 
1187 	const char* _CurrentToken() const
1188 	{
1189 		if (fTokenIndex >= 1 && fTokenIndex <= fTokenCount)
1190 			return fTokens[fTokenIndex - 1];
1191 		return NULL;
1192 	}
1193 
1194 	const char* _NextToken()
1195 	{
1196 		if (fTokenIndex >= fTokenCount)
1197 			return NULL;
1198 		return fTokens[fTokenIndex++];
1199 	}
1200 
1201 private:
1202 	enum { MAX_FILTERS = 32 };
1203 
1204 	const char* const*			fTokens;
1205 	int							fTokenCount;
1206 	int							fTokenIndex;
1207 	TraceFilter					fFilters[MAX_FILTERS];
1208 	int							fFilterCount;
1209 
1210 	static TraceFilterParser	sParser;
1211 };
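
// The parser placement-news all filters into the static fFilters array:
// running in the kernel debugger, it avoids heap allocations, so MAX_FILTERS
// bounds the size of an expression instead.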
1212 
1213 
1214 TraceFilterParser TraceFilterParser::sParser;
1215 
1216 
1217 //	#pragma mark -
1218 
1219 
1220 #if ENABLE_TRACING
1221 
1222 
1223 TraceEntry*
1224 TraceEntryIterator::Next()
1225 {
1226 	if (fIndex == 0) {
1227 		fEntry = _NextNonBufferEntry(sTracingMetaData->FirstEntry());
1228 		fIndex = 1;
1229 	} else if (fEntry != NULL) {
1230 		fEntry = _NextNonBufferEntry(sTracingMetaData->NextEntry(fEntry));
1231 		fIndex++;
1232 	}
1233 
1234 	return Current();
1235 }
1236 
1237 
1238 TraceEntry*
1239 TraceEntryIterator::Previous()
1240 {
1241 	if (fIndex == (int32)sTracingMetaData->Entries() + 1)
1242 		fEntry = sTracingMetaData->AfterLastEntry();
1243 
1244 	if (fEntry != NULL) {
1245 		fEntry = _PreviousNonBufferEntry(
1246 			sTracingMetaData->PreviousEntry(fEntry));
1247 		fIndex--;
1248 	}
1249 
1250 	return Current();
1251 }
1252 
1253 
1254 TraceEntry*
1255 TraceEntryIterator::MoveTo(int32 index)
1256 {
1257 	if (index == fIndex)
1258 		return Current();
1259 
1260 	if (index <= 0 || index > (int32)sTracingMetaData->Entries()) {
1261 		fIndex = (index <= 0 ? 0 : sTracingMetaData->Entries() + 1);
1262 		fEntry = NULL;
1263 		return NULL;
1264 	}
1265 
1266 	// get the shortest iteration path
1267 	int32 distance = index - fIndex;
1268 	int32 direction = distance < 0 ? -1 : 1;
1269 	distance *= direction;
1270 
1271 	if (index < distance) {
1272 		distance = index;
1273 		direction = 1;
1274 		fEntry = NULL;
1275 		fIndex = 0;
1276 	}
1277 	if ((int32)sTracingMetaData->Entries() + 1 - fIndex < distance) {
1278 		distance = sTracingMetaData->Entries() + 1 - fIndex;
1279 		direction = -1;
1280 		fEntry = NULL;
1281 		fIndex = sTracingMetaData->Entries() + 1;
1282 	}
1283 
1284 	// iterate to the index
1285 	if (direction < 0) {
1286 		while (fIndex != index)
1287 			Previous();
1288 	} else {
1289 		while (fIndex != index)
1290 			Next();
1291 	}
1292 
1293 	return Current();
1294 }
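
// Since entries are variable-sized, MoveTo() has to walk entry by entry; the
// checks above merely pick the cheapest starting point: the current
// position, the buffer start, or the buffer end.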
1295 
1296 
1297 trace_entry*
1298 TraceEntryIterator::_NextNonBufferEntry(trace_entry* entry)
1299 {
1300 	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
1301 		entry = sTracingMetaData->NextEntry(entry);
1302 
1303 	return entry;
1304 }
1305 
1306 
1307 trace_entry*
1308 TraceEntryIterator::_PreviousNonBufferEntry(trace_entry* entry)
1309 {
1310 	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
1311 		entry = sTracingMetaData->PreviousEntry(entry);
1312 
1313 	return entry;
1314 }
1315 
1316 
1317 int
1318 dump_tracing_internal(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
1319 {
1320 	int argi = 1;
1321 
1322 	// variables in which we store our state to be continuable
1323 	static int32 _previousCount = 0;
1324 	static bool _previousHasFilter = false;
1325 	static bool _previousPrintStackTrace = false;
1326 	static int32 _previousMaxToCheck = 0;
1327 	static int32 _previousFirstChecked = 1;
1328 	static int32 _previousLastChecked = -1;
1329 	static int32 _previousDirection = 1;
1330 	static uint32 _previousEntriesEver = 0;
1331 	static uint32 _previousEntries = 0;
1332 	static uint32 _previousOutputFlags = 0;
1333 	static TraceEntryIterator iterator;
1334 
1335 	uint32 entriesEver = sTracingMetaData->EntriesEver();
1336 
1337 	// Note: start and index are Pascal-like indices (i.e. in [1, Entries()]).
1338 	int32 start = 0;	// special index: print the last count entries
1339 	int32 count = 0;
1340 	int32 maxToCheck = 0;
1341 	int32 cont = 0;
1342 
1343 	bool hasFilter = false;
1344 	bool printStackTrace = false;
1345 
1346 	uint32 outputFlags = 0;
1347 	while (argi < argc) {
1348 		if (strcmp(argv[argi], "--difftime") == 0) {
1349 			outputFlags |= TRACE_OUTPUT_DIFF_TIME;
1350 			argi++;
1351 		} else if (strcmp(argv[argi], "--printteam") == 0) {
1352 			outputFlags |= TRACE_OUTPUT_TEAM_ID;
1353 			argi++;
1354 		} else if (strcmp(argv[argi], "--stacktrace") == 0) {
1355 			printStackTrace = true;
1356 			argi++;
1357 		} else
1358 			break;
1359 	}
1360 
1361 	if (argi < argc) {
1362 		if (strcmp(argv[argi], "forward") == 0) {
1363 			cont = 1;
1364 			argi++;
1365 		} else if (strcmp(argv[argi], "backward") == 0) {
1366 			cont = -1;
1367 			argi++;
1368 		}
1369 	} else
1370 		cont = _previousDirection;
1371 
1372 	if (cont != 0) {
1373 		if (argi < argc) {
1374 			print_debugger_command_usage(argv[0]);
1375 			return 0;
1376 		}
1377 		if (entriesEver == 0 || entriesEver != _previousEntriesEver
1378 			|| sTracingMetaData->Entries() != _previousEntries) {
1379 			kprintf("Can't continue iteration. \"%s\" has not been invoked "
1380 				"before, or there were new entries written since the last "
1381 				"invocation.\n", argv[0]);
1382 			return 0;
1383 		}
1384 	}
1385 
1386 	// get start, count, maxToCheck
1387 	int32* params[3] = { &start, &count, &maxToCheck };
1388 	for (int i = 0; i < 3 && !hasFilter && argi < argc; i++) {
1389 		if (strcmp(argv[argi], "filter") == 0) {
1390 			hasFilter = true;
1391 			argi++;
1392 		} else if (argv[argi][0] == '#') {
1393 			hasFilter = true;
1394 		} else {
1395 			*params[i] = parse_expression(argv[argi]);
1396 			argi++;
1397 		}
1398 	}
1399 
1400 	// filter specification
1401 	if (argi < argc) {
1402 		hasFilter = true;
1403 		if (strcmp(argv[argi], "filter") == 0)
1404 			argi++;
1405 
1406 		if (!TraceFilterParser::Default()->Parse(argc - argi, argv + argi)) {
1407 			print_debugger_command_usage(argv[0]);
1408 			return 0;
1409 		}
1410 	}
1411 
1412 	int32 direction;
1413 	int32 firstToCheck;
1414 	int32 lastToCheck;
1415 
1416 	if (cont != 0) {
1417 		// get values from the previous iteration
1418 		direction = cont;
1419 		count = _previousCount;
1420 		maxToCheck = _previousMaxToCheck;
1421 		hasFilter = _previousHasFilter;
1422 		outputFlags = _previousOutputFlags;
1423 		printStackTrace = _previousPrintStackTrace;
1424 
1425 		if (direction < 0)
1426 			start = _previousFirstChecked - 1;
1427 		else
1428 			start = _previousLastChecked + 1;
1429 	} else {
1430 		// defaults for count and maxToCheck
1431 		if (count == 0)
1432 			count = 30;
1433 		if (maxToCheck == 0 || !hasFilter)
1434 			maxToCheck = count;
1435 		else if (maxToCheck < 0)
1436 			maxToCheck = sTracingMetaData->Entries();
1437 
1438 		// determine iteration direction
1439 		direction = (start <= 0 || count < 0 ? -1 : 1);
1440 
1441 		// validate count and maxToCheck
1442 		if (count < 0)
1443 			count = -count;
1444 		if (maxToCheck < 0)
1445 			maxToCheck = -maxToCheck;
1446 		if (maxToCheck > (int32)sTracingMetaData->Entries())
1447 			maxToCheck = sTracingMetaData->Entries();
1448 		if (count > maxToCheck)
1449 			count = maxToCheck;
1450 
1451 		// validate start
1452 		if (start <= 0 || start > (int32)sTracingMetaData->Entries())
1453 			start = max_c(1, sTracingMetaData->Entries());
1454 	}
1455 
1456 	if (direction < 0) {
1457 		firstToCheck = max_c(1, start - maxToCheck + 1);
1458 		lastToCheck = start;
1459 	} else {
1460 		firstToCheck = start;
1461 		lastToCheck = min_c((int32)sTracingMetaData->Entries(),
1462 			start + maxToCheck - 1);
1463 	}
1464 
1465 	// reset the iterator, if something changed in the meantime
1466 	if (entriesEver == 0 || entriesEver != _previousEntriesEver
1467 		|| sTracingMetaData->Entries() != _previousEntries) {
1468 		iterator.Reset();
1469 	}
1470 
1471 	LazyTraceOutput out(sTracingMetaData->TraceOutputBuffer(),
1472 		kTraceOutputBufferSize, outputFlags);
1473 
1474 	bool markedMatching = false;
1475 	int32 firstToDump = firstToCheck;
1476 	int32 lastToDump = lastToCheck;
1477 
1478 	TraceFilter* filter = NULL;
1479 	if (hasFilter)
1480 		filter = TraceFilterParser::Default()->Filter();
1481 
1482 	if (wrapperFilter != NULL) {
1483 		wrapperFilter->Init(filter, direction, cont != 0);
1484 		filter = wrapperFilter;
1485 	}
1486 
1487 	if (direction < 0 && filter && lastToCheck - firstToCheck >= count) {
1488 		// iteration direction is backwards
1489 		markedMatching = true;
1490 
1491 		// From the last entry to check iterate backwards to check filter
1492 		// matches.
1493 		int32 matching = 0;
1494 
1495 		// move to the entry after the last entry to check
1496 		iterator.MoveTo(lastToCheck + 1);
1497 
1498 		// iterate backwards
1499 		firstToDump = -1;
1500 		lastToDump = -1;
1501 		while (iterator.Index() > firstToCheck) {
1502 			TraceEntry* entry = iterator.Previous();
1503 			if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
1504 				out.Clear();
1505 				if (filter->Filter(entry, out)) {
1506 					entry->ToTraceEntry()->flags |= FILTER_MATCH;
1507 					if (lastToDump == -1)
1508 						lastToDump = iterator.Index();
1509 					firstToDump = iterator.Index();
1510 
1511 					matching++;
1512 					if (matching >= count)
1513 						break;
1514 				} else
1515 					entry->ToTraceEntry()->flags &= ~FILTER_MATCH;
1516 			}
1517 		}
1518 
1519 		firstToCheck = iterator.Index();
1520 
1521 		// iterate to the previous entry, so that the next loop starts at the
1522 		// right one
1523 		iterator.Previous();
1524 	}
1525 
1526 	out.SetLastEntryTime(0);
1527 
1528 	// set the iterator to the entry before the first one to dump
1529 	iterator.MoveTo(firstToDump - 1);
1530 
1531 	// dump the entries matching the filter in the range
1532 	// [firstToDump, lastToDump]
1533 	int32 dumped = 0;
1534 
1535 	while (TraceEntry* entry = iterator.Next()) {
1536 		int32 index = iterator.Index();
1537 		if (index < firstToDump)
1538 			continue;
1539 		if (index > lastToDump || dumped >= count) {
1540 			if (direction > 0)
1541 				lastToCheck = index - 1;
1542 			break;
1543 		}
1544 
1545 		if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
1546 			out.Clear();
1547 			if (filter &&  (markedMatching
1548 					? (entry->Flags() & FILTER_MATCH) == 0
1549 					: !filter->Filter(entry, out))) {
1550 				continue;
1551 			}
1552 
1553 			// don't print trailing new line
1554 			const char* dump = out.DumpEntry(entry);
1555 			int len = strlen(dump);
1556 			if (len > 0 && dump[len - 1] == '\n')
1557 				len--;
1558 
1559 			kprintf("%5" B_PRId32 ". %.*s\n", index, len, dump);
1560 
1561 			if (printStackTrace) {
1562 				out.Clear();
1563 				entry->DumpStackTrace(out);
1564 				if (out.Size() > 0)
1565 					kputs(out.Buffer());
1566 			}
1567 		} else if (!filter)
1568 			kprintf("%5" B_PRId32 ". ** uninitialized entry **\n", index);
1569 
1570 		dumped++;
1571 	}
1572 
1573 	kprintf("printed %" B_PRId32 " entries within range %" B_PRId32 " to %"
1574 		B_PRId32 " (%" B_PRId32 " of %" B_PRId32 " total, %" B_PRId32 " ever)\n",
1575 		dumped, firstToCheck, lastToCheck, lastToCheck - firstToCheck + 1,
1576 		sTracingMetaData->Entries(), entriesEver);
1577 
1578 	// store iteration state
1579 	_previousCount = count;
1580 	_previousMaxToCheck = maxToCheck;
1581 	_previousHasFilter = hasFilter;
1582 	_previousPrintStackTrace = printStackTrace;
1583 	_previousFirstChecked = firstToCheck;
1584 	_previousLastChecked = lastToCheck;
1585 	_previousDirection = direction;
1586 	_previousEntriesEver = entriesEver;
1587 	_previousEntries = sTracingMetaData->Entries();
1588 	_previousOutputFlags = outputFlags;
1589 
1590 	return cont != 0 ? B_KDEBUG_CONT : 0;
1591 }
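
// A few example invocations (see the usage text in tracing_init() for the
// full syntax):
//
//	traced                         - print the last 30 entries
//	traced 100 10                  - print entries 100 through 109
//	traced 0 20 filter thread 42   - print the last 20 entries of thread 42
//	traced forward                 - continue where the last call left off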
1592 
1593 
1594 static int
1595 dump_tracing_command(int argc, char** argv)
1596 {
1597 	return dump_tracing_internal(argc, argv, NULL);
1598 }
1599 
1600 
1601 #endif	// ENABLE_TRACING
1602 
1603 
1604 extern "C" uint8*
1605 alloc_tracing_buffer(size_t size)
1606 {
1607 #if	ENABLE_TRACING
1608 	trace_entry* entry = sTracingMetaData->AllocateEntry(
1609 		size + sizeof(trace_entry), BUFFER_ENTRY);
1610 	if (entry == NULL)
1611 		return NULL;
1612 
1613 	return (uint8*)(entry + 1);
1614 #else
1615 	return NULL;
1616 #endif
1617 }
1618 
1619 
1620 uint8*
1621 alloc_tracing_buffer_memcpy(const void* source, size_t size, bool user)
1622 {
1623 	if (user && !IS_USER_ADDRESS(source))
1624 		return NULL;
1625 
1626 	uint8* buffer = alloc_tracing_buffer(size);
1627 	if (buffer == NULL)
1628 		return NULL;
1629 
1630 	if (user) {
1631 		if (user_memcpy(buffer, source, size) != B_OK)
1632 			return NULL;
1633 	} else
1634 		memcpy(buffer, source, size);
1635 
1636 	return buffer;
1637 }
1638 
1639 
1640 char*
1641 alloc_tracing_buffer_strcpy(const char* source, size_t maxSize, bool user)
1642 {
1643 	if (source == NULL || maxSize == 0)
1644 		return NULL;
1645 
1646 	if (user && !IS_USER_ADDRESS(source))
1647 		return NULL;
1648 
1649 	// limit maxSize to the actual source string len
1650 	if (user) {
1651 		ssize_t size = user_strlcpy(NULL, source, 0);
1652 			// there's no user_strnlen()
1653 		if (size < 0)
1654 			return NULL;
1655 		maxSize = min_c(maxSize, (size_t)size + 1);
1656 	} else
1657 		maxSize = strnlen(source, maxSize - 1) + 1;
1658 
1659 	char* buffer = (char*)alloc_tracing_buffer(maxSize);
1660 	if (buffer == NULL)
1661 		return NULL;
1662 
1663 	if (user) {
1664 		if (user_strlcpy(buffer, source, maxSize) < B_OK)
1665 			return NULL;
1666 	} else
1667 		strlcpy(buffer, source, maxSize);
1668 
1669 	return buffer;
1670 }
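
// Typically used to copy a message into the trace buffer when an entry is
// created, e.g. (as KernelTraceEntry does):
//
//	fMessage = alloc_tracing_buffer_strcpy(message, 256, false);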
1671 
1672 
1673 tracing_stack_trace*
1674 capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool kernelOnly)
1675 {
1676 #if	ENABLE_TRACING
1677 	// page_fault_exception() doesn't allow us to gracefully handle a bad
1678 	// address in the stack trace, if interrupts are disabled, so we always
1679 	// restrict the stack traces to the kernel only in this case. A bad address
1680 	// in the kernel stack trace would still cause a panic(), but this is
1681 	// probably even desired.
1682 	if (!are_interrupts_enabled())
1683 		kernelOnly = true;
1684 
1685 	tracing_stack_trace* stackTrace
1686 		= (tracing_stack_trace*)alloc_tracing_buffer(
1687 			sizeof(tracing_stack_trace) + maxCount * sizeof(addr_t));
1688 
1689 	if (stackTrace != NULL) {
1690 		stackTrace->depth = arch_debug_get_stack_trace(
1691 			stackTrace->return_addresses, maxCount, 0, skipFrames + 1,
1692 			STACK_TRACE_KERNEL | (kernelOnly ? 0 : STACK_TRACE_USER));
1693 	}
1694 
1695 	return stackTrace;
1696 #else
1697 	return NULL;
1698 #endif
1699 }
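
// The skipFrames + 1 above accounts for capture_tracing_stack_trace()'s own
// stack frame, so callers only specify the number of their own frames to
// skip.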
1700 
1701 
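// excludeRanges is interpreted as excludeRangeCount pairs of [start, end)
// addresses; the first return address lying outside all of them is
// considered the caller and returned.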
1702 addr_t
1703 tracing_find_caller_in_stack_trace(struct tracing_stack_trace* stackTrace,
1704 	const addr_t excludeRanges[], uint32 excludeRangeCount)
1705 {
1706 	for (int32 i = 0; i < stackTrace->depth; i++) {
1707 		addr_t returnAddress = stackTrace->return_addresses[i];
1708 
1709 		bool inRange = false;
1710 		for (uint32 j = 0; j < excludeRangeCount; j++) {
1711 			if (returnAddress >= excludeRanges[j * 2 + 0]
1712 				&& returnAddress < excludeRanges[j * 2 + 1]) {
1713 				inRange = true;
1714 				break;
1715 			}
1716 		}
1717 
1718 		if (!inRange)
1719 			return returnAddress;
1720 	}
1721 
1722 	return 0;
1723 }
1724 
1725 
1726 void
1727 tracing_print_stack_trace(struct tracing_stack_trace* stackTrace)
1728 {
1729 #if ENABLE_TRACING
1730 	print_stack_trace(stackTrace, kprintf);
1731 #endif
1732 }
1733 
1734 
1735 int
1736 dump_tracing(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
1737 {
1738 #if	ENABLE_TRACING
1739 	return dump_tracing_internal(argc, argv, wrapperFilter);
1740 #else
1741 	return 0;
1742 #endif
1743 }
1744 
1745 
1746 bool
1747 tracing_is_entry_valid(AbstractTraceEntry* candidate, bigtime_t entryTime)
1748 {
1749 #if ENABLE_TRACING
1750 	if (!sTracingMetaData->IsInBuffer(candidate, sizeof(*candidate)))
1751 		return false;
1752 
1753 	if (entryTime < 0)
1754 		return true;
1755 
1756 	TraceEntryIterator iterator;
1757 	while (TraceEntry* entry = iterator.Next()) {
1758 		AbstractTraceEntry* abstract = dynamic_cast<AbstractTraceEntry*>(entry);
1759 		if (abstract == NULL)
1760 			continue;
1761 
1762 		if (abstract != candidate && abstract->Time() > entryTime)
1763 			return false;
1764 
1765 		return candidate->Time() == entryTime;
1766 	}
1767 #endif
1768 
1769 	return false;
1770 }
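
// Note: the loop above only ever inspects the oldest initialized
// AbstractTraceEntry -- if even that one is younger than entryTime, the
// candidate has been overwritten in the meantime; otherwise the candidate is
// still valid iff its time stamp is unchanged.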
1771 
1772 
1773 void
1774 lock_tracing_buffer()
1775 {
1776 #if ENABLE_TRACING
1777 	sTracingMetaData->Lock();
1778 #endif
1779 }
1780 
1781 
1782 void
1783 unlock_tracing_buffer()
1784 {
1785 #if ENABLE_TRACING
1786 	sTracingMetaData->Unlock();
1787 #endif
1788 }
1789 
1790 
1791 extern "C" status_t
1792 tracing_init(void)
1793 {
1794 #if	ENABLE_TRACING
1795 	status_t result = TracingMetaData::Create(sTracingMetaData);
1796 	if (result != B_OK) {
1797 		memset(&sFallbackTracingMetaData, 0, sizeof(sFallbackTracingMetaData));
1798 		sTracingMetaData = &sFallbackTracingMetaData;
1799 		return result;
1800 	}
1801 
1802 	new(nothrow) TracingLogStartEntry();
1803 
1804 	add_debugger_command_etc("traced", &dump_tracing_command,
1805 		"Dump recorded trace entries",
1806 		"[ --printteam ] [ --difftime ] [ --stacktrace ] "
1807 			"(\"forward\" | \"backward\") "
1808 			"| ([ <start> [ <count> [ <range> ] ] ] "
1809 			"[ #<pattern> | (\"filter\" <filter>) ])\n"
1810 		"Prints recorded trace entries. If \"backward\" or \"forward\" is\n"
1811 		"specified, the command continues where the previous invocation left\n"
1812 		"off, i.e. printing the previous respectively next entries (as many\n"
1813 		"as printed before). In this case the command is continuable, that is\n"
1814 		"afterwards entering an empty line in the debugger will reinvoke it.\n"
1815 		"If no arguments are given, the command continues in the direction\n"
1816 		"of the last invocation.\n"
1817 		"--printteam  - enables printing the entries' team IDs.\n"
1818 		"--difftime   - print difference times for all but the first entry.\n"
1819 		"--stacktrace - print stack traces for entries that captured one.\n"
1820 		"  <start>    - The base index of the entries to print. Depending on\n"
1821 		"               whether the iteration direction is forward or\n"
1822 		"               backward this will be the first or last entry printed\n"
1823 		"               (potentially, if a filter is specified). The index of\n"
1824 		"               the first entry in the trace buffer is 1. If 0 is\n"
1825 		"               specified, the last <count> recorded entries are\n"
1826 		"               printed (iteration direction is backward). Defaults \n"
1827 		"               to 0.\n"
1828 		"  <count>    - The number of entries to be printed. Defaults to 30.\n"
1829 		"               If negative, the -<count> entries before and\n"
1830 		"               including <start> will be printed.\n"
1831 		"  <range>    - Only relevant if a filter is specified. Specifies the\n"
1832 		"               number of entries to be filtered -- depending on the\n"
1833 		"               iteration direction the entries before or after\n"
1834 		"               <start>. If more than <count> entries match the\n"
1835 		"               filter, only the first (forward) or last (backward)\n"
1836 		"               <count> matching entries will be printed. If 0 is\n"
1837 		"               specified <range> will be set to <count>. If -1,\n"
1838 		"               <range> will be set to the number of recorded\n"
1839 		"               entries.\n"
1840 		"  <pattern>  - If specified only entries containing this string are\n"
1841 		"               printed.\n"
1842 		"  <filter>   - If specified only entries matching this filter\n"
1843 		"               expression are printed. The expression can consist of\n"
1844 		"               prefix operators \"not\", \"and\", \"or\", and\n"
1845 		"               filters \"'thread' <thread>\" (matching entries\n"
1846 		"               with the given thread ID), \"'team' <team>\"\n"
1847 		"               (matching entries with the given team ID), and\n"
1848 		"               \"#<pattern>\" (matching entries containing the given\n"
1849 		"               string).\n", 0);
1850 #endif	// ENABLE_TRACING
1851 	return B_OK;
1852 }
1853 
1854 
1855 void
1856 ktrace_printf(const char *format, ...)
1857 {
1858 #if	ENABLE_TRACING
1859 	va_list list;
1860 	va_start(list, format);
1861 
1862 	char buffer[256];
1863 	vsnprintf(buffer, sizeof(buffer), format, list);
1864 
1865 	va_end(list);
1866 
1867 	new(nothrow) KernelTraceEntry(buffer);
1868 #endif	// ENABLE_TRACING
1869 }
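
// Note: the formatted message is truncated to the 256-byte stack buffer
// (including the terminating NUL) before KernelTraceEntry copies it into the
// trace buffer.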
1870 
1871 
1872 void
1873 _user_ktrace_output(const char *message)
1874 {
1875 #if	ENABLE_TRACING
1876 	new(nothrow) UserTraceEntry(message);
1877 #endif	// ENABLE_TRACING
1878 }
1879 
1880