/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008-2009, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2012, Rene Gollent, rene@gollent.com.
 * Distributed under the terms of the MIT License.
 */


#include <tracing.h>

#include <stdlib.h>

#include <algorithm>

#include <arch/debug.h>
#include <debug.h>
#include <debug_heap.h>
#include <elf.h>
#include <int.h>
#include <kernel.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm.h>


#if ENABLE_TRACING

//#define TRACE_TRACING
#ifdef TRACE_TRACING
#	define TRACE(x) dprintf_no_syslog x
#else
#	define TRACE(x) ;
#endif


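// Bits for trace_entry::flags: WRAP_ENTRY marks the pseudo entry that sends
// iteration back to the start of the ring buffer, ENTRY_INITIALIZED is set
// once an entry's constructor has finished, BUFFER_ENTRY marks raw payload
// blocks from alloc_tracing_buffer(), FILTER_MATCH caches the result of the
// last backward filter scan in "traced", and CHECK_ENTRY tags recovered
// entries that still await validation.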
enum {
	WRAP_ENTRY			= 0x01,
	ENTRY_INITIALIZED	= 0x02,
	BUFFER_ENTRY		= 0x04,
	FILTER_MATCH		= 0x08,
	INVALID_ENTRY		= 0x10,
	CHECK_ENTRY			= 0x20,
};


static const size_t kTraceOutputBufferSize = 10240;
static const size_t kBufferSize = MAX_TRACE_SIZE / sizeof(trace_entry);

static const uint32 kMaxRecoveringErrorCount	= 100;
static const addr_t kMetaDataBaseAddress		= 32 * 1024 * 1024;
static const addr_t kMetaDataBaseEndAddress		= 128 * 1024 * 1024;
static const addr_t kMetaDataAddressIncrement	= 8 * 1024 * 1024;
static const uint32 kMetaDataMagic1 = 'Vali';
static const uint32 kMetaDataMagic2 = 'dTra';
static const uint32 kMetaDataMagic3 = 'cing';
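// The magic values bracket the mutable TracingMetaData fields; finding all
// three intact at one of the probed physical addresses identifies metadata
// surviving from the previous session (see _CreateMetaDataArea()).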

// the maximum we can address with the trace_entry::[previous_]size fields
static const size_t kMaxTracingEntryByteSize
	= ((1 << 13) - 1) * sizeof(trace_entry);


struct TraceOutputPrint {
	TraceOutputPrint(TraceOutput& output)
		:
		fOutput(output)
	{
	}

	void operator()(const char* format, ...) const
	{
		va_list args;
		va_start(args, format);
		fOutput.PrintArgs(format, args);
		va_end(args);
	}

private:
	TraceOutput&	fOutput;
};


class TracingMetaData {
public:
	static	status_t			Create(TracingMetaData*& _metaData);

	inline	bool				Lock();
	inline	void				Unlock();

	inline	trace_entry*		FirstEntry() const;
	inline	trace_entry*		AfterLastEntry() const;

	inline	uint32				Entries() const;
	inline	uint32				EntriesEver() const;

	inline	void				IncrementEntriesEver();

	inline	char*				TraceOutputBuffer() const;

			trace_entry*		NextEntry(trace_entry* entry);
			trace_entry*		PreviousEntry(trace_entry* entry);

			trace_entry*		AllocateEntry(size_t size, uint16 flags);

			bool				IsInBuffer(void* address, size_t size);

private:
			bool				_FreeFirstEntry();
			bool				_MakeSpace(size_t needed);

	static	status_t			_CreateMetaDataArea(bool findPrevious,
									area_id& _area,
									TracingMetaData*& _metaData);
			bool				_InitPreviousTracingData();

private:
			uint32				fMagic1;
			trace_entry*		fBuffer;
			trace_entry*		fFirstEntry;
			trace_entry*		fAfterLastEntry;
			uint32				fEntries;
			uint32				fMagic2;
			uint32				fEntriesEver;
			spinlock			fLock;
			char*				fTraceOutputBuffer;
			phys_addr_t			fPhysicalAddress;
			uint32				fMagic3;
};
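// The metadata lives in its own one-page area at a well-known physical
// address, while the buffer it describes is one contiguous allocation laid
// out as [ trace output buffer | trace entries ] (see Create()).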

static TracingMetaData sFallbackTracingMetaData;
static TracingMetaData* sTracingMetaData = &sFallbackTracingMetaData;
static bool sTracingDataRecovered = false;


// #pragma mark -


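/*!	Resolves the return addresses of \a stackTrace via
	elf_debug_lookup_symbol_address() and prints one frame per line through
	\a print. Symbols are demangled only if a scratch buffer could be
	allocated from the debug heap.
*/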
template<typename Print>
static void
print_stack_trace(struct tracing_stack_trace* stackTrace,
	const Print& print)
{
	if (stackTrace == NULL || stackTrace->depth <= 0)
		return;

	static const size_t kBufferSize = 256;
	char* buffer = (char*)debug_malloc(kBufferSize);

	for (int32 i = 0; i < stackTrace->depth; i++) {
		addr_t address = stackTrace->return_addresses[i];

		const char* symbol;
		const char* demangledName = NULL;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {

			if (buffer != NULL) {
				bool isObjectMethod;
				demangledName = debug_demangle_symbol(symbol, buffer,
					kBufferSize, &isObjectMethod);
			}

			print("  %p  %s + 0x%lx (%s)%s\n", (void*)address,
				demangledName != NULL ? demangledName : symbol,
				address - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			print("  %p\n", (void*)address);
	}

	if (buffer != NULL)
		debug_free(buffer);
}


// #pragma mark - TracingMetaData


bool
TracingMetaData::Lock()
{
	acquire_spinlock(&fLock);
	return true;
}


void
TracingMetaData::Unlock()
{
	release_spinlock(&fLock);
}


trace_entry*
TracingMetaData::FirstEntry() const
{
	return fFirstEntry;
}


trace_entry*
TracingMetaData::AfterLastEntry() const
{
	return fAfterLastEntry;
}


uint32
TracingMetaData::Entries() const
{
	return fEntries;
}


uint32
TracingMetaData::EntriesEver() const
{
	return fEntriesEver;
}


void
TracingMetaData::IncrementEntriesEver()
{
	fEntriesEver++;
		// NOTE: Race condition on SMP machines! We should use atomic_add(),
		// though that costs some performance and the counter is purely
		// informational anyway.
}


char*
TracingMetaData::TraceOutputBuffer() const
{
	return fTraceOutputBuffer;
}


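/*!	Returns the entry following \a entry, transparently skipping the wrap
	entry at the end of the ring buffer. Returns \c NULL if \a entry was the
	last entry.
*/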
trace_entry*
TracingMetaData::NextEntry(trace_entry* entry)
{
	entry += entry->size;
	if ((entry->flags & WRAP_ENTRY) != 0)
		entry = fBuffer;

	if (entry == fAfterLastEntry)
		return NULL;

	return entry;
}


trace_entry*
TracingMetaData::PreviousEntry(trace_entry* entry)
{
	if (entry == fFirstEntry)
		return NULL;

	if (entry == fBuffer) {
		// beginning of buffer -- previous entry is a wrap entry
		entry = fBuffer + kBufferSize - entry->previous_size;
	}

	return entry - entry->previous_size;
}


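/*!	Allocates \a size bytes (rounded up to whole trace_entry slots) at
	fAfterLastEntry, evicting the oldest entries as needed. Returns \c NULL
	if tracing isn't initialized, \a size is out of range, or enough space
	could not be freed.
*/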
trace_entry*
TracingMetaData::AllocateEntry(size_t size, uint16 flags)
{
	if (fAfterLastEntry == NULL || size == 0
		|| size >= kMaxTracingEntryByteSize) {
		return NULL;
	}

	InterruptsSpinLocker _(fLock);

	size = (size + 3) >> 2;
		// 4 byte aligned, don't store the lower 2 bits

	TRACE(("AllocateEntry(%lu), start %p, end %p, buffer %p\n", size * 4,
		fFirstEntry, fAfterLastEntry, fBuffer));

	if (!_MakeSpace(size))
		return NULL;

	trace_entry* entry = fAfterLastEntry;
	entry->size = size;
	entry->flags = flags;
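	// Also stamp the slot right behind the new entry: PreviousEntry() reads
	// previous_size from there to walk the list backwards.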
	fAfterLastEntry += size;
	fAfterLastEntry->previous_size = size;

	if (!(flags & BUFFER_ENTRY))
		fEntries++;

	TRACE(("  entry: %p, end %p, start %p, entries %ld\n", entry,
		fAfterLastEntry, fFirstEntry, fEntries));

	return entry;
}


bool
TracingMetaData::IsInBuffer(void* address, size_t size)
{
	if (fEntries == 0)
		return false;

	addr_t start = (addr_t)address;
	addr_t end = start + size;

	if (start < (addr_t)fBuffer || end > (addr_t)(fBuffer + kBufferSize))
		return false;

	if (fFirstEntry > fAfterLastEntry)
		return start >= (addr_t)fFirstEntry || end <= (addr_t)fAfterLastEntry;

	return start >= (addr_t)fFirstEntry && end <= (addr_t)fAfterLastEntry;
}


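/*!	Drops the oldest entry by advancing fFirstEntry past it. Returns
	\c false if that entry has been allocated but not yet initialized --
	its constructor may still be writing to it.
*/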
bool
TracingMetaData::_FreeFirstEntry()
{
	TRACE(("  skip start %p, %lu*4 bytes\n", fFirstEntry, fFirstEntry->size));

	trace_entry* newFirst = NextEntry(fFirstEntry);

	if (fFirstEntry->flags & BUFFER_ENTRY) {
		// a buffer entry -- just skip it
	} else if (fFirstEntry->flags & ENTRY_INITIALIZED) {
		// Fully initialized TraceEntry: We could destroy it, but don't do so
		// for the sake of robustness. The destructors of tracing entry
		// classes should be empty anyway.
		fEntries--;
	} else {
		// Not a fully initialized TraceEntry. We can't free it, since its
		// constructor might still write into the memory and overwrite data
		// of the entry we're going to allocate.
		// We can't do anything until this entry can be discarded.
		return false;
	}

	if (newFirst == NULL) {
		// everything is freed -- practically this can't happen if the
		// buffer is large enough to hold three max-sized entries
		fFirstEntry = fAfterLastEntry = fBuffer;
		TRACE(("_FreeFirstEntry(): all entries freed!\n"));
	} else
		fFirstEntry = newFirst;

	return true;
}


/*!	Makes sure we have \a needed * 4 bytes of memory at fAfterLastEntry.
	Returns \c false if unable to free that much.
*/
bool
TracingMetaData::_MakeSpace(size_t needed)
{
	// we need space for fAfterLastEntry, too (in case we need to wrap around
	// later)
	needed++;

	// If there's not enough space (free or occupied) after fAfterLastEntry,
	// we free all entries in that region and wrap around.
	if (fAfterLastEntry + needed > fBuffer + kBufferSize) {
		TRACE(("_MakeSpace(%lu), wrapping around: after last: %p\n", needed,
			fAfterLastEntry));

		// Free all entries after fAfterLastEntry and one more at the beginning
		// of the buffer.
		while (fFirstEntry > fAfterLastEntry) {
			if (!_FreeFirstEntry())
				return false;
		}
		if (fAfterLastEntry != fBuffer && !_FreeFirstEntry())
			return false;

		// just in case _FreeFirstEntry() freed the very last existing entry
		if (fAfterLastEntry == fBuffer)
			return true;

		// mark as wrap entry and actually wrap around
		trace_entry* wrapEntry = fAfterLastEntry;
		wrapEntry->size = 0;
		wrapEntry->flags = WRAP_ENTRY;
		fAfterLastEntry = fBuffer;
		fAfterLastEntry->previous_size = fBuffer + kBufferSize - wrapEntry;
	}

	if (fFirstEntry <= fAfterLastEntry) {
		// buffer is empty or the space after fAfterLastEntry is unoccupied
		return true;
	}

	// free the first entries until there's enough space
	size_t space = fFirstEntry - fAfterLastEntry;

	if (space < needed) {
		TRACE(("_MakeSpace(%lu), left %ld\n", needed, space));
	}

	while (space < needed) {
		space += fFirstEntry->size;

		if (!_FreeFirstEntry())
			return false;
	}

	TRACE(("  out: start %p, entries %ld\n", fFirstEntry, fEntries));

	return true;
}


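/*!	Sets up the tracing metadata: either reattaches the buffer and metadata
	surviving from the previous session, or allocates a fresh metadata page
	and a contiguous tracing buffer and publishes the buffer's physical
	address in the metadata.
*/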
/*static*/ status_t
TracingMetaData::Create(TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	area_id area;
	TracingMetaData* metaData;
	status_t error = _CreateMetaDataArea(true, area, metaData);
	if (error == B_OK) {
		if (metaData->_InitPreviousTracingData()) {
			_metaData = metaData;
			return B_OK;
		}

		dprintf("Found previous tracing meta data, but failed to init.\n");

		// invalidate the meta data
		metaData->fMagic1 = 0;
		metaData->fMagic2 = 0;
		metaData->fMagic3 = 0;
		delete_area(area);
	} else
		dprintf("No previous tracing meta data found.\n");

	// no usable previous tracing data -- create new one
	error = _CreateMetaDataArea(false, area, metaData);
	if (error != B_OK)
		return error;

	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions,
		(void**)&metaData->fTraceOutputBuffer);
	if (area < 0)
		return area;

	// get the physical address
	physical_entry physicalEntry;
	if (get_memory_map(metaData->fTraceOutputBuffer, B_PAGE_SIZE,
			&physicalEntry, 1) == B_OK) {
		metaData->fPhysicalAddress = physicalEntry.address;
	} else {
		dprintf("TracingMetaData::Create(): failed to get physical address "
			"of tracing buffer\n");
		metaData->fPhysicalAddress = 0;
	}

	metaData->fBuffer = (trace_entry*)(metaData->fTraceOutputBuffer
		+ kTraceOutputBufferSize);
	metaData->fFirstEntry = metaData->fBuffer;
	metaData->fAfterLastEntry = metaData->fBuffer;

	metaData->fEntries = 0;
	metaData->fEntriesEver = 0;
	B_INITIALIZE_SPINLOCK(&metaData->fLock);

	metaData->fMagic1 = kMetaDataMagic1;
	metaData->fMagic2 = kMetaDataMagic2;
	metaData->fMagic3 = kMetaDataMagic3;

	_metaData = metaData;
	return B_OK;
}


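/*!	Tries to map the one-page metadata area at one of the well-known
	physical addresses (32 MB to 128 MB, in 8 MB steps). If \a findPrevious
	is set, only a page whose three magic values are intact is accepted;
	otherwise the first location that can be mapped is used.
*/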
/*static*/ status_t
TracingMetaData::_CreateMetaDataArea(bool findPrevious, area_id& _area,
	TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	TracingMetaData* metaData;
	phys_addr_t metaDataAddress = kMetaDataBaseAddress;
	for (; metaDataAddress <= kMetaDataBaseEndAddress;
			metaDataAddress += kMetaDataAddressIncrement) {
		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		physicalRestrictions.low_address = metaDataAddress;
		physicalRestrictions.high_address = metaDataAddress + B_PAGE_SIZE;
		area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata",
			B_PAGE_SIZE, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			CREATE_AREA_DONT_CLEAR, 0, &virtualRestrictions,
			&physicalRestrictions, (void**)&metaData);
		if (area < 0)
			continue;

		if (!findPrevious) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		if (metaData->fMagic1 == kMetaDataMagic1
			&& metaData->fMagic2 == kMetaDataMagic2
			&& metaData->fMagic3 == kMetaDataMagic3) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		delete_area(area);
	}

	if (findPrevious)
		return B_ENTRY_NOT_FOUND;

	// We couldn't allocate any of the standard locations. Instead of failing
	// entirely, we use the static meta data. The tracing buffer won't be
	// reattachable in the next session, but at least we can use it in this
	// session.
	_metaData = &sFallbackTracingMetaData;
	return B_OK;
}


bool
TracingMetaData::_InitPreviousTracingData()
{
	// TODO: ATM re-attaching the previous tracing buffer doesn't work very
	// well. The entries should be checked more thoroughly for validity --
	// e.g. the entries' vtable pointers could be invalid, which can make the
	// "traced" command quite unusable. The validity of the entries could be
	// checked in a safe environment (i.e. with a fault handler) with typeid()
	// and a virtual function call.
	return false;
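
	// NOTE: The code below is currently unreachable on purpose -- see the
	// TODO above.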

	addr_t bufferStart
		= (addr_t)fTraceOutputBuffer + kTraceOutputBufferSize;
	addr_t bufferEnd = bufferStart + MAX_TRACE_SIZE;

	if (bufferStart > bufferEnd || (addr_t)fBuffer != bufferStart
		|| (addr_t)fFirstEntry % sizeof(trace_entry) != 0
		|| (addr_t)fFirstEntry < bufferStart
		|| (addr_t)fFirstEntry + sizeof(trace_entry) >= bufferEnd
		|| (addr_t)fAfterLastEntry % sizeof(trace_entry) != 0
		|| (addr_t)fAfterLastEntry < bufferStart
		|| (addr_t)fAfterLastEntry > bufferEnd
		|| fPhysicalAddress == 0) {
		dprintf("Failed to init tracing meta data: Sanity checks "
			"failed.\n");
		return false;
	}

	// re-map the previous tracing buffer
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address = fTraceOutputBuffer;
	virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fPhysicalAddress;
	physicalRestrictions.high_address = fPhysicalAddress
		+ ROUNDUP(kTraceOutputBufferSize + MAX_TRACE_SIZE, B_PAGE_SIZE);
	area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_CLEAR, 0,
		&virtualRestrictions, &physicalRestrictions, NULL);
	if (area < 0) {
		dprintf("Failed to init tracing meta data: Mapping tracing log "
			"buffer failed: %s\n", strerror(area));
		return false;
	}

	dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n",
		fTraceOutputBuffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);

	// verify/repair the tracing entry list
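	// Walk the list from fFirstEntry, re-deriving every previous_size link
	// and truncating at the first entry too damaged to step over; up to
	// kMaxRecoveringErrorCount fix-ups are tolerated before giving up.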
	uint32 errorCount = 0;
	uint32 entryCount = 0;
	uint32 nonBufferEntryCount = 0;
	uint32 previousEntrySize = 0;
	trace_entry* entry = fFirstEntry;
	while (errorCount <= kMaxRecoveringErrorCount) {
		// check previous entry size
		if (entry->previous_size != previousEntrySize) {
			if (entry != fFirstEntry) {
				dprintf("ktrace recovering: entry %p: fixing previous_size: "
					"%" B_PRIu32 " (should be %" B_PRIu32 ")\n", entry,
					entry->previous_size, previousEntrySize);
				errorCount++;
			}
			entry->previous_size = previousEntrySize;
		}

		if (entry == fAfterLastEntry)
			break;

		// check size field
		if ((entry->flags & WRAP_ENTRY) == 0 && entry->size == 0) {
			dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
				entry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry->size > uint32(fBuffer + kBufferSize - entry)) {
			dprintf("ktrace recovering: entry %p: size too big: %" B_PRIu32 "\n",
				entry, entry->size);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry < fAfterLastEntry && entry + entry->size > fAfterLastEntry) {
			dprintf("ktrace recovering: entry %p: entry crosses "
				"fAfterLastEntry (%p)\n", entry, fAfterLastEntry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		// check for wrap entry
		if ((entry->flags & WRAP_ENTRY) != 0) {
			if ((uint32)(fBuffer + kBufferSize - entry)
					> kMaxTracingEntryByteSize / sizeof(trace_entry)) {
				dprintf("ktrace recovering: entry %p: wrap entry at invalid "
					"buffer location\n", entry);
				errorCount++;
			}

			if (entry->size != 0) {
				dprintf("ktrace recovering: entry %p: invalid wrap entry "
					"size: %" B_PRIu32 "\n", entry, entry->size);
				errorCount++;
				entry->size = 0;
			}

			previousEntrySize = fBuffer + kBufferSize - entry;
			entry = fBuffer;
			continue;
		}

		if ((entry->flags & BUFFER_ENTRY) == 0) {
			entry->flags |= CHECK_ENTRY;
			nonBufferEntryCount++;
		}

		entryCount++;
		previousEntrySize = entry->size;

		entry += entry->size;
	}

	if (errorCount > kMaxRecoveringErrorCount) {
		dprintf("ktrace recovering: Too many errors.\n");
		fAfterLastEntry = entry;
		fAfterLastEntry->previous_size = previousEntrySize;
	}

	dprintf("ktrace recovering: Recovered %" B_PRIu32 " entries + %" B_PRIu32
		" buffer entries from previous session. Expected %" B_PRIu32
		" entries.\n", nonBufferEntryCount, entryCount - nonBufferEntryCount,
		fEntries);
	fEntries = nonBufferEntryCount;

	B_INITIALIZE_SPINLOCK(&fLock);

	// TODO: Actually check the entries! Do that when first accessing the
	// tracing buffer from the kernel debugger (when sTracingDataRecovered is
	// true).
	sTracingDataRecovered = true;
	return true;
}


#endif	// ENABLE_TRACING


// #pragma mark -


TraceOutput::TraceOutput(char* buffer, size_t bufferSize, uint32 flags)
	: fBuffer(buffer),
	  fCapacity(bufferSize),
	  fFlags(flags)
{
	Clear();
}


void
TraceOutput::Clear()
{
	if (fCapacity > 0)
		fBuffer[0] = '\0';
	fSize = 0;
}


void
TraceOutput::PrintArgs(const char* format, va_list args)
{
#if ENABLE_TRACING
	if (IsFull())
		return;

	size_t length = vsnprintf(fBuffer + fSize, fCapacity - fSize, format, args);
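	// vsnprintf() returns the length the formatted string would have had;
	// advance fSize by at most the space actually available, leaving room
	// for the terminating NUL.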
	fSize += std::min(length, fCapacity - fSize - 1);
#endif
}


void
TraceOutput::PrintStackTrace(tracing_stack_trace* stackTrace)
{
#if ENABLE_TRACING
	print_stack_trace(stackTrace, TraceOutputPrint(*this));
#endif
}


void
TraceOutput::SetLastEntryTime(bigtime_t time)
{
	fLastEntryTime = time;
}


bigtime_t
TraceOutput::LastEntryTime() const
{
	return fLastEntryTime;
}


//	#pragma mark -


TraceEntry::TraceEntry()
{
}


TraceEntry::~TraceEntry()
{
}


void
TraceEntry::Dump(TraceOutput& out)
{
#if ENABLE_TRACING
	// to be overridden by subclasses
	out.Print("ENTRY %p", this);
#endif
}


void
TraceEntry::DumpStackTrace(TraceOutput& out)
{
}


void
TraceEntry::Initialized()
{
#if ENABLE_TRACING
	ToTraceEntry()->flags |= ENTRY_INITIALIZED;
	sTracingMetaData->IncrementEntriesEver();
#endif
}


void*
TraceEntry::operator new(size_t size, const std::nothrow_t&) throw()
{
#if ENABLE_TRACING
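	// Prepend a trace_entry header; the object itself is constructed
	// directly behind it (entry + 1).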
	trace_entry* entry = sTracingMetaData->AllocateEntry(
		size + sizeof(trace_entry), 0);
	return entry != NULL ? entry + 1 : NULL;
#endif
	return NULL;
}


//	#pragma mark -


AbstractTraceEntry::~AbstractTraceEntry()
{
}


void
AbstractTraceEntry::Dump(TraceOutput& out)
{
	bigtime_t time = (out.Flags() & TRACE_OUTPUT_DIFF_TIME)
		? fTime - out.LastEntryTime()
		: fTime;

	if (out.Flags() & TRACE_OUTPUT_TEAM_ID) {
		out.Print("[%6" B_PRId32 ":%6" B_PRId32 "] %10" B_PRId64 ": ", fThread,
			fTeam, time);
	} else
		out.Print("[%6" B_PRId32 "] %10" B_PRId64 ": ", fThread, time);

	AddDump(out);

	out.SetLastEntryTime(fTime);
}


void
AbstractTraceEntry::AddDump(TraceOutput& out)
{
}


void
AbstractTraceEntry::_Init()
{
	Thread* thread = thread_get_current_thread();
	if (thread != NULL) {
		fThread = thread->id;
		if (thread->team)
			fTeam = thread->team->id;
	}
	fTime = system_time();
}


//	#pragma mark - AbstractTraceEntryWithStackTrace


AbstractTraceEntryWithStackTrace::AbstractTraceEntryWithStackTrace(
	size_t stackTraceDepth, size_t skipFrames, bool kernelOnly)
{
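	// skipFrames + 1 also skips this constructor's own stack frame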
	fStackTrace = capture_tracing_stack_trace(stackTraceDepth, skipFrames + 1,
		kernelOnly);
}


void
AbstractTraceEntryWithStackTrace::DumpStackTrace(TraceOutput& out)
{
	out.PrintStackTrace(fStackTrace);
}


//	#pragma mark -


#if ENABLE_TRACING

class KernelTraceEntry : public AbstractTraceEntry {
	public:
		KernelTraceEntry(const char* message)
		{
			fMessage = alloc_tracing_buffer_strcpy(message, 256, false);

#if KTRACE_PRINTF_STACK_TRACE
			fStackTrace = capture_tracing_stack_trace(
				KTRACE_PRINTF_STACK_TRACE, 1, false);
#endif
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("kern: %s", fMessage);
		}

#if KTRACE_PRINTF_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

	private:
		char*	fMessage;
#if KTRACE_PRINTF_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};


class UserTraceEntry : public AbstractTraceEntry {
	public:
		UserTraceEntry(const char* message)
		{
			fMessage = alloc_tracing_buffer_strcpy(message, 256, true);

#if KTRACE_PRINTF_STACK_TRACE
			fStackTrace = capture_tracing_stack_trace(
				KTRACE_PRINTF_STACK_TRACE, 1, false);
#endif
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("user: %s", fMessage);
		}

#if KTRACE_PRINTF_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

	private:
		char*	fMessage;
#if KTRACE_PRINTF_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};


class TracingLogStartEntry : public AbstractTraceEntry {
	public:
		TracingLogStartEntry()
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("ktrace start");
		}
};

#endif	// ENABLE_TRACING


//	#pragma mark - trace filters


TraceFilter::~TraceFilter()
{
}


bool
TraceFilter::Filter(const TraceEntry* entry, LazyTraceOutput& out)
{
	return false;
}


class ThreadTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
	{
		const AbstractTraceEntry* entry
			= dynamic_cast<const AbstractTraceEntry*>(_entry);
		return (entry != NULL && entry->ThreadID() == fThread);
	}
};


class TeamTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
	{
		const AbstractTraceEntry* entry
			= dynamic_cast<const AbstractTraceEntry*>(_entry);
		return (entry != NULL && entry->TeamID() == fTeam);
	}
};


class PatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return strstr(out.DumpEntry(entry), fString) != NULL;
	}
};


class DecimalPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%" B_PRId64, fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};


class HexPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%" B_PRIx64, fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};


class StringPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		if (IS_KERNEL_ADDRESS(fValue))
			return strstr(out.DumpEntry(entry), (const char*)fValue) != NULL;

		// TODO: this is *very* slow
		char buffer[64];
		user_strlcpy(buffer, (const char*)fValue, sizeof(buffer));
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};


class NotTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return !fSubFilters.first->Filter(entry, out);
	}
};


class AndTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			&& fSubFilters.second->Filter(entry, out);
	}
};


class OrTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			|| fSubFilters.second->Filter(entry, out);
	}
};


class TraceFilterParser {
public:
	static TraceFilterParser* Default()
	{
		return &sParser;
	}

	bool Parse(int argc, const char* const* argv)
	{
		fTokens = argv;
		fTokenCount = argc;
		fTokenIndex = 0;
		fFilterCount = 0;

		TraceFilter* filter = _ParseExpression();
		return fTokenIndex == fTokenCount && filter != NULL;
	}

	TraceFilter* Filter()
	{
		return &fFilters[0];
	}

private:
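	// Filter expressions use prefix notation:
	//   expr := "not" expr | "and" expr expr | "or" expr expr
	//        | "thread" <id> | "team" <id>
	//        | #<pattern> | d#<value> | x#<value> | s#<string>
	// Nodes are placement-new'ed into the fixed fFilters array, so an
	// expression can hold at most MAX_FILTERS of them.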
	TraceFilter* _ParseExpression()
	{
		const char* token = _NextToken();
		if (!token) {
			// unexpected end of expression
			return NULL;
		}

		if (fFilterCount == MAX_FILTERS) {
			// too many filters
			return NULL;
		}

		if (token[0] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				PatternTraceFilter;
			filter->fString = token + 1;
			return filter;
		} else if (token[0] == 'd' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				DecimalPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 'x' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				HexPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 's' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				StringPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (strcmp(token, "not") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) NotTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL)
				return filter;
			return NULL;
		} else if (strcmp(token, "and") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) AndTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "or") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) OrTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "thread") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				ThreadTraceFilter;
			filter->fThread = strtol(arg, NULL, 0);
			return filter;
		} else if (strcmp(token, "team") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				TeamTraceFilter;
			filter->fTeam = strtol(arg, NULL, 0);
			return filter;
		} else {
			// invalid token
			return NULL;
		}
	}

	const char* _CurrentToken() const
	{
		if (fTokenIndex >= 1 && fTokenIndex <= fTokenCount)
			return fTokens[fTokenIndex - 1];
		return NULL;
	}

	const char* _NextToken()
	{
		if (fTokenIndex >= fTokenCount)
			return NULL;
		return fTokens[fTokenIndex++];
	}

private:
	enum { MAX_FILTERS = 32 };

	const char* const*			fTokens;
	int							fTokenCount;
	int							fTokenIndex;
	TraceFilter					fFilters[MAX_FILTERS];
	int							fFilterCount;

	static TraceFilterParser	sParser;
};


TraceFilterParser TraceFilterParser::sParser;


//	#pragma mark -


#if ENABLE_TRACING


TraceEntry*
TraceEntryIterator::Next()
{
	if (fIndex == 0) {
		fEntry = _NextNonBufferEntry(sTracingMetaData->FirstEntry());
		fIndex = 1;
	} else if (fEntry != NULL) {
		fEntry = _NextNonBufferEntry(sTracingMetaData->NextEntry(fEntry));
		fIndex++;
	}

	return Current();
}


TraceEntry*
TraceEntryIterator::Previous()
{
	if (fIndex == (int32)sTracingMetaData->Entries() + 1)
		fEntry = sTracingMetaData->AfterLastEntry();

	if (fEntry != NULL) {
		fEntry = _PreviousNonBufferEntry(
			sTracingMetaData->PreviousEntry(fEntry));
		fIndex--;
	}

	return Current();
}


TraceEntry*
TraceEntryIterator::MoveTo(int32 index)
{
	if (index == fIndex)
		return Current();

	if (index <= 0 || index > (int32)sTracingMetaData->Entries()) {
		fIndex = (index <= 0 ? 0 : sTracingMetaData->Entries() + 1);
		fEntry = NULL;
		return NULL;
	}

	// get the shortest iteration path
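	// Candidate starting points are the current position, the front
	// (index 0), and the end (index Entries() + 1); pick whichever needs
	// the fewest Next()/Previous() steps.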
	int32 distance = index - fIndex;
	int32 direction = distance < 0 ? -1 : 1;
	distance *= direction;

	if (index < distance) {
		distance = index;
		direction = 1;
		fEntry = NULL;
		fIndex = 0;
	}
	if ((int32)sTracingMetaData->Entries() + 1 - fIndex < distance) {
		distance = sTracingMetaData->Entries() + 1 - fIndex;
		direction = -1;
		fEntry = NULL;
		fIndex = sTracingMetaData->Entries() + 1;
	}

	// iterate to the index
	if (direction < 0) {
		while (fIndex != index)
			Previous();
	} else {
		while (fIndex != index)
			Next();
	}

	return Current();
}


trace_entry*
TraceEntryIterator::_NextNonBufferEntry(trace_entry* entry)
{
	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
		entry = sTracingMetaData->NextEntry(entry);

	return entry;
}


trace_entry*
TraceEntryIterator::_PreviousNonBufferEntry(trace_entry* entry)
{
	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
		entry = sTracingMetaData->PreviousEntry(entry);

	return entry;
}


int
dump_tracing_internal(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
	int argi = 1;

	// variables in which we store our state to be continuable
	static int32 _previousCount = 0;
	static bool _previousHasFilter = false;
	static bool _previousPrintStackTrace = false;
	static int32 _previousMaxToCheck = 0;
	static int32 _previousFirstChecked = 1;
	static int32 _previousLastChecked = -1;
	static int32 _previousDirection = 1;
	static uint32 _previousEntriesEver = 0;
	static uint32 _previousEntries = 0;
	static uint32 _previousOutputFlags = 0;
	static TraceEntryIterator iterator;

	uint32 entriesEver = sTracingMetaData->EntriesEver();

	// Note: start and index are Pascal-like indices (i.e. in [1, Entries()]).
	int32 start = 0;	// special index: print the last count entries
	int32 count = 0;
	int32 maxToCheck = 0;
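	// cont: 0 = fresh invocation, 1/-1 = continue forward/backward from
	// where the previous invocation stopped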
	int32 cont = 0;

	bool hasFilter = false;
	bool printStackTrace = false;

	uint32 outputFlags = 0;
	while (argi < argc) {
		if (strcmp(argv[argi], "--difftime") == 0) {
			outputFlags |= TRACE_OUTPUT_DIFF_TIME;
			argi++;
		} else if (strcmp(argv[argi], "--printteam") == 0) {
			outputFlags |= TRACE_OUTPUT_TEAM_ID;
			argi++;
		} else if (strcmp(argv[argi], "--stacktrace") == 0) {
			printStackTrace = true;
			argi++;
		} else
			break;
	}

	if (argi < argc) {
		if (strcmp(argv[argi], "forward") == 0) {
			cont = 1;
			argi++;
		} else if (strcmp(argv[argi], "backward") == 0) {
			cont = -1;
			argi++;
		}
	} else
		cont = _previousDirection;

	if (cont != 0) {
		if (argi < argc) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
		if (entriesEver == 0 || entriesEver != _previousEntriesEver
			|| sTracingMetaData->Entries() != _previousEntries) {
			kprintf("Can't continue iteration. \"%s\" has not been invoked "
				"before, or there were new entries written since the last "
				"invocation.\n", argv[0]);
			return 0;
		}
	}

	// get start, count, maxToCheck
	int32* params[3] = { &start, &count, &maxToCheck };
	for (int i = 0; i < 3 && !hasFilter && argi < argc; i++) {
		if (strcmp(argv[argi], "filter") == 0) {
			hasFilter = true;
			argi++;
		} else if (argv[argi][0] == '#') {
			hasFilter = true;
		} else {
			*params[i] = parse_expression(argv[argi]);
			argi++;
		}
	}

	// filter specification
	if (argi < argc) {
		hasFilter = true;
		if (strcmp(argv[argi], "filter") == 0)
			argi++;

		if (!TraceFilterParser::Default()->Parse(argc - argi, argv + argi)) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	int32 direction;
	int32 firstToCheck;
	int32 lastToCheck;

	if (cont != 0) {
		// get values from the previous iteration
		direction = cont;
		count = _previousCount;
		maxToCheck = _previousMaxToCheck;
		hasFilter = _previousHasFilter;
		outputFlags = _previousOutputFlags;
		printStackTrace = _previousPrintStackTrace;

		if (direction < 0)
			start = _previousFirstChecked - 1;
		else
			start = _previousLastChecked + 1;
	} else {
		// defaults for count and maxToCheck
		if (count == 0)
			count = 30;
		if (maxToCheck == 0 || !hasFilter)
			maxToCheck = count;
		else if (maxToCheck < 0)
			maxToCheck = sTracingMetaData->Entries();

		// determine iteration direction
		direction = (start <= 0 || count < 0 ? -1 : 1);

		// validate count and maxToCheck
		if (count < 0)
			count = -count;
		if (maxToCheck < 0)
			maxToCheck = -maxToCheck;
		if (maxToCheck > (int32)sTracingMetaData->Entries())
			maxToCheck = sTracingMetaData->Entries();
		if (count > maxToCheck)
			count = maxToCheck;

		// validate start
		if (start <= 0 || start > (int32)sTracingMetaData->Entries())
			start = max_c(1, sTracingMetaData->Entries());
	}

	if (direction < 0) {
		firstToCheck = max_c(1, start - maxToCheck + 1);
		lastToCheck = start;
	} else {
		firstToCheck = start;
		lastToCheck = min_c((int32)sTracingMetaData->Entries(),
			start + maxToCheck - 1);
	}

	// reset the iterator if something changed in the meantime
	if (entriesEver == 0 || entriesEver != _previousEntriesEver
		|| sTracingMetaData->Entries() != _previousEntries) {
		iterator.Reset();
	}

	LazyTraceOutput out(sTracingMetaData->TraceOutputBuffer(),
		kTraceOutputBufferSize, outputFlags);

	bool markedMatching = false;
	int32 firstToDump = firstToCheck;
	int32 lastToDump = lastToCheck;

	TraceFilter* filter = NULL;
	if (hasFilter)
		filter = TraceFilterParser::Default()->Filter();

	if (wrapperFilter != NULL) {
		wrapperFilter->Init(filter, direction, cont != 0);
		filter = wrapperFilter;
	}

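	// When filtering backwards we can't know in advance which entries form
	// the last <count> matches, so we first scan backwards marking matches,
	// then dump the marked entries in forward order below.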
	if (direction < 0 && filter && lastToCheck - firstToCheck >= count) {
		// iteration direction is backwards
		markedMatching = true;

		// From the last entry to check iterate backwards to check filter
		// matches.
		int32 matching = 0;

		// move to the entry after the last entry to check
		iterator.MoveTo(lastToCheck + 1);

		// iterate backwards
		firstToDump = -1;
		lastToDump = -1;
		while (iterator.Index() > firstToCheck) {
			TraceEntry* entry = iterator.Previous();
			if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
				out.Clear();
				if (filter->Filter(entry, out)) {
					entry->ToTraceEntry()->flags |= FILTER_MATCH;
					if (lastToDump == -1)
						lastToDump = iterator.Index();
					firstToDump = iterator.Index();

					matching++;
					if (matching >= count)
						break;
				} else
					entry->ToTraceEntry()->flags &= ~FILTER_MATCH;
			}
		}

		firstToCheck = iterator.Index();

		// iterate to the previous entry, so that the next loop starts at the
		// right one
		iterator.Previous();
	}

	out.SetLastEntryTime(0);

	// set the iterator to the entry before the first one to dump
	iterator.MoveTo(firstToDump - 1);

	// dump the entries matching the filter in the range
	// [firstToDump, lastToDump]
	int32 dumped = 0;

	while (TraceEntry* entry = iterator.Next()) {
		int32 index = iterator.Index();
		if (index < firstToDump)
			continue;
		if (index > lastToDump || dumped >= count) {
			if (direction > 0)
				lastToCheck = index - 1;
			break;
		}

		if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
			out.Clear();
			if (filter && (markedMatching
					? (entry->Flags() & FILTER_MATCH) == 0
					: !filter->Filter(entry, out))) {
				continue;
			}

			// don't print trailing new line
			const char* dump = out.DumpEntry(entry);
			int len = strlen(dump);
			if (len > 0 && dump[len - 1] == '\n')
				len--;

			kprintf("%5" B_PRId32 ". %.*s\n", index, len, dump);

			if (printStackTrace) {
				out.Clear();
				entry->DumpStackTrace(out);
				if (out.Size() > 0)
					kputs(out.Buffer());
			}
		} else if (!filter)
			kprintf("%5" B_PRId32 ". ** uninitialized entry **\n", index);

		dumped++;
	}

	kprintf("printed %" B_PRId32 " entries within range %" B_PRId32 " to %"
		B_PRId32 " (%" B_PRId32 " of %" B_PRId32 " total, %" B_PRId32 " ever)\n",
		dumped, firstToCheck, lastToCheck, lastToCheck - firstToCheck + 1,
		sTracingMetaData->Entries(), entriesEver);

	// store iteration state
	_previousCount = count;
	_previousMaxToCheck = maxToCheck;
	_previousHasFilter = hasFilter;
	_previousPrintStackTrace = printStackTrace;
	_previousFirstChecked = firstToCheck;
	_previousLastChecked = lastToCheck;
	_previousDirection = direction;
	_previousEntriesEver = entriesEver;
	_previousEntries = sTracingMetaData->Entries();
	_previousOutputFlags = outputFlags;

	return cont != 0 ? B_KDEBUG_CONT : 0;
}


static int
dump_tracing_command(int argc, char** argv)
{
	return dump_tracing_internal(argc, argv, NULL);
}


#endif	// ENABLE_TRACING


extern "C" uint8*
alloc_tracing_buffer(size_t size)
{
#if	ENABLE_TRACING
	trace_entry* entry = sTracingMetaData->AllocateEntry(
		size + sizeof(trace_entry), BUFFER_ENTRY);
	if (entry == NULL)
		return NULL;

	return (uint8*)(entry + 1);
#else
	return NULL;
#endif
}


uint8*
alloc_tracing_buffer_memcpy(const void* source, size_t size, bool user)
{
	if (user && !IS_USER_ADDRESS(source))
		return NULL;

	uint8* buffer = alloc_tracing_buffer(size);
	if (buffer == NULL)
		return NULL;

	if (user) {
		if (user_memcpy(buffer, source, size) != B_OK)
			return NULL;
	} else
		memcpy(buffer, source, size);

	return buffer;
}


char*
alloc_tracing_buffer_strcpy(const char* source, size_t maxSize, bool user)
{
	if (source == NULL || maxSize == 0)
		return NULL;

	if (user && !IS_USER_ADDRESS(source))
		return NULL;

	// limit maxSize to the actual source string len
	if (user) {
		ssize_t size = user_strlcpy(NULL, source, 0);
			// there's no user_strnlen()
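			// (with a zero-sized buffer user_strlcpy() copies nothing and
			// just reports the length of the source string)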
		if (size < 0)
			return NULL;
		maxSize = min_c(maxSize, (size_t)size + 1);
	} else
		maxSize = strnlen(source, maxSize - 1) + 1;

	char* buffer = (char*)alloc_tracing_buffer(maxSize);
	if (buffer == NULL)
		return NULL;

	if (user) {
		if (user_strlcpy(buffer, source, maxSize) < B_OK)
			return NULL;
	} else
		strlcpy(buffer, source, maxSize);

	return buffer;
}


tracing_stack_trace*
capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool kernelOnly)
{
#if	ENABLE_TRACING
	// If interrupts are disabled, page_fault_exception() doesn't allow us to
	// gracefully handle a bad address in the stack trace, so we always
	// restrict the stack traces to the kernel in that case. A bad address in
	// a kernel stack trace would still cause a panic(), but that is probably
	// even desired.
	if (!are_interrupts_enabled())
		kernelOnly = true;

	tracing_stack_trace* stackTrace
		= (tracing_stack_trace*)alloc_tracing_buffer(
			sizeof(tracing_stack_trace) + maxCount * sizeof(addr_t));
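	// The trace is stored as a raw BUFFER_ENTRY inside the tracing buffer
	// itself (see alloc_tracing_buffer()).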

	if (stackTrace != NULL) {
		stackTrace->depth = arch_debug_get_stack_trace(
			stackTrace->return_addresses, maxCount, 0, skipFrames + 1,
			STACK_TRACE_KERNEL | (kernelOnly ? 0 : STACK_TRACE_USER));
	}

	return stackTrace;
#else
	return NULL;
#endif
}


addr_t
tracing_find_caller_in_stack_trace(struct tracing_stack_trace* stackTrace,
	const addr_t excludeRanges[], uint32 excludeRangeCount)
{
	for (int32 i = 0; i < stackTrace->depth; i++) {
		addr_t returnAddress = stackTrace->return_addresses[i];

		bool inRange = false;
		for (uint32 j = 0; j < excludeRangeCount; j++) {
			if (returnAddress >= excludeRanges[j * 2 + 0]
				&& returnAddress < excludeRanges[j * 2 + 1]) {
				inRange = true;
				break;
			}
		}

		if (!inRange)
			return returnAddress;
	}

	return 0;
}


void
tracing_print_stack_trace(struct tracing_stack_trace* stackTrace)
{
#if ENABLE_TRACING
	print_stack_trace(stackTrace, kprintf);
#endif
}


int
dump_tracing(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
#if	ENABLE_TRACING
	return dump_tracing_internal(argc, argv, wrapperFilter);
#else
	return 0;
#endif
}


bool
tracing_is_entry_valid(AbstractTraceEntry* candidate, bigtime_t entryTime)
{
#if ENABLE_TRACING
	if (!sTracingMetaData->IsInBuffer(candidate, sizeof(*candidate)))
		return false;

	if (entryTime < 0)
		return true;

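	// Entries are chronologically ordered: inspect the first
	// AbstractTraceEntry still in the buffer. If it is already newer than
	// entryTime, the candidate must have been overwritten meanwhile;
	// otherwise the candidate's stored time decides.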
	TraceEntryIterator iterator;
	while (TraceEntry* entry = iterator.Next()) {
		AbstractTraceEntry* abstract = dynamic_cast<AbstractTraceEntry*>(entry);
		if (abstract == NULL)
			continue;

		if (abstract != candidate && abstract->Time() > entryTime)
			return false;

		return candidate->Time() == entryTime;
	}
#endif

	return false;
}


void
lock_tracing_buffer()
{
#if ENABLE_TRACING
	sTracingMetaData->Lock();
#endif
}


void
unlock_tracing_buffer()
{
#if ENABLE_TRACING
	sTracingMetaData->Unlock();
#endif
}


extern "C" status_t
tracing_init(void)
{
#if	ENABLE_TRACING
	status_t result = TracingMetaData::Create(sTracingMetaData);
	if (result != B_OK) {
		memset(&sFallbackTracingMetaData, 0, sizeof(sFallbackTracingMetaData));
		sTracingMetaData = &sFallbackTracingMetaData;
		return result;
	}

	new(nothrow) TracingLogStartEntry();

	add_debugger_command_etc("traced", &dump_tracing_command,
		"Dump recorded trace entries",
		"[ --printteam ] [ --difftime ] [ --stacktrace ] "
			"(\"forward\" | \"backward\") "
			"| ([ <start> [ <count> [ <range> ] ] ] "
			"[ #<pattern> | (\"filter\" <filter>) ])\n"
		"Prints recorded trace entries. If \"backward\" or \"forward\" is\n"
		"specified, the command continues where the previous invocation left\n"
		"off, i.e. printing the previous respectively next entries (as many\n"
		"as printed before). In this case the command is continuable, that is\n"
		"afterwards entering an empty line in the debugger will reinvoke it.\n"
		"If no arguments are given, the command continues in the direction\n"
		"of the last invocation.\n"
		"--printteam  - enables printing the entries' team IDs.\n"
		"--difftime   - print difference times for all but the first entry.\n"
		"--stacktrace - print stack traces for entries that captured one.\n"
		"  <start>    - The base index of the entries to print. Depending on\n"
		"               whether the iteration direction is forward or\n"
		"               backward this will be the first or last entry printed\n"
		"               (potentially, if a filter is specified). The index of\n"
		"               the first entry in the trace buffer is 1. If 0 is\n"
		"               specified, the last <count> recorded entries are\n"
		"               printed (iteration direction is backward). Defaults\n"
		"               to 0.\n"
		"  <count>    - The number of entries to be printed. Defaults to 30.\n"
		"               If negative, the -<count> entries before and\n"
		"               including <start> will be printed.\n"
		"  <range>    - Only relevant if a filter is specified. Specifies the\n"
		"               number of entries to be filtered -- depending on the\n"
		"               iteration direction the entries before or after\n"
		"               <start>. If more than <count> entries match the\n"
		"               filter, only the first (forward) or last (backward)\n"
		"               <count> matching entries will be printed. If 0 is\n"
		"               specified <range> will be set to <count>. If -1,\n"
		"               <range> will be set to the number of recorded\n"
		"               entries.\n"
		"  <pattern>  - If specified only entries containing this string are\n"
		"               printed.\n"
		"  <filter>   - If specified only entries matching this filter\n"
		"               expression are printed. The expression can consist of\n"
		"               prefix operators \"not\", \"and\", \"or\", and\n"
		"               filters \"'thread' <thread>\" (matching entries\n"
		"               with the given thread ID), \"'team' <team>\"\n"
		"               (matching entries with the given team ID), and\n"
		"               \"#<pattern>\" (matching entries containing the given\n"
		"               string).\n", 0);
#endif	// ENABLE_TRACING
	return B_OK;
}


void
ktrace_printf(const char *format, ...)
{
#if	ENABLE_TRACING
	va_list list;
	va_start(list, format);

	char buffer[256];
	vsnprintf(buffer, sizeof(buffer), format, list);

	va_end(list);

	new(nothrow) KernelTraceEntry(buffer);
#endif	// ENABLE_TRACING
}


void
_user_ktrace_output(const char *message)
{
#if	ENABLE_TRACING
	new(nothrow) UserTraceEntry(message);
#endif	// ENABLE_TRACING
}

1861