xref: /haiku/src/system/kernel/debug/tracing.cpp (revision f2b4344867e97c3f4e742a1b4a15e6879644601a)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2008-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 
8 #include <tracing.h>
9 
10 #include <stdlib.h>
11 
12 #include <algorithm>
13 
14 #include <arch/debug.h>
15 #include <debug.h>
16 #include <elf.h>
17 #include <int.h>
18 #include <kernel.h>
19 #include <team.h>
20 #include <thread.h>
21 #include <util/AutoLock.h>
22 #include <vm/vm.h>
23 
24 
25 #if ENABLE_TRACING
26 
27 //#define TRACE_TRACING
28 #ifdef TRACE_TRACING
29 #	define TRACE(x) dprintf_no_syslog x
30 #else
31 #	define TRACE(x) ;
32 #endif
33 
34 
35 enum {
36 	WRAP_ENTRY			= 0x01,
37 	ENTRY_INITIALIZED	= 0x02,
38 	BUFFER_ENTRY		= 0x04,
39 	FILTER_MATCH		= 0x08,
40 	INVALID_ENTRY		= 0x10,
41 	CHECK_ENTRY			= 0x20,
42 };
43 
44 
45 static const size_t kTraceOutputBufferSize = 10240;
46 static const size_t kBufferSize = MAX_TRACE_SIZE / sizeof(trace_entry);
47 
48 static const uint32 kMaxRecoveringErrorCount	= 100;
49 static const addr_t kMetaDataBaseAddress		= 32 * 1024 * 1024;
50 static const addr_t kMetaDataBaseEndAddress		= 128 * 1024 * 1024;
51 static const addr_t kMetaDataAddressIncrement	= 8 * 1024 * 1024;
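// When concatenated, the three magic values spell "ValidTracing"; they are
// interleaved with the TracingMetaData fields so that the meta data of a
// previous session can be recognized (and sanity-checked) after a reboot.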
52 static const uint32 kMetaDataMagic1 = 'Vali';
53 static const uint32 kMetaDataMagic2 = 'dTra';
54 static const uint32 kMetaDataMagic3 = 'cing';
55 
56 // the maximum we can address with the trace_entry::[previous_]size fields
57 static const size_t kMaxTracingEntryByteSize
58 	= ((1 << 13) - 1) * sizeof(trace_entry);
59 
60 
61 struct TraceOutputPrint {
62 	TraceOutputPrint(TraceOutput& output)
63 		:
64 		fOutput(output)
65 	{
66 	}
67 
	void operator()(const char* format, ...) const
69 	{
70 		va_list args;
71 		va_start(args, format);
72 		fOutput.PrintArgs(format, args);
73 		va_end(args);
74 	}
75 
76 private:
77 	TraceOutput&	fOutput;
78 };
79 
80 
81 class TracingMetaData {
82 public:
83 	static	status_t			Create(TracingMetaData*& _metaData);
84 
85 	inline	bool				Lock();
86 	inline	void				Unlock();
87 
88 	inline	trace_entry*		FirstEntry() const;
89 	inline	trace_entry*		AfterLastEntry() const;
90 
91 	inline	uint32				Entries() const;
92 	inline	uint32				EntriesEver() const;
93 
94 	inline	void				IncrementEntriesEver();
95 
96 	inline	char*				TraceOutputBuffer() const;
97 
98 			trace_entry*		NextEntry(trace_entry* entry);
99 			trace_entry*		PreviousEntry(trace_entry* entry);
100 
101 			trace_entry*		AllocateEntry(size_t size, uint16 flags);
102 
103 			bool				IsInBuffer(void* address, size_t size);
104 
105 private:
106 			bool				_FreeFirstEntry();
107 			bool				_MakeSpace(size_t needed);
108 
109 	static	status_t			_CreateMetaDataArea(bool findPrevious,
110 									area_id& _area,
111 									TracingMetaData*& _metaData);
112 			bool				_InitPreviousTracingData();
113 
114 private:
115 			uint32				fMagic1;
116 			trace_entry*		fBuffer;
117 			trace_entry*		fFirstEntry;
118 			trace_entry*		fAfterLastEntry;
119 			uint32				fEntries;
120 			uint32				fMagic2;
121 			uint32				fEntriesEver;
122 			spinlock			fLock;
123 			char*				fTraceOutputBuffer;
124 			phys_addr_t			fPhysicalAddress;
125 			uint32				fMagic3;
126 };
127 
128 static TracingMetaData sDummyTracingMetaData;
129 static TracingMetaData* sTracingMetaData = &sDummyTracingMetaData;
130 static bool sTracingDataRecovered = false;
131 
132 
133 // #pragma mark -
134 
135 
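// Prints a stack trace via the given printf()-like functor. This is shared
// between the kernel debugger output (kprintf) and TraceOutput (via
// TraceOutputPrint).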
136 template<typename Print>
137 static void
138 print_stack_trace(struct tracing_stack_trace* stackTrace,
139 	const Print& print)
140 {
141 	if (stackTrace == NULL || stackTrace->depth <= 0)
142 		return;
143 
144 	for (int32 i = 0; i < stackTrace->depth; i++) {
145 		addr_t address = stackTrace->return_addresses[i];
146 
147 		const char* symbol;
148 		const char* imageName;
149 		bool exactMatch;
150 		addr_t baseAddress;
151 
152 		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
153 				&imageName, &exactMatch) == B_OK) {
154 			print("  %p  %s + 0x%lx (%s)%s\n", (void*)address, symbol,
155 				address - baseAddress, imageName,
156 				exactMatch ? "" : " (nearest)");
157 		} else
158 			print("  %p\n", (void*)address);
159 	}
160 }
161 
162 
163 // #pragma mark - TracingMetaData
164 
165 
166 bool
167 TracingMetaData::Lock()
168 {
169 	acquire_spinlock(&fLock);
170 	return true;
171 }
172 
173 
174 void
175 TracingMetaData::Unlock()
176 {
177 	release_spinlock(&fLock);
178 }
179 
180 
181 trace_entry*
182 TracingMetaData::FirstEntry() const
183 {
184 	return fFirstEntry;
185 }
186 
187 
188 trace_entry*
189 TracingMetaData::AfterLastEntry() const
190 {
191 	return fAfterLastEntry;
192 }
193 
194 
195 uint32
196 TracingMetaData::Entries() const
197 {
198 	return fEntries;
199 }
200 
201 
202 uint32
203 TracingMetaData::EntriesEver() const
204 {
205 	return fEntriesEver;
206 }
207 
208 
209 void
210 TracingMetaData::IncrementEntriesEver()
211 {
212 	fEntriesEver++;
		// NOTE: Race condition on SMP machines! We should use atomic_add(),
		// though that costs some performance and the counter is only
		// informational anyway.
216 }
217 
218 
219 char*
220 TracingMetaData::TraceOutputBuffer() const
221 {
222 	return fTraceOutputBuffer;
223 }
224 
225 
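/*!	Returns the entry following \a entry, or \c NULL if \a entry is the last
	one. Entry sizes are stored in units of sizeof(trace_entry); a WRAP_ENTRY
	at the end of the buffer redirects the iteration back to fBuffer.
*/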
226 trace_entry*
227 TracingMetaData::NextEntry(trace_entry* entry)
228 {
229 	entry += entry->size;
230 	if ((entry->flags & WRAP_ENTRY) != 0)
231 		entry = fBuffer;
232 
233 	if (entry == fAfterLastEntry)
234 		return NULL;
235 
236 	return entry;
237 }
238 
239 
240 trace_entry*
241 TracingMetaData::PreviousEntry(trace_entry* entry)
242 {
243 	if (entry == fFirstEntry)
244 		return NULL;
245 
246 	if (entry == fBuffer) {
247 		// beginning of buffer -- previous entry is a wrap entry
248 		entry = fBuffer + kBufferSize - entry->previous_size;
249 	}
250 
251 	return entry - entry->previous_size;
252 }
253 
254 
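/*!	Allocates an entry of \a size bytes (header included) from the ring
	buffer. Acquires the buffer spinlock with interrupts disabled and, if
	necessary, evicts the oldest entries to make room. Returns \c NULL if the
	buffer isn't initialized yet or \a size is out of range.
*/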
255 trace_entry*
256 TracingMetaData::AllocateEntry(size_t size, uint16 flags)
257 {
258 	if (fAfterLastEntry == NULL || size == 0
259 		|| size >= kMaxTracingEntryByteSize) {
260 		return NULL;
261 	}
262 
263 	InterruptsSpinLocker _(fLock);
264 
265 	size = (size + 3) >> 2;
266 		// 4 byte aligned, don't store the lower 2 bits
267 
268 	TRACE(("AllocateEntry(%lu), start %p, end %p, buffer %p\n", size * 4,
269 		fFirstEntry, fAfterLastEntry, fBuffer));
270 
271 	if (!_MakeSpace(size))
272 		return NULL;
273 
274 	trace_entry* entry = fAfterLastEntry;
275 	entry->size = size;
276 	entry->flags = flags;
277 	fAfterLastEntry += size;
278 	fAfterLastEntry->previous_size = size;
279 
280 	if (!(flags & BUFFER_ENTRY))
281 		fEntries++;
282 
283 	TRACE(("  entry: %p, end %p, start %p, entries %ld\n", entry,
284 		fAfterLastEntry, fFirstEntry, fEntries));
285 
286 	return entry;
287 }
288 
289 
290 bool
291 TracingMetaData::IsInBuffer(void* address, size_t size)
292 {
293 	if (fEntries == 0)
294 		return false;
295 
296 	addr_t start = (addr_t)address;
297 	addr_t end = start + size;
298 
299 	if (start < (addr_t)fBuffer || end > (addr_t)(fBuffer + kBufferSize))
300 		return false;
301 
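	// If the used region wraps around the end of the buffer, it consists of
	// [fFirstEntry, buffer end) and [buffer start, fAfterLastEntry).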
302 	if (fFirstEntry > fAfterLastEntry)
303 		return start >= (addr_t)fFirstEntry || end <= (addr_t)fAfterLastEntry;
304 
305 	return start >= (addr_t)fFirstEntry && end <= (addr_t)fAfterLastEntry;
306 }
307 
308 
309 bool
310 TracingMetaData::_FreeFirstEntry()
311 {
312 	TRACE(("  skip start %p, %lu*4 bytes\n", fFirstEntry, fFirstEntry->size));
313 
314 	trace_entry* newFirst = NextEntry(fFirstEntry);
315 
316 	if (fFirstEntry->flags & BUFFER_ENTRY) {
317 		// a buffer entry -- just skip it
318 	} else if (fFirstEntry->flags & ENTRY_INITIALIZED) {
		// Fully initialized TraceEntry: We could destroy it, but don't do so
		// for the sake of robustness. The destructors of tracing entry
		// classes should be empty anyway.
322 		fEntries--;
323 	} else {
		// Not fully initialized TraceEntry. We can't free it, since its
		// constructor might still write into the memory and overwrite data
		// of the entry we're going to allocate. We can't do anything until
		// this entry can be discarded.
328 		return false;
329 	}
330 
331 	if (newFirst == NULL) {
		// everything is freed -- practically this can't happen if the
		// buffer is large enough to hold three max-sized entries
334 		fFirstEntry = fAfterLastEntry = fBuffer;
335 		TRACE(("_FreeFirstEntry(): all entries freed!\n"));
336 	} else
337 		fFirstEntry = newFirst;
338 
339 	return true;
340 }
341 
342 
/*!	Makes sure there are \a needed * 4 bytes of memory at fAfterLastEntry.
	Returns \c false if unable to free that much.
*/
346 bool
347 TracingMetaData::_MakeSpace(size_t needed)
348 {
349 	// we need space for fAfterLastEntry, too (in case we need to wrap around
350 	// later)
351 	needed++;
352 
353 	// If there's not enough space (free or occupied) after fAfterLastEntry,
354 	// we free all entries in that region and wrap around.
355 	if (fAfterLastEntry + needed > fBuffer + kBufferSize) {
356 		TRACE(("_MakeSpace(%lu), wrapping around: after last: %p\n", needed,
357 			fAfterLastEntry));
358 
359 		// Free all entries after fAfterLastEntry and one more at the beginning
360 		// of the buffer.
361 		while (fFirstEntry > fAfterLastEntry) {
362 			if (!_FreeFirstEntry())
363 				return false;
364 		}
365 		if (fAfterLastEntry != fBuffer && !_FreeFirstEntry())
366 			return false;
367 
368 		// just in case _FreeFirstEntry() freed the very last existing entry
369 		if (fAfterLastEntry == fBuffer)
370 			return true;
371 
372 		// mark as wrap entry and actually wrap around
373 		trace_entry* wrapEntry = fAfterLastEntry;
374 		wrapEntry->size = 0;
375 		wrapEntry->flags = WRAP_ENTRY;
376 		fAfterLastEntry = fBuffer;
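		// Store at the buffer start the distance (in entries) from the wrap
		// entry to the buffer end, so that PreviousEntry() can locate the
		// wrap entry again.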
377 		fAfterLastEntry->previous_size = fBuffer + kBufferSize - wrapEntry;
378 	}
379 
380 	if (fFirstEntry <= fAfterLastEntry) {
381 		// buffer is empty or the space after fAfterLastEntry is unoccupied
382 		return true;
383 	}
384 
385 	// free the first entries, until there's enough space
386 	size_t space = fFirstEntry - fAfterLastEntry;
387 
388 	if (space < needed) {
389 		TRACE(("_MakeSpace(%lu), left %ld\n", needed, space));
390 	}
391 
392 	while (space < needed) {
393 		space += fFirstEntry->size;
394 
395 		if (!_FreeFirstEntry())
396 			return false;
397 	}
398 
399 	TRACE(("  out: start %p, entries %ld\n", fFirstEntry, fEntries));
400 
401 	return true;
402 }
403 
404 
405 /*static*/ status_t
406 TracingMetaData::Create(TracingMetaData*& _metaData)
407 {
408 	// search meta data in memory (from previous session)
409 	area_id area;
410 	TracingMetaData* metaData;
411 	status_t error = _CreateMetaDataArea(true, area, metaData);
412 	if (error == B_OK) {
413 		if (metaData->_InitPreviousTracingData()) {
414 			_metaData = metaData;
415 			return B_OK;
416 		}
417 
418 		dprintf("Found previous tracing meta data, but failed to init.\n");
419 
420 		// invalidate the meta data
421 		metaData->fMagic1 = 0;
422 		metaData->fMagic2 = 0;
423 		metaData->fMagic3 = 0;
424 		delete_area(area);
425 	} else
426 		dprintf("No previous tracing meta data found.\n");
427 
428 	// no previous tracing data found -- create new one
429 	error = _CreateMetaDataArea(false, area, metaData);
430 	if (error != B_OK)
431 		return error;
432 
433 	virtual_address_restrictions virtualRestrictions = {};
434 	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
435 	physical_address_restrictions physicalRestrictions = {};
436 	area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
437 		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
438 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
439 		&virtualRestrictions, &physicalRestrictions,
440 		(void**)&metaData->fTraceOutputBuffer);
441 	if (area < 0)
442 		return area;
443 
444 	// get the physical address
445 	physical_entry physicalEntry;
446 	if (get_memory_map(metaData->fTraceOutputBuffer, B_PAGE_SIZE,
447 			&physicalEntry, 1) == B_OK) {
448 		metaData->fPhysicalAddress = physicalEntry.address;
449 	} else {
450 		dprintf("TracingMetaData::Create(): failed to get physical address "
451 			"of tracing buffer\n");
452 		metaData->fPhysicalAddress = 0;
453 	}
454 
455 	metaData->fBuffer = (trace_entry*)(metaData->fTraceOutputBuffer
456 		+ kTraceOutputBufferSize);
457 	metaData->fFirstEntry = metaData->fBuffer;
458 	metaData->fAfterLastEntry = metaData->fBuffer;
459 
460 	metaData->fEntries = 0;
461 	metaData->fEntriesEver = 0;
462 	B_INITIALIZE_SPINLOCK(&metaData->fLock);
463 
464 	metaData->fMagic1 = kMetaDataMagic1;
465 	metaData->fMagic2 = kMetaDataMagic2;
466 	metaData->fMagic3 = kMetaDataMagic3;
467 
468 	_metaData = metaData;
469 	return B_OK;
470 }
471 
472 
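/*!	Tries one page at each candidate physical address in
	[kMetaDataBaseAddress, kMetaDataBaseEndAddress]. With \a findPrevious the
	page is only accepted if it carries the magic values of a previous
	session; otherwise the first successfully created area is returned.
*/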
473 /*static*/ status_t
474 TracingMetaData::_CreateMetaDataArea(bool findPrevious, area_id& _area,
475 	TracingMetaData*& _metaData)
476 {
477 	// search meta data in memory (from previous session)
478 	TracingMetaData* metaData;
479 	phys_addr_t metaDataAddress = kMetaDataBaseAddress;
480 	for (; metaDataAddress <= kMetaDataBaseEndAddress;
481 			metaDataAddress += kMetaDataAddressIncrement) {
482 		virtual_address_restrictions virtualRestrictions = {};
483 		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
484 		physical_address_restrictions physicalRestrictions = {};
485 		physicalRestrictions.low_address = metaDataAddress;
486 		physicalRestrictions.high_address = metaDataAddress + B_PAGE_SIZE;
487 		area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata",
488 			B_PAGE_SIZE, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
489 			CREATE_AREA_DONT_CLEAR, &virtualRestrictions, &physicalRestrictions,
490 			(void**)&metaData);
491 		if (area < 0)
492 			continue;
493 
494 		if (!findPrevious) {
495 			_area = area;
496 			_metaData = metaData;
497 			return B_OK;
498 		}
499 
500 		if (metaData->fMagic1 == kMetaDataMagic1
501 			&& metaData->fMagic2 == kMetaDataMagic2
502 			&& metaData->fMagic3 == kMetaDataMagic3) {
503 			_area = area;
504 			_metaData = metaData;
505 			return B_OK;
506 		}
507 
508 		delete_area(area);
509 	}
510 
511 	return B_ENTRY_NOT_FOUND;
512 }
513 
514 
515 bool
516 TracingMetaData::_InitPreviousTracingData()
517 {
	// TODO: ATM re-attaching the previous tracing buffer doesn't work very
	// well. The entries should be checked more thoroughly for validity --
	// e.g. the entries' vtable pointers could be invalid, which can make the
	// "traced" command quite unusable. The validity of the entries could be
	// checked in a safe environment (i.e. with a fault handler) with
	// typeid() and a call of a virtual function.
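	// For now we always bail out, i.e. recovering a previous session's
	// tracing buffer is disabled and the code below is never reached.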
524 	return false;
525 
526 	addr_t bufferStart
527 		= (addr_t)fTraceOutputBuffer + kTraceOutputBufferSize;
528 	addr_t bufferEnd = bufferStart + MAX_TRACE_SIZE;
529 
530 	if (bufferStart > bufferEnd || (addr_t)fBuffer != bufferStart
531 		|| (addr_t)fFirstEntry % sizeof(trace_entry) != 0
532 		|| (addr_t)fFirstEntry < bufferStart
533 		|| (addr_t)fFirstEntry + sizeof(trace_entry) >= bufferEnd
534 		|| (addr_t)fAfterLastEntry % sizeof(trace_entry) != 0
535 		|| (addr_t)fAfterLastEntry < bufferStart
536 		|| (addr_t)fAfterLastEntry > bufferEnd
537 		|| fPhysicalAddress == 0) {
538 		dprintf("Failed to init tracing meta data: Sanity checks "
539 			"failed.\n");
540 		return false;
541 	}
542 
543 	// re-map the previous tracing buffer
544 	virtual_address_restrictions virtualRestrictions = {};
545 	virtualRestrictions.address = fTraceOutputBuffer;
546 	virtualRestrictions.address_specification = B_EXACT_ADDRESS;
547 	physical_address_restrictions physicalRestrictions = {};
548 	physicalRestrictions.low_address = fPhysicalAddress;
549 	physicalRestrictions.high_address = fPhysicalAddress
550 		+ ROUNDUP(kTraceOutputBufferSize + MAX_TRACE_SIZE, B_PAGE_SIZE);
551 	area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
552 		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
553 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_CLEAR,
554 		&virtualRestrictions, &physicalRestrictions, NULL);
555 	if (area < 0) {
556 		dprintf("Failed to init tracing meta data: Mapping tracing log "
557 			"buffer failed: %s\n", strerror(area));
558 		return false;
559 	}
560 
561 	dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n",
562 		fTraceOutputBuffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);
563 
564 	// verify/repair the tracing entry list
565 	uint32 errorCount = 0;
566 	uint32 entryCount = 0;
567 	uint32 nonBufferEntryCount = 0;
568 	uint32 previousEntrySize = 0;
569 	trace_entry* entry = fFirstEntry;
570 	while (errorCount <= kMaxRecoveringErrorCount) {
571 		// check previous entry size
572 		if (entry->previous_size != previousEntrySize) {
573 			if (entry != fFirstEntry) {
				dprintf("ktrace recovering: entry %p: fixing previous_size: "
					"%lu (should be %lu)\n", entry, entry->previous_size,
					previousEntrySize);
577 				errorCount++;
578 			}
579 			entry->previous_size = previousEntrySize;
580 		}
581 
582 		if (entry == fAfterLastEntry)
583 			break;
584 
585 		// check size field
586 		if ((entry->flags & WRAP_ENTRY) == 0 && entry->size == 0) {
587 			dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
588 				entry);
589 			errorCount++;
590 			fAfterLastEntry = entry;
591 			break;
592 		}
593 
594 		if (entry->size > uint32(fBuffer + kBufferSize - entry)) {
595 			dprintf("ktrace recovering: entry %p: size too big: %lu\n", entry,
596 				entry->size);
597 			errorCount++;
598 			fAfterLastEntry = entry;
599 			break;
600 		}
601 
602 		if (entry < fAfterLastEntry && entry + entry->size > fAfterLastEntry) {
603 			dprintf("ktrace recovering: entry %p: entry crosses "
604 				"fAfterLastEntry (%p)\n", entry, fAfterLastEntry);
605 			errorCount++;
606 			fAfterLastEntry = entry;
607 			break;
608 		}
609 
610 		// check for wrap entry
611 		if ((entry->flags & WRAP_ENTRY) != 0) {
612 			if ((uint32)(fBuffer + kBufferSize - entry)
613 					> kMaxTracingEntryByteSize / sizeof(trace_entry)) {
614 				dprintf("ktrace recovering: entry %p: wrap entry at invalid "
615 					"buffer location\n", entry);
616 				errorCount++;
617 			}
618 
619 			if (entry->size != 0) {
620 				dprintf("ktrace recovering: entry %p: invalid wrap entry "
621 					"size: %lu\n", entry, entry->size);
622 				errorCount++;
623 				entry->size = 0;
624 			}
625 
626 			previousEntrySize = fBuffer + kBufferSize - entry;
627 			entry = fBuffer;
628 			continue;
629 		}
630 
631 		if ((entry->flags & BUFFER_ENTRY) == 0) {
632 			entry->flags |= CHECK_ENTRY;
633 			nonBufferEntryCount++;
634 		}
635 
636 		entryCount++;
637 		previousEntrySize = entry->size;
638 
639 		entry += entry->size;
640 	}
641 
642 	if (errorCount > kMaxRecoveringErrorCount) {
643 		dprintf("ktrace recovering: Too many errors.\n");
644 		fAfterLastEntry = entry;
645 		fAfterLastEntry->previous_size = previousEntrySize;
646 	}
647 
648 	dprintf("ktrace recovering: Recovered %lu entries + %lu buffer entries "
649 		"from previous session. Expected %lu entries.\n", nonBufferEntryCount,
650 		entryCount - nonBufferEntryCount, fEntries);
651 	fEntries = nonBufferEntryCount;
652 
653 	B_INITIALIZE_SPINLOCK(&fLock);
654 
655 	// TODO: Actually check the entries! Do that when first accessing the
656 	// tracing buffer from the kernel debugger (when sTracingDataRecovered is
657 	// true).
658 	sTracingDataRecovered = true;
659 	return true;
660 }
661 
662 
663 #endif	// ENABLE_TRACING
664 
665 
666 // #pragma mark -
667 
668 
669 TraceOutput::TraceOutput(char* buffer, size_t bufferSize, uint32 flags)
670 	: fBuffer(buffer),
671 	  fCapacity(bufferSize),
672 	  fFlags(flags)
673 {
674 	Clear();
675 }
676 
677 
678 void
679 TraceOutput::Clear()
680 {
681 	if (fCapacity > 0)
682 		fBuffer[0] = '\0';
683 	fSize = 0;
684 }
685 
686 
687 void
688 TraceOutput::PrintArgs(const char* format, va_list args)
689 {
690 #if ENABLE_TRACING
691 	if (IsFull())
692 		return;
693 
694 	size_t length = vsnprintf(fBuffer + fSize, fCapacity - fSize, format, args);
695 	fSize += std::min(length, fCapacity - fSize - 1);
696 #endif
697 }
698 
699 
700 void
701 TraceOutput::PrintStackTrace(tracing_stack_trace* stackTrace)
702 {
703 #if ENABLE_TRACING
704 	print_stack_trace(stackTrace, TraceOutputPrint(*this));
724 #endif
725 }
726 
727 
728 void
729 TraceOutput::SetLastEntryTime(bigtime_t time)
730 {
731 	fLastEntryTime = time;
732 }
733 
734 
735 bigtime_t
736 TraceOutput::LastEntryTime() const
737 {
738 	return fLastEntryTime;
739 }
740 
741 
742 //	#pragma mark -
743 
744 
745 TraceEntry::TraceEntry()
746 {
747 }
748 
749 
750 TraceEntry::~TraceEntry()
751 {
752 }
753 
754 
755 void
756 TraceEntry::Dump(TraceOutput& out)
757 {
758 #if ENABLE_TRACING
759 	// to be overridden by subclasses
760 	out.Print("ENTRY %p", this);
761 #endif
762 }
763 
764 
765 void
766 TraceEntry::DumpStackTrace(TraceOutput& out)
767 {
768 }
769 
770 
771 void
772 TraceEntry::Initialized()
773 {
774 #if ENABLE_TRACING
775 	ToTraceEntry()->flags |= ENTRY_INITIALIZED;
776 	sTracingMetaData->IncrementEntriesEver();
777 #endif
778 }
779 
780 
781 void*
782 TraceEntry::operator new(size_t size, const std::nothrow_t&) throw()
783 {
784 #if ENABLE_TRACING
785 	trace_entry* entry = sTracingMetaData->AllocateEntry(
786 		size + sizeof(trace_entry), 0);
787 	return entry != NULL ? entry + 1 : NULL;
788 #endif
789 	return NULL;
790 }
791 
792 
793 //	#pragma mark -
794 
795 
796 AbstractTraceEntry::~AbstractTraceEntry()
797 {
798 }
799 
800 
801 void
802 AbstractTraceEntry::Dump(TraceOutput& out)
803 {
804 	bigtime_t time = (out.Flags() & TRACE_OUTPUT_DIFF_TIME)
805 		? fTime - out.LastEntryTime()
806 		: fTime;
807 
808 	if (out.Flags() & TRACE_OUTPUT_TEAM_ID)
809 		out.Print("[%6ld:%6ld] %10Ld: ", fThread, fTeam, time);
810 	else
811 		out.Print("[%6ld] %10Ld: ", fThread, time);
812 
813 	AddDump(out);
814 
815 	out.SetLastEntryTime(fTime);
816 }
817 
818 
819 void
820 AbstractTraceEntry::AddDump(TraceOutput& out)
821 {
822 }
823 
824 
825 void
826 AbstractTraceEntry::_Init()
827 {
828 	Thread* thread = thread_get_current_thread();
829 	if (thread != NULL) {
830 		fThread = thread->id;
831 		if (thread->team)
832 			fTeam = thread->team->id;
833 	}
834 	fTime = system_time();
835 }
836 
837 
838 //	#pragma mark - AbstractTraceEntryWithStackTrace
839 
840 
842 AbstractTraceEntryWithStackTrace::AbstractTraceEntryWithStackTrace(
843 	size_t stackTraceDepth, size_t skipFrames, bool kernelOnly)
844 {
845 	fStackTrace = capture_tracing_stack_trace(stackTraceDepth, skipFrames + 1,
846 		kernelOnly);
847 }
848 
849 
850 void
851 AbstractTraceEntryWithStackTrace::DumpStackTrace(TraceOutput& out)
852 {
853 	out.PrintStackTrace(fStackTrace);
854 }
855 
856 
857 //	#pragma mark -
858 
859 
860 #if ENABLE_TRACING
861 
862 class KernelTraceEntry : public AbstractTraceEntry {
863 	public:
864 		KernelTraceEntry(const char* message)
865 		{
866 			fMessage = alloc_tracing_buffer_strcpy(message, 256, false);
867 
868 #if KTRACE_PRINTF_STACK_TRACE
869 			fStackTrace = capture_tracing_stack_trace(
870 				KTRACE_PRINTF_STACK_TRACE, 1, false);
871 #endif
872 			Initialized();
873 		}
874 
875 		virtual void AddDump(TraceOutput& out)
876 		{
877 			out.Print("kern: %s", fMessage);
878 		}
879 
880 #if KTRACE_PRINTF_STACK_TRACE
881 		virtual void DumpStackTrace(TraceOutput& out)
882 		{
883 			out.PrintStackTrace(fStackTrace);
884 		}
885 #endif
886 
887 	private:
888 		char*	fMessage;
889 #if KTRACE_PRINTF_STACK_TRACE
890 		tracing_stack_trace* fStackTrace;
891 #endif
892 };
893 
894 
895 class UserTraceEntry : public AbstractTraceEntry {
896 	public:
897 		UserTraceEntry(const char* message)
898 		{
899 			fMessage = alloc_tracing_buffer_strcpy(message, 256, true);
900 
901 #if KTRACE_PRINTF_STACK_TRACE
902 			fStackTrace = capture_tracing_stack_trace(
903 				KTRACE_PRINTF_STACK_TRACE, 1, false);
904 #endif
905 			Initialized();
906 		}
907 
908 		virtual void AddDump(TraceOutput& out)
909 		{
910 			out.Print("user: %s", fMessage);
911 		}
912 
913 #if KTRACE_PRINTF_STACK_TRACE
914 		virtual void DumpStackTrace(TraceOutput& out)
915 		{
916 			out.PrintStackTrace(fStackTrace);
917 		}
918 #endif
919 
920 	private:
921 		char*	fMessage;
922 #if KTRACE_PRINTF_STACK_TRACE
923 		tracing_stack_trace* fStackTrace;
924 #endif
925 };
926 
927 
928 class TracingLogStartEntry : public AbstractTraceEntry {
929 	public:
930 		TracingLogStartEntry()
931 		{
932 			Initialized();
933 		}
934 
935 		virtual void AddDump(TraceOutput& out)
936 		{
937 			out.Print("ktrace start");
938 		}
939 };
940 
941 #endif	// ENABLE_TRACING
942 
943 
944 //	#pragma mark - trace filters
945 
946 
947 TraceFilter::~TraceFilter()
948 {
949 }
950 
951 
952 bool
953 TraceFilter::Filter(const TraceEntry* entry, LazyTraceOutput& out)
954 {
955 	return false;
956 }
957 
958 
960 class ThreadTraceFilter : public TraceFilter {
961 public:
962 	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
963 	{
964 		const AbstractTraceEntry* entry
965 			= dynamic_cast<const AbstractTraceEntry*>(_entry);
966 		return (entry != NULL && entry->ThreadID() == fThread);
967 	}
968 };
969 
970 
971 class TeamTraceFilter : public TraceFilter {
972 public:
973 	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
974 	{
975 		const AbstractTraceEntry* entry
976 			= dynamic_cast<const AbstractTraceEntry*>(_entry);
977 		return (entry != NULL && entry->TeamID() == fTeam);
978 	}
979 };
980 
981 
982 class PatternTraceFilter : public TraceFilter {
983 public:
984 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
985 	{
986 		return strstr(out.DumpEntry(entry), fString) != NULL;
987 	}
988 };
989 
990 
991 class DecimalPatternTraceFilter : public TraceFilter {
992 public:
993 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
994 	{
995 		// TODO: this is *very* slow
996 		char buffer[64];
997 		snprintf(buffer, sizeof(buffer), "%Ld", fValue);
998 		return strstr(out.DumpEntry(entry), buffer) != NULL;
999 	}
1000 };
1001 
1002 class HexPatternTraceFilter : public TraceFilter {
1003 public:
1004 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1005 	{
1006 		// TODO: this is *very* slow
1007 		char buffer[64];
1008 		snprintf(buffer, sizeof(buffer), "%Lx", fValue);
1009 		return strstr(out.DumpEntry(entry), buffer) != NULL;
1010 	}
1011 };
1012 
1013 class StringPatternTraceFilter : public TraceFilter {
1014 public:
1015 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1016 	{
1017 		if (IS_KERNEL_ADDRESS(fValue))
1018 			return strstr(out.DumpEntry(entry), (const char*)fValue) != NULL;
1019 
1020 		// TODO: this is *very* slow
1021 		char buffer[64];
1022 		user_strlcpy(buffer, (const char*)fValue, sizeof(buffer));
1023 		return strstr(out.DumpEntry(entry), buffer) != NULL;
1024 	}
1025 };
1026 
1027 class NotTraceFilter : public TraceFilter {
1028 public:
1029 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1030 	{
1031 		return !fSubFilters.first->Filter(entry, out);
1032 	}
1033 };
1034 
1035 
1036 class AndTraceFilter : public TraceFilter {
1037 public:
1038 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1039 	{
1040 		return fSubFilters.first->Filter(entry, out)
1041 			&& fSubFilters.second->Filter(entry, out);
1042 	}
1043 };
1044 
1045 
1046 class OrTraceFilter : public TraceFilter {
1047 public:
1048 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
1049 	{
1050 		return fSubFilters.first->Filter(entry, out)
1051 			|| fSubFilters.second->Filter(entry, out);
1052 	}
1053 };
1054 
1055 
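// Parses filter expressions in prefix notation, e.g. (from the "traced"
// debugger command, arguments chosen for illustration):
//   traced 0 10 filter and thread 42 #vm
// which selects entries of thread 42 whose dumped text contains "vm".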
1056 class TraceFilterParser {
1057 public:
1058 	static TraceFilterParser* Default()
1059 	{
1060 		return &sParser;
1061 	}
1062 
1063 	bool Parse(int argc, const char* const* argv)
1064 	{
1065 		fTokens = argv;
1066 		fTokenCount = argc;
1067 		fTokenIndex = 0;
1068 		fFilterCount = 0;
1069 
1070 		TraceFilter* filter = _ParseExpression();
1071 		return fTokenIndex == fTokenCount && filter != NULL;
1072 	}
1073 
1074 	TraceFilter* Filter()
1075 	{
1076 		return &fFilters[0];
1077 	}
1078 
1079 private:
1080 	TraceFilter* _ParseExpression()
1081 	{
1082 		const char* token = _NextToken();
1083 		if (!token) {
1084 			// unexpected end of expression
1085 			return NULL;
1086 		}
1087 
1088 		if (fFilterCount == MAX_FILTERS) {
1089 			// too many filters
1090 			return NULL;
1091 		}
1092 
1093 		if (token[0] == '#') {
1094 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1095 				PatternTraceFilter;
1096 			filter->fString = token + 1;
1097 			return filter;
1098 		} else if (token[0] == 'd' && token[1] == '#') {
1099 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1100 				DecimalPatternTraceFilter;
1101 			filter->fValue = parse_expression(token + 2);
1102 			return filter;
1103 		} else if (token[0] == 'x' && token[1] == '#') {
1104 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1105 				HexPatternTraceFilter;
1106 			filter->fValue = parse_expression(token + 2);
1107 			return filter;
1108 		} else if (token[0] == 's' && token[1] == '#') {
1109 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1110 				StringPatternTraceFilter;
1111 			filter->fValue = parse_expression(token + 2);
1112 			return filter;
1113 		} else if (strcmp(token, "not") == 0) {
1114 			TraceFilter* filter = new(&fFilters[fFilterCount++]) NotTraceFilter;
1115 			if ((filter->fSubFilters.first = _ParseExpression()) != NULL)
1116 				return filter;
1117 			return NULL;
1118 		} else if (strcmp(token, "and") == 0) {
1119 			TraceFilter* filter = new(&fFilters[fFilterCount++]) AndTraceFilter;
1120 			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
1121 				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
1122 				return filter;
1123 			}
1124 			return NULL;
1125 		} else if (strcmp(token, "or") == 0) {
1126 			TraceFilter* filter = new(&fFilters[fFilterCount++]) OrTraceFilter;
1127 			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
1128 				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
1129 				return filter;
1130 			}
1131 			return NULL;
1132 		} else if (strcmp(token, "thread") == 0) {
1133 			const char* arg = _NextToken();
1134 			if (arg == NULL) {
1135 				// unexpected end of expression
1136 				return NULL;
1137 			}
1138 
1139 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1140 				ThreadTraceFilter;
1141 			filter->fThread = strtol(arg, NULL, 0);
1142 			return filter;
1143 		} else if (strcmp(token, "team") == 0) {
1144 			const char* arg = _NextToken();
1145 			if (arg == NULL) {
1146 				// unexpected end of expression
1147 				return NULL;
1148 			}
1149 
1150 			TraceFilter* filter = new(&fFilters[fFilterCount++])
1151 				TeamTraceFilter;
1152 			filter->fTeam = strtol(arg, NULL, 0);
1153 			return filter;
1154 		} else {
1155 			// invalid token
1156 			return NULL;
1157 		}
1158 	}
1159 
1160 	const char* _CurrentToken() const
1161 	{
1162 		if (fTokenIndex >= 1 && fTokenIndex <= fTokenCount)
1163 			return fTokens[fTokenIndex - 1];
1164 		return NULL;
1165 	}
1166 
1167 	const char* _NextToken()
1168 	{
1169 		if (fTokenIndex >= fTokenCount)
1170 			return NULL;
1171 		return fTokens[fTokenIndex++];
1172 	}
1173 
1174 private:
1175 	enum { MAX_FILTERS = 32 };
1176 
1177 	const char* const*			fTokens;
1178 	int							fTokenCount;
1179 	int							fTokenIndex;
1180 	TraceFilter					fFilters[MAX_FILTERS];
1181 	int							fFilterCount;
1182 
1183 	static TraceFilterParser	sParser;
1184 };
1185 
1186 
1187 TraceFilterParser TraceFilterParser::sParser;
1188 
1189 
1190 //	#pragma mark -
1191 
1192 
1193 #if ENABLE_TRACING
1194 
1195 
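// TraceEntryIterator uses Pascal-like indices: valid entries are at
// [1, Entries()]; 0 means "before the first entry" and Entries() + 1 means
// "after the last entry".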
1196 TraceEntry*
1197 TraceEntryIterator::Next()
1198 {
1199 	if (fIndex == 0) {
1200 		fEntry = _NextNonBufferEntry(sTracingMetaData->FirstEntry());
1201 		fIndex = 1;
1202 	} else if (fEntry != NULL) {
1203 		fEntry = _NextNonBufferEntry(sTracingMetaData->NextEntry(fEntry));
1204 		fIndex++;
1205 	}
1206 
1207 	return Current();
1208 }
1209 
1210 
1211 TraceEntry*
1212 TraceEntryIterator::Previous()
1213 {
1214 	if (fIndex == (int32)sTracingMetaData->Entries() + 1)
1215 		fEntry = sTracingMetaData->AfterLastEntry();
1216 
1217 	if (fEntry != NULL) {
1218 		fEntry = _PreviousNonBufferEntry(
1219 			sTracingMetaData->PreviousEntry(fEntry));
1220 		fIndex--;
1221 	}
1222 
1223 	return Current();
1224 }
1225 
1226 
1227 TraceEntry*
1228 TraceEntryIterator::MoveTo(int32 index)
1229 {
1230 	if (index == fIndex)
1231 		return Current();
1232 
1233 	if (index <= 0 || index > (int32)sTracingMetaData->Entries()) {
1234 		fIndex = (index <= 0 ? 0 : sTracingMetaData->Entries() + 1);
1235 		fEntry = NULL;
1236 		return NULL;
1237 	}
1238 
1239 	// get the shortest iteration path
1240 	int32 distance = index - fIndex;
1241 	int32 direction = distance < 0 ? -1 : 1;
1242 	distance *= direction;
1243 
1244 	if (index < distance) {
1245 		distance = index;
1246 		direction = 1;
1247 		fEntry = NULL;
1248 		fIndex = 0;
1249 	}
1250 	if ((int32)sTracingMetaData->Entries() + 1 - fIndex < distance) {
1251 		distance = sTracingMetaData->Entries() + 1 - fIndex;
1252 		direction = -1;
1253 		fEntry = NULL;
1254 		fIndex = sTracingMetaData->Entries() + 1;
1255 	}
1256 
1257 	// iterate to the index
1258 	if (direction < 0) {
1259 		while (fIndex != index)
1260 			Previous();
1261 	} else {
1262 		while (fIndex != index)
1263 			Next();
1264 	}
1265 
1266 	return Current();
1267 }
1268 
1269 
1270 trace_entry*
1271 TraceEntryIterator::_NextNonBufferEntry(trace_entry* entry)
1272 {
1273 	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
1274 		entry = sTracingMetaData->NextEntry(entry);
1275 
1276 	return entry;
1277 }
1278 
1279 
1280 trace_entry*
1281 TraceEntryIterator::_PreviousNonBufferEntry(trace_entry* entry)
1282 {
1283 	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
1284 		entry = sTracingMetaData->PreviousEntry(entry);
1285 
1286 	return entry;
1287 }
1288 
1289 
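/*!	Implements the "traced" debugger command. The static locals below
	preserve the state of the previous invocation, so "traced forward" and
	"traced backward" (or re-invoking via an empty line) can continue where
	the last dump left off. \a wrapperFilter, if given, is wrapped around the
	user-specified filter (used by dump_tracing()).
*/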
1290 int
1291 dump_tracing_internal(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
1292 {
1293 	int argi = 1;
1294 
1295 	// variables in which we store our state to be continuable
1296 	static int32 _previousCount = 0;
1297 	static bool _previousHasFilter = false;
1298 	static bool _previousPrintStackTrace = false;
1299 	static int32 _previousMaxToCheck = 0;
1300 	static int32 _previousFirstChecked = 1;
1301 	static int32 _previousLastChecked = -1;
1302 	static int32 _previousDirection = 1;
1303 	static uint32 _previousEntriesEver = 0;
1304 	static uint32 _previousEntries = 0;
1305 	static uint32 _previousOutputFlags = 0;
1306 	static TraceEntryIterator iterator;
1307 
1308 	uint32 entriesEver = sTracingMetaData->EntriesEver();
1309 
1310 	// Note: start and index are Pascal-like indices (i.e. in [1, Entries()]).
1311 	int32 start = 0;	// special index: print the last count entries
1312 	int32 count = 0;
1313 	int32 maxToCheck = 0;
1314 	int32 cont = 0;
1315 
1316 	bool hasFilter = false;
1317 	bool printStackTrace = false;
1318 
1319 	uint32 outputFlags = 0;
1320 	while (argi < argc) {
1321 		if (strcmp(argv[argi], "--difftime") == 0) {
1322 			outputFlags |= TRACE_OUTPUT_DIFF_TIME;
1323 			argi++;
1324 		} else if (strcmp(argv[argi], "--printteam") == 0) {
1325 			outputFlags |= TRACE_OUTPUT_TEAM_ID;
1326 			argi++;
1327 		} else if (strcmp(argv[argi], "--stacktrace") == 0) {
1328 			printStackTrace = true;
1329 			argi++;
1330 		} else
1331 			break;
1332 	}
1333 
1334 	if (argi < argc) {
1335 		if (strcmp(argv[argi], "forward") == 0) {
1336 			cont = 1;
1337 			argi++;
1338 		} else if (strcmp(argv[argi], "backward") == 0) {
1339 			cont = -1;
1340 			argi++;
1341 		}
1342 	} else
1343 		cont = _previousDirection;
1344 
1345 	if (cont != 0) {
1346 		if (argi < argc) {
1347 			print_debugger_command_usage(argv[0]);
1348 			return 0;
1349 		}
1350 		if (entriesEver == 0 || entriesEver != _previousEntriesEver
1351 			|| sTracingMetaData->Entries() != _previousEntries) {
1352 			kprintf("Can't continue iteration. \"%s\" has not been invoked "
1353 				"before, or there were new entries written since the last "
1354 				"invocation.\n", argv[0]);
1355 			return 0;
1356 		}
1357 	}
1358 
1359 	// get start, count, maxToCheck
1360 	int32* params[3] = { &start, &count, &maxToCheck };
1361 	for (int i = 0; i < 3 && !hasFilter && argi < argc; i++) {
1362 		if (strcmp(argv[argi], "filter") == 0) {
1363 			hasFilter = true;
1364 			argi++;
1365 		} else if (argv[argi][0] == '#') {
1366 			hasFilter = true;
1367 		} else {
1368 			*params[i] = parse_expression(argv[argi]);
1369 			argi++;
1370 		}
1371 	}
1372 
1373 	// filter specification
1374 	if (argi < argc) {
1375 		hasFilter = true;
1376 		if (strcmp(argv[argi], "filter") == 0)
1377 			argi++;
1378 
1379 		if (!TraceFilterParser::Default()->Parse(argc - argi, argv + argi)) {
1380 			print_debugger_command_usage(argv[0]);
1381 			return 0;
1382 		}
1383 	}
1384 
1385 	int32 direction;
1386 	int32 firstToCheck;
1387 	int32 lastToCheck;
1388 
1389 	if (cont != 0) {
1390 		// get values from the previous iteration
1391 		direction = cont;
1392 		count = _previousCount;
1393 		maxToCheck = _previousMaxToCheck;
1394 		hasFilter = _previousHasFilter;
1395 		outputFlags = _previousOutputFlags;
1396 		printStackTrace = _previousPrintStackTrace;
1397 
1398 		if (direction < 0)
1399 			start = _previousFirstChecked - 1;
1400 		else
1401 			start = _previousLastChecked + 1;
1402 	} else {
1403 		// defaults for count and maxToCheck
1404 		if (count == 0)
1405 			count = 30;
1406 		if (maxToCheck == 0 || !hasFilter)
1407 			maxToCheck = count;
1408 		else if (maxToCheck < 0)
1409 			maxToCheck = sTracingMetaData->Entries();
1410 
1411 		// determine iteration direction
1412 		direction = (start <= 0 || count < 0 ? -1 : 1);
1413 
1414 		// validate count and maxToCheck
1415 		if (count < 0)
1416 			count = -count;
1417 		if (maxToCheck < 0)
1418 			maxToCheck = -maxToCheck;
1419 		if (maxToCheck > (int32)sTracingMetaData->Entries())
1420 			maxToCheck = sTracingMetaData->Entries();
1421 		if (count > maxToCheck)
1422 			count = maxToCheck;
1423 
1424 		// validate start
1425 		if (start <= 0 || start > (int32)sTracingMetaData->Entries())
1426 			start = max_c(1, sTracingMetaData->Entries());
1427 	}
1428 
1429 	if (direction < 0) {
1430 		firstToCheck = max_c(1, start - maxToCheck + 1);
1431 		lastToCheck = start;
1432 	} else {
1433 		firstToCheck = start;
1434 		lastToCheck = min_c((int32)sTracingMetaData->Entries(),
1435 			start + maxToCheck - 1);
1436 	}
1437 
1438 	// reset the iterator, if something changed in the meantime
1439 	if (entriesEver == 0 || entriesEver != _previousEntriesEver
1440 		|| sTracingMetaData->Entries() != _previousEntries) {
1441 		iterator.Reset();
1442 	}
1443 
1444 	LazyTraceOutput out(sTracingMetaData->TraceOutputBuffer(),
1445 		kTraceOutputBufferSize, outputFlags);
1446 
1447 	bool markedMatching = false;
1448 	int32 firstToDump = firstToCheck;
1449 	int32 lastToDump = lastToCheck;
1450 
1451 	TraceFilter* filter = NULL;
1452 	if (hasFilter)
1453 		filter = TraceFilterParser::Default()->Filter();
1454 
1455 	if (wrapperFilter != NULL) {
1456 		wrapperFilter->Init(filter, direction, cont != 0);
1457 		filter = wrapperFilter;
1458 	}
1459 
1460 	if (direction < 0 && filter && lastToCheck - firstToCheck >= count) {
1461 		// iteration direction is backwards
1462 		markedMatching = true;
1463 
1464 		// From the last entry to check iterate backwards to check filter
1465 		// matches.
1466 		int32 matching = 0;
1467 
1468 		// move to the entry after the last entry to check
1469 		iterator.MoveTo(lastToCheck + 1);
1470 
1471 		// iterate backwards
1472 		firstToDump = -1;
1473 		lastToDump = -1;
1474 		while (iterator.Index() > firstToCheck) {
1475 			TraceEntry* entry = iterator.Previous();
1476 			if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
1477 				out.Clear();
1478 				if (filter->Filter(entry, out)) {
1479 					entry->ToTraceEntry()->flags |= FILTER_MATCH;
1480 					if (lastToDump == -1)
1481 						lastToDump = iterator.Index();
1482 					firstToDump = iterator.Index();
1483 
1484 					matching++;
1485 					if (matching >= count)
1486 						break;
1487 				} else
1488 					entry->ToTraceEntry()->flags &= ~FILTER_MATCH;
1489 			}
1490 		}
1491 
1492 		firstToCheck = iterator.Index();
1493 
1494 		// iterate to the previous entry, so that the next loop starts at the
1495 		// right one
1496 		iterator.Previous();
1497 	}
1498 
1499 	out.SetLastEntryTime(0);
1500 
1501 	// set the iterator to the entry before the first one to dump
1502 	iterator.MoveTo(firstToDump - 1);
1503 
1504 	// dump the entries matching the filter in the range
1505 	// [firstToDump, lastToDump]
1506 	int32 dumped = 0;
1507 
1508 	while (TraceEntry* entry = iterator.Next()) {
1509 		int32 index = iterator.Index();
1510 		if (index < firstToDump)
1511 			continue;
1512 		if (index > lastToDump || dumped >= count) {
1513 			if (direction > 0)
1514 				lastToCheck = index - 1;
1515 			break;
1516 		}
1517 
1518 		if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
1519 			out.Clear();
			if (filter && (markedMatching
1521 					? (entry->Flags() & FILTER_MATCH) == 0
1522 					: !filter->Filter(entry, out))) {
1523 				continue;
1524 			}
1525 
1526 			// don't print trailing new line
1527 			const char* dump = out.DumpEntry(entry);
1528 			int len = strlen(dump);
1529 			if (len > 0 && dump[len - 1] == '\n')
1530 				len--;
1531 
1532 			kprintf("%5ld. %.*s\n", index, len, dump);
1533 
1534 			if (printStackTrace) {
1535 				out.Clear();
1536 				entry->DumpStackTrace(out);
1537 				if (out.Size() > 0)
1538 					kputs(out.Buffer());
1539 			}
1540 		} else if (!filter)
1541 			kprintf("%5ld. ** uninitialized entry **\n", index);
1542 
1543 		dumped++;
1544 	}
1545 
1546 	kprintf("printed %ld entries within range %ld to %ld (%ld of %ld total, "
1547 		"%ld ever)\n", dumped, firstToCheck, lastToCheck,
1548 		lastToCheck - firstToCheck + 1, sTracingMetaData->Entries(),
1549 		entriesEver);
1550 
1551 	// store iteration state
1552 	_previousCount = count;
1553 	_previousMaxToCheck = maxToCheck;
1554 	_previousHasFilter = hasFilter;
1555 	_previousPrintStackTrace = printStackTrace;
1556 	_previousFirstChecked = firstToCheck;
1557 	_previousLastChecked = lastToCheck;
1558 	_previousDirection = direction;
1559 	_previousEntriesEver = entriesEver;
1560 	_previousEntries = sTracingMetaData->Entries();
1561 	_previousOutputFlags = outputFlags;
1562 
1563 	return cont != 0 ? B_KDEBUG_CONT : 0;
1564 }
1565 
1566 
1567 static int
1568 dump_tracing_command(int argc, char** argv)
1569 {
1570 	return dump_tracing_internal(argc, argv, NULL);
1571 }
1572 
1573 
1574 #endif	// ENABLE_TRACING
1575 
1576 
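/*!	Allocates \a size bytes from the tracing ring buffer as a BUFFER_ENTRY,
	i.e. as raw storage that is skipped when iterating trace entries.
	Returns \c NULL if tracing is disabled or no space can be made.
*/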
1577 extern "C" uint8*
1578 alloc_tracing_buffer(size_t size)
1579 {
1580 #if	ENABLE_TRACING
1581 	trace_entry* entry = sTracingMetaData->AllocateEntry(
1582 		size + sizeof(trace_entry), BUFFER_ENTRY);
1583 	if (entry == NULL)
1584 		return NULL;
1585 
1586 	return (uint8*)(entry + 1);
1587 #else
1588 	return NULL;
1589 #endif
1590 }
1591 
1592 
1593 uint8*
1594 alloc_tracing_buffer_memcpy(const void* source, size_t size, bool user)
1595 {
1596 	if (user && !IS_USER_ADDRESS(source))
1597 		return NULL;
1598 
1599 	uint8* buffer = alloc_tracing_buffer(size);
1600 	if (buffer == NULL)
1601 		return NULL;
1602 
1603 	if (user) {
1604 		if (user_memcpy(buffer, source, size) != B_OK)
1605 			return NULL;
1606 	} else
1607 		memcpy(buffer, source, size);
1608 
1609 	return buffer;
1610 }
1611 
1612 
1613 char*
1614 alloc_tracing_buffer_strcpy(const char* source, size_t maxSize, bool user)
1615 {
1616 	if (source == NULL || maxSize == 0)
1617 		return NULL;
1618 
1619 	if (user && !IS_USER_ADDRESS(source))
1620 		return NULL;
1621 
1622 	// limit maxSize to the actual source string len
1623 	if (user) {
1624 		ssize_t size = user_strlcpy(NULL, source, 0);
1625 			// there's no user_strnlen()
1626 		if (size < 0)
			return NULL;
1628 		maxSize = min_c(maxSize, (size_t)size + 1);
1629 	} else
1630 		maxSize = strnlen(source, maxSize - 1) + 1;
1631 
1632 	char* buffer = (char*)alloc_tracing_buffer(maxSize);
1633 	if (buffer == NULL)
1634 		return NULL;
1635 
1636 	if (user) {
1637 		if (user_strlcpy(buffer, source, maxSize) < B_OK)
1638 			return NULL;
1639 	} else
1640 		strlcpy(buffer, source, maxSize);
1641 
1642 	return buffer;
1643 }
1644 
1645 
1646 tracing_stack_trace*
1647 capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool kernelOnly)
1648 {
1649 #if	ENABLE_TRACING
1650 	// page_fault_exception() doesn't allow us to gracefully handle a bad
1651 	// address in the stack trace, if interrupts are disabled, so we always
1652 	// restrict the stack traces to the kernel only in this case. A bad address
1653 	// in the kernel stack trace would still cause a panic(), but this is
1654 	// probably even desired.
1655 	if (!are_interrupts_enabled())
1656 		kernelOnly = true;
1657 
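	// Note: the stack trace is allocated from the tracing buffer itself (as
	// a BUFFER_ENTRY), so it may be evicted when the ring buffer wraps.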
1658 	tracing_stack_trace* stackTrace
1659 		= (tracing_stack_trace*)alloc_tracing_buffer(
1660 			sizeof(tracing_stack_trace) + maxCount * sizeof(addr_t));
1661 
1662 	if (stackTrace != NULL) {
1663 		stackTrace->depth = arch_debug_get_stack_trace(
1664 			stackTrace->return_addresses, maxCount, 0, skipFrames + 1,
1665 			STACK_TRACE_KERNEL | (kernelOnly ? 0 : STACK_TRACE_USER));
1666 	}
1667 
1668 	return stackTrace;
1669 #else
1670 	return NULL;
1671 #endif
1672 }
1673 
1674 
1675 addr_t
1676 tracing_find_caller_in_stack_trace(struct tracing_stack_trace* stackTrace,
1677 	const addr_t excludeRanges[], uint32 excludeRangeCount)
1678 {
1679 	for (int32 i = 0; i < stackTrace->depth; i++) {
1680 		addr_t returnAddress = stackTrace->return_addresses[i];
1681 
1682 		bool inRange = false;
1683 		for (uint32 j = 0; j < excludeRangeCount; j++) {
1684 			if (returnAddress >= excludeRanges[j * 2 + 0]
1685 				&& returnAddress < excludeRanges[j * 2 + 1]) {
1686 				inRange = true;
1687 				break;
1688 			}
1689 		}
1690 
1691 		if (!inRange)
1692 			return returnAddress;
1693 	}
1694 
1695 	return 0;
1696 }
1697 
1698 
1699 void
1700 tracing_print_stack_trace(struct tracing_stack_trace* stackTrace)
1701 {
1702 #if ENABLE_TRACING
1703 	print_stack_trace(stackTrace, kprintf);
1704 #endif
1705 }
1706 
1707 
1708 int
1709 dump_tracing(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
1710 {
1711 #if	ENABLE_TRACING
1712 	return dump_tracing_internal(argc, argv, wrapperFilter);
1713 #else
1714 	return 0;
1715 #endif
1716 }
1717 
1718 
1719 bool
1720 tracing_is_entry_valid(AbstractTraceEntry* candidate, bigtime_t entryTime)
1721 {
1722 #if ENABLE_TRACING
1723 	if (!sTracingMetaData->IsInBuffer(candidate, sizeof(*candidate)))
1724 		return false;
1725 
1726 	if (entryTime < 0)
1727 		return true;
1728 
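	// Compare the candidate against the oldest abstract entry still in the
	// buffer: if that one is already newer than entryTime, the candidate has
	// been overwritten in the meantime. Otherwise the candidate is valid iff
	// its time stamp is unchanged.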
1729 	TraceEntryIterator iterator;
1730 	while (TraceEntry* entry = iterator.Next()) {
1731 		AbstractTraceEntry* abstract = dynamic_cast<AbstractTraceEntry*>(entry);
1732 		if (abstract == NULL)
1733 			continue;
1734 
1735 		if (abstract != candidate && abstract->Time() > entryTime)
1736 			return false;
1737 
1738 		return candidate->Time() == entryTime;
1739 	}
1740 #endif
1741 
1742 	return false;
1743 }
1744 
1745 
1746 void
1747 lock_tracing_buffer()
1748 {
1749 #if ENABLE_TRACING
1750 	sTracingMetaData->Lock();
1751 #endif
1752 }
1753 
1754 
1755 void
1756 unlock_tracing_buffer()
1757 {
1758 #if ENABLE_TRACING
1759 	sTracingMetaData->Unlock();
1760 #endif
1761 }
1762 
1763 
1764 extern "C" status_t
1765 tracing_init(void)
1766 {
1767 #if	ENABLE_TRACING
1768 	status_t result = TracingMetaData::Create(sTracingMetaData);
1769 	if (result != B_OK) {
1770 		sTracingMetaData = &sDummyTracingMetaData;
1771 		return result;
1772 	}
1773 
1774 	new(nothrow) TracingLogStartEntry();
1775 
1776 	add_debugger_command_etc("traced", &dump_tracing_command,
1777 		"Dump recorded trace entries",
1778 		"[ --printteam ] [ --difftime ] [ --stacktrace ] "
1779 			"(\"forward\" | \"backward\") "
1780 			"| ([ <start> [ <count> [ <range> ] ] ] "
1781 			"[ #<pattern> | (\"filter\" <filter>) ])\n"
1782 		"Prints recorded trace entries. If \"backward\" or \"forward\" is\n"
1783 		"specified, the command continues where the previous invocation left\n"
1784 		"off, i.e. printing the previous respectively next entries (as many\n"
1785 		"as printed before). In this case the command is continuable, that is\n"
1786 		"afterwards entering an empty line in the debugger will reinvoke it.\n"
1787 		"If no arguments are given, the command continues in the direction\n"
1788 		"of the last invocation.\n"
1789 		"--printteam  - enables printing the entries' team IDs.\n"
1790 		"--difftime   - print difference times for all but the first entry.\n"
1791 		"--stacktrace - print stack traces for entries that captured one.\n"
1792 		"  <start>    - The base index of the entries to print. Depending on\n"
1793 		"               whether the iteration direction is forward or\n"
1794 		"               backward this will be the first or last entry printed\n"
1795 		"               (potentially, if a filter is specified). The index of\n"
1796 		"               the first entry in the trace buffer is 1. If 0 is\n"
1797 		"               specified, the last <count> recorded entries are\n"
		"               printed (iteration direction is backward). Defaults\n"
1799 		"               to 0.\n"
1800 		"  <count>    - The number of entries to be printed. Defaults to 30.\n"
1801 		"               If negative, the -<count> entries before and\n"
1802 		"               including <start> will be printed.\n"
1803 		"  <range>    - Only relevant if a filter is specified. Specifies the\n"
1804 		"               number of entries to be filtered -- depending on the\n"
1805 		"               iteration direction the entries before or after\n"
1806 		"               <start>. If more than <count> entries match the\n"
1807 		"               filter, only the first (forward) or last (backward)\n"
1808 		"               <count> matching entries will be printed. If 0 is\n"
1809 		"               specified <range> will be set to <count>. If -1,\n"
1810 		"               <range> will be set to the number of recorded\n"
1811 		"               entries.\n"
1812 		"  <pattern>  - If specified only entries containing this string are\n"
1813 		"               printed.\n"
1814 		"  <filter>   - If specified only entries matching this filter\n"
1815 		"               expression are printed. The expression can consist of\n"
1816 		"               prefix operators \"not\", \"and\", \"or\", and\n"
1817 		"               filters \"'thread' <thread>\" (matching entries\n"
1818 		"               with the given thread ID), \"'team' <team>\"\n"
		"               (matching entries with the given team ID), and\n"
1820 		"               \"#<pattern>\" (matching entries containing the given\n"
1821 		"               string).\n", 0);
1822 #endif	// ENABLE_TRACING
1823 	return B_OK;
1824 }
1825 
1826 
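/*!	printf()-style convenience for recording a KernelTraceEntry, e.g.
	(hypothetical call): ktrace_printf("vm: mapped area %ld", (long)id);
	The formatted message is truncated to fit a 256 byte buffer.
*/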
1827 void
1828 ktrace_printf(const char *format, ...)
1829 {
1830 #if	ENABLE_TRACING
1831 	va_list list;
1832 	va_start(list, format);
1833 
1834 	char buffer[256];
1835 	vsnprintf(buffer, sizeof(buffer), format, list);
1836 
1837 	va_end(list);
1838 
1839 	new(nothrow) KernelTraceEntry(buffer);
1840 #endif	// ENABLE_TRACING
1841 }
1842 
1843 
1844 void
1845 _user_ktrace_output(const char *message)
1846 {
1847 #if	ENABLE_TRACING
1848 	new(nothrow) UserTraceEntry(message);
1849 #endif	// ENABLE_TRACING
1850 }
1851 
1852