/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include <tracing.h>

#include <stdarg.h>
#include <stdlib.h>

#include <algorithm>

#include <arch/debug.h>
#include <debug.h>
#include <elf.h>
#include <int.h>
#include <kernel.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm.h>


struct tracing_stack_trace {
	int32	depth;
	addr_t	return_addresses[0];
};
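// return_addresses is a zero-length (pre-C99 flexible) array member: callers
// allocate sizeof(tracing_stack_trace) + maxCount * sizeof(addr_t) and store
// the return addresses directly behind the struct (see
// capture_tracing_stack_trace() at the end of this file).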


#if ENABLE_TRACING

//#define TRACE_TRACING
#ifdef TRACE_TRACING
#	define TRACE(x) dprintf_no_syslog x
#else
#	define TRACE(x) ;
#endif


enum {
	WRAP_ENTRY			= 0x01,
	ENTRY_INITIALIZED	= 0x02,
	BUFFER_ENTRY		= 0x04,
	FILTER_MATCH		= 0x08,
	INVALID_ENTRY		= 0x10,
	CHECK_ENTRY			= 0x20,
};


static const size_t kTraceOutputBufferSize = 10240;
static const size_t kBufferSize = MAX_TRACE_SIZE / sizeof(trace_entry);

static const uint32 kMaxRecoveringErrorCount	= 100;
static const addr_t kMetaDataBaseAddress		= 32 * 1024 * 1024;
static const addr_t kMetaDataBaseEndAddress		= 128 * 1024 * 1024;
static const addr_t kMetaDataAddressIncrement	= 8 * 1024 * 1024;
static const uint32 kMetaDataMagic1 = 'Vali';
static const uint32 kMetaDataMagic2 = 'dTra';
static const uint32 kMetaDataMagic3 = 'cing';

// the maximum we can address with the trace_entry::[previous_]size fields
static const size_t kMaxTracingEntryByteSize
	= ((1 << 13) - 1) * sizeof(trace_entry);
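// For illustration: (1 << 13) - 1 matches the 13 bit size fields implied
// above; assuming sizeof(trace_entry) == 4, a single entry is thus limited to
// 8191 * 4 = 32764 bytes, which AllocateEntry() enforces below.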


class TracingMetaData {
public:
	static	status_t			Create(TracingMetaData*& _metaData);

	inline	bool				Lock();
	inline	void				Unlock();

	inline	trace_entry*		FirstEntry() const;
	inline	trace_entry*		AfterLastEntry() const;

	inline	uint32				Entries() const;
	inline	uint32				EntriesEver() const;

	inline	void				IncrementEntriesEver();

	inline	char*				TraceOutputBuffer() const;

			trace_entry*		NextEntry(trace_entry* entry);
			trace_entry*		PreviousEntry(trace_entry* entry);

			trace_entry*		AllocateEntry(size_t size, uint16 flags);

private:
			bool				_FreeFirstEntry();
			bool				_MakeSpace(size_t needed);

	static	status_t			_CreateMetaDataArea(bool findPrevious,
									area_id& _area,
									TracingMetaData*& _metaData);
			bool				_InitPreviousTracingData();

private:
			uint32				fMagic1;
			trace_entry*		fBuffer;
			trace_entry*		fFirstEntry;
			trace_entry*		fAfterLastEntry;
			uint32				fEntries;
			uint32				fMagic2;
			uint32				fEntriesEver;
			spinlock			fLock;
			char*				fTraceOutputBuffer;
			phys_addr_t			fPhysicalAddress;
			uint32				fMagic3;
};

static TracingMetaData sDummyTracingMetaData;
static TracingMetaData* sTracingMetaData = &sDummyTracingMetaData;
static bool sTracingDataRecovered = false;


// #pragma mark -


// #pragma mark - TracingMetaData


bool
TracingMetaData::Lock()
{
	acquire_spinlock(&fLock);
	return true;
}


void
TracingMetaData::Unlock()
{
	release_spinlock(&fLock);
}


trace_entry*
TracingMetaData::FirstEntry() const
{
	return fFirstEntry;
}


trace_entry*
TracingMetaData::AfterLastEntry() const
{
	return fAfterLastEntry;
}


uint32
TracingMetaData::Entries() const
{
	return fEntries;
}


uint32
TracingMetaData::EntriesEver() const
{
	return fEntriesEver;
}


void
TracingMetaData::IncrementEntriesEver()
{
	fEntriesEver++;
		// NOTE: Race condition on SMP machines! We should use atomic_add(),
		// though that costs some performance and the counter is purely
		// informational anyway.
}


char*
TracingMetaData::TraceOutputBuffer() const
{
	return fTraceOutputBuffer;
}


trace_entry*
TracingMetaData::NextEntry(trace_entry* entry)
{
	entry += entry->size;
	if ((entry->flags & WRAP_ENTRY) != 0)
		entry = fBuffer;

	if (entry == fAfterLastEntry)
		return NULL;

	return entry;
}


trace_entry*
TracingMetaData::PreviousEntry(trace_entry* entry)
{
	if (entry == fFirstEntry)
		return NULL;

	if (entry == fBuffer) {
		// beginning of buffer -- previous entry is a wrap entry
		entry = fBuffer + kBufferSize - entry->previous_size;
	}

	return entry - entry->previous_size;
}
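// Traversal sketch (illustrative, not used by the code itself): NextEntry()
// and PreviousEntry() treat the buffer as a ring and skip the WRAP_ENTRY
// marker, so all entries can be visited with:
//
//	for (trace_entry* entry = sTracingMetaData->FirstEntry(); entry != NULL;
//			entry = sTracingMetaData->NextEntry(entry)) {
//		// inspect *entry
//	}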


trace_entry*
TracingMetaData::AllocateEntry(size_t size, uint16 flags)
{
	if (fAfterLastEntry == NULL || size == 0
		|| size >= kMaxTracingEntryByteSize) {
		return NULL;
	}

	InterruptsSpinLocker _(fLock);

	size = (size + 3) >> 2;
		// 4 byte aligned, don't store the lower 2 bits

	TRACE(("AllocateEntry(%lu), start %p, end %p, buffer %p\n", size * 4,
		fFirstEntry, fAfterLastEntry, fBuffer));

	if (!_MakeSpace(size))
		return NULL;

	trace_entry* entry = fAfterLastEntry;
	entry->size = size;
	entry->flags = flags;
	fAfterLastEntry += size;
	fAfterLastEntry->previous_size = size;

	if (!(flags & BUFFER_ENTRY))
		fEntries++;

	TRACE(("  entry: %p, end %p, start %p, entries %ld\n", entry,
		fAfterLastEntry, fFirstEntry, fEntries));

	return entry;
}
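// Allocation sketch (mirrors TraceEntry::operator new and
// alloc_tracing_buffer() below; payloadSize is a placeholder): the payload
// lives directly behind the trace_entry header, so callers reserve room for
// both and step past the header:
//
//	trace_entry* entry = sTracingMetaData->AllocateEntry(
//		payloadSize + sizeof(trace_entry), 0);
//	void* payload = entry != NULL ? entry + 1 : NULL;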


bool
TracingMetaData::_FreeFirstEntry()
{
	TRACE(("  skip start %p, %lu*4 bytes\n", fFirstEntry, fFirstEntry->size));

	trace_entry* newFirst = NextEntry(fFirstEntry);

	if (fFirstEntry->flags & BUFFER_ENTRY) {
		// a buffer entry -- just skip it
	} else if (fFirstEntry->flags & ENTRY_INITIALIZED) {
		// Fully initialized TraceEntry: We could destroy it, but don't do so
		// for the sake of robustness. The destructors of tracing entry
		// classes should be empty anyway.
		fEntries--;
	} else {
		// Not fully initialized TraceEntry. We can't free it, since its
		// constructor might still write into the memory and overwrite data
		// of the entry we're going to allocate. We can't do anything until
		// this entry can be discarded.
		return false;
	}

	if (newFirst == NULL) {
		// everything is freed -- this can't happen in practice, if the
		// buffer is large enough to hold three max-sized entries
		fFirstEntry = fAfterLastEntry = fBuffer;
		TRACE(("_FreeFirstEntry(): all entries freed!\n"));
	} else
		fFirstEntry = newFirst;

	return true;
}


/*!	Makes sure we have needed * 4 bytes of memory at fAfterLastEntry.
	Returns \c false if unable to free that much.
*/
bool
TracingMetaData::_MakeSpace(size_t needed)
{
	// we need space for fAfterLastEntry, too (in case we need to wrap around
	// later)
	needed++;

	// If there's not enough space (free or occupied) after fAfterLastEntry,
	// we free all entries in that region and wrap around.
	if (fAfterLastEntry + needed > fBuffer + kBufferSize) {
		TRACE(("_MakeSpace(%lu), wrapping around: after last: %p\n", needed,
			fAfterLastEntry));

		// Free all entries after fAfterLastEntry and one more at the beginning
		// of the buffer.
		while (fFirstEntry > fAfterLastEntry) {
			if (!_FreeFirstEntry())
				return false;
		}
		if (fAfterLastEntry != fBuffer && !_FreeFirstEntry())
			return false;

		// just in case _FreeFirstEntry() freed the very last existing entry
		if (fAfterLastEntry == fBuffer)
			return true;

		// mark as wrap entry and actually wrap around
		trace_entry* wrapEntry = fAfterLastEntry;
		wrapEntry->size = 0;
		wrapEntry->flags = WRAP_ENTRY;
		fAfterLastEntry = fBuffer;
		fAfterLastEntry->previous_size = fBuffer + kBufferSize - wrapEntry;
	}

	if (fFirstEntry <= fAfterLastEntry) {
		// buffer is empty or the space after fAfterLastEntry is unoccupied
		return true;
	}

	// free the first entries, until there's enough space
	size_t space = fFirstEntry - fAfterLastEntry;

	if (space < needed) {
		TRACE(("_MakeSpace(%lu), left %ld\n", needed, space));
	}

	while (space < needed) {
		space += fFirstEntry->size;

		if (!_FreeFirstEntry())
			return false;
	}

	TRACE(("  out: start %p, entries %ld\n", fFirstEntry, fEntries));

	return true;
}
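// Ring layout sketch after a wrap (purely illustrative):
//
//	fBuffer         fAfterLastEntry        fFirstEntry          wrap entry
//	v               v                      v                    v
//	|= new entries =|........free..........|=== old entries ===|== WRAP ==|
//
// The wrap entry has size 0 and the WRAP_ENTRY flag; NextEntry() follows it
// back to fBuffer, whose previous_size records the distance from the wrap
// entry to the buffer end.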


/*static*/ status_t
TracingMetaData::Create(TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	area_id area;
	TracingMetaData* metaData;
	status_t error = _CreateMetaDataArea(true, area, metaData);
	if (error == B_OK) {
		if (metaData->_InitPreviousTracingData()) {
			_metaData = metaData;
			return B_OK;
		}

		dprintf("Found previous tracing meta data, but failed to init.\n");

		// invalidate the meta data
		metaData->fMagic1 = 0;
		metaData->fMagic2 = 0;
		metaData->fMagic3 = 0;
		delete_area(area);
	} else
		dprintf("No previous tracing meta data found.\n");

	// no previous tracing data found -- create new one
	error = _CreateMetaDataArea(false, area, metaData);
	if (error != B_OK)
		return error;

	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
		&virtualRestrictions, &physicalRestrictions,
		(void**)&metaData->fTraceOutputBuffer);
	if (area < 0)
		return area;

	// get the physical address
	physical_entry physicalEntry;
	if (get_memory_map(metaData->fTraceOutputBuffer, B_PAGE_SIZE,
			&physicalEntry, 1) == B_OK) {
		metaData->fPhysicalAddress = physicalEntry.address;
	} else {
		dprintf("TracingMetaData::Create(): failed to get physical address "
			"of tracing buffer\n");
		metaData->fPhysicalAddress = 0;
	}

	metaData->fBuffer = (trace_entry*)(metaData->fTraceOutputBuffer
		+ kTraceOutputBufferSize);
	metaData->fFirstEntry = metaData->fBuffer;
	metaData->fAfterLastEntry = metaData->fBuffer;

	metaData->fEntries = 0;
	metaData->fEntriesEver = 0;
	B_INITIALIZE_SPINLOCK(&metaData->fLock);

	metaData->fMagic1 = kMetaDataMagic1;
	metaData->fMagic2 = kMetaDataMagic2;
	metaData->fMagic3 = kMetaDataMagic3;

	_metaData = metaData;
	return B_OK;
}


/*static*/ status_t
TracingMetaData::_CreateMetaDataArea(bool findPrevious, area_id& _area,
	TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	TracingMetaData* metaData;
	phys_addr_t metaDataAddress = kMetaDataBaseAddress;
	for (; metaDataAddress <= kMetaDataBaseEndAddress;
			metaDataAddress += kMetaDataAddressIncrement) {
		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		physicalRestrictions.low_address = metaDataAddress;
		physicalRestrictions.high_address = metaDataAddress + B_PAGE_SIZE;
		area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata",
			B_PAGE_SIZE, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			CREATE_AREA_DONT_CLEAR, &virtualRestrictions,
			&physicalRestrictions, (void**)&metaData);
		if (area < 0)
			continue;

		if (!findPrevious) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		if (metaData->fMagic1 == kMetaDataMagic1
			&& metaData->fMagic2 == kMetaDataMagic2
			&& metaData->fMagic3 == kMetaDataMagic3) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		delete_area(area);
	}

	return B_ENTRY_NOT_FOUND;
}


bool
TracingMetaData::_InitPreviousTracingData()
{
	addr_t bufferStart
		= (addr_t)fTraceOutputBuffer + kTraceOutputBufferSize;
	addr_t bufferEnd = bufferStart + MAX_TRACE_SIZE;

	if (bufferStart > bufferEnd || (addr_t)fBuffer != bufferStart
		|| (addr_t)fFirstEntry % sizeof(trace_entry) != 0
		|| (addr_t)fFirstEntry < bufferStart
		|| (addr_t)fFirstEntry + sizeof(trace_entry) >= bufferEnd
		|| (addr_t)fAfterLastEntry % sizeof(trace_entry) != 0
		|| (addr_t)fAfterLastEntry < bufferStart
		|| (addr_t)fAfterLastEntry > bufferEnd
		|| fPhysicalAddress == 0) {
		dprintf("Failed to init tracing meta data: Sanity checks "
			"failed.\n");
		return false;
	}

	// re-map the previous tracing buffer
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address = fTraceOutputBuffer;
	virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fPhysicalAddress;
	physicalRestrictions.high_address = fPhysicalAddress
		+ ROUNDUP(kTraceOutputBufferSize + MAX_TRACE_SIZE, B_PAGE_SIZE);
	area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_CLEAR,
		&virtualRestrictions, &physicalRestrictions, NULL);
	if (area < 0) {
		dprintf("Failed to init tracing meta data: Mapping tracing log "
			"buffer failed: %s\n", strerror(area));
		return false;
	}

	dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n",
		fTraceOutputBuffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);

	// verify/repair the tracing entry list
	uint32 errorCount = 0;
	uint32 entryCount = 0;
	uint32 nonBufferEntryCount = 0;
	uint32 previousEntrySize = 0;
	trace_entry* entry = fFirstEntry;
	while (errorCount <= kMaxRecoveringErrorCount) {
		// check previous entry size
		if (entry->previous_size != previousEntrySize) {
			if (entry != fFirstEntry) {
				dprintf("ktrace recovering: entry %p: fixing previous_size: "
					"%lu (should be %lu)\n", entry, entry->previous_size,
					previousEntrySize);
				errorCount++;
			}
			entry->previous_size = previousEntrySize;
		}

		if (entry == fAfterLastEntry)
			break;

		// check size field
		if ((entry->flags & WRAP_ENTRY) == 0 && entry->size == 0) {
			dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
				entry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry->size > uint32(fBuffer + kBufferSize - entry)) {
			dprintf("ktrace recovering: entry %p: size too big: %lu\n", entry,
				entry->size);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry < fAfterLastEntry && entry + entry->size > fAfterLastEntry) {
			dprintf("ktrace recovering: entry %p: entry crosses "
				"fAfterLastEntry (%p)\n", entry, fAfterLastEntry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		// check for wrap entry
		if ((entry->flags & WRAP_ENTRY) != 0) {
			if ((uint32)(fBuffer + kBufferSize - entry)
					> kMaxTracingEntryByteSize / sizeof(trace_entry)) {
				dprintf("ktrace recovering: entry %p: wrap entry at invalid "
					"buffer location\n", entry);
				errorCount++;
			}

			if (entry->size != 0) {
				dprintf("ktrace recovering: entry %p: invalid wrap entry "
					"size: %lu\n", entry, entry->size);
				errorCount++;
				entry->size = 0;
			}

			previousEntrySize = fBuffer + kBufferSize - entry;
			entry = fBuffer;
			continue;
		}

		if ((entry->flags & BUFFER_ENTRY) == 0) {
			entry->flags |= CHECK_ENTRY;
			nonBufferEntryCount++;
		}

		entryCount++;
		previousEntrySize = entry->size;

		entry += entry->size;
	}

	if (errorCount > kMaxRecoveringErrorCount) {
		dprintf("ktrace recovering: Too many errors.\n");
		fAfterLastEntry = entry;
		fAfterLastEntry->previous_size = previousEntrySize;
	}

	dprintf("ktrace recovering: Recovered %lu entries + %lu buffer entries "
		"from previous session. Expected %lu entries.\n", nonBufferEntryCount,
		entryCount - nonBufferEntryCount, fEntries);
	fEntries = nonBufferEntryCount;

	B_INITIALIZE_SPINLOCK(&fLock);

	// TODO: Actually check the entries! Do that when first accessing the
	// tracing buffer from the kernel debugger (when sTracingDataRecovered is
	// true).
	sTracingDataRecovered = true;
	return true;
}


#endif	// ENABLE_TRACING


// #pragma mark -


TraceOutput::TraceOutput(char* buffer, size_t bufferSize, uint32 flags)
	: fBuffer(buffer),
	  fCapacity(bufferSize),
	  fFlags(flags)
{
	Clear();
}


void
TraceOutput::Clear()
{
	if (fCapacity > 0)
		fBuffer[0] = '\0';
	fSize = 0;
}


void
TraceOutput::Print(const char* format, ...)
{
#if ENABLE_TRACING
	if (IsFull())
		return;

	if (fSize < fCapacity) {
		va_list args;
		va_start(args, format);
		size_t length = vsnprintf(fBuffer + fSize, fCapacity - fSize, format,
			args);
		fSize += std::min(length, fCapacity - fSize - 1);
		va_end(args);
	}
#endif
}


void
TraceOutput::PrintStackTrace(tracing_stack_trace* stackTrace)
{
#if ENABLE_TRACING
	if (stackTrace == NULL || stackTrace->depth <= 0)
		return;

	for (int32 i = 0; i < stackTrace->depth; i++) {
		addr_t address = stackTrace->return_addresses[i];

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			Print("  %p  %s + 0x%lx (%s)%s\n", (void*)address, symbol,
				address - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			Print("  %p\n", (void*)address);
	}
#endif
}


void
TraceOutput::SetLastEntryTime(bigtime_t time)
{
	fLastEntryTime = time;
}


bigtime_t
TraceOutput::LastEntryTime() const
{
	return fLastEntryTime;
}


//	#pragma mark -


TraceEntry::TraceEntry()
{
}


TraceEntry::~TraceEntry()
{
}


void
TraceEntry::Dump(TraceOutput& out)
{
#if ENABLE_TRACING
	// to be overridden by subclasses
	out.Print("ENTRY %p", this);
#endif
}


void
TraceEntry::DumpStackTrace(TraceOutput& out)
{
}


void
TraceEntry::Initialized()
{
#if ENABLE_TRACING
	ToTraceEntry()->flags |= ENTRY_INITIALIZED;
	sTracingMetaData->IncrementEntriesEver();
#endif
}


void*
TraceEntry::operator new(size_t size, const std::nothrow_t&) throw()
{
#if ENABLE_TRACING
	trace_entry* entry = sTracingMetaData->AllocateEntry(
		size + sizeof(trace_entry), 0);
	return entry != NULL ? entry + 1 : NULL;
#endif
	return NULL;
}
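// Trace entries are always created with the nothrow placement form, e.g. (cf.
// ktrace_printf() at the end of this file):
//
//	new(std::nothrow) KernelTraceEntry(buffer);
//
// A NULL result silently drops the entry when the ring buffer cannot make
// room for it.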


//	#pragma mark -


AbstractTraceEntry::AbstractTraceEntry()
{
	struct thread* thread = thread_get_current_thread();
	if (thread != NULL) {
		fThread = thread->id;
		if (thread->team)
			fTeam = thread->team->id;
	}
	fTime = system_time();
}


AbstractTraceEntry::~AbstractTraceEntry()
{
}


void
AbstractTraceEntry::Dump(TraceOutput& out)
{
	bigtime_t time = (out.Flags() & TRACE_OUTPUT_DIFF_TIME)
		? fTime - out.LastEntryTime()
		: fTime;

	if (out.Flags() & TRACE_OUTPUT_TEAM_ID)
		out.Print("[%6ld:%6ld] %10Ld: ", fThread, fTeam, time);
	else
		out.Print("[%6ld] %10Ld: ", fThread, time);

	AddDump(out);

	out.SetLastEntryTime(fTime);
}
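// Example of a line produced by Dump() with TRACE_OUTPUT_TEAM_ID set (the
// thread ID, team ID, and time are illustrative values):
//
//	[   215:    35]  103906202: kern: starting page writer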


void
AbstractTraceEntry::AddDump(TraceOutput& out)
{
}


//	#pragma mark -


#if ENABLE_TRACING

class KernelTraceEntry : public AbstractTraceEntry {
	public:
		KernelTraceEntry(const char* message)
		{
			fMessage = alloc_tracing_buffer_strcpy(message, 256, false);

#if KTRACE_PRINTF_STACK_TRACE
			fStackTrace = capture_tracing_stack_trace(
				KTRACE_PRINTF_STACK_TRACE, 1, false);
#endif
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("kern: %s", fMessage);
		}

#if KTRACE_PRINTF_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

	private:
		char*	fMessage;
#if KTRACE_PRINTF_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};


class UserTraceEntry : public AbstractTraceEntry {
	public:
		UserTraceEntry(const char* message)
		{
			fMessage = alloc_tracing_buffer_strcpy(message, 256, true);

#if KTRACE_PRINTF_STACK_TRACE
			fStackTrace = capture_tracing_stack_trace(
				KTRACE_PRINTF_STACK_TRACE, 1, false);
#endif
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("user: %s", fMessage);
		}

#if KTRACE_PRINTF_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

	private:
		char*	fMessage;
#if KTRACE_PRINTF_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};


class TracingLogStartEntry : public AbstractTraceEntry {
	public:
		TracingLogStartEntry()
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("ktrace start");
		}
};

#endif	// ENABLE_TRACING


//	#pragma mark - trace filters


TraceFilter::~TraceFilter()
{
}


bool
TraceFilter::Filter(const TraceEntry* entry, LazyTraceOutput& out)
{
	return false;
}


class ThreadTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
	{
		const AbstractTraceEntry* entry
			= dynamic_cast<const AbstractTraceEntry*>(_entry);
		return (entry != NULL && entry->Thread() == fThread);
	}
};


class TeamTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
	{
		const AbstractTraceEntry* entry
			= dynamic_cast<const AbstractTraceEntry*>(_entry);
		return (entry != NULL && entry->Team() == fTeam);
	}
};


class PatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return strstr(out.DumpEntry(entry), fString) != NULL;
	}
};


class DecimalPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%Ld", fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};

class HexPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%Lx", fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};

class StringPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		if (IS_KERNEL_ADDRESS(fValue))
			return strstr(out.DumpEntry(entry), (const char*)fValue) != NULL;

		// TODO: this is *very* slow
		char buffer[64];
		user_strlcpy(buffer, (const char*)fValue, sizeof(buffer));
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};

class NotTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return !fSubFilters.first->Filter(entry, out);
	}
};


class AndTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			&& fSubFilters.second->Filter(entry, out);
	}
};


class OrTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			|| fSubFilters.second->Filter(entry, out);
	}
};


class TraceFilterParser {
public:
	static TraceFilterParser* Default()
	{
		return &sParser;
	}

	bool Parse(int argc, const char* const* argv)
	{
		fTokens = argv;
		fTokenCount = argc;
		fTokenIndex = 0;
		fFilterCount = 0;

		TraceFilter* filter = _ParseExpression();
		return fTokenIndex == fTokenCount && filter != NULL;
	}

	TraceFilter* Filter()
	{
		return &fFilters[0];
	}

private:
	TraceFilter* _ParseExpression()
	{
		const char* token = _NextToken();
		if (!token) {
			// unexpected end of expression
			return NULL;
		}

		if (fFilterCount == MAX_FILTERS) {
			// too many filters
			return NULL;
		}

		if (token[0] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				PatternTraceFilter;
			filter->fString = token + 1;
			return filter;
		} else if (token[0] == 'd' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				DecimalPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 'x' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				HexPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 's' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				StringPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (strcmp(token, "not") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) NotTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL)
				return filter;
			return NULL;
		} else if (strcmp(token, "and") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) AndTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "or") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) OrTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "thread") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				ThreadTraceFilter;
			filter->fThread = strtol(arg, NULL, 0);
			return filter;
		} else if (strcmp(token, "team") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				TeamTraceFilter;
			filter->fTeam = strtol(arg, NULL, 0);
			return filter;
		} else {
			// invalid token
			return NULL;
		}
	}

	const char* _CurrentToken() const
	{
		if (fTokenIndex >= 1 && fTokenIndex <= fTokenCount)
			return fTokens[fTokenIndex - 1];
		return NULL;
	}

	const char* _NextToken()
	{
		if (fTokenIndex >= fTokenCount)
			return NULL;
		return fTokens[fTokenIndex++];
	}

private:
	enum { MAX_FILTERS = 32 };

	const char* const*			fTokens;
	int							fTokenCount;
	int							fTokenIndex;
	TraceFilter					fFilters[MAX_FILTERS];
	int							fFilterCount;

	static TraceFilterParser	sParser;
};


TraceFilterParser TraceFilterParser::sParser;
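// Filter expressions use prefix notation, as implemented by
// _ParseExpression() above. Illustrative examples, as typed after the
// "traced" command's "filter" keyword (see tracing_init() below):
//
//	#page_fault              entries whose text contains "page_fault"
//	thread 42                entries logged by thread 42
//	and team 35 not #idle    entries of team 35 not containing "idle"
//	or d#1234 x#0x4d2        entries containing decimal 1234 or hex 4d2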


//	#pragma mark -


#if ENABLE_TRACING


TraceEntry*
TraceEntryIterator::Next()
{
	if (fIndex == 0) {
		fEntry = _NextNonBufferEntry(sTracingMetaData->FirstEntry());
		fIndex = 1;
	} else if (fEntry != NULL) {
		fEntry = _NextNonBufferEntry(sTracingMetaData->NextEntry(fEntry));
		fIndex++;
	}

	return Current();
}


TraceEntry*
TraceEntryIterator::Previous()
{
	if (fIndex == (int32)sTracingMetaData->Entries() + 1)
		fEntry = sTracingMetaData->AfterLastEntry();

	if (fEntry != NULL) {
		fEntry = _PreviousNonBufferEntry(
			sTracingMetaData->PreviousEntry(fEntry));
		fIndex--;
	}

	return Current();
}


TraceEntry*
TraceEntryIterator::MoveTo(int32 index)
{
	if (index == fIndex)
		return Current();

	if (index <= 0 || index > (int32)sTracingMetaData->Entries()) {
		fIndex = (index <= 0 ? 0 : sTracingMetaData->Entries() + 1);
		fEntry = NULL;
		return NULL;
	}

	// get the shortest iteration path
	int32 distance = index - fIndex;
	int32 direction = distance < 0 ? -1 : 1;
	distance *= direction;

	if (index < distance) {
		distance = index;
		direction = 1;
		fEntry = NULL;
		fIndex = 0;
	}
	if ((int32)sTracingMetaData->Entries() + 1 - fIndex < distance) {
		distance = sTracingMetaData->Entries() + 1 - fIndex;
		direction = -1;
		fEntry = NULL;
		fIndex = sTracingMetaData->Entries() + 1;
	}

	// iterate to the index
	if (direction < 0) {
		while (fIndex != index)
			Previous();
	} else {
		while (fIndex != index)
			Next();
	}

	return Current();
}


trace_entry*
TraceEntryIterator::_NextNonBufferEntry(trace_entry* entry)
{
	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
		entry = sTracingMetaData->NextEntry(entry);

	return entry;
}


trace_entry*
TraceEntryIterator::_PreviousNonBufferEntry(trace_entry* entry)
{
	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
		entry = sTracingMetaData->PreviousEntry(entry);

	return entry;
}


int
dump_tracing_internal(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
	int argi = 1;

	// variables in which we store our state to be continuable
	static int32 _previousCount = 0;
	static bool _previousHasFilter = false;
	static bool _previousPrintStackTrace = false;
	static int32 _previousMaxToCheck = 0;
	static int32 _previousFirstChecked = 1;
	static int32 _previousLastChecked = -1;
	static int32 _previousDirection = 1;
	static uint32 _previousEntriesEver = 0;
	static uint32 _previousEntries = 0;
	static uint32 _previousOutputFlags = 0;
	static TraceEntryIterator iterator;

	uint32 entriesEver = sTracingMetaData->EntriesEver();

	// Note: start and index are Pascal-like indices (i.e. in [1, Entries()]).
	int32 start = 0;	// special index: print the last count entries
	int32 count = 0;
	int32 maxToCheck = 0;
	int32 cont = 0;

	bool hasFilter = false;
	bool printStackTrace = false;

	uint32 outputFlags = 0;
	while (argi < argc) {
		if (strcmp(argv[argi], "--difftime") == 0) {
			outputFlags |= TRACE_OUTPUT_DIFF_TIME;
			argi++;
		} else if (strcmp(argv[argi], "--printteam") == 0) {
			outputFlags |= TRACE_OUTPUT_TEAM_ID;
			argi++;
		} else if (strcmp(argv[argi], "--stacktrace") == 0) {
			printStackTrace = true;
			argi++;
		} else
			break;
	}

	if (argi < argc) {
		if (strcmp(argv[argi], "forward") == 0) {
			cont = 1;
			argi++;
		} else if (strcmp(argv[argi], "backward") == 0) {
			cont = -1;
			argi++;
		}
	} else
		cont = _previousDirection;

	if (cont != 0) {
		if (argi < argc) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
		if (entriesEver == 0 || entriesEver != _previousEntriesEver
			|| sTracingMetaData->Entries() != _previousEntries) {
			kprintf("Can't continue iteration. \"%s\" has not been invoked "
				"before, or there were new entries written since the last "
				"invocation.\n", argv[0]);
			return 0;
		}
	}

	// get start, count, maxToCheck
	int32* params[3] = { &start, &count, &maxToCheck };
	for (int i = 0; i < 3 && !hasFilter && argi < argc; i++) {
		if (strcmp(argv[argi], "filter") == 0) {
			hasFilter = true;
			argi++;
		} else if (argv[argi][0] == '#') {
			hasFilter = true;
		} else {
			*params[i] = parse_expression(argv[argi]);
			argi++;
		}
	}

	// filter specification
	if (argi < argc) {
		hasFilter = true;
		if (strcmp(argv[argi], "filter") == 0)
			argi++;

		if (!TraceFilterParser::Default()->Parse(argc - argi, argv + argi)) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	int32 direction;
	int32 firstToCheck;
	int32 lastToCheck;

	if (cont != 0) {
		// get values from the previous iteration
		direction = cont;
		count = _previousCount;
		maxToCheck = _previousMaxToCheck;
		hasFilter = _previousHasFilter;
		outputFlags = _previousOutputFlags;
		printStackTrace = _previousPrintStackTrace;

		if (direction < 0)
			start = _previousFirstChecked - 1;
		else
			start = _previousLastChecked + 1;
	} else {
		// defaults for count and maxToCheck
		if (count == 0)
			count = 30;
		if (maxToCheck == 0 || !hasFilter)
			maxToCheck = count;
		else if (maxToCheck < 0)
			maxToCheck = sTracingMetaData->Entries();

		// determine iteration direction
		direction = (start <= 0 || count < 0 ? -1 : 1);

		// validate count and maxToCheck
		if (count < 0)
			count = -count;
		if (maxToCheck < 0)
			maxToCheck = -maxToCheck;
		if (maxToCheck > (int32)sTracingMetaData->Entries())
			maxToCheck = sTracingMetaData->Entries();
		if (count > maxToCheck)
			count = maxToCheck;

		// validate start
		if (start <= 0 || start > (int32)sTracingMetaData->Entries())
			start = max_c(1, sTracingMetaData->Entries());
	}

	if (direction < 0) {
		firstToCheck = max_c(1, start - maxToCheck + 1);
		lastToCheck = start;
	} else {
		firstToCheck = start;
		lastToCheck = min_c((int32)sTracingMetaData->Entries(),
			start + maxToCheck - 1);
	}

	// reset the iterator, if something changed in the meantime
	if (entriesEver == 0 || entriesEver != _previousEntriesEver
		|| sTracingMetaData->Entries() != _previousEntries) {
		iterator.Reset();
	}

	LazyTraceOutput out(sTracingMetaData->TraceOutputBuffer(),
		kTraceOutputBufferSize, outputFlags);

	bool markedMatching = false;
	int32 firstToDump = firstToCheck;
	int32 lastToDump = lastToCheck;

	TraceFilter* filter = NULL;
	if (hasFilter)
		filter = TraceFilterParser::Default()->Filter();

	if (wrapperFilter != NULL) {
		wrapperFilter->Init(filter, direction, cont != 0);
		filter = wrapperFilter;
	}

	if (direction < 0 && filter && lastToCheck - firstToCheck >= count) {
		// iteration direction is backwards
		markedMatching = true;

		// From the last entry to check iterate backwards to check filter
		// matches.
		int32 matching = 0;

		// move to the entry after the last entry to check
		iterator.MoveTo(lastToCheck + 1);

		// iterate backwards
		firstToDump = -1;
		lastToDump = -1;
		while (iterator.Index() > firstToCheck) {
			TraceEntry* entry = iterator.Previous();
			if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
				out.Clear();
				if (filter->Filter(entry, out)) {
					entry->ToTraceEntry()->flags |= FILTER_MATCH;
					if (lastToDump == -1)
						lastToDump = iterator.Index();
					firstToDump = iterator.Index();

					matching++;
					if (matching >= count)
						break;
				} else
					entry->ToTraceEntry()->flags &= ~FILTER_MATCH;
			}
		}

		firstToCheck = iterator.Index();

		// iterate to the previous entry, so that the next loop starts at the
		// right one
		iterator.Previous();
	}

	out.SetLastEntryTime(0);

	// set the iterator to the entry before the first one to dump
	iterator.MoveTo(firstToDump - 1);

	// dump the entries matching the filter in the range
	// [firstToDump, lastToDump]
	int32 dumped = 0;

	while (TraceEntry* entry = iterator.Next()) {
		int32 index = iterator.Index();
		if (index < firstToDump)
			continue;
		if (index > lastToDump || dumped >= count) {
			if (direction > 0)
				lastToCheck = index - 1;
			break;
		}

		if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
			out.Clear();
			if (filter && (markedMatching
					? (entry->Flags() & FILTER_MATCH) == 0
					: !filter->Filter(entry, out))) {
				continue;
			}

			// don't print trailing new line
			const char* dump = out.DumpEntry(entry);
			int len = strlen(dump);
			if (len > 0 && dump[len - 1] == '\n')
				len--;

			kprintf("%5ld. %.*s\n", index, len, dump);

			if (printStackTrace) {
				out.Clear();
				entry->DumpStackTrace(out);
				if (out.Size() > 0)
					kputs(out.Buffer());
			}
		} else if (!filter)
			kprintf("%5ld. ** uninitialized entry **\n", index);

		dumped++;
	}

	kprintf("printed %ld entries within range %ld to %ld (%ld of %ld total, "
		"%ld ever)\n", dumped, firstToCheck, lastToCheck,
		lastToCheck - firstToCheck + 1, sTracingMetaData->Entries(),
		entriesEver);

	// store iteration state
	_previousCount = count;
	_previousMaxToCheck = maxToCheck;
	_previousHasFilter = hasFilter;
	_previousPrintStackTrace = printStackTrace;
	_previousFirstChecked = firstToCheck;
	_previousLastChecked = lastToCheck;
	_previousDirection = direction;
	_previousEntriesEver = entriesEver;
	_previousEntries = sTracingMetaData->Entries();
	_previousOutputFlags = outputFlags;

	return cont != 0 ? B_KDEBUG_CONT : 0;
}


static int
dump_tracing_command(int argc, char** argv)
{
	return dump_tracing_internal(argc, argv, NULL);
}


#endif	// ENABLE_TRACING


extern "C" uint8*
alloc_tracing_buffer(size_t size)
{
#if	ENABLE_TRACING
	trace_entry* entry = sTracingMetaData->AllocateEntry(
		size + sizeof(trace_entry), BUFFER_ENTRY);
	if (entry == NULL)
		return NULL;

	return (uint8*)(entry + 1);
#else
	return NULL;
#endif
}


uint8*
alloc_tracing_buffer_memcpy(const void* source, size_t size, bool user)
{
	if (user && !IS_USER_ADDRESS(source))
		return NULL;

	uint8* buffer = alloc_tracing_buffer(size);
	if (buffer == NULL)
		return NULL;

	if (user) {
		if (user_memcpy(buffer, source, size) != B_OK)
			return NULL;
	} else
		memcpy(buffer, source, size);

	return buffer;
}


char*
alloc_tracing_buffer_strcpy(const char* source, size_t maxSize, bool user)
{
	if (source == NULL || maxSize == 0)
		return NULL;

	if (user && !IS_USER_ADDRESS(source))
		return NULL;

	// limit maxSize to the actual source string len
	if (user) {
		ssize_t size = user_strlcpy(NULL, source, 0);
			// there's no user_strnlen()
		if (size < 0)
			return NULL;
		maxSize = min_c(maxSize, (size_t)size + 1);
	} else
		maxSize = strnlen(source, maxSize - 1) + 1;

	char* buffer = (char*)alloc_tracing_buffer(maxSize);
	if (buffer == NULL)
		return NULL;

	if (user) {
		if (user_strlcpy(buffer, source, maxSize) < B_OK)
			return NULL;
	} else
		strlcpy(buffer, source, maxSize);

	return buffer;
}


tracing_stack_trace*
capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool kernelOnly)
{
#if	ENABLE_TRACING
	// page_fault_exception() doesn't allow us to gracefully handle a bad
	// address in the stack trace, if interrupts are disabled, so we always
	// restrict the stack traces to the kernel only in this case. A bad address
	// in the kernel stack trace would still cause a panic(), but this is
	// probably even desired.
	if (!are_interrupts_enabled())
		kernelOnly = true;

	tracing_stack_trace* stackTrace
		= (tracing_stack_trace*)alloc_tracing_buffer(
			sizeof(tracing_stack_trace) + maxCount * sizeof(addr_t));

	if (stackTrace != NULL) {
		stackTrace->depth = arch_debug_get_stack_trace(
			stackTrace->return_addresses, maxCount, 0, skipFrames + 1,
			STACK_TRACE_KERNEL | (kernelOnly ? 0 : STACK_TRACE_USER));
	}

	return stackTrace;
#else
	return NULL;
#endif
}
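// Usage sketch (cf. KernelTraceEntry above; the frame count 5 is arbitrary):
//
//	tracing_stack_trace* trace = capture_tracing_stack_trace(5, 1, false);
//
// The trace is stored in the tracing buffer as a BUFFER_ENTRY; a NULL return
// simply means no buffer space was available.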


int
dump_tracing(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
#if	ENABLE_TRACING
	return dump_tracing_internal(argc, argv, wrapperFilter);
#else
	return 0;
#endif
}


void
lock_tracing_buffer()
{
#if ENABLE_TRACING
	sTracingMetaData->Lock();
#endif
}


void
unlock_tracing_buffer()
{
#if ENABLE_TRACING
	sTracingMetaData->Unlock();
#endif
}


extern "C" status_t
tracing_init(void)
{
#if	ENABLE_TRACING
	status_t result = TracingMetaData::Create(sTracingMetaData);
	if (result != B_OK) {
		sTracingMetaData = &sDummyTracingMetaData;
		return result;
	}

	new(nothrow) TracingLogStartEntry();

	add_debugger_command_etc("traced", &dump_tracing_command,
		"Dump recorded trace entries",
		"[ --printteam ] [ --difftime ] [ --stacktrace ] "
			"(\"forward\" | \"backward\") "
			"| ([ <start> [ <count> [ <range> ] ] ] "
			"[ #<pattern> | (\"filter\" <filter>) ])\n"
		"Prints recorded trace entries. If \"backward\" or \"forward\" is\n"
		"specified, the command continues where the previous invocation left\n"
		"off, i.e. printing the previous or next entries, respectively (as\n"
		"many as printed before). In this case the command is continuable:\n"
		"entering an empty line in the debugger afterwards will reinvoke it.\n"
		"If no arguments are given, the command continues in the direction\n"
		"of the last invocation.\n"
		"--printteam  - enables printing the entries' team IDs.\n"
		"--difftime   - print difference times for all but the first entry.\n"
		"--stacktrace - print stack traces for entries that captured one.\n"
		"  <start>    - The base index of the entries to print. Depending on\n"
		"               whether the iteration direction is forward or\n"
		"               backward this will be the first or last entry printed\n"
		"               (potentially, if a filter is specified). The index of\n"
		"               the first entry in the trace buffer is 1. If 0 is\n"
		"               specified, the last <count> recorded entries are\n"
		"               printed (iteration direction is backward). Defaults\n"
		"               to 0.\n"
		"  <count>    - The number of entries to be printed. Defaults to 30.\n"
		"               If negative, the -<count> entries before and\n"
		"               including <start> will be printed.\n"
		"  <range>    - Only relevant if a filter is specified. Specifies the\n"
		"               number of entries to be filtered -- depending on the\n"
		"               iteration direction the entries before or after\n"
		"               <start>. If more than <count> entries match the\n"
		"               filter, only the first (forward) or last (backward)\n"
		"               <count> matching entries will be printed. If 0 is\n"
		"               specified <range> will be set to <count>. If -1,\n"
		"               <range> will be set to the number of recorded\n"
		"               entries.\n"
		"  <pattern>  - If specified only entries containing this string are\n"
		"               printed.\n"
		"  <filter>   - If specified only entries matching this filter\n"
		"               expression are printed. The expression can consist of\n"
		"               prefix operators \"not\", \"and\", \"or\", and\n"
		"               filters \"'thread' <thread>\" (matching entries\n"
		"               with the given thread ID), \"'team' <team>\"\n"
		"               (matching entries with the given team ID), and\n"
		"               \"#<pattern>\" (matching entries containing the given\n"
		"               string).\n", 0);
#endif	// ENABLE_TRACING
	return B_OK;
}
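// Example "traced" invocations in the kernel debugger, derived from the usage
// text above (illustrative):
//
//	traced                     print the last 30 entries
//	traced 0 10                print the last 10 entries
//	traced --stacktrace 100 20 print entries 100 to 119 with stack traces
//	traced 0 10 -1 filter and thread 42 #vm
//	traced forward             continue where the last invocation left off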


void
ktrace_printf(const char *format, ...)
{
#if	ENABLE_TRACING
	va_list list;
	va_start(list, format);

	char buffer[256];
	vsnprintf(buffer, sizeof(buffer), format, list);

	va_end(list);

	new(nothrow) KernelTraceEntry(buffer);
#endif	// ENABLE_TRACING
}
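// Usage sketch (hypothetical call site): the formatted message is stored as a
// KernelTraceEntry and later shows up in "traced" output as "kern: ...":
//
//	ktrace_printf("vm: mapping area %ld at %p", id, address);
//
// Messages are truncated to 256 bytes, both by the local buffer here and by
// the alloc_tracing_buffer_strcpy() call in the KernelTraceEntry constructor.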


void
_user_ktrace_output(const char *message)
{
#if	ENABLE_TRACING
	new(nothrow) UserTraceEntry(message);
#endif	// ENABLE_TRACING
}