xref: /haiku/src/system/kernel/debug/tracing.cpp (revision 0044a8c39ab5721051b6279506d1a8c511e20453)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2008-2009, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 
8 #include <tracing.h>
9 
10 #include <stdarg.h>
11 #include <stdlib.h>
12 
13 #include <algorithm>
14 
15 #include <arch/debug.h>
16 #include <debug.h>
17 #include <elf.h>
18 #include <int.h>
19 #include <kernel.h>
20 #include <team.h>
21 #include <thread.h>
22 #include <util/AutoLock.h>
23 #include <vm/vm.h>
24 
25 
// Variable-size stack trace record: exactly `depth` return addresses follow
// the header directly (zero-length array — GNU flexible array extension).
struct tracing_stack_trace {
	int32	depth;
	addr_t	return_addresses[0];
};
30 
31 
32 #if ENABLE_TRACING
33 
34 //#define TRACE_TRACING
35 #ifdef TRACE_TRACING
36 #	define TRACE(x) dprintf_no_syslog x
37 #else
38 #	define TRACE(x) ;
39 #endif
40 
41 
// Flag bits stored in trace_entry::flags.
enum {
	WRAP_ENTRY			= 0x01,	// dummy entry marking the ring buffer wrap
	ENTRY_INITIALIZED	= 0x02,	// TraceEntry construction has completed
	BUFFER_ENTRY		= 0x04,	// raw buffer allocation, not a TraceEntry
	FILTER_MATCH		= 0x08,	// entry matched the active debugger filter
	INVALID_ENTRY		= 0x10,
	CHECK_ENTRY			= 0x20,	// recovered entry, not yet validated
};
50 
51 
// size of the text buffer used to render entries for output
static const size_t kTraceOutputBufferSize = 10240;
// ring buffer capacity in trace_entry units
static const size_t kBufferSize = MAX_TRACE_SIZE / sizeof(trace_entry);

// give up recovering a previous session after this many repaired errors
static const uint32 kMaxRecoveringErrorCount	= 100;
// physical address window probed for a previous session's meta data page
static const addr_t kMetaDataBaseAddress		= 32 * 1024 * 1024;
static const addr_t kMetaDataBaseEndAddress		= 128 * 1024 * 1024;
static const addr_t kMetaDataAddressIncrement	= 8 * 1024 * 1024;
// "ValidTracing", split across the three magic fields of TracingMetaData
static const uint32 kMetaDataMagic1 = 'Vali';
static const uint32 kMetaDataMagic2 = 'dTra';
static const uint32 kMetaDataMagic3 = 'cing';

// the maximum we can address with the trace_entry::[previous_]size fields
static const size_t kMaxTracingEntryByteSize
	= ((1 << 13) - 1) * sizeof(trace_entry);
66 
67 
// Bookkeeping for the tracing ring buffer. An instance lives in its own
// physical memory area so that after a (warm) reboot the previous session's
// buffer can be located again (see _CreateMetaDataArea()). The three magic
// fields bracket the other members; all three being intact identifies a
// valid instance from a previous session.
class TracingMetaData {
public:
	static	status_t			Create(TracingMetaData*& _metaData);

	inline	bool				Lock();
	inline	void				Unlock();

	inline	trace_entry*		FirstEntry() const;
	inline	trace_entry*		AfterLastEntry() const;

	inline	uint32				Entries() const;
	inline	uint32				EntriesEver() const;

	inline	void				IncrementEntriesEver();

	inline	char*				TraceOutputBuffer() const;

			trace_entry*		NextEntry(trace_entry* entry);
			trace_entry*		PreviousEntry(trace_entry* entry);

			trace_entry*		AllocateEntry(size_t size, uint16 flags);

private:
			bool				_FreeFirstEntry();
			bool				_MakeSpace(size_t needed);

	static	status_t			_CreateMetaDataArea(bool findPrevious,
									area_id& _area,
									TracingMetaData*& _metaData);
			bool				_InitPreviousTracingData();

private:
			uint32				fMagic1;
			trace_entry*		fBuffer;			// ring buffer start
			trace_entry*		fFirstEntry;		// oldest entry
			trace_entry*		fAfterLastEntry;	// one past newest entry
			uint32				fEntries;			// non-buffer entry count
			uint32				fMagic2;
			uint32				fEntriesEver;		// total ever allocated
			spinlock			fLock;
			char*				fTraceOutputBuffer;	// rendering buffer
			phys_addr_t			fPhysicalAddress;	// of output buffer page
			uint32				fMagic3;
};
112 
// Points at the dummy until Create() succeeds; the dummy's NULL
// fAfterLastEntry makes AllocateEntry() bail out harmlessly.
static TracingMetaData sDummyTracingMetaData;
static TracingMetaData* sTracingMetaData = &sDummyTracingMetaData;
static bool sTracingDataRecovered = false;
116 
117 
118 // #pragma mark -
119 
120 
121 // #pragma mark - TracingMetaData
122 
123 
/*!	Acquires the tracing spinlock. Always returns \c true (the return value
	exists for use in conditional expressions).
*/
bool
TracingMetaData::Lock()
{
	acquire_spinlock(&fLock);
	return true;
}
130 
131 
/*!	Releases the tracing spinlock acquired by Lock(). */
void
TracingMetaData::Unlock()
{
	release_spinlock(&fLock);
}
137 
138 
/*!	Returns the oldest entry still in the ring buffer. */
trace_entry*
TracingMetaData::FirstEntry() const
{
	return fFirstEntry;
}
144 
145 
/*!	Returns the position one past the newest entry. */
trace_entry*
TracingMetaData::AfterLastEntry() const
{
	return fAfterLastEntry;
}
151 
152 
/*!	Returns the number of non-buffer entries currently in the buffer. */
uint32
TracingMetaData::Entries() const
{
	return fEntries;
}
158 
159 
/*!	Returns the total number of entries ever created (including ones that
	have already been overwritten).
*/
uint32
TracingMetaData::EntriesEver() const
{
	return fEntriesEver;
}
165 
166 
/*!	Bumps the lifetime entry counter (called from TraceEntry::Initialized()).
*/
void
TracingMetaData::IncrementEntriesEver()
{
	fEntriesEver++;
		// NOTE: Race condition on SMP machines! We should use atomic_add(),
		// though that costs some performance and the information is for
		// informational purpose anyway.
}
175 
176 
/*!	Returns the text buffer used to render entries for output. */
char*
TracingMetaData::TraceOutputBuffer() const
{
	return fTraceOutputBuffer;
}
182 
183 
/*!	Returns the entry following \a entry, or \c NULL when the end of the
	used buffer is reached. Transparently skips the wrap entry at the end of
	the ring buffer.
*/
trace_entry*
TracingMetaData::NextEntry(trace_entry* entry)
{
	entry += entry->size;
	if ((entry->flags & WRAP_ENTRY) != 0)
		entry = fBuffer;

	if (entry == fAfterLastEntry)
		return NULL;

	return entry;
}
196 
197 
/*!	Returns the entry preceding \a entry, or \c NULL if \a entry is the
	oldest one. Walks the previous_size back-links that AllocateEntry() and
	_MakeSpace() maintain.
*/
trace_entry*
TracingMetaData::PreviousEntry(trace_entry* entry)
{
	if (entry == fFirstEntry)
		return NULL;

	if (entry == fBuffer) {
		// beginning of buffer -- previous entry is a wrap entry
		entry = fBuffer + kBufferSize - entry->previous_size;
	}

	return entry - entry->previous_size;
}
211 
212 
/*!	Allocates an entry of \a size bytes (header included) from the ring
	buffer, evicting the oldest entries if necessary. \a flags is stored in
	the new entry (e.g. BUFFER_ENTRY).
	Returns \c NULL when tracing is not initialized (dummy meta data has a
	NULL fAfterLastEntry), \a size is 0, or \a size exceeds what the 13 bit
	trace_entry size field can represent (cf. kMaxTracingEntryByteSize).
*/
trace_entry*
TracingMetaData::AllocateEntry(size_t size, uint16 flags)
{
	if (fAfterLastEntry == NULL || size == 0
		|| size >= kMaxTracingEntryByteSize) {
		return NULL;
	}

	InterruptsSpinLocker _(fLock);

	// convert bytes to a count of trace_entry units, rounding up
	size = (size + 3) >> 2;
		// 4 byte aligned, don't store the lower 2 bits

	TRACE(("AllocateEntry(%lu), start %p, end %p, buffer %p\n", size * 4,
		fFirstEntry, fAfterLastEntry, fBuffer));

	if (!_MakeSpace(size))
		return NULL;

	trace_entry* entry = fAfterLastEntry;
	entry->size = size;
	entry->flags = flags;
	fAfterLastEntry += size;
	// the slot after the new entry links back to it
	fAfterLastEntry->previous_size = size;

	if (!(flags & BUFFER_ENTRY))
		fEntries++;

	TRACE(("  entry: %p, end %p, start %p, entries %ld\n", entry,
		fAfterLastEntry, fFirstEntry, fEntries));

	return entry;
}
246 
247 
/*!	Drops the oldest entry to make room for a new one. Returns \c false if
	the oldest entry is a not yet fully constructed TraceEntry, which must
	not be reused while its constructor may still write to it.
*/
bool
TracingMetaData::_FreeFirstEntry()
{
	TRACE(("  skip start %p, %lu*4 bytes\n", fFirstEntry, fFirstEntry->size));

	trace_entry* newFirst = NextEntry(fFirstEntry);

	if (fFirstEntry->flags & BUFFER_ENTRY) {
		// a buffer entry -- just skip it
	} else if (fFirstEntry->flags & ENTRY_INITIALIZED) {
		// Fully initialized TraceEntry: We could destroy it, but don't do so
		// for sake of robustness. The destructors of tracing entry classes
		// should be empty anyway.
		fEntries--;
	} else {
		// Not fully initialized TraceEntry. We can't free it, since
		// then it's constructor might still write into the memory and
		// overwrite data of the entry we're going to allocate.
		// We can't do anything until this entry can be discarded.
		return false;
	}

	if (newFirst == NULL) {
		// everything is freed -- practically this can't happen, if
		// the buffer is large enough to hold three max-sized entries
		fFirstEntry = fAfterLastEntry = fBuffer;
		TRACE(("_FreeFirstEntry(): all entries freed!\n"));
	} else
		fFirstEntry = newFirst;

	return true;
}
280 
281 
/*!	Makes sure we have needed * 4 bytes of memory at fAfterLastEntry.
	Returns \c false, if unable to free that much.
*/
bool
TracingMetaData::_MakeSpace(size_t needed)
{
	// we need space for fAfterLastEntry, too (in case we need to wrap around
	// later)
	needed++;

	// If there's not enough space (free or occupied) after fAfterLastEntry,
	// we free all entries in that region and wrap around.
	if (fAfterLastEntry + needed > fBuffer + kBufferSize) {
		TRACE(("_MakeSpace(%lu), wrapping around: after last: %p\n", needed,
			fAfterLastEntry));

		// Free all entries after fAfterLastEntry and one more at the beginning
		// of the buffer.
		while (fFirstEntry > fAfterLastEntry) {
			if (!_FreeFirstEntry())
				return false;
		}
		if (fAfterLastEntry != fBuffer && !_FreeFirstEntry())
			return false;

		// just in case _FreeFirstEntry() freed the very last existing entry
		if (fAfterLastEntry == fBuffer)
			return true;

		// mark as wrap entry and actually wrap around
		trace_entry* wrapEntry = fAfterLastEntry;
		wrapEntry->size = 0;
		wrapEntry->flags = WRAP_ENTRY;
		fAfterLastEntry = fBuffer;
		// back-link from the buffer start to the wrap entry
		fAfterLastEntry->previous_size = fBuffer + kBufferSize - wrapEntry;
	}

	if (fFirstEntry <= fAfterLastEntry) {
		// buffer is empty or the space after fAfterLastEntry is unoccupied
		return true;
	}

	// free the first entries, until there's enough space
	size_t space = fFirstEntry - fAfterLastEntry;

	if (space < needed) {
		TRACE(("_MakeSpace(%lu), left %ld\n", needed, space));
	}

	while (space < needed) {
		space += fFirstEntry->size;

		if (!_FreeFirstEntry())
			return false;
	}

	TRACE(("  out: start %p, entries %ld\n", fFirstEntry, fEntries));

	return true;
}
342 
343 
344 /*static*/ status_t
345 TracingMetaData::Create(TracingMetaData*& _metaData)
346 {
347 	// search meta data in memory (from previous session)
348 	area_id area;
349 	TracingMetaData* metaData;
350 	status_t error = _CreateMetaDataArea(true, area, metaData);
351 	if (error == B_OK) {
352 		if (metaData->_InitPreviousTracingData()) {
353 			_metaData = metaData;
354 			return B_OK;
355 		}
356 
357 		dprintf("Found previous tracing meta data, but failed to init.\n");
358 
359 		// invalidate the meta data
360 		metaData->fMagic1 = 0;
361 		metaData->fMagic2 = 0;
362 		metaData->fMagic3 = 0;
363 		delete_area(area);
364 	} else
365 		dprintf("No previous tracing meta data found.\n");
366 
367 	// no previous tracing data found -- create new one
368 	error = _CreateMetaDataArea(false, area, metaData);
369 	if (error != B_OK)
370 		return error;
371 
372 	virtual_address_restrictions virtualRestrictions = {};
373 	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
374 	physical_address_restrictions physicalRestrictions = {};
375 	area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
376 		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
377 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
378 		&virtualRestrictions, &physicalRestrictions,
379 		(void**)&metaData->fTraceOutputBuffer);
380 	if (area < 0)
381 		return area;
382 
383 	// get the physical address
384 	physical_entry physicalEntry;
385 	if (get_memory_map(metaData->fTraceOutputBuffer, B_PAGE_SIZE,
386 			&physicalEntry, 1) == B_OK) {
387 		metaData->fPhysicalAddress = physicalEntry.address;
388 	} else {
389 		dprintf("TracingMetaData::Create(): failed to get physical address "
390 			"of tracing buffer\n");
391 		metaData->fPhysicalAddress = 0;
392 	}
393 
394 	metaData->fBuffer = (trace_entry*)(metaData->fTraceOutputBuffer
395 		+ kTraceOutputBufferSize);
396 	metaData->fFirstEntry = metaData->fBuffer;
397 	metaData->fAfterLastEntry = metaData->fBuffer;
398 
399 	metaData->fEntries = 0;
400 	metaData->fEntriesEver = 0;
401 	B_INITIALIZE_SPINLOCK(&metaData->fLock);
402 
403 	metaData->fMagic1 = kMetaDataMagic1;
404 	metaData->fMagic2 = kMetaDataMagic2;
405 	metaData->fMagic3 = kMetaDataMagic3;
406 
407 	_metaData = metaData;
408 	return B_OK;
409 }
410 
411 
/*!	Creates an area for the tracing meta data at a fixed physical location.
	Probes the physical addresses kMetaDataBaseAddress ..
	kMetaDataBaseEndAddress in kMetaDataAddressIncrement steps. With
	\a findPrevious the first page whose three magic fields match is returned
	(a previous session's meta data); otherwise the first page that could be
	mapped at all is used.
	\return \c B_OK with \a _area/\a _metaData set, or \c B_ENTRY_NOT_FOUND
*/
/*static*/ status_t
TracingMetaData::_CreateMetaDataArea(bool findPrevious, area_id& _area,
	TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	TracingMetaData* metaData;
	phys_addr_t metaDataAddress = kMetaDataBaseAddress;
	for (; metaDataAddress <= kMetaDataBaseEndAddress;
			metaDataAddress += kMetaDataAddressIncrement) {
		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		physicalRestrictions.low_address = metaDataAddress;
		physicalRestrictions.high_address = metaDataAddress + B_PAGE_SIZE;
		// CREATE_AREA_DONT_CLEAR: a previous session's data must survive
		area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata",
			B_PAGE_SIZE, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			CREATE_AREA_DONT_CLEAR, &virtualRestrictions, &physicalRestrictions,
			(void**)&metaData);
		if (area < 0)
			continue;

		if (!findPrevious) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		if (metaData->fMagic1 == kMetaDataMagic1
			&& metaData->fMagic2 == kMetaDataMagic2
			&& metaData->fMagic3 == kMetaDataMagic3) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		delete_area(area);
	}

	return B_ENTRY_NOT_FOUND;
}
452 
453 
/*!	Tries to re-attach and repair the tracing buffer of a previous session.
	NOTE: currently disabled -- the unconditional \c return \c false below
	makes all recovery code that follows dead; it is kept for when the TODO
	is addressed.
	\return \c true if the previous data was successfully taken over
*/
bool
TracingMetaData::_InitPreviousTracingData()
{
	// TODO: ATM re-attaching the previous tracing buffer doesn't work very
	// well. The entries should be checked more thoroughly for validity -- e.g.
	// the pointers to the entries' vtable pointers could be invalid, which can
	// make the "traced" command quite unusable. The validity of the entries
	// could be checked in a safe environment (i.e. with a fault handler) with
	// typeid() and call of a virtual function.
	return false;

	addr_t bufferStart
		= (addr_t)fTraceOutputBuffer + kTraceOutputBufferSize;
	addr_t bufferEnd = bufferStart + MAX_TRACE_SIZE;

	// sanity-check the recovered pointers before dereferencing anything
	if (bufferStart > bufferEnd || (addr_t)fBuffer != bufferStart
		|| (addr_t)fFirstEntry % sizeof(trace_entry) != 0
		|| (addr_t)fFirstEntry < bufferStart
		|| (addr_t)fFirstEntry + sizeof(trace_entry) >= bufferEnd
		|| (addr_t)fAfterLastEntry % sizeof(trace_entry) != 0
		|| (addr_t)fAfterLastEntry < bufferStart
		|| (addr_t)fAfterLastEntry > bufferEnd
		|| fPhysicalAddress == 0) {
		dprintf("Failed to init tracing meta data: Sanity checks "
			"failed.\n");
		return false;
	}

	// re-map the previous tracing buffer at its old virtual address
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address = fTraceOutputBuffer;
	virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fPhysicalAddress;
	physicalRestrictions.high_address = fPhysicalAddress
		+ ROUNDUP(kTraceOutputBufferSize + MAX_TRACE_SIZE, B_PAGE_SIZE);
	area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_CLEAR,
		&virtualRestrictions, &physicalRestrictions, NULL);
	if (area < 0) {
		dprintf("Failed to init tracing meta data: Mapping tracing log "
			"buffer failed: %s\n", strerror(area));
		return false;
	}

	dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n",
		fTraceOutputBuffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);

	// verify/repair the tracing entry list
	uint32 errorCount = 0;
	uint32 entryCount = 0;
	uint32 nonBufferEntryCount = 0;
	uint32 previousEntrySize = 0;
	trace_entry* entry = fFirstEntry;
	while (errorCount <= kMaxRecoveringErrorCount) {
		// check previous entry size
		if (entry->previous_size != previousEntrySize) {
			if (entry != fFirstEntry) {
				dprintf("ktrace recovering: entry %p: fixing previous_size "
					"size: %lu (should be %lu)\n", entry, entry->previous_size,
					previousEntrySize);
				errorCount++;
			}
			entry->previous_size = previousEntrySize;
		}

		if (entry == fAfterLastEntry)
			break;

		// check size field
		if ((entry->flags & WRAP_ENTRY) == 0 && entry->size == 0) {
			dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
				entry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry->size > uint32(fBuffer + kBufferSize - entry)) {
			dprintf("ktrace recovering: entry %p: size too big: %lu\n", entry,
				entry->size);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry < fAfterLastEntry && entry + entry->size > fAfterLastEntry) {
			dprintf("ktrace recovering: entry %p: entry crosses "
				"fAfterLastEntry (%p)\n", entry, fAfterLastEntry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		// check for wrap entry
		if ((entry->flags & WRAP_ENTRY) != 0) {
			if ((uint32)(fBuffer + kBufferSize - entry)
					> kMaxTracingEntryByteSize / sizeof(trace_entry)) {
				dprintf("ktrace recovering: entry %p: wrap entry at invalid "
					"buffer location\n", entry);
				errorCount++;
			}

			if (entry->size != 0) {
				dprintf("ktrace recovering: entry %p: invalid wrap entry "
					"size: %lu\n", entry, entry->size);
				errorCount++;
				entry->size = 0;
			}

			previousEntrySize = fBuffer + kBufferSize - entry;
			entry = fBuffer;
			continue;
		}

		if ((entry->flags & BUFFER_ENTRY) == 0) {
			// mark real entries for later validation by the debugger
			entry->flags |= CHECK_ENTRY;
			nonBufferEntryCount++;
		}

		entryCount++;
		previousEntrySize = entry->size;

		entry += entry->size;
	}

	if (errorCount > kMaxRecoveringErrorCount) {
		// too broken to continue -- truncate the list at the current entry
		dprintf("ktrace recovering: Too many errors.\n");
		fAfterLastEntry = entry;
		fAfterLastEntry->previous_size = previousEntrySize;
	}

	dprintf("ktrace recovering: Recovered %lu entries + %lu buffer entries "
		"from previous session. Expected %lu entries.\n", nonBufferEntryCount,
		entryCount - nonBufferEntryCount, fEntries);
	fEntries = nonBufferEntryCount;

	B_INITIALIZE_SPINLOCK(&fLock);

	// TODO: Actually check the entries! Do that when first accessing the
	// tracing buffer from the kernel debugger (when sTracingDataRecovered is
	// true).
	sTracingDataRecovered = true;
	return true;
}
600 
601 
602 #endif	// ENABLE_TRACING
603 
604 
605 // #pragma mark -
606 
607 
/*!	Creates a trace output wrapper around \a buffer of \a bufferSize bytes.
	\a flags are TRACE_OUTPUT_* options (e.g. TRACE_OUTPUT_TEAM_ID).
*/
TraceOutput::TraceOutput(char* buffer, size_t bufferSize, uint32 flags)
	: fBuffer(buffer),
	  fCapacity(bufferSize),
	  fFlags(flags)
{
	Clear();
}
615 
616 
617 void
618 TraceOutput::Clear()
619 {
620 	if (fCapacity > 0)
621 		fBuffer[0] = '\0';
622 	fSize = 0;
623 }
624 
625 
626 void
627 TraceOutput::Print(const char* format,...)
628 {
629 #if ENABLE_TRACING
630 	if (IsFull())
631 		return;
632 
633 	if (fSize < fCapacity) {
634 		va_list args;
635 		va_start(args, format);
636 		size_t length = vsnprintf(fBuffer + fSize, fCapacity - fSize, format,
637 			args);
638 		fSize += std::min(length, fCapacity - fSize - 1);
639 		va_end(args);
640 	}
641 #endif
642 }
643 
644 
645 void
646 TraceOutput::PrintStackTrace(tracing_stack_trace* stackTrace)
647 {
648 #if ENABLE_TRACING
649 	if (stackTrace == NULL || stackTrace->depth <= 0)
650 		return;
651 
652 	for (int32 i = 0; i < stackTrace->depth; i++) {
653 		addr_t address = stackTrace->return_addresses[i];
654 
655 		const char* symbol;
656 		const char* imageName;
657 		bool exactMatch;
658 		addr_t baseAddress;
659 
660 		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
661 				&imageName, &exactMatch) == B_OK) {
662 			Print("  %p  %s + 0x%lx (%s)%s\n", (void*)address, symbol,
663 				address - baseAddress, imageName,
664 				exactMatch ? "" : " (nearest)");
665 		} else
666 			Print("  %p\n", (void*)address);
667 	}
668 #endif
669 }
670 
671 
/*!	Remembers the timestamp of the last dumped entry (used for
	TRACE_OUTPUT_DIFF_TIME output).
*/
void
TraceOutput::SetLastEntryTime(bigtime_t time)
{
	fLastEntryTime = time;
}
677 
678 
/*!	Returns the timestamp of the last dumped entry. */
bigtime_t
TraceOutput::LastEntryTime() const
{
	return fLastEntryTime;
}
684 
685 
686 //	#pragma mark -
687 
688 
// intentionally empty -- entries live in the tracing buffer and must not
// require construction-time work
TraceEntry::TraceEntry()
{
}
692 
693 
// intentionally empty -- cf. _FreeFirstEntry(), which never runs destructors
TraceEntry::~TraceEntry()
{
}
697 
698 
/*!	Writes a human-readable representation of the entry to \a out. */
void
TraceEntry::Dump(TraceOutput& out)
{
#if ENABLE_TRACING
	// to be overridden by subclasses
	out.Print("ENTRY %p", this);
#endif
}
707 
708 
/*!	Prints the entry's captured stack trace, if any. The base implementation
	does nothing; overridden by entries that capture one.
*/
void
TraceEntry::DumpStackTrace(TraceOutput& out)
{
}
713 
714 
/*!	Marks the entry as fully constructed. Until this is called the entry may
	not be evicted from the buffer (cf. _FreeFirstEntry()). Subclass
	constructors must call this as their last step.
*/
void
TraceEntry::Initialized()
{
#if ENABLE_TRACING
	ToTraceEntry()->flags |= ENTRY_INITIALIZED;
	sTracingMetaData->IncrementEntriesEver();
#endif
}
723 
724 
/*!	Allocates a TraceEntry from the tracing ring buffer (not the heap).
	Returns \c NULL when tracing is disabled or no space can be made, which
	is why only the nothrow form is provided.
*/
void*
TraceEntry::operator new(size_t size, const std::nothrow_t&) throw()
{
#if ENABLE_TRACING
	trace_entry* entry = sTracingMetaData->AllocateEntry(
		size + sizeof(trace_entry), 0);
	// the object is constructed directly after the trace_entry header
	return entry != NULL ? entry + 1 : NULL;
#endif
	return NULL;
}
735 
736 
737 //	#pragma mark -
738 
739 
// intentionally empty -- see TraceEntry::~TraceEntry()
AbstractTraceEntry::~AbstractTraceEntry()
{
}
743 
744 
/*!	Prints the standard "[thread] time:" prefix (optionally with team ID and
	time relative to the previous entry), then delegates to AddDump().
*/
void
AbstractTraceEntry::Dump(TraceOutput& out)
{
	bigtime_t time = (out.Flags() & TRACE_OUTPUT_DIFF_TIME)
		? fTime - out.LastEntryTime()
		: fTime;

	if (out.Flags() & TRACE_OUTPUT_TEAM_ID)
		out.Print("[%6ld:%6ld] %10Ld: ", fThread, fTeam, time);
	else
		out.Print("[%6ld] %10Ld: ", fThread, time);

	AddDump(out);

	out.SetLastEntryTime(fTime);
}
761 
762 
/*!	Prints the entry-specific part of the dump; overridden by subclasses. */
void
AbstractTraceEntry::AddDump(TraceOutput& out)
{
}
767 
768 
/*!	Records thread ID, team ID, and timestamp of the entry's creation.
	NOTE(review): if there is no current thread (or the thread has no team),
	fThread/fTeam are not assigned here -- presumably they are initialized
	elsewhere or such entries only occur early in boot; confirm.
*/
void
AbstractTraceEntry::_Init()
{
	Thread* thread = thread_get_current_thread();
	if (thread != NULL) {
		fThread = thread->id;
		if (thread->team)
			fTeam = thread->team->id;
	}
	fTime = system_time();
}
780 
781 
782 //	#pragma mark - AbstractTraceEntryWithStackTrace
783 
784 
785 
/*!	Captures a stack trace of at most \a stackTraceDepth frames, skipping
	\a skipFrames frames plus this constructor itself (hence the + 1).
	With \a kernelOnly only kernel frames are captured.
*/
AbstractTraceEntryWithStackTrace::AbstractTraceEntryWithStackTrace(
	size_t stackTraceDepth, size_t skipFrames, bool kernelOnly)
{
	fStackTrace = capture_tracing_stack_trace(stackTraceDepth, skipFrames + 1,
		kernelOnly);
}
792 
793 
/*!	Prints the stack trace captured at construction time. */
void
AbstractTraceEntryWithStackTrace::DumpStackTrace(TraceOutput& out)
{
	out.PrintStackTrace(fStackTrace);
}
799 
800 
801 //	#pragma mark -
802 
803 
804 #if ENABLE_TRACING
805 
// Entry created by ktrace_printf() for kernel-originated messages. The
// message (up to 256 bytes) is copied into the tracing buffer.
class KernelTraceEntry : public AbstractTraceEntry {
	public:
		KernelTraceEntry(const char* message)
		{
			fMessage = alloc_tracing_buffer_strcpy(message, 256, false);

#if KTRACE_PRINTF_STACK_TRACE
			fStackTrace = capture_tracing_stack_trace(
				KTRACE_PRINTF_STACK_TRACE, 1, false);
#endif
			// must be last -- marks the entry as complete
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("kern: %s", fMessage);
		}

#if KTRACE_PRINTF_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

	private:
		char*	fMessage;	// copy in the tracing buffer
#if KTRACE_PRINTF_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};
837 
838 
// Entry created for userland-originated trace messages (note the `true`
// "user" argument when copying the message from userland memory).
class UserTraceEntry : public AbstractTraceEntry {
	public:
		UserTraceEntry(const char* message)
		{
			fMessage = alloc_tracing_buffer_strcpy(message, 256, true);

#if KTRACE_PRINTF_STACK_TRACE
			fStackTrace = capture_tracing_stack_trace(
				KTRACE_PRINTF_STACK_TRACE, 1, false);
#endif
			// must be last -- marks the entry as complete
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("user: %s", fMessage);
		}

#if KTRACE_PRINTF_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

	private:
		char*	fMessage;	// copy in the tracing buffer
#if KTRACE_PRINTF_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};
870 
871 
// Marker entry denoting the start of a tracing session.
class TracingLogStartEntry : public AbstractTraceEntry {
	public:
		TracingLogStartEntry()
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("ktrace start");
		}
};
884 
885 #endif	// ENABLE_TRACING
886 
887 
888 //	#pragma mark - trace filters
889 
890 
// virtual, so subclass filters are destroyed correctly through base pointers
TraceFilter::~TraceFilter()
{
}
894 
895 
/*!	Returns whether \a entry matches the filter; the base implementation
	matches nothing and is overridden by the concrete filters below.
*/
bool
TraceFilter::Filter(const TraceEntry* entry, LazyTraceOutput& out)
{
	return false;
}
901 
902 
903 
// Matches entries created by the thread with ID fThread.
class ThreadTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
	{
		// only AbstractTraceEntry subclasses record a thread ID
		const AbstractTraceEntry* entry
			= dynamic_cast<const AbstractTraceEntry*>(_entry);
		return (entry != NULL && entry->ThreadID() == fThread);
	}
};
913 
914 
// Matches entries created by a thread of the team with ID fTeam.
class TeamTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
	{
		// only AbstractTraceEntry subclasses record a team ID
		const AbstractTraceEntry* entry
			= dynamic_cast<const AbstractTraceEntry*>(_entry);
		return (entry != NULL && entry->TeamID() == fTeam);
	}
};
924 
925 
// Matches entries whose dumped text contains the substring fString.
class PatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return strstr(out.DumpEntry(entry), fString) != NULL;
	}
};
933 
934 
// Matches entries whose dumped text contains fValue printed in decimal.
class DecimalPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%Ld", fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};
945 
// Matches entries whose dumped text contains fValue printed in hex.
class HexPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%Lx", fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};
956 
957 class StringPatternTraceFilter : public TraceFilter {
958 public:
959 	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
960 	{
961 		if (IS_KERNEL_ADDRESS(fValue))
962 			return strstr(out.DumpEntry(entry), (const char*)fValue) != NULL;
963 
964 		// TODO: this is *very* slow
965 		char buffer[64];
966 		user_strlcpy(buffer, (const char*)fValue, sizeof(buffer));
967 		return strstr(out.DumpEntry(entry), buffer) != NULL;
968 	}
969 };
970 
// Logical negation of its single sub-filter.
class NotTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return !fSubFilters.first->Filter(entry, out);
	}
};
978 
979 
// Logical conjunction of two sub-filters (short-circuiting).
class AndTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			&& fSubFilters.second->Filter(entry, out);
	}
};
988 
989 
// Logical disjunction of two sub-filters (short-circuiting).
class OrTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			|| fSubFilters.second->Filter(entry, out);
	}
};
998 
999 
// Recursive-descent parser for the prefix filter expression syntax of the
// "traced" debugger command (e.g. `and #pattern thread 42`). Filter objects
// are placement-new'ed into the fixed fFilters array -- the subclasses add
// no data members, so they fit into the TraceFilter-sized slots; the root
// of the expression always ends up in fFilters[0].
class TraceFilterParser {
public:
	static TraceFilterParser* Default()
	{
		return &sParser;
	}

	/*!	Parses \a argv into a filter tree; returns whether the whole token
		stream formed one valid expression.
	*/
	bool Parse(int argc, const char* const* argv)
	{
		fTokens = argv;
		fTokenCount = argc;
		fTokenIndex = 0;
		fFilterCount = 0;

		TraceFilter* filter = _ParseExpression();
		return fTokenIndex == fTokenCount && filter != NULL;
	}

	//! Returns the root filter of the last successfully parsed expression.
	TraceFilter* Filter()
	{
		return &fFilters[0];
	}

private:
	/*!	Parses one (possibly nested) expression, consuming tokens.
		Returns \c NULL on syntax error or when MAX_FILTERS is exceeded.
	*/
	TraceFilter* _ParseExpression()
	{
		const char* token = _NextToken();
		if (!token) {
			// unexpected end of expression
			return NULL;
		}

		if (fFilterCount == MAX_FILTERS) {
			// too many filters
			return NULL;
		}

		if (token[0] == '#') {
			// "#<pattern>": plain substring match
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				PatternTraceFilter;
			filter->fString = token + 1;
			return filter;
		} else if (token[0] == 'd' && token[1] == '#') {
			// "d#<expr>": match decimal rendering of the value
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				DecimalPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 'x' && token[1] == '#') {
			// "x#<expr>": match hex rendering of the value
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				HexPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 's' && token[1] == '#') {
			// "s#<expr>": expression evaluates to a string address
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				StringPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (strcmp(token, "not") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) NotTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL)
				return filter;
			return NULL;
		} else if (strcmp(token, "and") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) AndTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "or") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) OrTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			return NULL;
		} else if (strcmp(token, "thread") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				ThreadTraceFilter;
			filter->fThread = strtol(arg, NULL, 0);
			return filter;
		} else if (strcmp(token, "team") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				TeamTraceFilter;
			filter->fTeam = strtol(arg, NULL, 0);
			return filter;
		} else {
			// invalid token
			return NULL;
		}
	}

	//! Returns the most recently consumed token, or \c NULL.
	const char* _CurrentToken() const
	{
		if (fTokenIndex >= 1 && fTokenIndex <= fTokenCount)
			return fTokens[fTokenIndex - 1];
		return NULL;
	}

	//! Consumes and returns the next token, or \c NULL at the end.
	const char* _NextToken()
	{
		if (fTokenIndex >= fTokenCount)
			return NULL;
		return fTokens[fTokenIndex++];
	}

private:
	enum { MAX_FILTERS = 32 };

	const char* const*			fTokens;
	int							fTokenCount;
	int							fTokenIndex;
	TraceFilter					fFilters[MAX_FILTERS];
	int							fFilterCount;

	static TraceFilterParser	sParser;
};
1129 
1130 
// the singleton returned by TraceFilterParser::Default()
TraceFilterParser TraceFilterParser::sParser;
1132 
1133 
1134 //	#pragma mark -
1135 
1136 
1137 #if ENABLE_TRACING
1138 
1139 
/*!	Advances to the next non-buffer entry. Index 0 means "before the first
	entry", so the first call positions the iterator at entry 1.
	Returns the current entry, or \c NULL when the end has been reached.
*/
TraceEntry*
TraceEntryIterator::Next()
{
	if (fIndex == 0) {
		fEntry = _NextNonBufferEntry(sTracingMetaData->FirstEntry());
		fIndex = 1;
	} else if (fEntry != NULL) {
		fEntry = _NextNonBufferEntry(sTracingMetaData->NextEntry(fEntry));
		fIndex++;
	}

	return Current();
}
1153 
1154 
/*!	Moves to the previous non-buffer entry. Index Entries() + 1 means "after
	the last entry" and is first resolved to the buffer end.
	Returns the current entry, or \c NULL when the beginning is passed.
*/
TraceEntry*
TraceEntryIterator::Previous()
{
	if (fIndex == (int32)sTracingMetaData->Entries() + 1)
		fEntry = sTracingMetaData->AfterLastEntry();

	if (fEntry != NULL) {
		fEntry = _PreviousNonBufferEntry(
			sTracingMetaData->PreviousEntry(fEntry));
		fIndex--;
	}

	return Current();
}
1169 
1170 
/*!	Positions the iterator at 1-based entry \a index, stepping from the
	current position, the beginning, or the end -- whichever requires the
	fewest iterations. Out-of-range indices park the iterator before the
	first or after the last entry and return \c NULL.
*/
TraceEntry*
TraceEntryIterator::MoveTo(int32 index)
{
	if (index == fIndex)
		return Current();

	if (index <= 0 || index > (int32)sTracingMetaData->Entries()) {
		fIndex = (index <= 0 ? 0 : sTracingMetaData->Entries() + 1);
		fEntry = NULL;
		return NULL;
	}

	// get the shortest iteration path
	int32 distance = index - fIndex;
	int32 direction = distance < 0 ? -1 : 1;
	distance *= direction;

	// starting over from the beginning may be shorter ...
	if (index < distance) {
		distance = index;
		direction = 1;
		fEntry = NULL;
		fIndex = 0;
	}
	// ... as may be starting from the end
	if ((int32)sTracingMetaData->Entries() + 1 - fIndex < distance) {
		distance = sTracingMetaData->Entries() + 1 - fIndex;
		direction = -1;
		fEntry = NULL;
		fIndex = sTracingMetaData->Entries() + 1;
	}

	// iterate to the index
	if (direction < 0) {
		while (fIndex != index)
			Previous();
	} else {
		while (fIndex != index)
			Next();
	}

	return Current();
}
1212 
1213 
1214 trace_entry*
1215 TraceEntryIterator::_NextNonBufferEntry(trace_entry* entry)
1216 {
1217 	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
1218 		entry = sTracingMetaData->NextEntry(entry);
1219 
1220 	return entry;
1221 }
1222 
1223 
1224 trace_entry*
1225 TraceEntryIterator::_PreviousNonBufferEntry(trace_entry* entry)
1226 {
1227 	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
1228 		entry = sTracingMetaData->PreviousEntry(entry);
1229 
1230 	return entry;
1231 }
1232 
1233 
/*!	Implements the "traced" debugger command: parses the arguments, applies
	an optional filter, and prints the selected range of recorded trace
	entries. \a wrapperFilter, if non-NULL, is wrapped around the
	user-specified filter. The iteration state is kept in static variables,
	so that a "forward"/"backward" invocation (or B_KDEBUG_CONT) can resume
	where the previous one left off; only safe in the kernel debugger
	context.
*/
int
dump_tracing_internal(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
	int argi = 1;

	// variables in which we store our state to be continuable
	static int32 _previousCount = 0;
	static bool _previousHasFilter = false;
	static bool _previousPrintStackTrace = false;
	static int32 _previousMaxToCheck = 0;
	static int32 _previousFirstChecked = 1;
	static int32 _previousLastChecked = -1;
	static int32 _previousDirection = 1;
	static uint32 _previousEntriesEver = 0;
	static uint32 _previousEntries = 0;
	static uint32 _previousOutputFlags = 0;
	static TraceEntryIterator iterator;

	uint32 entriesEver = sTracingMetaData->EntriesEver();

	// Note: start and index are Pascal-like indices (i.e. in [1, Entries()]).
	int32 start = 0;	// special index: print the last count entries
	int32 count = 0;
	int32 maxToCheck = 0;
	int32 cont = 0;		// continuation direction: -1 backward, 1 forward

	bool hasFilter = false;
	bool printStackTrace = false;

	// parse the leading option flags
	uint32 outputFlags = 0;
	while (argi < argc) {
		if (strcmp(argv[argi], "--difftime") == 0) {
			outputFlags |= TRACE_OUTPUT_DIFF_TIME;
			argi++;
		} else if (strcmp(argv[argi], "--printteam") == 0) {
			outputFlags |= TRACE_OUTPUT_TEAM_ID;
			argi++;
		} else if (strcmp(argv[argi], "--stacktrace") == 0) {
			printStackTrace = true;
			argi++;
		} else
			break;
	}

	// explicit or implicit (no arguments at all) continuation request
	if (argi < argc) {
		if (strcmp(argv[argi], "forward") == 0) {
			cont = 1;
			argi++;
		} else if (strcmp(argv[argi], "backward") == 0) {
			cont = -1;
			argi++;
		}
	} else
		cont = _previousDirection;

	if (cont != 0) {
		if (argi < argc) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
		// a continuation is only valid while the buffer is unchanged
		if (entriesEver == 0 || entriesEver != _previousEntriesEver
			|| sTracingMetaData->Entries() != _previousEntries) {
			kprintf("Can't continue iteration. \"%s\" has not been invoked "
				"before, or there were new entries written since the last "
				"invocation.\n", argv[0]);
			return 0;
		}
	}

	// get start, count, maxToCheck
	int32* params[3] = { &start, &count, &maxToCheck };
	for (int i = 0; i < 3 && !hasFilter && argi < argc; i++) {
		if (strcmp(argv[argi], "filter") == 0) {
			hasFilter = true;
			argi++;
		} else if (argv[argi][0] == '#') {
			hasFilter = true;
		} else {
			*params[i] = parse_expression(argv[argi]);
			argi++;
		}
	}

	// filter specification
	if (argi < argc) {
		hasFilter = true;
		if (strcmp(argv[argi], "filter") == 0)
			argi++;

		if (!TraceFilterParser::Default()->Parse(argc - argi, argv + argi)) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	int32 direction;
	int32 firstToCheck;
	int32 lastToCheck;

	if (cont != 0) {
		// get values from the previous iteration
		direction = cont;
		count = _previousCount;
		maxToCheck = _previousMaxToCheck;
		hasFilter = _previousHasFilter;
		outputFlags = _previousOutputFlags;
		printStackTrace = _previousPrintStackTrace;

		if (direction < 0)
			start = _previousFirstChecked - 1;
		else
			start = _previousLastChecked + 1;
	} else {
		// defaults for count and maxToCheck
		if (count == 0)
			count = 30;
		if (maxToCheck == 0 || !hasFilter)
			maxToCheck = count;
		else if (maxToCheck < 0)
			maxToCheck = sTracingMetaData->Entries();

		// determine iteration direction
		direction = (start <= 0 || count < 0 ? -1 : 1);

		// validate count and maxToCheck
		if (count < 0)
			count = -count;
		if (maxToCheck < 0)
			maxToCheck = -maxToCheck;
		if (maxToCheck > (int32)sTracingMetaData->Entries())
			maxToCheck = sTracingMetaData->Entries();
		if (count > maxToCheck)
			count = maxToCheck;

		// validate start
		if (start <= 0 || start > (int32)sTracingMetaData->Entries())
			start = max_c(1, sTracingMetaData->Entries());
	}

	// compute the index range [firstToCheck, lastToCheck] to consider
	if (direction < 0) {
		firstToCheck = max_c(1, start - maxToCheck + 1);
		lastToCheck = start;
	} else {
		firstToCheck = start;
		lastToCheck = min_c((int32)sTracingMetaData->Entries(),
			start + maxToCheck - 1);
	}

	// reset the iterator, if something changed in the meantime
	if (entriesEver == 0 || entriesEver != _previousEntriesEver
		|| sTracingMetaData->Entries() != _previousEntries) {
		iterator.Reset();
	}

	LazyTraceOutput out(sTracingMetaData->TraceOutputBuffer(),
		kTraceOutputBufferSize, outputFlags);

	bool markedMatching = false;
	int32 firstToDump = firstToCheck;
	int32 lastToDump = lastToCheck;

	TraceFilter* filter = NULL;
	if (hasFilter)
		filter = TraceFilterParser::Default()->Filter();

	if (wrapperFilter != NULL) {
		wrapperFilter->Init(filter, direction, cont != 0);
		filter = wrapperFilter;
	}

	// When filtering backwards we need a pre-pass: walk backwards marking
	// matching entries (FILTER_MATCH), so the forward dump loop below can
	// print exactly the last <count> matches in forward order.
	if (direction < 0 && filter && lastToCheck - firstToCheck >= count) {
		// iteration direction is backwards
		markedMatching = true;

		// From the last entry to check iterate backwards to check filter
		// matches.
		int32 matching = 0;

		// move to the entry after the last entry to check
		iterator.MoveTo(lastToCheck + 1);

		// iterate backwards
		firstToDump = -1;
		lastToDump = -1;
		while (iterator.Index() > firstToCheck) {
			TraceEntry* entry = iterator.Previous();
			if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
				out.Clear();
				if (filter->Filter(entry, out)) {
					entry->ToTraceEntry()->flags |= FILTER_MATCH;
					if (lastToDump == -1)
						lastToDump = iterator.Index();
					firstToDump = iterator.Index();

					matching++;
					if (matching >= count)
						break;
				} else
					entry->ToTraceEntry()->flags &= ~FILTER_MATCH;
			}
		}

		firstToCheck = iterator.Index();

		// iterate to the previous entry, so that the next loop starts at the
		// right one
		iterator.Previous();
	}

	out.SetLastEntryTime(0);

	// set the iterator to the entry before the first one to dump
	iterator.MoveTo(firstToDump - 1);

	// dump the entries matching the filter in the range
	// [firstToDump, lastToDump]
	int32 dumped = 0;

	while (TraceEntry* entry = iterator.Next()) {
		int32 index = iterator.Index();
		if (index < firstToDump)
			continue;
		if (index > lastToDump || dumped >= count) {
			if (direction > 0)
				lastToCheck = index - 1;
			break;
		}

		if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
			out.Clear();
			if (filter &&  (markedMatching
					? (entry->Flags() & FILTER_MATCH) == 0
					: !filter->Filter(entry, out))) {
				continue;
			}

			// don't print trailing new line
			const char* dump = out.DumpEntry(entry);
			int len = strlen(dump);
			if (len > 0 && dump[len - 1] == '\n')
				len--;

			kprintf("%5ld. %.*s\n", index, len, dump);

			if (printStackTrace) {
				out.Clear();
				entry->DumpStackTrace(out);
				if (out.Size() > 0)
					kputs(out.Buffer());
			}
		} else if (!filter)
			kprintf("%5ld. ** uninitialized entry **\n", index);

		dumped++;
	}

	kprintf("printed %ld entries within range %ld to %ld (%ld of %ld total, "
		"%ld ever)\n", dumped, firstToCheck, lastToCheck,
		lastToCheck - firstToCheck + 1, sTracingMetaData->Entries(),
		entriesEver);

	// store iteration state
	_previousCount = count;
	_previousMaxToCheck = maxToCheck;
	_previousHasFilter = hasFilter;
	_previousPrintStackTrace = printStackTrace;
	_previousFirstChecked = firstToCheck;
	_previousLastChecked = lastToCheck;
	_previousDirection = direction;
	_previousEntriesEver = entriesEver;
	_previousEntries = sTracingMetaData->Entries();
	_previousOutputFlags = outputFlags;

	return cont != 0 ? B_KDEBUG_CONT : 0;
}
1509 
1510 
/*!	Debugger command function for "traced": dumps recorded trace entries
	without an additional wrapper filter.
*/
static int
dump_tracing_command(int argc, char** argv)
{
	return dump_tracing_internal(argc, argv, NULL);
}
1516 
1517 
1518 #endif	// ENABLE_TRACING
1519 
1520 
1521 extern "C" uint8*
1522 alloc_tracing_buffer(size_t size)
1523 {
1524 #if	ENABLE_TRACING
1525 	trace_entry* entry = sTracingMetaData->AllocateEntry(
1526 		size + sizeof(trace_entry), BUFFER_ENTRY);
1527 	if (entry == NULL)
1528 		return NULL;
1529 
1530 	return (uint8*)(entry + 1);
1531 #else
1532 	return NULL;
1533 #endif
1534 }
1535 
1536 
1537 uint8*
1538 alloc_tracing_buffer_memcpy(const void* source, size_t size, bool user)
1539 {
1540 	if (user && !IS_USER_ADDRESS(source))
1541 		return NULL;
1542 
1543 	uint8* buffer = alloc_tracing_buffer(size);
1544 	if (buffer == NULL)
1545 		return NULL;
1546 
1547 	if (user) {
1548 		if (user_memcpy(buffer, source, size) != B_OK)
1549 			return NULL;
1550 	} else
1551 		memcpy(buffer, source, size);
1552 
1553 	return buffer;
1554 }
1555 
1556 
1557 char*
1558 alloc_tracing_buffer_strcpy(const char* source, size_t maxSize, bool user)
1559 {
1560 	if (source == NULL || maxSize == 0)
1561 		return NULL;
1562 
1563 	if (user && !IS_USER_ADDRESS(source))
1564 		return NULL;
1565 
1566 	// limit maxSize to the actual source string len
1567 	if (user) {
1568 		ssize_t size = user_strlcpy(NULL, source, 0);
1569 			// there's no user_strnlen()
1570 		if (size < 0)
1571 			return 0;
1572 		maxSize = min_c(maxSize, (size_t)size + 1);
1573 	} else
1574 		maxSize = strnlen(source, maxSize - 1) + 1;
1575 
1576 	char* buffer = (char*)alloc_tracing_buffer(maxSize);
1577 	if (buffer == NULL)
1578 		return NULL;
1579 
1580 	if (user) {
1581 		if (user_strlcpy(buffer, source, maxSize) < B_OK)
1582 			return NULL;
1583 	} else
1584 		strlcpy(buffer, source, maxSize);
1585 
1586 	return buffer;
1587 }
1588 
1589 
1590 tracing_stack_trace*
1591 capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool kernelOnly)
1592 {
1593 #if	ENABLE_TRACING
1594 	// page_fault_exception() doesn't allow us to gracefully handle a bad
1595 	// address in the stack trace, if interrupts are disabled, so we always
1596 	// restrict the stack traces to the kernel only in this case. A bad address
1597 	// in the kernel stack trace would still cause a panic(), but this is
1598 	// probably even desired.
1599 	if (!are_interrupts_enabled())
1600 		kernelOnly = true;
1601 
1602 	tracing_stack_trace* stackTrace
1603 		= (tracing_stack_trace*)alloc_tracing_buffer(
1604 			sizeof(tracing_stack_trace) + maxCount * sizeof(addr_t));
1605 
1606 	if (stackTrace != NULL) {
1607 		stackTrace->depth = arch_debug_get_stack_trace(
1608 			stackTrace->return_addresses, maxCount, 0, skipFrames + 1,
1609 			STACK_TRACE_KERNEL | (kernelOnly ? 0 : STACK_TRACE_USER));
1610 	}
1611 
1612 	return stackTrace;
1613 #else
1614 	return NULL;
1615 #endif
1616 }
1617 
1618 
/*!	Dumps recorded trace entries, wrapping the user-specified filter with
	\a wrapperFilter (may be NULL). Used by subsystem-specific debugger
	commands; returns 0 when tracing is compiled out.
*/
int
dump_tracing(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
#if	ENABLE_TRACING
	return dump_tracing_internal(argc, argv, wrapperFilter);
#else
	return 0;
#endif
}
1628 
1629 
/*!	Locks the tracing metadata (no-op when tracing is compiled out).
	Must be balanced by a call to unlock_tracing_buffer().
*/
void
lock_tracing_buffer()
{
#if ENABLE_TRACING
	sTracingMetaData->Lock();
#endif
}
1637 
1638 
/*!	Unlocks the tracing metadata previously locked with
	lock_tracing_buffer() (no-op when tracing is compiled out).
*/
void
unlock_tracing_buffer()
{
#if ENABLE_TRACING
	sTracingMetaData->Unlock();
#endif
}
1646 
1647 
1648 extern "C" status_t
1649 tracing_init(void)
1650 {
1651 #if	ENABLE_TRACING
1652 	status_t result = TracingMetaData::Create(sTracingMetaData);
1653 	if (result != B_OK) {
1654 		sTracingMetaData = &sDummyTracingMetaData;
1655 		return result;
1656 	}
1657 
1658 	new(nothrow) TracingLogStartEntry();
1659 
1660 	add_debugger_command_etc("traced", &dump_tracing_command,
1661 		"Dump recorded trace entries",
1662 		"[ --printteam ] [ --difftime ] [ --stacktrace ] "
1663 			"(\"forward\" | \"backward\") "
1664 			"| ([ <start> [ <count> [ <range> ] ] ] "
1665 			"[ #<pattern> | (\"filter\" <filter>) ])\n"
1666 		"Prints recorded trace entries. If \"backward\" or \"forward\" is\n"
1667 		"specified, the command continues where the previous invocation left\n"
1668 		"off, i.e. printing the previous respectively next entries (as many\n"
1669 		"as printed before). In this case the command is continuable, that is\n"
1670 		"afterwards entering an empty line in the debugger will reinvoke it.\n"
1671 		"If no arguments are given, the command continues in the direction\n"
1672 		"of the last invocation.\n"
1673 		"--printteam  - enables printing the entries' team IDs.\n"
1674 		"--difftime   - print difference times for all but the first entry.\n"
1675 		"--stacktrace - print stack traces for entries that captured one.\n"
1676 		"  <start>    - The base index of the entries to print. Depending on\n"
1677 		"               whether the iteration direction is forward or\n"
1678 		"               backward this will be the first or last entry printed\n"
1679 		"               (potentially, if a filter is specified). The index of\n"
1680 		"               the first entry in the trace buffer is 1. If 0 is\n"
1681 		"               specified, the last <count> recorded entries are\n"
1682 		"               printed (iteration direction is backward). Defaults \n"
1683 		"               to 0.\n"
1684 		"  <count>    - The number of entries to be printed. Defaults to 30.\n"
1685 		"               If negative, the -<count> entries before and\n"
1686 		"               including <start> will be printed.\n"
1687 		"  <range>    - Only relevant if a filter is specified. Specifies the\n"
1688 		"               number of entries to be filtered -- depending on the\n"
1689 		"               iteration direction the entries before or after\n"
1690 		"               <start>. If more than <count> entries match the\n"
1691 		"               filter, only the first (forward) or last (backward)\n"
1692 		"               <count> matching entries will be printed. If 0 is\n"
1693 		"               specified <range> will be set to <count>. If -1,\n"
1694 		"               <range> will be set to the number of recorded\n"
1695 		"               entries.\n"
1696 		"  <pattern>  - If specified only entries containing this string are\n"
1697 		"               printed.\n"
1698 		"  <filter>   - If specified only entries matching this filter\n"
1699 		"               expression are printed. The expression can consist of\n"
1700 		"               prefix operators \"not\", \"and\", \"or\", and\n"
1701 		"               filters \"'thread' <thread>\" (matching entries\n"
1702 		"               with the given thread ID), \"'team' <team>\"\n"
1703 						"(matching entries with the given team ID), and\n"
1704 		"               \"#<pattern>\" (matching entries containing the given\n"
1705 		"               string).\n", 0);
1706 #endif	// ENABLE_TRACING
1707 	return B_OK;
1708 }
1709 
1710 
/*!	printf()-style convenience function that records the formatted message
	as a kernel trace entry. Output longer than the internal 256 byte
	buffer is truncated. No-op when tracing is compiled out.
*/
void
ktrace_printf(const char *format, ...)
{
#if	ENABLE_TRACING
	char buffer[256];

	va_list args;
	va_start(args, format);
	vsnprintf(buffer, sizeof(buffer), format, args);
	va_end(args);

	new(nothrow) KernelTraceEntry(buffer);
#endif	// ENABLE_TRACING
}
1726 
1727 
/*!	Syscall backend: records the given \a message as a user trace entry.
	No-op when tracing is compiled out.
	NOTE(review): \a message is a userland pointer here -- presumably
	UserTraceEntry copies it safely (cf. alloc_tracing_buffer_strcpy());
	confirm at its definition.
*/
void
_user_ktrace_output(const char *message)
{
#if	ENABLE_TRACING
	new(nothrow) UserTraceEntry(message);
#endif	// ENABLE_TRACING
}
1735 
1736