/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008-2009, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2012, Rene Gollent, rene@gollent.com.
 * Distributed under the terms of the MIT License.
 */


#include <tracing.h>

#include <stdlib.h>

#include <algorithm>

#include <arch/debug.h>
#include <debug.h>
#include <debug_heap.h>
#include <elf.h>
#include <int.h>
#include <kernel.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm.h>


#if ENABLE_TRACING

//#define TRACE_TRACING
#ifdef TRACE_TRACING
#	define TRACE(x) dprintf_no_syslog x
#else
#	define TRACE(x) ;
#endif


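// Flags stored in trace_entry::flags, summarized from their use below:
// WRAP_ENTRY marks the pseudo entry that wraps the ring buffer back to its
// start, ENTRY_INITIALIZED is set once a TraceEntry's constructor has run,
// BUFFER_ENTRY marks raw buffer allocations (alloc_tracing_buffer()), and
// FILTER_MATCH caches a filter hit while the "traced" command iterates
// backwards. INVALID_ENTRY and CHECK_ENTRY relate to validating entries
// recovered from a previous session (see _InitPreviousTracingData()).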
enum {
	WRAP_ENTRY			= 0x01,
	ENTRY_INITIALIZED	= 0x02,
	BUFFER_ENTRY		= 0x04,
	FILTER_MATCH		= 0x08,
	INVALID_ENTRY		= 0x10,
	CHECK_ENTRY			= 0x20,
};


static const size_t kTraceOutputBufferSize = 10240;
static const size_t kBufferSize = MAX_TRACE_SIZE / sizeof(trace_entry);

static const uint32 kMaxRecoveringErrorCount = 100;
static const addr_t kMetaDataBaseAddress = 32 * 1024 * 1024;
static const addr_t kMetaDataBaseEndAddress = 128 * 1024 * 1024;
static const addr_t kMetaDataAddressIncrement = 8 * 1024 * 1024;
static const uint32 kMetaDataMagic1 = 'Vali';
static const uint32 kMetaDataMagic2 = 'dTra';
static const uint32 kMetaDataMagic3 = 'cing';

// the maximum we can address with the trace_entry::[previous_]size fields
static const size_t kMaxTracingEntryByteSize
	= ((1 << 13) - 1) * sizeof(trace_entry);


struct TraceOutputPrint {
	TraceOutputPrint(TraceOutput& output)
		:
		fOutput(output)
	{
	}

	void operator()(const char* format, ...) const
	{
		va_list args;
		va_start(args, format);
		fOutput.PrintArgs(format, args);
		va_end(args);
	}

private:
	TraceOutput&	fOutput;
};
class TracingMetaData {
public:
	static	status_t		Create(TracingMetaData*& _metaData);

	inline	bool			Lock();
	inline	void			Unlock();

	inline	trace_entry*	FirstEntry() const;
	inline	trace_entry*	AfterLastEntry() const;

	inline	uint32			Entries() const;
	inline	uint32			EntriesEver() const;

	inline	void			IncrementEntriesEver();

	inline	char*			TraceOutputBuffer() const;

			trace_entry*	NextEntry(trace_entry* entry);
			trace_entry*	PreviousEntry(trace_entry* entry);

			trace_entry*	AllocateEntry(size_t size, uint16 flags);

			bool			IsInBuffer(void* address, size_t size);

private:
			bool			_FreeFirstEntry();
			bool			_MakeSpace(size_t needed);

	static	status_t		_CreateMetaDataArea(bool findPrevious,
								area_id& _area, TracingMetaData*& _metaData);
			bool			_InitPreviousTracingData();

private:
			uint32			fMagic1;
			trace_entry*	fBuffer;
			trace_entry*	fFirstEntry;
			trace_entry*	fAfterLastEntry;
			uint32			fEntries;
			uint32			fMagic2;
			uint32			fEntriesEver;
			spinlock		fLock;
			char*			fTraceOutputBuffer;
			phys_addr_t		fPhysicalAddress;
			uint32			fMagic3;
};

static TracingMetaData sFallbackTracingMetaData;
static TracingMetaData* sTracingMetaData = &sFallbackTracingMetaData;
static bool sTracingDataRecovered = false;


// #pragma mark -


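/*!	Prints the given stack trace via the provided printf-like functor,
	resolving return addresses to symbol names (demangled, when a scratch
	buffer could be allocated from the debug heap).
*/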
template<typename Print>
static void
print_stack_trace(struct tracing_stack_trace* stackTrace, const Print& print)
{
	if (stackTrace == NULL || stackTrace->depth <= 0)
		return;

	static const size_t kBufferSize = 256;
	char* buffer = (char*)debug_malloc(kBufferSize);

	for (int32 i = 0; i < stackTrace->depth; i++) {
		addr_t address = stackTrace->return_addresses[i];

		const char* symbol;
		const char* demangledName = NULL;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {

			if (buffer != NULL) {
				bool isObjectMethod;
				demangledName = debug_demangle_symbol(symbol, buffer,
					kBufferSize, &isObjectMethod);
			}

			print("  %p  %s + 0x%lx (%s)%s\n", (void*)address,
				demangledName != NULL ? demangledName : symbol,
				address - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			print("  %p\n", (void*)address);
	}

	if (buffer != NULL)
		debug_free(buffer);
}


// #pragma mark - TracingMetaData


bool
TracingMetaData::Lock()
{
	acquire_spinlock(&fLock);
	return true;
}


void
TracingMetaData::Unlock()
{
	release_spinlock(&fLock);
}


trace_entry*
TracingMetaData::FirstEntry() const
{
	return fFirstEntry;
}


trace_entry*
TracingMetaData::AfterLastEntry() const
{
	return fAfterLastEntry;
}


uint32
TracingMetaData::Entries() const
{
	return fEntries;
}


uint32
TracingMetaData::EntriesEver() const
{
	return fEntriesEver;
}


void
TracingMetaData::IncrementEntriesEver()
{
	fEntriesEver++;
		// NOTE: Race condition on SMP machines! We should use atomic_add(),
		// but that costs some performance and the counter is only
		// informational anyway.
}


char*
TracingMetaData::TraceOutputBuffer() const
{
	return fTraceOutputBuffer;
}


trace_entry*
TracingMetaData::NextEntry(trace_entry* entry)
{
	entry += entry->size;
	if ((entry->flags & WRAP_ENTRY) != 0)
		entry = fBuffer;

	if (entry == fAfterLastEntry)
		return NULL;

	return entry;
}


trace_entry*
TracingMetaData::PreviousEntry(trace_entry* entry)
{
	if (entry == fFirstEntry)
		return NULL;

	if (entry == fBuffer) {
		// beginning of buffer -- previous entry is a wrap entry
		entry = fBuffer + kBufferSize - entry->previous_size;
	}

	return entry - entry->previous_size;
}


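/*!	Allocates an entry of \a size bytes (header included) in the ring buffer,
	freeing the oldest entries as necessary. \a size is rounded up and stored
	in units of sizeof(trace_entry), so it must be smaller than
	kMaxTracingEntryByteSize. Returns \c NULL if the allocation is impossible.
*/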
trace_entry*
TracingMetaData::AllocateEntry(size_t size, uint16 flags)
{
	if (fAfterLastEntry == NULL || size == 0
		|| size >= kMaxTracingEntryByteSize) {
		return NULL;
	}

	InterruptsSpinLocker _(fLock);

	size = (size + 3) >> 2;
		// 4 byte aligned, don't store the lower 2 bits

	TRACE(("AllocateEntry(%lu), start %p, end %p, buffer %p\n", size * 4,
		fFirstEntry, fAfterLastEntry, fBuffer));

	if (!_MakeSpace(size))
		return NULL;

	trace_entry* entry = fAfterLastEntry;
	entry->size = size;
	entry->flags = flags;
	fAfterLastEntry += size;
	fAfterLastEntry->previous_size = size;

	if (!(flags & BUFFER_ENTRY))
		fEntries++;

	TRACE(("  entry: %p, end %p, start %p, entries %ld\n", entry,
		fAfterLastEntry, fFirstEntry, fEntries));

	return entry;
}


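/*!	Returns whether [address, address + size) lies fully within the occupied
	part of the tracing buffer, taking a possible wrap-around of the entry
	list into account.
*/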
bool
TracingMetaData::IsInBuffer(void* address, size_t size)
{
	if (fEntries == 0)
		return false;

	addr_t start = (addr_t)address;
	addr_t end = start + size;

	if (start < (addr_t)fBuffer || end > (addr_t)(fBuffer + kBufferSize))
		return false;

	if (fFirstEntry > fAfterLastEntry)
		return start >= (addr_t)fFirstEntry || end <= (addr_t)fAfterLastEntry;

	return start >= (addr_t)fFirstEntry && end <= (addr_t)fAfterLastEntry;
}


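/*!	Discards the oldest entry in the buffer to make room for new ones.
	Returns \c false, if the entry cannot be freed yet (it has been
	allocated, but its constructor has not finished running).
*/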
bool
TracingMetaData::_FreeFirstEntry()
{
	TRACE(("  skip start %p, %lu*4 bytes\n", fFirstEntry, fFirstEntry->size));

	trace_entry* newFirst = NextEntry(fFirstEntry);

	if (fFirstEntry->flags & BUFFER_ENTRY) {
		// a buffer entry -- just skip it
	} else if (fFirstEntry->flags & ENTRY_INITIALIZED) {
		// Fully initialized TraceEntry: We could destroy it, but don't do so
		// for the sake of robustness. The destructors of tracing entry
		// classes should be empty anyway.
		fEntries--;
	} else {
		// Not fully initialized TraceEntry: We can't free it, since its
		// constructor might still write into the memory and overwrite data
		// of the entry we're going to allocate. We can't do anything until
		// this entry can be discarded.
		return false;
	}

	if (newFirst == NULL) {
		// everything is freed -- practically this can't happen, if the
		// buffer is large enough to hold three max-sized entries
		fFirstEntry = fAfterLastEntry = fBuffer;
		TRACE(("_FreeFirstEntry(): all entries freed!\n"));
	} else
		fFirstEntry = newFirst;

	return true;
}


/*!	Makes sure we have needed * 4 bytes of memory at fAfterLastEntry.
	Returns \c false, if unable to free that much.
*/
bool
TracingMetaData::_MakeSpace(size_t needed)
{
	// we need space for fAfterLastEntry, too (in case we need to wrap around
	// later)
	needed++;

	// If there's not enough space (free or occupied) after fAfterLastEntry,
	// we free all entries in that region and wrap around.
	if (fAfterLastEntry + needed > fBuffer + kBufferSize) {
		TRACE(("_MakeSpace(%lu), wrapping around: after last: %p\n", needed,
			fAfterLastEntry));

		// Free all entries after fAfterLastEntry and one more at the
		// beginning of the buffer.
		while (fFirstEntry > fAfterLastEntry) {
			if (!_FreeFirstEntry())
				return false;
		}
		if (fAfterLastEntry != fBuffer && !_FreeFirstEntry())
			return false;

		// just in case _FreeFirstEntry() freed the very last existing entry
		if (fAfterLastEntry == fBuffer)
			return true;

		// mark as wrap entry and actually wrap around
		trace_entry* wrapEntry = fAfterLastEntry;
		wrapEntry->size = 0;
		wrapEntry->flags = WRAP_ENTRY;
		fAfterLastEntry = fBuffer;
		fAfterLastEntry->previous_size = fBuffer + kBufferSize - wrapEntry;
	}

	if (fFirstEntry <= fAfterLastEntry) {
		// buffer is empty or the space after fAfterLastEntry is unoccupied
		return true;
	}

	// free the first entries, until there's enough space
	size_t space = fFirstEntry - fAfterLastEntry;

	if (space < needed) {
		TRACE(("_MakeSpace(%lu), left %ld\n", needed, space));
	}

	while (space < needed) {
		space += fFirstEntry->size;

		if (!_FreeFirstEntry())
			return false;
	}

	TRACE(("  out: start %p, entries %ld\n", fFirstEntry, fEntries));

	return true;
}


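/*!	Creates the tracing meta data and the tracing buffer. First tries to find
	and reuse the meta data of a previous session, so that the recorded
	entries can be inspected after a reboot; failing that, a fresh contiguous
	buffer is allocated and the meta data is stamped with the magic values
	that allow the next session to find it.
*/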
/*static*/ status_t
TracingMetaData::Create(TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	area_id area;
	TracingMetaData* metaData;
	status_t error = _CreateMetaDataArea(true, area, metaData);
	if (error == B_OK) {
		if (metaData->_InitPreviousTracingData()) {
			_metaData = metaData;
			return B_OK;
		}

		dprintf("Found previous tracing meta data, but failed to init.\n");

		// invalidate the meta data
		metaData->fMagic1 = 0;
		metaData->fMagic2 = 0;
		metaData->fMagic3 = 0;
		delete_area(area);
	} else
		dprintf("No previous tracing meta data found.\n");

	// no previous tracing data found -- create new one
	error = _CreateMetaDataArea(false, area, metaData);
	if (error != B_OK)
		return error;

	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions,
		(void**)&metaData->fTraceOutputBuffer);
	if (area < 0)
		return area;

	// get the physical address
	physical_entry physicalEntry;
	if (get_memory_map(metaData->fTraceOutputBuffer, B_PAGE_SIZE,
			&physicalEntry, 1) == B_OK) {
		metaData->fPhysicalAddress = physicalEntry.address;
	} else {
		dprintf("TracingMetaData::Create(): failed to get physical address "
			"of tracing buffer\n");
		metaData->fPhysicalAddress = 0;
	}

	metaData->fBuffer = (trace_entry*)(metaData->fTraceOutputBuffer
		+ kTraceOutputBufferSize);
	metaData->fFirstEntry = metaData->fBuffer;
	metaData->fAfterLastEntry = metaData->fBuffer;

	metaData->fEntries = 0;
	metaData->fEntriesEver = 0;
	B_INITIALIZE_SPINLOCK(&metaData->fLock);

	metaData->fMagic1 = kMetaDataMagic1;
	metaData->fMagic2 = kMetaDataMagic2;
	metaData->fMagic3 = kMetaDataMagic3;

	_metaData = metaData;
	return B_OK;
}


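/*!	Creates the area for the tracing meta data at one of a fixed set of
	physical addresses (kMetaDataBaseAddress upwards, in steps of
	kMetaDataAddressIncrement). With \a findPrevious set, only an area
	containing the magic values of a previous session is accepted.
*/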
/*static*/ status_t
TracingMetaData::_CreateMetaDataArea(bool findPrevious, area_id& _area,
	TracingMetaData*& _metaData)
{
	// search meta data in memory (from previous session)
	TracingMetaData* metaData;
	phys_addr_t metaDataAddress = kMetaDataBaseAddress;
	for (; metaDataAddress <= kMetaDataBaseEndAddress;
			metaDataAddress += kMetaDataAddressIncrement) {
		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		physicalRestrictions.low_address = metaDataAddress;
		physicalRestrictions.high_address = metaDataAddress + B_PAGE_SIZE;
		area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata",
			B_PAGE_SIZE, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			CREATE_AREA_DONT_CLEAR, 0, &virtualRestrictions,
			&physicalRestrictions, (void**)&metaData);
		if (area < 0)
			continue;

		if (!findPrevious) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		if (metaData->fMagic1 == kMetaDataMagic1
			&& metaData->fMagic2 == kMetaDataMagic2
			&& metaData->fMagic3 == kMetaDataMagic3) {
			_area = area;
			_metaData = metaData;
			return B_OK;
		}

		delete_area(area);
	}

	if (findPrevious)
		return B_ENTRY_NOT_FOUND;

	// We could not allocate an area at any of the standard locations.
	// Instead of failing entirely, we use the static meta data. The tracing
	// buffer won't be reattachable in the next session, but at least we can
	// use it in this session.
	_metaData = &sFallbackTracingMetaData;
	return B_OK;
}


bool
TracingMetaData::_InitPreviousTracingData()
{
	// TODO: ATM re-attaching the previous tracing buffer doesn't work very
	// well. The entries should be checked more thoroughly for validity -- e.g.
	// the pointers to the entries' vtable pointers could be invalid, which can
	// make the "traced" command quite unusable. The validity of the entries
	// could be checked in a safe environment (i.e. with a fault handler) with
	// typeid() and call of a virtual function.
	return false;

	addr_t bufferStart
		= (addr_t)fTraceOutputBuffer + kTraceOutputBufferSize;
	addr_t bufferEnd = bufferStart + MAX_TRACE_SIZE;

	if (bufferStart > bufferEnd || (addr_t)fBuffer != bufferStart
		|| (addr_t)fFirstEntry % sizeof(trace_entry) != 0
		|| (addr_t)fFirstEntry < bufferStart
		|| (addr_t)fFirstEntry + sizeof(trace_entry) >= bufferEnd
		|| (addr_t)fAfterLastEntry % sizeof(trace_entry) != 0
		|| (addr_t)fAfterLastEntry < bufferStart
		|| (addr_t)fAfterLastEntry > bufferEnd
		|| fPhysicalAddress == 0) {
		dprintf("Failed to init tracing meta data: Sanity checks "
			"failed.\n");
		return false;
	}

	// re-map the previous tracing buffer
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address = fTraceOutputBuffer;
	virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fPhysicalAddress;
	physicalRestrictions.high_address = fPhysicalAddress
		+ ROUNDUP(kTraceOutputBufferSize + MAX_TRACE_SIZE, B_PAGE_SIZE);
	area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_CLEAR, 0,
		&virtualRestrictions, &physicalRestrictions, NULL);
	if (area < 0) {
		dprintf("Failed to init tracing meta data: Mapping tracing log "
			"buffer failed: %s\n", strerror(area));
		return false;
	}

	dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n",
		fTraceOutputBuffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);

	// verify/repair the tracing entry list
	uint32 errorCount = 0;
	uint32 entryCount = 0;
	uint32 nonBufferEntryCount = 0;
	uint32 previousEntrySize = 0;
	trace_entry* entry = fFirstEntry;
	while (errorCount <= kMaxRecoveringErrorCount) {
		// check previous entry size
		if (entry->previous_size != previousEntrySize) {
			if (entry != fFirstEntry) {
				dprintf("ktrace recovering: entry %p: fixing previous_size: "
					"%" B_PRIu32 " (should be %" B_PRIu32 ")\n", entry,
					entry->previous_size, previousEntrySize);
				errorCount++;
			}
			entry->previous_size = previousEntrySize;
		}

		if (entry == fAfterLastEntry)
			break;

		// check size field
		if ((entry->flags & WRAP_ENTRY) == 0 && entry->size == 0) {
			dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
				entry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry->size > uint32(fBuffer + kBufferSize - entry)) {
			dprintf("ktrace recovering: entry %p: size too big: %" B_PRIu32 "\n",
				entry, entry->size);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry < fAfterLastEntry && entry + entry->size > fAfterLastEntry) {
			dprintf("ktrace recovering: entry %p: entry crosses "
				"fAfterLastEntry (%p)\n", entry, fAfterLastEntry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		// check for wrap entry
		if ((entry->flags & WRAP_ENTRY) != 0) {
			if ((uint32)(fBuffer + kBufferSize - entry)
					> kMaxTracingEntryByteSize / sizeof(trace_entry)) {
				dprintf("ktrace recovering: entry %p: wrap entry at invalid "
					"buffer location\n", entry);
				errorCount++;
			}

			if (entry->size != 0) {
				dprintf("ktrace recovering: entry %p: invalid wrap entry "
					"size: %" B_PRIu32 "\n", entry, entry->size);
				errorCount++;
				entry->size = 0;
			}

			previousEntrySize = fBuffer + kBufferSize - entry;
			entry = fBuffer;
			continue;
		}

		if ((entry->flags & BUFFER_ENTRY) == 0) {
			entry->flags |= CHECK_ENTRY;
			nonBufferEntryCount++;
		}

		entryCount++;
		previousEntrySize = entry->size;

		entry += entry->size;
	}

	if (errorCount > kMaxRecoveringErrorCount) {
		dprintf("ktrace recovering: Too many errors.\n");
		fAfterLastEntry = entry;
		fAfterLastEntry->previous_size = previousEntrySize;
	}

	dprintf("ktrace recovering: Recovered %" B_PRIu32 " entries + %" B_PRIu32
		" buffer entries from previous session. Expected %" B_PRIu32
		" entries.\n", nonBufferEntryCount, entryCount - nonBufferEntryCount,
		fEntries);
	fEntries = nonBufferEntryCount;

	B_INITIALIZE_SPINLOCK(&fLock);

	// TODO: Actually check the entries! Do that when first accessing the
	// tracing buffer from the kernel debugger (when sTracingDataRecovered is
	// true).
	sTracingDataRecovered = true;
	return true;
}


#endif	// ENABLE_TRACING


// #pragma mark -


TraceOutput::TraceOutput(char* buffer, size_t bufferSize, uint32 flags)
	: fBuffer(buffer),
	fCapacity(bufferSize),
	fFlags(flags)
{
	Clear();
}


void
TraceOutput::Clear()
{
	if (fCapacity > 0)
		fBuffer[0] = '\0';
	fSize = 0;
}


void
TraceOutput::PrintArgs(const char* format, va_list args)
{
#if ENABLE_TRACING
	if (IsFull())
		return;

	size_t length = vsnprintf(fBuffer + fSize, fCapacity - fSize, format, args);
	fSize += std::min(length, fCapacity - fSize - 1);
#endif
}


void
TraceOutput::PrintStackTrace(tracing_stack_trace* stackTrace)
{
#if ENABLE_TRACING
	print_stack_trace(stackTrace, TraceOutputPrint(*this));
#endif
}


void
TraceOutput::SetLastEntryTime(bigtime_t time)
{
	fLastEntryTime = time;
}


bigtime_t
TraceOutput::LastEntryTime() const
{
	return fLastEntryTime;
}


// #pragma mark -


TraceEntry::TraceEntry()
{
}


TraceEntry::~TraceEntry()
{
}


void
TraceEntry::Dump(TraceOutput& out)
{
#if ENABLE_TRACING
	// to be overridden by subclasses
	out.Print("ENTRY %p", this);
#endif
}


void
TraceEntry::DumpStackTrace(TraceOutput& out)
{
}


void
TraceEntry::Initialized()
{
#if ENABLE_TRACING
	ToTraceEntry()->flags |= ENTRY_INITIALIZED;
	sTracingMetaData->IncrementEntriesEver();
#endif
}


void*
TraceEntry::operator new(size_t size, const std::nothrow_t&) throw()
{
#if ENABLE_TRACING
	trace_entry* entry = sTracingMetaData->AllocateEntry(
		size + sizeof(trace_entry), 0);
	return entry != NULL ? entry + 1 : NULL;
#endif
	return NULL;
}


// #pragma mark -


AbstractTraceEntry::~AbstractTraceEntry()
{
}


void
AbstractTraceEntry::Dump(TraceOutput& out)
{
	bigtime_t time = (out.Flags() & TRACE_OUTPUT_DIFF_TIME)
		? fTime - out.LastEntryTime()
		: fTime;

	if (out.Flags() & TRACE_OUTPUT_TEAM_ID) {
		out.Print("[%6" B_PRId32 ":%6" B_PRId32 "] %10" B_PRId64 ": ", fThread,
			fTeam, time);
	} else
		out.Print("[%6" B_PRId32 "] %10" B_PRId64 ": ", fThread, time);

	AddDump(out);

	out.SetLastEntryTime(fTime);
}


void
AbstractTraceEntry::AddDump(TraceOutput& out)
{
}


void
AbstractTraceEntry::_Init()
{
	Thread* thread = thread_get_current_thread();
	if (thread != NULL) {
		fThread = thread->id;
		if (thread->team)
			fTeam = thread->team->id;
	}
	fTime = system_time();
}


// #pragma mark - AbstractTraceEntryWithStackTrace


AbstractTraceEntryWithStackTrace::AbstractTraceEntryWithStackTrace(
	size_t stackTraceDepth, size_t skipFrames, bool kernelOnly)
{
	fStackTrace = capture_tracing_stack_trace(stackTraceDepth, skipFrames + 1,
		kernelOnly);
}


void
AbstractTraceEntryWithStackTrace::DumpStackTrace(TraceOutput& out)
{
	out.PrintStackTrace(fStackTrace);
}


// #pragma mark -


#if ENABLE_TRACING

class KernelTraceEntry : public AbstractTraceEntry {
public:
	KernelTraceEntry(const char* message)
	{
		fMessage = alloc_tracing_buffer_strcpy(message, 256, false);

#if KTRACE_PRINTF_STACK_TRACE
		fStackTrace = capture_tracing_stack_trace(
			KTRACE_PRINTF_STACK_TRACE, 1, false);
#endif
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("kern: %s", fMessage);
	}

#if KTRACE_PRINTF_STACK_TRACE
	virtual void DumpStackTrace(TraceOutput& out)
	{
		out.PrintStackTrace(fStackTrace);
	}
#endif

private:
	char*	fMessage;
#if KTRACE_PRINTF_STACK_TRACE
	tracing_stack_trace*	fStackTrace;
#endif
};


class UserTraceEntry : public AbstractTraceEntry {
public:
	UserTraceEntry(const char* message)
	{
		fMessage = alloc_tracing_buffer_strcpy(message, 256, true);

#if KTRACE_PRINTF_STACK_TRACE
		fStackTrace = capture_tracing_stack_trace(
			KTRACE_PRINTF_STACK_TRACE, 1, false);
#endif
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("user: %s", fMessage);
	}

#if KTRACE_PRINTF_STACK_TRACE
	virtual void DumpStackTrace(TraceOutput& out)
	{
		out.PrintStackTrace(fStackTrace);
	}
#endif

private:
	char*	fMessage;
#if KTRACE_PRINTF_STACK_TRACE
	tracing_stack_trace*	fStackTrace;
#endif
};


class TracingLogStartEntry : public AbstractTraceEntry {
public:
	TracingLogStartEntry()
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("ktrace start");
	}
};

#endif	// ENABLE_TRACING


// #pragma mark - trace filters


TraceFilter::~TraceFilter()
{
}


bool
TraceFilter::Filter(const TraceEntry* entry, LazyTraceOutput& out)
{
	return false;
}


class ThreadTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
	{
		const AbstractTraceEntry* entry
			= dynamic_cast<const AbstractTraceEntry*>(_entry);
		return (entry != NULL && entry->ThreadID() == fThread);
	}
};


class TeamTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* _entry, LazyTraceOutput& out)
	{
		const AbstractTraceEntry* entry
			= dynamic_cast<const AbstractTraceEntry*>(_entry);
		return (entry != NULL && entry->TeamID() == fTeam);
	}
};


class PatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return strstr(out.DumpEntry(entry), fString) != NULL;
	}
};


class DecimalPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%" B_PRId64, fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};


class HexPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		// TODO: this is *very* slow
		char buffer[64];
		snprintf(buffer, sizeof(buffer), "%" B_PRIx64, fValue);
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};


class StringPatternTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		if (IS_KERNEL_ADDRESS(fValue))
			return strstr(out.DumpEntry(entry), (const char*)fValue) != NULL;

		// TODO: this is *very* slow
		char buffer[64];
		user_strlcpy(buffer, (const char*)fValue, sizeof(buffer));
		return strstr(out.DumpEntry(entry), buffer) != NULL;
	}
};


class NotTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return !fSubFilters.first->Filter(entry, out);
	}
};


class AndTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			&& fSubFilters.second->Filter(entry, out);
	}
};


class OrTraceFilter : public TraceFilter {
public:
	virtual bool Filter(const TraceEntry* entry, LazyTraceOutput& out)
	{
		return fSubFilters.first->Filter(entry, out)
			|| fSubFilters.second->Filter(entry, out);
	}
};


class TraceFilterParser {
public:
	static TraceFilterParser* Default()
	{
		return &sParser;
	}

	bool Parse(int argc, const char* const* argv)
	{
		fTokens = argv;
		fTokenCount = argc;
		fTokenIndex = 0;
		fFilterCount = 0;

		TraceFilter* filter = _ParseExpression();
		return fTokenIndex == fTokenCount && filter != NULL;
	}

	TraceFilter* Filter()
	{
		return &fFilters[0];
	}

private:
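	// Recursive descent parser for the prefix filter expressions used by the
	// "traced" command, e.g.:
	//   #page                   (pattern match)
	//   and thread 42 #fault    (both subexpressions must match)
	//   or d#1234 x#deadbeef    (decimal/hex value match)
	// Filters are placement-new'ed into fFilters; returns NULL on error.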
	TraceFilter* _ParseExpression()
	{
		const char* token = _NextToken();
		if (!token) {
			// unexpected end of expression
			return NULL;
		}

		if (fFilterCount == MAX_FILTERS) {
			// too many filters
			return NULL;
		}

		if (token[0] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				PatternTraceFilter;
			filter->fString = token + 1;
			return filter;
		} else if (token[0] == 'd' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				DecimalPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 'x' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				HexPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (token[0] == 's' && token[1] == '#') {
			TraceFilter* filter = new(&fFilters[fFilterCount++])
				StringPatternTraceFilter;
			filter->fValue = parse_expression(token + 2);
			return filter;
		} else if (strcmp(token, "not") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) NotTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL)
				return filter;
			// The filter lives in the static fFilters array (placement new),
			// so destroy it explicitly rather than calling delete on it.
			filter->~TraceFilter();
			return NULL;
		} else if (strcmp(token, "and") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) AndTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			filter->~TraceFilter();
			return NULL;
		} else if (strcmp(token, "or") == 0) {
			TraceFilter* filter = new(&fFilters[fFilterCount++]) OrTraceFilter;
			if ((filter->fSubFilters.first = _ParseExpression()) != NULL
				&& (filter->fSubFilters.second = _ParseExpression()) != NULL) {
				return filter;
			}
			filter->~TraceFilter();
			return NULL;
		} else if (strcmp(token, "thread") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				ThreadTraceFilter;
			filter->fThread = strtol(arg, NULL, 0);
			return filter;
		} else if (strcmp(token, "team") == 0) {
			const char* arg = _NextToken();
			if (arg == NULL) {
				// unexpected end of expression
				return NULL;
			}

			TraceFilter* filter = new(&fFilters[fFilterCount++])
				TeamTraceFilter;
			filter->fTeam = strtol(arg, NULL, 0);
			return filter;
		} else {
			// invalid token
			return NULL;
		}
	}

	const char* _CurrentToken() const
	{
		if (fTokenIndex >= 1 && fTokenIndex <= fTokenCount)
			return fTokens[fTokenIndex - 1];
		return NULL;
	}

	const char* _NextToken()
	{
		if (fTokenIndex >= fTokenCount)
			return NULL;
		return fTokens[fTokenIndex++];
	}

private:
	enum { MAX_FILTERS = 32 };

	const char* const*	fTokens;
	int					fTokenCount;
	int					fTokenIndex;
	TraceFilter			fFilters[MAX_FILTERS];
	int					fFilterCount;

	static TraceFilterParser sParser;
};


TraceFilterParser TraceFilterParser::sParser;


// #pragma mark -


#if ENABLE_TRACING


TraceEntry*
TraceEntryIterator::Next()
{
	if (fIndex == 0) {
		fEntry = _NextNonBufferEntry(sTracingMetaData->FirstEntry());
		fIndex = 1;
	} else if (fEntry != NULL) {
		fEntry = _NextNonBufferEntry(sTracingMetaData->NextEntry(fEntry));
		fIndex++;
	}

	return Current();
}


TraceEntry*
TraceEntryIterator::Previous()
{
	if (fIndex == (int32)sTracingMetaData->Entries() + 1)
		fEntry = sTracingMetaData->AfterLastEntry();

	if (fEntry != NULL) {
		fEntry = _PreviousNonBufferEntry(
			sTracingMetaData->PreviousEntry(fEntry));
		fIndex--;
	}

	return Current();
}


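/*!	Moves the iterator to the entry with the given index. Indices are
	Pascal-like, i.e. in [1, Entries()]; 0 means "before the first entry" and
	Entries() + 1 "after the last". Iterates via Next()/Previous() along the
	shortest path from the current position.
*/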
TraceEntry*
TraceEntryIterator::MoveTo(int32 index)
{
	if (index == fIndex)
		return Current();

	if (index <= 0 || index > (int32)sTracingMetaData->Entries()) {
		fIndex = (index <= 0 ? 0 : sTracingMetaData->Entries() + 1);
		fEntry = NULL;
		return NULL;
	}

	// get the shortest iteration path
	int32 distance = index - fIndex;
	int32 direction = distance < 0 ? -1 : 1;
	distance *= direction;

	if (index < distance) {
		distance = index;
		direction = 1;
		fEntry = NULL;
		fIndex = 0;
	}
	if ((int32)sTracingMetaData->Entries() + 1 - fIndex < distance) {
		distance = sTracingMetaData->Entries() + 1 - fIndex;
		direction = -1;
		fEntry = NULL;
		fIndex = sTracingMetaData->Entries() + 1;
	}

	// iterate to the index
	if (direction < 0) {
		while (fIndex != index)
			Previous();
	} else {
		while (fIndex != index)
			Next();
	}

	return Current();
}


trace_entry*
TraceEntryIterator::_NextNonBufferEntry(trace_entry* entry)
{
	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
		entry = sTracingMetaData->NextEntry(entry);

	return entry;
}


trace_entry*
TraceEntryIterator::_PreviousNonBufferEntry(trace_entry* entry)
{
	while (entry != NULL && (entry->flags & BUFFER_ENTRY) != 0)
		entry = sTracingMetaData->PreviousEntry(entry);

	return entry;
}


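/*!	Implements the "traced" debugger command. Keeps its iteration state in
	static variables, so that a subsequent "traced forward"/"traced backward"
	(or an empty command line) can continue where the previous invocation
	left off.
*/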
int
dump_tracing_internal(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
	int argi = 1;

	// variables in which we store our state to be continuable
	static int32 _previousCount = 0;
	static bool _previousHasFilter = false;
	static bool _previousPrintStackTrace = false;
	static int32 _previousMaxToCheck = 0;
	static int32 _previousFirstChecked = 1;
	static int32 _previousLastChecked = -1;
	static int32 _previousDirection = 1;
	static uint32 _previousEntriesEver = 0;
	static uint32 _previousEntries = 0;
	static uint32 _previousOutputFlags = 0;
	static TraceEntryIterator iterator;

	uint32 entriesEver = sTracingMetaData->EntriesEver();

	// Note: start and index are Pascal-like indices (i.e. in [1, Entries()]).
	int32 start = 0;	// special index: print the last count entries
	int32 count = 0;
	int32 maxToCheck = 0;
	int32 cont = 0;

	bool hasFilter = false;
	bool printStackTrace = false;

	uint32 outputFlags = 0;
	while (argi < argc) {
		if (strcmp(argv[argi], "--difftime") == 0) {
			outputFlags |= TRACE_OUTPUT_DIFF_TIME;
			argi++;
		} else if (strcmp(argv[argi], "--printteam") == 0) {
			outputFlags |= TRACE_OUTPUT_TEAM_ID;
			argi++;
		} else if (strcmp(argv[argi], "--stacktrace") == 0) {
			printStackTrace = true;
			argi++;
		} else
			break;
	}

	if (argi < argc) {
		if (strcmp(argv[argi], "forward") == 0) {
			cont = 1;
			argi++;
		} else if (strcmp(argv[argi], "backward") == 0) {
			cont = -1;
			argi++;
		}
	} else
		cont = _previousDirection;

	if (cont != 0) {
		if (argi < argc) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
		if (entriesEver == 0 || entriesEver != _previousEntriesEver
			|| sTracingMetaData->Entries() != _previousEntries) {
			kprintf("Can't continue iteration. \"%s\" has not been invoked "
				"before, or there were new entries written since the last "
				"invocation.\n", argv[0]);
			return 0;
		}
	}

	// get start, count, maxToCheck
	int32* params[3] = { &start, &count, &maxToCheck };
	for (int i = 0; i < 3 && !hasFilter && argi < argc; i++) {
		if (strcmp(argv[argi], "filter") == 0) {
			hasFilter = true;
			argi++;
		} else if (argv[argi][0] == '#') {
			hasFilter = true;
		} else {
			*params[i] = parse_expression(argv[argi]);
			argi++;
		}
	}

	// filter specification
	if (argi < argc) {
		hasFilter = true;
		if (strcmp(argv[argi], "filter") == 0)
			argi++;

		if (!TraceFilterParser::Default()->Parse(argc - argi, argv + argi)) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	int32 direction;
	int32 firstToCheck;
	int32 lastToCheck;

	if (cont != 0) {
		// get values from the previous iteration
		direction = cont;
		count = _previousCount;
		maxToCheck = _previousMaxToCheck;
		hasFilter = _previousHasFilter;
		outputFlags = _previousOutputFlags;
		printStackTrace = _previousPrintStackTrace;

		if (direction < 0)
			start = _previousFirstChecked - 1;
		else
			start = _previousLastChecked + 1;
	} else {
		// defaults for count and maxToCheck
		if (count == 0)
			count = 30;
		if (maxToCheck == 0 || !hasFilter)
			maxToCheck = count;
		else if (maxToCheck < 0)
			maxToCheck = sTracingMetaData->Entries();

		// determine iteration direction
		direction = (start <= 0 || count < 0 ? -1 : 1);

		// validate count and maxToCheck
		if (count < 0)
			count = -count;
		if (maxToCheck < 0)
			maxToCheck = -maxToCheck;
		if (maxToCheck > (int32)sTracingMetaData->Entries())
			maxToCheck = sTracingMetaData->Entries();
		if (count > maxToCheck)
			count = maxToCheck;

		// validate start
		if (start <= 0 || start > (int32)sTracingMetaData->Entries())
			start = max_c(1, sTracingMetaData->Entries());
	}

	if (direction < 0) {
		firstToCheck = max_c(1, start - maxToCheck + 1);
		lastToCheck = start;
	} else {
		firstToCheck = start;
		lastToCheck = min_c((int32)sTracingMetaData->Entries(),
			start + maxToCheck - 1);
	}

	// reset the iterator, if something changed in the meantime
	if (entriesEver == 0 || entriesEver != _previousEntriesEver
		|| sTracingMetaData->Entries() != _previousEntries) {
		iterator.Reset();
	}

	LazyTraceOutput out(sTracingMetaData->TraceOutputBuffer(),
		kTraceOutputBufferSize, outputFlags);

	bool markedMatching = false;
	int32 firstToDump = firstToCheck;
	int32 lastToDump = lastToCheck;

	TraceFilter* filter = NULL;
	if (hasFilter)
		filter = TraceFilterParser::Default()->Filter();

	if (wrapperFilter != NULL) {
		wrapperFilter->Init(filter, direction, cont != 0);
		filter = wrapperFilter;
	}

	if (direction < 0 && filter && lastToCheck - firstToCheck >= count) {
		// iteration direction is backwards
		markedMatching = true;

		// From the last entry to check iterate backwards to check filter
		// matches.
		int32 matching = 0;

		// move to the entry after the last entry to check
		iterator.MoveTo(lastToCheck + 1);

		// iterate backwards
		firstToDump = -1;
		lastToDump = -1;
		while (iterator.Index() > firstToCheck) {
			TraceEntry* entry = iterator.Previous();
			if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
				out.Clear();
				if (filter->Filter(entry, out)) {
					entry->ToTraceEntry()->flags |= FILTER_MATCH;
					if (lastToDump == -1)
						lastToDump = iterator.Index();
					firstToDump = iterator.Index();

					matching++;
					if (matching >= count)
						break;
				} else
					entry->ToTraceEntry()->flags &= ~FILTER_MATCH;
			}
		}

		firstToCheck = iterator.Index();

		// iterate to the previous entry, so that the next loop starts at the
		// right one
		iterator.Previous();
	}

	out.SetLastEntryTime(0);

	// set the iterator to the entry before the first one to dump
	iterator.MoveTo(firstToDump - 1);

	// dump the entries matching the filter in the range
	// [firstToDump, lastToDump]
	int32 dumped = 0;

	while (TraceEntry* entry = iterator.Next()) {
		int32 index = iterator.Index();
		if (index < firstToDump)
			continue;
		if (index > lastToDump || dumped >= count) {
			if (direction > 0)
				lastToCheck = index - 1;
			break;
		}

		if ((entry->Flags() & ENTRY_INITIALIZED) != 0) {
			out.Clear();
			if (filter && (markedMatching
					? (entry->Flags() & FILTER_MATCH) == 0
					: !filter->Filter(entry, out))) {
				continue;
			}

			// don't print trailing new line
			const char* dump = out.DumpEntry(entry);
			int len = strlen(dump);
			if (len > 0 && dump[len - 1] == '\n')
				len--;

			kprintf("%5" B_PRId32 ". %.*s\n", index, len, dump);

			if (printStackTrace) {
				out.Clear();
				entry->DumpStackTrace(out);
				if (out.Size() > 0)
					kputs(out.Buffer());
			}
		} else if (!filter)
			kprintf("%5" B_PRId32 ". ** uninitialized entry **\n", index);

		dumped++;
	}

	kprintf("printed %" B_PRId32 " entries within range %" B_PRId32 " to %"
		B_PRId32 " (%" B_PRId32 " of %" B_PRId32 " total, %" B_PRId32 " ever)\n",
		dumped, firstToCheck, lastToCheck, lastToCheck - firstToCheck + 1,
		sTracingMetaData->Entries(), entriesEver);

	// store iteration state
	_previousCount = count;
	_previousMaxToCheck = maxToCheck;
	_previousHasFilter = hasFilter;
	_previousPrintStackTrace = printStackTrace;
	_previousFirstChecked = firstToCheck;
	_previousLastChecked = lastToCheck;
	_previousDirection = direction;
	_previousEntriesEver = entriesEver;
	_previousEntries = sTracingMetaData->Entries();
	_previousOutputFlags = outputFlags;

	return cont != 0 ? B_KDEBUG_CONT : 0;
}


static int
dump_tracing_command(int argc, char** argv)
{
	return dump_tracing_internal(argc, argv, NULL);
}


#endif	// ENABLE_TRACING


extern "C" uint8*
alloc_tracing_buffer(size_t size)
{
#if ENABLE_TRACING
	trace_entry* entry = sTracingMetaData->AllocateEntry(
		size + sizeof(trace_entry), BUFFER_ENTRY);
	if (entry == NULL)
		return NULL;

	return (uint8*)(entry + 1);
#else
	return NULL;
#endif
}


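/*!	Allocates a buffer entry in the tracing buffer and copies \a size bytes
	from \a source into it. With \a user set, \a source is treated (and
	verified) as a userland address. Returns \c NULL on failure.
*/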
uint8*
alloc_tracing_buffer_memcpy(const void* source, size_t size, bool user)
{
	if (user && !IS_USER_ADDRESS(source))
		return NULL;

	uint8* buffer = alloc_tracing_buffer(size);
	if (buffer == NULL)
		return NULL;

	if (user) {
		if (user_memcpy(buffer, source, size) != B_OK)
			return NULL;
	} else
		memcpy(buffer, source, size);

	return buffer;
}


char*
alloc_tracing_buffer_strcpy(const char* source, size_t maxSize, bool user)
{
	if (source == NULL || maxSize == 0)
		return NULL;

	if (user && !IS_USER_ADDRESS(source))
		return NULL;

	// limit maxSize to the actual source string length
	if (user) {
		ssize_t size = user_strlcpy(NULL, source, 0);
			// there's no user_strnlen()
		if (size < 0)
			return NULL;
		maxSize = min_c(maxSize, (size_t)size + 1);
	} else
		maxSize = strnlen(source, maxSize - 1) + 1;

	char* buffer = (char*)alloc_tracing_buffer(maxSize);
	if (buffer == NULL)
		return NULL;

	if (user) {
		if (user_strlcpy(buffer, source, maxSize) < B_OK)
			return NULL;
	} else
		strlcpy(buffer, source, maxSize);

	return buffer;
}


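/*!	Captures a stack trace of up to \a maxCount return addresses into a
	tracing buffer entry, skipping \a skipFrames frames (this function itself
	is skipped additionally). With interrupts disabled the trace is always
	restricted to the kernel, see the comment below. Returns \c NULL if no
	buffer space is available.
*/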
tracing_stack_trace*
capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool kernelOnly)
{
#if ENABLE_TRACING
	// page_fault_exception() doesn't allow us to gracefully handle a bad
	// address in the stack trace, if interrupts are disabled, so we always
	// restrict the stack traces to the kernel only in this case. A bad address
	// in the kernel stack trace would still cause a panic(), but this is
	// probably even desired.
	if (!are_interrupts_enabled())
		kernelOnly = true;

	tracing_stack_trace* stackTrace
		= (tracing_stack_trace*)alloc_tracing_buffer(
			sizeof(tracing_stack_trace) + maxCount * sizeof(addr_t));

	if (stackTrace != NULL) {
		stackTrace->depth = arch_debug_get_stack_trace(
			stackTrace->return_addresses, maxCount, 0, skipFrames + 1,
			STACK_TRACE_KERNEL | (kernelOnly ? 0 : STACK_TRACE_USER));
	}

	return stackTrace;
#else
	return NULL;
#endif
}


addr_t
tracing_find_caller_in_stack_trace(struct tracing_stack_trace* stackTrace,
	const addr_t excludeRanges[], uint32 excludeRangeCount)
{
	for (int32 i = 0; i < stackTrace->depth; i++) {
		addr_t returnAddress = stackTrace->return_addresses[i];

		bool inRange = false;
		for (uint32 j = 0; j < excludeRangeCount; j++) {
			if (returnAddress >= excludeRanges[j * 2 + 0]
				&& returnAddress < excludeRanges[j * 2 + 1]) {
				inRange = true;
				break;
			}
		}

		if (!inRange)
			return returnAddress;
	}

	return 0;
}


void
tracing_print_stack_trace(struct tracing_stack_trace* stackTrace)
{
#if ENABLE_TRACING
	print_stack_trace(stackTrace, kprintf);
#endif
}


int
dump_tracing(int argc, char** argv, WrapperTraceFilter* wrapperFilter)
{
#if ENABLE_TRACING
	return dump_tracing_internal(argc, argv, wrapperFilter);
#else
	return 0;
#endif
}


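/*!	Checks whether \a candidate is (still) a valid trace entry: it must lie
	within the occupied part of the tracing buffer and, if \a entryTime is
	non-negative, its time stamp must still match, i.e. the entry must not
	have been overwritten since it was recorded.
*/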
bool
tracing_is_entry_valid(AbstractTraceEntry* candidate, bigtime_t entryTime)
{
#if ENABLE_TRACING
	if (!sTracingMetaData->IsInBuffer(candidate, sizeof(*candidate)))
		return false;

	if (entryTime < 0)
		return true;

	TraceEntryIterator iterator;
	while (TraceEntry* entry = iterator.Next()) {
		AbstractTraceEntry* abstract = dynamic_cast<AbstractTraceEntry*>(entry);
		if (abstract == NULL)
			continue;

		if (abstract != candidate && abstract->Time() > entryTime)
			return false;

		return candidate->Time() == entryTime;
	}
#endif

	return false;
}


void
lock_tracing_buffer()
{
#if ENABLE_TRACING
	sTracingMetaData->Lock();
#endif
}


void
unlock_tracing_buffer()
{
#if ENABLE_TRACING
	sTracingMetaData->Unlock();
#endif
}


extern "C" status_t
tracing_init(void)
{
#if ENABLE_TRACING
	status_t result = TracingMetaData::Create(sTracingMetaData);
	if (result != B_OK) {
		memset(&sFallbackTracingMetaData, 0, sizeof(sFallbackTracingMetaData));
		sTracingMetaData = &sFallbackTracingMetaData;
		return result;
	}

	new(nothrow) TracingLogStartEntry();

	add_debugger_command_etc("traced", &dump_tracing_command,
		"Dump recorded trace entries",
		"[ --printteam ] [ --difftime ] [ --stacktrace ] "
			"(\"forward\" | \"backward\") "
			"| ([ <start> [ <count> [ <range> ] ] ] "
			"[ #<pattern> | (\"filter\" <filter>) ])\n"
		"Prints recorded trace entries. If \"backward\" or \"forward\" is\n"
		"specified, the command continues where the previous invocation left\n"
		"off, i.e. printing the previous respectively next entries (as many\n"
		"as printed before). In this case the command is continuable, that is\n"
		"afterwards entering an empty line in the debugger will reinvoke it.\n"
		"If no arguments are given, the command continues in the direction\n"
		"of the last invocation.\n"
		"--printteam  - enables printing the entries' team IDs.\n"
		"--difftime   - print difference times for all but the first entry.\n"
		"--stacktrace - print stack traces for entries that captured one.\n"
		"  <start>    - The base index of the entries to print. Depending on\n"
		"               whether the iteration direction is forward or\n"
		"               backward this will be the first or last entry printed\n"
		"               (potentially, if a filter is specified). The index of\n"
		"               the first entry in the trace buffer is 1. If 0 is\n"
		"               specified, the last <count> recorded entries are\n"
		"               printed (iteration direction is backward). Defaults\n"
		"               to 0.\n"
		"  <count>    - The number of entries to be printed. Defaults to 30.\n"
		"               If negative, the -<count> entries before and\n"
		"               including <start> will be printed.\n"
		"  <range>    - Only relevant if a filter is specified. Specifies the\n"
		"               number of entries to be filtered -- depending on the\n"
		"               iteration direction the entries before or after\n"
		"               <start>. If more than <count> entries match the\n"
		"               filter, only the first (forward) or last (backward)\n"
		"               <count> matching entries will be printed. If 0 is\n"
		"               specified <range> will be set to <count>. If -1,\n"
		"               <range> will be set to the number of recorded\n"
		"               entries.\n"
		"  <pattern>  - If specified only entries containing this string are\n"
		"               printed.\n"
		"  <filter>   - If specified only entries matching this filter\n"
		"               expression are printed. The expression can consist of\n"
		"               prefix operators \"not\", \"and\", \"or\", and\n"
		"               filters \"'thread' <thread>\" (matching entries\n"
		"               with the given thread ID), \"'team' <team>\"\n"
		"               (matching entries with the given team ID), and\n"
		"               \"#<pattern>\" (matching entries containing the given\n"
		"               string).\n", 0);
#endif	// ENABLE_TRACING
	return B_OK;
}


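/*!	Formats the given message (truncated to 255 characters) and records it
	as a KernelTraceEntry in the tracing buffer. A hypothetical usage
	example (any format supported by vsnprintf() works):
		ktrace_printf("team %" B_PRId32 ": exec %s", teamID, path);
*/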
void
ktrace_printf(const char *format, ...)
{
#if ENABLE_TRACING
	va_list list;
	va_start(list, format);

	char buffer[256];
	vsnprintf(buffer, sizeof(buffer), format, list);

	va_end(list);

	new(nothrow) KernelTraceEntry(buffer);
#endif	// ENABLE_TRACING
}


void
_user_ktrace_output(const char *message)
{
#if ENABLE_TRACING
	new(nothrow) UserTraceEntry(message);
#endif	// ENABLE_TRACING
}