/*
 * Copyright 2016, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <core_dump.h>

#include <errno.h>
#include <string.h>

#include <algorithm>
#include <new>

#include <BeBuild.h>
#include <ByteOrder.h>

#include <AutoDeleter.h>

#include <commpage.h>
#include <condition_variable.h>
#include <elf.h>
#include <kimage.h>
#include <ksignal.h>
#include <team.h>
#include <thread.h>
#include <user_debugger.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vm/vm.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>

#include "../cache/vnode_store.h"
#include "../vm/VMAddressSpaceLocking.h"


//#define TRACE_CORE_DUMP
#ifdef TRACE_CORE_DUMP
#	define TRACE(...) dprintf(__VA_ARGS__)
#else
#	define TRACE(...) do {} while (false)
#endif


namespace {


static const size_t kBufferSize = 1024 * 1024;
static const char* const kCoreNote = ELF_NOTE_CORE;
static const char* const kHaikuNote = ELF_NOTE_HAIKU;


struct Allocator {
	Allocator()
		:
		fAligned(NULL),
		fStrings(NULL),
		fAlignedCapacity(0),
		fStringCapacity(0),
		fAlignedSize(0),
		fStringSize(0)
	{
	}

	~Allocator()
	{
		free(fAligned);
	}

	bool HasMissingAllocations() const
	{
		return fAlignedSize > fAlignedCapacity
			|| fStringSize > fStringCapacity;
	}

	bool Reallocate()
	{
		free(fAligned);

		fAlignedCapacity = fAlignedSize;
		fStringCapacity = fStringSize;
		fAlignedSize = 0;
		fStringSize = 0;

		fAligned = (uint8*)malloc(fAlignedCapacity + fStringCapacity);
		if (fAligned == NULL)
			return false;
		fStrings = (char*)(fAligned + fAlignedCapacity);

		return true;
	}

	void* AllocateAligned(size_t size)
	{
		size_t offset = fAlignedSize;
		fAlignedSize += (size + 7) / 8 * 8;
		if (fAlignedSize <= fAlignedCapacity)
			return fAligned + offset;
		return NULL;
	}

	char* AllocateString(size_t length)
	{
		size_t offset = fStringSize;
		fStringSize += length + 1;
		if (fStringSize <= fStringCapacity)
			return fStrings + offset;
		return NULL;
	}

	template <typename Type>
	Type* New()
	{
		void* buffer = AllocateAligned(sizeof(Type));
		if (buffer == NULL)
			return NULL;
		return new(buffer) Type;
	}

	char* DuplicateString(const char* string)
	{
		if (string == NULL)
			return NULL;
		char* newString = AllocateString(strlen(string));
		if (newString != NULL)
			strcpy(newString, string);
		return newString;
	}

private:
	uint8*	fAligned;
	char*	fStrings;
	size_t	fAlignedCapacity;
	size_t	fStringCapacity;
	size_t	fAlignedSize;
	size_t	fStringSize;
};


struct TeamInfo : team_info {
};


struct ThreadState : DoublyLinkedListLinkImpl<ThreadState> {
	ThreadState()
		:
		fThread(NULL),
		fComplete(false)
	{
	}

	~ThreadState()
	{
		SetThread(NULL);
	}

	static ThreadState* Create()
	{
		ThreadState* state = new(std::nothrow) ThreadState;
		if (state == NULL)
			return NULL;
		return state;
	}

	Thread* GetThread() const
	{
		return fThread;
	}

	void SetThread(Thread* thread)
	{
		if (fThread != NULL)
			fThread->ReleaseReference();

		fThread = thread;

		if (fThread != NULL)
			fThread->AcquireReference();
	}

	/*! Invoke with thread lock and scheduler lock being held. */
	void GetState()
	{
		fState = fThread->state;
		fPriority = fThread->priority;
		fStackBase = fThread->user_stack_base;
		fStackEnd = fStackBase + fThread->user_stack_size;
		strlcpy(fName, fThread->name, sizeof(fName));
		if (arch_get_thread_debug_cpu_state(fThread, &fCpuState) != B_OK)
			memset(&fCpuState, 0, sizeof(fCpuState));
	}

	bool IsComplete() const
	{
		return fComplete;
	}

	void SetComplete(bool complete)
	{
		fComplete = complete;
	}

	int32 State() const
	{
		return fState;
	}

	int32 Priority() const
	{
		return fPriority;
	}

	addr_t StackBase() const
	{
		return fStackBase;
	}

	addr_t StackEnd() const
	{
		return fStackEnd;
	}

	const char* Name() const
	{
		return fName;
	}

	const debug_cpu_state* CpuState() const
	{
		return &fCpuState;
	}

private:
	Thread*			fThread;
	int32			fState;
	int32			fPriority;
	addr_t			fStackBase;
	addr_t			fStackEnd;
	char			fName[B_OS_NAME_LENGTH];
	debug_cpu_state	fCpuState;
	bool			fComplete;
};


typedef DoublyLinkedList<ThreadState> ThreadStateList;


struct ImageInfo : DoublyLinkedListLinkImpl<ImageInfo> {
	ImageInfo(struct image* image)
		:
		fId(image->info.basic_info.id),
		fType(image->info.basic_info.type),
		fDeviceId(image->info.basic_info.device),
		fNodeId(image->info.basic_info.node),
		fName(strdup(image->info.basic_info.name)),
		fInitRoutine((addr_t)image->info.basic_info.init_routine),
		fTermRoutine((addr_t)image->info.basic_info.term_routine),
		fText((addr_t)image->info.basic_info.text),
		fData((addr_t)image->info.basic_info.data),
		fTextSize(image->info.basic_info.text_size),
		fDataSize(image->info.basic_info.data_size),
		fTextDelta(image->info.text_delta),
		fSymbolTable((addr_t)image->info.symbol_table),
		fSymbolHash((addr_t)image->info.symbol_hash),
		fStringTable((addr_t)image->info.string_table),
		fSymbolTableData(NULL),
		fStringTableData(NULL),
		fSymbolCount(0),
		fStringTableSize(0)
	{
		if (fName != NULL && strcmp(fName, "commpage") == 0)
			_GetCommpageSymbols();
	}

	~ImageInfo()
	{
		free(fName);
		_FreeSymbolData();
	}

	static ImageInfo* Create(struct image* image)
	{
		ImageInfo* imageInfo = new(std::nothrow) ImageInfo(image);
		if (imageInfo == NULL || imageInfo->fName == NULL) {
			delete imageInfo;
			return NULL;
		}

		return imageInfo;
	}

	image_id Id() const
	{
		return fId;
	}

	image_type Type() const
	{
		return fType;
	}

	const char* Name() const
	{
		return fName;
	}

	dev_t DeviceId() const
	{
		return fDeviceId;
	}

	ino_t NodeId() const
	{
		return fNodeId;
	}

	addr_t InitRoutine() const
	{
		return fInitRoutine;
	}

	addr_t TermRoutine() const
	{
		return fTermRoutine;
	}

	addr_t TextBase() const
	{
		return fText;
	}

	size_t TextSize() const
	{
		return fTextSize;
	}

	ssize_t TextDelta() const
	{
		return fTextDelta;
	}

	addr_t DataBase() const
	{
		return fData;
	}

	size_t DataSize() const
	{
		return fDataSize;
	}

	addr_t SymbolTable() const
	{
		return fSymbolTable;
	}

	addr_t SymbolHash() const
	{
		return fSymbolHash;
	}

	addr_t StringTable() const
	{
		return fStringTable;
	}

	elf_sym* SymbolTableData() const
	{
		return fSymbolTableData;
	}

	char* StringTableData() const
	{
		return fStringTableData;
	}

	uint32 SymbolCount() const
	{
		return fSymbolCount;
	}

	size_t StringTableSize() const
	{
		return fStringTableSize;
	}

private:
	void _GetCommpageSymbols()
	{
		image_id commpageId = get_commpage_image();

		// get the size of the tables
		int32 symbolCount = 0;
		size_t stringTableSize = 0;
		status_t error = elf_read_kernel_image_symbols(commpageId, NULL,
			&symbolCount, NULL, &stringTableSize,
			NULL, true);
		if (error != B_OK)
			return;
		if (symbolCount == 0 || stringTableSize == 0)
			return;

		// allocate the tables
		fSymbolTableData = (elf_sym*)malloc(sizeof(elf_sym) * symbolCount);
		fStringTableData = (char*)malloc(stringTableSize);
		if (fSymbolTableData == NULL || fStringTableData == NULL) {
			_FreeSymbolData();
			return;
		}

		fSymbolCount = symbolCount;
		fStringTableSize = stringTableSize;

		// get the data
		error = elf_read_kernel_image_symbols(commpageId,
			fSymbolTableData, &symbolCount, fStringTableData, &stringTableSize,
			NULL, true);
		if (error != B_OK)
			_FreeSymbolData();
	}

	void _FreeSymbolData()
	{
		free(fSymbolTableData);
		free(fStringTableData);

		fSymbolTableData = NULL;
		fStringTableData = NULL;
		fSymbolCount = 0;
		fStringTableSize = 0;
	}

private:
	image_id	fId;
	image_type	fType;
	dev_t		fDeviceId;
	ino_t		fNodeId;
	char*		fName;
	addr_t		fInitRoutine;
	addr_t		fTermRoutine;
	addr_t		fText;
	addr_t		fData;
	size_t		fTextSize;
	size_t		fDataSize;
	ssize_t		fTextDelta;
	addr_t		fSymbolTable;
	addr_t		fSymbolHash;
	addr_t		fStringTable;
	// for commpage image
	elf_sym*	fSymbolTableData;
	char*		fStringTableData;
	uint32		fSymbolCount;
	size_t		fStringTableSize;
};


typedef DoublyLinkedList<ImageInfo> ImageInfoList;


struct AreaInfo : DoublyLinkedListLinkImpl<AreaInfo> {
	static AreaInfo* Create(Allocator& allocator, VMArea* area, size_t ramSize,
		dev_t deviceId, ino_t nodeId)
	{
		AreaInfo* areaInfo = allocator.New<AreaInfo>();
		const char* name = allocator.DuplicateString(area->name);

		if (areaInfo != NULL) {
			areaInfo->fId = area->id;
			areaInfo->fName = name;
			areaInfo->fBase = area->Base();
			areaInfo->fSize = area->Size();
			areaInfo->fLock = B_FULL_LOCK;
			areaInfo->fProtection = area->protection;
			areaInfo->fRamSize = ramSize;
			areaInfo->fDeviceId = deviceId;
			areaInfo->fNodeId = nodeId;
			areaInfo->fCacheOffset = area->cache_offset;
			areaInfo->fImageInfo = NULL;
		}

		return areaInfo;
	}

	area_id Id() const
	{
		return fId;
	}

	const char* Name() const
	{
		return fName;
	}

	addr_t Base() const
	{
		return fBase;
	}

	size_t Size() const
	{
		return fSize;
	}

	uint32 Lock() const
	{
		return fLock;
	}

	uint32 Protection() const
	{
		return fProtection;
	}

	size_t RamSize() const
	{
		return fRamSize;
	}

	off_t CacheOffset() const
	{
		return fCacheOffset;
	}

	dev_t DeviceId() const
	{
		return fDeviceId;
	}

	ino_t NodeId() const
	{
		return fNodeId;
	}

	ImageInfo* GetImageInfo() const
	{
		return fImageInfo;
	}

	void SetImageInfo(ImageInfo* imageInfo)
	{
		fImageInfo = imageInfo;
	}

private:
	area_id		fId;
	const char*	fName;
	addr_t		fBase;
	size_t		fSize;
	uint32		fLock;
	uint32		fProtection;
	size_t		fRamSize;
	dev_t		fDeviceId;
	ino_t		fNodeId;
	off_t		fCacheOffset;
	ImageInfo*	fImageInfo;
};


typedef DoublyLinkedList<AreaInfo> AreaInfoList;


struct BufferedFile {
	BufferedFile()
		:
		fFd(-1),
		fBuffer(NULL),
		fCapacity(0),
		fOffset(0),
		fBuffered(0),
		fStatus(B_NO_INIT)
	{
	}

	~BufferedFile()
	{
		if (fFd >= 0)
			close(fFd);

		free(fBuffer);
	}

	status_t Init(const char* path)
	{
		fCapacity = kBufferSize;
		fBuffer = (uint8*)malloc(fCapacity);
		if (fBuffer == NULL)
			return B_NO_MEMORY;

		fFd = open(path, O_WRONLY | O_CREAT | O_EXCL, S_IRUSR);
		if (fFd < 0)
			return errno;

		fStatus = B_OK;
		return B_OK;
	}

	status_t Status() const
	{
		return fStatus;
	}

	off_t EndOffset() const
	{
		return fOffset + (off_t)fBuffered;
	}

	status_t Flush()
	{
		if (fStatus != B_OK)
			return fStatus;

		if (fBuffered == 0)
			return B_OK;

		ssize_t written = pwrite(fFd, fBuffer, fBuffered, fOffset);
		if (written < 0)
			return fStatus = errno;
		if ((size_t)written != fBuffered)
			return fStatus = B_IO_ERROR;

		fOffset += (off_t)fBuffered;
		fBuffered = 0;
		return B_OK;
	}

	status_t Seek(off_t offset)
	{
		if (fStatus != B_OK)
			return fStatus;

		if (fBuffered == 0) {
			fOffset = offset;
		} else if (offset != fOffset + (off_t)fBuffered) {
			status_t error = Flush();
			if (error != B_OK)
				return fStatus = error;
			fOffset = offset;
		}

		return B_OK;
	}

	status_t Write(const void* data, size_t size)
	{
		if (fStatus != B_OK)
			return fStatus;

		if (size == 0)
			return B_OK;

		while (size > 0) {
			size_t toWrite = std::min(size, fCapacity - fBuffered);
			if (toWrite == 0) {
				status_t error = Flush();
				if (error != B_OK)
					return fStatus = error;
				continue;
			}

			// copy the next chunk and advance the source pointer
			memcpy(fBuffer + fBuffered, data, toWrite);
			data = (const uint8*)data + toWrite;
			fBuffered += toWrite;
			size -= toWrite;
		}

		return B_OK;
	}

	template<typename Data>
	status_t Write(const Data& data)
	{
		return Write(&data, sizeof(data));
	}

	status_t WriteAt(off_t offset, const void* data, size_t size)
	{
		if (Seek(offset) != B_OK)
			return fStatus;

		return Write(data, size);
	}

	status_t WriteUserArea(addr_t base, size_t size)
	{
		uint8* data = (uint8*)base;
		size = size / B_PAGE_SIZE * B_PAGE_SIZE;

		// copy the area page-wise into the buffer, flushing when necessary
		while (size > 0) {
			if (fBuffered + B_PAGE_SIZE > fCapacity) {
				status_t error = Flush();
				if (error != B_OK)
					return error;
			}

			if (user_memcpy(fBuffer + fBuffered, data, B_PAGE_SIZE) != B_OK)
				memset(fBuffer + fBuffered, 0, B_PAGE_SIZE);

			fBuffered += B_PAGE_SIZE;
			data += B_PAGE_SIZE;
			size -= B_PAGE_SIZE;
		}

		return B_OK;
	}

private:
	int			fFd;
	uint8*		fBuffer;
	size_t		fCapacity;
	off_t		fOffset;
	size_t		fBuffered;
	status_t	fStatus;
};


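// DummyWriter mirrors BufferedFile's Write() interface but only counts the
// bytes it is given. The templated _Write*Note() methods below are run
// against it first to measure a note's data size before the real data is
// written to the file.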
struct DummyWriter {
	DummyWriter()
		:
		fWritten(0)
	{
	}

	status_t Status() const
	{
		return B_OK;
	}

	size_t BytesWritten() const
	{
		return fWritten;
	}

	status_t Write(const void* data, size_t size)
	{
		fWritten += size;
		return B_OK;
	}

	template<typename Data>
	status_t Write(const Data& data)
	{
		return Write(&data, sizeof(data));
	}

private:
	size_t	fWritten;
};


struct CoreDumper {
	CoreDumper()
		:
		fCurrentThread(thread_get_current_thread()),
		fTeam(fCurrentThread->team),
		fFile(),
		fThreadCount(0),
		fThreadStates(),
		fPreAllocatedThreadStates(),
		fAreaInfoAllocator(),
		fAreaInfos(),
		fImageInfos(),
		fThreadBlockCondition()
	{
		fThreadBlockCondition.Init(this, "core dump");
	}

	~CoreDumper()
	{
		while (ThreadState* state = fThreadStates.RemoveHead())
			delete state;
		while (ThreadState* state = fPreAllocatedThreadStates.RemoveHead())
			delete state;
		while (ImageInfo* info = fImageInfos.RemoveHead())
			delete info;
	}

	status_t Dump(const char* path, bool killTeam)
	{
		// gcc thinks fTeam may be null in the atomic_or() below and warns,
		// which causes an error on some configs
		if (fTeam == NULL)
			return B_ERROR;

		// the path must be absolute
		if (path[0] != '/')
			return B_BAD_VALUE;

		AutoLocker<Team> teamLocker(fTeam);

		// indicate that we're dumping core
		if ((atomic_or(&fTeam->flags, TEAM_FLAG_DUMP_CORE)
				& TEAM_FLAG_DUMP_CORE) != 0) {
			return B_BUSY;
		}

		fTeam->SetCoreDumpCondition(&fThreadBlockCondition);

		int32 threadCount = _SetThreadsCoreDumpFlag(true);

		teamLocker.Unlock();

		// write the core file
		status_t error = _Dump(path, threadCount);

		// send kill signal, if requested
		if (killTeam)
			kill_team(fTeam->id);

		// clean up the team state and wake up waiting threads
		teamLocker.Lock();

		fTeam->SetCoreDumpCondition(NULL);

		atomic_and(&fTeam->flags, ~(int32)TEAM_FLAG_DUMP_CORE);

		_SetThreadsCoreDumpFlag(false);

		fThreadBlockCondition.NotifyAll();

		return error;
	}

private:
	status_t _Dump(const char* path, int32 threadCount)
	{
		status_t error = _GetTeamInfo();
		if (error != B_OK)
			return error;

		// pre-allocate a list of thread states
		if (!_AllocateThreadStates(threadCount))
			return B_NO_MEMORY;

		// collect the thread states
		_GetThreadStates();

		// collect the other team information
		if (!_GetAreaInfos() || !_GetImageInfos())
			return B_NO_MEMORY;

		// open the file
		error = fFile.Init(path);
		if (error != B_OK)
			return error;

		_PrepareCoreFileInfo();

		// write ELF header
		error = _WriteElfHeader();
		if (error != B_OK)
			return error;

		// write note segment
		error = _WriteNotes();
		if (error != B_OK)
			return error;

		size_t notesEndOffset = (size_t)fFile.EndOffset();
		fNoteSegmentSize = notesEndOffset - fNoteSegmentOffset;
		fFirstAreaSegmentOffset = (notesEndOffset + B_PAGE_SIZE - 1)
			/ B_PAGE_SIZE * B_PAGE_SIZE;

		error = _WriteProgramHeaders();
		if (error != B_OK)
			return error;

		// write area segments
		error = _WriteAreaSegments();
		if (error != B_OK)
			return error;

		return _WriteElfHeader();
	}

	int32 _SetThreadsCoreDumpFlag(bool setFlag)
	{
		int32 count = 0;

		for (Thread* thread = fTeam->thread_list; thread != NULL;
				thread = thread->team_next) {
			count++;
			if (setFlag) {
				atomic_or(&thread->flags,
					THREAD_FLAGS_TRAP_FOR_CORE_DUMP);
			} else {
				atomic_and(&thread->flags,
					~(int32)THREAD_FLAGS_TRAP_FOR_CORE_DUMP);
			}
		}

		return count;
	}

	status_t _GetTeamInfo()
	{
		return get_team_info(fTeam->id, &fTeamInfo);
	}

	bool _AllocateThreadStates(int32 count)
	{
		if (!_PreAllocateThreadStates(count))
			return false;

		TeamLocker teamLocker(fTeam);

		for (;;) {
			fThreadCount = 0;
			int32 missing = 0;

			for (Thread* thread = fTeam->thread_list; thread != NULL;
					thread = thread->team_next) {
				fThreadCount++;
				ThreadState* state = fPreAllocatedThreadStates.RemoveHead();
				if (state != NULL) {
					state->SetThread(thread);
					fThreadStates.Insert(state);
				} else
					missing++;
			}

			if (missing == 0)
				break;

			teamLocker.Unlock();

			fPreAllocatedThreadStates.MoveFrom(&fThreadStates);
			if (!_PreAllocateThreadStates(missing))
				return false;

			teamLocker.Lock();
		}

		return true;
	}

	bool _PreAllocateThreadStates(int32 count)
	{
		for (int32 i = 0; i < count; i++) {
			ThreadState* state = ThreadState::Create();
			if (state == NULL)
				return false;
			fPreAllocatedThreadStates.Insert(state);
		}

		return true;
	}

	void _GetThreadStates()
	{
		for (;;) {
			bool missing = false;
			for (ThreadStateList::Iterator it = fThreadStates.GetIterator();
					ThreadState* state = it.Next();) {
				if (state->IsComplete())
					continue;

				Thread* thread = state->GetThread();
				AutoLocker<Thread> threadLocker(thread);
				if (thread->team != fTeam) {
					// no longer in our team -- i.e. dying and transferred to
					// the kernel team
					threadLocker.Unlock();
					it.Remove();
					delete state;
					fThreadCount--;
					continue;
				}

				InterruptsSpinLocker schedulerLocker(&thread->scheduler_lock);
				if (thread != fCurrentThread
						&& thread->state == B_THREAD_RUNNING) {
					missing = true;
					continue;
				}

				state->GetState();
				state->SetComplete(true);
			}

			if (!missing)
				break;

			// We still haven't got a state for all threads. Wait a moment and
			// try again.
			snooze(10000);
		}
	}

	bool _GetAreaInfos()
	{
		for (;;) {
			AddressSpaceReadLocker addressSpaceLocker(fTeam->address_space,
				true);

			for (VMAddressSpace::AreaIterator it
					= addressSpaceLocker.AddressSpace()->GetAreaIterator();
					VMArea* area = it.Next();) {

				VMCache* cache = vm_area_get_locked_cache(area);
				size_t ramSize = (size_t)cache->page_count * B_PAGE_SIZE;
					// simplified, but what the kernel uses as well ATM

				// iterate to the root cache and, if it is a mapped file, get
				// the file's node_ref
				while (VMCache* source = cache->source) {
					source->Lock();
					source->AcquireRefLocked();
					cache->ReleaseRefAndUnlock();
					cache = source;
				}

				dev_t deviceId = -1;
				ino_t nodeId = -1;
				if (cache->type == CACHE_TYPE_VNODE) {
					VMVnodeCache* vnodeCache = (VMVnodeCache*)cache;
					deviceId = vnodeCache->DeviceId();
					nodeId = vnodeCache->InodeId();
				}

				cache->ReleaseRefAndUnlock();

				AreaInfo* areaInfo = AreaInfo::Create(fAreaInfoAllocator, area,
					ramSize, deviceId, nodeId);

				if (areaInfo != NULL)
					fAreaInfos.Insert(areaInfo);
			}

			addressSpaceLocker.Unlock();

			if (!fAreaInfoAllocator.HasMissingAllocations())
				return true;

			if (!fAreaInfoAllocator.Reallocate())
				return false;
		}
	}

	bool _GetImageInfos()
	{
		return image_iterate_through_team_images(fTeam->id,
			&_GetImageInfoCallback, this) == NULL;
	}

	static bool _GetImageInfoCallback(struct image* image, void* cookie)
	{
		return ((CoreDumper*)cookie)->_GetImageInfo(image);
	}

	bool _GetImageInfo(struct image* image)
	{
		ImageInfo* info = ImageInfo::Create(image);
		if (info == NULL)
			return true;

		fImageInfos.Insert(info);
		return false;
	}

	void _PrepareCoreFileInfo()
	{
		// assign image infos to area infos where possible
		fAreaCount = 0;
		fMappedFilesCount = 0;
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			fAreaCount++;
			dev_t deviceId = areaInfo->DeviceId();
			if (deviceId < 0)
				continue;
			ImageInfo* imageInfo = _FindImageInfo(deviceId, areaInfo->NodeId());
			if (imageInfo != NULL) {
				areaInfo->SetImageInfo(imageInfo);
				fMappedFilesCount++;
			}
		}

		fImageCount = fImageInfos.Count();
		fSegmentCount = 1 + fAreaCount;
		fProgramHeadersOffset = sizeof(elf_ehdr);
		fNoteSegmentOffset = fProgramHeadersOffset
			+ sizeof(elf_phdr) * fSegmentCount;
	}

	ImageInfo* _FindImageInfo(dev_t deviceId, ino_t nodeId) const
	{
		for (ImageInfoList::ConstIterator it = fImageInfos.GetIterator();
				ImageInfo* info = it.Next();) {
			if (info->DeviceId() == deviceId && info->NodeId() == nodeId)
				return info;
		}

		return NULL;
	}

	status_t _WriteElfHeader()
	{
		elf_ehdr header;
		memset(&header, 0, sizeof(header));

		// e_ident
		header.e_ident[EI_MAG0] = ELFMAG[0];
		header.e_ident[EI_MAG1] = ELFMAG[1];
		header.e_ident[EI_MAG2] = ELFMAG[2];
		header.e_ident[EI_MAG3] = ELFMAG[3];
#ifdef B_HAIKU_64_BIT
		header.e_ident[EI_CLASS] = ELFCLASS64;
#else
		header.e_ident[EI_CLASS] = ELFCLASS32;
#endif
#if B_HOST_IS_LENDIAN
		header.e_ident[EI_DATA] = ELFDATA2LSB;
#else
		header.e_ident[EI_DATA] = ELFDATA2MSB;
#endif
		header.e_ident[EI_VERSION] = EV_CURRENT;

		// e_type
		header.e_type = ET_CORE;

		// e_machine
#if defined(__HAIKU_ARCH_X86)
		header.e_machine = EM_386;
#elif defined(__HAIKU_ARCH_X86_64)
		header.e_machine = EM_X86_64;
#elif defined(__HAIKU_ARCH_PPC)
		header.e_machine = EM_PPC64;
#elif defined(__HAIKU_ARCH_M68K)
		header.e_machine = EM_68K;
#elif defined(__HAIKU_ARCH_MIPSEL)
		header.e_machine = EM_MIPS;
#elif defined(__HAIKU_ARCH_ARM)
		header.e_machine = EM_ARM;
#elif defined(__HAIKU_ARCH_ARM64)
		header.e_machine = EM_AARCH64;
#elif defined(__HAIKU_ARCH_SPARC)
		header.e_machine = EM_SPARCV9;
#elif defined(__HAIKU_ARCH_RISCV64)
		header.e_machine = EM_RISCV;
#else
#	error Unsupported architecture!
#endif

		header.e_version = EV_CURRENT;
		header.e_entry = 0;
		header.e_phoff = sizeof(header);
		header.e_shoff = 0;
		header.e_flags = 0;
		header.e_ehsize = sizeof(header);
		header.e_phentsize = sizeof(elf_phdr);
		header.e_phnum = fSegmentCount;
		header.e_shentsize = sizeof(elf_shdr);
		header.e_shnum = 0;
		header.e_shstrndx = SHN_UNDEF;

		return fFile.WriteAt(0, &header, sizeof(header));
	}

	status_t _WriteProgramHeaders()
	{
		fFile.Seek(fProgramHeadersOffset);

		// write the header for the notes segment
		elf_phdr header;
		memset(&header, 0, sizeof(header));
		header.p_type = PT_NOTE;
		header.p_flags = 0;
		header.p_offset = fNoteSegmentOffset;
		header.p_vaddr = 0;
		header.p_paddr = 0;
		header.p_filesz = fNoteSegmentSize;
		header.p_memsz = 0;
		header.p_align = 0;
		fFile.Write(header);

		// write the headers for the area segments
		size_t segmentOffset = fFirstAreaSegmentOffset;
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			memset(&header, 0, sizeof(header));
			header.p_type = PT_LOAD;
			header.p_flags = 0;
			uint32 protection = areaInfo->Protection();
			if ((protection & B_READ_AREA) != 0)
				header.p_flags |= PF_READ;
			if ((protection & B_WRITE_AREA) != 0)
				header.p_flags |= PF_WRITE;
			if ((protection & B_EXECUTE_AREA) != 0)
				header.p_flags |= PF_EXECUTE;
			header.p_offset = segmentOffset;
			header.p_vaddr = areaInfo->Base();
			header.p_paddr = 0;
			header.p_filesz = areaInfo->Size();
			header.p_memsz = areaInfo->Size();
			header.p_align = 0;
			fFile.Write(header);

			segmentOffset += areaInfo->Size();
		}

		return fFile.Status();
	}

	status_t _WriteAreaSegments()
	{
		fFile.Seek(fFirstAreaSegmentOffset);

		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			status_t error = fFile.WriteUserArea(areaInfo->Base(),
				areaInfo->Size());
			if (error != B_OK)
				return error;
		}

		return fFile.Status();
	}

	status_t _WriteNotes()
	{
		status_t error = fFile.Seek((off_t)fNoteSegmentOffset);
		if (error != B_OK)
			return error;

		error = _WriteFilesNote();
		if (error != B_OK)
			return error;

		error = _WriteTeamNote();
		if (error != B_OK)
			return error;

		error = _WriteAreasNote();
		if (error != B_OK)
			return error;

		error = _WriteImagesNote();
		if (error != B_OK)
			return error;

		error = _WriteImageSymbolsNotes();
		if (error != B_OK)
			return error;

		error = _WriteThreadsNote();
		if (error != B_OK)
			return error;

		return B_OK;
	}

	template<typename Writer>
	void _WriteTeamNote(Writer& writer)
	{
		elf_note_team note;
		memset(&note, 0, sizeof(note));
		note.nt_id = fTeamInfo.team;
		note.nt_uid = fTeamInfo.uid;
		note.nt_gid = fTeamInfo.gid;
		writer.Write((uint32)sizeof(note));
		writer.Write(note);

		// write args
		const char* args = fTeamInfo.args;
		writer.Write(args, strlen(args) + 1);
	}

	status_t _WriteTeamNote()
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteTeamNote(dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kHaikuNote, NT_TEAM, dataSize);

		// write the note data
		_WriteTeamNote(fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	template<typename Writer>
	void _WriteFilesNote(Writer& writer)
	{
		// file count and table size
		writer.Write(fMappedFilesCount);
		writer.Write((size_t)B_PAGE_SIZE);

		// write table
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			if (areaInfo->GetImageInfo() == NULL)
				continue;

			// start address, end address, and file offset in pages
			writer.Write(areaInfo->Base());
			writer.Write(areaInfo->Base() + areaInfo->Size());
			writer.Write(size_t(areaInfo->CacheOffset() / B_PAGE_SIZE));
		}

		// write strings
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			ImageInfo* imageInfo = areaInfo->GetImageInfo();
			if (imageInfo == NULL)
				continue;

			const char* name = imageInfo->Name();
			writer.Write(name, strlen(name) + 1);
		}
	}

	status_t _WriteFilesNote()
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteFilesNote(dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kCoreNote, NT_FILE, dataSize);

		// write the note data
		_WriteFilesNote(fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	template<typename Writer>
	void _WriteAreasNote(Writer& writer)
	{
		// area count and entry size
		writer.Write((uint32)fAreaCount);
		writer.Write((uint32)sizeof(elf_note_area_entry));

		// write table
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			elf_note_area_entry entry;
			memset(&entry, 0, sizeof(entry));
			entry.na_id = areaInfo->Id();
			entry.na_lock = areaInfo->Lock();
			entry.na_protection = areaInfo->Protection();
			entry.na_base = areaInfo->Base();
			entry.na_size = areaInfo->Size();
			entry.na_ram_size = areaInfo->RamSize();
			writer.Write(entry);
		}

		// write strings
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			const char* name = areaInfo->Name();
			writer.Write(name, strlen(name) + 1);
		}
	}

	status_t _WriteAreasNote()
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteAreasNote(dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kHaikuNote, NT_AREAS, dataSize);

		// write the note data
		_WriteAreasNote(fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	template<typename Writer>
	void _WriteImagesNote(Writer& writer)
	{
		// image count and entry size
		writer.Write((uint32)fImageCount);
		writer.Write((uint32)sizeof(elf_note_image_entry));

		// write table
		for (ImageInfoList::Iterator it = fImageInfos.GetIterator();
				ImageInfo* imageInfo = it.Next();) {
			elf_note_image_entry entry;
			memset(&entry, 0, sizeof(entry));
			entry.ni_id = imageInfo->Id();
			entry.ni_type = imageInfo->Type();
			entry.ni_init_routine = imageInfo->InitRoutine();
			entry.ni_term_routine = imageInfo->TermRoutine();
			entry.ni_device = imageInfo->DeviceId();
			entry.ni_node = imageInfo->NodeId();
			entry.ni_text_base = imageInfo->TextBase();
			entry.ni_text_size = imageInfo->TextSize();
			entry.ni_data_base = imageInfo->DataBase();
			entry.ni_data_size = imageInfo->DataSize();
			entry.ni_text_delta = imageInfo->TextDelta();
			entry.ni_symbol_table = imageInfo->SymbolTable();
			entry.ni_symbol_hash = imageInfo->SymbolHash();
			entry.ni_string_table = imageInfo->StringTable();
			writer.Write(entry);
		}

		// write strings
		for (ImageInfoList::Iterator it = fImageInfos.GetIterator();
				ImageInfo* imageInfo = it.Next();) {
			const char* name = imageInfo->Name();
			writer.Write(name, strlen(name) + 1);
		}
	}

	status_t _WriteImagesNote()
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteImagesNote(dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kHaikuNote, NT_IMAGES, dataSize);

		// write the note data
		_WriteImagesNote(fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	status_t _WriteImageSymbolsNotes()
	{
		// write one note per image that has symbol data (i.e. the commpage)
		for (ImageInfoList::Iterator it = fImageInfos.GetIterator();
				ImageInfo* imageInfo = it.Next();) {
			if (imageInfo->SymbolTableData() == NULL
					|| imageInfo->StringTableData() == NULL) {
				continue;
			}

			status_t error = _WriteImageSymbolsNote(imageInfo);
			if (error != B_OK)
				return error;
		}

		return B_OK;
	}

	template<typename Writer>
	void _WriteImageSymbolsNote(const ImageInfo* imageInfo, Writer& writer)
	{
		uint32 symbolCount = imageInfo->SymbolCount();
		uint32 symbolEntrySize = (uint32)sizeof(elf_sym);

		writer.Write((int32)imageInfo->Id());
		writer.Write(symbolCount);
		writer.Write(symbolEntrySize);
		writer.Write(imageInfo->SymbolTableData(),
			symbolCount * symbolEntrySize);
		writer.Write(imageInfo->StringTableData(),
			imageInfo->StringTableSize());
	}

	status_t _WriteImageSymbolsNote(const ImageInfo* imageInfo)
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteImageSymbolsNote(imageInfo, dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kHaikuNote, NT_SYMBOLS, dataSize);

		// write the note data
		_WriteImageSymbolsNote(imageInfo, fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

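	// All notes emitted above and below share the standard ELF note layout,
	// produced by _WriteNoteHeader() and _WriteNotePadding() further down:
	// an Elf32_Nhdr (n_namesz, n_descsz, n_type), the owner name (kCoreNote
	// or kHaikuNote) padded to a multiple of 4 bytes, and the note data,
	// likewise padded to a multiple of 4 bytes. Each note's data size is
	// measured up front by running the respective writer against a
	// DummyWriter.
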
	template<typename Writer>
	void _WriteThreadsNote(Writer& writer)
	{
		// thread count and size of CPU state
		writer.Write((uint32)fThreadCount);
		writer.Write((uint32)sizeof(elf_note_thread_entry));
		writer.Write((uint32)sizeof(debug_cpu_state));

		// write table
		for (ThreadStateList::Iterator it = fThreadStates.GetIterator();
				ThreadState* state = it.Next();) {
			elf_note_thread_entry entry;
			memset(&entry, 0, sizeof(entry));
			entry.nth_id = state->GetThread()->id;
			entry.nth_state = state->State();
			entry.nth_priority = state->Priority();
			entry.nth_stack_base = state->StackBase();
			entry.nth_stack_end = state->StackEnd();
			writer.Write(&entry, sizeof(entry));
			writer.Write(state->CpuState(), sizeof(debug_cpu_state));
		}

		// write strings
		for (ThreadStateList::Iterator it = fThreadStates.GetIterator();
				ThreadState* state = it.Next();) {
			const char* name = state->Name();
			writer.Write(name, strlen(name) + 1);
		}
	}

	status_t _WriteThreadsNote()
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteThreadsNote(dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kHaikuNote, NT_THREADS, dataSize);

		// write the note data
		_WriteThreadsNote(fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	status_t _WriteNoteHeader(const char* name, uint32 type, uint32 dataSize)
	{
		// prepare and write the header
		Elf32_Nhdr noteHeader;
		memset(&noteHeader, 0, sizeof(noteHeader));
		size_t nameSize = strlen(name) + 1;
		noteHeader.n_namesz = nameSize;
		noteHeader.n_descsz = dataSize;
		noteHeader.n_type = type;
		fFile.Write(noteHeader);

		// write the name
		fFile.Write(name, nameSize);
		// pad the name to 4 byte alignment
		_WriteNotePadding(nameSize);
		return fFile.Status();
	}

	status_t _WriteNotePadding(size_t sizeToPad)
	{
		if (sizeToPad % 4 != 0) {
			uint8 pad[3] = {};
			fFile.Write(&pad, 4 - sizeToPad % 4);
		}
		return fFile.Status();
	}

private:
	Thread*				fCurrentThread;
	Team*				fTeam;
	BufferedFile		fFile;
	TeamInfo			fTeamInfo;
	size_t				fThreadCount;
	ThreadStateList		fThreadStates;
	ThreadStateList		fPreAllocatedThreadStates;
	Allocator			fAreaInfoAllocator;
	AreaInfoList		fAreaInfos;
	ImageInfoList		fImageInfos;
	ConditionVariable	fThreadBlockCondition;
	size_t				fSegmentCount;
	size_t				fProgramHeadersOffset;
	size_t				fNoteSegmentOffset;
	size_t				fNoteSegmentSize;
	size_t				fFirstAreaSegmentOffset;
	size_t				fAreaCount;
	size_t				fImageCount;
	size_t				fMappedFilesCount;
};


} // unnamed namespace


status_t
core_dump_write_core_file(const char* path, bool killTeam)
{
	TRACE("core_dump_write_core_file(\"%s\", %d): team: %" B_PRId32 "\n", path,
		killTeam, team_get_current_team_id());

	CoreDumper* coreDumper = new(std::nothrow) CoreDumper();
	if (coreDumper == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<CoreDumper> coreDumperDeleter(coreDumper);
	return coreDumper->Dump(path, killTeam);
}


void
core_dump_trap_thread()
{
	Thread* thread = thread_get_current_thread();
	ConditionVariableEntry conditionVariableEntry;
	TeamLocker teamLocker(thread->team);

	while ((atomic_get(&thread->flags) & THREAD_FLAGS_TRAP_FOR_CORE_DUMP)
			!= 0) {
		thread->team->CoreDumpCondition()->Add(&conditionVariableEntry);
		teamLocker.Unlock();
		conditionVariableEntry.Wait();
		teamLocker.Lock();
	}
}
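
// Usage sketch (illustrative only, not part of this file): kernel code that
// wants a core file for the current team calls the entry point above, e.g.
//
//	status_t error = core_dump_write_core_file("/tmp/team.core", false);
//	if (error != B_OK)
//		dprintf("core dump failed: %" B_PRId32 "\n", error);
//
// The path shown is hypothetical; it must be absolute and must not exist yet,
// since BufferedFile opens it with O_CREAT | O_EXCL. While the dump runs, the
// team's other threads are flagged with THREAD_FLAGS_TRAP_FOR_CORE_DUMP and
// block in core_dump_trap_thread() until the dump has finished.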