/*
 * Copyright 2016, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <core_dump.h>

#include <errno.h>
#include <string.h>

#include <algorithm>
#include <new>

#include <BeBuild.h>
#include <ByteOrder.h>

#include <AutoDeleter.h>

#include <commpage.h>
#include <condition_variable.h>
#include <elf.h>
#include <kimage.h>
#include <ksignal.h>
#include <team.h>
#include <thread.h>
#include <user_debugger.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vm/vm.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>

#include "../cache/vnode_store.h"
#include "../vm/VMAddressSpaceLocking.h"


//#define TRACE_CORE_DUMP
#ifdef TRACE_CORE_DUMP
#	define TRACE(...) dprintf(__VA_ARGS__)
#else
#	define TRACE(...) do {} while (false)
#endif


namespace {


static const size_t kBufferSize = 1024 * 1024;
static const char* const kCoreNote = ELF_NOTE_CORE;
static const char* const kHaikuNote = ELF_NOTE_HAIKU;


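// Allocator for the AreaInfo records and their name strings. It is used in
// two passes: a first pass runs with zero capacity and only tallies how much
// aligned and string storage would be needed (HasMissingAllocations() then
// returns true), Reallocate() sizes a single buffer accordingly, and the
// caller repeats the pass (see CoreDumper::_GetAreaInfos()).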
struct Allocator {
	Allocator()
		:
		fAligned(NULL),
		fStrings(NULL),
		fAlignedCapacity(0),
		fStringCapacity(0),
		fAlignedSize(0),
		fStringSize(0)
	{
	}

	~Allocator()
	{
		free(fAligned);
	}

	bool HasMissingAllocations() const
	{
		return fAlignedSize > fAlignedCapacity || fStringSize > fStringCapacity;
	}

	bool Reallocate()
	{
		free(fAligned);

		fAlignedCapacity = fAlignedSize;
		fStringCapacity = fStringSize;
		fAlignedSize = 0;
		fStringSize = 0;

		fAligned = (uint8*)malloc(fAlignedCapacity + fStringCapacity);
		if (fAligned == NULL)
			return false;
		fStrings = (char*)(fAligned + fAlignedCapacity);

		return true;
	}

	void* AllocateAligned(size_t size)
	{
		size_t offset = fAlignedSize;
		fAlignedSize += (size + 7) / 8 * 8;
		if (fAlignedSize <= fAlignedCapacity)
			return fAligned + offset;
		return NULL;
	}

	char* AllocateString(size_t length)
	{
		size_t offset = fStringSize;
		fStringSize += length + 1;
		if (fStringSize <= fStringCapacity)
			return fStrings + offset;
		return NULL;
	}

	template <typename Type>
	Type* New()
	{
		void* buffer = AllocateAligned(sizeof(Type));
		if (buffer == NULL)
			return NULL;
		return new(buffer) Type;
	}

	char* DuplicateString(const char* string)
	{
		if (string == NULL)
			return NULL;
		char* newString = AllocateString(strlen(string));
		if (newString != NULL)
			strcpy(newString, string);
		return newString;
	}

private:
	uint8*	fAligned;
	char*	fStrings;
	size_t	fAlignedCapacity;
	size_t	fStringCapacity;
	size_t	fAlignedSize;
	size_t	fStringSize;
};


struct TeamInfo : team_info {
};


struct ThreadState : DoublyLinkedListLinkImpl<ThreadState> {
	ThreadState()
		:
		fThread(NULL),
		fComplete(false)
	{
	}

	~ThreadState()
	{
		SetThread(NULL);
	}

	static ThreadState* Create()
	{
		ThreadState* state = new(std::nothrow) ThreadState;
		if (state == NULL)
			return NULL;
		return state;
	}

	Thread* GetThread() const
	{
		return fThread;
	}

	void SetThread(Thread* thread)
	{
		if (fThread != NULL)
			fThread->ReleaseReference();

		fThread = thread;

		if (fThread != NULL)
			fThread->AcquireReference();
	}

	/*!	Invoke with thread lock and scheduler lock being held. */
	void GetState()
	{
		fState = fThread->state;
		fPriority = fThread->priority;
		fStackBase = fThread->user_stack_base;
		fStackEnd = fStackBase + fThread->user_stack_size;
		strlcpy(fName, fThread->name, sizeof(fName));
		if (arch_get_thread_debug_cpu_state(fThread, &fCpuState) != B_OK)
			memset(&fCpuState, 0, sizeof(fCpuState));
	}

	bool IsComplete() const
	{
		return fComplete;
	}

	void SetComplete(bool complete)
	{
		fComplete = complete;
	}

	int32 State() const
	{
		return fState;
	}

	int32 Priority() const
	{
		return fPriority;
	}

	addr_t StackBase() const
	{
		return fStackBase;
	}

	addr_t StackEnd() const
	{
		return fStackEnd;
	}

	const char* Name() const
	{
		return fName;
	}

	const debug_cpu_state* CpuState() const
	{
		return &fCpuState;
	}

private:
	Thread*				fThread;
	int32				fState;
	int32				fPriority;
	addr_t				fStackBase;
	addr_t				fStackEnd;
	char				fName[B_OS_NAME_LENGTH];
	debug_cpu_state		fCpuState;
	bool				fComplete;
};


typedef DoublyLinkedList<ThreadState> ThreadStateList;


struct ImageInfo : DoublyLinkedListLinkImpl<ImageInfo> {
	ImageInfo(struct image* image)
		:
		fId(image->info.basic_info.id),
		fType(image->info.basic_info.type),
		fDeviceId(image->info.basic_info.device),
		fNodeId(image->info.basic_info.node),
		fName(strdup(image->info.basic_info.name)),
		fInitRoutine((addr_t)image->info.basic_info.init_routine),
		fTermRoutine((addr_t)image->info.basic_info.term_routine),
		fText((addr_t)image->info.basic_info.text),
		fData((addr_t)image->info.basic_info.data),
		fTextSize(image->info.basic_info.text_size),
		fDataSize(image->info.basic_info.data_size),
		fTextDelta(image->info.text_delta),
		fSymbolTable((addr_t)image->info.symbol_table),
		fSymbolHash((addr_t)image->info.symbol_hash),
		fStringTable((addr_t)image->info.string_table),
		fSymbolTableData(NULL),
		fStringTableData(NULL),
		fSymbolCount(0),
		fStringTableSize(0)
	{
		if (fName != NULL && strcmp(fName, "commpage") == 0)
			_GetCommpageSymbols();
	}

	~ImageInfo()
	{
		free(fName);
		_FreeSymbolData();
	}

	static ImageInfo* Create(struct image* image)
	{
		ImageInfo* imageInfo = new(std::nothrow) ImageInfo(image);
		if (imageInfo == NULL || imageInfo->fName == NULL) {
			delete imageInfo;
			return NULL;
		}

		return imageInfo;
	}

	image_id Id() const
	{
		return fId;
	}

	image_type Type() const
	{
		return fType;
	}

	const char* Name() const
	{
		return fName;
	}

	dev_t DeviceId() const
	{
		return fDeviceId;
	}

	ino_t NodeId() const
	{
		return fNodeId;
	}

	addr_t InitRoutine() const
	{
		return fInitRoutine;
	}

	addr_t TermRoutine() const
	{
		return fTermRoutine;
	}

	addr_t TextBase() const
	{
		return fText;
	}

	size_t TextSize() const
	{
		return fTextSize;
	}

	ssize_t TextDelta() const
	{
		return fTextDelta;
	}

	addr_t DataBase() const
	{
		return fData;
	}

	size_t DataSize() const
	{
		return fDataSize;
	}

	addr_t SymbolTable() const
	{
		return fSymbolTable;
	}

	addr_t SymbolHash() const
	{
		return fSymbolHash;
	}

	addr_t StringTable() const
	{
		return fStringTable;
	}

	elf_sym* SymbolTableData() const
	{
		return fSymbolTableData;
	}

	char* StringTableData() const
	{
		return fStringTableData;
	}

	uint32 SymbolCount() const
	{
		return fSymbolCount;
	}

	size_t StringTableSize() const
	{
		return fStringTableSize;
	}

private:
	void _GetCommpageSymbols()
	{
		image_id commpageId = get_commpage_image();

		// get the size of the tables
		int32 symbolCount = 0;
		size_t stringTableSize = 0;
		status_t error = elf_read_kernel_image_symbols(commpageId, NULL,
			&symbolCount, NULL, &stringTableSize, NULL, true);
		if (error != B_OK)
			return;
		if (symbolCount == 0 || stringTableSize == 0)
			return;

		// allocate the tables
		fSymbolTableData = (elf_sym*)malloc(sizeof(elf_sym) * symbolCount);
		fStringTableData = (char*)malloc(stringTableSize);
		if (fSymbolTableData == NULL || fStringTableData == NULL) {
			_FreeSymbolData();
			return;
		}

		fSymbolCount = symbolCount;
		fStringTableSize = stringTableSize;

		// get the data
		error = elf_read_kernel_image_symbols(commpageId,
			fSymbolTableData, &symbolCount, fStringTableData, &stringTableSize,
			NULL, true);
		if (error != B_OK)
			_FreeSymbolData();
	}

	void _FreeSymbolData()
	{
		free(fSymbolTableData);
		free(fStringTableData);

		fSymbolTableData = NULL;
		fStringTableData = NULL;
		fSymbolCount = 0;
		fStringTableSize = 0;
	}

private:
	image_id	fId;
	image_type	fType;
	dev_t		fDeviceId;
	ino_t		fNodeId;
	char*		fName;
	addr_t		fInitRoutine;
	addr_t		fTermRoutine;
	addr_t		fText;
	addr_t		fData;
	size_t		fTextSize;
	size_t		fDataSize;
	ssize_t		fTextDelta;
	addr_t		fSymbolTable;
	addr_t		fSymbolHash;
	addr_t		fStringTable;
	// for commpage image
	elf_sym*	fSymbolTableData;
	char*		fStringTableData;
	uint32		fSymbolCount;
	size_t		fStringTableSize;
};


typedef DoublyLinkedList<ImageInfo> ImageInfoList;


struct AreaInfo : DoublyLinkedListLinkImpl<AreaInfo> {
	static AreaInfo* Create(Allocator& allocator, VMArea* area, size_t ramSize,
		dev_t deviceId, ino_t nodeId)
	{
		AreaInfo* areaInfo = allocator.New<AreaInfo>();
		const char* name = allocator.DuplicateString(area->name);

		if (areaInfo != NULL) {
			areaInfo->fId = area->id;
			areaInfo->fName = name;
			areaInfo->fBase = area->Base();
			areaInfo->fSize = area->Size();
			areaInfo->fLock = B_FULL_LOCK;
			areaInfo->fProtection = area->protection;
			areaInfo->fRamSize = ramSize;
			areaInfo->fDeviceId = deviceId;
			areaInfo->fNodeId = nodeId;
			areaInfo->fCacheOffset = area->cache_offset;
			areaInfo->fImageInfo = NULL;
		}

		return areaInfo;
	}

	area_id Id() const
	{
		return fId;
	}

	const char* Name() const
	{
		return fName;
	}

	addr_t Base() const
	{
		return fBase;
	}

	size_t Size() const
	{
		return fSize;
	}

	uint32 Lock() const
	{
		return fLock;
	}

	uint32 Protection() const
	{
		return fProtection;
	}

	size_t RamSize() const
	{
		return fRamSize;
	}

	off_t CacheOffset() const
	{
		return fCacheOffset;
	}

	dev_t DeviceId() const
	{
		return fDeviceId;
	}

	ino_t NodeId() const
	{
		return fNodeId;
	}

	ImageInfo* GetImageInfo() const
	{
		return fImageInfo;
	}

	void SetImageInfo(ImageInfo* imageInfo)
	{
		fImageInfo = imageInfo;
	}

private:
	area_id		fId;
	const char*	fName;
	addr_t		fBase;
	size_t		fSize;
	uint32		fLock;
	uint32		fProtection;
	size_t		fRamSize;
	dev_t		fDeviceId;
	ino_t		fNodeId;
	off_t		fCacheOffset;
	ImageInfo*	fImageInfo;
};


typedef DoublyLinkedList<AreaInfo> AreaInfoList;


struct BufferedFile {
	BufferedFile()
		:
		fFd(-1),
		fBuffer(NULL),
		fCapacity(0),
		fOffset(0),
		fBuffered(0),
		fStatus(B_NO_INIT)
	{
	}

	~BufferedFile()
	{
		if (fFd >= 0)
			close(fFd);

		free(fBuffer);
	}

	status_t Init(const char* path)
	{
		fCapacity = kBufferSize;
		fBuffer = (uint8*)malloc(fCapacity);
		if (fBuffer == NULL)
			return B_NO_MEMORY;

		fFd = open(path, O_WRONLY | O_CREAT | O_EXCL, S_IRUSR);
		if (fFd < 0)
			return errno;

		fStatus = B_OK;
		return B_OK;
	}

	status_t Status() const
	{
		return fStatus;
	}

	off_t EndOffset() const
	{
		return fOffset + (off_t)fBuffered;
	}

	status_t Flush()
	{
		if (fStatus != B_OK)
			return fStatus;

		if (fBuffered == 0)
			return B_OK;

		ssize_t written = pwrite(fFd, fBuffer, fBuffered, fOffset);
		if (written < 0)
			return fStatus = errno;
		if ((size_t)written != fBuffered)
			return fStatus = B_IO_ERROR;

		fOffset += (off_t)fBuffered;
		fBuffered = 0;
		return B_OK;
	}

	status_t Seek(off_t offset)
	{
		if (fStatus != B_OK)
			return fStatus;

		if (fBuffered == 0) {
			fOffset = offset;
		} else if (offset != fOffset + (off_t)fBuffered) {
			status_t error = Flush();
			if (error != B_OK)
				return fStatus = error;
			fOffset = offset;
		}

		return B_OK;
	}

	status_t Write(const void* data, size_t size)
	{
		if (fStatus != B_OK)
			return fStatus;

		if (size == 0)
			return B_OK;

		const uint8* source = (const uint8*)data;
		while (size > 0) {
			size_t toWrite = std::min(size, fCapacity - fBuffered);
			if (toWrite == 0) {
				status_t error = Flush();
				if (error != B_OK)
					return fStatus = error;
				continue;
			}

			memcpy(fBuffer + fBuffered, source, toWrite);
			fBuffered += toWrite;
			// advance the source pointer, so a write that spans a flush
			// continues with the not yet copied bytes
			source += toWrite;
			size -= toWrite;
		}

		return B_OK;
	}

	template<typename Data>
	status_t Write(const Data& data)
	{
		return Write(&data, sizeof(data));
	}

	status_t WriteAt(off_t offset, const void* data, size_t size)
	{
		if (Seek(offset) != B_OK)
			return fStatus;

		return Write(data, size);
	}

	status_t WriteUserArea(addr_t base, size_t size)
	{
		uint8* data = (uint8*)base;
		size = size / B_PAGE_SIZE * B_PAGE_SIZE;

		// copy the area page-wise into the buffer, flushing when necessary
		while (size > 0) {
			if (fBuffered + B_PAGE_SIZE > fCapacity) {
				status_t error = Flush();
				if (error != B_OK)
					return error;
			}

			if (user_memcpy(fBuffer + fBuffered, data, B_PAGE_SIZE) != B_OK)
				memset(fBuffer + fBuffered, 0, B_PAGE_SIZE);

			fBuffered += B_PAGE_SIZE;
			data += B_PAGE_SIZE;
			size -= B_PAGE_SIZE;
		}

		return B_OK;
	}

private:
	int			fFd;
	uint8*		fBuffer;
	size_t		fCapacity;
	off_t		fOffset;
	size_t		fBuffered;
	status_t	fStatus;
};


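// A Writer that only counts the bytes it would write. Each note is generated
// twice: first into a DummyWriter to determine the size of the note's data
// (needed for the n_descsz field of the note header, which is written first),
// then into the actual BufferedFile.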
struct DummyWriter {
	DummyWriter()
		:
		fWritten(0)
	{
	}

	status_t Status() const
	{
		return B_OK;
	}

	size_t BytesWritten() const
	{
		return fWritten;
	}

	status_t Write(const void* data, size_t size)
	{
		fWritten += size;
		return B_OK;
	}

	template<typename Data>
	status_t Write(const Data& data)
	{
		return Write(&data, sizeof(data));
	}

private:
	size_t	fWritten;
};


struct CoreDumper {
	CoreDumper()
		:
		fCurrentThread(thread_get_current_thread()),
		fTeam(fCurrentThread->team),
		fFile(),
		fThreadCount(0),
		fThreadStates(),
		fPreAllocatedThreadStates(),
		fAreaInfoAllocator(),
		fAreaInfos(),
		fImageInfos(),
		fThreadBlockCondition()
	{
		fThreadBlockCondition.Init(this, "core dump");
	}

	~CoreDumper()
	{
		while (ThreadState* state = fThreadStates.RemoveHead())
			delete state;
		while (ThreadState* state = fPreAllocatedThreadStates.RemoveHead())
			delete state;
		while (ImageInfo* info = fImageInfos.RemoveHead())
			delete info;
	}

	status_t Dump(const char* path, bool killTeam)
	{
		// the path must be absolute
		if (path[0] != '/')
			return B_BAD_VALUE;

		AutoLocker<Team> teamLocker(fTeam);

		// indicate that we're dumping core
		if ((atomic_or(&fTeam->flags, TEAM_FLAG_DUMP_CORE)
				& TEAM_FLAG_DUMP_CORE) != 0) {
			return B_BUSY;
		}

		fTeam->SetCoreDumpCondition(&fThreadBlockCondition);

		int32 threadCount = _SetThreadsCoreDumpFlag(true);

		teamLocker.Unlock();

		// write the core file
		status_t error = _Dump(path, threadCount);

		// send kill signal, if requested
		if (killTeam)
			kill_team(fTeam->id);

		// clean up the team state and wake up waiting threads
		teamLocker.Lock();

		fTeam->SetCoreDumpCondition(NULL);

		atomic_and(&fTeam->flags, ~(int32)TEAM_FLAG_DUMP_CORE);

		_SetThreadsCoreDumpFlag(false);

		fThreadBlockCondition.NotifyAll();

		return error;
	}

private:
	status_t _Dump(const char* path, int32 threadCount)
	{
		status_t error = _GetTeamInfo();
		if (error != B_OK)
			return error;

		// pre-allocate a list of thread states
		if (!_AllocateThreadStates(threadCount))
			return B_NO_MEMORY;

		// collect the thread states
		_GetThreadStates();

		// collect the other team information
		if (!_GetAreaInfos() || !_GetImageInfos())
			return B_NO_MEMORY;

		// open the file
		error = fFile.Init(path);
		if (error != B_OK)
			return error;

		_PrepareCoreFileInfo();

		// write ELF header
		error = _WriteElfHeader();
		if (error != B_OK)
			return error;

		// write note segment
		error = _WriteNotes();
		if (error != B_OK)
			return error;

		size_t notesEndOffset = (size_t)fFile.EndOffset();
		fNoteSegmentSize = notesEndOffset - fNoteSegmentOffset;
		fFirstAreaSegmentOffset = (notesEndOffset + B_PAGE_SIZE - 1)
			/ B_PAGE_SIZE * B_PAGE_SIZE;

		error = _WriteProgramHeaders();
		if (error != B_OK)
			return error;

		// write area segments
		error = _WriteAreaSegments();
		if (error != B_OK)
			return error;

		return _WriteElfHeader();
	}

	int32 _SetThreadsCoreDumpFlag(bool setFlag)
	{
		int32 count = 0;

		for (Thread* thread = fTeam->thread_list; thread != NULL;
				thread = thread->team_next) {
			count++;
			if (setFlag) {
				atomic_or(&thread->flags, THREAD_FLAGS_TRAP_FOR_CORE_DUMP);
			} else {
				atomic_and(&thread->flags,
					~(int32)THREAD_FLAGS_TRAP_FOR_CORE_DUMP);
			}
		}

		return count;
	}

	status_t _GetTeamInfo()
	{
		return get_team_info(fTeam->id, &fTeamInfo);
	}

	bool _AllocateThreadStates(int32 count)
	{
		if (!_PreAllocateThreadStates(count))
			return false;

		TeamLocker teamLocker(fTeam);

		for (;;) {
			fThreadCount = 0;
			int32 missing = 0;

			for (Thread* thread = fTeam->thread_list; thread != NULL;
					thread = thread->team_next) {
				fThreadCount++;
				ThreadState* state = fPreAllocatedThreadStates.RemoveHead();
				if (state != NULL) {
					state->SetThread(thread);
					fThreadStates.Insert(state);
				} else
					missing++;
			}

			if (missing == 0)
				break;

			teamLocker.Unlock();

			fPreAllocatedThreadStates.MoveFrom(&fThreadStates);
			if (!_PreAllocateThreadStates(missing))
				return false;

			teamLocker.Lock();
		}

		return true;
	}

	bool _PreAllocateThreadStates(int32 count)
	{
		for (int32 i = 0; i < count; i++) {
			ThreadState* state = ThreadState::Create();
			if (state == NULL)
				return false;
			fPreAllocatedThreadStates.Insert(state);
		}

		return true;
	}

	void _GetThreadStates()
	{
		for (;;) {
			bool missing = false;
			for (ThreadStateList::Iterator it = fThreadStates.GetIterator();
					ThreadState* state = it.Next();) {
				if (state->IsComplete())
					continue;

				Thread* thread = state->GetThread();
				AutoLocker<Thread> threadLocker(thread);
				if (thread->team != fTeam) {
					// no longer in our team -- i.e. dying and transferred to
					// the kernel team
					threadLocker.Unlock();
					it.Remove();
					delete state;
					fThreadCount--;
					continue;
				}

				InterruptsSpinLocker schedulerLocker(&thread->scheduler_lock);
				if (thread != fCurrentThread
					&& thread->state == B_THREAD_RUNNING) {
					missing = true;
					continue;
				}

				state->GetState();
				state->SetComplete(true);
			}

			if (!missing)
				break;

			// We still haven't got a state for all threads. Wait a moment and
			// try again.
			snooze(10000);
		}
	}

	bool _GetAreaInfos()
	{
		for (;;) {
			AddressSpaceReadLocker addressSpaceLocker(fTeam->address_space,
				true);

			for (VMAddressSpace::AreaIterator it
					= addressSpaceLocker.AddressSpace()->GetAreaIterator();
					VMArea* area = it.Next();) {

				VMCache* cache = vm_area_get_locked_cache(area);
				size_t ramSize = (size_t)cache->page_count * B_PAGE_SIZE;
					// simplified, but what the kernel uses as well ATM

				// iterate to the root cache and, if it is a mapped file, get
				// the file's node_ref
				while (VMCache* source = cache->source) {
					source->Lock();
					source->AcquireRefLocked();
					cache->ReleaseRefAndUnlock();
					cache = source;
				}

				dev_t deviceId = -1;
				ino_t nodeId = -1;
				if (cache->type == CACHE_TYPE_VNODE) {
					VMVnodeCache* vnodeCache = (VMVnodeCache*)cache;
					deviceId = vnodeCache->DeviceId();
					nodeId = vnodeCache->InodeId();
				}

				cache->ReleaseRefAndUnlock();

				AreaInfo* areaInfo = AreaInfo::Create(fAreaInfoAllocator, area,
					ramSize, deviceId, nodeId);

				if (areaInfo != NULL)
					fAreaInfos.Insert(areaInfo);
			}

			addressSpaceLocker.Unlock();

			if (!fAreaInfoAllocator.HasMissingAllocations())
				return true;

			if (!fAreaInfoAllocator.Reallocate())
				return false;
		}
	}

	bool _GetImageInfos()
	{
		return image_iterate_through_team_images(fTeam->id,
			&_GetImageInfoCallback, this) == NULL;
	}

	static bool _GetImageInfoCallback(struct image* image, void* cookie)
	{
		return ((CoreDumper*)cookie)->_GetImageInfo(image);
	}

	bool _GetImageInfo(struct image* image)
	{
		ImageInfo* info = ImageInfo::Create(image);
		if (info == NULL)
			return true;

		fImageInfos.Insert(info);
		return false;
	}

	void _PrepareCoreFileInfo()
	{
		// assign image infos to area infos where possible
		fAreaCount = 0;
		fMappedFilesCount = 0;
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			fAreaCount++;
			dev_t deviceId = areaInfo->DeviceId();
			if (deviceId < 0)
				continue;
			ImageInfo* imageInfo = _FindImageInfo(deviceId, areaInfo->NodeId());
			if (imageInfo != NULL) {
				areaInfo->SetImageInfo(imageInfo);
				fMappedFilesCount++;
			}
		}

		fImageCount = fImageInfos.Count();
		fSegmentCount = 1 + fAreaCount;
		fProgramHeadersOffset = sizeof(elf_ehdr);
		fNoteSegmentOffset = fProgramHeadersOffset
			+ sizeof(elf_phdr) * fSegmentCount;
	}

	ImageInfo* _FindImageInfo(dev_t deviceId, ino_t nodeId) const
	{
		for (ImageInfoList::ConstIterator it = fImageInfos.GetIterator();
				ImageInfo* info = it.Next();) {
			if (info->DeviceId() == deviceId && info->NodeId() == nodeId)
				return info;
		}

		return NULL;
	}

	status_t _WriteElfHeader()
	{
		elf_ehdr header;
		memset(&header, 0, sizeof(header));

		// e_ident
		header.e_ident[EI_MAG0] = ELFMAG[0];
		header.e_ident[EI_MAG1] = ELFMAG[1];
		header.e_ident[EI_MAG2] = ELFMAG[2];
		header.e_ident[EI_MAG3] = ELFMAG[3];
#ifdef B_HAIKU_64_BIT
		header.e_ident[EI_CLASS] = ELFCLASS64;
#else
		header.e_ident[EI_CLASS] = ELFCLASS32;
#endif
#if B_HOST_IS_LENDIAN
		header.e_ident[EI_DATA] = ELFDATA2LSB;
#else
		header.e_ident[EI_DATA] = ELFDATA2MSB;
#endif
		header.e_ident[EI_VERSION] = EV_CURRENT;

		// e_type
		header.e_type = ET_CORE;

		// e_machine
#if defined(__HAIKU_ARCH_X86)
		header.e_machine = EM_386;
#elif defined(__HAIKU_ARCH_X86_64)
		header.e_machine = EM_X86_64;
#elif defined(__HAIKU_ARCH_PPC)
		header.e_machine = EM_PPC64;
#elif defined(__HAIKU_ARCH_M68K)
		header.e_machine = EM_68K;
#elif defined(__HAIKU_ARCH_MIPSEL)
		header.e_machine = EM_MIPS;
#elif defined(__HAIKU_ARCH_ARM)
		header.e_machine = EM_ARM;
#elif defined(__HAIKU_ARCH_ARM64)
		header.e_machine = EM_AARCH64;
#elif defined(__HAIKU_ARCH_SPARC)
		header.e_machine = EM_SPARCV9;
#elif defined(__HAIKU_ARCH_RISCV64)
		header.e_machine = EM_RISCV;
#else
#	error Unsupported architecture!
#endif

		header.e_version = EV_CURRENT;
		header.e_entry = 0;
		header.e_phoff = sizeof(header);
		header.e_shoff = 0;
		header.e_flags = 0;
		header.e_ehsize = sizeof(header);
		header.e_phentsize = sizeof(elf_phdr);
		header.e_phnum = fSegmentCount;
		header.e_shentsize = sizeof(elf_shdr);
		header.e_shnum = 0;
		header.e_shstrndx = SHN_UNDEF;

		return fFile.WriteAt(0, &header, sizeof(header));
	}

	status_t _WriteProgramHeaders()
	{
		fFile.Seek(fProgramHeadersOffset);

		// write the header for the notes segment
		elf_phdr header;
		memset(&header, 0, sizeof(header));
		header.p_type = PT_NOTE;
		header.p_flags = 0;
		header.p_offset = fNoteSegmentOffset;
		header.p_vaddr = 0;
		header.p_paddr = 0;
		header.p_filesz = fNoteSegmentSize;
		header.p_memsz = 0;
		header.p_align = 0;
		fFile.Write(header);

		// write the headers for the area segments
		size_t segmentOffset = fFirstAreaSegmentOffset;
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			memset(&header, 0, sizeof(header));
			header.p_type = PT_LOAD;
			header.p_flags = 0;
			uint32 protection = areaInfo->Protection();
			if ((protection & B_READ_AREA) != 0)
				header.p_flags |= PF_READ;
			if ((protection & B_WRITE_AREA) != 0)
				header.p_flags |= PF_WRITE;
			if ((protection & B_EXECUTE_AREA) != 0)
				header.p_flags |= PF_EXECUTE;
			header.p_offset = segmentOffset;
			header.p_vaddr = areaInfo->Base();
			header.p_paddr = 0;
			header.p_filesz = areaInfo->Size();
			header.p_memsz = areaInfo->Size();
			header.p_align = 0;
			fFile.Write(header);

			segmentOffset += areaInfo->Size();
		}

		return fFile.Status();
	}

	status_t _WriteAreaSegments()
	{
		fFile.Seek(fFirstAreaSegmentOffset);

		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			status_t error = fFile.WriteUserArea(areaInfo->Base(),
				areaInfo->Size());
			if (error != B_OK)
				return error;
		}

		return fFile.Status();
	}

	status_t _WriteNotes()
	{
		status_t error = fFile.Seek((off_t)fNoteSegmentOffset);
		if (error != B_OK)
			return error;

		error = _WriteFilesNote();
		if (error != B_OK)
			return error;

		error = _WriteTeamNote();
		if (error != B_OK)
			return error;

		error = _WriteAreasNote();
		if (error != B_OK)
			return error;

		error = _WriteImagesNote();
		if (error != B_OK)
			return error;

		error = _WriteImageSymbolsNotes();
		if (error != B_OK)
			return error;

		error = _WriteThreadsNote();
		if (error != B_OK)
			return error;

		return B_OK;
	}

	template<typename Writer>
	void _WriteTeamNote(Writer& writer)
	{
		elf_note_team note;
		memset(&note, 0, sizeof(note));
		note.nt_id = fTeamInfo.team;
		note.nt_uid = fTeamInfo.uid;
		note.nt_gid = fTeamInfo.gid;
		writer.Write((uint32)sizeof(note));
		writer.Write(note);

		// write args
		const char* args = fTeamInfo.args;
		writer.Write(args, strlen(args) + 1);
	}

	status_t _WriteTeamNote()
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteTeamNote(dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kHaikuNote, NT_TEAM, dataSize);

		// write the note data
		_WriteTeamNote(fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	template<typename Writer>
	void _WriteFilesNote(Writer& writer)
	{
		// file count and table size
		writer.Write(fMappedFilesCount);
		writer.Write((size_t)B_PAGE_SIZE);

		// write table
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			if (areaInfo->GetImageInfo() == NULL)
				continue;

			// start address, end address, and file offset in pages
			writer.Write(areaInfo->Base());
			writer.Write(areaInfo->Base() + areaInfo->Size());
			writer.Write(size_t(areaInfo->CacheOffset() / B_PAGE_SIZE));
		}

		// write strings
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			ImageInfo* imageInfo = areaInfo->GetImageInfo();
			if (imageInfo == NULL)
				continue;

			const char* name = imageInfo->Name();
			writer.Write(name, strlen(name) + 1);
		}
	}

	status_t _WriteFilesNote()
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteFilesNote(dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kCoreNote, NT_FILE, dataSize);

		// write the note data
		_WriteFilesNote(fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	template<typename Writer>
	void _WriteAreasNote(Writer& writer)
	{
		// area count
		writer.Write((uint32)fAreaCount);
		writer.Write((uint32)sizeof(elf_note_area_entry));

		// write table
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			elf_note_area_entry entry;
			memset(&entry, 0, sizeof(entry));
			entry.na_id = areaInfo->Id();
			entry.na_lock = areaInfo->Lock();
			entry.na_protection = areaInfo->Protection();
			entry.na_base = areaInfo->Base();
			entry.na_size = areaInfo->Size();
			entry.na_ram_size = areaInfo->RamSize();
			writer.Write(entry);
		}

		// write strings
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			const char* name = areaInfo->Name();
			writer.Write(name, strlen(name) + 1);
		}
	}

	status_t _WriteAreasNote()
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteAreasNote(dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kHaikuNote, NT_AREAS, dataSize);

		// write the note data
		_WriteAreasNote(fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	template<typename Writer>
	void _WriteImagesNote(Writer& writer)
	{
		// image count
		writer.Write((uint32)fImageCount);
		writer.Write((uint32)sizeof(elf_note_image_entry));

		// write table
		for (ImageInfoList::Iterator it = fImageInfos.GetIterator();
				ImageInfo* imageInfo = it.Next();) {
			elf_note_image_entry entry;
			memset(&entry, 0, sizeof(entry));
			entry.ni_id = imageInfo->Id();
			entry.ni_type = imageInfo->Type();
			entry.ni_init_routine = imageInfo->InitRoutine();
			entry.ni_term_routine = imageInfo->TermRoutine();
			entry.ni_device = imageInfo->DeviceId();
			entry.ni_node = imageInfo->NodeId();
			entry.ni_text_base = imageInfo->TextBase();
			entry.ni_text_size = imageInfo->TextSize();
			entry.ni_data_base = imageInfo->DataBase();
			entry.ni_data_size = imageInfo->DataSize();
			entry.ni_text_delta = imageInfo->TextDelta();
			entry.ni_symbol_table = imageInfo->SymbolTable();
			entry.ni_symbol_hash = imageInfo->SymbolHash();
			entry.ni_string_table = imageInfo->StringTable();
			writer.Write(entry);
		}

		// write strings
		for (ImageInfoList::Iterator it = fImageInfos.GetIterator();
				ImageInfo* imageInfo = it.Next();) {
			const char* name = imageInfo->Name();
			writer.Write(name, strlen(name) + 1);
		}
	}

	status_t _WriteImagesNote()
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteImagesNote(dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kHaikuNote, NT_IMAGES, dataSize);

		// write the note data
		_WriteImagesNote(fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	status_t _WriteImageSymbolsNotes()
	{
		// write table
		for (ImageInfoList::Iterator it = fImageInfos.GetIterator();
				ImageInfo* imageInfo = it.Next();) {
			if (imageInfo->SymbolTableData() == NULL
				|| imageInfo->StringTableData() == NULL) {
				continue;
			}

			status_t error = _WriteImageSymbolsNote(imageInfo);
			if (error != B_OK)
				return error;
		}

		return B_OK;
	}

	template<typename Writer>
	void _WriteImageSymbolsNote(const ImageInfo* imageInfo, Writer& writer)
	{
		uint32 symbolCount = imageInfo->SymbolCount();
		uint32 symbolEntrySize = (uint32)sizeof(elf_sym);

		writer.Write((int32)imageInfo->Id());
		writer.Write(symbolCount);
		writer.Write(symbolEntrySize);
		writer.Write(imageInfo->SymbolTableData(),
			symbolCount * symbolEntrySize);
		writer.Write(imageInfo->StringTableData(),
			imageInfo->StringTableSize());
	}

	status_t _WriteImageSymbolsNote(const ImageInfo* imageInfo)
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteImageSymbolsNote(imageInfo, dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kHaikuNote, NT_SYMBOLS, dataSize);

		// write the note data
		_WriteImageSymbolsNote(imageInfo, fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	template<typename Writer>
	void _WriteThreadsNote(Writer& writer)
	{
		// thread count and size of CPU state
		writer.Write((uint32)fThreadCount);
		writer.Write((uint32)sizeof(elf_note_thread_entry));
		writer.Write((uint32)sizeof(debug_cpu_state));

		// write table
		for (ThreadStateList::Iterator it = fThreadStates.GetIterator();
				ThreadState* state = it.Next();) {
			elf_note_thread_entry entry;
			memset(&entry, 0, sizeof(entry));
			entry.nth_id = state->GetThread()->id;
			entry.nth_state = state->State();
			entry.nth_priority = state->Priority();
			entry.nth_stack_base = state->StackBase();
			entry.nth_stack_end = state->StackEnd();
			writer.Write(&entry, sizeof(entry));
			writer.Write(state->CpuState(), sizeof(debug_cpu_state));
		}

		// write strings
		for (ThreadStateList::Iterator it = fThreadStates.GetIterator();
				ThreadState* state = it.Next();) {
			const char* name = state->Name();
			writer.Write(name, strlen(name) + 1);
		}
	}

	status_t _WriteThreadsNote()
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteThreadsNote(dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kHaikuNote, NT_THREADS, dataSize);

		// write the note data
		_WriteThreadsNote(fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	status_t _WriteNoteHeader(const char* name, uint32 type, uint32 dataSize)
	{
		// prepare and write the header
		Elf32_Nhdr noteHeader;
		memset(&noteHeader, 0, sizeof(noteHeader));
		size_t nameSize = strlen(name) + 1;
		noteHeader.n_namesz = nameSize;
		noteHeader.n_descsz = dataSize;
		noteHeader.n_type = type;
		fFile.Write(noteHeader);

		// write the name
		fFile.Write(name, nameSize);
		// pad the name to 4 byte alignment
		_WriteNotePadding(nameSize);
		return fFile.Status();
	}

	status_t _WriteNotePadding(size_t sizeToPad)
	{
		if (sizeToPad % 4 != 0) {
			uint8 pad[3] = {};
			fFile.Write(&pad, 4 - sizeToPad % 4);
		}
		return fFile.Status();
	}

private:
	Thread*				fCurrentThread;
	Team*				fTeam;
	BufferedFile		fFile;
	TeamInfo			fTeamInfo;
	size_t				fThreadCount;
	ThreadStateList		fThreadStates;
	ThreadStateList		fPreAllocatedThreadStates;
	Allocator			fAreaInfoAllocator;
	AreaInfoList		fAreaInfos;
	ImageInfoList		fImageInfos;
	ConditionVariable	fThreadBlockCondition;
	size_t				fSegmentCount;
	size_t				fProgramHeadersOffset;
	size_t				fNoteSegmentOffset;
	size_t				fNoteSegmentSize;
	size_t				fFirstAreaSegmentOffset;
	size_t				fAreaCount;
	size_t				fImageCount;
	size_t				fMappedFilesCount;
};


}	// unnamed namespace


status_t
core_dump_write_core_file(const char* path, bool killTeam)
{
	TRACE("core_dump_write_core_file(\"%s\", %d): team: %" B_PRId32 "\n", path,
		killTeam, team_get_current_team_id());

	CoreDumper* coreDumper = new(std::nothrow) CoreDumper();
	if (coreDumper == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<CoreDumper> coreDumperDeleter(coreDumper);
	return coreDumper->Dump(path, killTeam);
}


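/*!	Blocks the calling thread on its team's core dump condition variable for
	as long as its THREAD_FLAGS_TRAP_FOR_CORE_DUMP flag is set. The CoreDumper
	clears the flag and notifies the condition variable once the dump has been
	written.
*/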
void
core_dump_trap_thread()
{
	Thread* thread = thread_get_current_thread();
	ConditionVariableEntry conditionVariableEntry;
	TeamLocker teamLocker(thread->team);

	while ((atomic_get(&thread->flags) & THREAD_FLAGS_TRAP_FOR_CORE_DUMP)
			!= 0) {
		thread->team->CoreDumpCondition()->Add(&conditionVariableEntry);
		teamLocker.Unlock();
		conditionVariableEntry.Wait();
		teamLocker.Lock();
	}
}