/*
 * Copyright 2016, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <core_dump.h>

#include <errno.h>
#include <string.h>

#include <algorithm>
#include <new>

#include <BeBuild.h>
#include <ByteOrder.h>

#include <AutoDeleter.h>

#include <commpage.h>
#include <condition_variable.h>
#include <elf.h>
#include <kimage.h>
#include <ksignal.h>
#include <team.h>
#include <thread.h>
#include <user_debugger.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vm/vm.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>

#include "../cache/vnode_store.h"
#include "../vm/VMAddressSpaceLocking.h"


//#define TRACE_CORE_DUMP
#ifdef TRACE_CORE_DUMP
#	define TRACE(...) dprintf(__VA_ARGS__)
#else
#	define TRACE(...) do {} while (false)
#endif


namespace {


static const size_t kBufferSize = 1024 * 1024;
static const char* const kCoreNote = ELF_NOTE_CORE;
static const char* const kHaikuNote = ELF_NOTE_HAIKU;


struct Allocator {
	Allocator()
		:
		fAligned(NULL),
		fStrings(NULL),
		fAlignedCapacity(0),
		fStringCapacity(0),
		fAlignedSize(0),
		fStringSize(0)
	{
	}

	~Allocator()
	{
		free(fAligned);
	}

	bool HasMissingAllocations() const
	{
		return fAlignedSize > fAlignedCapacity || fStringSize > fStringCapacity;
	}

	bool Reallocate()
	{
		free(fAligned);

		fAlignedCapacity = fAlignedSize;
		fStringCapacity = fStringSize;
		fAlignedSize = 0;
		fStringSize = 0;

		fAligned = (uint8*)malloc(fAlignedCapacity + fStringCapacity);
		if (fAligned == NULL)
			return false;
		fStrings = (char*)(fAligned + fAlignedCapacity);

		return true;
	}

	void* AllocateAligned(size_t size)
	{
		size_t offset = fAlignedSize;
		fAlignedSize += (size + 7) / 8 * 8;
		if (fAlignedSize <= fAlignedCapacity)
			return fAligned + offset;
		return NULL;
	}

	char* AllocateString(size_t length)
	{
		size_t offset = fStringSize;
		fStringSize += length + 1;
		if (fStringSize <= fStringCapacity)
			return fStrings + offset;
		return NULL;
	}

	template <typename Type>
	Type* New()
	{
		void* buffer = AllocateAligned(sizeof(Type));
		if (buffer == NULL)
			return NULL;
		return new(buffer) Type;
	}

	char* DuplicateString(const char* string)
	{
		if (string == NULL)
			return NULL;
		char* newString = AllocateString(strlen(string));
		if (newString != NULL)
			strcpy(newString, string);
		return newString;
	}

private:
	uint8*	fAligned;
	char*	fStrings;
	size_t	fAlignedCapacity;
	size_t	fStringCapacity;
	size_t	fAlignedSize;
	size_t	fStringSize;
};


struct TeamInfo : team_info {
};


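// Captures the state of one of the team's threads for the core file:
// scheduler state, priority, user stack range, name, and CPU state, copied
// from the Thread object by GetState().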
struct ThreadState : DoublyLinkedListLinkImpl<ThreadState> {
	ThreadState()
		:
		fThread(NULL),
		fComplete(false)
	{
	}

	~ThreadState()
	{
		SetThread(NULL);
	}

	static ThreadState* Create()
	{
		ThreadState* state = new(std::nothrow) ThreadState;
		if (state == NULL)
			return NULL;
		return state;
	}

	Thread* GetThread() const
	{
		return fThread;
	}

	void SetThread(Thread* thread)
	{
		if (fThread != NULL)
			fThread->ReleaseReference();

		fThread = thread;

		if (fThread != NULL)
			fThread->AcquireReference();
	}

	/*!	Invoke with thread lock and scheduler lock being held. */
	void GetState()
	{
		fState = fThread->state;
		fPriority = fThread->priority;
		fStackBase = fThread->user_stack_base;
		fStackEnd = fStackBase + fThread->user_stack_size;
		strlcpy(fName, fThread->name, sizeof(fName));
		if (arch_get_thread_debug_cpu_state(fThread, &fCpuState) != B_OK)
			memset(&fCpuState, 0, sizeof(fCpuState));
	}

	bool IsComplete() const
	{
		return fComplete;
	}

	void SetComplete(bool complete)
	{
		fComplete = complete;
	}

	int32 State() const
	{
		return fState;
	}

	int32 Priority() const
	{
		return fPriority;
	}

	addr_t StackBase() const
	{
		return fStackBase;
	}

	addr_t StackEnd() const
	{
		return fStackEnd;
	}

	const char* Name() const
	{
		return fName;
	}

	const debug_cpu_state* CpuState() const
	{
		return &fCpuState;
	}

private:
	Thread*			fThread;
	int32			fState;
	int32			fPriority;
	addr_t			fStackBase;
	addr_t			fStackEnd;
	char			fName[B_OS_NAME_LENGTH];
	debug_cpu_state	fCpuState;
	bool			fComplete;
};


typedef DoublyLinkedList<ThreadState> ThreadStateList;


struct ImageInfo : DoublyLinkedListLinkImpl<ImageInfo> {
	ImageInfo(struct image* image)
		:
		fId(image->info.basic_info.id),
		fType(image->info.basic_info.type),
		fDeviceId(image->info.basic_info.device),
		fNodeId(image->info.basic_info.node),
		fName(strdup(image->info.basic_info.name)),
		fInitRoutine((addr_t)image->info.basic_info.init_routine),
		fTermRoutine((addr_t)image->info.basic_info.term_routine),
		fText((addr_t)image->info.basic_info.text),
		fData((addr_t)image->info.basic_info.data),
		fTextSize(image->info.basic_info.text_size),
		fDataSize(image->info.basic_info.data_size),
		fTextDelta(image->info.text_delta),
		fSymbolTable((addr_t)image->info.symbol_table),
		fSymbolHash((addr_t)image->info.symbol_hash),
		fStringTable((addr_t)image->info.string_table),
		fSymbolTableData(NULL),
		fStringTableData(NULL),
		fSymbolCount(0),
		fStringTableSize(0)
	{
		if (fName != NULL && strcmp(fName, "commpage") == 0)
			_GetCommpageSymbols();
	}

	~ImageInfo()
	{
		free(fName);
		_FreeSymbolData();
	}

	static ImageInfo* Create(struct image* image)
	{
		ImageInfo* imageInfo = new(std::nothrow) ImageInfo(image);
		if (imageInfo == NULL || imageInfo->fName == NULL) {
			delete imageInfo;
			return NULL;
		}

		return imageInfo;
	}

	image_id Id() const
	{
		return fId;
	}

	image_type Type() const
	{
		return fType;
	}

	const char* Name() const
	{
		return fName;
	}

	dev_t DeviceId() const
	{
		return fDeviceId;
	}

	ino_t NodeId() const
	{
		return fNodeId;
	}

	addr_t InitRoutine() const
	{
		return fInitRoutine;
	}

	addr_t TermRoutine() const
	{
		return fTermRoutine;
	}

	addr_t TextBase() const
	{
		return fText;
	}

	size_t TextSize() const
	{
		return fTextSize;
	}

	ssize_t TextDelta() const
	{
		return fTextDelta;
	}

	addr_t DataBase() const
	{
		return fData;
	}

	size_t DataSize() const
	{
		return fDataSize;
	}

	addr_t SymbolTable() const
	{
		return fSymbolTable;
	}

	addr_t SymbolHash() const
	{
		return fSymbolHash;
	}

	addr_t StringTable() const
	{
		return fStringTable;
	}

	elf_sym* SymbolTableData() const
	{
		return fSymbolTableData;
	}

	char* StringTableData() const
	{
		return fStringTableData;
	}

	uint32 SymbolCount() const
	{
		return fSymbolCount;
	}

	size_t StringTableSize() const
	{
		return fStringTableSize;
	}

private:
	void _GetCommpageSymbols()
	{
		image_id commpageId = get_commpage_image();

		// get the size of the tables
		int32 symbolCount = 0;
		size_t stringTableSize = 0;
		status_t error = elf_read_kernel_image_symbols(commpageId, NULL,
			&symbolCount, NULL, &stringTableSize,
			NULL, true);
		if (error != B_OK)
			return;
		if (symbolCount == 0 || stringTableSize == 0)
			return;

		// allocate the tables
		fSymbolTableData = (elf_sym*)malloc(sizeof(elf_sym) * symbolCount);
		fStringTableData = (char*)malloc(stringTableSize);
		if (fSymbolTableData == NULL || fStringTableData == NULL) {
			_FreeSymbolData();
			return;
		}

		fSymbolCount = symbolCount;
		fStringTableSize = stringTableSize;

		// get the data
		error = elf_read_kernel_image_symbols(commpageId,
			fSymbolTableData, &symbolCount, fStringTableData, &stringTableSize,
			NULL, true);
		if (error != B_OK)
			_FreeSymbolData();
	}

	void _FreeSymbolData()
	{
		free(fSymbolTableData);
		free(fStringTableData);

		fSymbolTableData = NULL;
		fStringTableData = NULL;
		fSymbolCount = 0;
		fStringTableSize = 0;
	}

private:
	image_id	fId;
	image_type	fType;
	dev_t		fDeviceId;
	ino_t		fNodeId;
	char*		fName;
	addr_t		fInitRoutine;
	addr_t		fTermRoutine;
	addr_t		fText;
	addr_t		fData;
	size_t		fTextSize;
	size_t		fDataSize;
	ssize_t		fTextDelta;
	addr_t		fSymbolTable;
	addr_t		fSymbolHash;
	addr_t		fStringTable;
	// for commpage image
	elf_sym*	fSymbolTableData;
	char*		fStringTableData;
	uint32		fSymbolCount;
	size_t		fStringTableSize;
};


typedef DoublyLinkedList<ImageInfo> ImageInfoList;


struct AreaInfo : DoublyLinkedListLinkImpl<AreaInfo> {
	static AreaInfo* Create(Allocator& allocator, VMArea* area, size_t ramSize,
		dev_t deviceId, ino_t nodeId)
	{
		AreaInfo* areaInfo = allocator.New<AreaInfo>();
		const char* name = allocator.DuplicateString(area->name);

		if (areaInfo != NULL) {
			areaInfo->fId = area->id;
			areaInfo->fName = name;
			areaInfo->fBase = area->Base();
			areaInfo->fSize = area->Size();
			areaInfo->fLock = B_FULL_LOCK;
			areaInfo->fProtection = area->protection;
			areaInfo->fRamSize = ramSize;
			areaInfo->fDeviceId = deviceId;
			areaInfo->fNodeId = nodeId;
			areaInfo->fCacheOffset = area->cache_offset;
			areaInfo->fImageInfo = NULL;
		}

		return areaInfo;
	}

	area_id Id() const
	{
		return fId;
	}

	const char* Name() const
	{
		return fName;
	}

	addr_t Base() const
	{
		return fBase;
	}

	size_t Size() const
	{
		return fSize;
	}

	uint32 Lock() const
	{
		return fLock;
	}

	uint32 Protection() const
	{
		return fProtection;
	}

	size_t RamSize() const
	{
		return fRamSize;
	}

	off_t CacheOffset() const
	{
		return fCacheOffset;
	}

	dev_t DeviceId() const
	{
		return fDeviceId;
	}

	ino_t NodeId() const
	{
		return fNodeId;
	}

	ImageInfo* GetImageInfo() const
	{
		return fImageInfo;
	}

	void SetImageInfo(ImageInfo* imageInfo)
	{
		fImageInfo = imageInfo;
	}

private:
	area_id		fId;
	const char*	fName;
	addr_t		fBase;
	size_t		fSize;
	uint32		fLock;
	uint32		fProtection;
	size_t		fRamSize;
	dev_t		fDeviceId;
	ino_t		fNodeId;
	off_t		fCacheOffset;
	ImageInfo*	fImageInfo;
};


typedef DoublyLinkedList<AreaInfo> AreaInfoList;


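// Buffered output to the core file: data is collected in a kBufferSize buffer
// and flushed with pwrite(). WriteUserArea() copies an area page-wise from
// userland, substituting a zeroed page whenever user_memcpy() fails.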
struct BufferedFile {
	BufferedFile()
		:
		fFd(-1),
		fBuffer(NULL),
		fCapacity(0),
		fOffset(0),
		fBuffered(0),
		fStatus(B_NO_INIT)
	{
	}

	~BufferedFile()
	{
		if (fFd >= 0)
			close(fFd);

		free(fBuffer);
	}

	status_t Init(const char* path)
	{
		fCapacity = kBufferSize;
		fBuffer = (uint8*)malloc(fCapacity);
		if (fBuffer == NULL)
			return B_NO_MEMORY;

		fFd = open(path, O_WRONLY | O_CREAT | O_EXCL, S_IRUSR);
		if (fFd < 0)
			return errno;

		fStatus = B_OK;
		return B_OK;
	}

	status_t Status() const
	{
		return fStatus;
	}

	off_t EndOffset() const
	{
		return fOffset + (off_t)fBuffered;
	}

	status_t Flush()
	{
		if (fStatus != B_OK)
			return fStatus;

		if (fBuffered == 0)
			return B_OK;

		ssize_t written = pwrite(fFd, fBuffer, fBuffered, fOffset);
		if (written < 0)
			return fStatus = errno;
		if ((size_t)written != fBuffered)
			return fStatus = B_IO_ERROR;

		fOffset += (off_t)fBuffered;
		fBuffered = 0;
		return B_OK;
	}

	status_t Seek(off_t offset)
	{
		if (fStatus != B_OK)
			return fStatus;

		if (fBuffered == 0) {
			fOffset = offset;
		} else if (offset != fOffset + (off_t)fBuffered) {
			status_t error = Flush();
			if (error != B_OK)
				return fStatus = error;
			fOffset = offset;
		}

		return B_OK;
	}

	status_t Write(const void* data, size_t size)
	{
		if (fStatus != B_OK)
			return fStatus;

		if (size == 0)
			return B_OK;

		while (size > 0) {
			size_t toWrite = std::min(size, fCapacity - fBuffered);
			if (toWrite == 0) {
				status_t error = Flush();
				if (error != B_OK)
					return fStatus = error;
				continue;
			}

			memcpy(fBuffer + fBuffered, data, toWrite);
			fBuffered += toWrite;
			data = (const uint8*)data + toWrite;
			size -= toWrite;
		}

		return B_OK;
	}

	template<typename Data>
	status_t Write(const Data& data)
	{
		return Write(&data, sizeof(data));
	}

	status_t WriteAt(off_t offset, const void* data, size_t size)
	{
		if (Seek(offset) != B_OK)
			return fStatus;

		return Write(data, size);
	}

	status_t WriteUserArea(addr_t base, size_t size)
	{
		uint8* data = (uint8*)base;
		size = size / B_PAGE_SIZE * B_PAGE_SIZE;

		// copy the area page-wise into the buffer, flushing when necessary
		while (size > 0) {
			if (fBuffered + B_PAGE_SIZE > fCapacity) {
				status_t error = Flush();
				if (error != B_OK)
					return error;
			}

			if (user_memcpy(fBuffer + fBuffered, data, B_PAGE_SIZE) != B_OK)
				memset(fBuffer + fBuffered, 0, B_PAGE_SIZE);

			fBuffered += B_PAGE_SIZE;
			data += B_PAGE_SIZE;
			size -= B_PAGE_SIZE;
		}

		return B_OK;
	}

private:
	int			fFd;
	uint8*		fBuffer;
	size_t		fCapacity;
	off_t		fOffset;
	size_t		fBuffered;
	status_t	fStatus;
};


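// A writer that discards the data and only counts the bytes it is given.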
struct DummyWriter {
	DummyWriter()
		:
		fWritten(0)
	{
	}

	status_t Status() const
	{
		return B_OK;
	}

	size_t BytesWritten() const
	{
		return fWritten;
	}

	status_t Write(const void* data, size_t size)
	{
		fWritten += size;
		return B_OK;
	}

	template<typename Data>
	status_t Write(const Data& data)
	{
		return Write(&data, sizeof(data));
	}

private:
	size_t	fWritten;
};


struct CoreDumper {
	CoreDumper()
		:
		fCurrentThread(thread_get_current_thread()),
		fTeam(fCurrentThread->team),
		fFile(),
		fThreadCount(0),
		fThreadStates(),
		fPreAllocatedThreadStates(),
		fAreaInfoAllocator(),
		fAreaInfos(),
		fImageInfos(),
		fThreadBlockCondition()
	{
		fThreadBlockCondition.Init(this, "core dump");
	}

	~CoreDumper()
	{
		while (ThreadState* state = fThreadStates.RemoveHead())
			delete state;
		while (ThreadState* state = fPreAllocatedThreadStates.RemoveHead())
			delete state;
		while (ImageInfo* info = fImageInfos.RemoveHead())
			delete info;
	}

	status_t Dump(const char* path, bool killTeam)
	{
		// the path must be absolute
		if (path[0] != '/')
			return B_BAD_VALUE;

		AutoLocker<Team> teamLocker(fTeam);

		// indicate that we're dumping core
		if ((atomic_or(&fTeam->flags, TEAM_FLAG_DUMP_CORE)
				& TEAM_FLAG_DUMP_CORE) != 0) {
			return B_BUSY;
		}

		fTeam->SetCoreDumpCondition(&fThreadBlockCondition);

		int32 threadCount = _SetThreadsCoreDumpFlag(true);

		teamLocker.Unlock();

		// write the core file
		status_t error = _Dump(path, threadCount);

		// send kill signal, if requested
		if (killTeam)
			kill_team(fTeam->id);

		// clean up the team state and wake up waiting threads
		teamLocker.Lock();

		fTeam->SetCoreDumpCondition(NULL);

		atomic_and(&fTeam->flags, ~(int32)TEAM_FLAG_DUMP_CORE);

		_SetThreadsCoreDumpFlag(false);

		fThreadBlockCondition.NotifyAll();

		return error;
	}

private:
	status_t _Dump(const char* path, int32 threadCount)
	{
		status_t error = _GetTeamInfo();
		if (error != B_OK)
			return error;

		// pre-allocate a list of thread states
		if (!_AllocateThreadStates(threadCount))
			return B_NO_MEMORY;

		// collect the thread states
		_GetThreadStates();

		// collect the other team information
		if (!_GetAreaInfos() || !_GetImageInfos())
			return B_NO_MEMORY;

		// open the file
		error = fFile.Init(path);
		if (error != B_OK)
			return error;

		_PrepareCoreFileInfo();

		// write ELF header
		error = _WriteElfHeader();
		if (error != B_OK)
			return error;

		// write note segment
		error = _WriteNotes();
		if (error != B_OK)
			return error;

		size_t notesEndOffset = (size_t)fFile.EndOffset();
		fNoteSegmentSize = notesEndOffset - fNoteSegmentOffset;
		fFirstAreaSegmentOffset = (notesEndOffset + B_PAGE_SIZE - 1)
			/ B_PAGE_SIZE * B_PAGE_SIZE;

		error = _WriteProgramHeaders();
		if (error != B_OK)
			return error;

		// write area segments
		error = _WriteAreaSegments();
		if (error != B_OK)
			return error;

		return _WriteElfHeader();
	}

	int32 _SetThreadsCoreDumpFlag(bool setFlag)
	{
		int32 count = 0;

		for (Thread* thread = fTeam->thread_list; thread != NULL;
				thread = thread->team_next) {
			count++;
			if (setFlag) {
				atomic_or(&thread->flags, THREAD_FLAGS_TRAP_FOR_CORE_DUMP);
			} else {
				atomic_and(&thread->flags,
					~(int32)THREAD_FLAGS_TRAP_FOR_CORE_DUMP);
			}
		}

		return count;
	}

	status_t _GetTeamInfo()
	{
		return get_team_info(fTeam->id, &fTeamInfo);
	}

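	// Creates a ThreadState for each thread of the team. States are never
	// allocated while the team lock is held: if threads have appeared since
	// the pre-allocation, the lock is dropped, the missing states are
	// allocated, and the assignment pass is repeated.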
	bool _AllocateThreadStates(int32 count)
	{
		if (!_PreAllocateThreadStates(count))
			return false;

		TeamLocker teamLocker(fTeam);

		for (;;) {
			fThreadCount = 0;
			int32 missing = 0;

			for (Thread* thread = fTeam->thread_list; thread != NULL;
					thread = thread->team_next) {
				fThreadCount++;
				ThreadState* state = fPreAllocatedThreadStates.RemoveHead();
				if (state != NULL) {
					state->SetThread(thread);
					fThreadStates.Insert(state);
				} else
					missing++;
			}

			if (missing == 0)
				break;

			teamLocker.Unlock();

			fPreAllocatedThreadStates.MoveFrom(&fThreadStates);
			if (!_PreAllocateThreadStates(missing))
				return false;

			teamLocker.Lock();
		}

		return true;
	}

	bool _PreAllocateThreadStates(int32 count)
	{
		for (int32 i = 0; i < count; i++) {
			ThreadState* state = ThreadState::Create();
			if (state == NULL)
				return false;
			fPreAllocatedThreadStates.Insert(state);
		}

		return true;
	}

	void _GetThreadStates()
	{
		for (;;) {
			bool missing = false;
			for (ThreadStateList::Iterator it = fThreadStates.GetIterator();
					ThreadState* state = it.Next();) {
				if (state->IsComplete())
					continue;

				Thread* thread = state->GetThread();
				AutoLocker<Thread> threadLocker(thread);
				if (thread->team != fTeam) {
					// no longer in our team -- i.e. dying and transferred to
					// the kernel team
					threadLocker.Unlock();
					it.Remove();
					delete state;
					fThreadCount--;
					continue;
				}

				InterruptsSpinLocker schedulerLocker(&thread->scheduler_lock);
				if (thread != fCurrentThread
					&& thread->state == B_THREAD_RUNNING) {
					missing = true;
					continue;
				}

				state->GetState();
				state->SetComplete(true);
			}

			if (!missing)
				break;

			// We still haven't got a state for all threads. Wait a moment and
			// try again.
			snooze(10000);
		}
	}

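	// Collects an AreaInfo for each area of the team's address space,
	// including the node_ref of the backing file for file mapped areas. The
	// Allocator only records the required sizes on the first pass; if any
	// allocation was missing, the buffer is reallocated after dropping the
	// address space lock and the pass is repeated.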
	bool _GetAreaInfos()
	{
		for (;;) {
			AddressSpaceReadLocker addressSpaceLocker(fTeam->address_space,
				true);

			for (VMAddressSpace::AreaIterator it
					= addressSpaceLocker.AddressSpace()->GetAreaIterator();
					VMArea* area = it.Next();) {

				VMCache* cache = vm_area_get_locked_cache(area);
				size_t ramSize = (size_t)cache->page_count * B_PAGE_SIZE;
					// simplified, but what the kernel uses as well ATM

				// iterate to the root cache and, if it is a mapped file, get
				// the file's node_ref
				while (VMCache* source = cache->source) {
					source->Lock();
					source->AcquireRefLocked();
					cache->ReleaseRefAndUnlock();
					cache = source;
				}

				dev_t deviceId = -1;
				ino_t nodeId = -1;
				if (cache->type == CACHE_TYPE_VNODE) {
					VMVnodeCache* vnodeCache = (VMVnodeCache*)cache;
					deviceId = vnodeCache->DeviceId();
					nodeId = vnodeCache->InodeId();
				}

				cache->ReleaseRefAndUnlock();

				AreaInfo* areaInfo = AreaInfo::Create(fAreaInfoAllocator, area,
					ramSize, deviceId, nodeId);

				if (areaInfo != NULL)
					fAreaInfos.Insert(areaInfo);
			}

			addressSpaceLocker.Unlock();

			if (!fAreaInfoAllocator.HasMissingAllocations())
				return true;

			if (!fAreaInfoAllocator.Reallocate())
				return false;
		}
	}

	bool _GetImageInfos()
	{
		return image_iterate_through_team_images(fTeam->id,
			&_GetImageInfoCallback, this) == NULL;
	}

	static bool _GetImageInfoCallback(struct image* image, void* cookie)
	{
		return ((CoreDumper*)cookie)->_GetImageInfo(image);
	}

	bool _GetImageInfo(struct image* image)
	{
		ImageInfo* info = ImageInfo::Create(image);
		if (info == NULL)
			return true;

		fImageInfos.Insert(info);
		return false;
	}

	void _PrepareCoreFileInfo()
	{
		// assign image infos to area infos where possible
		fAreaCount = 0;
		fMappedFilesCount = 0;
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			fAreaCount++;
			dev_t deviceId = areaInfo->DeviceId();
			if (deviceId < 0)
				continue;
			ImageInfo* imageInfo = _FindImageInfo(deviceId, areaInfo->NodeId());
			if (imageInfo != NULL) {
				areaInfo->SetImageInfo(imageInfo);
				fMappedFilesCount++;
			}
		}

		fImageCount = fImageInfos.Count();
		fSegmentCount = 1 + fAreaCount;
		fProgramHeadersOffset = sizeof(elf_ehdr);
		fNoteSegmentOffset = fProgramHeadersOffset
			+ sizeof(elf_phdr) * fSegmentCount;
	}

	ImageInfo* _FindImageInfo(dev_t deviceId, ino_t nodeId) const
	{
		for (ImageInfoList::ConstIterator it = fImageInfos.GetIterator();
				ImageInfo* info = it.Next();) {
			if (info->DeviceId() == deviceId && info->NodeId() == nodeId)
				return info;
		}

		return NULL;
	}

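	// Writes the ELF header for the core file (ET_CORE) at the start of the
	// file. The ELF class, byte order, and machine are chosen to match the
	// architecture the kernel was built for; _Dump() writes the header once
	// up front and once more after all segments have been written.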
	status_t _WriteElfHeader()
	{
		elf_ehdr header;
		memset(&header, 0, sizeof(header));

		// e_ident
		header.e_ident[EI_MAG0] = ELFMAG[0];
		header.e_ident[EI_MAG1] = ELFMAG[1];
		header.e_ident[EI_MAG2] = ELFMAG[2];
		header.e_ident[EI_MAG3] = ELFMAG[3];
#ifdef B_HAIKU_64_BIT
		header.e_ident[EI_CLASS] = ELFCLASS64;
#else
		header.e_ident[EI_CLASS] = ELFCLASS32;
#endif
#if B_HOST_IS_LENDIAN
		header.e_ident[EI_DATA] = ELFDATA2LSB;
#else
		header.e_ident[EI_DATA] = ELFDATA2MSB;
#endif
		header.e_ident[EI_VERSION] = EV_CURRENT;

		// e_type
		header.e_type = ET_CORE;

		// e_machine
#if defined(__HAIKU_ARCH_X86)
		header.e_machine = EM_386;
#elif defined(__HAIKU_ARCH_X86_64)
		header.e_machine = EM_X86_64;
#elif defined(__HAIKU_ARCH_PPC)
		header.e_machine = EM_PPC64;
#elif defined(__HAIKU_ARCH_M68K)
		header.e_machine = EM_68K;
#elif defined(__HAIKU_ARCH_MIPSEL)
		header.e_machine = EM_MIPS;
#elif defined(__HAIKU_ARCH_ARM)
		header.e_machine = EM_ARM;
#else
#	error Unsupported architecture!
#endif

		header.e_version = EV_CURRENT;
		header.e_entry = 0;
		header.e_phoff = sizeof(header);
		header.e_shoff = 0;
		header.e_flags = 0;
		header.e_ehsize = sizeof(header);
		header.e_phentsize = sizeof(elf_phdr);
		header.e_phnum = fSegmentCount;
		header.e_shentsize = sizeof(elf_shdr);
		header.e_shnum = 0;
		header.e_shstrndx = SHN_UNDEF;

		return fFile.WriteAt(0, &header, sizeof(header));
	}

	status_t _WriteProgramHeaders()
	{
		fFile.Seek(fProgramHeadersOffset);

		// write the header for the notes segment
		elf_phdr header;
		memset(&header, 0, sizeof(header));
		header.p_type = PT_NOTE;
		header.p_flags = 0;
		header.p_offset = fNoteSegmentOffset;
		header.p_vaddr = 0;
		header.p_paddr = 0;
		header.p_filesz = fNoteSegmentSize;
		header.p_memsz = 0;
		header.p_align = 0;
		fFile.Write(header);

		// write the headers for the area segments
		size_t segmentOffset = fFirstAreaSegmentOffset;
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			memset(&header, 0, sizeof(header));
			header.p_type = PT_LOAD;
			header.p_flags = 0;
			uint32 protection = areaInfo->Protection();
			if ((protection & B_READ_AREA) != 0)
				header.p_flags |= PF_READ;
			if ((protection & B_WRITE_AREA) != 0)
				header.p_flags |= PF_WRITE;
			if ((protection & B_EXECUTE_AREA) != 0)
				header.p_flags |= PF_EXECUTE;
			header.p_offset = segmentOffset;
			header.p_vaddr = areaInfo->Base();
			header.p_paddr = 0;
			header.p_filesz = areaInfo->Size();
			header.p_memsz = areaInfo->Size();
			header.p_align = 0;
			fFile.Write(header);

			segmentOffset += areaInfo->Size();
		}

		return fFile.Status();
	}

	status_t _WriteAreaSegments()
	{
		fFile.Seek(fFirstAreaSegmentOffset);

		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			status_t error = fFile.WriteUserArea(areaInfo->Base(),
				areaInfo->Size());
			if (error != B_OK)
				return error;
		}

		return fFile.Status();
	}

	status_t _WriteNotes()
	{
		status_t error = fFile.Seek((off_t)fNoteSegmentOffset);
		if (error != B_OK)
			return error;

		error = _WriteFilesNote();
		if (error != B_OK)
			return error;

		error = _WriteTeamNote();
		if (error != B_OK)
			return error;

		error = _WriteAreasNote();
		if (error != B_OK)
			return error;

		error = _WriteImagesNote();
		if (error != B_OK)
			return error;

		error = _WriteImageSymbolsNotes();
		if (error != B_OK)
			return error;

		error = _WriteThreadsNote();
		if (error != B_OK)
			return error;

		return B_OK;
	}

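	// Each note is written in two passes: the templated writer methods are
	// first run with a DummyWriter to determine the size of the note data
	// (needed for the note header), then with the BufferedFile to write the
	// actual data, followed by padding to 4 byte alignment.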
	template<typename Writer>
	void _WriteTeamNote(Writer& writer)
	{
		elf_note_team note;
		memset(&note, 0, sizeof(note));
		note.nt_id = fTeamInfo.team;
		note.nt_uid = fTeamInfo.uid;
		note.nt_gid = fTeamInfo.gid;
		writer.Write((uint32)sizeof(note));
		writer.Write(note);

		// write args
		const char* args = fTeamInfo.args;
		writer.Write(args, strlen(args) + 1);
	}

	status_t _WriteTeamNote()
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteTeamNote(dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kHaikuNote, NT_TEAM, dataSize);

		// write the note data
		_WriteTeamNote(fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	template<typename Writer>
	void _WriteFilesNote(Writer& writer)
	{
		// file count and table size
		writer.Write(fMappedFilesCount);
		writer.Write((size_t)B_PAGE_SIZE);

		// write table
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			if (areaInfo->GetImageInfo() == NULL)
				continue;

			// start address, end address, and file offset in pages
			writer.Write(areaInfo->Base());
			writer.Write(areaInfo->Base() + areaInfo->Size());
			writer.Write(size_t(areaInfo->CacheOffset() / B_PAGE_SIZE));
		}

		// write strings
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			ImageInfo* imageInfo = areaInfo->GetImageInfo();
			if (imageInfo == NULL)
				continue;

			const char* name = imageInfo->Name();
			writer.Write(name, strlen(name) + 1);
		}
	}

	status_t _WriteFilesNote()
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteFilesNote(dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kCoreNote, NT_FILE, dataSize);

		// write the note data
		_WriteFilesNote(fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	template<typename Writer>
	void _WriteAreasNote(Writer& writer)
	{
		// area count
		writer.Write((uint32)fAreaCount);
		writer.Write((uint32)sizeof(elf_note_area_entry));

		// write table
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			elf_note_area_entry entry;
			memset(&entry, 0, sizeof(entry));
			entry.na_id = areaInfo->Id();
			entry.na_lock = areaInfo->Lock();
			entry.na_protection = areaInfo->Protection();
			entry.na_base = areaInfo->Base();
			entry.na_size = areaInfo->Size();
			entry.na_ram_size = areaInfo->RamSize();
			writer.Write(entry);
		}

		// write strings
		for (AreaInfoList::Iterator it = fAreaInfos.GetIterator();
				AreaInfo* areaInfo = it.Next();) {
			const char* name = areaInfo->Name();
			writer.Write(name, strlen(name) + 1);
		}
	}

	status_t _WriteAreasNote()
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteAreasNote(dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kHaikuNote, NT_AREAS, dataSize);

		// write the note data
		_WriteAreasNote(fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	template<typename Writer>
	void _WriteImagesNote(Writer& writer)
	{
		// image count
		writer.Write((uint32)fImageCount);
		writer.Write((uint32)sizeof(elf_note_image_entry));

		// write table
		for (ImageInfoList::Iterator it = fImageInfos.GetIterator();
				ImageInfo* imageInfo = it.Next();) {
			elf_note_image_entry entry;
			memset(&entry, 0, sizeof(entry));
			entry.ni_id = imageInfo->Id();
			entry.ni_type = imageInfo->Type();
			entry.ni_init_routine = imageInfo->InitRoutine();
			entry.ni_term_routine = imageInfo->TermRoutine();
			entry.ni_device = imageInfo->DeviceId();
			entry.ni_node = imageInfo->NodeId();
			entry.ni_text_base = imageInfo->TextBase();
			entry.ni_text_size = imageInfo->TextSize();
			entry.ni_data_base = imageInfo->DataBase();
			entry.ni_data_size = imageInfo->DataSize();
			entry.ni_text_delta = imageInfo->TextDelta();
			entry.ni_symbol_table = imageInfo->SymbolTable();
			entry.ni_symbol_hash = imageInfo->SymbolHash();
			entry.ni_string_table = imageInfo->StringTable();
			writer.Write(entry);
		}

		// write strings
		for (ImageInfoList::Iterator it = fImageInfos.GetIterator();
				ImageInfo* imageInfo = it.Next();) {
			const char* name = imageInfo->Name();
			writer.Write(name, strlen(name) + 1);
		}
	}

	status_t _WriteImagesNote()
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteImagesNote(dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kHaikuNote, NT_IMAGES, dataSize);

		// write the note data
		_WriteImagesNote(fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	status_t _WriteImageSymbolsNotes()
	{
		// write a note for each image that has symbol data
		for (ImageInfoList::Iterator it = fImageInfos.GetIterator();
				ImageInfo* imageInfo = it.Next();) {
			if (imageInfo->SymbolTableData() == NULL
				|| imageInfo->StringTableData() == NULL) {
				continue;
			}

			status_t error = _WriteImageSymbolsNote(imageInfo);
			if (error != B_OK)
				return error;
		}

		return B_OK;
	}

	template<typename Writer>
	void _WriteImageSymbolsNote(const ImageInfo* imageInfo, Writer& writer)
	{
		uint32 symbolCount = imageInfo->SymbolCount();
		uint32 symbolEntrySize = (uint32)sizeof(elf_sym);

		writer.Write((int32)imageInfo->Id());
		writer.Write(symbolCount);
		writer.Write(symbolEntrySize);
		writer.Write(imageInfo->SymbolTableData(),
			symbolCount * symbolEntrySize);
		writer.Write(imageInfo->StringTableData(),
			imageInfo->StringTableSize());
	}

	status_t _WriteImageSymbolsNote(const ImageInfo* imageInfo)
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteImageSymbolsNote(imageInfo, dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kHaikuNote, NT_SYMBOLS, dataSize);

		// write the note data
		_WriteImageSymbolsNote(imageInfo, fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

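	// The threads note contains the thread count, the size of an
	// elf_note_thread_entry, and the size of debug_cpu_state, followed by one
	// entry plus CPU state per thread, and finally the thread names as
	// null-terminated strings.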
	template<typename Writer>
	void _WriteThreadsNote(Writer& writer)
	{
		// thread count, entry size, and size of CPU state
		writer.Write((uint32)fThreadCount);
		writer.Write((uint32)sizeof(elf_note_thread_entry));
		writer.Write((uint32)sizeof(debug_cpu_state));

		// write table
		for (ThreadStateList::Iterator it = fThreadStates.GetIterator();
				ThreadState* state = it.Next();) {
			elf_note_thread_entry entry;
			memset(&entry, 0, sizeof(entry));
			entry.nth_id = state->GetThread()->id;
			entry.nth_state = state->State();
			entry.nth_priority = state->Priority();
			entry.nth_stack_base = state->StackBase();
			entry.nth_stack_end = state->StackEnd();
			writer.Write(&entry, sizeof(entry));
			writer.Write(state->CpuState(), sizeof(debug_cpu_state));
		}

		// write strings
		for (ThreadStateList::Iterator it = fThreadStates.GetIterator();
				ThreadState* state = it.Next();) {
			const char* name = state->Name();
			writer.Write(name, strlen(name) + 1);
		}
	}

	status_t _WriteThreadsNote()
	{
		// determine needed size for the note's data
		DummyWriter dummyWriter;
		_WriteThreadsNote(dummyWriter);
		size_t dataSize = dummyWriter.BytesWritten();

		// write the note header
		_WriteNoteHeader(kHaikuNote, NT_THREADS, dataSize);

		// write the note data
		_WriteThreadsNote(fFile);

		// padding
		_WriteNotePadding(dataSize);

		return fFile.Status();
	}

	status_t _WriteNoteHeader(const char* name, uint32 type, uint32 dataSize)
	{
		// prepare and write the header
		Elf32_Nhdr noteHeader;
		memset(&noteHeader, 0, sizeof(noteHeader));
		size_t nameSize = strlen(name) + 1;
		noteHeader.n_namesz = nameSize;
		noteHeader.n_descsz = dataSize;
		noteHeader.n_type = type;
		fFile.Write(noteHeader);

		// write the name
		fFile.Write(name, nameSize);
		// pad the name to 4 byte alignment
		_WriteNotePadding(nameSize);
		return fFile.Status();
	}

	status_t _WriteNotePadding(size_t sizeToPad)
	{
		if (sizeToPad % 4 != 0) {
			uint8 pad[3] = {};
			fFile.Write(&pad, 4 - sizeToPad % 4);
		}
		return fFile.Status();
	}

private:
	Thread*				fCurrentThread;
	Team*				fTeam;
	BufferedFile		fFile;
	TeamInfo			fTeamInfo;
	size_t				fThreadCount;
	ThreadStateList		fThreadStates;
	ThreadStateList		fPreAllocatedThreadStates;
	Allocator			fAreaInfoAllocator;
	AreaInfoList		fAreaInfos;
	ImageInfoList		fImageInfos;
	ConditionVariable	fThreadBlockCondition;
	size_t				fSegmentCount;
	size_t				fProgramHeadersOffset;
	size_t				fNoteSegmentOffset;
	size_t				fNoteSegmentSize;
	size_t				fFirstAreaSegmentOffset;
	size_t				fAreaCount;
	size_t				fImageCount;
	size_t				fMappedFilesCount;
};


} // unnamed namespace


status_t
core_dump_write_core_file(const char* path, bool killTeam)
{
	TRACE("core_dump_write_core_file(\"%s\", %d): team: %" B_PRId32 "\n", path,
		killTeam, team_get_current_team_id());

	CoreDumper* coreDumper = new(std::nothrow) CoreDumper();
	if (coreDumper == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<CoreDumper> coreDumperDeleter(coreDumper);
	return coreDumper->Dump(path, killTeam);
}


void
core_dump_trap_thread()
{
	Thread* thread = thread_get_current_thread();
	ConditionVariableEntry conditionVariableEntry;
	TeamLocker teamLocker(thread->team);

	while ((atomic_get(&thread->flags) & THREAD_FLAGS_TRAP_FOR_CORE_DUMP)
			!= 0) {
		thread->team->CoreDumpCondition()->Add(&conditionVariableEntry);
		teamLocker.Unlock();
		conditionVariableEntry.Wait();
		teamLocker.Lock();
	}
}