/*
 * Copyright 2001-2017, Axel Dörfler, axeld@pinc-software.de.
 * This file may be used under the terms of the MIT License.
 */


//! superblock, mounting, etc.


#include "Attribute.h"
#include "CheckVisitor.h"
#include "Debug.h"
#include "Inode.h"
#include "Journal.h"
#include "Query.h"
#include "Volume.h"


static const int32 kDesiredAllocationGroups = 56;
	// This is the number of allocation groups that will be tried
	// to be given for newly initialized disks.
	// That's only relevant for smaller disks, though, since any
	// of today's disk sizes already reach the maximum length
	// of an allocation group (65536 blocks).
	// It seems to create appropriate numbers for smaller disks
	// with this setting, though (i.e. you can create a 400 MB
	// file on a 1 GB disk without the need for double indirect
	// blocks).


/*! RAII-style helper that opens a device (or duplicates an already open
	file descriptor) and can attach a block cache to it. The destructor
	removes the cache and closes the device again, unless Keep() was
	called to hand ownership of the descriptor over to the caller.
*/
class DeviceOpener {
public:
	DeviceOpener(int fd, int mode);
	DeviceOpener(const char* device, int mode);
	~DeviceOpener();

	int Open(const char* device, int mode);
	int Open(int fd, int mode);
	void* InitCache(off_t numBlocks, uint32 blockSize);
	void RemoveCache(bool allowWrites);

	void Keep();

	int Device() const { return fDevice; }
	int Mode() const { return fMode; }
	bool IsReadOnly() const { return _IsReadOnly(fMode); }

	status_t GetSize(off_t* _size, uint32* _blockSize = NULL);

private:
	static bool _IsReadOnly(int mode)
		{ return (mode & O_RWMASK) == O_RDONLY;}
	static bool _IsReadWrite(int mode)
		{ return (mode & O_RWMASK) == O_RDWR;}

	int		fDevice;
		// open file descriptor, or a (negative) error code if opening failed
	int		fMode;
		// the mode the device was actually opened with (may have been
		// downgraded to O_RDONLY, see Open())
	void*	fBlockCache;
		// cache created by InitCache(), NULL if none
};


DeviceOpener::DeviceOpener(const char* device, int mode)
	:
	fBlockCache(NULL)
{
	Open(device, mode);
}


DeviceOpener::DeviceOpener(int fd, int mode)
	:
	fBlockCache(NULL)
{
	Open(fd, mode);
}


DeviceOpener::~DeviceOpener()
{
	// Only clean up if Keep() was not called (Keep() resets fDevice to -1)
	if (fDevice >= 0) {
		RemoveCache(false);
		close(fDevice);
	}
}


/*! Opens \a device with the given \a mode. If opening read/write fails,
	or the device reports itself read-only via B_GET_GEOMETRY, the open
	is retried read-only. Returns the file descriptor, or a negative
	error code on failure.
*/
int
DeviceOpener::Open(const char* device, int mode)
{
	fDevice = open(device, mode | O_NOCACHE);
	if (fDevice < 0)
		fDevice = errno;
		// NOTE(review): this assumes errno holds a negative (Haiku B_*)
		// error code here, so the checks below still see fDevice < 0 —
		// confirm on non-Haiku builds

	if (fDevice < 0 && _IsReadWrite(mode)) {
		// try again to open read-only (don't rely on a specific error code)
		return Open(device, O_RDONLY | O_NOCACHE);
	}

	if (fDevice >= 0) {
		// opening succeeded
		fMode = mode;
		if (_IsReadWrite(mode)) {
			// check out if the device really allows for read/write access
			device_geometry geometry;
			if (!ioctl(fDevice, B_GET_GEOMETRY, &geometry)) {
				if (geometry.read_only) {
					// reopen device read-only
					close(fDevice);
					return Open(device, O_RDONLY | O_NOCACHE);
				}
			}
		}
	}

	return fDevice;
}


/*! Duplicates the already open descriptor \a fd. Returns the new
	descriptor, or a negative error code if dup() failed.
*/
int
DeviceOpener::Open(int fd, int mode)
{
	fDevice = dup(fd);
	if (fDevice < 0)
		return errno;

	fMode = mode;

	return fDevice;
}


/*! Creates a block cache for the device; the cache is read-only when
	the device was opened read-only. Returns NULL on failure.
*/
void*
DeviceOpener::InitCache(off_t numBlocks, uint32 blockSize)
{
	return fBlockCache = block_cache_create(fDevice, numBlocks, blockSize,
		IsReadOnly());
}


/*! Deletes the block cache created by InitCache(), flushing pending
	blocks to disk first if \a allowWrites is true. A no-op if no cache
	was created.
*/
void
DeviceOpener::RemoveCache(bool allowWrites)
{
	if (fBlockCache == NULL)
		return;

	block_cache_delete(fBlockCache, allowWrites);
	fBlockCache = NULL;
}


/*! Transfers ownership of the descriptor (and cache) to the caller —
	the destructor will not close anything anymore.
*/
void
DeviceOpener::Keep()
{
	fDevice = -1;
}
/*! Returns the size of the device in bytes. It uses B_GET_GEOMETRY
	to compute the size, or fstat() if that failed.
*/
status_t
DeviceOpener::GetSize(off_t* _size, uint32* _blockSize)
{
	device_geometry geometry;
	if (ioctl(fDevice, B_GET_GEOMETRY, &geometry) < 0) {
		// maybe it's just a file
		struct stat stat;
		if (fstat(fDevice, &stat) < 0)
			return B_ERROR;

		if (_size)
			*_size = stat.st_size;
		if (_blockSize)	// that shouldn't cause us any problems
			*_blockSize = 512;

		return B_OK;
	}

	if (_size) {
		*_size = 1LL * geometry.head_count * geometry.cylinder_count
			* geometry.sectors_per_track * geometry.bytes_per_sector;
	}
	if (_blockSize)
		*_blockSize = geometry.bytes_per_sector;

	return B_OK;
}


//	#pragma mark -


/*! Performs a sanity check of the superblock: magic numbers, byte
	order, block/inode size consistency, and that the allocation group
	parameters agree with the total number of blocks.
*/
bool
disk_super_block::IsValid() const
{
	if (Magic1() != (int32)SUPER_BLOCK_MAGIC1
		|| Magic2() != (int32)SUPER_BLOCK_MAGIC2
		|| Magic3() != (int32)SUPER_BLOCK_MAGIC3
		|| (int32)block_size != inode_size
		|| ByteOrder() != SUPER_BLOCK_FS_LENDIAN
		|| (1UL << BlockShift()) != BlockSize()
		|| AllocationGroups() < 1
		|| AllocationGroupShift() < 1
		|| BlocksPerAllocationGroup() < 1
		|| NumBlocks() < 10
		|| AllocationGroups() != divide_roundup(NumBlocks(),
			1L << AllocationGroupShift()))
		return false;

	return true;
}


/*! Fills in a fresh superblock for a volume of \a numBlocks blocks of
	\a blockSize bytes each, named \a diskName. Chooses the allocation
	group layout so that smaller disks end up with roughly
	kDesiredAllocationGroups groups (see the comment at that constant).
*/
void
disk_super_block::Initialize(const char* diskName, off_t numBlocks,
	uint32 blockSize)
{
	memset(this, 0, sizeof(disk_super_block));

	magic1 = HOST_ENDIAN_TO_BFS_INT32(SUPER_BLOCK_MAGIC1);
	magic2 = HOST_ENDIAN_TO_BFS_INT32(SUPER_BLOCK_MAGIC2);
	magic3 = HOST_ENDIAN_TO_BFS_INT32(SUPER_BLOCK_MAGIC3);
	fs_byte_order = HOST_ENDIAN_TO_BFS_INT32(SUPER_BLOCK_FS_LENDIAN);
	flags = HOST_ENDIAN_TO_BFS_INT32(SUPER_BLOCK_DISK_CLEAN);

	strlcpy(name, diskName, sizeof(name));

	// compute block_shift = log2(blockSize); blockSize is expected to be
	// a power of two >= 512
	int32 blockShift = 9;
	while ((1UL << blockShift) < blockSize) {
		blockShift++;
	}

	block_size = inode_size = HOST_ENDIAN_TO_BFS_INT32(blockSize);
	block_shift = HOST_ENDIAN_TO_BFS_INT32(blockShift);

	num_blocks = HOST_ENDIAN_TO_BFS_INT64(numBlocks);
	used_blocks = 0;

	// Get the minimum ag_shift (that's determined by the block size)

	int32 bitsPerBlock = blockSize << 3;
	off_t bitmapBlocks = (numBlocks + bitsPerBlock - 1) / bitsPerBlock;
	int32 blocksPerGroup = 1;
	int32 groupShift = 13;

	for (int32 i = 8192; i < bitsPerBlock; i *= 2) {
		groupShift++;
	}

	// Many allocation groups help applying allocation policies, but if
	// they are too small, we will need too many block_runs to cover large
	// files (see above to get an explanation of the kDesiredAllocationGroups
	// constant).

	int32 numGroups;

	// grow the groups (up to the 65536-block maximum, groupShift == 16)
	// until the group count drops to the desired number
	while (true) {
		numGroups = (bitmapBlocks + blocksPerGroup - 1) / blocksPerGroup;
		if (numGroups > kDesiredAllocationGroups) {
			if (groupShift == 16)
				break;

			groupShift++;
			blocksPerGroup *= 2;
		} else
			break;
	}

	num_ags = HOST_ENDIAN_TO_BFS_INT32(numGroups);
	blocks_per_ag = HOST_ENDIAN_TO_BFS_INT32(blocksPerGroup);
	ag_shift = HOST_ENDIAN_TO_BFS_INT32(groupShift);
}


//	#pragma mark -


Volume::Volume(fs_volume* volume)
	:
	fVolume(volume),
	fBlockAllocator(this),
	fRootNode(NULL),
	fIndicesNode(NULL),
	fDirtyCachedBlocks(0),
	fFlags(0),
	fCheckingThread(-1),
	fCheckVisitor(NULL)
{
	mutex_init(&fLock, "bfs volume");
	mutex_init(&fQueryLock, "bfs queries");
}


Volume::~Volume()
{
	// destroy in reverse order of initialization
	mutex_destroy(&fQueryLock);
	mutex_destroy(&fLock);
}


bool
Volume::IsValidSuperBlock() const
{
	return fSuperBlock.IsValid();
}
309 */ 310 bool 311 Volume::IsValidInodeBlock(off_t block) const 312 { 313 return block > fSuperBlock.LogEnd() && block < NumBlocks(); 314 } 315 316 317 void 318 Volume::Panic() 319 { 320 FATAL(("Disk corrupted... switch to read-only mode!\n")); 321 fFlags |= VOLUME_READ_ONLY; 322 #if KDEBUG 323 kernel_debugger("BFS panics!"); 324 #endif 325 } 326 327 328 status_t 329 Volume::Mount(const char* deviceName, uint32 flags) 330 { 331 // TODO: validate the FS in write mode as well! 332 #if (B_HOST_IS_LENDIAN && defined(BFS_BIG_ENDIAN_ONLY)) \ 333 || (B_HOST_IS_BENDIAN && defined(BFS_LITTLE_ENDIAN_ONLY)) 334 // in big endian mode, we only mount read-only for now 335 flags |= B_MOUNT_READ_ONLY; 336 #endif 337 338 DeviceOpener opener(deviceName, (flags & B_MOUNT_READ_ONLY) != 0 339 ? O_RDONLY : O_RDWR); 340 fDevice = opener.Device(); 341 if (fDevice < B_OK) 342 RETURN_ERROR(fDevice); 343 344 if (opener.IsReadOnly()) 345 fFlags |= VOLUME_READ_ONLY; 346 347 // read the superblock 348 if (Identify(fDevice, &fSuperBlock) != B_OK) { 349 FATAL(("invalid superblock!\n")); 350 return B_BAD_VALUE; 351 } 352 353 // initialize short hands to the superblock (to save byte swapping) 354 fBlockSize = fSuperBlock.BlockSize(); 355 fBlockShift = fSuperBlock.BlockShift(); 356 fAllocationGroupShift = fSuperBlock.AllocationGroupShift(); 357 358 // check if the device size is large enough to hold the file system 359 off_t diskSize; 360 if (opener.GetSize(&diskSize, &fDeviceBlockSize) != B_OK) 361 RETURN_ERROR(B_ERROR); 362 if (diskSize < (NumBlocks() << BlockShift())) { 363 FATAL(("Disk size (%" B_PRIdOFF " bytes) < file system size (%" 364 B_PRIdOFF " bytes)!\n", diskSize, NumBlocks() << BlockShift())); 365 RETURN_ERROR(B_BAD_VALUE); 366 } 367 368 // set the current log pointers, so that journaling will work correctly 369 fLogStart = fSuperBlock.LogStart(); 370 fLogEnd = fSuperBlock.LogEnd(); 371 372 if ((fBlockCache = opener.InitCache(NumBlocks(), fBlockSize)) == NULL) 373 return B_ERROR; 374 
375 fJournal = new(std::nothrow) Journal(this); 376 if (fJournal == NULL) 377 return B_NO_MEMORY; 378 379 status_t status = fJournal->InitCheck(); 380 if (status < B_OK) { 381 FATAL(("could not initialize journal: %s!\n", strerror(status))); 382 return status; 383 } 384 385 // replaying the log is the first thing we will do on this disk 386 status = fJournal->ReplayLog(); 387 if (status != B_OK) { 388 FATAL(("Replaying log failed, data may be corrupted, volume " 389 "read-only.\n")); 390 fFlags |= VOLUME_READ_ONLY; 391 // TODO: if this is the boot volume, Bootscript will assume this 392 // is a CD... 393 // TODO: it would be nice to have a user visible alert instead 394 // of letting him just find this in the syslog. 395 } 396 397 status = fBlockAllocator.Initialize(); 398 if (status != B_OK) { 399 FATAL(("could not initialize block bitmap allocator!\n")); 400 return status; 401 } 402 403 fRootNode = new(std::nothrow) Inode(this, ToVnode(Root())); 404 if (fRootNode != NULL && fRootNode->InitCheck() == B_OK) { 405 status = publish_vnode(fVolume, ToVnode(Root()), (void*)fRootNode, 406 &gBFSVnodeOps, fRootNode->Mode(), 0); 407 if (status == B_OK) { 408 // try to get indices root dir 409 410 if (!Indices().IsZero()) { 411 fIndicesNode = new(std::nothrow) Inode(this, 412 ToVnode(Indices())); 413 } 414 415 if (fIndicesNode == NULL 416 || fIndicesNode->InitCheck() < B_OK 417 || !fIndicesNode->IsContainer()) { 418 INFORM(("bfs: volume doesn't have indices!\n")); 419 420 if (fIndicesNode) { 421 // if this is the case, the index root node is gone bad, 422 // and BFS switch to read-only mode 423 fFlags |= VOLUME_READ_ONLY; 424 delete fIndicesNode; 425 fIndicesNode = NULL; 426 } 427 } else { 428 // we don't use the vnode layer to access the indices node 429 } 430 } else { 431 FATAL(("could not create root node: publish_vnode() failed!\n")); 432 delete fRootNode; 433 return status; 434 } 435 } else { 436 status = B_BAD_VALUE; 437 FATAL(("could not create root node!\n")); 438 
return status; 439 } 440 441 // all went fine 442 opener.Keep(); 443 return B_OK; 444 } 445 446 447 status_t 448 Volume::Unmount() 449 { 450 put_vnode(fVolume, ToVnode(Root())); 451 452 fBlockAllocator.Uninitialize(); 453 454 // This will also flush the log & all blocks to disk 455 delete fJournal; 456 fJournal = NULL; 457 458 delete fIndicesNode; 459 460 block_cache_delete(fBlockCache, !IsReadOnly()); 461 close(fDevice); 462 463 return B_OK; 464 } 465 466 467 status_t 468 Volume::Sync() 469 { 470 return fJournal->FlushLogAndBlocks(); 471 } 472 473 474 status_t 475 Volume::ValidateBlockRun(block_run run) 476 { 477 if (run.AllocationGroup() < 0 478 || run.AllocationGroup() > (int32)AllocationGroups() 479 || run.Start() > (1UL << AllocationGroupShift()) 480 || run.length == 0 481 || uint32(run.Length() + run.Start()) 482 > (1UL << AllocationGroupShift())) { 483 Panic(); 484 FATAL(("*** invalid run(%d,%d,%d)\n", (int)run.AllocationGroup(), 485 run.Start(), run.Length())); 486 return B_BAD_DATA; 487 } 488 return B_OK; 489 } 490 491 492 block_run 493 Volume::ToBlockRun(off_t block) const 494 { 495 block_run run; 496 run.allocation_group = HOST_ENDIAN_TO_BFS_INT32( 497 block >> AllocationGroupShift()); 498 run.start = HOST_ENDIAN_TO_BFS_INT16( 499 block & ((1LL << AllocationGroupShift()) - 1)); 500 run.length = HOST_ENDIAN_TO_BFS_INT16(1); 501 return run; 502 } 503 504 505 status_t 506 Volume::CreateIndicesRoot(Transaction& transaction) 507 { 508 off_t id; 509 status_t status = Inode::Create(transaction, NULL, NULL, 510 S_INDEX_DIR | S_STR_INDEX | S_DIRECTORY | 0700, 0, 0, NULL, &id, 511 &fIndicesNode, NULL, BFS_DO_NOT_PUBLISH_VNODE); 512 if (status < B_OK) 513 RETURN_ERROR(status); 514 515 fSuperBlock.indices = ToBlockRun(id); 516 return WriteSuperBlock(); 517 } 518 519 520 status_t 521 Volume::CreateVolumeID(Transaction& transaction) 522 { 523 Attribute attr(fRootNode); 524 status_t status; 525 attr_cookie* cookie; 526 status = attr.Create("be:volume_id", 
B_UINT64_TYPE, O_RDWR, &cookie); 527 if (status == B_OK) { 528 static bool seeded = false; 529 if (!seeded) { 530 // seed the random number generator for the be:volume_id attribute. 531 srand(time(NULL)); 532 seeded = true; 533 } 534 uint64_t id; 535 size_t length = sizeof(id); 536 id = ((uint64_t)rand() << 32) | rand(); 537 attr.Write(transaction, cookie, 0, (uint8_t *)&id, &length, NULL); 538 } 539 return status; 540 } 541 542 543 544 status_t 545 Volume::AllocateForInode(Transaction& transaction, const Inode* parent, 546 mode_t type, block_run& run) 547 { 548 return fBlockAllocator.AllocateForInode(transaction, &parent->BlockRun(), 549 type, run); 550 } 551 552 553 status_t 554 Volume::WriteSuperBlock() 555 { 556 if (write_pos(fDevice, 512, &fSuperBlock, sizeof(disk_super_block)) 557 != sizeof(disk_super_block)) 558 return B_IO_ERROR; 559 560 return B_OK; 561 } 562 563 564 void 565 Volume::UpdateLiveQueries(Inode* inode, const char* attribute, int32 type, 566 const uint8* oldKey, size_t oldLength, const uint8* newKey, 567 size_t newLength) 568 { 569 MutexLocker _(fQueryLock); 570 571 SinglyLinkedList<Query>::Iterator iterator = fQueries.GetIterator(); 572 while (iterator.HasNext()) { 573 Query* query = iterator.Next(); 574 query->LiveUpdate(inode, attribute, type, oldKey, oldLength, newKey, 575 newLength); 576 } 577 } 578 579 580 void 581 Volume::UpdateLiveQueriesRenameMove(Inode* inode, ino_t oldDirectoryID, 582 const char* oldName, ino_t newDirectoryID, const char* newName) 583 { 584 MutexLocker _(fQueryLock); 585 586 size_t oldLength = strlen(oldName); 587 size_t newLength = strlen(newName); 588 589 SinglyLinkedList<Query>::Iterator iterator = fQueries.GetIterator(); 590 while (iterator.HasNext()) { 591 Query* query = iterator.Next(); 592 query->LiveUpdateRenameMove(inode, oldDirectoryID, oldName, oldLength, 593 newDirectoryID, newName, newLength); 594 } 595 } 596 597 598 /*! 
/*!
	Checks if there is a live query whose results depend on the presence
	or value of the specified attribute.
	Don't use it if you already have all the data together to evaluate
	the queries - it wouldn't save you anything in this case.
*/
bool
Volume::CheckForLiveQuery(const char* attribute)
{
	// TODO: check for a live query that depends on the specified attribute
	return true;
}


/*! Registers a live query so it receives UpdateLiveQueries*()
	notifications.
*/
void
Volume::AddQuery(Query* query)
{
	MutexLocker _(fQueryLock);
	fQueries.Add(query);
}


void
Volume::RemoveQuery(Query* query)
{
	MutexLocker _(fQueryLock);
	fQueries.Remove(query);
}


/*! Allocates the check visitor used by the file system checker; only a
	single check may run at a time (B_BUSY otherwise).
*/
status_t
Volume::CreateCheckVisitor()
{
	if (fCheckVisitor != NULL)
		return B_BUSY;

	fCheckVisitor = new(std::nothrow) ::CheckVisitor(this);
	if (fCheckVisitor == NULL)
		return B_NO_MEMORY;

	return B_OK;
}


void
Volume::DeleteCheckVisitor()
{
	delete fCheckVisitor;
	fCheckVisitor = NULL;
}


//	#pragma mark - Disk scanning and initialization


/*! Looks for a valid superblock in \a data (which must hold at least
	1024 bytes from the start of the device) and returns its byte offset
	in \a _offset: 512 normally, or 0 on platforms where the superblock
	may be located at the very beginning of the disk.
*/
/*static*/ status_t
Volume::CheckSuperBlock(const uint8* data, uint32* _offset)
{
	disk_super_block* superBlock = (disk_super_block*)(data + 512);
	if (superBlock->IsValid()) {
		if (_offset != NULL)
			*_offset = 512;
		return B_OK;
	}

#ifndef BFS_LITTLE_ENDIAN_ONLY
	// For PPC, the superblock might be located at offset 0
	superBlock = (disk_super_block*)data;
	if (superBlock->IsValid()) {
		if (_offset != NULL)
			*_offset = 0;
		return B_OK;
	}
#endif

	return B_BAD_VALUE;
}


/*! Reads the first 1024 bytes from \a fd and copies the superblock
	found therein (if any) to \a superBlock.
*/
/*static*/ status_t
Volume::Identify(int fd, disk_super_block* superBlock)
{
	uint8 buffer[1024];
	if (read_pos(fd, 0, buffer, sizeof(buffer)) != sizeof(buffer))
		return B_IO_ERROR;

	uint32 offset;
	if (CheckSuperBlock(buffer, &offset) != B_OK)
		return B_BAD_VALUE;

	memcpy(superBlock, buffer + offset, sizeof(disk_super_block));
	return B_OK;
}


/*! Initializes a fresh BFS volume on the device behind \a fd: writes a
	new superblock, clears the block bitmap, creates the root directory,
	the standard indices (unless VOLUME_NO_INDICES), and the volume ID
	attribute, then flushes everything to disk.
*/
status_t
Volume::Initialize(int fd, const char* name, uint32 blockSize,
	uint32 flags)
{
	// although there is no really good reason for it, we won't
	// accept '/' in disk names (mkbfs does this, too - and since
	// Tracker names mounted volumes like their name)
	if (strchr(name, '/') != NULL)
		return B_BAD_VALUE;

	if (blockSize != 1024 && blockSize != 2048 && blockSize != 4096
		&& blockSize != 8192)
		return B_BAD_VALUE;

	DeviceOpener opener(fd, O_RDWR);
	if (opener.Device() < B_OK)
		return B_BAD_VALUE;

	if (opener.IsReadOnly())
		return B_READ_ONLY_DEVICE;

	fDevice = opener.Device();

	uint32 deviceBlockSize;
	off_t deviceSize;
	if (opener.GetSize(&deviceSize, &deviceBlockSize) < B_OK)
		return B_ERROR;

	off_t numBlocks = deviceSize / blockSize;

	// create valid superblock

	fSuperBlock.Initialize(name, numBlocks, blockSize);

	// initialize short hands to the superblock (to save byte swapping)
	fBlockSize = fSuperBlock.BlockSize();
	fBlockShift = fSuperBlock.BlockShift();
	fAllocationGroupShift = fSuperBlock.AllocationGroupShift();

	// determine log size depending on the size of the volume
	// (512 blocks for small volumes, 4096 for > 1 GB, 2048 otherwise)
	off_t logSize = 2048;
	if (numBlocks <= 20480)
		logSize = 512;
	if (deviceSize > 1LL * 1024 * 1024 * 1024)
		logSize = 4096;

	// since the allocator has not been initialized yet, we
	// cannot use BlockAllocator::BitmapSize() here
	off_t bitmapBlocks = (numBlocks + blockSize * 8 - 1) / (blockSize * 8);

	// the log is placed right behind the block bitmap (which starts at
	// block 1, after the superblock)
	fSuperBlock.log_blocks = ToBlockRun(bitmapBlocks + 1);
	fSuperBlock.log_blocks.length = HOST_ENDIAN_TO_BFS_INT16(logSize);
	fSuperBlock.log_start = fSuperBlock.log_end = HOST_ENDIAN_TO_BFS_INT64(
		ToBlock(Log()));

	// set the current log pointers, so that journaling will work correctly
	fLogStart = fSuperBlock.LogStart();
	fLogEnd = fSuperBlock.LogEnd();

	if (!IsValidSuperBlock())
		RETURN_ERROR(B_ERROR);

	if ((fBlockCache = opener.InitCache(NumBlocks(), fBlockSize)) == NULL)
		return B_ERROR;

	fJournal = new(std::nothrow) Journal(this);
	if (fJournal == NULL || fJournal->InitCheck() < B_OK)
		RETURN_ERROR(B_ERROR);

	// ready to write data to disk

	Transaction transaction(this, 0);

	if (fBlockAllocator.InitializeAndClearBitmap(transaction) < B_OK)
		RETURN_ERROR(B_ERROR);

	off_t id;
	status_t status = Inode::Create(transaction, NULL, NULL,
		S_DIRECTORY | 0755, 0, 0, NULL, &id, &fRootNode);
	if (status < B_OK)
		RETURN_ERROR(status);

	fSuperBlock.root_dir = ToBlockRun(id);

	if ((flags & VOLUME_NO_INDICES) == 0) {
		// The indices root directory will be created automatically
		// when the standard indices are created (or any other).
		Index index(this);
		status = index.Create(transaction, "name", B_STRING_TYPE);
		if (status < B_OK)
			return status;

		status = index.Create(transaction, "BEOS:APP_SIG", B_STRING_TYPE);
		if (status < B_OK)
			return status;

		status = index.Create(transaction, "last_modified", B_INT64_TYPE);
		if (status < B_OK)
			return status;

		status = index.Create(transaction, "size", B_INT64_TYPE);
		if (status < B_OK)
			return status;
	}

	status = CreateVolumeID(transaction);
	if (status < B_OK)
		return status;

	status = _EraseUnusedBootBlock();
	if (status < B_OK)
		return status;

	status = WriteSuperBlock();
	if (status < B_OK)
		return status;

	status = transaction.Done();
	if (status < B_OK)
		return status;

	Sync();
	opener.RemoveCache(true);
	return B_OK;
}


/*! Erase the first boot block, as we don't use it and there
 *	might be leftovers from other file systems. This can cause
 *	confusion for identifying the partition if not erased.
 */
status_t
Volume::_EraseUnusedBootBlock()
{
	const int32 blockSize = 512;
	const char emptySector[blockSize] = { 0 };
	if (write_pos(fDevice, 0, emptySector, blockSize) != blockSize)
		return B_IO_ERROR;

	return B_OK;
}