/*
 * Copyright 2001-2009, Axel Dörfler, axeld@pinc-software.de.
 * This file may be used under the terms of the MIT License.
 */

//! super block, mounting, etc.


#include "Attribute.h"
#include "Debug.h"
#include "Inode.h"
#include "Journal.h"
#include "Query.h"
#include "Volume.h"


static const int32 kDesiredAllocationGroups = 56;
	// This is the number of allocation groups that will be tried
	// to be given for newly initialized disks.
	// That's only relevant for smaller disks, though, since any
	// of today's disk sizes already reach the maximum length
	// of an allocation group (65536 blocks).
	// It seems to create appropriate numbers for smaller disks
	// with this setting, though (i.e. you can create a 400 MB
	// file on a 1 GB disk without the need for double indirect
	// blocks).


/*!	RAII-style helper that opens a device either by path or by duplicating
	an existing file descriptor, and can attach a block cache to it.
	The destructor removes the cache (discarding dirty blocks) and closes
	the device again, unless Keep() has been called to transfer ownership
	of the descriptor/cache to the caller.
*/
class DeviceOpener {
public:
						DeviceOpener(int fd, int mode);
						DeviceOpener(const char* device, int mode);
						~DeviceOpener();

			int			Open(const char* device, int mode);
			int			Open(int fd, int mode);
			void*		InitCache(off_t numBlocks, uint32 blockSize);
			void		RemoveCache(bool allowWrites);

			void		Keep();

			int			Device() const { return fDevice; }
			int			Mode() const { return fMode; }
			bool		IsReadOnly() const { return _IsReadOnly(fMode); }

			status_t	GetSize(off_t* _size, uint32* _blockSize = NULL);

private:
	static	bool		_IsReadOnly(int mode)
							{ return (mode & O_RWMASK) == O_RDONLY;}
	static	bool		_IsReadWrite(int mode)
							{ return (mode & O_RWMASK) == O_RDWR;}

			int			fDevice;
			int			fMode;
			void*		fBlockCache;
};


DeviceOpener::DeviceOpener(const char* device, int mode)
	:
	fBlockCache(NULL)
{
	Open(device, mode);
}


DeviceOpener::DeviceOpener(int fd, int mode)
	:
	fBlockCache(NULL)
{
	Open(fd, mode);
}


DeviceOpener::~DeviceOpener()
{
	if (fDevice >= 0) {
		// discard any cached blocks (no writes), then release the descriptor
		RemoveCache(false);
		close(fDevice);
	}
}


/*!	Opens \a device with the given \a mode (always adding O_NOCACHE, since
	the block cache is used instead of the file cache). If a read/write
	open is not possible, or the device reports itself read-only via
	B_GET_GEOMETRY, it falls back to opening read-only.
	Returns the file descriptor, or a negative error code on failure.
*/
int
DeviceOpener::Open(const char* device, int mode)
{
	fDevice = open(device, mode | O_NOCACHE);
	if (fDevice < 0)
		fDevice = errno;
			// NOTE(review): this relies on errno being a negative Haiku
			// error code here, so that the retry check below still fires

	if (fDevice < 0 && _IsReadWrite(mode)) {
		// try again to open read-only (don't rely on a specific error code)
		return Open(device, O_RDONLY | O_NOCACHE);
	}

	if (fDevice >= 0) {
		// opening succeeded
		fMode = mode;
		if (_IsReadWrite(mode)) {
			// check out if the device really allows for read/write access
			device_geometry geometry;
			if (!ioctl(fDevice, B_GET_GEOMETRY, &geometry)) {
				if (geometry.read_only) {
					// reopen device read-only
					close(fDevice);
					return Open(device, O_RDONLY | O_NOCACHE);
				}
			}
		}
	}

	return fDevice;
}


/*!	Takes over an already opened device by duplicating the descriptor
	\a fd. \a mode is only remembered, not re-applied to the descriptor.
	Returns the new descriptor, or errno if dup() failed.
*/
int
DeviceOpener::Open(int fd, int mode)
{
	fDevice = dup(fd);
	if (fDevice < 0)
		return errno;

	fMode = mode;

	return fDevice;
}


/*!	Creates a block cache for the device; the cache is writable only if
	the device was opened read/write. Returns NULL on failure.
*/
void*
DeviceOpener::InitCache(off_t numBlocks, uint32 blockSize)
{
	return fBlockCache = block_cache_create(fDevice, numBlocks, blockSize,
		IsReadOnly());
}


/*!	Deletes the block cache again; \a allowWrites determines whether
	dirty blocks are flushed to disk or discarded.
*/
void
DeviceOpener::RemoveCache(bool allowWrites)
{
	if (fBlockCache == NULL)
		return;

	block_cache_delete(fBlockCache, allowWrites);
	fBlockCache = NULL;
}


/*!	Prevents the destructor from closing the device and removing the
	cache - used once mounting succeeded and the volume takes ownership.
*/
void
DeviceOpener::Keep()
{
	fDevice = -1;
}


/*!	Returns the size of the device in bytes. It uses B_GET_GEOMETRY
	to compute the size, or fstat() if that failed.
*/
status_t
DeviceOpener::GetSize(off_t* _size, uint32* _blockSize)
{
	device_geometry geometry;
	if (ioctl(fDevice, B_GET_GEOMETRY, &geometry) < 0) {
		// maybe it's just a file
		struct stat stat;
		if (fstat(fDevice, &stat) < 0)
			return B_ERROR;

		if (_size)
			*_size = stat.st_size;
		if (_blockSize)	// that shouldn't cause us any problems
			*_blockSize = 512;

		return B_OK;
	}

	if (_size) {
		*_size = 1LL * geometry.head_count * geometry.cylinder_count
			* geometry.sectors_per_track * geometry.bytes_per_sector;
	}
	if (_blockSize)
		*_blockSize = geometry.bytes_per_sector;

	return B_OK;
}


//	#pragma mark -


/*!	Performs a sanity check of the on-disk super block: all three magic
	numbers, byte order, block size consistency, and allocation group
	geometry must match up.
*/
bool
disk_super_block::IsValid()
{
	if (Magic1() != (int32)SUPER_BLOCK_MAGIC1
		|| Magic2() != (int32)SUPER_BLOCK_MAGIC2
		|| Magic3() != (int32)SUPER_BLOCK_MAGIC3
		|| (int32)block_size != inode_size
		|| ByteOrder() != SUPER_BLOCK_FS_LENDIAN
		|| (1UL << BlockShift()) != BlockSize()
		|| AllocationGroups() < 1
		|| AllocationGroupShift() < 1
		|| BlocksPerAllocationGroup() < 1
		|| NumBlocks() < 10
		|| AllocationGroups() != divide_roundup(NumBlocks(),
			1L << AllocationGroupShift()))
		return false;

	return true;
}


/*!	Fills in a fresh super block for a volume of \a numBlocks blocks of
	\a blockSize bytes each, named \a diskName. All fields are stored in
	BFS (little endian) byte order. The allocation group geometry is
	chosen so that roughly kDesiredAllocationGroups groups are created.
*/
void
disk_super_block::Initialize(const char* diskName, off_t numBlocks,
	uint32 blockSize)
{
	memset(this, 0, sizeof(disk_super_block));

	magic1 = HOST_ENDIAN_TO_BFS_INT32(SUPER_BLOCK_MAGIC1);
	magic2 = HOST_ENDIAN_TO_BFS_INT32(SUPER_BLOCK_MAGIC2);
	magic3 = HOST_ENDIAN_TO_BFS_INT32(SUPER_BLOCK_MAGIC3);
	fs_byte_order = HOST_ENDIAN_TO_BFS_INT32(SUPER_BLOCK_FS_LENDIAN);
	flags = HOST_ENDIAN_TO_BFS_INT32(SUPER_BLOCK_DISK_CLEAN);

	strlcpy(name, diskName, sizeof(name));

	// compute the block shift from the block size (blockSize is assumed
	// to be a power of two >= 512)
	int32 blockShift = 9;
	while ((1UL << blockShift) < blockSize) {
		blockShift++;
	}

	block_size = inode_size = HOST_ENDIAN_TO_BFS_INT32(blockSize);
	block_shift = HOST_ENDIAN_TO_BFS_INT32(blockShift);

	num_blocks = HOST_ENDIAN_TO_BFS_INT64(numBlocks);
	used_blocks = 0;

	// Get the minimum ag_shift (that's determined by the block size)

	int32 bitsPerBlock = blockSize << 3;
	off_t bitmapBlocks = (numBlocks + bitsPerBlock - 1) / bitsPerBlock;
	int32 blocksPerGroup = 1;
	int32 groupShift = 13;

	for (int32 i = 8192; i < bitsPerBlock; i *= 2) {
		groupShift++;
	}

	// Many allocation groups help applying allocation policies, but if
	// they are too small, we will need too many block_runs to cover large
	// files (see above to get an explanation of the kDesiredAllocationGroups
	// constant).

	int32 numGroups;

	while (true) {
		numGroups = (bitmapBlocks + blocksPerGroup - 1) / blocksPerGroup;
		if (numGroups > kDesiredAllocationGroups) {
			// the group shift is limited to 16 bits (65536 blocks per group)
			if (groupShift == 16)
				break;

			groupShift++;
			blocksPerGroup *= 2;
		} else
			break;
	}

	num_ags = HOST_ENDIAN_TO_BFS_INT32(numGroups);
	blocks_per_ag = HOST_ENDIAN_TO_BFS_INT32(blocksPerGroup);
	ag_shift = HOST_ENDIAN_TO_BFS_INT32(groupShift);
}


//	#pragma mark -


Volume::Volume(fs_volume* volume)
	:
	fVolume(volume),
	fBlockAllocator(this),
	fRootNode(NULL),
	fIndicesNode(NULL),
	fDirtyCachedBlocks(0),
	fFlags(0),
	fCheckingThread(-1)
{
	mutex_init(&fLock, "bfs volume");
	mutex_init(&fQueryLock, "bfs queries");
}


Volume::~Volume()
{
	mutex_destroy(&fQueryLock);
	mutex_destroy(&fLock);
}


bool
Volume::IsValidSuperBlock()
{
	return fSuperBlock.IsValid();
}


/*!	Called when serious on-disk corruption has been detected; switches
	the volume to read-only mode to prevent further damage.
*/
void
Volume::Panic()
{
	FATAL(("Disk corrupted... switch to read-only mode!\n"));
	fFlags |= VOLUME_READ_ONLY;
#if KDEBUG
	kernel_debugger("BFS panics!");
#endif
}


/*!	Mounts the volume from \a deviceName: opens the device, validates the
	super block, creates the block cache and journal, replays the log,
	initializes the block allocator, and publishes the root vnode.
	On a writable volume, a "be:volume_id" attribute is created on the
	root node if it doesn't exist yet.
*/
status_t
Volume::Mount(const char* deviceName, uint32 flags)
{
	// TODO: validate the FS in write mode as well!
#if (B_HOST_IS_LENDIAN && defined(BFS_BIG_ENDIAN_ONLY)) \
	|| (B_HOST_IS_BENDIAN && defined(BFS_LITTLE_ENDIAN_ONLY))
	// in big endian mode, we only mount read-only for now
	flags |= B_MOUNT_READ_ONLY;
#endif

	DeviceOpener opener(deviceName, (flags & B_MOUNT_READ_ONLY) != 0
		? O_RDONLY : O_RDWR);
	fDevice = opener.Device();
	if (fDevice < B_OK)
		RETURN_ERROR(fDevice);

	if (opener.IsReadOnly())
		fFlags |= VOLUME_READ_ONLY;

	// read the super block
	if (Identify(fDevice, &fSuperBlock) != B_OK) {
		FATAL(("invalid super block!\n"));
		return B_BAD_VALUE;
	}

	// initialize short hands to the super block (to save byte swapping)
	fBlockSize = fSuperBlock.BlockSize();
	fBlockShift = fSuperBlock.BlockShift();
	fAllocationGroupShift = fSuperBlock.AllocationGroupShift();

	// check if the device size is large enough to hold the file system
	off_t diskSize;
	if (opener.GetSize(&diskSize, &fDeviceBlockSize) != B_OK)
		RETURN_ERROR(B_ERROR);
	if (diskSize < (NumBlocks() << BlockShift()))
		RETURN_ERROR(B_BAD_VALUE);

	// set the current log pointers, so that journaling will work correctly
	fLogStart = fSuperBlock.LogStart();
	fLogEnd = fSuperBlock.LogEnd();

	if ((fBlockCache = opener.InitCache(NumBlocks(), fBlockSize)) == NULL)
		return B_ERROR;

	fJournal = new(std::nothrow) Journal(this);
	if (fJournal == NULL)
		return B_NO_MEMORY;

	status_t status = fJournal->InitCheck();
	if (status < B_OK) {
		FATAL(("could not initialize journal: %s!\n", strerror(status)));
		return status;
	}

	// replaying the log is the first thing we will do on this disk
	status = fJournal->ReplayLog();
	if (status != B_OK) {
		FATAL(("Replaying log failed, data may be corrupted, volume "
			"read-only.\n"));
		fFlags |= VOLUME_READ_ONLY;
		// TODO: if this is the boot volume, Bootscript will assume this
		// is a CD...
		// TODO: it would be nice to have a user visible alert instead
		// of letting him just find this in the syslog.
	}

	status = fBlockAllocator.Initialize();
	if (status != B_OK) {
		FATAL(("could not initialize block bitmap allocator!\n"));
		return status;
	}

	fRootNode = new(std::nothrow) Inode(this, ToVnode(Root()));
	if (fRootNode != NULL && fRootNode->InitCheck() == B_OK) {
		status = publish_vnode(fVolume, ToVnode(Root()), (void*)fRootNode,
			&gBFSVnodeOps, fRootNode->Mode(), 0);
		if (status == B_OK) {
			// try to get indices root dir

			if (!Indices().IsZero()) {
				fIndicesNode = new(std::nothrow) Inode(this,
					ToVnode(Indices()));
			}

			if (fIndicesNode == NULL
				|| fIndicesNode->InitCheck() < B_OK
				|| !fIndicesNode->IsContainer()) {
				INFORM(("bfs: volume doesn't have indices!\n"));

				if (fIndicesNode) {
					// if this is the case, the index root node is gone bad,
					// and BFS switch to read-only mode
					fFlags |= VOLUME_READ_ONLY;
					delete fIndicesNode;
					fIndicesNode = NULL;
				}
			} else {
				// we don't use the vnode layer to access the indices node
			}
		} else {
			FATAL(("could not create root node: publish_vnode() failed!\n"));
			delete fRootNode;
			return status;
		}
	} else {
		status = B_BAD_VALUE;
		FATAL(("could not create root node!\n"));
		return status;
	}

	if (!(fFlags & VOLUME_READ_ONLY)) {
		// create a unique volume ID on first mount of a writable volume
		Attribute attr(fRootNode);
		if (attr.Get("be:volume_id") == B_ENTRY_NOT_FOUND) {
			Transaction transaction(this, fRootNode->BlockNumber());
			fRootNode->WriteLockInTransaction(transaction);
			CreateVolumeID(transaction);
			transaction.Done();
		}
	}

	// all went fine
	opener.Keep();
		// from now on, the volume owns device and block cache
	return B_OK;
}


/*!	Unmounts the volume: releases the root vnode, shuts down the block
	allocator and journal (which flushes the log and all blocks), and
	deletes the block cache before closing the device.
*/
status_t
Volume::Unmount()
{
	put_vnode(fVolume, ToVnode(Root()));

	fBlockAllocator.Uninitialize();

	// This will also flush the log & all blocks to disk
	delete fJournal;
	fJournal = NULL;

	delete fIndicesNode;

	block_cache_delete(fBlockCache, !IsReadOnly());
	close(fDevice);

	return B_OK;
}


status_t
Volume::Sync()
{
	return fJournal->FlushLogAndBlocks();
}


/*!	Checks whether \a run lies within the volume's allocation group
	bounds; on failure the volume is switched to read-only mode via
	Panic() and B_BAD_DATA is returned.
*/
status_t
Volume::ValidateBlockRun(block_run run)
{
	if (run.AllocationGroup() < 0
		|| run.AllocationGroup() > (int32)AllocationGroups()
		|| run.Start() > (1UL << AllocationGroupShift())
		|| run.length == 0
		|| uint32(run.Length() + run.Start())
			> (1UL << AllocationGroupShift())) {
		Panic();
		FATAL(("*** invalid run(%d,%d,%d)\n", (int)run.AllocationGroup(),
			run.Start(), run.Length()));
		return B_BAD_DATA;
	}
	return B_OK;
}


/*!	Converts a linear block number into a block_run of length 1, stored
	in BFS byte order.
*/
block_run
Volume::ToBlockRun(off_t block) const
{
	block_run run;
	run.allocation_group = HOST_ENDIAN_TO_BFS_INT32(
		block >> AllocationGroupShift());
	run.start = HOST_ENDIAN_TO_BFS_INT16(
		block & ((1LL << AllocationGroupShift()) - 1));
	run.length = HOST_ENDIAN_TO_BFS_INT16(1);
	return run;
}


/*!	Creates the indices root directory, stores its location in the super
	block, and writes the super block back to disk.
*/
status_t
Volume::CreateIndicesRoot(Transaction& transaction)
{
	off_t id;
	status_t status = Inode::Create(transaction, NULL, NULL,
		S_INDEX_DIR | S_STR_INDEX | S_DIRECTORY | 0700, 0, 0, NULL, &id,
		&fIndicesNode);
	if (status < B_OK)
		RETURN_ERROR(status);

	fSuperBlock.indices = ToBlockRun(id);
	return WriteSuperBlock();
}


/*!	Creates the "be:volume_id" attribute on the root node, filled with
	a random 64 bit value.
*/
status_t
Volume::CreateVolumeID(Transaction& transaction)
{
	Attribute attr(fRootNode);
	status_t status;
	attr_cookie* cookie;
	status = attr.Create("be:volume_id", B_UINT64_TYPE, O_RDWR, &cookie);
	if (status == B_OK) {
		static bool seeded = false;
		if (!seeded) {
			// seed the random number generator for the be:volume_id attribute.
			srand(time(NULL));
			seeded = true;
		}
		uint64_t id;
		size_t length = sizeof(id);
		// rand() only provides enough bits for half the ID, so combine two
		id = ((uint64_t)rand() << 32) | rand();
		attr.Write(transaction, cookie, 0, (uint8_t *)&id, &length, NULL);
	}
	return status;
}


/*!	Allocates blocks for a new inode of the given \a type, preferably
	near its \a parent directory.
*/
status_t
Volume::AllocateForInode(Transaction& transaction, const Inode* parent,
	mode_t type, block_run& run)
{
	return fBlockAllocator.AllocateForInode(transaction, &parent->BlockRun(),
		type, run);
}


/*!	Writes the in-memory super block back to its on-disk location at
	offset 512.
*/
status_t
Volume::WriteSuperBlock()
{
	if (write_pos(fDevice, 512, &fSuperBlock, sizeof(disk_super_block))
		!= sizeof(disk_super_block))
		return B_IO_ERROR;

	return B_OK;
}


/*!	Notifies all live queries about a changed attribute value of
	\a inode, so that they can update their result sets.
*/
void
Volume::UpdateLiveQueries(Inode* inode, const char* attribute, int32 type,
	const uint8* oldKey, size_t oldLength, const uint8* newKey,
	size_t newLength)
{
	MutexLocker _(fQueryLock);

	SinglyLinkedList<Query>::Iterator iterator = fQueries.GetIterator();
	while (iterator.HasNext()) {
		Query* query = iterator.Next();
		query->LiveUpdate(inode, attribute, type, oldKey, oldLength, newKey,
			newLength);
	}
}


/*!	Notifies all live queries that \a inode has been renamed and/or
	moved from \a oldDirectoryID to \a newDirectoryID.
*/
void
Volume::UpdateLiveQueriesRenameMove(Inode* inode, ino_t oldDirectoryID,
	const char* oldName, ino_t newDirectoryID, const char* newName)
{
	MutexLocker _(fQueryLock);

	size_t oldLength = strlen(oldName);
	size_t newLength = strlen(newName);

	SinglyLinkedList<Query>::Iterator iterator = fQueries.GetIterator();
	while (iterator.HasNext()) {
		Query* query = iterator.Next();
		query->LiveUpdateRenameMove(inode, oldDirectoryID, oldName, oldLength,
			newDirectoryID, newName, newLength);
	}
}


/*!	Checks if there is a live query whose results depend on the presence
	or value of the specified attribute.
	Don't use it if you already have all the data together to evaluate
	the queries - it wouldn't save you anything in this case.
*/
bool
Volume::CheckForLiveQuery(const char* attribute)
{
	// TODO: check for a live query that depends on the specified attribute
	return true;
		// conservatively claims a dependent query exists until implemented
}


void
Volume::AddQuery(Query* query)
{
	MutexLocker _(fQueryLock);
	fQueries.Add(query);
}


void
Volume::RemoveQuery(Query* query)
{
	MutexLocker _(fQueryLock);
	fQueries.Remove(query);
}


//	#pragma mark - Disk scanning and initialization


/*!	Checks the first 1024 bytes of a device for a valid BFS super block,
	which usually lives at offset 512; on PPC it may also be at offset 0.
	On success, the offset where it was found is stored in \a _offset
	(if not NULL).
*/
/*static*/ status_t
Volume::CheckSuperBlock(const uint8* data, uint32* _offset)
{
	disk_super_block* superBlock = (disk_super_block*)(data + 512);
	if (superBlock->IsValid()) {
		if (_offset != NULL)
			*_offset = 512;
		return B_OK;
	}

#ifndef BFS_LITTLE_ENDIAN_ONLY
	// For PPC, the super block might be located at offset 0
	superBlock = (disk_super_block*)data;
	if (superBlock->IsValid()) {
		if (_offset != NULL)
			*_offset = 0;
		return B_OK;
	}
#endif

	return B_BAD_VALUE;
}


/*!	Reads the first 1024 bytes from \a fd and copies the super block
	into \a superBlock, if a valid one could be found.
*/
/*static*/ status_t
Volume::Identify(int fd, disk_super_block* superBlock)
{
	uint8 buffer[1024];
	if (read_pos(fd, 0, buffer, sizeof(buffer)) != sizeof(buffer))
		return B_IO_ERROR;

	uint32 offset;
	if (CheckSuperBlock(buffer, &offset) != B_OK)
		return B_BAD_VALUE;

	memcpy(superBlock, buffer + offset, sizeof(disk_super_block));
	return B_OK;
}


/*!	Initializes a new BFS file system on the device referred to by \a fd:
	writes a fresh super block, log area, bitmap, root directory, the
	standard indices (unless VOLUME_NO_INDICES is set in \a flags), and
	a volume ID attribute.
*/
status_t
Volume::Initialize(int fd, const char* name, uint32 blockSize,
	uint32 flags)
{
	// although there is no really good reason for it, we won't
	// accept '/' in disk names (mkbfs does this, too - and since
	// Tracker names mounted volumes like their name)
	if (strchr(name, '/') != NULL)
		return B_BAD_VALUE;

	if (blockSize != 1024 && blockSize != 2048 && blockSize != 4096
		&& blockSize != 8192)
		return B_BAD_VALUE;

	DeviceOpener opener(fd, O_RDWR);
	if (opener.Device() < B_OK)
		return B_BAD_VALUE;

	if (opener.IsReadOnly())
		return B_READ_ONLY_DEVICE;

	fDevice = opener.Device();

	uint32 deviceBlockSize;
	off_t deviceSize;
	if (opener.GetSize(&deviceSize, &deviceBlockSize) < B_OK)
		return B_ERROR;

	off_t numBlocks = deviceSize / blockSize;

	// create valid super block

	fSuperBlock.Initialize(name, numBlocks, blockSize);

	// initialize short hands to the super block (to save byte swapping)
	fBlockSize = fSuperBlock.BlockSize();
	fBlockShift = fSuperBlock.BlockShift();
	fAllocationGroupShift = fSuperBlock.AllocationGroupShift();

	// determine log size depending on the size of the volume
	off_t logSize = 2048;
	if (numBlocks <= 20480)
		logSize = 512;
	if (deviceSize > 1LL * 1024 * 1024 * 1024)
		logSize = 4096;

	// since the allocator has not been initialized yet, we
	// cannot use BlockAllocator::BitmapSize() here
	off_t bitmapBlocks = (numBlocks + blockSize * 8 - 1) / (blockSize * 8);

	// the log is placed right behind the block bitmap
	fSuperBlock.log_blocks = ToBlockRun(bitmapBlocks + 1);
	fSuperBlock.log_blocks.length = HOST_ENDIAN_TO_BFS_INT16(logSize);
	fSuperBlock.log_start = fSuperBlock.log_end = HOST_ENDIAN_TO_BFS_INT64(
		ToBlock(Log()));

	// set the current log pointers, so that journaling will work correctly
	fLogStart = fSuperBlock.LogStart();
	fLogEnd = fSuperBlock.LogEnd();

	if (!IsValidSuperBlock())
		RETURN_ERROR(B_ERROR);

	if ((fBlockCache = opener.InitCache(NumBlocks(), fBlockSize)) == NULL)
		return B_ERROR;

	fJournal = new(std::nothrow) Journal(this);
	if (fJournal == NULL || fJournal->InitCheck() < B_OK)
		RETURN_ERROR(B_ERROR);

	// ready to write data to disk

	Transaction transaction(this, 0);

	if (fBlockAllocator.InitializeAndClearBitmap(transaction) < B_OK)
		RETURN_ERROR(B_ERROR);

	off_t id;
	status_t status = Inode::Create(transaction, NULL, NULL,
		S_DIRECTORY | 0755, 0, 0, NULL, &id, &fRootNode);
	if (status < B_OK)
		RETURN_ERROR(status);

	fSuperBlock.root_dir = ToBlockRun(id);

	if ((flags & VOLUME_NO_INDICES) == 0) {
		// The indices root directory will be created automatically
		// when the standard indices are created (or any other).
		Index index(this);
		status = index.Create(transaction, "name", B_STRING_TYPE);
		if (status < B_OK)
			return status;

		status = index.Create(transaction, "BEOS:APP_SIG", B_STRING_TYPE);
		if (status < B_OK)
			return status;

		status = index.Create(transaction, "last_modified", B_INT64_TYPE);
		if (status < B_OK)
			return status;

		status = index.Create(transaction, "size", B_INT64_TYPE);
		if (status < B_OK)
			return status;
	}

	CreateVolumeID(transaction);

	WriteSuperBlock();
	transaction.Done();

	Sync();
	opener.RemoveCache(true);
		// flush everything to disk before the opener closes the device
	return B_OK;
}