/*
 * Copyright 2004-2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 */


#include "vnode_store.h"

#include <KernelExport.h>
#include <fs_cache.h>

#include <util/kernel_cpp.h>
#include <file_cache.h>
#include <vfs.h>
#include <vm.h>
#include <vm_page.h>
#include <vm_cache.h>
#include <generic_syscall.h>

#include <unistd.h>
#include <stdlib.h>
#include <string.h>


//#define TRACE_FILE_CACHE
#ifdef TRACE_FILE_CACHE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

// maximum number of iovecs per request
#define MAX_IO_VECS			64	// 256 kB
#define MAX_FILE_IO_VECS	32

#define CACHED_FILE_EXTENTS	2
	// must be smaller than MAX_FILE_IO_VECS
	// ToDo: find out how many of these are typically used

struct file_extent {
	off_t			offset;
	file_io_vec		disk;
};

struct file_map {
	file_map();
	~file_map();

	file_extent *operator[](uint32 index);
	file_extent *ExtentAt(uint32 index);
	status_t Add(file_io_vec *vecs, size_t vecCount);
	void Free();

	union {
		file_extent	direct[CACHED_FILE_EXTENTS];
		file_extent	*array;
	};
	size_t			count;
};

struct file_cache_ref {
	vm_cache_ref	*cache;
	void			*vnode;
	void			*device;
	void			*cookie;
	file_map		map;
};


static struct cache_module_info *sCacheModule;


file_map::file_map()
{
	array = NULL;
	count = 0;
}


file_map::~file_map()
{
	Free();
}


file_extent *
file_map::operator[](uint32 index)
{
	return ExtentAt(index);
}


file_extent *
file_map::ExtentAt(uint32 index)
{
	if (index >= count)
		return NULL;

	if (count > CACHED_FILE_EXTENTS)
		return &array[index];

	return &direct[index];
}


status_t
file_map::Add(file_io_vec *vecs, size_t vecCount)
{
	off_t offset = 0;

	if (vecCount <= CACHED_FILE_EXTENTS && count == 0) {
		// just use the reserved area in the file_cache_ref structure
	} else {
		file_extent *newMap = (file_extent *)realloc(array,
			(count + vecCount) * sizeof(file_extent));
		if (newMap == NULL)
			return B_NO_MEMORY;

		array = newMap;

		if (count != 0) {
			file_extent *extent = ExtentAt(count - 1);
			offset = extent->offset + extent->disk.length;
		}
	}

	count += vecCount;

	for (uint32 i = 0; i < vecCount; i++) {
		file_extent *extent = ExtentAt(i);

		extent->offset = offset;
		extent->disk = vecs[i];

		offset += extent->disk.length;
	}

	return B_OK;
}


void
file_map::Free()
{
	if (count > CACHED_FILE_EXTENTS)
		free(array);

	array = NULL;
	count = 0;
}


// #pragma mark -


static void
add_to_iovec(iovec *vecs, int32 &index, int32 max, addr_t address, size_t size)
{
	if (index > 0 && (addr_t)vecs[index - 1].iov_base + vecs[index - 1].iov_len == address) {
		// the iovec can be combined with the previous one
		vecs[index - 1].iov_len += size;
		return;
	}

	if (index == max)
		panic("no more space for iovecs!");

	// we need to start a new iovec
	vecs[index].iov_base = (void *)address;
	vecs[index].iov_len = size;
	index++;
}


static file_extent *
find_file_extent(file_cache_ref *ref, off_t offset, uint32 *_index)
{
	// ToDo: do binary search

	for (uint32 index = 0; index < ref->map.count; index++) {
		file_extent *extent = ref->map[index];

		if (extent->offset <= offset
			&& extent->offset + extent->disk.length > offset) {
			if (_index)
				*_index = index;
			return extent;
		}
	}

	return NULL;
}


static status_t
get_file_map(file_cache_ref *ref, off_t offset, size_t size,
	file_io_vec *vecs, size_t *_count)
{
	size_t maxVecs = *_count;

	if (ref->map.count == 0) {
		// we don't yet have the map of this file, so let's grab it
		// (ordered by offset, so that we can do a binary search on them)

		mutex_lock(&ref->cache->lock);

		// the file map could have been requested in the meantime
		if (ref->map.count == 0) {
			size_t vecCount = maxVecs;
			status_t status;
			off_t mapOffset = 0;

			while (true) {
				status = vfs_get_file_map(ref->vnode, mapOffset, ~0UL, vecs, &vecCount);
				if (status < B_OK && status != B_BUFFER_OVERFLOW) {
					mutex_unlock(&ref->cache->lock);
					return status;
				}

				ref->map.Add(vecs, vecCount);

				if (status != B_BUFFER_OVERFLOW)
					break;

				// when we are here, the map has been stored in the array, and
				// the array size was still too small to cover the whole file
				file_io_vec *last = &vecs[vecCount - 1];
				mapOffset += last->length;
				vecCount = maxVecs;
			}
		}

		mutex_unlock(&ref->cache->lock);
	}

	// Now that we have cached the map of this file, we need to
	// translate it for the requested access.

	uint32 index;
	file_extent *fileExtent = find_file_extent(ref, offset, &index);
	if (fileExtent == NULL) {
		// access outside file bounds? But that's not our problem
		*_count = 0;
		return B_OK;
	}

	offset -= fileExtent->offset;
	vecs[0].offset = fileExtent->disk.offset + offset;
	vecs[0].length = fileExtent->disk.length - offset;

	if (vecs[0].length >= size || index >= ref->map.count - 1) {
		*_count = 1;
		return B_OK;
	}

	// copy the rest of the vecs

	size -= vecs[0].length;

	for (index = 1; index < ref->map.count;) {
		fileExtent++;

		vecs[index] = fileExtent->disk;
		index++;

		if (index >= maxVecs) {
			*_count = index;
			return B_BUFFER_OVERFLOW;
		}

		if (size <= fileExtent->disk.length)
			break;

		size -= fileExtent->disk.length;
	}

	*_count = index;
	return B_OK;
}


static status_t
pages_io(file_cache_ref *ref, off_t offset, const iovec *vecs, size_t count,
	size_t *_numBytes, bool doWrite)
{
	TRACE(("pages_io: ref = %p, offset = %Ld, size = %lu, %s\n", ref, offset,
		*_numBytes, doWrite ? "write" : "read"));

	// translate the iovecs into direct device accesses
	file_io_vec fileVecs[MAX_FILE_IO_VECS];
	size_t fileVecCount = MAX_FILE_IO_VECS;
	size_t numBytes = *_numBytes;

	status_t status = get_file_map(ref, offset, numBytes, fileVecs, &fileVecCount);
	if (status < B_OK) {
		TRACE(("get_file_map(offset = %Ld, numBytes = %lu) failed\n", offset,
			numBytes));
		return status;
	}

	// ToDo: handle array overflow gracefully!
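	// At this point fileVecs[] holds fileVecCount (disk offset, length) runs
	// that cover the requested range in file order; the code below walks the
	// caller's iovecs and splits them along these run boundaries.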

#ifdef TRACE_FILE_CACHE
	dprintf("got %lu file vecs for %Ld:%lu:\n", fileVecCount, offset, numBytes);
	for (size_t i = 0; i < fileVecCount; i++)
		dprintf("[%lu] offset = %Ld, size = %Ld\n", i, fileVecs[i].offset, fileVecs[i].length);
#endif

	uint32 fileVecIndex;
	size_t size;

	if (!doWrite) {
		// now directly read the data from the device
		// the first file_io_vec can be read directly

		size = fileVecs[0].length;
		if (size > numBytes)
			size = numBytes;

		status = vfs_read_pages(ref->device, ref->cookie, fileVecs[0].offset, vecs, count, &size);
		if (status < B_OK)
			return status;

		// ToDo: this is a work-around for buggy device drivers!
		//	When our own drivers honour the length, we can:
		//	a) also use this direct I/O for writes (otherwise, it would overwrite precious data)
		//	b) panic if the term below is true (at least for writes)
		if (size > fileVecs[0].length) {
			//dprintf("warning: device driver %p doesn't respect total length in read_pages() call!\n", ref->device);
			size = fileVecs[0].length;
		}

		ASSERT(size <= fileVecs[0].length);

		// If the file portion was contiguous, we're already done now
		if (size == numBytes)
			return B_OK;

		// if we reached the end of the file, we can return as well
		if (size != fileVecs[0].length) {
			*_numBytes = size;
			return B_OK;
		}

		fileVecIndex = 1;
	} else {
		fileVecIndex = 0;
		size = 0;
	}

	// Too bad, let's process the rest of the file_io_vecs

	size_t totalSize = size;

	// first, find out where we have to continue in our iovecs
	uint32 i = 0;
	for (; i < count; i++) {
		if (size <= vecs[i].iov_len)
			break;

		size -= vecs[i].iov_len;
	}

	size_t vecOffset = size;

	for (; fileVecIndex < fileVecCount; fileVecIndex++) {
		file_io_vec &fileVec = fileVecs[fileVecIndex];
		iovec tempVecs[8];
		uint32 tempCount = 1;

		tempVecs[0].iov_base = (void *)((addr_t)vecs[i].iov_base + vecOffset);

		size = min_c(vecs[i].iov_len - vecOffset, fileVec.length);
		tempVecs[0].iov_len = size;

		TRACE(("fill vec %ld, offset = %lu, size = %lu\n", i, vecOffset, size));

		if (size >= fileVec.length)
			vecOffset += size;
		else
			vecOffset = 0;

		while (size < fileVec.length && ++i < count) {
			tempVecs[tempCount].iov_base = vecs[i].iov_base;

			// is this iovec larger than the file_io_vec?
			if (vecs[i].iov_len + size > fileVec.length) {
				size += tempVecs[tempCount].iov_len = vecOffset = fileVec.length - size;
				tempCount++;
				break;
			}

			size += tempVecs[tempCount].iov_len = vecs[i].iov_len;
			tempCount++;
		}

		size_t bytes = size;
		if (doWrite)
			status = vfs_write_pages(ref->device, ref->cookie, fileVec.offset, tempVecs, tempCount, &bytes);
		else
			status = vfs_read_pages(ref->device, ref->cookie, fileVec.offset, tempVecs, tempCount, &bytes);
		if (status < B_OK)
			return status;

		totalSize += size;

		if (size != bytes) {
			// there are no more bytes, let's bail out
			*_numBytes = totalSize;
			return B_OK;
		}
	}

	return B_OK;
}


/** This function is called by read_into_cache() (and from there only) - it
 *	can only handle a certain amount of bytes, and read_into_cache() makes
 *	sure that it matches that criterion.
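 *	In practice that limit is MAX_IO_VECS pages (MAX_IO_VECS * B_PAGE_SIZE
 *	bytes) per call, which is the chunk size read_into_cache() passes in.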
 */

static inline status_t
read_chunk_into_cache(file_cache_ref *ref, off_t offset, size_t size,
	int32 pageOffset, addr_t buffer, size_t bufferSize)
{
	TRACE(("read_chunk(offset = %Ld, size = %lu, pageOffset = %ld, buffer = %#lx, bufferSize = %lu)\n",
		offset, size, pageOffset, buffer, bufferSize));

	vm_cache_ref *cache = ref->cache;

	iovec vecs[MAX_IO_VECS];
	int32 vecCount = 0;

	vm_page *pages[MAX_IO_VECS];
	int32 pageIndex = 0;

	// allocate pages for the cache and mark them busy
	for (size_t pos = 0; pos < size; pos += B_PAGE_SIZE) {
		vm_page *page = pages[pageIndex++] = vm_page_allocate_page(PAGE_STATE_FREE);
		if (page == NULL)
			panic("no more pages!");

		page->state = PAGE_STATE_BUSY;

		vm_cache_insert_page(cache, page, offset + pos);

		addr_t virtualAddress;
		if (vm_get_physical_page(page->ppn * B_PAGE_SIZE, &virtualAddress, PHYSICAL_PAGE_CAN_WAIT) < B_OK)
			panic("could not get physical page");

		add_to_iovec(vecs, vecCount, MAX_IO_VECS, virtualAddress, B_PAGE_SIZE);
			// ToDo: check if the array is large enough!
	}

	mutex_unlock(&cache->lock);

	// read file into reserved pages
	status_t status = pages_io(ref, offset, vecs, vecCount, &size, false);
	if (status < B_OK) {
		// reading failed, free allocated pages

		dprintf("file_cache: read pages failed: %s\n", strerror(status));

		mutex_lock(&cache->lock);
		for (int32 i = 0; i < pageIndex; i++) {
			vm_cache_remove_page(cache, pages[i]);
			vm_page_set_state(pages[i], PAGE_STATE_FREE);
		}

		return status;
	}

	// copy the pages and unmap them again

	for (int32 i = 0; i < vecCount; i++) {
		addr_t base = (addr_t)vecs[i].iov_base;
		size_t size = vecs[i].iov_len;

		// copy to user buffer if necessary
		if (bufferSize != 0) {
			size_t bytes = min_c(bufferSize, size - pageOffset);

			user_memcpy((void *)buffer, (void *)(base + pageOffset), bytes);
			buffer += bytes;
			bufferSize -= bytes;
			pageOffset = 0;
		}

		for (size_t pos = 0; pos < size; pos += B_PAGE_SIZE, base += B_PAGE_SIZE)
			vm_put_physical_page(base);
	}

	mutex_lock(&cache->lock);

	// make the pages accessible in the cache
	for (int32 i = pageIndex; i-- > 0;)
		pages[i]->state = PAGE_STATE_ACTIVE;

	return B_OK;
}


/** This function reads \a size bytes directly from the file into the cache.
 *	If \a bufferSize does not equal zero, \a bufferSize bytes from the data
 *	read in are also copied to the provided \a buffer.
 *	This function always allocates all pages; it is the responsibility of the
 *	calling function to only ask for yet uncached ranges.
 *	The cache_ref lock must be held when calling this function.
 */

static status_t
read_into_cache(file_cache_ref *ref, off_t offset, size_t size, addr_t buffer, size_t bufferSize)
{
	TRACE(("read_into_cache: ref = %p, offset = %Ld, size = %lu, buffer = %p, bufferSize = %lu\n",
		ref, offset, size, (void *)buffer, bufferSize));

	// do we have to read in anything at all?
	if (size == 0)
		return B_OK;

	// make sure "offset" is page aligned - but also remember the page offset
	int32 pageOffset = offset & (B_PAGE_SIZE - 1);
	size = PAGE_ALIGN(size + pageOffset);
	offset -= pageOffset;

	while (true) {
		size_t chunkSize = size;
		if (chunkSize > (MAX_IO_VECS * B_PAGE_SIZE))
			chunkSize = MAX_IO_VECS * B_PAGE_SIZE;

		status_t status = read_chunk_into_cache(ref, offset, chunkSize, pageOffset,
			buffer, bufferSize);
		if (status != B_OK)
			return status;

		if ((size -= chunkSize) == 0)
			return B_OK;

		if (chunkSize >= bufferSize) {
			bufferSize = 0;
			buffer = NULL;
		} else {
			bufferSize -= chunkSize - pageOffset;
			buffer += chunkSize - pageOffset;
		}

		offset += chunkSize;
		pageOffset = 0;
	}

	return B_OK;
}


/** Like read_chunk_into_cache() but writes data into the cache */

static inline status_t
write_chunk_to_cache(file_cache_ref *ref, off_t offset, size_t size,
	int32 pageOffset, addr_t buffer, size_t bufferSize)
{
	iovec vecs[MAX_IO_VECS];
	int32 vecCount = 0;
	vm_page *pages[MAX_IO_VECS];
	int32 pageIndex = 0;
	status_t status = B_OK;

	// ToDo: this should be settable somewhere
	bool writeThrough = false;

	// allocate pages for the cache and mark them busy
	for (size_t pos = 0; pos < size; pos += B_PAGE_SIZE) {
		// ToDo: if space is becoming tight, and this cache is already grown
		//	big - shouldn't we better steal the pages directly in that case?
		//	(a working set like approach for the file cache)
		vm_page *page = pages[pageIndex++] = vm_page_allocate_page(PAGE_STATE_FREE);
		page->state = PAGE_STATE_BUSY;

		vm_cache_insert_page(ref->cache, page, offset + pos);

		addr_t virtualAddress;
		vm_get_physical_page(page->ppn * B_PAGE_SIZE, &virtualAddress,
			PHYSICAL_PAGE_CAN_WAIT);

		add_to_iovec(vecs, vecCount, MAX_IO_VECS, virtualAddress, B_PAGE_SIZE);
			// ToDo: check if the array is large enough!
	}

	mutex_unlock(&ref->cache->lock);

	// copy contents (and read in partially written pages first)

	if (pageOffset != 0) {
		// This is only a partial write, so we have to read the rest of the page
		// from the file to have consistent data in the cache
		iovec readVec = { vecs[0].iov_base, B_PAGE_SIZE };
		size_t bytesRead = B_PAGE_SIZE;

		status = pages_io(ref, offset, &readVec, 1, &bytesRead, false);
		// ToDo: handle errors for real!
		if (status < B_OK)
			panic("pages_io() failed!\n");
	}

	addr_t lastPageOffset = (pageOffset + bufferSize) & (B_PAGE_SIZE - 1);
	if (lastPageOffset != 0) {
		// get the last page in the I/O vectors
		addr_t last = (addr_t)vecs[vecCount - 1].iov_base
			+ vecs[vecCount - 1].iov_len - B_PAGE_SIZE;

		if (offset + pageOffset + bufferSize == ref->cache->cache->virtual_size) {
			// the space in the page after this write action needs to be cleaned
			memset((void *)(last + lastPageOffset), 0, B_PAGE_SIZE - lastPageOffset);
		} else if (vecCount > 1) {
			// the end of this write does not happen on a page boundary, so we
			// need to fetch the last page before we can update it
			iovec readVec = { (void *)last, B_PAGE_SIZE };
			size_t bytesRead = B_PAGE_SIZE;

			status = pages_io(ref, offset + size - B_PAGE_SIZE, &readVec, 1,
				&bytesRead, false);
			// ToDo: handle errors for real!
			if (status < B_OK)
				panic("pages_io() failed!\n");
		}
	}

	for (int32 i = 0; i < vecCount; i++) {
		addr_t base = (addr_t)vecs[i].iov_base;
		size_t bytes = min_c(bufferSize, size_t(vecs[i].iov_len - pageOffset));

		// copy data from user buffer
		user_memcpy((void *)(base + pageOffset), (void *)buffer, bytes);

		bufferSize -= bytes;
		if (bufferSize == 0)
			break;

		buffer += bytes;
		pageOffset = 0;
	}

	if (writeThrough) {
		// write cached pages back to the file if we were asked to do that
		status_t status = pages_io(ref, offset, vecs, vecCount, &size, true);
		if (status < B_OK) {
			// ToDo: remove allocated pages, ...?
			panic("file_cache: remove allocated pages! write pages failed: %s\n",
				strerror(status));
		}
	}

	mutex_lock(&ref->cache->lock);

	// unmap the pages again

	for (int32 i = 0; i < vecCount; i++) {
		addr_t base = (addr_t)vecs[i].iov_base;
		size_t size = vecs[i].iov_len;
		for (size_t pos = 0; pos < size; pos += B_PAGE_SIZE, base += B_PAGE_SIZE)
			vm_put_physical_page(base);
	}

	// make the pages accessible in the cache
	for (int32 i = pageIndex; i-- > 0;) {
		if (writeThrough)
			pages[i]->state = PAGE_STATE_ACTIVE;
		else
			vm_page_set_state(pages[i], PAGE_STATE_MODIFIED);
	}

	return status;
}


/** Like read_into_cache() but writes data into the cache. To preserve data consistency,
 *	it might also read pages into the cache, though, if only a partial page gets written.
 *	The cache_ref lock must be held when calling this function.
 */

static status_t
write_to_cache(file_cache_ref *ref, off_t offset, size_t size, addr_t buffer, size_t bufferSize)
{
	TRACE(("write_to_cache: ref = %p, offset = %Ld, size = %lu, buffer = %p, bufferSize = %lu\n",
		ref, offset, size, (void *)buffer, bufferSize));

	// make sure "offset" is page aligned - but also remember the page offset
	int32 pageOffset = offset & (B_PAGE_SIZE - 1);
	size = PAGE_ALIGN(size + pageOffset);
	offset -= pageOffset;

	while (true) {
		size_t chunkSize = size;
		if (chunkSize > (MAX_IO_VECS * B_PAGE_SIZE))
			chunkSize = MAX_IO_VECS * B_PAGE_SIZE;

		status_t status = write_chunk_to_cache(ref, offset, chunkSize, pageOffset, buffer, bufferSize);
		if (status != B_OK)
			return status;

		if ((size -= chunkSize) == 0)
			return B_OK;

		if (chunkSize >= bufferSize) {
			bufferSize = 0;
			buffer = NULL;
		} else {
			bufferSize -= chunkSize - pageOffset;
			buffer += chunkSize - pageOffset;
		}

		offset += chunkSize;
		pageOffset = 0;
	}

	return B_OK;
}


static status_t
cache_io(void *_cacheRef, off_t offset, addr_t buffer, size_t *_size, bool doWrite)
{
	if (_cacheRef == NULL)
		panic("cache_io() called with NULL ref!\n");

	file_cache_ref *ref = (file_cache_ref *)_cacheRef;
	vm_cache_ref *cache = ref->cache;
	off_t fileSize = cache->cache->virtual_size;

	TRACE(("cache_io(ref = %p, offset = %Ld, buffer = %p, size = %lu, %s)\n",
		ref, offset, (void *)buffer, *_size, doWrite ? "write" : "read"));

	// out of bounds access?
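	// (requests that start beyond the end of the file simply return a
	// zero-length result instead of an error)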
	if (offset >= fileSize || offset < 0) {
		*_size = 0;
		return B_OK;
	}

	int32 pageOffset = offset & (B_PAGE_SIZE - 1);
	size_t size = *_size;
	offset -= pageOffset;

	if (offset + pageOffset + size > fileSize) {
		// adapt size to be within the file's offsets
		size = fileSize - pageOffset - offset;
		*_size = size;
	}

	// "offset" and "lastOffset" are always aligned to B_PAGE_SIZE,
	// the "last*" variables always point to the end of the last
	// satisfied request part

	size_t bytesLeft = size, lastLeft = size;
	int32 lastPageOffset = pageOffset;
	addr_t lastBuffer = buffer;
	off_t lastOffset = offset;

	mutex_lock(&cache->lock);

	for (; bytesLeft > 0; offset += B_PAGE_SIZE) {
		// check if this page is already in memory
		addr_t virtualAddress;
	restart:
		vm_page *page = vm_cache_lookup_page(cache, offset);
		if (page != NULL && page->state == PAGE_STATE_BUSY) {
			// ToDo: don't wait forever!
			mutex_unlock(&cache->lock);
			snooze(20000);
			mutex_lock(&cache->lock);
			goto restart;
		}

		size_t bytesInPage = min_c(size_t(B_PAGE_SIZE - pageOffset), bytesLeft);

		TRACE(("lookup page from offset %Ld: %p, size = %lu, pageOffset = %lu\n", offset, page, bytesLeft, pageOffset));
		if (page != NULL
			&& vm_get_physical_page(page->ppn * B_PAGE_SIZE,
					&virtualAddress, PHYSICAL_PAGE_CAN_WAIT) == B_OK) {
			// it is, so let's satisfy the first part of the request, if we have to
			if (lastBuffer != buffer) {
				size_t requestSize = buffer - lastBuffer;
				status_t status;
				if (doWrite) {
					status = write_to_cache(ref, lastOffset + lastPageOffset,
						requestSize, lastBuffer, requestSize);
				} else {
					status = read_into_cache(ref, lastOffset + lastPageOffset,
						requestSize, lastBuffer, requestSize);
				}
				if (status != B_OK) {
					vm_put_physical_page(virtualAddress);
					mutex_unlock(&cache->lock);
					return B_IO_ERROR;
				}
			}

			// and copy the contents of the page already in memory
			if (doWrite) {
				user_memcpy((void *)(virtualAddress + pageOffset), (void *)buffer, bytesInPage);

				// make sure the page is in the modified list
				if (page->state != PAGE_STATE_MODIFIED)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
			} else
				user_memcpy((void *)buffer, (void *)(virtualAddress + pageOffset), bytesInPage);

			vm_put_physical_page(virtualAddress);

			if (bytesLeft <= bytesInPage) {
				// we've read the last page, so we're done!
				mutex_unlock(&cache->lock);
				return B_OK;
			}

			// prepare a potential gap request
			lastBuffer = buffer + bytesInPage;
			lastLeft = bytesLeft - bytesInPage;
			lastOffset = offset + B_PAGE_SIZE;
			lastPageOffset = 0;
		}

		if (bytesLeft <= bytesInPage)
			break;

		buffer += bytesInPage;
		bytesLeft -= bytesInPage;
		pageOffset = 0;
	}

	// fill the last remaining bytes of the request (either write or read)

	status_t status;
	if (doWrite)
		status = write_to_cache(ref, lastOffset + lastPageOffset, lastLeft, lastBuffer, lastLeft);
	else
		status = read_into_cache(ref, lastOffset + lastPageOffset, lastLeft, lastBuffer, lastLeft);

	mutex_unlock(&cache->lock);
	return status;
}


static status_t
file_cache_control(const char *subsystem, uint32 function, void *buffer, size_t bufferSize)
{
	switch (function) {
		case CACHE_CLEAR:
			// ToDo: clear the cache
			dprintf("cache_control: clear cache!\n");
			return B_OK;

		case CACHE_SET_MODULE:
		{
			cache_module_info *module = sCacheModule;

			// unset previous module

			if (sCacheModule != NULL) {
				sCacheModule = NULL;
				snooze(100000);	// 0.1 secs
				put_module(module->info.name);
			}

			// get new module, if any

			if (buffer == NULL)
				return B_OK;

			char name[B_FILE_NAME_LENGTH];
			if (!IS_USER_ADDRESS(buffer)
				|| user_strlcpy(name, (char *)buffer, B_FILE_NAME_LENGTH) < B_OK)
				return B_BAD_ADDRESS;

			if (strncmp(name, CACHE_MODULES_NAME, strlen(CACHE_MODULES_NAME)))
				return B_BAD_VALUE;

			dprintf("cache_control: set module %s!\n", name);

			status_t status = get_module(name, (module_info **)&module);
			if (status == B_OK)
				sCacheModule = module;

			return status;
		}
	}

	return B_BAD_HANDLER;
}


// #pragma mark -
//	kernel public API


extern "C" void
cache_prefetch_vnode(void *vnode, off_t offset, size_t size)
{
	vm_cache_ref *cache;
	if (vfs_get_vnode_cache(vnode, &cache, false) != B_OK)
		return;

	file_cache_ref *ref = (struct file_cache_ref *)((vnode_store *)cache->cache->store)->file_cache_ref;
	off_t fileSize = cache->cache->virtual_size;

	if (size > fileSize)
		size = fileSize;

	// we never fetch more than 4 MB at once
	if (size > 4 * 1024 * 1024)
		size = 4 * 1024 * 1024;

	size_t bytesLeft = size, lastLeft = size;
	off_t lastOffset = offset;
	size_t lastSize = 0;

	mutex_lock(&cache->lock);

	for (; bytesLeft > 0; offset += B_PAGE_SIZE) {
		// check if this page is already in memory
		addr_t virtualAddress;
	restart:
		vm_page *page = vm_cache_lookup_page(cache, offset);
		if (page != NULL) {
			// it is, so let's satisfy the first part of the request
			if (lastOffset < offset) {
				size_t requestSize = offset - lastOffset;
				read_into_cache(ref, lastOffset, requestSize, NULL, 0);
			}

			if (bytesLeft <= B_PAGE_SIZE) {
				// we've read the last page, so we're done!
				goto out;
			}

			// prepare a potential gap request
			lastOffset = offset + B_PAGE_SIZE;
			lastLeft = bytesLeft - B_PAGE_SIZE;
		}

		if (bytesLeft <= B_PAGE_SIZE)
			break;

		bytesLeft -= B_PAGE_SIZE;
	}

	// read in the last part
	read_into_cache(ref, lastOffset, lastLeft, NULL, 0);

out:
	mutex_unlock(&cache->lock);
}


extern "C" void
cache_prefetch(mount_id mountID, vnode_id vnodeID, off_t offset, size_t size)
{
	void *vnode;

	// ToDo: schedule prefetch

	TRACE(("cache_prefetch(vnode %ld:%Ld)\n", mountID, vnodeID));

	// get the vnode for the object, this also grabs a ref to it
	if (vfs_get_vnode(mountID, vnodeID, &vnode) != B_OK)
		return;

	cache_prefetch_vnode(vnode, offset, size);
	vfs_put_vnode(vnode);
}


extern "C" void
cache_node_opened(void *vnode, int32 fdType, vm_cache_ref *cache, mount_id mountID,
	vnode_id parentID, vnode_id vnodeID, const char *name)
{
	if (sCacheModule == NULL || sCacheModule->node_opened == NULL)
		return;

	off_t size = -1;
	if (cache != NULL) {
		file_cache_ref *ref = (file_cache_ref *)((vnode_store *)cache->cache->store)->file_cache_ref;
		if (ref != NULL)
			size = ref->cache->cache->virtual_size;
	}

	sCacheModule->node_opened(vnode, fdType, mountID, parentID, vnodeID, name, size);
}


extern "C" void
cache_node_closed(void *vnode, int32 fdType, vm_cache_ref *cache,
	mount_id mountID, vnode_id vnodeID)
{
	if (sCacheModule == NULL || sCacheModule->node_closed == NULL)
		return;

	int32 accessType = 0;
	if (cache != NULL) {
		// ToDo: set accessType
	}

	sCacheModule->node_closed(vnode, fdType, mountID, vnodeID, accessType);
}


extern "C" void
cache_node_launched(size_t argCount, char * const *args)
{
	if (sCacheModule == NULL || sCacheModule->node_launched == NULL)
		return;

	sCacheModule->node_launched(argCount, args);
}


extern "C" status_t
file_cache_init_post_boot_device(void)
{
	// ToDo: get cache module out of driver settings

	if (get_module("file_cache/launch_speedup/v1", (module_info **)&sCacheModule) == B_OK) {
		dprintf("** opened launch speedup: %Ld\n", system_time());
	} else
		dprintf("** could not open launch speedup!\n");

	return B_OK;
}


extern "C" status_t
file_cache_init(void)
{
	register_generic_syscall(CACHE_SYSCALLS, file_cache_control, 1, 0);
	return B_OK;
}


// #pragma mark -
//	public FS API


extern "C" void *
file_cache_create(mount_id mountID, vnode_id vnodeID, off_t size, int fd)
{
	TRACE(("file_cache_create(mountID = %ld, vnodeID = %Ld, size = %Ld, fd = %d)\n", mountID, vnodeID, size, fd));

	file_cache_ref *ref = new file_cache_ref;
	if (ref == NULL)
		return NULL;

	// ToDo: delay vm_cache/vm_cache_ref creation until data is
	//	requested/written for the first time? Listing lots of
	//	files in Tracker (and elsewhere) could be slowed down.
	//	Since the file_cache_ref itself doesn't have a lock,
	//	we would need to "rent" one during construction, possibly
	//	the vnode lock, maybe a dedicated one.
	//	As there shouldn't be too much contention, we could also
	//	use atomic_test_and_set(), and free the resources again
	//	when that fails...
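
	// What follows collects everything the cache needs for I/O: the vnode and
	// cookie of the underlying device for raw access, the node's own vnode,
	// and its vm_cache; any failure unwinds through the err* labels below.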

	// get the vnode of the underlying device
	if (vfs_get_vnode_from_fd(fd, true, &ref->device) != B_OK)
		goto err1;

	// we also need the cookie of the underlying device to properly access it
	if (vfs_get_cookie_from_fd(fd, &ref->cookie) != B_OK)
		goto err2;

	// get the vnode for the object (note, this does not grab a reference to the node)
	if (vfs_lookup_vnode(mountID, vnodeID, &ref->vnode) != B_OK)
		goto err2;

	if (vfs_get_vnode_cache(ref->vnode, &ref->cache, true) != B_OK)
		goto err3;

	ref->cache->cache->virtual_size = size;
	((vnode_store *)ref->cache->cache->store)->file_cache_ref = ref;
	return ref;

err3:
	vfs_put_vnode(ref->vnode);
err2:
	vfs_put_vnode(ref->device);
err1:
	delete ref;
	return NULL;
}


extern "C" void
file_cache_delete(void *_cacheRef)
{
	file_cache_ref *ref = (file_cache_ref *)_cacheRef;

	if (ref == NULL)
		return;

	TRACE(("file_cache_delete(ref = %p)\n", ref));

	vfs_put_vnode(ref->device);
	delete ref;
}


extern "C" status_t
file_cache_set_size(void *_cacheRef, off_t size)
{
	file_cache_ref *ref = (file_cache_ref *)_cacheRef;

	TRACE(("file_cache_set_size(ref = %p, size = %Ld)\n", ref, size));

	if (ref == NULL)
		return B_OK;

	file_cache_invalidate_file_map(_cacheRef, 0, size);
		// ToDo: make this better (we would only need to extend or shrink the map)

	mutex_lock(&ref->cache->lock);
	status_t status = vm_cache_resize(ref->cache, size);
	mutex_unlock(&ref->cache->lock);

	return status;
}


extern "C" status_t
file_cache_sync(void *_cacheRef)
{
	file_cache_ref *ref = (file_cache_ref *)_cacheRef;
	if (ref == NULL)
		return B_BAD_VALUE;

	return vm_cache_write_modified(ref->cache);
}


extern "C" status_t
file_cache_read_pages(void *_cacheRef, off_t offset, const iovec *vecs, size_t count, size_t *_numBytes)
{
	file_cache_ref *ref = (file_cache_ref *)_cacheRef;

	return pages_io(ref, offset, vecs, count, _numBytes, false);
}


extern "C" status_t
file_cache_write_pages(void *_cacheRef, off_t offset, const iovec *vecs, size_t count, size_t *_numBytes)
{
	file_cache_ref *ref = (file_cache_ref *)_cacheRef;

	status_t status = pages_io(ref, offset, vecs, count, _numBytes, true);
	TRACE(("file_cache_write_pages(ref = %p, offset = %Ld, vecs = %p, count = %lu, bytes = %lu) = %ld\n",
		ref, offset, vecs, count, *_numBytes, status));

	return status;
}


extern "C" status_t
file_cache_read(void *_cacheRef, off_t offset, void *bufferBase, size_t *_size)
{
	file_cache_ref *ref = (file_cache_ref *)_cacheRef;

	TRACE(("file_cache_read(ref = %p, offset = %Ld, buffer = %p, size = %lu)\n",
		ref, offset, bufferBase, *_size));

	return cache_io(ref, offset, (addr_t)bufferBase, _size, false);
}


extern "C" status_t
file_cache_write(void *_cacheRef, off_t offset, const void *buffer, size_t *_size)
{
	file_cache_ref *ref = (file_cache_ref *)_cacheRef;

	status_t status = cache_io(ref, offset, (addr_t)const_cast<void *>(buffer), _size, true);
	TRACE(("file_cache_write(ref = %p, offset = %Ld, buffer = %p, size = %lu) = %ld\n",
		ref, offset, buffer, *_size, status));

	return status;
}


extern "C" status_t
file_cache_invalidate_file_map(void *_cacheRef, off_t offset, off_t size)
{
	file_cache_ref *ref = (file_cache_ref *)_cacheRef;

	// ToDo: honour offset/size parameters

	TRACE(("file_cache_invalidate_file_map(offset = %Ld, size = %Ld)\n", offset, size));
	mutex_lock(&ref->cache->lock);
	ref->map.Free();
	mutex_unlock(&ref->cache->lock);
	return B_OK;
}
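
/*
 * Rough usage sketch from a file system's point of view (illustrative only;
 * the volume/inode fields below are made up, while the file_cache_*() calls
 * are the ones implemented above):
 *
 *	void *cache = file_cache_create(volume->id, inode->id, inode->size,
 *		volume->device_fd);
 *
 *	size_t length = bufferSize;
 *	status_t status = file_cache_read(cache, pos, buffer, &length);
 *		// on return, length holds the number of bytes actually read
 *
 *	file_cache_set_size(cache, newSize);	// when the node is resized
 *	file_cache_sync(cache);					// flush modified pages to disk
 *	file_cache_delete(cache);				// when the node goes away
 */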