/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */

#include "vnode_store.h"

#include <stdlib.h>
#include <string.h>

#include <file_cache.h>
#include <slab/Slab.h>
#include <vfs.h>
#include <vm/vm.h>

#include "IORequest.h"


status_t
VMVnodeCache::Init(struct vnode* vnode, uint32 allocationFlags)
{
	status_t error = VMCache::Init(CACHE_TYPE_VNODE, allocationFlags);
	if (error != B_OK)
		return error;

	fVnode = vnode;
	fFileCacheRef = NULL;
	fVnodeDeleted = false;

	vfs_vnode_to_node_ref(fVnode, &fDevice, &fInode);

	return B_OK;
}


bool
VMVnodeCache::HasPage(off_t offset)
{
	return ROUNDUP(offset, B_PAGE_SIZE) >= virtual_base
		&& offset < virtual_end;
}


status_t
VMVnodeCache::Read(off_t offset, const generic_io_vec* vecs, size_t count,
	uint32 flags, generic_size_t* _numBytes)
{
	generic_size_t bytesUntouched = *_numBytes;

	status_t status = vfs_read_pages(fVnode, NULL, offset, vecs, count,
		flags, _numBytes);

	generic_size_t bytesEnd = *_numBytes;

	if (offset + (off_t)bytesEnd > virtual_end)
		bytesEnd = virtual_end - offset;

	// If the request could be filled completely, or an error occurred,
	// we're done here
	if (status != B_OK || bytesUntouched == bytesEnd)
		return status;

	bytesUntouched -= bytesEnd;

	// Clear out any leftovers that were not touched by the above read - we're
	// doing this here so that not every file system/device has to implement
	// this
	for (int32 i = count; i-- > 0 && bytesUntouched != 0;) {
		generic_size_t length = min_c(bytesUntouched, vecs[i].length);

		generic_addr_t address = vecs[i].base + vecs[i].length - length;
		if ((flags & B_PHYSICAL_IO_REQUEST) != 0)
			vm_memset_physical(address, 0, length);
		else
			memset((void*)(addr_t)address, 0, length);

		bytesUntouched -= length;
	}

	return B_OK;
}


status_t
VMVnodeCache::Write(off_t offset, const generic_io_vec* vecs, size_t count,
	uint32 flags, generic_size_t* _numBytes)
{
	return vfs_write_pages(fVnode, NULL, offset, vecs, count, flags, _numBytes);
}


status_t
VMVnodeCache::WriteAsync(off_t offset, const generic_io_vec* vecs, size_t count,
	generic_size_t numBytes, uint32 flags, AsyncIOCallback* callback)
{
	return vfs_asynchronous_write_pages(fVnode, NULL, offset, vecs, count,
		numBytes, flags, callback);
}


status_t
VMVnodeCache::Fault(struct VMAddressSpace* aspace, off_t offset)
{
	if (!HasPage(offset))
		return B_BAD_ADDRESS;

	// vm_soft_fault() reads the page in.
	return B_BAD_HANDLER;
}


bool
VMVnodeCache::CanWritePage(off_t offset)
{
	// all pages can be written
	return true;
}


status_t
VMVnodeCache::AcquireUnreferencedStoreRef()
{
	// Quick check whether getting a vnode reference is still allowed. Only
	// after a successful vfs_get_vnode() is the check safe (since then we've
	// either got the reference to our vnode, or have been notified that it is
	// toast), but the check is cheap and saves quite a bit of work in case the
	// condition holds.
	if (fVnodeDeleted)
		return B_BUSY;

	struct vnode* vnode;
	status_t status = vfs_get_vnode(fDevice, fInode, false, &vnode);

	// The vnode may have been deleted while we were getting the reference.
	// In that case release it again and fail, so callers never see a vnode
	// that is going away.
	if (status == B_OK && fVnodeDeleted) {
		vfs_put_vnode(vnode);
		status = B_BUSY;
	}

	return status;
}


void
VMVnodeCache::AcquireStoreRef()
{
	vfs_acquire_vnode(fVnode);
}


void
VMVnodeCache::ReleaseStoreRef()
{
	vfs_put_vnode(fVnode);
}


void
VMVnodeCache::Dump(bool showPages) const
{
	VMCache::Dump(showPages);

	kprintf(" vnode: %p <%" B_PRIdDEV ", %" B_PRIdINO ">\n", fVnode,
		fDevice, fInode);
}


void
VMVnodeCache::DeleteObject()
{
	object_cache_delete(gVnodeCacheObjectCache, this);
}