xref: /haiku/src/system/kernel/cache/vnode_store.cpp (revision 4f2fd49bdc6078128b1391191e4edac647044c3d)
/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */

#include "vnode_store.h"

#include <stdlib.h>
#include <string.h>

#include <file_cache.h>
#include <vfs.h>
#include <vm.h>

#include "io_requests.h"


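// Sets up the cache for the given vnode and remembers its device/inode pair,
// so that the vnode can be looked up again later (see
// AcquireUnreferencedStoreRef()).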
status_t
VMVnodeCache::Init(struct vnode *vnode)
{
	status_t error = VMCache::Init(CACHE_TYPE_VNODE);
	if (error != B_OK)
		return error;

	fVnode = vnode;
	fFileCacheRef = NULL;

	vfs_vnode_to_node_ref(fVnode, &fDevice, &fInode);

	return B_OK;
}


bool
VMVnodeCache::HasPage(off_t offset)
{
	// We always pretend to have the page - even if it's beyond the size of
	// the file. The read function will only cut down the size of the read;
	// it won't fail because of that.
	return true;
}


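// Reads the file data into the given vecs. Any part of the request that the
// underlying read does not fill (e.g. because it lies beyond the end of the
// file) is zeroed out below, so callers always get fully initialized buffers.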
status_t
VMVnodeCache::Read(off_t offset, const iovec *vecs, size_t count,
	uint32 flags, size_t *_numBytes)
{
	size_t bytesUntouched = *_numBytes;

	status_t status = vfs_read_pages(fVnode, NULL, offset, vecs, count,
		flags, _numBytes);

	bytesUntouched -= *_numBytes;

	// If the request could be filled completely, or an error occurred,
	// we're done here
	if (status < B_OK || bytesUntouched == 0)
		return status;

	// Clear out any leftovers that were not touched by the above read - we're
	// doing this here so that not every file system/device has to implement
	// this
	for (int32 i = count; i-- > 0 && bytesUntouched != 0;) {
		size_t length = min_c(bytesUntouched, vecs[i].iov_len);

		addr_t address = (addr_t)vecs[i].iov_base + vecs[i].iov_len - length;
		if ((flags & B_PHYSICAL_IO_REQUEST) != 0)
			vm_memset_physical(address, 0, length);
		else
			memset((void*)address, 0, length);

		bytesUntouched -= length;
	}

	return B_OK;
}


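// Writes the given vecs back to the vnode; the VFS performs the actual I/O.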
status_t
VMVnodeCache::Write(off_t offset, const iovec *vecs, size_t count,
	uint32 flags, size_t *_numBytes)
{
	return vfs_write_pages(fVnode, NULL, offset, vecs, count, flags, _numBytes);
}


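// Asynchronous counterpart to Write(): the request is handed to the VFS and
// the given callback is notified once the I/O has completed.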
status_t
VMVnodeCache::WriteAsync(off_t offset, const iovec* vecs, size_t count,
	size_t numBytes, uint32 flags, AsyncIOCallback* callback)
{
	return vfs_asynchronous_write_pages(fVnode, NULL, offset, vecs, count,
		numBytes, flags, callback);
}


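// There is no store-specific fault handling; returning B_BAD_HANDLER
// presumably leaves the fault to the VM's generic handling.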
status_t
VMVnodeCache::Fault(struct vm_address_space *aspace, off_t offset)
{
	return B_BAD_HANDLER;
}


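// Acquires a vnode reference by looking the vnode up via its device/inode
// pair rather than through fVnode, which may be stale at this point.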
status_t
VMVnodeCache::AcquireUnreferencedStoreRef()
{
	struct vnode *vnode;
	status_t status = vfs_get_vnode(fDevice, fInode, false, &vnode);

	// If successful, update the store's vnode pointer, so that release_ref()
	// won't use a stale pointer.
	if (status == B_OK)
		fVnode = vnode;

	return status;
}


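// Acquires an additional reference to the underlying vnode.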
void
VMVnodeCache::AcquireStoreRef()
{
	vfs_acquire_vnode(fVnode);
}


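// Releases a vnode reference again.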
void
VMVnodeCache::ReleaseStoreRef()
{
	vfs_put_vnode(fVnode);
}