xref: /haiku/src/system/kernel/cache/vnode_store.cpp (revision 91054f1d38dd7827c0f0ba9490c213775ec7b471)
/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */

#include "vnode_store.h"

#include <stdlib.h>
#include <string.h>

#include <file_cache.h>
#include <vfs.h>
#include <vm.h>

#include "IORequest.h"


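// Initializes the cache for the given vnode and remembers its
// (device, inode) pair, so the vnode can be looked up again later
// (see AcquireUnreferencedStoreRef()).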
status_t
VMVnodeCache::Init(struct vnode *vnode)
{
	status_t error = VMCache::Init(CACHE_TYPE_VNODE);
	if (error != B_OK)
		return error;

	fVnode = vnode;
	fFileCacheRef = NULL;

	vfs_vnode_to_node_ref(fVnode, &fDevice, &fInode);

	return B_OK;
}


bool
VMVnodeCache::HasPage(off_t offset)
{
	// We always pretend to have the page - even if it's beyond the size of
	// the file. The read function will only cut down the size of the read,
	// it won't fail because of that.
	return true;
}


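// Reads the given vectors from the vnode and zero-fills whatever part of
// the request the file system left untouched (e.g. beyond the end of the
// file), so that individual file systems/devices don't have to.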
status_t
VMVnodeCache::Read(off_t offset, const iovec *vecs, size_t count,
	uint32 flags, size_t *_numBytes)
{
	size_t bytesUntouched = *_numBytes;

	status_t status = vfs_read_pages(fVnode, NULL, offset, vecs, count,
		flags, _numBytes);

	bytesUntouched -= *_numBytes;

	// If the request could be filled completely, or an error occurred,
	// we're done here
	if (status < B_OK || bytesUntouched == 0)
		return status;

	// Clear out any leftovers that were not touched by the above read - we're
	// doing this here so that not every file system/device has to implement
	// this
	for (int32 i = count; i-- > 0 && bytesUntouched != 0;) {
		size_t length = min_c(bytesUntouched, vecs[i].iov_len);

		addr_t address = (addr_t)vecs[i].iov_base + vecs[i].iov_len - length;
		if ((flags & B_PHYSICAL_IO_REQUEST) != 0)
			vm_memset_physical(address, 0, length);
		else
			memset((void*)address, 0, length);

		bytesUntouched -= length;
	}

	return B_OK;
}


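// Writes the given vectors to the vnode by passing the request straight
// through to the VFS.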
status_t
VMVnodeCache::Write(off_t offset, const iovec *vecs, size_t count,
	uint32 flags, size_t *_numBytes)
{
	return vfs_write_pages(fVnode, NULL, offset, vecs, count, flags, _numBytes);
}


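// Asynchronous counterpart to Write(); the given callback is notified once
// the VFS has completed the request.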
status_t
VMVnodeCache::WriteAsync(off_t offset, const iovec* vecs, size_t count,
	size_t numBytes, uint32 flags, AsyncIOCallback* callback)
{
	return vfs_asynchronous_write_pages(fVnode, NULL, offset, vecs, count,
		numBytes, flags, callback);
}


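// This cache does not implement its own fault handling; returning
// B_BAD_HANDLER leaves the fault to the generic VM page fault path.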
status_t
VMVnodeCache::Fault(struct vm_address_space *aspace, off_t offset)
{
	return B_BAD_HANDLER;
}


bool
VMVnodeCache::CanWritePage(off_t offset)
{
	// all pages can be written
	return true;
}


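// Re-acquires the vnode via its (device, inode) pair rather than through the
// cached pointer, since no reference is held at this point and the pointer
// may be stale.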
status_t
VMVnodeCache::AcquireUnreferencedStoreRef()
{
	struct vnode *vnode;
	status_t status = vfs_get_vnode(fDevice, fInode, false, &vnode);

	// If successful, update the store's vnode pointer, so that
	// ReleaseStoreRef() won't use a stale pointer.
	if (status == B_OK)
		fVnode = vnode;

	return status;
}


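// Acquires another reference to the underlying vnode.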
void
VMVnodeCache::AcquireStoreRef()
{
	vfs_acquire_vnode(fVnode);
}


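// Releases a reference to the underlying vnode again.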
void
VMVnodeCache::ReleaseStoreRef()
{
	vfs_put_vnode(fVnode);
}