xref: /haiku/src/system/kernel/cache/vnode_store.cpp (revision b289aaf66bbf6e173aa90fa194fc256965f1b34d)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 #include "vnode_store.h"
8 
9 #include <stdlib.h>
10 #include <string.h>
11 
12 #include <file_cache.h>
13 #include <vfs.h>
14 #include <vm/vm.h>
15 
16 #include "IORequest.h"
17 
18 
19 status_t
20 VMVnodeCache::Init(struct vnode *vnode, uint32 allocationFlags)
21 {
22 	status_t error = VMCache::Init(CACHE_TYPE_VNODE, allocationFlags);
23 	if (error != B_OK)
24 		return error;
25 
26 	fVnode = vnode;
27 	fFileCacheRef = NULL;
28 	fVnodeDeleted = false;
29 
30 	vfs_vnode_to_node_ref(fVnode, &fDevice, &fInode);
31 
32 	return B_OK;
33 }
34 
35 
bool
VMVnodeCache::HasPage(off_t offset)
{
	// We always pretend to have the page - even if it's beyond the size of
	// the file. The read function will only cut down the size of the read,
	// it won't fail because of that.
	return true;
}
44 
45 
46 status_t
47 VMVnodeCache::Read(off_t offset, const iovec *vecs, size_t count,
48 	uint32 flags, size_t *_numBytes)
49 {
50 	size_t bytesUntouched = *_numBytes;
51 
52 	status_t status = vfs_read_pages(fVnode, NULL, offset, vecs, count,
53 		flags, _numBytes);
54 
55 	size_t bytesEnd = *_numBytes;
56 
57 	if (offset + bytesEnd > virtual_end)
58 		bytesEnd = virtual_end - offset;
59 
60 	// If the request could be filled completely, or an error occured,
61 	// we're done here
62 	if (status != B_OK || bytesUntouched == bytesEnd)
63 		return status;
64 
65 	bytesUntouched -= bytesEnd;
66 
67 	// Clear out any leftovers that were not touched by the above read - we're
68 	// doing this here so that not every file system/device has to implement
69 	// this
70 	for (int32 i = count; i-- > 0 && bytesUntouched != 0;) {
71 		size_t length = min_c(bytesUntouched, vecs[i].iov_len);
72 
73 		addr_t address = (addr_t)vecs[i].iov_base + vecs[i].iov_len - length;
74 		if ((flags & B_PHYSICAL_IO_REQUEST) != 0)
75 			vm_memset_physical(address, 0, length);
76 		else
77 			memset((void*)address, 0, length);
78 
79 		bytesUntouched -= length;
80 	}
81 
82 	return B_OK;
83 }
84 
85 
86 status_t
87 VMVnodeCache::Write(off_t offset, const iovec *vecs, size_t count,
88 	uint32 flags, size_t *_numBytes)
89 {
90 	return vfs_write_pages(fVnode, NULL, offset, vecs, count, flags, _numBytes);
91 }
92 
93 
94 status_t
95 VMVnodeCache::WriteAsync(off_t offset, const iovec* vecs, size_t count,
96 	size_t numBytes, uint32 flags, AsyncIOCallback* callback)
97 {
98 	return vfs_asynchronous_write_pages(fVnode, NULL, offset, vecs, count,
99 		numBytes, flags, callback);
100 }
101 
102 
status_t
VMVnodeCache::Fault(struct VMAddressSpace *aspace, off_t offset)
{
	// We don't handle the fault ourselves; B_BAD_HANDLER tells the caller to
	// fall back to its generic fault handling (presumably via the file
	// cache -- confirm against the VM fault path).
	return B_BAD_HANDLER;
}
108 
109 
bool
VMVnodeCache::CanWritePage(off_t offset)
{
	// all pages can be written
	return true;
}
116 
117 
status_t
VMVnodeCache::AcquireUnreferencedStoreRef()
{
	// Quick check whether getting a vnode reference is still allowed. Only
	// after a successful vfs_get_vnode() the check is safe (since then we've
	// either got the reference to our vnode, or have been notified that it is
	// toast), but the check is cheap and saves quite a bit of work in case the
	// condition holds.
	if (fVnodeDeleted)
		return B_BUSY;

	struct vnode *vnode;
	status_t status = vfs_get_vnode(fDevice, fInode, false, &vnode);

	// Recheck the deleted flag now that we (may) hold a reference: if the
	// vnode was marked deleted in the meantime, the reference we just got is
	// to a dying node, so give it back and fail with B_BUSY.
	// NOTE(review): fVnode is intentionally left untouched here; an earlier
	// comment claimed the vnode pointer gets updated, but no such update
	// happens in this method.
	if (status == B_OK && fVnodeDeleted) {
		vfs_put_vnode(vnode);
		status = B_BUSY;
	}

	return status;
}
141 
142 
void
VMVnodeCache::AcquireStoreRef()
{
	// Acquire an additional reference to the vnode backing this cache.
	vfs_acquire_vnode(fVnode);
}
148 
149 
void
VMVnodeCache::ReleaseStoreRef()
{
	// Give back one vnode reference (counterpart to AcquireStoreRef()/
	// AcquireUnreferencedStoreRef()).
	vfs_put_vnode(fVnode);
}
155 
156