xref: /haiku/src/system/kernel/cache/vnode_store.cpp (revision 97901ec593ec4dd50ac115c1c35a6d72f6e489a5)
/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */

#include "vnode_store.h"

#include <stdlib.h>
#include <string.h>

#include <file_cache.h>
#include <vfs.h>
#include <vm/vm.h>

#include "IORequest.h"


status_t
VMVnodeCache::Init(struct vnode *vnode, uint32 allocationFlags)
{
	status_t error = VMCache::Init(CACHE_TYPE_VNODE, allocationFlags);
	if (error != B_OK)
		return error;

	fVnode = vnode;
	fFileCacheRef = NULL;
	fVnodeDeleted = false;

	// Cache the node_ref (device, inode), so the vnode can be looked up
	// again later, even while we don't hold a reference to it (see
	// AcquireUnreferencedStoreRef()).
	vfs_vnode_to_node_ref(fVnode, &fDevice, &fInode);

	return B_OK;
}


bool
VMVnodeCache::HasPage(off_t offset)
{
	// The page is considered present if it lies within the cache's bounds;
	// rounding up lets an offset in the page containing virtual_base match
	// as well.
	return ROUNDUP(offset, B_PAGE_SIZE) >= virtual_base
		&& offset < virtual_end;
}
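
// For illustration, a quick worked example of the predicate above, assuming
// B_PAGE_SIZE is 4096 and a cache with virtual_base == 0 and
// virtual_end == 10000 (i.e. a file just under 10 KB):
//
//	HasPage(8192)  -> ROUNDUP(8192, 4096) == 8192 >= 0 && 8192 < 10000 -> true
//	HasPage(12288) -> 12288 < 10000 fails                              -> false
//	HasPage(9999)  -> true, so the final partial page is covered, too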


status_t
VMVnodeCache::Read(off_t offset, const iovec *vecs, size_t count,
	uint32 flags, size_t *_numBytes)
{
	size_t bytesUntouched = *_numBytes;

	status_t status = vfs_read_pages(fVnode, NULL, offset, vecs, count,
		flags, _numBytes);

	size_t bytesEnd = *_numBytes;

	if (offset + bytesEnd > virtual_end)
		bytesEnd = virtual_end - offset;

	// If the request could be filled completely, or an error occurred,
	// we're done here
	if (status != B_OK || bytesUntouched == bytesEnd)
		return status;

	// the number of bytes at the end of the request that were not filled
	bytesUntouched -= bytesEnd;

	// Clear out any leftovers that were not touched by the above read - we're
	// doing this here so that not every file system/device has to implement
	// this
	for (int32 i = count; i-- > 0 && bytesUntouched != 0;) {
		size_t length = min_c(bytesUntouched, vecs[i].iov_len);

		addr_t address = (addr_t)vecs[i].iov_base + vecs[i].iov_len - length;
		if ((flags & B_PHYSICAL_IO_REQUEST) != 0)
			vm_memset_physical(address, 0, length);
		else
			memset((void*)address, 0, length);

		bytesUntouched -= length;
	}

	return B_OK;
}
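
// A minimal, self-contained userland sketch (not part of this file's code)
// of the zero-fill technique above: the loop walks the iovec array backwards
// and clears only the unread tail of the request. It assumes plain virtual
// memory (no B_PHYSICAL_IO_REQUEST), so memset() stands in for
// vm_memset_physical(); zero_unread_tail() is a hypothetical helper name.
#if 0
#include <string.h>
#include <sys/uio.h>

static void
zero_unread_tail(const iovec* vecs, size_t count, size_t bytesUnread)
{
	// Walk the vectors from last to first, zeroing the trailing
	// bytesUnread bytes of the overall buffer.
	for (size_t i = count; i-- > 0 && bytesUnread != 0;) {
		size_t length = bytesUnread < vecs[i].iov_len
			? bytesUnread : vecs[i].iov_len;
		memset((char*)vecs[i].iov_base + vecs[i].iov_len - length, 0,
			length);
		bytesUnread -= length;
	}
}

int
main()
{
	char a[8], b[8];
	memset(a, 'A', sizeof(a));
	memset(b, 'B', sizeof(b));
	iovec vecs[] = { { a, sizeof(a) }, { b, sizeof(b) } };

	// Pretend only 10 of the 16 requested bytes could be read (e.g. a
	// short read at end of file): the trailing 6 bytes must be zeroed.
	zero_unread_tail(vecs, 2, 6);
	// b[2..7] are now 0; a is untouched.
	return 0;
}
#endif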


status_t
VMVnodeCache::Write(off_t offset, const iovec *vecs, size_t count,
	uint32 flags, size_t *_numBytes)
{
	return vfs_write_pages(fVnode, NULL, offset, vecs, count, flags, _numBytes);
}


status_t
VMVnodeCache::WriteAsync(off_t offset, const iovec* vecs, size_t count,
	size_t numBytes, uint32 flags, AsyncIOCallback* callback)
{
	return vfs_asynchronous_write_pages(fVnode, NULL, offset, vecs, count,
		numBytes, flags, callback);
}
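
// A sketch of how a caller of WriteAsync() might look, assuming the
// AsyncIOCallback interface from IORequest.h exposes an IOFinished() hook
// (status, partial-transfer flag, bytes transferred) that the I/O subsystem
// invokes once the VFS has completed the request. The exact signature and
// the WriteBackCallback name are assumptions for illustration only.
#if 0
class WriteBackCallback : public AsyncIOCallback {
public:
	virtual void IOFinished(status_t status, bool partialTransfer,
		size_t bytesTransferred)
	{
		// Runs asynchronously when the write completes; e.g. mark the
		// written pages clean on success, or reschedule them on error.
	}
};
#endif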


status_t
VMVnodeCache::Fault(struct VMAddressSpace *aspace, off_t offset)
{
	if (!HasPage(offset))
		return B_BAD_ADDRESS;

	// We don't handle the fault ourselves -- B_BAD_HANDLER tells the caller
	// that vm_soft_fault() has to read the page in.
	return B_BAD_HANDLER;
}


bool
VMVnodeCache::CanWritePage(off_t offset)
{
	// all pages can be written
	return true;
}
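
// Note: for a file-backed cache there is always a place to write a page back
// to (the backing file already exists), which is presumably why no check is
// needed here; a swap-backed cache, by contrast, may first have to verify
// that committed swap space is available for the page.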


status_t
VMVnodeCache::AcquireUnreferencedStoreRef()
{
	// Quick check whether getting a vnode reference is still allowed. The
	// check is only safe after a successful vfs_get_vnode() (since then we've
	// either got the reference to our vnode, or have been notified that it is
	// toast), but it is cheap and saves quite a bit of work in case the
	// condition holds.
	if (fVnodeDeleted)
		return B_BUSY;

	struct vnode *vnode;
	status_t status = vfs_get_vnode(fDevice, fInode, false, &vnode);

	// If the vnode has been deleted in the meantime, the reference we just
	// got is useless: put it back and fail with B_BUSY.
	if (status == B_OK && fVnodeDeleted) {
		vfs_put_vnode(vnode);
		status = B_BUSY;
	}

	return status;
}
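
// To illustrate the race the second check guards against (the VFS side that
// sets fVnodeDeleted lives outside this file), with thread A acquiring the
// store ref and thread B deleting the vnode:
//
//	A: sees fVnodeDeleted == false, proceeds
//	B: deletes the vnode; fVnodeDeleted becomes true
//	A: vfs_get_vnode() may still succeed, but the re-check of fVnodeDeleted
//	   catches the deletion, so the reference is dropped again and B_BUSY is
//	   returned instead of a stale store ref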


void
VMVnodeCache::AcquireStoreRef()
{
	vfs_acquire_vnode(fVnode);
}


void
VMVnodeCache::ReleaseStoreRef()
{
	vfs_put_vnode(fVnode);
}
158