/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */

#include "vnode_store.h"

#include <stdlib.h>
#include <string.h>

#include <file_cache.h>
#include <vfs.h>
#include <vm.h>

#include "IORequest.h"


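// Initializes the cache for the given vnode. The vnode's (device, inode)
// pair is remembered, so that the vnode can be re-acquired later via
// AcquireUnreferencedStoreRef().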
status_t
VMVnodeCache::Init(struct vnode *vnode)
{
	status_t error = VMCache::Init(CACHE_TYPE_VNODE);
	if (error != B_OK)
		return error;

	fVnode = vnode;
	fFileCacheRef = NULL;

	vfs_vnode_to_node_ref(fVnode, &fDevice, &fInode);

	return B_OK;
}


bool
VMVnodeCache::HasPage(off_t offset)
{
	// We always pretend to have the page - even if it's beyond the size of
	// the file. The read function will only cut down the size of the read,
	// it won't fail because of that.
	return true;
}


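// Reads the given range from the vnode into the vecs. Any bytes the file
// system leaves untouched (e.g. because the range extends beyond the end
// of the file) are cleared to zero here, so individual file systems and
// device drivers don't each have to do it.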
status_t
VMVnodeCache::Read(off_t offset, const iovec *vecs, size_t count,
	uint32 flags, size_t *_numBytes)
{
	size_t bytesUntouched = *_numBytes;

	status_t status = vfs_read_pages(fVnode, NULL, offset, vecs, count,
		flags, _numBytes);

	size_t bytesEnd = *_numBytes;

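	// If the file system read beyond the end of the file (virtual_end),
	// don't count those bytes as valid - they will be zeroed below together
	// with anything else the read left untouched.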
	if (offset + bytesEnd > virtual_end)
		bytesEnd = virtual_end - offset;

	// If the request could be filled completely, or an error occurred,
	// we're done here
	if (status != B_OK || bytesUntouched == bytesEnd)
		return status;

	bytesUntouched -= bytesEnd;

	// Clear out any leftovers that were not touched by the above read - we're
	// doing this here so that not every file system/device has to implement
	// this
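	// The untouched bytes sit at the end of the request, so iterate the vecs
	// back to front, zeroing the tail of each one; B_PHYSICAL_IO_REQUEST
	// indicates the vecs hold physical rather than virtual addresses.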
	for (int32 i = count; i-- > 0 && bytesUntouched != 0;) {
		size_t length = min_c(bytesUntouched, vecs[i].iov_len);

		addr_t address = (addr_t)vecs[i].iov_base + vecs[i].iov_len - length;
		if ((flags & B_PHYSICAL_IO_REQUEST) != 0)
			vm_memset_physical(address, 0, length);
		else
			memset((void*)address, 0, length);

		bytesUntouched -= length;
	}

	return B_OK;
}


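// Synchronously writes the given vecs back to the vnode.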
status_t
VMVnodeCache::Write(off_t offset, const iovec *vecs, size_t count,
	uint32 flags, size_t *_numBytes)
{
	return vfs_write_pages(fVnode, NULL, offset, vecs, count, flags, _numBytes);
}


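// Asynchronous counterpart to Write(): the pages are handed to the VFS for
// writing and the given callback is notified once the I/O has finished.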
status_t
VMVnodeCache::WriteAsync(off_t offset, const iovec* vecs, size_t count,
	size_t numBytes, uint32 flags, AsyncIOCallback* callback)
{
	return vfs_asynchronous_write_pages(fVnode, NULL, offset, vecs, count,
		numBytes, flags, callback);
}


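// No special fault handling for vnode-backed caches - returning
// B_BAD_HANDLER defers to the VM's generic page fault path, which pages
// the data in through this cache.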
status_t
VMVnodeCache::Fault(struct vm_address_space *aspace, off_t offset)
{
	return B_BAD_HANDLER;
}


bool
VMVnodeCache::CanWritePage(off_t offset)
{
	// all pages can be written
	return true;
}


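// Gets a new reference to the vnode via its (device, inode) pair, for
// callers that don't hold one already. The vnode may have been recreated
// in the meantime, hence the pointer update below.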
status_t
VMVnodeCache::AcquireUnreferencedStoreRef()
{
	struct vnode *vnode;
	status_t status = vfs_get_vnode(fDevice, fInode, false, &vnode);

	// If successful, update the store's vnode pointer, so that
	// ReleaseStoreRef() won't use a stale pointer.
	if (status == B_OK)
		fVnode = vnode;

	return status;
}


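// Acquires an additional reference to the (already referenced) vnode.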
void
VMVnodeCache::AcquireStoreRef()
{
	vfs_acquire_vnode(fVnode);
}


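// Releases a reference acquired by AcquireStoreRef() or
// AcquireUnreferencedStoreRef().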
void
VMVnodeCache::ReleaseStoreRef()
{
	vfs_put_vnode(fVnode);
}
145