#ifndef VOLUME_H
#define VOLUME_H
/* Volume - BFS super block, mounting, etc.
**
** Initial version by Axel Dörfler, axeld@pinc-software.de
** This file may be used under the terms of the OpenBeOS License.
*/


#include <KernelExport.h>

extern "C" {
	#ifndef _IMPEXP_KERNEL
	#	define _IMPEXP_KERNEL
	#endif
	#include "fsproto.h"
	#include "lock.h"
	#include "cache.h"
}

#include "bfs.h"
#include "BlockAllocator.h"
#include "BufferPool.h"
#include "Chain.h"

class Journal;
class Inode;
class Query;

enum volume_flags {
	VOLUME_READ_ONLY	= 0x0001
};

enum volume_initialize_flags {
	VOLUME_NO_INDICES	= 0x0001,
};

class Volume {
	public:
		Volume(nspace_id id);
		~Volume();

		status_t			Mount(const char *device, uint32 flags);
		status_t			Unmount();
		status_t			Initialize(const char *device, const char *name,
								uint32 blockSize, uint32 flags);

		bool				IsValidSuperBlock();
		bool				IsReadOnly() const;
		void				Panic();
		RecursiveLock		&Lock();

		block_run			Root() const { return fSuperBlock.root_dir; }
		Inode				*RootNode() const { return fRootNode; }
		block_run			Indices() const { return fSuperBlock.indices; }
		Inode				*IndicesNode() const { return fIndicesNode; }
		block_run			Log() const { return fSuperBlock.log_blocks; }
		vint32				&LogStart() { return fLogStart; }
		vint32				&LogEnd() { return fLogEnd; }
		int					Device() const { return fDevice; }

		nspace_id			ID() const { return fID; }
		const char			*Name() const { return fSuperBlock.name; }

		off_t				NumBlocks() const { return fSuperBlock.NumBlocks(); }
		off_t				UsedBlocks() const { return fSuperBlock.UsedBlocks(); }
		off_t				FreeBlocks() const { return NumBlocks() - UsedBlocks(); }

		uint32				BlockSize() const { return fBlockSize; }
		uint32				BlockShift() const { return fBlockShift; }
		uint32				InodeSize() const { return fSuperBlock.InodeSize(); }
		uint32				AllocationGroups() const { return fSuperBlock.AllocationGroups(); }
		uint32				AllocationGroupShift() const { return fAllocationGroupShift; }
		disk_super_block	&SuperBlock() { return fSuperBlock; }

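		// A block_run addresses disk space as (allocation group, start, length);
		// ToBlock() yields the absolute block number by shifting the group index
		// left by AllocationGroupShift() and adding the start offset, and
		// ToOffset() turns that block number into a byte position via BlockShift().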
		off_t				ToOffset(block_run run) const { return ToBlock(run) << BlockShift(); }
		off_t				ToBlock(block_run run) const { return ((((off_t)run.AllocationGroup()) << AllocationGroupShift()) | (off_t)run.Start()); }
		block_run			ToBlockRun(off_t block) const;
		status_t			ValidateBlockRun(block_run run);

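		// vnode IDs handed to the VFS are simply the inode's absolute block number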
		off_t				ToVnode(block_run run) const { return ToBlock(run); }
		off_t				ToVnode(off_t block) const { return block; }
		off_t				VnodeToBlock(vnode_id id) const { return (off_t)id; }

		status_t			CreateIndicesRoot(Transaction *transaction);

		// block bitmap
		BlockAllocator		&Allocator();
		status_t			AllocateForInode(Transaction *transaction, const Inode *parent,
								mode_t type, block_run &run);
		status_t			AllocateForInode(Transaction *transaction, const block_run *parent,
								mode_t type, block_run &run);
		status_t			Allocate(Transaction *transaction, const Inode *inode,
								off_t numBlocks, block_run &run, uint16 minimum = 1);
		status_t			Free(Transaction *transaction, block_run run);

		// cache access
		status_t			WriteSuperBlock();
		status_t			WriteBlocks(off_t blockNumber, const uint8 *block, uint32 numBlocks);
		void				WriteCachedBlocksIfNecessary();
		status_t			FlushDevice();

		// queries
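		// UpdateLiveQueries() is called when an attribute of an inode changes;
		// it passes both the old and the new key to every registered live query
		// so that their results can be kept up to date.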
		void				UpdateLiveQueries(Inode *inode, const char *attribute, int32 type,
								const uint8 *oldKey, size_t oldLength,
								const uint8 *newKey, size_t newLength);
		bool				CheckForLiveQuery(const char *attribute);
		void				AddQuery(Query *query);
		void				RemoveQuery(Query *query);

		status_t			Sync();
		Journal				*GetJournal(off_t refBlock) const;

		BufferPool			&Pool();

		uint32				GetUniqueID();

	protected:
		nspace_id			fID;
		int					fDevice;
		disk_super_block	fSuperBlock;

		uint32				fBlockSize;
		uint32				fBlockShift;
		uint32				fAllocationGroupShift;

		BlockAllocator		fBlockAllocator;
		RecursiveLock		fLock;
		Journal				*fJournal;
		vint32				fLogStart, fLogEnd;

		Inode				*fRootNode;
		Inode				*fIndicesNode;

		vint32				fDirtyCachedBlocks;

		SimpleLock			fQueryLock;
		Chain<Query>		fQueries;

		int32				fUniqueID;
		uint32				fFlags;

		BufferPool			fBufferPool;
};
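
// A minimal usage sketch (an assumption, not part of this header): with the
// old BeOS/OpenBeOS fsproto interface included above via "fsproto.h", the
// mount hook of the file system add-on would create and mount a Volume
// roughly like this. The hook name and exact signature are illustrative only.
//
//	static int
//	bfs_mount(nspace_id id, const char *device, ulong flags, void *parameters,
//		size_t length, void **_volume, vnode_id *_rootID)
//	{
//		Volume *volume = new Volume(id);
//		status_t status = volume->Mount(device, flags);
//		if (status < B_OK) {
//			delete volume;
//			return status;
//		}
//
//		*_volume = volume;
//		// the root directory's block number doubles as its vnode ID
//		*_rootID = volume->ToVnode(volume->Root());
//		return B_OK;
//	}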


// inline functions

inline bool
Volume::IsReadOnly() const
{
	return fFlags & VOLUME_READ_ONLY;
}


inline RecursiveLock &
Volume::Lock()
{
	return fLock;
}


inline BlockAllocator &
Volume::Allocator()
{
	return fBlockAllocator;
}


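// The allocation methods simply forward to the volume's BlockAllocator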
inline status_t
Volume::AllocateForInode(Transaction *transaction, const block_run *parent, mode_t type, block_run &run)
{
	return fBlockAllocator.AllocateForInode(transaction, parent, type, run);
}


inline status_t
Volume::Allocate(Transaction *transaction, const Inode *inode, off_t numBlocks, block_run &run, uint16 minimum)
{
	return fBlockAllocator.Allocate(transaction, inode, numBlocks, run, minimum);
}


inline status_t
Volume::Free(Transaction *transaction, block_run run)
{
	return fBlockAllocator.Free(transaction, run);
}


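// fDirtyCachedBlocks counts the blocks written through the block cache since
// the last flush; WriteCachedBlocksIfNecessary() uses it to decide when to
// force the cache to write them back to disk.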
inline status_t
Volume::WriteBlocks(off_t blockNumber, const uint8 *block, uint32 numBlocks)
{
	atomic_add(&fDirtyCachedBlocks, numBlocks);
	return cached_write(fDevice, blockNumber, block, numBlocks, fSuperBlock.block_size);
}


inline void
Volume::WriteCachedBlocksIfNecessary()
{
	// the specific values are only valid for the current BeOS cache
	if (fDirtyCachedBlocks > 128) {
		force_cache_flush(fDevice, false);
		atomic_add(&fDirtyCachedBlocks, -64);
	}
}


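// Writes back all blocks cached for this device and resets the dirty counter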
inline status_t
Volume::FlushDevice()
{
	fDirtyCachedBlocks = 0;
	return flush_device(fDevice, 0);
}


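// There is only one journal per volume, so the reference block is ignored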
inline Journal *
Volume::GetJournal(off_t /*refBlock*/) const
{
	return fJournal;
}


inline BufferPool &
Volume::Pool()
{
	return fBufferPool;
}


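// Returns a volume-wide ID, atomically incremented on every call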
inline uint32
Volume::GetUniqueID()
{
	return atomic_add(&fUniqueID, 1);
}

#endif	/* VOLUME_H */