xref: /haiku/src/add-ons/kernel/file_systems/bfs/Journal.cpp (revision 1d9d47fc72028bb71b5f232a877231e59cfe2438)
1 /* Journal - transaction and logging
2  *
3  * Copyright 2001-2005, Axel Dörfler, axeld@pinc-software.de.
4  * This file may be used under the terms of the MIT License.
5  */
6 
7 
8 #include "Journal.h"
9 #include "Inode.h"
10 #include "Debug.h"
11 
12 #include <Drivers.h>
13 #include <util/kernel_cpp.h>
14 #include <util/Stack.h>
15 #include <errno.h>
16 
17 
18 struct run_array {
19 	int32		count;
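	// While a transaction is being built up, "block_count" tracks the number
	// of log blocks this array covers (including the array block itself);
	// PrepareForWriting() replaces it with the on-disk "max_runs" value
	// before the array is written to the log.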
20 	union {
21 		int32	max_runs;
22 		int32	block_count;
23 	};
24 	block_run	runs[0];
25 
26 	int32 CountRuns() const { return BFS_ENDIAN_TO_HOST_INT32(count); }
27 	int32 MaxRuns() const { return BFS_ENDIAN_TO_HOST_INT32(max_runs) - 1; }
28 		// that -1 accounts for an off-by-one error in Be's BFS implementation
29 	const block_run &RunAt(int32 i) const { return runs[i]; }
30 
31 	static int32 MaxRuns(int32 blockSize)
32 		{ return (blockSize - sizeof(run_array)) / sizeof(block_run); }
33 };
34 
35 class RunArrays {
36 	public:
37 		RunArrays(Journal *journal);
38 		~RunArrays();
39 
40 		uint32 Length() const { return fLength; }
41 
42 		status_t Insert(off_t blockNumber);
43 
44 		run_array *ArrayAt(int32 i) { return fArrays.Array()[i]; }
45 		int32 CountArrays() const { return fArrays.CountItems(); }
46 
47 		int32 MaxArrayLength();
48 		void PrepareForWriting();
49 
50 	private:
51 		status_t _AddArray();
52 		bool _ContainsRun(block_run &run);
53 		bool _AddRun(block_run &run);
54 
55 		Journal		*fJournal;
56 		uint32		fLength;
57 		Stack<run_array *> fArrays;
58 		run_array	*fLastArray;
59 };
60 
61 class LogEntry : public DoublyLinkedListLinkImpl<LogEntry> {
62 	public:
63 		LogEntry(Journal *journal, uint32 logStart, uint32 length);
64 		~LogEntry();
65 
66 		uint32 Start() const { return fStart; }
67 		uint32 Length() const { return fLength; }
68 
69 		Journal *GetJournal() { return fJournal; }
70 
71 	private:
72 		Journal		*fJournal;
73 		uint32		fStart;
74 		uint32		fLength;
75 };
76 
77 
78 //	#pragma mark -
79 
80 
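/**	Adds the given memory range to the iovec array, merging it with the
 *	previous entry if both ranges are contiguous in memory.
 *	Panics if more than \a max iovecs would be needed.
 */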
81 static void
82 add_to_iovec(iovec *vecs, int32 &index, int32 max, const void *address, size_t size)
83 {
84 	if (index > 0
85 		&& (addr_t)vecs[index - 1].iov_base + vecs[index - 1].iov_len == (addr_t)address) {
86 		// the iovec can be combined with the previous one
87 		vecs[index - 1].iov_len += size;
88 		return;
89 	}
90 
91 	if (index == max)
92 		panic("no more space for iovecs!");
93 
94 	// we need to start a new iovec
95 	vecs[index].iov_base = const_cast<void *>(address);
96 	vecs[index].iov_len = size;
97 	index++;
98 }
99 
100 
101 //	#pragma mark -
102 
103 
104 LogEntry::LogEntry(Journal *journal, uint32 start, uint32 length)
105 	:
106 	fJournal(journal),
107 	fStart(start),
108 	fLength(length)
109 {
110 }
111 
112 
113 LogEntry::~LogEntry()
114 {
115 }
116 
117 
118 //	#pragma mark -
119 
120 
121 RunArrays::RunArrays(Journal *journal)
122 	:
123 	fJournal(journal),
124 	fLength(0),
125 	fArrays(),
126 	fLastArray(NULL)
127 {
128 }
129 
130 
131 RunArrays::~RunArrays()
132 {
133 	run_array *array;
134 	while (fArrays.Pop(&array))
135 		free(array);
136 }
137 
138 
139 bool
140 RunArrays::_ContainsRun(block_run &run)
141 {
142 	for (int32 i = 0; i < CountArrays(); i++) {
143 		run_array *array = ArrayAt(i);
144 
145 		for (int32 j = 0; j < array->CountRuns(); j++) {
146 			block_run &arrayRun = array->runs[j];
147 			if (run.AllocationGroup() != arrayRun.AllocationGroup())
148 				continue;
149 
150 			if (run.Start() >= arrayRun.Start()
151 				&& run.Start() + run.Length() <= arrayRun.Start() + arrayRun.Length())
152 				return true;
153 		}
154 	}
155 
156 	return false;
157 }
158 
159 
160 /**	Adds the specified block_run into the array.
161  *	Note: it doesn't support overlapping - it must only be used
162  *	with block_runs of length 1!
163  */
164 
165 bool
166 RunArrays::_AddRun(block_run &run)
167 {
168 	ASSERT(run.Length() == 1);
169 
170 	// Be's BFS log replay routine can only deal with block_runs of size 1
171 	// A pity, isn't it? Too sad we have to be compatible.
172 #if 0
173 	// search for an existing adjacent block_run
174 	// ToDo: this could be improved by sorting and a binary search
175 
176 	for (int32 i = 0; i < CountArrays(); i++) {
177 		run_array *array = ArrayAt(i);
178 
179 		for (int32 j = 0; j < array->CountRuns(); j++) {
180 			block_run &arrayRun = array->runs[j];
181 			if (run.AllocationGroup() != arrayRun.AllocationGroup())
182 				continue;
183 
184 			if (run.Start() == arrayRun.Start() + arrayRun.Length()) {
185 				// matches the end
186 				arrayRun.length = HOST_ENDIAN_TO_BFS_INT16(arrayRun.Length() + 1);
187 				array->block_count++;
188 				fLength++;
189 				return true;
190 			} else if (run.start + 1 == arrayRun.start) {
191 				// matches the start
192 				arrayRun.start = run.start;
193 				arrayRun.length = HOST_ENDIAN_TO_BFS_INT16(arrayRun.Length() + 1);
194 				array->block_count++;
195 				fLength++;
196 				return true;
197 			}
198 		}
199 	}
200 #endif
201 
202 	// no entry found, add new to the last array
203 
204 	if (fLastArray == NULL || fLastArray->CountRuns() == fLastArray->MaxRuns())
205 		return false;
206 
207 	fLastArray->runs[fLastArray->CountRuns()] = run;
208 	fLastArray->count = HOST_ENDIAN_TO_BFS_INT32(fLastArray->CountRuns() + 1);
209 	fLastArray->block_count++;
210 	fLength++;
211 	return true;
212 }
213 
214 
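/**	Allocates a new block-sized run_array, pushes it onto the stack of
 *	arrays, and makes it the target for subsequent runs. The initial
 *	block_count of 1 accounts for the array block itself in the log.
 */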
215 status_t
216 RunArrays::_AddArray()
217 {
218 	int32 blockSize = fJournal->GetVolume()->BlockSize();
219 	run_array *array = (run_array *)malloc(blockSize);
220 	if (array == NULL)
221 		return B_NO_MEMORY;
222 
223 	if (fArrays.Push(array) != B_OK) {
224 		free(array);
225 		return B_NO_MEMORY;
226 	}
227 
228 	memset(array, 0, blockSize);
229 	array->block_count = 1;
230 	fLastArray = array;
231 	fLength++;
232 
233 	return B_OK;
234 }
235 
236 
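/**	Adds the given block to the run arrays, unless it is already contained
 *	in one of them. If the last array is full (or none exists yet), a new
 *	one is allocated first.
 */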
237 status_t
238 RunArrays::Insert(off_t blockNumber)
239 {
240 	Volume *volume = fJournal->GetVolume();
241 	block_run run = volume->ToBlockRun(blockNumber);
242 
243 	if (fLastArray != NULL) {
244 		// check if the block is already in the array
245 		if (_ContainsRun(run))
246 			return B_OK;
247 	}
248 
249 	// insert block into array
250 
251 	if (!_AddRun(run)) {
252 		// array is full
253 		if (_AddArray() != B_OK)
254 			return B_NO_MEMORY;
255 
256 		// insert the entry manually, because _AddRun() would search all
257 		// the arrays again for a free spot
258 		fLastArray->runs[0] = run;
259 		fLastArray->count = HOST_ENDIAN_TO_BFS_INT32(1);
260 		fLastArray->block_count++;
261 		fLength++;
262 	}
263 
264 	return B_OK;
265 }
266 
267 
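/**	Returns the largest number of log blocks (the array block plus its data
 *	blocks) covered by any single run_array - the caller uses this value to
 *	size the iovec array when writing the log entry.
 */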
268 int32
269 RunArrays::MaxArrayLength()
270 {
271 	int32 max = 0;
272 	for (int32 i = 0; i < CountArrays(); i++) {
273 		if (ArrayAt(i)->block_count > max)
274 			max = ArrayAt(i)->block_count;
275 	}
276 
277 	return max;
278 }
279 
280 
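/**	Replaces the temporary block_count values with the real "max_runs"
 *	value in every array - this has to be done before the arrays are
 *	written to the log.
 */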
281 void
282 RunArrays::PrepareForWriting()
283 {
284 	int32 blockSize = fJournal->GetVolume()->BlockSize();
285 
286 	for (int32 i = 0; i < CountArrays(); i++) {
287 		ArrayAt(i)->max_runs = HOST_ENDIAN_TO_BFS_INT32(run_array::MaxRuns(blockSize));
288 	}
289 }
290 
291 
292 //	#pragma mark -
293 
294 
295 Journal::Journal(Volume *volume)
296 	:
297 	fVolume(volume),
298 	fLock("bfs journal"),
299 	fOwner(NULL),
300 	fLogSize(volume->Log().Length()),
301 	fMaxTransactionSize(fLogSize / 4 - 5),
302 	fUsed(0),
303 	fUnwrittenTransactions(0),
304 	fHasSubtransaction(false)
305 {
306 	if (fMaxTransactionSize > fLogSize / 2)
307 		fMaxTransactionSize = fLogSize / 2 - 5;
308 }
309 
310 
311 Journal::~Journal()
312 {
313 	FlushLogAndBlocks();
314 }
315 
316 
317 status_t
318 Journal::InitCheck()
319 {
320 	if (fVolume->LogStart() != fVolume->LogEnd()) {
321 		if (fVolume->SuperBlock().flags != SUPER_BLOCK_DISK_DIRTY)
322 			FATAL(("log_start and log_end differ, but disk is marked clean - trying to replay log...\n"));
323 
324 		return ReplayLog();
325 	}
326 
327 	return B_OK;
328 }
329 
330 
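/**	Performs some sanity checks on the given run_array header, and
 *	validates all block_runs it contains.
 */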
331 status_t
332 Journal::_CheckRunArray(const run_array *array)
333 {
334 	int32 maxRuns = run_array::MaxRuns(fVolume->BlockSize()) - 1;
335 		// the -1 works around an off-by-one bug in Be's BFS implementation,
336 		// same as in run_array::MaxRuns()
337 	if (array->MaxRuns() != maxRuns
338 		|| array->CountRuns() > maxRuns
339 		|| array->CountRuns() <= 0) {
340 		dprintf("run count: %ld, array max: %ld, max runs: %ld\n",
341 			array->CountRuns(), array->MaxRuns(), maxRuns);
342 		FATAL(("Log entry has broken header!\n"));
343 		return B_ERROR;
344 	}
345 
346 	for (int32 i = 0; i < array->CountRuns(); i++) {
347 		if (fVolume->ValidateBlockRun(array->RunAt(i)) != B_OK)
348 			return B_ERROR;
349 	}
350 
351 	PRINT(("Log entry has %ld entries\n", array->CountRuns()));
352 	return B_OK;
353 }
354 
355 
356 /**	Replays an entry in the log.
357  *	\a _start points to the entry in the log, and will be bumped to the next
358  *	one if replaying succeeded.
359  */
360 
361 status_t
362 Journal::_ReplayRunArray(int32 *_start)
363 {
364 	PRINT(("ReplayRunArray(start = %ld)\n", *_start));
365 
366 	off_t logOffset = fVolume->ToBlock(fVolume->Log());
367 	off_t blockNumber = *_start % fLogSize;
368 	int32 blockSize = fVolume->BlockSize();
369 	int32 count = 1;
370 
371 	CachedBlock cachedArray(fVolume);
372 
373 	const run_array *array = (const run_array *)cachedArray.SetTo(logOffset + blockNumber);
374 	if (array == NULL)
375 		return B_IO_ERROR;
376 
377 	if (_CheckRunArray(array) < B_OK)
378 		return B_BAD_DATA;
379 
380 	blockNumber = (blockNumber + 1) % fLogSize;
381 
382 	CachedBlock cached(fVolume);
383 	for (int32 index = 0; index < array->CountRuns(); index++) {
384 		const block_run &run = array->RunAt(index);
385 		PRINT(("replay block run %lu:%u:%u in log at %Ld!\n", run.AllocationGroup(),
386 			run.Start(), run.Length(), blockNumber));
387 
388 		off_t offset = fVolume->ToOffset(run);
389 		for (int32 i = 0; i < run.Length(); i++) {
390 			const uint8 *data = cached.SetTo(logOffset + blockNumber);
391 			if (data == NULL)
392 				RETURN_ERROR(B_IO_ERROR);
393 
394 			ssize_t written = write_pos(fVolume->Device(),
395 				offset + (i * blockSize), data, blockSize);
396 			if (written != blockSize)
397 				RETURN_ERROR(B_IO_ERROR);
398 
399 			blockNumber = (blockNumber + 1) % fLogSize;
400 			count++;
401 		}
402 	}
403 
404 	*_start += count;
405 	return B_OK;
406 }
407 
408 
409 /**	Replays all log entries - this will put the disk into a
410  *	consistent and clean state, if it was not correctly unmounted
411  *	before.
412  *	This method is called by Journal::InitCheck() if the log start
413  *	and end pointer don't match.
414  */
415 
416 status_t
417 Journal::ReplayLog()
418 {
419 	INFORM(("Replay log, disk was not correctly unmounted...\n"));
420 
421 	int32 start = fVolume->LogStart();
422 	int32 lastStart = -1;
423 	while (true) {
424 		// stop if the log is completely flushed
425 		if (start == fVolume->LogEnd())
426 			break;
427 
428 		if (start == lastStart) {
429 			// strange, replaying the log hasn't advanced the log_start pointer
430 			return B_ERROR;
431 		}
432 		lastStart = start;
433 
434 		status_t status = _ReplayRunArray(&start);
435 		if (status < B_OK) {
436 			FATAL(("replaying log entry from %ld failed: %s\n", start, strerror(status)));
437 			return B_ERROR;
438 		}
439 		start = start % fLogSize;
440 	}
441 
442 	PRINT(("replaying worked fine!\n"));
443 	fVolume->SuperBlock().log_start = fVolume->LogEnd();
444 	fVolume->LogStart() = fVolume->LogEnd();
445 	fVolume->SuperBlock().flags = SUPER_BLOCK_DISK_CLEAN;
446 
447 	return fVolume->WriteSuperBlock();
448 }
449 
450 
451 /**	This is a callback function that is called by the cache once all blocks
452  *	that were changed as part of a transaction have been flushed to disk.
453  *	This is necessary to keep track of completed transactions, to be
454  *	able to update the log start pointer.
455  */
456 
457 void
458 Journal::_blockNotify(int32 transactionID, void *arg)
459 {
460 	LogEntry *logEntry = (LogEntry *)arg;
461 
462 	PRINT(("Log entry %p has been finished, transaction ID = %ld\n", logEntry, transactionID));
463 
464 	Journal *journal = logEntry->GetJournal();
465 	disk_super_block &superBlock = journal->fVolume->SuperBlock();
466 	bool update = false;
467 
468 	// Set log_start pointer if possible...
469 
470 	journal->fEntriesLock.Lock();
471 
472 	if (logEntry == journal->fEntries.First()) {
473 		LogEntry *next = journal->fEntries.GetNext(logEntry);
474 		if (next != NULL) {
475 			int32 length = next->Start() - logEntry->Start();
476 				// log entries in between may already have been released, so
477 				// we can't just use LogEntry::Length() here
478 			superBlock.log_start = (superBlock.log_start + length) % journal->fLogSize;
479 		} else
480 			superBlock.log_start = journal->fVolume->LogEnd();
481 
482 		update = true;
483 	}
484 
485 	journal->fUsed -= logEntry->Length();
486 	journal->fEntries.Remove(logEntry);
487 	journal->fEntriesLock.Unlock();
488 
489 	delete logEntry;
490 
491 	// update the super block, and change the disk's state, if necessary
492 
493 	if (update) {
494 		journal->fVolume->LogStart() = superBlock.log_start;
495 
496 		if (superBlock.log_start == superBlock.log_end)
497 			superBlock.flags = SUPER_BLOCK_DISK_CLEAN;
498 
499 		status_t status = journal->fVolume->WriteSuperBlock();
500 		if (status != B_OK) {
501 			FATAL(("blockNotify: could not write back super block: %s\n",
502 				strerror(status)));
503 		}
504 	}
505 }
506 
507 
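/**	Writes the blocks that belong to the current transaction into the log,
 *	and ends the transaction.
 *	To do so, it builds run_arrays for all changed blocks, writes the arrays
 *	and the block data into the (circular) log area, updates the log_end
 *	pointer in the super block, and registers a LogEntry so that the
 *	log_start pointer can be advanced once the transaction has been written
 *	back to its final location on disk.
 */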
508 status_t
509 Journal::_WriteTransactionToLog()
510 {
511 	// ToDo: in case of a failure, we need a backup plan like writing all
512 	//	changed blocks back to disk immediately
513 
514 	fUnwrittenTransactions = 0;
515 	fHasSubtransaction = false;
516 
517 	int32 blockShift = fVolume->BlockShift();
518 	off_t logOffset = fVolume->ToBlock(fVolume->Log()) << blockShift;
519 	off_t logStart = fVolume->LogEnd();
520 	off_t logPosition = logStart % fLogSize;
521 	status_t status;
522 
523 	// create run_array structures for all changed blocks
524 
525 	RunArrays runArrays(this);
526 
527 	uint32 cookie = 0;
528 	off_t blockNumber;
529 	while (cache_next_block_in_transaction(fVolume->BlockCache(), fTransactionID,
530 			&cookie, &blockNumber, NULL, NULL) == B_OK) {
531 		status = runArrays.Insert(blockNumber);
532 		if (status < B_OK) {
533 			FATAL(("filling log entry failed!"));
534 			return status;
535 		}
536 	}
537 
538 	if (runArrays.Length() == 0) {
539 		// nothing has changed during this transaction
540 		cache_end_transaction(fVolume->BlockCache(), fTransactionID, NULL, NULL);
541 		return B_OK;
542 	}
543 
544 	// Make sure there is enough space in the log.
545 	// If that fails for whatever reason, panic!
546 	// ToDo:
547 /*	force_cache_flush(fVolume->Device(), false);
548 	int32 tries = fLogSize / 2 + 1;
549 	while (TransactionSize() > FreeLogBlocks() && tries-- > 0)
550 		force_cache_flush(fVolume->Device(), true);
551 
552 	if (tries <= 0) {
553 		fVolume->Panic();
554 		return B_BAD_DATA;
555 	}
556 */
557 
558 	// Write log entries to disk
559 
560 	int32 maxVecs = runArrays.MaxArrayLength();
561 
562 	iovec *vecs = (iovec *)malloc(sizeof(iovec) * maxVecs);
563 	if (vecs == NULL) {
564 		// ToDo: write back log entries directly?
565 		return B_NO_MEMORY;
566 	}
567 
568 	runArrays.PrepareForWriting();
569 
570 	for (int32 k = 0; k < runArrays.CountArrays(); k++) {
571 		run_array *array = runArrays.ArrayAt(k);
572 		int32 index = 0, count = 1;
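		// "wrap" is the number of log blocks left until we hit the end of
		// the circular log area and have to continue writing at its start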
573 		int32 wrap = fLogSize - logStart;
574 
575 		add_to_iovec(vecs, index, maxVecs, (void *)array, fVolume->BlockSize());
576 
577 		// add block runs
578 
579 		for (int32 i = 0; i < array->CountRuns(); i++) {
580 			const block_run &run = array->RunAt(i);
581 			off_t blockNumber = fVolume->ToBlock(run);
582 
583 			for (int32 j = 0; j < run.Length(); j++) {
584 				if (count >= wrap) {
585 					// we need to write back the first half of the entry directly
586 					logPosition = logStart + count;
587 					if (writev_pos(fVolume->Device(), logOffset
588 						+ (logStart << blockShift), vecs, index) < 0)
589 						FATAL(("could not write log area!\n"));
590 
591 					logStart = 0;
592 					wrap = fLogSize;
593 					count = 0;
594 					index = 0;
595 				}
596 
597 				// fetch the block data from the cache so it can be written into the log
598 				const void *data;
599 				if (j == 0) {
600 					data = block_cache_get_etc(fVolume->BlockCache(), blockNumber,
601 						blockNumber, run.Length());
602 				} else
603 					data = block_cache_get(fVolume->BlockCache(), blockNumber + j);
604 
605 				if (data == NULL) {
					free(vecs);
606 					return B_IO_ERROR;
				}
607 
608 				add_to_iovec(vecs, index, maxVecs, data, fVolume->BlockSize());
609 				count++;
610 			}
611 		}
612 
613 		// write back log entry
614 		if (count > 0) {
615 			logPosition = logStart + count;
616 			if (writev_pos(fVolume->Device(), logOffset + (logStart << blockShift),
617 				vecs, index) < 0)
618 				FATAL(("could not write log area: %s!\n", strerror(errno)));
619 		}
620 
621 		// release blocks again
622 		for (int32 i = 0; i < array->CountRuns(); i++) {
623 			const block_run &run = array->RunAt(i);
624 			off_t blockNumber = fVolume->ToBlock(run);
625 
626 			for (int32 j = 0; j < run.Length(); j++) {
627 				block_cache_put(fVolume->BlockCache(), blockNumber + j);
628 			}
629 		}
630 	}
631 
	free(vecs);

632 	LogEntry *logEntry = new LogEntry(this, fVolume->LogEnd(), runArrays.Length());
633 	if (logEntry == NULL) {
634 		FATAL(("no memory to allocate log entries!"));
635 		return B_NO_MEMORY;
636 	}
637 
638 	// Update the log end pointer in the super block
639 
640 	fVolume->SuperBlock().flags = SUPER_BLOCK_DISK_DIRTY;
641 	fVolume->SuperBlock().log_end = logPosition;
642 	fVolume->LogEnd() = logPosition;
643 
644 	status = fVolume->WriteSuperBlock();
645 
646 	// We need to flush the drive's own cache here to ensure
647 	// disk consistency.
648 	// If that call fails, there is nothing we can do about it anyway
649 	ioctl(fVolume->Device(), B_FLUSH_DRIVE_CACHE);
650 
651 	// at this point, we can finally end the transaction - we're in
652 	// a guaranteed valid state
653 
654 	fEntriesLock.Lock();
655 	fEntries.Add(logEntry);
656 	fUsed += logEntry->Length();
657 	fEntriesLock.Unlock();
658 
659 	cache_end_transaction(fVolume->BlockCache(), fTransactionID, _blockNotify, logEntry);
660 
661 	// If the log wraps around to the next round (it is written as a
662 	// circular buffer), all blocks are flushed out, which is
663 	// possible because we don't hold any locked blocks at this
664 	// point.
665 	if (logPosition < logStart)
666 		fVolume->FlushDevice();
667 
668 	return status;
669 }
670 
671 
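/**	Writes any batched but unwritten transactions to the log, and then
 *	flushes the underlying device. Returns immediately when it is called
 *	from within a running transaction.
 */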
672 status_t
673 Journal::FlushLogAndBlocks()
674 {
675 	status_t status = fLock.Lock();
676 	if (status != B_OK)
677 		return status;
678 
679 	if (fLock.OwnerCount() > 1) {
680 		// whoa, FlushLogAndBlocks() was called from inside a transaction
681 		fLock.Unlock();
682 		return B_OK;
683 	}
684 
685 	// write the current log entry to disk
686 
687 	if (fUnwrittenTransactions != 0 && _TransactionSize() != 0) {
688 		status = _WriteTransactionToLog();
689 		if (status < B_OK)
690 			FATAL(("writing current log entry failed: %s\n", strerror(status)));
691 	}
692 
693 	status = fVolume->FlushDevice();
694 
695 	fLock.Unlock();
696 	return status;
697 }
698 
699 
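/**	Acquires the journal for the given transaction. If the lock is already
 *	held by this thread, the running transaction is reused; otherwise a new
 *	cache transaction is started - or a sub transaction, if there are still
 *	unwritten transactions being batched.
 */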
700 status_t
701 Journal::Lock(Transaction *owner)
702 {
703 	status_t status = fLock.Lock();
704 	if (status != B_OK)
705 		return status;
706 
707 /*	ToDo:
708 	// if the last transaction is older than 2 secs, start a new one
709 	if (fTransactionsInEntry != 0 && system_time() - fTimestamp > 2000000L)
710 		WriteLogEntry();
711 */
712 
713 	if (fLock.OwnerCount() > 1) {
714 		// we'll just use the current transaction again
715 		return B_OK;
716 	}
717 
718 	fOwner = owner;
719 
720 	// ToDo: we need a way to find out how big the current transaction is;
721 	//	we need to be able to detach the latest sub transaction on
722 	//	demand, as well as to have some kind of fallback plan in case the
723 	//	sub transaction itself grows bigger than the log.
724 	//	For that, it would be nice to have some call-back interface in the
725 	//	cache transaction API...
726 
727 	if (fUnwrittenTransactions > 0) {
728 		// start a sub transaction
729 		cache_start_sub_transaction(fVolume->BlockCache(), fTransactionID);
730 		fHasSubtransaction = true;
731 	} else
732 		fTransactionID = cache_start_transaction(fVolume->BlockCache());
733 
734 	if (fTransactionID < B_OK) {
735 		fLock.Unlock();
736 		return fTransactionID;
737 	}
738 
739 	return B_OK;
740 }
741 
742 
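/**	Releases the journal. When the last (outermost) owner unlocks, the
 *	transaction is ended via _TransactionDone(), and the transaction's
 *	time stamp is updated.
 */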
743 void
744 Journal::Unlock(Transaction *owner, bool success)
745 {
746 	if (fLock.OwnerCount() == 1) {
747 		// we only end the transaction if we would really unlock it
748 		// ToDo: what about failing transactions that do not unlock?
749 		_TransactionDone(success);
750 
751 		fTimestamp = system_time();
752 		fOwner = NULL;
753 	}
754 
755 	fLock.Unlock();
756 }
757 
758 
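/**	Returns the number of blocks that are part of the current transaction,
 *	or 0 if that number could not be determined.
 */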
759 uint32
760 Journal::_TransactionSize() const
761 {
762 	int32 count = cache_blocks_in_transaction(fVolume->BlockCache(), fTransactionID);
763 	if (count < 0)
764 		return 0;
765 
766 	return count;
767 }
768 
769 
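/**	Ends the current transaction: it is aborted on failure; on success, it
 *	is either batched with the following transactions (as long as the total
 *	size stays below fMaxTransactionSize), or written out to the log.
 */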
770 status_t
771 Journal::_TransactionDone(bool success)
772 {
773 	if (!success) {
774 		if (_HasSubTransaction())
775 			cache_abort_sub_transaction(fVolume->BlockCache(), fTransactionID);
776 		else
777 			cache_abort_transaction(fVolume->BlockCache(), fTransactionID);
778 
779 		return B_OK;
780 	}
781 
782 	// Up to a maximum size, we will just batch several
783 	// transactions together to improve speed
784 	if (_TransactionSize() < fMaxTransactionSize) {
785 		fUnwrittenTransactions++;
786 		return B_OK;
787 	}
788 
789 	return _WriteTransactionToLog();
790 }
791 
792 
793 status_t
794 Journal::LogBlocks(off_t blockNumber, const uint8 *buffer, size_t numBlocks)
795 {
796 	panic("LogBlocks() called!\n");
797 #if 0
798 	// ToDo: that's for now - we should change the log file size here
799 	if (TransactionSize() + numBlocks + 1 > fLogSize)
800 		return B_DEVICE_FULL;
801 
802 	int32 blockSize = fVolume->BlockSize();
803 
804 	for (;numBlocks-- > 0; blockNumber++, buffer += blockSize) {
805 		if (fArray.Find(blockNumber) >= 0) {
806 			// The block is already in the log, so just update its data
807 			// Note, this is only necessary if this method is called with a buffer
808 			// different from the cached block buffer - which is unlikely but
809 			// we'll make sure this way (costs one cache lookup, though).
810 			// ToDo:
811 /*			status_t status = cached_write(fVolume->Device(), blockNumber, buffer, 1, blockSize);
812 			if (status < B_OK)
813 				return status;
814 */
815 			continue;
816 		}
817 
818 		// Insert the block into the transaction's array, and write the changes
819 		// back into the locked cache buffer
820 		fArray.Insert(blockNumber);
821 
822 		// ToDo:
823 /*		status_t status = cached_write_locked(fVolume->Device(), blockNumber, buffer, 1, blockSize);
824 		if (status < B_OK)
825 			return status;
826 */	}
827 
828 	// ToDo:
829 	// If necessary, flush the log, so that we have enough space for this transaction
830 /*	if (TransactionSize() > FreeLogBlocks())
831 		force_cache_flush(fVolume->Device(), true);
832 */
833 #endif
834 	return B_OK;
835 }
836 
837 
838 //	#pragma mark -
839 
840 
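/**	Attaches this transaction to the journal that is responsible for
 *	\a refBlock, and locks it. Does nothing if the transaction has already
 *	been started.
 */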
841 status_t
842 Transaction::Start(Volume *volume, off_t refBlock)
843 {
844 	// has it already been started?
845 	if (fJournal != NULL)
846 		return B_OK;
847 
848 	fJournal = volume->GetJournal(refBlock);
849 	if (fJournal != NULL && fJournal->Lock(this) == B_OK)
850 		return B_OK;
851 
852 	fJournal = NULL;
853 	return B_ERROR;
854 }
855 
856