xref: /haiku/src/system/kernel/fs/fifo.cpp (revision 002f37b0cca92e4cf72857c72ac95db5a8b09615)
1 /*
2  * Copyright 2007-2013, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2003-2010, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 
8 #include "fifo.h"
9 
10 #include <limits.h>
11 #include <stdio.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <sys/ioctl.h>
15 #include <sys/stat.h>
16 
17 #include <new>
18 
19 #include <KernelExport.h>
20 #include <NodeMonitor.h>
21 #include <Select.h>
22 
23 #include <condition_variable.h>
24 #include <debug_hex_dump.h>
25 #include <khash.h>
26 #include <lock.h>
27 #include <select_sync_pool.h>
28 #include <syscall_restart.h>
29 #include <team.h>
30 #include <thread.h>
31 #include <util/DoublyLinkedList.h>
32 #include <util/AutoLock.h>
33 #include <util/ring_buffer.h>
34 #include <vfs.h>
35 #include <vfs_defs.h>
36 #include <vm/vm.h>
37 
38 
39 //#define TRACE_FIFO
40 #ifdef TRACE_FIFO
41 #	define TRACE(x...) dprintf(x)
42 #else
43 #	define TRACE(x...)
44 #endif
45 
46 
47 #define PIPEFS_HASH_SIZE		16
48 
49 
50 namespace fifo {
51 
52 
53 struct file_cookie;
54 class Inode;
55 
56 
// Thin wrapper around the kernel ring_buffer API that tolerates a NULL
// buffer: the backing buffer is created lazily (CreateBuffer()) when the
// FIFO becomes active and freed again (DeleteBuffer()) when both ends are
// closed. All accessors degrade gracefully while no buffer exists.
class RingBuffer {
public:
								RingBuffer();
								~RingBuffer();

			status_t			CreateBuffer();
			void				DeleteBuffer();

			ssize_t				Write(const void* buffer, size_t length,
									bool isUser);
			ssize_t				Read(void* buffer, size_t length, bool isUser);
			ssize_t				Peek(size_t offset, void* buffer,
									size_t length) const;

			size_t				Readable() const;
			size_t				Writable() const;

private:
			struct ring_buffer*	fBuffer;
};
77 
78 
// A pending blocking read. Instances live on the reading thread's stack
// (see fifo_read()) and are linked into Inode::fReadRequests while the read
// is in progress. The fLock/fNotified pair guarantees that the blocked
// thread is unblocked at most once, and never after it has already started
// re-acquiring the request mutex (see Inode::WaitForReadRequest()).
class ReadRequest : public DoublyLinkedListLinkImpl<ReadRequest> {
public:
	ReadRequest(file_cookie* cookie)
		:
		fThread(thread_get_current_thread()),
		fCookie(cookie),
		// starts out "notified" so a stray Notify() before the first
		// SetNotified(false) is a no-op
		fNotified(true)
	{
		B_INITIALIZE_SPINLOCK(&fLock);
	}

	// Marks the request as (not yet) notified. Set to false right before
	// blocking, back to true as soon as the thread continues.
	void SetNotified(bool notified)
	{
		InterruptsSpinLocker _(fLock);
		fNotified = notified;
	}

	// Unblocks the waiting thread with the given status, unless it was
	// already notified.
	void Notify(status_t status = B_OK)
	{
		InterruptsSpinLocker _(fLock);
		TRACE("ReadRequest %p::Notify(), fNotified %d\n", this, fNotified);

		if (!fNotified) {
			thread_unblock(fThread, status);
			fNotified = true;
		}
	}

	Thread* GetThread() const
	{
		return fThread;
	}

	file_cookie* Cookie() const
	{
		return fCookie;
	}

private:
	spinlock		fLock;
	Thread*			fThread;
	file_cookie*	fCookie;
	volatile bool	fNotified;
};
123 
124 
// A waiting writer. Records the blocked thread and how many bytes must
// become writable before waking it is worthwhile (consulted by
// Inode::NotifyBytesRead()). Instances live on the writing thread's stack
// while it blocks in Inode::WriteDataToBuffer().
class WriteRequest : public DoublyLinkedListLinkImpl<WriteRequest> {
public:
	WriteRequest(Thread* thread, size_t minimalWriteCount)
		:
		fThread(thread),
		fMinimalWriteCount(minimalWriteCount)
	{
	}

	Thread* GetThread() const
	{
		return fThread;
	}

	size_t MinimalWriteCount() const
	{
		return fMinimalWriteCount;
	}

private:
	Thread*	fThread;
	size_t	fMinimalWriteCount;
};
148 
149 
// Queues of pending requests, in arrival (FIFO) order.
typedef DoublyLinkedList<ReadRequest> ReadRequestList;
typedef DoublyLinkedList<WriteRequest> WriteRequestList;
152 
153 
// The actual FIFO: a ring buffer plus reader/writer bookkeeping. All state
// is guarded by fRequestLock; blocked writers wait on fWriteCondition,
// blocked readers on their individual ReadRequest objects.
class Inode {
public:
								Inode();
								~Inode();

			status_t			InitCheck();

			bool				IsActive() const { return fActive; }
			timespec			CreationTime() const { return fCreationTime; }
			void				SetCreationTime(timespec creationTime)
									{ fCreationTime = creationTime; }
			timespec			ModificationTime() const
									{ return fModificationTime; }
			void				SetModificationTime(timespec modificationTime)
									{ fModificationTime = modificationTime; }

			// Lock guarding all request/buffer state below.
			mutex*				RequestLock() { return &fRequestLock; }

			status_t			WriteDataToBuffer(const void* data,
									size_t* _length, bool nonBlocking,
									bool isUser);
			status_t			ReadDataFromBuffer(void* data, size_t* _length,
									bool nonBlocking, bool isUser,
									ReadRequest& request);
			size_t				BytesAvailable() const
									{ return fBuffer.Readable(); }
			size_t				BytesWritable() const
									{ return fBuffer.Writable(); }

			void				AddReadRequest(ReadRequest& request);
			void				RemoveReadRequest(ReadRequest& request);
			status_t			WaitForReadRequest(ReadRequest& request);

			void				NotifyBytesRead(size_t bytes);
			void				NotifyReadDone();
			void				NotifyBytesWritten(size_t bytes);
			void				NotifyEndClosed(bool writer);

			void				Open(int openMode);
			void				Close(file_cookie* cookie);
			int32				ReaderCount() const { return fReaderCount; }
			int32				WriterCount() const { return fWriterCount; }

			status_t			Select(uint8 event, selectsync* sync,
									int openMode);
			status_t			Deselect(uint8 event, selectsync* sync,
									int openMode);

			void				Dump(bool dumpData) const;
	static	int					Dump(int argc, char** argv);

private:
			timespec			fCreationTime;
			timespec			fModificationTime;

			RingBuffer			fBuffer;

			ReadRequestList		fReadRequests;
			WriteRequestList	fWriteRequests;

			mutex				fRequestLock;

			ConditionVariable	fWriteCondition;

			int32				fReaderCount;
			int32				fWriterCount;
			// true while both a reader and a writer are (or were) connected
			bool				fActive;

			select_sync_pool*	fReadSelectSyncPool;
			select_sync_pool*	fWriteSelectSyncPool;
};
225 
226 
// Inode subclass wrapping ("covering") another file system's vnode. The
// original node's ops and private data are preserved in fSuperVnode so that
// requests like stat can be forwarded to the underlying node (see
// fifo_read_stat() and friends).
class FIFOInode : public Inode {
public:
	FIFOInode(fs_vnode* vnode)
		:
		Inode(),
		fSuperVnode(*vnode)
	{
	}

	fs_vnode*	SuperVnode() { return &fSuperVnode; }

private:
	fs_vnode	fSuperVnode;
};
241 
242 
// Per-descriptor state for an open FIFO.
struct file_cookie {
	int	open_mode;
			// guarded by Inode::fRequestLock

	// Sets or clears O_NONBLOCK in the stored open mode, leaving all other
	// bits untouched. Caller must hold Inode::fRequestLock.
	void SetNonBlocking(bool nonBlocking)
	{
		int mode = open_mode & ~(int)O_NONBLOCK;
		if (nonBlocking)
			mode |= O_NONBLOCK;
		open_mode = mode;
	}
};
255 
256 
257 // #pragma mark -
258 
259 
260 RingBuffer::RingBuffer()
261 	:
262 	fBuffer(NULL)
263 {
264 }
265 
266 
// Frees the backing ring buffer, if one was ever created.
RingBuffer::~RingBuffer()
{
	DeleteBuffer();
}
271 
272 
273 status_t
274 RingBuffer::CreateBuffer()
275 {
276 	if (fBuffer != NULL)
277 		return B_OK;
278 
279 	fBuffer = create_ring_buffer(VFS_FIFO_BUFFER_CAPACITY);
280 	return fBuffer != NULL ? B_OK : B_NO_MEMORY;
281 }
282 
283 
284 void
285 RingBuffer::DeleteBuffer()
286 {
287 	if (fBuffer != NULL) {
288 		delete_ring_buffer(fBuffer);
289 		fBuffer = NULL;
290 	}
291 }
292 
293 
294 inline ssize_t
295 RingBuffer::Write(const void* buffer, size_t length, bool isUser)
296 {
297 	if (fBuffer == NULL)
298 		return B_NO_MEMORY;
299 	if (isUser && !IS_USER_ADDRESS(buffer))
300 		return B_BAD_ADDRESS;
301 
302 	return isUser
303 		? ring_buffer_user_write(fBuffer, (const uint8*)buffer, length)
304 		: ring_buffer_write(fBuffer, (const uint8*)buffer, length);
305 }
306 
307 
308 inline ssize_t
309 RingBuffer::Read(void* buffer, size_t length, bool isUser)
310 {
311 	if (fBuffer == NULL)
312 		return B_NO_MEMORY;
313 	if (isUser && !IS_USER_ADDRESS(buffer))
314 		return B_BAD_ADDRESS;
315 
316 	return isUser
317 		? ring_buffer_user_read(fBuffer, (uint8*)buffer, length)
318 		: ring_buffer_read(fBuffer, (uint8*)buffer, length);
319 }
320 
321 
322 inline ssize_t
323 RingBuffer::Peek(size_t offset, void* buffer, size_t length) const
324 {
325 	if (fBuffer == NULL)
326 		return B_NO_MEMORY;
327 
328 	return ring_buffer_peek(fBuffer, offset, (uint8*)buffer, length);
329 }
330 
331 
332 inline size_t
333 RingBuffer::Readable() const
334 {
335 	return fBuffer != NULL ? ring_buffer_readable(fBuffer) : 0;
336 }
337 
338 
339 inline size_t
340 RingBuffer::Writable() const
341 {
342 	return fBuffer != NULL ? ring_buffer_writable(fBuffer) : 0;
343 }
344 
345 
346 //	#pragma mark -
347 
348 
Inode::Inode()
	:
	fReadRequests(),
	fWriteRequests(),
	fReaderCount(0),
	fWriterCount(0),
	fActive(false),
	fReadSelectSyncPool(NULL),
	fWriteSelectSyncPool(NULL)
{
	// Writers block on this condition variable; readers are woken through
	// their individual ReadRequest objects instead.
	fWriteCondition.Publish(this, "pipe");
	mutex_init(&fRequestLock, "pipe request");

	// NOTE(review): the sec/nsec split below assumes real_time_clock()
	// returns microseconds -- verify against real_time_clock_usecs().
	bigtime_t time = real_time_clock();
	fModificationTime.tv_sec = time / 1000000;
	fModificationTime.tv_nsec = (time % 1000000) * 1000;
	fCreationTime = fModificationTime;
}
367 
368 
// Tears down the synchronization objects in reverse order of construction.
Inode::~Inode()
{
	fWriteCondition.Unpublish();
	mutex_destroy(&fRequestLock);
}
374 
375 
// Construction cannot currently fail; kept for API symmetry with other
// kernel objects (see create_fifo_vnode()).
status_t
Inode::InitCheck()
{
	return B_OK;
}
381 
382 
/*!	Writes the specified data bytes to the inode's ring buffer. The
	request lock must be held when calling this method.
	Notifies readers if necessary, so that blocking readers will get started.
	Returns B_OK for success, B_BAD_ADDRESS if copying from the buffer failed,
	and various semaphore errors (like B_WOULD_BLOCK in non-blocking mode). If
	the returned length is > 0, the returned error code can be ignored.
*/
status_t
Inode::WriteDataToBuffer(const void* _data, size_t* _length, bool nonBlocking,
	bool isUser)
{
	const uint8* data = (const uint8*)_data;
	size_t dataSize = *_length;
	// *_length doubles as the out-parameter for the byte count written
	size_t& written = *_length;
	written = 0;

	TRACE("Inode %p::WriteDataToBuffer(data = %p, bytes = %zu)\n", this, data,
		dataSize);

	// A request up to VFS_FIFO_ATOMIC_WRITE_SIZE bytes shall not be
	// interleaved with other writer's data.
	size_t minToWrite = 1;
	if (dataSize <= VFS_FIFO_ATOMIC_WRITE_SIZE)
		minToWrite = dataSize;

	while (dataSize > 0) {
		// Wait until enough space in the buffer is available.
		while (!fActive
				|| (fBuffer.Writable() < minToWrite && fReaderCount > 0)) {
			if (nonBlocking)
				return B_WOULD_BLOCK;

			ConditionVariableEntry entry;
			entry.Add(this);

			// Queue a WriteRequest so NotifyBytesRead() knows how much
			// space we are waiting for.
			WriteRequest request(thread_get_current_thread(), minToWrite);
			fWriteRequests.Add(&request);

			// The request lock is dropped while we block; the condition
			// variable entry was added beforehand, so no wake-up is lost.
			mutex_unlock(&fRequestLock);
			status_t status = entry.Wait(B_CAN_INTERRUPT);
			mutex_lock(&fRequestLock);

			fWriteRequests.Remove(&request);

			if (status != B_OK)
				return status;
		}

		// write only as long as there are readers left
		if (fActive && fReaderCount == 0) {
			// POSIX semantics: raise SIGPIPE/EPIPE only if nothing has
			// been written yet; a partial write is reported as success.
			if (written == 0)
				send_signal(find_thread(NULL), SIGPIPE);
			return EPIPE;
		}

		// write as much as we can

		size_t toWrite = (fActive ? fBuffer.Writable() : 0);
		if (toWrite > dataSize)
			toWrite = dataSize;

		if (toWrite > 0) {
			ssize_t bytesWritten = fBuffer.Write(data, toWrite, isUser);
			if (bytesWritten < 0)
				return bytesWritten;
		}

		data += toWrite;
		dataSize -= toWrite;
		written += toWrite;

		// wake readers/selectors now that data is available
		NotifyBytesWritten(toWrite);
	}

	return B_OK;
}
459 
460 
/*!	Reads up to \a *_length bytes from the ring buffer into \a data. The
	request lock must be held, and \a request must already have been queued
	via AddReadRequest(). Blocks (unless \a nonBlocking) until the request
	is first in queue and data are available. Returns B_OK with
	*_length == 0 on EOF (no writers left and the buffer is empty).
*/
status_t
Inode::ReadDataFromBuffer(void* data, size_t* _length, bool nonBlocking,
	bool isUser, ReadRequest& request)
{
	size_t dataSize = *_length;
	*_length = 0;

	// wait until our request is first in queue
	status_t error;
	if (fReadRequests.Head() != &request) {
		if (nonBlocking)
			return B_WOULD_BLOCK;

		TRACE("Inode %p::%s(): wait for request %p to become the first "
			"request.\n", this, __FUNCTION__, &request);

		error = WaitForReadRequest(request);
		if (error != B_OK)
			return error;
	}

	// wait until data are available
	while (fBuffer.Readable() == 0) {
		if (nonBlocking)
			return B_WOULD_BLOCK;

		// all writers are gone and the buffer is drained: report EOF
		if (fActive && fWriterCount == 0)
			return B_OK;

		TRACE("Inode %p::%s(): wait for data, request %p\n", this, __FUNCTION__,
			&request);

		error = WaitForReadRequest(request);
		if (error != B_OK)
			return error;
	}

	// read as much as we can
	size_t toRead = fBuffer.Readable();
	if (toRead > dataSize)
		toRead = dataSize;

	ssize_t bytesRead = fBuffer.Read(data, toRead, isUser);
	if (bytesRead < 0)
		return bytesRead;

	// wake writers/selectors now that buffer space was freed
	NotifyBytesRead(toRead);

	*_length = toRead;

	return B_OK;
}
513 
514 
// Appends the request to the read queue. Caller must hold the request lock.
void
Inode::AddReadRequest(ReadRequest& request)
{
	fReadRequests.Add(&request);
}
520 
521 
// Unlinks the request from the read queue. Caller must hold the request
// lock.
void
Inode::RemoveReadRequest(ReadRequest& request)
{
	fReadRequests.Remove(&request);
}
527 
528 
/*!	Blocks the calling thread until \a request is notified (or the wait is
	interrupted). The request lock must be held; it is dropped while
	blocking and re-acquired before returning.
*/
status_t
Inode::WaitForReadRequest(ReadRequest& request)
{
	// add the entry to wait on
	thread_prepare_to_block(thread_get_current_thread(), B_CAN_INTERRUPT,
		THREAD_BLOCK_TYPE_OTHER, "fifo read request");

	// from now on Notify() will actually unblock us
	request.SetNotified(false);

	// wait
	mutex_unlock(&fRequestLock);
	status_t status = thread_block();

	// Before going to lock again, we need to make sure no one tries to
	// unblock us. Otherwise that would screw with mutex_lock().
	request.SetNotified(true);

	mutex_lock(&fRequestLock);

	return status;
}
550 
551 
/*!	Called after \a bytes were consumed from the buffer. Wakes up blocked
	writers and write-selectors where the freed space makes a difference.
	Caller must hold the request lock.
*/
void
Inode::NotifyBytesRead(size_t bytes)
{
	// notify writer, if something can be written now
	size_t writable = fBuffer.Writable();
	if (bytes > 0) {
		// notify select()ors only, if nothing was writable before
		// (writable == bytes means the buffer was completely full)
		if (writable == bytes) {
			if (fWriteSelectSyncPool)
				notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_WRITE);
		}

		// If any of the waiting writers has a minimal write count that has
		// now become satisfied, we notify all of them (condition variables
		// don't support doing that selectively).
		WriteRequest* request;
		WriteRequestList::Iterator iterator = fWriteRequests.GetIterator();
		while ((request = iterator.Next()) != NULL) {
			size_t minWriteCount = request->MinimalWriteCount();
			if (minWriteCount > 0 && minWriteCount <= writable
					&& minWriteCount > writable - bytes) {
				fWriteCondition.NotifyAll();
				break;
			}
		}
	}
}
579 
580 
/*!	Called when a reader finished its request. Passes the baton to the next
	queued reader if data remain. Caller must hold the request lock.
*/
void
Inode::NotifyReadDone()
{
	// notify next reader, if there's still something to be read
	if (fBuffer.Readable() > 0) {
		if (ReadRequest* request = fReadRequests.First())
			request->Notify();
	}
}
590 
591 
/*!	Called after \a bytes were added to the buffer. Wakes the first queued
	reader and read-selectors, but only on the empty -> non-empty
	transition (Readable() == bytes). Caller must hold the request lock.
*/
void
Inode::NotifyBytesWritten(size_t bytes)
{
	// notify reader, if something can be read now
	if (bytes > 0 && fBuffer.Readable() == bytes) {
		if (fReadSelectSyncPool)
			notify_select_event_pool(fReadSelectSyncPool, B_SELECT_READ);

		if (ReadRequest* request = fReadRequests.First())
			request->Notify();
	}
}
604 
605 
/*!	Called when the last descriptor of one end was closed: \a writer
	indicates which end. Wakes everything blocked on the now-broken pipe.
	Caller must hold the request lock.
*/
void
Inode::NotifyEndClosed(bool writer)
{
	TRACE("Inode %p::%s(%s)\n", this, __FUNCTION__,
		writer ? "writer" : "reader");

	if (writer) {
		// Our last writer has been closed; if the pipe
		// contains no data, unlock all waiting readers
		TRACE("  buffer readable: %zu\n", fBuffer.Readable());
		if (fBuffer.Readable() == 0) {
			// readers are woken with B_OK and will then see EOF
			ReadRequestList::Iterator iterator = fReadRequests.GetIterator();
			while (ReadRequest* request = iterator.Next())
				request->Notify();

			if (fReadSelectSyncPool)
				notify_select_event_pool(fReadSelectSyncPool, B_SELECT_READ);
		}
	} else {
		// Last reader is gone. Wake up all writers.
		fWriteCondition.NotifyAll();

		if (fWriteSelectSyncPool) {
			notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_WRITE);
			notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_ERROR);
		}
	}
}
634 
635 
/*!	Called for every open() on the FIFO. Updates the end counts and, once
	both ends are connected, allocates the buffer and activates the FIFO.
	Note that an O_RDWR open is counted as a reader only -- this matches
	the accounting in Close().
*/
void
Inode::Open(int openMode)
{
	MutexLocker locker(RequestLock());

	if ((openMode & O_ACCMODE) == O_WRONLY)
		fWriterCount++;

	if ((openMode & O_ACCMODE) == O_RDONLY || (openMode & O_ACCMODE) == O_RDWR)
		fReaderCount++;

	if (fReaderCount > 0 && fWriterCount > 0) {
		TRACE("Inode %p::Open(): fifo becomes active\n", this);
		fBuffer.CreateBuffer();
		fActive = true;

		// notify all waiting writers that they can start
		if (fWriteSelectSyncPool)
			notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_WRITE);
		fWriteCondition.NotifyAll();
	}
}
658 
659 
660 void
661 Inode::Close(file_cookie* cookie)
662 {
663 	TRACE("Inode %p::Close(openMode = %d)\n", this, openMode);
664 
665 	MutexLocker locker(RequestLock());
666 
667 	int openMode = cookie->open_mode;
668 
669 	// Notify all currently reading file descriptors
670 	ReadRequestList::Iterator iterator = fReadRequests.GetIterator();
671 	while (ReadRequest* request = iterator.Next()) {
672 		if (request->Cookie() == cookie)
673 			request->Notify(B_FILE_ERROR);
674 	}
675 
676 	if ((openMode & O_ACCMODE) == O_WRONLY && --fWriterCount == 0)
677 		NotifyEndClosed(true);
678 
679 	if ((openMode & O_ACCMODE) == O_RDONLY
680 		|| (openMode & O_ACCMODE) == O_RDWR) {
681 		if (--fReaderCount == 0)
682 			NotifyEndClosed(false);
683 	}
684 
685 	if (fWriterCount == 0) {
686 		// Notify any still reading writers to stop
687 		// TODO: This only works reliable if there is only one writer - we could
688 		// do the same thing done for the read requests.
689 		fWriteCondition.NotifyAll(B_FILE_ERROR);
690 	}
691 
692 	if (fReaderCount == 0 && fWriterCount == 0) {
693 		fActive = false;
694 		fBuffer.DeleteBuffer();
695 	}
696 }
697 
698 
/*!	Registers a select() on one end of the FIFO and immediately delivers
	the event if the condition already holds. Only read-only and
	write-only cookies are supported; O_RDWR yields B_NOT_ALLOWED.
	Caller must hold the request lock (see fifo_select()).
*/
status_t
Inode::Select(uint8 event, selectsync* sync, int openMode)
{
	bool writer = true;
	select_sync_pool** pool;
	if ((openMode & O_RWMASK) == O_RDONLY) {
		pool = &fReadSelectSyncPool;
		writer = false;
	} else if ((openMode & O_RWMASK) == O_WRONLY) {
		pool = &fWriteSelectSyncPool;
	} else
		return B_NOT_ALLOWED;

	if (add_select_sync_pool_entry(pool, sync, event) != B_OK)
		return B_ERROR;

	// signal right away, if the condition holds already
	if (writer) {
		// writable space, or a broken pipe (no readers), is signalled
		if ((event == B_SELECT_WRITE
				&& (fBuffer.Writable() > 0 || fReaderCount == 0))
			|| (event == B_SELECT_ERROR && fReaderCount == 0)) {
			return notify_select_event(sync, event);
		}
	} else {
		// readable data, or EOF (no writers), is signalled
		if (event == B_SELECT_READ
				&& (fBuffer.Readable() > 0 || fWriterCount == 0)) {
			return notify_select_event(sync, event);
		}
	}

	return B_OK;
}
731 
732 
733 status_t
734 Inode::Deselect(uint8 event, selectsync* sync, int openMode)
735 {
736 	select_sync_pool** pool;
737 	if ((openMode & O_RWMASK) == O_RDONLY) {
738 		pool = &fReadSelectSyncPool;
739 	} else if ((openMode & O_RWMASK) == O_WRONLY) {
740 		pool = &fWriteSelectSyncPool;
741 	} else
742 		return B_NOT_ALLOWED;
743 
744 	remove_select_sync_pool_entry(pool, sync, event);
745 	return B_OK;
746 }
747 
748 
/*!	Prints the FIFO's state via kprintf(); with \a dumpData also hexdumps
	the buffered bytes. Meant for the kernel debugger (see the static
	Dump(int, char**) command wrapper) -- no locking is done here.
*/
void
Inode::Dump(bool dumpData) const
{
	kprintf("FIFO %p\n", this);
	kprintf("  active:        %s\n", fActive ? "true" : "false");
	kprintf("  readers:       %" B_PRId32 "\n", fReaderCount);
	kprintf("  writers:       %" B_PRId32 "\n", fWriterCount);

	if (!fReadRequests.IsEmpty()) {
		kprintf(" pending readers:\n");
		for (ReadRequestList::ConstIterator it = fReadRequests.GetIterator();
			ReadRequest* request = it.Next();) {
			kprintf("    %p: thread %" B_PRId32 ", cookie: %p\n", request,
				request->GetThread()->id, request->Cookie());
		}
	}

	if (!fWriteRequests.IsEmpty()) {
		kprintf(" pending writers:\n");
		for (WriteRequestList::ConstIterator it = fWriteRequests.GetIterator();
			WriteRequest* request = it.Next();) {
			kprintf("    %p:  thread %" B_PRId32 ", min count: %zu\n", request,
				request->GetThread()->id, request->MinimalWriteCount());
		}
	}

	kprintf("  %zu bytes buffered\n", fBuffer.Readable());

	if (dumpData && fBuffer.Readable() > 0) {
		// Adapter feeding the buffered bytes to the kernel hex dumper,
		// one byte at a time via Peek() (non-destructive).
		struct DataProvider : BKernel::HexDumpDataProvider {
			DataProvider(const RingBuffer& buffer)
				:
				fBuffer(buffer),
				fOffset(0)
			{
			}

			virtual bool HasMoreData() const
			{
				return fOffset < fBuffer.Readable();
			}

			virtual uint8 NextByte()
			{
				uint8 byte = '\0';
				if (fOffset < fBuffer.Readable()) {
					fBuffer.Peek(fOffset, &byte, 1);
					fOffset++;
				}
				return byte;
			}

			virtual bool GetAddressString(char* buffer, size_t bufferSize) const
			{
				snprintf(buffer, bufferSize, "    %4zx", fOffset);
				return true;
			}

		private:
			const RingBuffer&	fBuffer;
			size_t				fOffset;
		};

		DataProvider dataProvider(fBuffer);
		BKernel::print_hex_dump(dataProvider, fBuffer.Readable());
	}
}
816 
817 
/*!	Entry point of the "fifo" kernel debugger command (registered in
	fifo_init()). Usage: fifo [ -d ] <address>; -d additionally hexdumps
	the buffer contents.
*/
/*static*/ int
Inode::Dump(int argc, char** argv)
{
	bool dumpData = false;
	int argi = 1;
	if (argi < argc && strcmp(argv[argi], "-d") == 0) {
		dumpData = true;
		argi++;
	}

	// exactly one non-option argument (the address) is expected
	if (argi >= argc || argi + 2 < argc) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	Inode* node = (Inode*)parse_expression(argv[argi]);
	if (IS_USER_ADDRESS(node)) {
		kprintf("invalid FIFO address\n");
		return 0;
	}

	node->Dump(dumpData);
	return 0;
}
842 
843 
844 //	#pragma mark - vnode API
845 
846 
847 static status_t
848 fifo_put_vnode(fs_volume* volume, fs_vnode* vnode, bool reenter)
849 {
850 	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
851 	fs_vnode* superVnode = fifo->SuperVnode();
852 
853 	status_t error = B_OK;
854 	if (superVnode->ops->put_vnode != NULL)
855 		error = superVnode->ops->put_vnode(volume, superVnode, reenter);
856 
857 	delete fifo;
858 
859 	return error;
860 }
861 
862 
863 static status_t
864 fifo_remove_vnode(fs_volume* volume, fs_vnode* vnode, bool reenter)
865 {
866 	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
867 	fs_vnode* superVnode = fifo->SuperVnode();
868 
869 	status_t error = B_OK;
870 	if (superVnode->ops->remove_vnode != NULL)
871 		error = superVnode->ops->remove_vnode(volume, superVnode, reenter);
872 
873 	delete fifo;
874 
875 	return error;
876 }
877 
878 
879 static status_t
880 fifo_open(fs_volume* _volume, fs_vnode* _node, int openMode,
881 	void** _cookie)
882 {
883 	Inode* inode = (Inode*)_node->private_node;
884 
885 	TRACE("fifo_open(): node = %p, openMode = %d\n", inode, openMode);
886 
887 	file_cookie* cookie = (file_cookie*)malloc(sizeof(file_cookie));
888 	if (cookie == NULL)
889 		return B_NO_MEMORY;
890 
891 	TRACE("  open cookie = %p\n", cookie);
892 	cookie->open_mode = openMode;
893 	inode->Open(openMode);
894 
895 	*_cookie = (void*)cookie;
896 
897 	return B_OK;
898 }
899 
900 
901 static status_t
902 fifo_close(fs_volume* volume, fs_vnode* vnode, void* _cookie)
903 {
904 	file_cookie* cookie = (file_cookie*)_cookie;
905 	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
906 
907 	fifo->Close(cookie);
908 
909 	return B_OK;
910 }
911 
912 
913 static status_t
914 fifo_free_cookie(fs_volume* _volume, fs_vnode* _node, void* _cookie)
915 {
916 	file_cookie* cookie = (file_cookie*)_cookie;
917 
918 	TRACE("fifo_freecookie: entry vnode %p, cookie %p\n", _node, _cookie);
919 
920 	free(cookie);
921 
922 	return B_OK;
923 }
924 
925 
// FIFOs have no backing store, so there is nothing to sync.
static status_t
fifo_fsync(fs_volume* _volume, fs_vnode* _node)
{
	return B_OK;
}
931 
932 
/*!	Read hook. The file position is ignored -- FIFOs are not seekable.
	Returns 0 bytes (EOF) once the write end is closed and the buffer is
	drained; a partial read is reported as success.
*/
static status_t
fifo_read(fs_volume* _volume, fs_vnode* _node, void* _cookie,
	off_t /*pos*/, void* buffer, size_t* _length)
{
	file_cookie* cookie = (file_cookie*)_cookie;
	Inode* inode = (Inode*)_node->private_node;

	TRACE("fifo_read(vnode = %p, cookie = %p, length = %lu, mode = %d)\n",
		inode, cookie, *_length, cookie->open_mode);

	MutexLocker locker(inode->RequestLock());

	if ((cookie->open_mode & O_RWMASK) != O_RDONLY)
		return B_NOT_ALLOWED;

	if (inode->IsActive() && inode->WriterCount() == 0) {
		// as long there is no writer, and the pipe is empty,
		// we always just return 0 to indicate end of file
		if (inode->BytesAvailable() == 0) {
			*_length = 0;
			return B_OK;
		}
	}

	// issue read request

	// lives on our stack; must be removed again before returning
	ReadRequest request(cookie);
	inode->AddReadRequest(request);

	TRACE("  issue read request %p\n", &request);

	size_t length = *_length;
	status_t status = inode->ReadDataFromBuffer(buffer, &length,
		(cookie->open_mode & O_NONBLOCK) != 0, is_called_via_syscall(),
		request);

	inode->RemoveReadRequest(request);
	// hand any remaining data to the next queued reader
	inode->NotifyReadDone();

	TRACE("  done reading request %p, length %zu\n", &request, length);

	// a partial read overrides any error
	if (length > 0)
		status = B_OK;

	*_length = length;
	return status;
}
980 
981 
/*!	Write hook. The file position is ignored -- FIFOs are not seekable.
	A partial write is reported as success; writing zero bytes is a no-op.
*/
static status_t
fifo_write(fs_volume* _volume, fs_vnode* _node, void* _cookie,
	off_t /*pos*/, const void* buffer, size_t* _length)
{
	file_cookie* cookie = (file_cookie*)_cookie;
	Inode* inode = (Inode*)_node->private_node;

	TRACE("fifo_write(vnode = %p, cookie = %p, length = %lu)\n",
		_node, cookie, *_length);

	MutexLocker locker(inode->RequestLock());

	if ((cookie->open_mode & O_RWMASK) != O_WRONLY)
		return B_NOT_ALLOWED;

	size_t length = *_length;
	if (length == 0)
		return B_OK;

	// copy data into ring buffer
	status_t status = inode->WriteDataToBuffer(buffer, &length,
		(cookie->open_mode & O_NONBLOCK) != 0, is_called_via_syscall());

	// a partial write overrides any error
	if (length > 0)
		status = B_OK;

	*_length = length;
	return status;
}
1011 
1012 
1013 static status_t
1014 fifo_read_stat(fs_volume* volume, fs_vnode* vnode, struct ::stat* st)
1015 {
1016 	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
1017 	fs_vnode* superVnode = fifo->SuperVnode();
1018 
1019 	if (superVnode->ops->read_stat == NULL)
1020 		return B_BAD_VALUE;
1021 
1022 	status_t error = superVnode->ops->read_stat(volume, superVnode, st);
1023 	if (error != B_OK)
1024 		return error;
1025 
1026 
1027 	MutexLocker locker(fifo->RequestLock());
1028 
1029 	st->st_size = fifo->BytesAvailable();
1030 
1031 	st->st_blksize = 4096;
1032 
1033 	// TODO: Just pass the changes to our modification time on to the super node.
1034 	st->st_atim.tv_sec = time(NULL);
1035 	st->st_atim.tv_nsec = 0;
1036 	st->st_mtim = st->st_ctim = fifo->ModificationTime();
1037 
1038 	return B_OK;
1039 }
1040 
1041 
1042 static status_t
1043 fifo_write_stat(fs_volume* volume, fs_vnode* vnode, const struct ::stat* st,
1044 	uint32 statMask)
1045 {
1046 	// we cannot change the size of anything
1047 	if ((statMask & B_STAT_SIZE) != 0)
1048 		return B_BAD_VALUE;
1049 
1050 	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
1051 	fs_vnode* superVnode = fifo->SuperVnode();
1052 
1053 	if (superVnode->ops->write_stat == NULL)
1054 		return B_BAD_VALUE;
1055 
1056 	status_t error = superVnode->ops->write_stat(volume, superVnode, st,
1057 		statMask);
1058 	if (error != B_OK)
1059 		return error;
1060 
1061 	return B_OK;
1062 }
1063 
1064 
1065 static status_t
1066 fifo_ioctl(fs_volume* _volume, fs_vnode* _node, void* _cookie, uint32 op,
1067 	void* buffer, size_t length)
1068 {
1069 	file_cookie* cookie = (file_cookie*)_cookie;
1070 	Inode* inode = (Inode*)_node->private_node;
1071 
1072 	TRACE("fifo_ioctl: vnode %p, cookie %p, op %ld, buf %p, len %ld\n",
1073 		_vnode, _cookie, op, buffer, length);
1074 
1075 	switch (op) {
1076 		case FIONBIO:
1077 		{
1078 			if (buffer == NULL)
1079 				return B_BAD_VALUE;
1080 
1081 			int value;
1082 			if (is_called_via_syscall()) {
1083 				if (!IS_USER_ADDRESS(buffer)
1084 					|| user_memcpy(&value, buffer, sizeof(int)) != B_OK) {
1085 					return B_BAD_ADDRESS;
1086 				}
1087 			} else
1088 				value = *(int*)buffer;
1089 
1090 			MutexLocker locker(inode->RequestLock());
1091 			cookie->SetNonBlocking(value != 0);
1092 			return B_OK;
1093 		}
1094 
1095 		case FIONREAD:
1096 		{
1097 			if (buffer == NULL)
1098 				return B_BAD_VALUE;
1099 
1100 			MutexLocker locker(inode->RequestLock());
1101 			int available = (int)inode->BytesAvailable();
1102 			locker.Unlock();
1103 
1104 			if (is_called_via_syscall()) {
1105 				if (!IS_USER_ADDRESS(buffer)
1106 					|| user_memcpy(buffer, &available, sizeof(available))
1107 						!= B_OK) {
1108 					return B_BAD_ADDRESS;
1109 				}
1110 			} else
1111 				*(int*)buffer = available;
1112 
1113 			return B_OK;
1114 		}
1115 
1116 		case B_SET_BLOCKING_IO:
1117 		case B_SET_NONBLOCKING_IO:
1118 		{
1119 			MutexLocker locker(inode->RequestLock());
1120 			cookie->SetNonBlocking(op == B_SET_NONBLOCKING_IO);
1121 			return B_OK;
1122 		}
1123 	}
1124 
1125 	return EINVAL;
1126 }
1127 
1128 
1129 static status_t
1130 fifo_set_flags(fs_volume* _volume, fs_vnode* _node, void* _cookie,
1131 	int flags)
1132 {
1133 	Inode* inode = (Inode*)_node->private_node;
1134 	file_cookie* cookie = (file_cookie*)_cookie;
1135 
1136 	TRACE("fifo_set_flags(vnode = %p, flags = %x)\n", _vnode, flags);
1137 
1138 	MutexLocker locker(inode->RequestLock());
1139 	cookie->open_mode = (cookie->open_mode & ~(O_APPEND | O_NONBLOCK)) | flags;
1140 	return B_OK;
1141 }
1142 
1143 
1144 static status_t
1145 fifo_select(fs_volume* _volume, fs_vnode* _node, void* _cookie,
1146 	uint8 event, selectsync* sync)
1147 {
1148 	file_cookie* cookie = (file_cookie*)_cookie;
1149 
1150 	TRACE("fifo_select(vnode = %p)\n", _node);
1151 	Inode* inode = (Inode*)_node->private_node;
1152 	if (!inode)
1153 		return B_ERROR;
1154 
1155 	MutexLocker locker(inode->RequestLock());
1156 	return inode->Select(event, sync, cookie->open_mode);
1157 }
1158 
1159 
1160 static status_t
1161 fifo_deselect(fs_volume* _volume, fs_vnode* _node, void* _cookie,
1162 	uint8 event, selectsync* sync)
1163 {
1164 	file_cookie* cookie = (file_cookie*)_cookie;
1165 
1166 	TRACE("fifo_deselect(vnode = %p)\n", _node);
1167 	Inode* inode = (Inode*)_node->private_node;
1168 	if (inode == NULL)
1169 		return B_ERROR;
1170 
1171 	MutexLocker locker(inode->RequestLock());
1172 	return inode->Deselect(event, sync, cookie->open_mode);
1173 }
1174 
1175 
// FIFOs are not pageable.
static bool
fifo_can_page(fs_volume* _volume, fs_vnode* _node, void* cookie)
{
	return false;
}
1181 
1182 
// Page I/O is not supported on FIFOs (see fifo_can_page()).
static status_t
fifo_read_pages(fs_volume* _volume, fs_vnode* _node, void* cookie, off_t pos,
	const iovec* vecs, size_t count, size_t* _numBytes)
{
	return B_NOT_ALLOWED;
}
1189 
1190 
// Page I/O is not supported on FIFOs (see fifo_can_page()).
static status_t
fifo_write_pages(fs_volume* _volume, fs_vnode* _node, void* cookie,
	off_t pos, const iovec* vecs, size_t count, size_t* _numBytes)
{
	return B_NOT_ALLOWED;
}
1197 
1198 
1199 static status_t
1200 fifo_get_super_vnode(fs_volume* volume, fs_vnode* vnode, fs_volume* superVolume,
1201 	fs_vnode* _superVnode)
1202 {
1203 	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
1204 	fs_vnode* superVnode = fifo->SuperVnode();
1205 
1206 	if (superVnode->ops->get_super_vnode != NULL) {
1207 		return superVnode->ops->get_super_vnode(volume, superVnode, superVolume,
1208 			_superVnode);
1209 	}
1210 
1211 	*_superVnode = *superVnode;
1212 
1213 	return B_OK;
1214 }
1215 
1216 
// Vnode operations table installed by create_fifo_vnode(). Hooks left
// NULL are either unsupported on FIFOs or fall back to VFS defaults.
static fs_vnode_ops sFIFOVnodeOps = {
	NULL,	// lookup
	NULL,	// get_vnode_name
					// TODO: This is suboptimal! We'd need to forward the
					// super node's hook, if it has got one.

	&fifo_put_vnode,
	&fifo_remove_vnode,

	&fifo_can_page,
	&fifo_read_pages,
	&fifo_write_pages,

	NULL,	// io()
	NULL,	// cancel_io()

	NULL,	// get_file_map

	/* common */
	&fifo_ioctl,
	&fifo_set_flags,
	&fifo_select,
	&fifo_deselect,
	&fifo_fsync,

	NULL,	// fs_read_link
	NULL,	// fs_symlink
	NULL,	// fs_link
	NULL,	// unlink
	NULL,	// rename

	NULL,	// fs_access()
	&fifo_read_stat,
	&fifo_write_stat,
	NULL,

	/* file */
	NULL,	// create()
	&fifo_open,
	&fifo_close,
	&fifo_free_cookie,
	&fifo_read,
	&fifo_write,

	/* directory */
	NULL,	// create_dir
	NULL,	// remove_dir
	NULL,	// open_dir
	NULL,	// close_dir
	NULL,	// free_dir_cookie
	NULL,	// read_dir
	NULL,	// rewind_dir

	/* attribute directory operations */
	NULL,	// open_attr_dir
	NULL,	// close_attr_dir
	NULL,	// free_attr_dir_cookie
	NULL,	// read_attr_dir
	NULL,	// rewind_attr_dir

	/* attribute operations */
	NULL,	// create_attr
	NULL,	// open_attr
	NULL,	// close_attr
	NULL,	// free_attr_cookie
	NULL,	// read_attr
	NULL,	// write_attr

	NULL,	// read_attr_stat
	NULL,	// write_attr_stat
	NULL,	// rename_attr
	NULL,	// remove_attr

	/* support for node and FS layers */
	NULL,	// create_special_node
	&fifo_get_super_vnode,
};
1294 
1295 
1296 }	// namespace fifo
1297 
1298 
1299 using namespace fifo;
1300 
1301 
1302 // #pragma mark -
1303 
1304 
1305 status_t
1306 create_fifo_vnode(fs_volume* superVolume, fs_vnode* vnode)
1307 {
1308 	FIFOInode* fifo = new(std::nothrow) FIFOInode(vnode);
1309 	if (fifo == NULL)
1310 		return B_NO_MEMORY;
1311 
1312 	status_t status = fifo->InitCheck();
1313 	if (status != B_OK) {
1314 		delete fifo;
1315 		return status;
1316 	}
1317 
1318 	vnode->private_node = fifo;
1319 	vnode->ops = &sFIFOVnodeOps;
1320 
1321 	return B_OK;
1322 }
1323 
1324 
/*!	Registers the "fifo" kernel debugger command (handled by
	Inode::Dump(int, char**)).
*/
void
fifo_init()
{
	add_debugger_command_etc("fifo", &Inode::Dump,
		"Print info about the specified FIFO node",
		"[ \"-d\" ] <address>\n"
		"Prints information about the FIFO node specified by address\n"
		"<address>. If \"-d\" is given, the data in the FIFO's ring buffer\n"
		"hexdumped as well.\n",
		0);
}
1336