xref: /haiku/src/system/kernel/fs/fifo.cpp (revision e4a557f372b21b348e0c6a2ae7157c1b73e0d738)
1 /*
2  * Copyright 2007-2013, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2003-2010, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 
8 #include "fifo.h"
9 
10 #include <limits.h>
11 #include <stdio.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <sys/ioctl.h>
15 #include <sys/stat.h>
16 
17 #include <new>
18 
19 #include <KernelExport.h>
20 #include <NodeMonitor.h>
21 #include <Select.h>
22 
23 #include <condition_variable.h>
24 #include <debug_hex_dump.h>
25 #include <lock.h>
26 #include <select_sync_pool.h>
27 #include <syscall_restart.h>
28 #include <team.h>
29 #include <thread.h>
30 #include <util/DoublyLinkedList.h>
31 #include <util/AutoLock.h>
32 #include <util/ring_buffer.h>
33 #include <vfs.h>
34 #include <vfs_defs.h>
35 #include <vm/vm.h>
36 
37 
38 //#define TRACE_FIFO
39 #ifdef TRACE_FIFO
40 #	define TRACE(x...) dprintf(x)
41 #else
42 #	define TRACE(x...)
43 #endif
44 
45 
46 namespace fifo {
47 
48 
49 struct file_cookie;
50 class Inode;
51 
52 
53 class RingBuffer {
54 public:
55 								RingBuffer();
56 								~RingBuffer();
57 
58 			status_t			CreateBuffer();
59 			void				DeleteBuffer();
60 
61 			ssize_t				Write(const void* buffer, size_t length,
62 									bool isUser);
63 			ssize_t				Read(void* buffer, size_t length, bool isUser);
64 			ssize_t				Peek(size_t offset, void* buffer,
65 									size_t length) const;
66 
67 			size_t				Readable() const;
68 			size_t				Writable() const;
69 
70 private:
71 			struct ring_buffer*	fBuffer;
72 };
73 
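// RingBuffer is a thin wrapper around the kernel's util/ring_buffer. The
// underlying buffer is created lazily in Inode::Open() once both a reader and
// a writer are present, deleted again in Inode::Close() when the last end goes
// away, and - outside of the kernel debugger - only accessed with the owning
// Inode's request lock held.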
74 
75 class ReadRequest : public DoublyLinkedListLinkImpl<ReadRequest> {
76 public:
77 	ReadRequest(file_cookie* cookie)
78 		:
79 		fThread(thread_get_current_thread()),
80 		fCookie(cookie),
81 		fNotified(true)
82 	{
83 		B_INITIALIZE_SPINLOCK(&fLock);
84 	}
85 
86 	void SetNotified(bool notified)
87 	{
88 		InterruptsSpinLocker _(fLock);
89 		fNotified = notified;
90 	}
91 
92 	void Notify(status_t status = B_OK)
93 	{
94 		InterruptsSpinLocker _(fLock);
95 		TRACE("ReadRequest %p::Notify(), fNotified %d\n", this, fNotified);
96 
97 		if (!fNotified) {
98 			thread_unblock(fThread, status);
99 			fNotified = true;
100 		}
101 	}
102 
103 	Thread* GetThread() const
104 	{
105 		return fThread;
106 	}
107 
108 	file_cookie* Cookie() const
109 	{
110 		return fCookie;
111 	}
112 
113 private:
114 	spinlock		fLock;
115 	Thread*			fThread;
116 	file_cookie*	fCookie;
117 	volatile bool	fNotified;
118 };
119 
120 
121 class WriteRequest : public DoublyLinkedListLinkImpl<WriteRequest> {
122 public:
123 	WriteRequest(Thread* thread, size_t minimalWriteCount)
124 		:
125 		fThread(thread),
126 		fMinimalWriteCount(minimalWriteCount)
127 	{
128 	}
129 
130 	Thread* GetThread() const
131 	{
132 		return fThread;
133 	}
134 
135 	size_t MinimalWriteCount() const
136 	{
137 		return fMinimalWriteCount;
138 	}
139 
140 private:
141 	Thread*	fThread;
142 	size_t	fMinimalWriteCount;
143 };
144 
145 
146 typedef DoublyLinkedList<ReadRequest> ReadRequestList;
147 typedef DoublyLinkedList<WriteRequest> WriteRequestList;
148 
149 
150 class Inode {
151 public:
152 								Inode();
153 								~Inode();
154 
155 			status_t			InitCheck();
156 
157 			bool				IsActive() const { return fActive; }
158 			timespec			CreationTime() const { return fCreationTime; }
159 			void				SetCreationTime(timespec creationTime)
160 									{ fCreationTime = creationTime; }
161 			timespec			ModificationTime() const
162 									{ return fModificationTime; }
163 			void				SetModificationTime(timespec modificationTime)
164 									{ fModificationTime = modificationTime; }
165 
166 			mutex*				RequestLock() { return &fRequestLock; }
167 
168 			status_t			WriteDataToBuffer(const void* data,
169 									size_t* _length, bool nonBlocking,
170 									bool isUser);
171 			status_t			ReadDataFromBuffer(void* data, size_t* _length,
172 									bool nonBlocking, bool isUser,
173 									ReadRequest& request);
174 			size_t				BytesAvailable() const
175 									{ return fBuffer.Readable(); }
176 			size_t				BytesWritable() const
177 									{ return fBuffer.Writable(); }
178 
179 			void				AddReadRequest(ReadRequest& request);
180 			void				RemoveReadRequest(ReadRequest& request);
181 			status_t			WaitForReadRequest(ReadRequest& request);
182 
183 			void				NotifyBytesRead(size_t bytes);
184 			void				NotifyReadDone();
185 			void				NotifyBytesWritten(size_t bytes);
186 			void				NotifyEndClosed(bool writer);
187 
188 			status_t			Open(int openMode);
189 			void				Close(file_cookie* cookie);
190 			int32				ReaderCount() const { return fReaderCount; }
191 			int32				WriterCount() const { return fWriterCount; }
192 
193 			status_t			Select(uint8 event, selectsync* sync,
194 									int openMode);
195 			status_t			Deselect(uint8 event, selectsync* sync,
196 									int openMode);
197 
198 			void				Dump(bool dumpData) const;
199 	static	int					Dump(int argc, char** argv);
200 
201 private:
202 			timespec			fCreationTime;
203 			timespec			fModificationTime;
204 
205 			RingBuffer			fBuffer;
206 
207 			ReadRequestList		fReadRequests;
208 			WriteRequestList	fWriteRequests;
209 
210 			mutex				fRequestLock;
211 
212 			ConditionVariable	fActiveCondition;
213 
214 			int32				fReaderCount;
215 			int32				fWriterCount;
216 			bool				fActive;
217 
218 			select_sync_pool*	fReadSelectSyncPool;
219 			select_sync_pool*	fWriteSelectSyncPool;
220 };
221 
222 
223 class FIFOInode : public Inode {
224 public:
225 	FIFOInode(fs_vnode* vnode)
226 		:
227 		Inode(),
228 		fSuperVnode(*vnode)
229 	{
230 	}
231 
232 	fs_vnode*	SuperVnode() { return &fSuperVnode; }
233 
234 private:
235 	fs_vnode	fSuperVnode;
236 };
237 
238 
239 struct file_cookie {
240 	int	open_mode;
241 			// guarded by Inode::fRequestLock
242 
243 	void SetNonBlocking(bool nonBlocking)
244 	{
245 		if (nonBlocking)
246 			open_mode |= O_NONBLOCK;
247 		else
248 			open_mode &= ~(int)O_NONBLOCK;
249 	}
250 };
251 
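// A file_cookie is allocated per open file descriptor (see fifo_open()).
// open_mode starts out as the flags passed to the open and may later change
// through fifo_set_flags() or the B_SET_BLOCKING_IO/B_SET_NONBLOCKING_IO
// ioctls, always while the inode's request lock is held.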
252 
253 // #pragma mark -
254 
255 
256 RingBuffer::RingBuffer()
257 	:
258 	fBuffer(NULL)
259 {
260 }
261 
262 
263 RingBuffer::~RingBuffer()
264 {
265 	DeleteBuffer();
266 }
267 
268 
269 status_t
270 RingBuffer::CreateBuffer()
271 {
272 	if (fBuffer != NULL)
273 		return B_OK;
274 
275 	fBuffer = create_ring_buffer(VFS_FIFO_BUFFER_CAPACITY);
276 	return fBuffer != NULL ? B_OK : B_NO_MEMORY;
277 }
278 
279 
280 void
281 RingBuffer::DeleteBuffer()
282 {
283 	if (fBuffer != NULL) {
284 		delete_ring_buffer(fBuffer);
285 		fBuffer = NULL;
286 	}
287 }
288 
289 
290 inline ssize_t
291 RingBuffer::Write(const void* buffer, size_t length, bool isUser)
292 {
293 	if (fBuffer == NULL)
294 		return B_NO_MEMORY;
295 	if (isUser && !IS_USER_ADDRESS(buffer))
296 		return B_BAD_ADDRESS;
297 
298 	return isUser
299 		? ring_buffer_user_write(fBuffer, (const uint8*)buffer, length)
300 		: ring_buffer_write(fBuffer, (const uint8*)buffer, length);
301 }
302 
303 
304 inline ssize_t
305 RingBuffer::Read(void* buffer, size_t length, bool isUser)
306 {
307 	if (fBuffer == NULL)
308 		return B_NO_MEMORY;
309 	if (isUser && !IS_USER_ADDRESS(buffer))
310 		return B_BAD_ADDRESS;
311 
312 	return isUser
313 		? ring_buffer_user_read(fBuffer, (uint8*)buffer, length)
314 		: ring_buffer_read(fBuffer, (uint8*)buffer, length);
315 }
316 
317 
318 inline ssize_t
319 RingBuffer::Peek(size_t offset, void* buffer, size_t length) const
320 {
321 	if (fBuffer == NULL)
322 		return B_NO_MEMORY;
323 
324 	return ring_buffer_peek(fBuffer, offset, (uint8*)buffer, length);
325 }
326 
327 
328 inline size_t
329 RingBuffer::Readable() const
330 {
331 	return fBuffer != NULL ? ring_buffer_readable(fBuffer) : 0;
332 }
333 
334 
335 inline size_t
336 RingBuffer::Writable() const
337 {
338 	return fBuffer != NULL ? ring_buffer_writable(fBuffer) : 0;
339 }
340 
341 
342 //	#pragma mark -
343 
344 
345 Inode::Inode()
346 	:
347 	fReadRequests(),
348 	fWriteRequests(),
349 	fReaderCount(0),
350 	fWriterCount(0),
351 	fActive(false),
352 	fReadSelectSyncPool(NULL),
353 	fWriteSelectSyncPool(NULL)
354 {
355 	fActiveCondition.Publish(this, "pipe");
356 	mutex_init(&fRequestLock, "pipe request");
357 
358 	bigtime_t time = real_time_clock();
359 	fModificationTime.tv_sec = time / 1000000;
360 	fModificationTime.tv_nsec = (time % 1000000) * 1000;
361 	fCreationTime = fModificationTime;
362 }
363 
364 
365 Inode::~Inode()
366 {
367 	fActiveCondition.Unpublish();
368 	mutex_destroy(&fRequestLock);
369 }
370 
371 
372 status_t
373 Inode::InitCheck()
374 {
375 	return B_OK;
376 }
377 
378 
379 /*!	Writes the specified data bytes to the inode's ring buffer. The
380 	request lock must be held when calling this method.
381 	Notifies readers if necessary, so that blocking readers will get started.
382 	Returns B_OK for success, B_BAD_ADDRESS if copying the data failed, and
383 	various wait errors (like B_WOULD_BLOCK in non-blocking mode). If the
384 	returned length is > 0, the returned error code can be ignored.
385 */
386 status_t
387 Inode::WriteDataToBuffer(const void* _data, size_t* _length, bool nonBlocking,
388 	bool isUser)
389 {
390 	const uint8* data = (const uint8*)_data;
391 	size_t dataSize = *_length;
392 	size_t& written = *_length;
393 	written = 0;
394 
395 	TRACE("Inode %p::WriteDataToBuffer(data = %p, bytes = %zu)\n", this, data,
396 		dataSize);
397 
398 	// A request of up to VFS_FIFO_ATOMIC_WRITE_SIZE bytes shall not be
399 	// interleaved with other writers' data.
400 	size_t minToWrite = 1;
401 	if (dataSize <= VFS_FIFO_ATOMIC_WRITE_SIZE)
402 		minToWrite = dataSize;
403 
404 	while (dataSize > 0) {
405 		// Wait until enough space in the buffer is available.
406 		while (!fActive
407 				|| (fBuffer.Writable() < minToWrite && fReaderCount > 0)) {
408 			if (nonBlocking)
409 				return B_WOULD_BLOCK;
410 
411 			ConditionVariableEntry entry;
412 			entry.Add(this);
413 
414 			WriteRequest request(thread_get_current_thread(), minToWrite);
415 			fWriteRequests.Add(&request);
416 
417 			mutex_unlock(&fRequestLock);
418 			status_t status = entry.Wait(B_CAN_INTERRUPT);
419 			mutex_lock(&fRequestLock);
420 
421 			fWriteRequests.Remove(&request);
422 
423 			if (status != B_OK)
424 				return status;
425 		}
426 
427 		// write only as long as there are readers left
428 		if (fActive && fReaderCount == 0) {
429 			if (written == 0)
430 				send_signal(find_thread(NULL), SIGPIPE);
431 			return EPIPE;
432 		}
433 
434 		// write as much as we can
435 
436 		size_t toWrite = (fActive ? fBuffer.Writable() : 0);
437 		if (toWrite > dataSize)
438 			toWrite = dataSize;
439 
440 		if (toWrite > 0) {
441 			ssize_t bytesWritten = fBuffer.Write(data, toWrite, isUser);
442 			if (bytesWritten < 0)
443 				return bytesWritten;
444 		}
445 
446 		data += toWrite;
447 		dataSize -= toWrite;
448 		written += toWrite;
449 
450 		NotifyBytesWritten(toWrite);
451 	}
452 
453 	return B_OK;
454 }
455 
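// Illustrative user-space view of the atomic-write guarantee above (the fds
// and buffers are hypothetical): because minToWrite equals the full request
// for writes of at most VFS_FIFO_ATOMIC_WRITE_SIZE bytes, such writes from
// concurrent writers are never interleaved, comparable to POSIX's PIPE_BUF
// guarantee:
//
//   write(fdWriterA, chunkA, sizeof(chunkA));	// <= VFS_FIFO_ATOMIC_WRITE_SIZE
//   write(fdWriterB, chunkB, sizeof(chunkB));	// <= VFS_FIFO_ATOMIC_WRITE_SIZE
//   // A reader sees each chunk contiguously, in some order, never a mix.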
456 
457 status_t
458 Inode::ReadDataFromBuffer(void* data, size_t* _length, bool nonBlocking,
459 	bool isUser, ReadRequest& request)
460 {
461 	size_t dataSize = *_length;
462 	*_length = 0;
463 
464 	// wait until our request is first in queue
465 	status_t error;
466 	if (fReadRequests.Head() != &request) {
467 		if (nonBlocking)
468 			return B_WOULD_BLOCK;
469 
470 		TRACE("Inode %p::%s(): wait for request %p to become the first "
471 			"request.\n", this, __FUNCTION__, &request);
472 
473 		error = WaitForReadRequest(request);
474 		if (error != B_OK)
475 			return error;
476 	}
477 
478 	// wait until data are available
479 	while (fBuffer.Readable() == 0) {
480 		if (nonBlocking)
481 			return B_WOULD_BLOCK;
482 
483 		if (fActive && fWriterCount == 0)
484 			return B_OK;
485 
486 		TRACE("Inode %p::%s(): wait for data, request %p\n", this, __FUNCTION__,
487 			&request);
488 
489 		error = WaitForReadRequest(request);
490 		if (error != B_OK)
491 			return error;
492 	}
493 
494 	// read as much as we can
495 	size_t toRead = fBuffer.Readable();
496 	if (toRead > dataSize)
497 		toRead = dataSize;
498 
499 	ssize_t bytesRead = fBuffer.Read(data, toRead, isUser);
500 	if (bytesRead < 0)
501 		return bytesRead;
502 
503 	NotifyBytesRead(toRead);
504 
505 	*_length = toRead;
506 
507 	return B_OK;
508 }
509 
510 
511 void
512 Inode::AddReadRequest(ReadRequest& request)
513 {
514 	fReadRequests.Add(&request);
515 }
516 
517 
518 void
519 Inode::RemoveReadRequest(ReadRequest& request)
520 {
521 	fReadRequests.Remove(&request);
522 }
523 
524 
525 status_t
526 Inode::WaitForReadRequest(ReadRequest& request)
527 {
528 	// prepare the current thread for blocking
529 	thread_prepare_to_block(thread_get_current_thread(), B_CAN_INTERRUPT,
530 		THREAD_BLOCK_TYPE_OTHER, "fifo read request");
531 
532 	request.SetNotified(false);
533 
534 	// wait
535 	mutex_unlock(&fRequestLock);
536 	status_t status = thread_block();
537 
538 	// Before going to lock again, we need to make sure no one tries to
539 	// unblock us. Otherwise that would screw with mutex_lock().
540 	request.SetNotified(true);
541 
542 	mutex_lock(&fRequestLock);
543 
544 	return status;
545 }
546 
547 
548 void
549 Inode::NotifyBytesRead(size_t bytes)
550 {
551 	// notify writers if something can be written now
552 	size_t writable = fBuffer.Writable();
553 	if (bytes > 0) {
554 		// notify select()ors only if nothing was writable before
555 		if (writable == bytes) {
556 			if (fWriteSelectSyncPool)
557 				notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_WRITE);
558 		}
559 
560 		// If any of the waiting writers has a minimal write count that has
561 		// now become satisfied, we notify all of them (condition variables
562 		// don't support doing that selectively).
563 		WriteRequest* request;
564 		WriteRequestList::Iterator iterator = fWriteRequests.GetIterator();
565 		while ((request = iterator.Next()) != NULL) {
566 			size_t minWriteCount = request->MinimalWriteCount();
567 			if (minWriteCount > 0 && minWriteCount <= writable
568 					&& minWriteCount > writable - bytes) {
569 				fActiveCondition.NotifyAll();
570 				break;
571 			}
572 		}
573 	}
574 }
575 
576 
577 void
578 Inode::NotifyReadDone()
579 {
580 	// notify the next reader if there's still something to be read
581 	if (fBuffer.Readable() > 0) {
582 		if (ReadRequest* request = fReadRequests.First())
583 			request->Notify();
584 	}
585 }
586 
587 
588 void
589 Inode::NotifyBytesWritten(size_t bytes)
590 {
591 	// notify readers if something can be read now
592 	if (bytes > 0 && fBuffer.Readable() == bytes) {
593 		if (fReadSelectSyncPool)
594 			notify_select_event_pool(fReadSelectSyncPool, B_SELECT_READ);
595 
596 		if (ReadRequest* request = fReadRequests.First())
597 			request->Notify();
598 	}
599 }
600 
601 
602 void
603 Inode::NotifyEndClosed(bool writer)
604 {
605 	TRACE("Inode %p::%s(%s)\n", this, __FUNCTION__,
606 		writer ? "writer" : "reader");
607 
608 	if (writer) {
609 		// Our last writer has been closed; if the pipe
610 		// contains no data, unlock all waiting readers
611 		TRACE("  buffer readable: %zu\n", fBuffer.Readable());
612 		if (fBuffer.Readable() == 0) {
613 			ReadRequestList::Iterator iterator = fReadRequests.GetIterator();
614 			while (ReadRequest* request = iterator.Next())
615 				request->Notify();
616 
617 			if (fReadSelectSyncPool)
618 				notify_select_event_pool(fReadSelectSyncPool, B_SELECT_DISCONNECTED);
619 
620 		}
621 	} else {
622 		// Last reader is gone. Wake up all writers.
623 		fActiveCondition.NotifyAll();
624 
625 		if (fWriteSelectSyncPool)
626 			notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_ERROR);
627 	}
628 }
629 
630 
631 status_t
632 Inode::Open(int openMode)
633 {
634 	MutexLocker locker(RequestLock());
635 
636 	if ((openMode & O_ACCMODE) == O_WRONLY || (openMode & O_ACCMODE) == O_RDWR)
637 		fWriterCount++;
638 
639 	if ((openMode & O_ACCMODE) == O_RDONLY || (openMode & O_ACCMODE) == O_RDWR)
640 		fReaderCount++;
641 
642 	bool shouldWait = false;
643 	if ((openMode & O_ACCMODE) == O_WRONLY && fReaderCount == 0) {
644 		if ((openMode & O_NONBLOCK) != 0)
645 			return ENXIO;
646 		shouldWait = true;
647 	}
648 	if ((openMode & O_ACCMODE) == O_RDONLY && fWriterCount == 0
649 		&& (openMode & O_NONBLOCK) == 0) {
650 		shouldWait = true;
651 	}
652 	if (shouldWait) {
653 		// prepare for waiting for the condition variable.
654 		ConditionVariableEntry waitEntry;
655 		fActiveCondition.Add(&waitEntry);
656 		locker.Unlock();
657 		status_t status = waitEntry.Wait(B_CAN_INTERRUPT);
658 		if (status != B_OK)
659 			return status;
660 		locker.Lock();
661 	}
662 
663 	if (fReaderCount > 0 && fWriterCount > 0) {
664 		TRACE("Inode %p::Open(): fifo becomes active\n", this);
665 		fBuffer.CreateBuffer();
666 		fActive = true;
667 
668 		// notify all waiting writers that they can start
669 		if (fWriteSelectSyncPool)
670 			notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_WRITE);
671 		fActiveCondition.NotifyAll();
672 	}
673 	return B_OK;
674 }
675 
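// Illustrative user-space view of the open semantics above (the path is
// hypothetical): a non-blocking write-only open fails while no reader has the
// FIFO open, and blocking opens wait on fActiveCondition for the other end:
//
//   int fdW = open("/path/to/fifo", O_WRONLY | O_NONBLOCK);
//       // fails with ENXIO while no reader has the FIFO open
//   int fdR = open("/path/to/fifo", O_RDONLY);
//       // blocks until a writer arrives (unless O_NONBLOCK is given)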
676 
677 void
678 Inode::Close(file_cookie* cookie)
679 {
680 
681 	MutexLocker locker(RequestLock());
682 
683 	int openMode = cookie->open_mode;
684 	TRACE("Inode %p::Close(openMode = %" B_PRId32 ")\n", this, openMode);
685 
686 	// Notify all read requests still pending on this cookie
687 	ReadRequestList::Iterator iterator = fReadRequests.GetIterator();
688 	while (ReadRequest* request = iterator.Next()) {
689 		if (request->Cookie() == cookie)
690 			request->Notify(B_FILE_ERROR);
691 	}
692 
693 	if ((openMode & O_ACCMODE) == O_WRONLY || (openMode & O_ACCMODE) == O_RDWR) {
694 		if (--fWriterCount == 0)
695 			NotifyEndClosed(true);
696 	}
697 
698 	if ((openMode & O_ACCMODE) == O_RDONLY || (openMode & O_ACCMODE) == O_RDWR) {
699 		if (--fReaderCount == 0)
700 			NotifyEndClosed(false);
701 	}
702 
703 	if (fWriterCount == 0) {
704 		// Notify any writers still waiting, so they stop
705 		// TODO: This only works reliably if there is only one writer - we could
706 		// do the same thing as done for the read requests.
707 		fActiveCondition.NotifyAll(B_FILE_ERROR);
708 	}
709 
710 	if (fReaderCount == 0 && fWriterCount == 0) {
711 		fActive = false;
712 		fBuffer.DeleteBuffer();
713 	}
714 }
715 
716 
717 status_t
718 Inode::Select(uint8 event, selectsync* sync, int openMode)
719 {
720 	bool writer = true;
721 	select_sync_pool** pool;
722 	// B_SELECT_READ can happen on write-only opened fds, so restrict B_SELECT_READ to O_RDWR
723 	if ((event == B_SELECT_READ && (openMode & O_RWMASK) == O_RDWR)
724 		|| (openMode & O_RWMASK) == O_RDONLY) {
725 		pool = &fReadSelectSyncPool;
726 		writer = false;
727 	} else if ((openMode & O_RWMASK) == O_RDWR || (openMode & O_RWMASK) == O_WRONLY) {
728 		pool = &fWriteSelectSyncPool;
729 	} else
730 		return B_NOT_ALLOWED;
731 
732 	if (add_select_sync_pool_entry(pool, sync, event) != B_OK)
733 		return B_ERROR;
734 
735 	// signal right away, if the condition holds already
736 	if (writer) {
737 		if ((event == B_SELECT_WRITE && fBuffer.Writable() > 0)
738 			|| (event == B_SELECT_ERROR && fReaderCount == 0)) {
739 			return notify_select_event(sync, event);
740 		}
741 	} else {
742 		if ((event == B_SELECT_READ && fBuffer.Readable() > 0)
743 			|| (event == B_SELECT_DISCONNECTED && fWriterCount == 0)) {
744 			return notify_select_event(sync, event);
745 		}
746 	}
747 
748 	return B_OK;
749 }
750 
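// How the select pools above get triggered: the write pool is notified with
// B_SELECT_WRITE when space frees up (NotifyBytesRead()) or the FIFO becomes
// active (Open()), and with B_SELECT_ERROR when the last reader goes away
// (NotifyEndClosed()); the read pool receives B_SELECT_READ when data arrives
// (NotifyBytesWritten()) and B_SELECT_DISCONNECTED when the last writer
// closes on an empty buffer (NotifyEndClosed()).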
751 
752 status_t
753 Inode::Deselect(uint8 event, selectsync* sync, int openMode)
754 {
755 	select_sync_pool** pool;
756 	if ((event == B_SELECT_READ && (openMode & O_RWMASK) == O_RDWR)
757 		|| (openMode & O_RWMASK) == O_RDONLY) {
758 		pool = &fReadSelectSyncPool;
759 	} else if ((openMode & O_RWMASK) == O_RDWR || (openMode & O_RWMASK) == O_WRONLY) {
760 		pool = &fWriteSelectSyncPool;
761 	} else
762 		return B_NOT_ALLOWED;
763 
764 	remove_select_sync_pool_entry(pool, sync, event);
765 	return B_OK;
766 }
767 
768 
769 void
770 Inode::Dump(bool dumpData) const
771 {
772 	kprintf("FIFO %p\n", this);
773 	kprintf("  active:        %s\n", fActive ? "true" : "false");
774 	kprintf("  readers:       %" B_PRId32 "\n", fReaderCount);
775 	kprintf("  writers:       %" B_PRId32 "\n", fWriterCount);
776 
777 	if (!fReadRequests.IsEmpty()) {
778 		kprintf(" pending readers:\n");
779 		for (ReadRequestList::ConstIterator it = fReadRequests.GetIterator();
780 			ReadRequest* request = it.Next();) {
781 			kprintf("    %p: thread %" B_PRId32 ", cookie: %p\n", request,
782 				request->GetThread()->id, request->Cookie());
783 		}
784 	}
785 
786 	if (!fWriteRequests.IsEmpty()) {
787 		kprintf(" pending writers:\n");
788 		for (WriteRequestList::ConstIterator it = fWriteRequests.GetIterator();
789 			WriteRequest* request = it.Next();) {
790 			kprintf("    %p:  thread %" B_PRId32 ", min count: %zu\n", request,
791 				request->GetThread()->id, request->MinimalWriteCount());
792 		}
793 	}
794 
795 	kprintf("  %zu bytes buffered\n", fBuffer.Readable());
796 
797 	if (dumpData && fBuffer.Readable() > 0) {
798 		struct DataProvider : BKernel::HexDumpDataProvider {
799 			DataProvider(const RingBuffer& buffer)
800 				:
801 				fBuffer(buffer),
802 				fOffset(0)
803 			{
804 			}
805 
806 			virtual bool HasMoreData() const
807 			{
808 				return fOffset < fBuffer.Readable();
809 			}
810 
811 			virtual uint8 NextByte()
812 			{
813 				uint8 byte = '\0';
814 				if (fOffset < fBuffer.Readable()) {
815 					fBuffer.Peek(fOffset, &byte, 1);
816 					fOffset++;
817 				}
818 				return byte;
819 			}
820 
821 			virtual bool GetAddressString(char* buffer, size_t bufferSize) const
822 			{
823 				snprintf(buffer, bufferSize, "    %4zx", fOffset);
824 				return true;
825 			}
826 
827 		private:
828 			const RingBuffer&	fBuffer;
829 			size_t				fOffset;
830 		};
831 
832 		DataProvider dataProvider(fBuffer);
833 		BKernel::print_hex_dump(dataProvider, fBuffer.Readable());
834 	}
835 }
836 
837 
838 /*static*/ int
839 Inode::Dump(int argc, char** argv)
840 {
841 	bool dumpData = false;
842 	int argi = 1;
843 	if (argi < argc && strcmp(argv[argi], "-d") == 0) {
844 		dumpData = true;
845 		argi++;
846 	}
847 
848 	if (argi >= argc || argi + 2 < argc) {
849 		print_debugger_command_usage(argv[0]);
850 		return 0;
851 	}
852 
853 	Inode* node = (Inode*)parse_expression(argv[argi]);
854 	if (IS_USER_ADDRESS(node)) {
855 		kprintf("invalid FIFO address\n");
856 		return 0;
857 	}
858 
859 	node->Dump(dumpData);
860 	return 0;
861 }
862 
863 
864 //	#pragma mark - vnode API
865 
866 
867 static status_t
868 fifo_put_vnode(fs_volume* volume, fs_vnode* vnode, bool reenter)
869 {
870 	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
871 	fs_vnode* superVnode = fifo->SuperVnode();
872 
873 	status_t error = B_OK;
874 	if (superVnode->ops->put_vnode != NULL)
875 		error = superVnode->ops->put_vnode(volume, superVnode, reenter);
876 
877 	delete fifo;
878 
879 	return error;
880 }
881 
882 
883 static status_t
884 fifo_remove_vnode(fs_volume* volume, fs_vnode* vnode, bool reenter)
885 {
886 	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
887 	fs_vnode* superVnode = fifo->SuperVnode();
888 
889 	status_t error = B_OK;
890 	if (superVnode->ops->remove_vnode != NULL)
891 		error = superVnode->ops->remove_vnode(volume, superVnode, reenter);
892 
893 	delete fifo;
894 
895 	return error;
896 }
897 
898 
899 static status_t
900 fifo_open(fs_volume* _volume, fs_vnode* _node, int openMode,
901 	void** _cookie)
902 {
903 	Inode* inode = (Inode*)_node->private_node;
904 
905 	TRACE("fifo_open(): node = %p, openMode = %d\n", inode, openMode);
906 
907 	file_cookie* cookie = (file_cookie*)malloc(sizeof(file_cookie));
908 	if (cookie == NULL)
909 		return B_NO_MEMORY;
910 
911 	TRACE("  open cookie = %p\n", cookie);
912 	cookie->open_mode = openMode;
913 	status_t status = inode->Open(openMode);
914 	if (status != B_OK) {
915 		free(cookie);
916 		return status;
917 	}
918 
919 	*_cookie = (void*)cookie;
920 
921 	return B_OK;
922 }
923 
924 
925 static status_t
926 fifo_close(fs_volume* volume, fs_vnode* vnode, void* _cookie)
927 {
928 	file_cookie* cookie = (file_cookie*)_cookie;
929 	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
930 
931 	fifo->Close(cookie);
932 
933 	return B_OK;
934 }
935 
936 
937 static status_t
938 fifo_free_cookie(fs_volume* _volume, fs_vnode* _node, void* _cookie)
939 {
940 	file_cookie* cookie = (file_cookie*)_cookie;
941 
942 	TRACE("fifo_freecookie: entry vnode %p, cookie %p\n", _node, _cookie);
943 
944 	free(cookie);
945 
946 	return B_OK;
947 }
948 
949 
950 static status_t
951 fifo_fsync(fs_volume* _volume, fs_vnode* _node)
952 {
953 	return B_BAD_VALUE;
954 }
955 
956 
957 static status_t
958 fifo_read(fs_volume* _volume, fs_vnode* _node, void* _cookie,
959 	off_t /*pos*/, void* buffer, size_t* _length)
960 {
961 	file_cookie* cookie = (file_cookie*)_cookie;
962 	Inode* inode = (Inode*)_node->private_node;
963 
964 	TRACE("fifo_read(vnode = %p, cookie = %p, length = %lu, mode = %d)\n",
965 		inode, cookie, *_length, cookie->open_mode);
966 
967 	MutexLocker locker(inode->RequestLock());
968 
969 	if (inode->IsActive() && inode->WriterCount() == 0) {
970 		// as long as there is no writer and the pipe is empty,
971 		// we always just return 0 to indicate end of file
972 		if (inode->BytesAvailable() == 0) {
973 			*_length = 0;
974 			return B_OK;
975 		}
976 	}
977 
978 	// issue read request
979 
980 	ReadRequest request(cookie);
981 	inode->AddReadRequest(request);
982 
983 	TRACE("  issue read request %p\n", &request);
984 
985 	size_t length = *_length;
986 	status_t status = inode->ReadDataFromBuffer(buffer, &length,
987 		(cookie->open_mode & O_NONBLOCK) != 0, is_called_via_syscall(),
988 		request);
989 
990 	inode->RemoveReadRequest(request);
991 	inode->NotifyReadDone();
992 
993 	TRACE("  done reading request %p, length %zu\n", &request, length);
994 
995 	if (length > 0)
996 		status = B_OK;
997 
998 	*_length = length;
999 	return status;
1000 }
1001 
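// Illustrative user-space consequence of the early return above (fifoFd is
// hypothetical): once the last writer has closed and the buffer is drained,
// read() reports end-of-file rather than blocking:
//
//   char buf[128];
//   ssize_t n = read(fifoFd, buf, sizeof(buf));
//   // n == 0 means "no writers left and nothing buffered" (EOF); with
//   // O_NONBLOCK and writers still attached it would fail with B_WOULD_BLOCK.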
1002 
1003 static status_t
1004 fifo_write(fs_volume* _volume, fs_vnode* _node, void* _cookie,
1005 	off_t /*pos*/, const void* buffer, size_t* _length)
1006 {
1007 	file_cookie* cookie = (file_cookie*)_cookie;
1008 	Inode* inode = (Inode*)_node->private_node;
1009 
1010 	TRACE("fifo_write(vnode = %p, cookie = %p, length = %lu)\n",
1011 		_node, cookie, *_length);
1012 
1013 	MutexLocker locker(inode->RequestLock());
1014 
1015 	size_t length = *_length;
1016 	if (length == 0)
1017 		return B_OK;
1018 
1019 	// copy data into ring buffer
1020 	status_t status = inode->WriteDataToBuffer(buffer, &length,
1021 		(cookie->open_mode & O_NONBLOCK) != 0, is_called_via_syscall());
1022 
1023 	if (length > 0)
1024 		status = B_OK;
1025 
1026 	*_length = length;
1027 	return status;
1028 }
1029 
1030 
1031 static status_t
1032 fifo_read_stat(fs_volume* volume, fs_vnode* vnode, struct ::stat* st)
1033 {
1034 	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
1035 	fs_vnode* superVnode = fifo->SuperVnode();
1036 
1037 	if (superVnode->ops->read_stat == NULL)
1038 		return B_BAD_VALUE;
1039 
1040 	status_t error = superVnode->ops->read_stat(volume, superVnode, st);
1041 	if (error != B_OK)
1042 		return error;
1043 
1044 
1045 	MutexLocker locker(fifo->RequestLock());
1046 
1047 	st->st_size = fifo->BytesAvailable();
1048 
1049 	st->st_blksize = 4096;
1050 
1051 	// TODO: Just pass the changes to our modification time on to the super node.
1052 	st->st_atim.tv_sec = time(NULL);
1053 	st->st_atim.tv_nsec = 0;
1054 	st->st_mtim = st->st_ctim = fifo->ModificationTime();
1055 
1056 	return B_OK;
1057 }
1058 
1059 
1060 static status_t
1061 fifo_write_stat(fs_volume* volume, fs_vnode* vnode, const struct ::stat* st,
1062 	uint32 statMask)
1063 {
1064 	// we cannot change the size of anything
1065 	if ((statMask & B_STAT_SIZE) != 0)
1066 		return B_BAD_VALUE;
1067 
1068 	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
1069 	fs_vnode* superVnode = fifo->SuperVnode();
1070 
1071 	if (superVnode->ops->write_stat == NULL)
1072 		return B_BAD_VALUE;
1073 
1074 	status_t error = superVnode->ops->write_stat(volume, superVnode, st,
1075 		statMask);
1076 	if (error != B_OK)
1077 		return error;
1078 
1079 	return B_OK;
1080 }
1081 
1082 
1083 static status_t
1084 fifo_ioctl(fs_volume* _volume, fs_vnode* _node, void* _cookie, uint32 op,
1085 	void* buffer, size_t length)
1086 {
1087 	file_cookie* cookie = (file_cookie*)_cookie;
1088 	Inode* inode = (Inode*)_node->private_node;
1089 
1090 	TRACE("fifo_ioctl: vnode %p, cookie %p, op %" B_PRId32 ", buf %p, len %ld\n",
1091 		_node, _cookie, op, buffer, length);
1092 
1093 	switch (op) {
1094 		case FIONREAD:
1095 		{
1096 			if (buffer == NULL)
1097 				return B_BAD_VALUE;
1098 
1099 			MutexLocker locker(inode->RequestLock());
1100 			int available = (int)inode->BytesAvailable();
1101 			locker.Unlock();
1102 
1103 			if (is_called_via_syscall()) {
1104 				if (!IS_USER_ADDRESS(buffer)
1105 					|| user_memcpy(buffer, &available, sizeof(available))
1106 						!= B_OK) {
1107 					return B_BAD_ADDRESS;
1108 				}
1109 			} else
1110 				*(int*)buffer = available;
1111 
1112 			return B_OK;
1113 		}
1114 
1115 		case B_SET_BLOCKING_IO:
1116 		case B_SET_NONBLOCKING_IO:
1117 		{
1118 			MutexLocker locker(inode->RequestLock());
1119 			cookie->SetNonBlocking(op == B_SET_NONBLOCKING_IO);
1120 			return B_OK;
1121 		}
1122 	}
1123 
1124 	return EINVAL;
1125 }
1126 
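// Illustrative user-space use of the FIONREAD path above (fifoFd is
// hypothetical):
//
//   int available = 0;
//   if (ioctl(fifoFd, FIONREAD, &available) == 0)
//   	printf("%d bytes currently buffered\n", available);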
1127 
1128 static status_t
1129 fifo_set_flags(fs_volume* _volume, fs_vnode* _node, void* _cookie,
1130 	int flags)
1131 {
1132 	Inode* inode = (Inode*)_node->private_node;
1133 	file_cookie* cookie = (file_cookie*)_cookie;
1134 
1135 	TRACE("fifo_set_flags(vnode = %p, flags = %x)\n", _node, flags);
1136 
1137 	MutexLocker locker(inode->RequestLock());
1138 	cookie->open_mode = (cookie->open_mode & ~(O_APPEND | O_NONBLOCK)) | flags;
1139 	return B_OK;
1140 }
1141 
1142 
1143 static status_t
1144 fifo_select(fs_volume* _volume, fs_vnode* _node, void* _cookie,
1145 	uint8 event, selectsync* sync)
1146 {
1147 	file_cookie* cookie = (file_cookie*)_cookie;
1148 
1149 	TRACE("fifo_select(vnode = %p)\n", _node);
1150 	Inode* inode = (Inode*)_node->private_node;
1151 	if (!inode)
1152 		return B_ERROR;
1153 
1154 	MutexLocker locker(inode->RequestLock());
1155 	return inode->Select(event, sync, cookie->open_mode);
1156 }
1157 
1158 
1159 static status_t
1160 fifo_deselect(fs_volume* _volume, fs_vnode* _node, void* _cookie,
1161 	uint8 event, selectsync* sync)
1162 {
1163 	file_cookie* cookie = (file_cookie*)_cookie;
1164 
1165 	TRACE("fifo_deselect(vnode = %p)\n", _node);
1166 	Inode* inode = (Inode*)_node->private_node;
1167 	if (inode == NULL)
1168 		return B_ERROR;
1169 
1170 	MutexLocker locker(inode->RequestLock());
1171 	return inode->Deselect(event, sync, cookie->open_mode);
1172 }
1173 
1174 
1175 static bool
1176 fifo_can_page(fs_volume* _volume, fs_vnode* _node, void* cookie)
1177 {
1178 	return false;
1179 }
1180 
1181 
1182 static status_t
1183 fifo_read_pages(fs_volume* _volume, fs_vnode* _node, void* cookie, off_t pos,
1184 	const iovec* vecs, size_t count, size_t* _numBytes)
1185 {
1186 	return B_NOT_ALLOWED;
1187 }
1188 
1189 
1190 static status_t
1191 fifo_write_pages(fs_volume* _volume, fs_vnode* _node, void* cookie,
1192 	off_t pos, const iovec* vecs, size_t count, size_t* _numBytes)
1193 {
1194 	return B_NOT_ALLOWED;
1195 }
1196 
1197 
1198 static status_t
1199 fifo_get_super_vnode(fs_volume* volume, fs_vnode* vnode, fs_volume* superVolume,
1200 	fs_vnode* _superVnode)
1201 {
1202 	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
1203 	fs_vnode* superVnode = fifo->SuperVnode();
1204 
1205 	if (superVnode->ops->get_super_vnode != NULL) {
1206 		return superVnode->ops->get_super_vnode(volume, superVnode, superVolume,
1207 			_superVnode);
1208 	}
1209 
1210 	*_superVnode = *superVnode;
1211 
1212 	return B_OK;
1213 }
1214 
1215 
1216 static fs_vnode_ops sFIFOVnodeOps = {
1217 	NULL,	// lookup
1218 	NULL,	// get_vnode_name
1219 					// TODO: This is suboptimal! We'd need to forward the
1220 					// super node's hook, if it has one.
1221 
1222 	&fifo_put_vnode,
1223 	&fifo_remove_vnode,
1224 
1225 	&fifo_can_page,
1226 	&fifo_read_pages,
1227 	&fifo_write_pages,
1228 
1229 	NULL,	// io()
1230 	NULL,	// cancel_io()
1231 
1232 	NULL,	// get_file_map
1233 
1234 	/* common */
1235 	&fifo_ioctl,
1236 	&fifo_set_flags,
1237 	&fifo_select,
1238 	&fifo_deselect,
1239 	&fifo_fsync,
1240 
1241 	NULL,	// fs_read_link
1242 	NULL,	// fs_symlink
1243 	NULL,	// fs_link
1244 	NULL,	// unlink
1245 	NULL,	// rename
1246 
1247 	NULL,	// fs_access()
1248 	&fifo_read_stat,
1249 	&fifo_write_stat,
1250 	NULL,
1251 
1252 	/* file */
1253 	NULL,	// create()
1254 	&fifo_open,
1255 	&fifo_close,
1256 	&fifo_free_cookie,
1257 	&fifo_read,
1258 	&fifo_write,
1259 
1260 	/* directory */
1261 	NULL,	// create_dir
1262 	NULL,	// remove_dir
1263 	NULL,	// open_dir
1264 	NULL,	// close_dir
1265 	NULL,	// free_dir_cookie
1266 	NULL,	// read_dir
1267 	NULL,	// rewind_dir
1268 
1269 	/* attribute directory operations */
1270 	NULL,	// open_attr_dir
1271 	NULL,	// close_attr_dir
1272 	NULL,	// free_attr_dir_cookie
1273 	NULL,	// read_attr_dir
1274 	NULL,	// rewind_attr_dir
1275 
1276 	/* attribute operations */
1277 	NULL,	// create_attr
1278 	NULL,	// open_attr
1279 	NULL,	// close_attr
1280 	NULL,	// free_attr_cookie
1281 	NULL,	// read_attr
1282 	NULL,	// write_attr
1283 
1284 	NULL,	// read_attr_stat
1285 	NULL,	// write_attr_stat
1286 	NULL,	// rename_attr
1287 	NULL,	// remove_attr
1288 
1289 	/* support for node and FS layers */
1290 	NULL,	// create_special_node
1291 	&fifo_get_super_vnode,
1292 };
1293 
1294 
1295 }	// namespace fifo
1296 
1297 
1298 using namespace fifo;
1299 
1300 
1301 // #pragma mark -
1302 
1303 
1304 status_t
1305 create_fifo_vnode(fs_volume* superVolume, fs_vnode* vnode)
1306 {
1307 	FIFOInode* fifo = new(std::nothrow) FIFOInode(vnode);
1308 	if (fifo == NULL)
1309 		return B_NO_MEMORY;
1310 
1311 	status_t status = fifo->InitCheck();
1312 	if (status != B_OK) {
1313 		delete fifo;
1314 		return status;
1315 	}
1316 
1317 	vnode->private_node = fifo;
1318 	vnode->ops = &sFIFOVnodeOps;
1319 
1320 	return B_OK;
1321 }
1322 
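// create_fifo_vnode() is how the VFS layers FIFO behaviour over an arbitrary
// file system: the node's original ops are kept in the FIFOInode as the
// "super vnode" (stat and vnode lifetime hooks are forwarded to them), while
// the I/O related hooks are replaced by sFIFOVnodeOps above.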
1323 
1324 void
1325 fifo_init()
1326 {
1327 	add_debugger_command_etc("fifo", &Inode::Dump,
1328 		"Print info about the specified FIFO node",
1329 		"[ \"-d\" ] <address>\n"
1330 		"Prints information about the FIFO node specified by address\n"
1331 		"<address>. If \"-d\" is given, the data in the FIFO's ring buffer\n"
1332 		"is hexdumped as well.\n",
1333 		0);
1334 }
1335