/*
 * Copyright 2007-2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>

#include <new>

#include <KernelExport.h>
#include <NodeMonitor.h>
#include <Select.h>

#include <condition_variable.h>
#include <debug.h>
#include <khash.h>
#include <lock.h>
#include <select_sync_pool.h>
#include <team.h>
#include <thread.h>
#include <util/DoublyLinkedList.h>
#include <util/AutoLock.h>
#include <util/ring_buffer.h>
#include <vfs.h>
#include <vm.h>

#include "fifo.h"


//#define TRACE_FIFO
#ifdef TRACE_FIFO
#	define TRACE(x) dprintf x
#else
#	define TRACE(x)
#endif


#define PIPEFS_HASH_SIZE		16
#define PIPEFS_MAX_BUFFER_SIZE	32768


// TODO: PIPE_BUF is supposed to be defined somewhere else.
#define PIPE_BUF	_POSIX_PIPE_BUF


namespace fifo {

class Inode;

class RingBuffer {
public:
	RingBuffer();
	~RingBuffer();

	status_t CreateBuffer();
	void DeleteBuffer();

	size_t Write(const void *buffer, size_t length);
	size_t Read(void *buffer, size_t length);
	ssize_t UserWrite(const void *buffer, ssize_t length);
	ssize_t UserRead(void *buffer, ssize_t length);

	size_t Readable() const;
	size_t Writable() const;

private:
	struct ring_buffer *fBuffer;
};


class ReadRequest : public DoublyLinkedListLinkImpl<ReadRequest> {
public:
	ReadRequest()
		:
		fThread(find_thread(NULL)),
		fNotified(true)
			// start out "notified": Notify() must not unblock us before
			// SetUnnotified() has been called in WaitForReadRequest()
	{
	}

	void SetUnnotified() { fNotified = false; }

	void Notify()
	{
		if (!fNotified) {
			thread_unblock(fThread, B_OK);
			fNotified = true;
		}
	}

private:
	thread_id fThread;
	bool fNotified;
};


class WriteRequest : public DoublyLinkedListLinkImpl<WriteRequest> {
public:
	WriteRequest(size_t minimalWriteCount)
		:
		fMinimalWriteCount(minimalWriteCount)
	{
	}

	size_t MinimalWriteCount() const
	{
		return fMinimalWriteCount;
	}

private:
	size_t fMinimalWriteCount;
};


typedef DoublyLinkedList<ReadRequest> ReadRequestList;
typedef DoublyLinkedList<WriteRequest> WriteRequestList;


class Inode {
public:
	Inode();
	~Inode();

	status_t InitCheck();

	bool IsActive() const { return fActive; }

	time_t CreationTime() const { return fCreationTime; }
	void SetCreationTime(time_t creationTime)
		{ fCreationTime = creationTime; }
	time_t ModificationTime() const { return fModificationTime; }
	void SetModificationTime(time_t modificationTime)
		{ fModificationTime = modificationTime; }

	mutex *RequestLock() { return &fRequestLock; }

	status_t WriteDataToBuffer(const void *data, size_t *_length,
		bool nonBlocking);
	status_t ReadDataFromBuffer(void *data, size_t *_length,
		bool nonBlocking, ReadRequest &request);
	size_t BytesAvailable() const { return fBuffer.Readable(); }
	size_t BytesWritable() const { return fBuffer.Writable(); }

	void AddReadRequest(ReadRequest &request);
	void RemoveReadRequest(ReadRequest &request);
	status_t WaitForReadRequest(ReadRequest &request);

	void NotifyBytesRead(size_t bytes);
	void NotifyReadDone();
	void NotifyBytesWritten(size_t bytes);
	void NotifyEndClosed(bool writer);

	void Open(int openMode);
	void Close(int openMode);
	int32 ReaderCount() const { return fReaderCount; }
	int32 WriterCount() const { return fWriterCount; }

	status_t Select(uint8 event, selectsync *sync, int openMode);
	status_t Deselect(uint8 event, selectsync *sync, int openMode);

private:
	time_t fCreationTime;
	time_t fModificationTime;

	RingBuffer fBuffer;

	ReadRequestList fReadRequests;
	WriteRequestList fWriteRequests;

	mutex fRequestLock;

	ConditionVariable fWriteCondition;

	int32 fReaderCount;
	int32 fWriterCount;
	bool fActive;

	select_sync_pool *fReadSelectSyncPool;
	select_sync_pool *fWriteSelectSyncPool;
};


class FIFOInode : public Inode {
public:
	FIFOInode(fs_vnode* vnode)
		:
		Inode(),
		fSuperVnode(*vnode)
	{
	}

	fs_vnode* SuperVnode() { return &fSuperVnode; }

private:
	fs_vnode fSuperVnode;
};


struct file_cookie {
	int open_mode;
};


//---------------------


RingBuffer::RingBuffer()
	: fBuffer(NULL)
{
}


RingBuffer::~RingBuffer()
{
	DeleteBuffer();
}


status_t
RingBuffer::CreateBuffer()
{
	if (fBuffer != NULL)
		return B_OK;

	fBuffer = create_ring_buffer(PIPEFS_MAX_BUFFER_SIZE);
	return (fBuffer != NULL ? B_OK : B_NO_MEMORY);
}


void
RingBuffer::DeleteBuffer()
{
	if (fBuffer != NULL) {
		delete_ring_buffer(fBuffer);
		fBuffer = NULL;
	}
}


inline size_t
RingBuffer::Write(const void *buffer, size_t length)
{
	if (fBuffer == NULL)
		return B_NO_MEMORY;

	return ring_buffer_write(fBuffer, (const uint8 *)buffer, length);
}


inline size_t
RingBuffer::Read(void *buffer, size_t length)
{
	if (fBuffer == NULL)
		return B_NO_MEMORY;

	return ring_buffer_read(fBuffer, (uint8 *)buffer, length);
}


inline ssize_t
RingBuffer::UserWrite(const void *buffer, ssize_t length)
{
	if (fBuffer == NULL)
		return B_NO_MEMORY;

	return ring_buffer_user_write(fBuffer, (const uint8 *)buffer, length);
}


inline ssize_t
RingBuffer::UserRead(void *buffer, ssize_t length)
{
	if (fBuffer == NULL)
		return B_NO_MEMORY;

	return ring_buffer_user_read(fBuffer, (uint8 *)buffer, length);
}


inline size_t
RingBuffer::Readable() const
{
	return (fBuffer != NULL ? ring_buffer_readable(fBuffer) : 0);
}


inline size_t
RingBuffer::Writable() const
{
	return (fBuffer != NULL ? ring_buffer_writable(fBuffer) : 0);
}


// #pragma mark -


Inode::Inode()
	:
	fReadRequests(),
	fWriteRequests(),
	fReaderCount(0),
	fWriterCount(0),
	fActive(false),
	fReadSelectSyncPool(NULL),
	fWriteSelectSyncPool(NULL)
{
	fWriteCondition.Publish(this, "pipe");
	mutex_init(&fRequestLock, "pipe request");

	fCreationTime = fModificationTime = time(NULL);
}


Inode::~Inode()
{
	fWriteCondition.Unpublish();
	mutex_destroy(&fRequestLock);
}


status_t
Inode::InitCheck()
{
	return B_OK;
}


/*!	Writes the specified data bytes to the inode's ring buffer. The
	request lock must be held when calling this method.
	Notifies readers if necessary, so that blocking readers will get started.
	Returns B_OK for success, B_BAD_ADDRESS if copying from the buffer failed,
	and various semaphore errors (like B_WOULD_BLOCK in non-blocking mode). If
	the returned length is > 0, the returned error code can be ignored.
*/
status_t
Inode::WriteDataToBuffer(const void *_data, size_t *_length, bool nonBlocking)
{
	const uint8* data = (const uint8*)_data;
	size_t dataSize = *_length;
	size_t& written = *_length;
	written = 0;

	TRACE(("Inode::WriteDataToBuffer(data = %p, bytes = %lu)\n",
		data, dataSize));

	// According to the standard, writes of up to PIPE_BUF bytes shall not be
	// interleaved with other writers' data.
	size_t minToWrite = 1;
	if (dataSize <= PIPE_BUF)
		minToWrite = dataSize;

	while (dataSize > 0) {
		// Wait until enough space in the buffer is available.
		while (!fActive
				|| (fBuffer.Writable() < minToWrite && fReaderCount > 0)) {
			if (nonBlocking)
				return B_WOULD_BLOCK;

			ConditionVariableEntry entry;
			entry.Add(this);

			WriteRequest request(minToWrite);
			fWriteRequests.Add(&request);

			mutex_unlock(&fRequestLock);
			status_t status = entry.Wait(B_CAN_INTERRUPT);
			mutex_lock(&fRequestLock);

			fWriteRequests.Remove(&request);

			if (status != B_OK)
				return status;
		}

		// write only as long as there are readers left
		if (fReaderCount == 0 && fActive) {
			if (written == 0)
				send_signal(find_thread(NULL), SIGPIPE);
			return EPIPE;
		}

		// write as much as we can

		size_t toWrite = (fActive ? fBuffer.Writable() : 0);
		if (toWrite > dataSize)
			toWrite = dataSize;

		if (toWrite > 0 && fBuffer.UserWrite(data, toWrite) < B_OK)
			return B_BAD_ADDRESS;

		data += toWrite;
		dataSize -= toWrite;
		written += toWrite;

		NotifyBytesWritten(toWrite);
	}

	return B_OK;
}


status_t
Inode::ReadDataFromBuffer(void *data, size_t *_length, bool nonBlocking,
	ReadRequest &request)
{
	size_t dataSize = *_length;
	*_length = 0;

	// wait until our request is first in queue
	status_t error;
	if (fReadRequests.Head() != &request) {
		if (nonBlocking)
			return B_WOULD_BLOCK;

		error = WaitForReadRequest(request);
		if (error != B_OK)
			return error;
	}

	// wait until data is available
	while (fBuffer.Readable() == 0) {
		if (nonBlocking)
			return B_WOULD_BLOCK;

		if (fActive && fWriterCount == 0)
			return B_OK;

		error = WaitForReadRequest(request);
		if (error != B_OK)
			return error;
	}

	// read as much as we can
	size_t toRead = fBuffer.Readable();
	if (toRead > dataSize)
		toRead = dataSize;

	if (fBuffer.UserRead(data, toRead) < B_OK)
		return B_BAD_ADDRESS;

	NotifyBytesRead(toRead);

	*_length = toRead;

	return B_OK;
}


void
Inode::AddReadRequest(ReadRequest &request)
{
	fReadRequests.Add(&request);
}


void
Inode::RemoveReadRequest(ReadRequest &request)
{
	fReadRequests.Remove(&request);
}


status_t
Inode::WaitForReadRequest(ReadRequest &request)
{
	request.SetUnnotified();

	// add the entry to wait on
	thread_prepare_to_block(thread_get_current_thread(), B_CAN_INTERRUPT,
		THREAD_BLOCK_TYPE_OTHER, "fifo read request");

	// wait
	mutex_unlock(&fRequestLock);
	status_t status = thread_block();
	mutex_lock(&fRequestLock);

	return status;
}

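
// Notification protocol used by the methods below: writers block on the
// "pipe" condition variable and are always woken collectively via
// NotifyAll(), while readers block individually via thread_block() and are
// woken one at a time through ReadRequest::Notify(). All Notify*() methods
// are called with the request lock held.
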
void
Inode::NotifyBytesRead(size_t bytes)
{
	// notify writer, if something can be written now
	size_t writable = fBuffer.Writable();
	if (bytes > 0) {
		// notify select()ors only, if nothing was writable before
		if (writable == bytes) {
			if (fWriteSelectSyncPool)
				notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_WRITE);
		}

		// If any of the waiting writers has a minimal write count that has
		// now become satisfied, we notify all of them (condition variables
		// don't support doing that selectively).
		WriteRequest *request;
		WriteRequestList::Iterator iterator = fWriteRequests.GetIterator();
		while ((request = iterator.Next()) != NULL) {
			size_t minWriteCount = request->MinimalWriteCount();
			if (minWriteCount > 0 && minWriteCount <= writable
					&& minWriteCount > writable - bytes) {
				fWriteCondition.NotifyAll();
				break;
			}
		}
	}
}


void
Inode::NotifyReadDone()
{
	// notify next reader, if there's still something to be read
	if (fBuffer.Readable() > 0) {
		if (ReadRequest* request = fReadRequests.First())
			request->Notify();
	}
}


void
Inode::NotifyBytesWritten(size_t bytes)
{
	// notify reader, if something can be read now
	if (bytes > 0 && fBuffer.Readable() == bytes) {
		if (fReadSelectSyncPool)
			notify_select_event_pool(fReadSelectSyncPool, B_SELECT_READ);

		if (ReadRequest* request = fReadRequests.First())
			request->Notify();
	}
}


void
Inode::NotifyEndClosed(bool writer)
{
	if (writer) {
		// Our last writer has been closed; if the pipe
		// contains no data, unlock all waiting readers.
		if (fBuffer.Readable() == 0) {
			ReadRequest *request;
			ReadRequestList::Iterator iterator = fReadRequests.GetIterator();
			while ((request = iterator.Next()) != NULL)
				request->Notify();

			if (fReadSelectSyncPool)
				notify_select_event_pool(fReadSelectSyncPool, B_SELECT_READ);
		}
	} else {
		// The last reader is gone. Wake up all writers.
		fWriteCondition.NotifyAll();

		if (fWriteSelectSyncPool) {
			notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_WRITE);
			notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_ERROR);
		}
	}
}


void
Inode::Open(int openMode)
{
	MutexLocker locker(RequestLock());

	if ((openMode & O_ACCMODE) == O_WRONLY)
		fWriterCount++;

	if ((openMode & O_ACCMODE) == O_RDONLY || (openMode & O_ACCMODE) == O_RDWR)
		fReaderCount++;

	if (fReaderCount > 0 && fWriterCount > 0) {
		fBuffer.CreateBuffer();
		fActive = true;

		// notify all waiting writers that they can start
		if (fWriteSelectSyncPool)
			notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_WRITE);
		fWriteCondition.NotifyAll();
	}
}


void
Inode::Close(int openMode)
{
	TRACE(("Inode::Close(openMode = %d)\n", openMode));

	MutexLocker locker(RequestLock());

	if ((openMode & O_ACCMODE) == O_WRONLY && --fWriterCount == 0)
		NotifyEndClosed(true);

	if ((openMode & O_ACCMODE) == O_RDONLY
		|| (openMode & O_ACCMODE) == O_RDWR) {
		if (--fReaderCount == 0)
			NotifyEndClosed(false);
	}

	if (fReaderCount == 0 && fWriterCount == 0) {
		fActive = false;
		fBuffer.DeleteBuffer();
	}
}


status_t
Inode::Select(uint8 event, selectsync *sync, int openMode)
{
	bool writer = true;
	select_sync_pool** pool;
	if ((openMode & O_RWMASK) == O_RDONLY) {
		pool = &fReadSelectSyncPool;
		writer = false;
	} else if ((openMode & O_RWMASK) == O_WRONLY) {
		pool = &fWriteSelectSyncPool;
	} else
		return B_NOT_ALLOWED;

	if (add_select_sync_pool_entry(pool, sync, event) != B_OK)
		return B_ERROR;

	// signal right away, if the condition holds already
	if (writer) {
		if ((event == B_SELECT_WRITE
				&& (fBuffer.Writable() > 0 || fReaderCount == 0))
			|| (event == B_SELECT_ERROR && fReaderCount == 0)) {
			return notify_select_event(sync, event);
		}
	} else {
		if (event == B_SELECT_READ
			&& (fBuffer.Readable() > 0 || fWriterCount == 0)) {
			return notify_select_event(sync, event);
		}
	}

	return B_OK;
}


status_t
Inode::Deselect(uint8 event, selectsync *sync, int openMode)
{
	select_sync_pool** pool;
	if ((openMode & O_RWMASK) == O_RDONLY) {
		pool = &fReadSelectSyncPool;
	} else if ((openMode & O_RWMASK) == O_WRONLY) {
		pool = &fWriteSelectSyncPool;
	} else
		return B_NOT_ALLOWED;

	remove_select_sync_pool_entry(pool, sync, event);
	return B_OK;
}


// #pragma mark -


static status_t
fifo_put_vnode(fs_volume *volume, fs_vnode *vnode, bool reenter)
{
	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
	fs_vnode* superVnode = fifo->SuperVnode();

	status_t error = B_OK;
	if (superVnode->ops->put_vnode != NULL)
		error = superVnode->ops->put_vnode(volume, superVnode, reenter);

	delete fifo;

	return error;
}


static status_t
fifo_remove_vnode(fs_volume *volume, fs_vnode *vnode, bool reenter)
{
	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
	fs_vnode* superVnode = fifo->SuperVnode();

	status_t error = B_OK;
	if (superVnode->ops->remove_vnode != NULL)
		error = superVnode->ops->remove_vnode(volume, superVnode, reenter);

	delete fifo;

	return error;
}


static status_t
fifo_open(fs_volume *_volume, fs_vnode *_node, int openMode,
	void **_cookie)
{
	Inode *inode = (Inode *)_node->private_node;

	TRACE(("fifo_open(): node = %p, openMode = %d\n", inode, openMode));

	file_cookie *cookie = (file_cookie *)malloc(sizeof(file_cookie));
	if (cookie == NULL)
		return B_NO_MEMORY;

	TRACE(("  open cookie = %p\n", cookie));
	cookie->open_mode = openMode;
	inode->Open(openMode);

	*_cookie = (void *)cookie;

	return B_OK;
}


static status_t
fifo_close(fs_volume *volume, fs_vnode *vnode, void *_cookie)
{
	file_cookie *cookie = (file_cookie *)_cookie;
	FIFOInode* fifo = (FIFOInode*)vnode->private_node;

	fifo->Close(cookie->open_mode);

	return B_OK;
}


static status_t
fifo_free_cookie(fs_volume *_volume, fs_vnode *_node, void *_cookie)
{
	file_cookie *cookie = (file_cookie *)_cookie;

	TRACE(("fifo_freecookie: entry vnode %p, cookie %p\n", _node, _cookie));

	free(cookie);

	return B_OK;
}


static status_t
fifo_fsync(fs_volume *_volume, fs_vnode *_v)
{
	return B_OK;
}


static status_t
fifo_read(fs_volume *_volume, fs_vnode *_node, void *_cookie,
	off_t /*pos*/, void *buffer, size_t *_length)
{
	file_cookie *cookie = (file_cookie *)_cookie;
	Inode *inode = (Inode *)_node->private_node;

	TRACE(("fifo_read(vnode = %p, cookie = %p, length = %lu, mode = %d)\n",
		inode, cookie, *_length, cookie->open_mode));

	if ((cookie->open_mode & O_RWMASK) != O_RDONLY)
		return B_NOT_ALLOWED;

	MutexLocker locker(inode->RequestLock());

	if (inode->IsActive() && inode->WriterCount() == 0) {
		// as long as there is no writer and the pipe is empty,
		// we always just return 0 to indicate end of file
		if (inode->BytesAvailable() == 0) {
			*_length = 0;
			return B_OK;
		}
	}

	// issue read request

	ReadRequest request;
	inode->AddReadRequest(request);

	size_t length = *_length;
	status_t status = inode->ReadDataFromBuffer(buffer, &length,
		(cookie->open_mode & O_NONBLOCK) != 0, request);

	inode->RemoveReadRequest(request);
	inode->NotifyReadDone();

	if (length > 0)
		status = B_OK;

	*_length = length;
	return status;
}


static status_t
fifo_write(fs_volume *_volume, fs_vnode *_node, void *_cookie,
	off_t /*pos*/, const void *buffer, size_t *_length)
{
	file_cookie *cookie = (file_cookie *)_cookie;
	Inode *inode = (Inode *)_node->private_node;

	TRACE(("fifo_write(vnode = %p, cookie = %p, length = %lu)\n",
		_node, cookie, *_length));

	if ((cookie->open_mode & O_RWMASK) != O_WRONLY)
		return B_NOT_ALLOWED;

	MutexLocker locker(inode->RequestLock());

	size_t length = *_length;
	if (length == 0)
		return B_OK;

	// copy data into ring buffer
	status_t status = inode->WriteDataToBuffer(buffer, &length,
		(cookie->open_mode & O_NONBLOCK) != 0);

	if (length > 0)
		status = B_OK;

	*_length = length;
	return status;
}


static status_t
fifo_read_stat(fs_volume *volume, fs_vnode *vnode, struct ::stat *st)
{
	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
	fs_vnode* superVnode = fifo->SuperVnode();

	if (superVnode->ops->read_stat == NULL)
		return B_BAD_VALUE;

	status_t error = superVnode->ops->read_stat(volume, superVnode, st);
	if (error != B_OK)
		return error;

	MutexLocker locker(fifo->RequestLock());

	st->st_size = fifo->BytesAvailable();

	st->st_blksize = 4096;

	// TODO: Just pass the changes to our modification time on to the
	// super node.
	st->st_atime = time(NULL);
	st->st_mtime = st->st_ctime = fifo->ModificationTime();
//	st->st_crtime = inode->CreationTime();

	return B_OK;
}


static status_t
fifo_write_stat(fs_volume *volume, fs_vnode *vnode, const struct ::stat *st,
	uint32 statMask)
{
	// we cannot change the size of anything
	if (statMask & B_STAT_SIZE)
		return B_BAD_VALUE;

	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
	fs_vnode* superVnode = fifo->SuperVnode();

	if (superVnode->ops->write_stat == NULL)
		return B_BAD_VALUE;

	status_t error = superVnode->ops->write_stat(volume, superVnode, st,
		statMask);
	if (error != B_OK)
		return error;

	return B_OK;
}


static status_t
fifo_ioctl(fs_volume *_volume, fs_vnode *_vnode, void *_cookie, ulong op,
	void *buffer, size_t length)
{
	TRACE(("fifo_ioctl: vnode %p, cookie %p, op %ld, buf %p, len %ld\n",
		_vnode, _cookie, op, buffer, length));

	return EINVAL;
}


static status_t
fifo_set_flags(fs_volume *_volume, fs_vnode *_vnode, void *_cookie,
	int flags)
{
	file_cookie *cookie = (file_cookie *)_cookie;

	TRACE(("fifo_set_flags(vnode = %p, flags = %x)\n", _vnode, flags));
	cookie->open_mode = (cookie->open_mode & ~(O_APPEND | O_NONBLOCK)) | flags;
	return B_OK;
}


static status_t
fifo_select(fs_volume *_volume, fs_vnode *_node, void *_cookie,
	uint8 event, selectsync *sync)
{
	file_cookie *cookie = (file_cookie *)_cookie;

	TRACE(("fifo_select(vnode = %p)\n", _node));
	Inode *inode = (Inode *)_node->private_node;
	if (!inode)
		return B_ERROR;

	MutexLocker locker(inode->RequestLock());
	return inode->Select(event, sync, cookie->open_mode);
}


static status_t
fifo_deselect(fs_volume *_volume, fs_vnode *_node, void *_cookie,
	uint8 event, selectsync *sync)
{
	file_cookie *cookie = (file_cookie *)_cookie;

	TRACE(("fifo_deselect(vnode = %p)\n", _node));
	Inode *inode = (Inode *)_node->private_node;
	if (!inode)
		return B_ERROR;

	MutexLocker locker(inode->RequestLock());
	return inode->Deselect(event, sync, cookie->open_mode);
}


static bool
fifo_can_page(fs_volume *_volume, fs_vnode *_v, void *cookie)
{
	return false;
}


static status_t
fifo_read_pages(fs_volume *_volume, fs_vnode *_v, void *cookie, off_t pos,
	const iovec *vecs, size_t count, size_t *_numBytes)
{
	return B_NOT_ALLOWED;
}


static status_t
fifo_write_pages(fs_volume *_volume, fs_vnode *_v, void *cookie,
	off_t pos, const iovec *vecs, size_t count, size_t *_numBytes)
{
	return B_NOT_ALLOWED;
}


static status_t
fifo_get_super_vnode(fs_volume *volume, fs_vnode *vnode,
	fs_volume *superVolume, fs_vnode *_superVnode)
{
	FIFOInode* fifo = (FIFOInode*)vnode->private_node;
	fs_vnode* superVnode = fifo->SuperVnode();

	if (superVnode->ops->get_super_vnode != NULL) {
		return superVnode->ops->get_super_vnode(volume, superVnode,
			superVolume, _superVnode);
	}

	*_superVnode = *superVnode;

	return B_OK;
}


static fs_vnode_ops sFIFOVnodeOps = {
	NULL,	// lookup
	NULL,	// get_vnode_name
		// TODO: This is suboptimal! We'd need to forward the
		// super node's hook, if it has got one.

	&fifo_put_vnode,
	&fifo_remove_vnode,

	&fifo_can_page,
	&fifo_read_pages,
	&fifo_write_pages,

	NULL,	// io()
	NULL,	// cancel_io()

	NULL,	// get_file_map

	/* common */
	&fifo_ioctl,
	&fifo_set_flags,
	&fifo_select,
	&fifo_deselect,
	&fifo_fsync,

	NULL,	// fs_read_link
	NULL,	// fs_symlink
	NULL,	// fs_link
	NULL,	// unlink
	NULL,	// rename

	NULL,	// fs_access()
	&fifo_read_stat,
	&fifo_write_stat,

	/* file */
	NULL,	// create()
	&fifo_open,
	&fifo_close,
	&fifo_free_cookie,
	&fifo_read,
	&fifo_write,

	/* directory */
	NULL,	// create_dir
	NULL,	// remove_dir
	NULL,	// open_dir
	NULL,	// close_dir
	NULL,	// free_dir_cookie
	NULL,	// read_dir
	NULL,	// rewind_dir

	/* attribute directory operations */
	NULL,	// open_attr_dir
	NULL,	// close_attr_dir
	NULL,	// free_attr_dir_cookie
	NULL,	// read_attr_dir
	NULL,	// rewind_attr_dir

	/* attribute operations */
	NULL,	// create_attr
	NULL,	// open_attr
	NULL,	// close_attr
	NULL,	// free_attr_cookie
	NULL,	// read_attr
	NULL,	// write_attr

	NULL,	// read_attr_stat
	NULL,	// write_attr_stat
	NULL,	// rename_attr
	NULL,	// remove_attr

	/* support for node and FS layers */
	NULL,	// create_special_node
	&fifo_get_super_vnode,
};


}	// namespace fifo


using namespace fifo;


// #pragma mark -


status_t
create_fifo_vnode(fs_volume* superVolume, fs_vnode* vnode)
{
	FIFOInode *fifo = new(std::nothrow) FIFOInode(vnode);
	if (fifo == NULL)
		return B_NO_MEMORY;

	status_t status = fifo->InitCheck();
	if (status != B_OK) {
		delete fifo;
		return status;
	}

	vnode->private_node = fifo;
	vnode->ops = &sFIFOVnodeOps;

	return B_OK;
}
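
// Minimal usage sketch (compiled out, purely illustrative): a caller such as
// the VFS layer could wrap an already set up vnode as a FIFO once it knows
// the node's mode is S_IFIFO. The helper name and the B_BAD_TYPE error are
// assumptions, not part of any existing interface.
#if 0
static status_t
wrap_vnode_as_fifo(fs_volume* superVolume, fs_vnode* vnode, mode_t mode)
{
	if (!S_ISFIFO(mode))
		return B_BAD_TYPE;

	// create_fifo_vnode() copies the original fs_vnode as "super vnode" and
	// replaces vnode->private_node/ops with the FIFO implementation above,
	// so stat and attribute requests still reach the underlying node.
	return create_fifo_vnode(superVolume, vnode);
}
#endif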