/* Operations on file descriptors
 *
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */

#include <fd.h>

#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <syscalls.h>
#include <util/AutoLock.h>
#include <vfs.h>
#include <wait_for_objects.h>


//#define TRACE_FD
#ifdef TRACE_FD
#	define TRACE(x) dprintf x
#else
#	define TRACE(x)
#endif


static struct file_descriptor* get_fd_locked(struct io_context* context,
	int fd);
static void deselect_select_infos(file_descriptor* descriptor,
	select_info* infos);


struct FDGetterLocking {
	inline bool Lock(file_descriptor* /*lockable*/)
	{
		return false;
	}

	inline void Unlock(file_descriptor* lockable)
	{
		put_fd(lockable);
	}
};

class FDGetter : public AutoLocker<file_descriptor, FDGetterLocking> {
public:
	inline FDGetter()
		: AutoLocker<file_descriptor, FDGetterLocking>()
	{
	}

	inline FDGetter(io_context* context, int fd, bool contextLocked = false)
		: AutoLocker<file_descriptor, FDGetterLocking>(
			contextLocked ? get_fd_locked(context, fd) : get_fd(context, fd))
	{
	}

	inline file_descriptor* SetTo(io_context* context, int fd,
		bool contextLocked = false)
	{
		file_descriptor* descriptor
			= contextLocked ? get_fd_locked(context, fd) : get_fd(context, fd);
		AutoLocker<file_descriptor, FDGetterLocking>::SetTo(descriptor, true);
		return descriptor;
	}

	inline file_descriptor* FD() const
	{
		return fLockable;
	}
};


/*** General fd routines ***/


#ifdef DEBUG
void dump_fd(int fd, struct file_descriptor *descriptor);

void
dump_fd(int fd, struct file_descriptor *descriptor)
{
	dprintf("fd[%d] = %p: type = %ld, ref_count = %ld, ops = %p, u.vnode = %p, "
		"u.mount = %p, cookie = %p, open_mode = %lx, pos = %Ld\n",
		fd, descriptor, descriptor->type, descriptor->ref_count,
		descriptor->ops, descriptor->u.vnode, descriptor->u.mount,
		descriptor->cookie, descriptor->open_mode, descriptor->pos);
}
#endif


/** Allocates and initializes a new file_descriptor */

struct file_descriptor *
alloc_fd(void)
{
	file_descriptor *descriptor
		= (file_descriptor*)malloc(sizeof(struct file_descriptor));
	if (descriptor == NULL)
		return NULL;

	descriptor->u.vnode = NULL;
	descriptor->cookie = NULL;
	descriptor->ref_count = 1;
	descriptor->open_count = 0;
	descriptor->open_mode = 0;
	descriptor->pos = 0;

	return descriptor;
}


bool
fd_close_on_exec(struct io_context *context, int fd)
{
	return CHECK_BIT(context->fds_close_on_exec[fd / 8], fd & 7) ? true : false;
}


void
fd_set_close_on_exec(struct io_context *context, int fd, bool closeFD)
{
	if (closeFD)
		context->fds_close_on_exec[fd / 8] |= (1 << (fd & 7));
	else
		context->fds_close_on_exec[fd / 8] &= ~(1 << (fd & 7));
}


/** Searches for a free slot in the FD table of the provided I/O context,
 * and inserts the specified descriptor into it.
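 * On success, the index of the slot is returned and the descriptor's
 * open_count is incremented; if no free slot at or after firstIndex exists,
 * B_NO_MORE_FDS is returned.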
 */

int
new_fd_etc(struct io_context *context, struct file_descriptor *descriptor,
	int firstIndex)
{
	int fd = -1;
	uint32 i;

	mutex_lock(&context->io_mutex);

	for (i = firstIndex; i < context->table_size; i++) {
		if (!context->fds[i]) {
			fd = i;
			break;
		}
	}
	if (fd < 0) {
		fd = B_NO_MORE_FDS;
		goto err;
	}

	context->fds[fd] = descriptor;
	context->num_used_fds++;
	atomic_add(&descriptor->open_count, 1);

err:
	mutex_unlock(&context->io_mutex);

	return fd;
}


int
new_fd(struct io_context *context, struct file_descriptor *descriptor)
{
	return new_fd_etc(context, descriptor, 0);
}


/** Reduces the descriptor's reference counter, and frees all resources
 * when it's no longer used.
 */

void
put_fd(struct file_descriptor *descriptor)
{
	int32 previous = atomic_add(&descriptor->ref_count, -1);

	TRACE(("put_fd(descriptor = %p [ref = %ld, cookie = %p])\n",
		descriptor, descriptor->ref_count, descriptor->cookie));

	// free the descriptor if we don't need it anymore
	if (previous == 1) {
		// free the underlying object
		if (descriptor->ops != NULL && descriptor->ops->fd_free != NULL)
			descriptor->ops->fd_free(descriptor);

		free(descriptor);
	} else if ((descriptor->open_mode & O_DISCONNECTED) != 0
		&& previous - 1 == descriptor->open_count
		&& descriptor->ops != NULL) {
		// the descriptor has been disconnected - it cannot
		// be accessed anymore, let's close it (no one is
		// currently accessing this descriptor)

		if (descriptor->ops->fd_close)
			descriptor->ops->fd_close(descriptor);
		if (descriptor->ops->fd_free)
			descriptor->ops->fd_free(descriptor);

		// prevent this descriptor from being closed/freed again
		descriptor->open_count = -1;
		descriptor->ref_count = -1;
		descriptor->ops = NULL;
		descriptor->u.vnode = NULL;

		// the file descriptor is kept intact, so that it's not
		// reused until someone explicitly closes it
	}
}


/** Decrements the open counter of the file descriptor and invokes
 * its close hook when appropriate.
 */

void
close_fd(struct file_descriptor *descriptor)
{
	if (atomic_add(&descriptor->open_count, -1) == 1) {
		vfs_unlock_vnode_if_locked(descriptor);

		if (descriptor->ops != NULL && descriptor->ops->fd_close != NULL)
			descriptor->ops->fd_close(descriptor);
	}
}


/** This descriptor's underlying object will be closed and freed
 * as soon as possible (in one of the next calls to put_fd() -
 * get_fd() will no longer succeed on this descriptor).
 * This is useful if the underlying object is gone, for instance
 * when a (mounted) volume was removed unexpectedly.
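 * Note that only the O_DISCONNECTED flag is set here; the underlying
 * object is actually closed and freed by put_fd(), once nobody accesses
 * the descriptor anymore.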
 */

void
disconnect_fd(struct file_descriptor *descriptor)
{
	descriptor->open_mode |= O_DISCONNECTED;
}


void
inc_fd_ref_count(struct file_descriptor *descriptor)
{
	atomic_add(&descriptor->ref_count, 1);
}


static struct file_descriptor *
get_fd_locked(struct io_context *context, int fd)
{
	if (fd < 0 || (uint32)fd >= context->table_size)
		return NULL;

	struct file_descriptor *descriptor = context->fds[fd];

	if (descriptor != NULL) {
		// Disconnected descriptors cannot be accessed anymore
		if (descriptor->open_mode & O_DISCONNECTED)
			descriptor = NULL;
		else
			inc_fd_ref_count(descriptor);
	}

	return descriptor;
}


struct file_descriptor *
get_fd(struct io_context *context, int fd)
{
	MutexLocker locker(context->io_mutex);

	return get_fd_locked(context, fd);
}


/** Removes the file descriptor from the specified slot.
 */

static struct file_descriptor *
remove_fd(struct io_context *context, int fd)
{
	struct file_descriptor *descriptor = NULL;

	if (fd < 0)
		return NULL;

	mutex_lock(&context->io_mutex);

	if ((uint32)fd < context->table_size)
		descriptor = context->fds[fd];

	select_info* selectInfos = NULL;
	bool disconnected = false;

	if (descriptor) {
		// fd is valid
		context->fds[fd] = NULL;
		fd_set_close_on_exec(context, fd, false);
		context->num_used_fds--;

		selectInfos = context->select_infos[fd];
		context->select_infos[fd] = NULL;

		disconnected = (descriptor->open_mode & O_DISCONNECTED);
	}

	mutex_unlock(&context->io_mutex);

	if (selectInfos != NULL)
		deselect_select_infos(descriptor, selectInfos);

	return disconnected ? NULL : descriptor;
}


static int
dup_fd(int fd, bool kernel)
{
	struct io_context *context = get_current_io_context(kernel);
	struct file_descriptor *descriptor;
	int status;

	TRACE(("dup_fd: fd = %d\n", fd));

	// Try to get the fd structure
	descriptor = get_fd(context, fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	// now put the fd in place
	status = new_fd(context, descriptor);
	if (status < 0)
		put_fd(descriptor);
	else {
		mutex_lock(&context->io_mutex);
		fd_set_close_on_exec(context, status, false);
		mutex_unlock(&context->io_mutex);
	}

	return status;
}


/** POSIX says this should be the same as:
 *		close(newfd);
 *		fcntl(oldfd, F_DUPFD, newfd);
 *
 * We do dup2() directly to be thread-safe.
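 * (With the two separate calls, another thread could reuse the just closed
 * newfd slot in between, so the duplicate could end up in a different slot.)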
 */
static int
dup2_fd(int oldfd, int newfd, bool kernel)
{
	struct file_descriptor *evicted = NULL;
	struct io_context *context;

	TRACE(("dup2_fd: ofd = %d, nfd = %d\n", oldfd, newfd));

	// quick check
	if (oldfd < 0 || newfd < 0)
		return B_FILE_ERROR;

	// Get current I/O context and lock it
	context = get_current_io_context(kernel);
	mutex_lock(&context->io_mutex);

	// Check if the fds are valid (mutex must be locked because
	// the table size could be changed)
	if ((uint32)oldfd >= context->table_size
		|| (uint32)newfd >= context->table_size
		|| context->fds[oldfd] == NULL) {
		mutex_unlock(&context->io_mutex);
		return B_FILE_ERROR;
	}

	// Check for identity; note that this check cannot be done above,
	// because we always want to return an error on invalid handles
	select_info* selectInfos = NULL;
	if (oldfd != newfd) {
		// Now do the work
		evicted = context->fds[newfd];
		selectInfos = context->select_infos[newfd];
		context->select_infos[newfd] = NULL;
		atomic_add(&context->fds[oldfd]->ref_count, 1);
		atomic_add(&context->fds[oldfd]->open_count, 1);
		context->fds[newfd] = context->fds[oldfd];

		if (evicted == NULL)
			context->num_used_fds++;
	}

	fd_set_close_on_exec(context, newfd, false);

	mutex_unlock(&context->io_mutex);

	// Say bye bye to the evicted fd
	if (evicted) {
		deselect_select_infos(evicted, selectInfos);
		close_fd(evicted);
		put_fd(evicted);
	}

	return newfd;
}


static status_t
fd_ioctl(bool kernelFD, int fd, ulong op, void *buffer, size_t length)
{
	struct file_descriptor *descriptor;
	status_t status;

	descriptor = get_fd(get_current_io_context(kernelFD), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_ioctl)
		status = descriptor->ops->fd_ioctl(descriptor, op, buffer, length);
	else
		status = EOPNOTSUPP;

	put_fd(descriptor);
	return status;
}


static void
deselect_select_infos(file_descriptor* descriptor, select_info* infos)
{
	TRACE(("deselect_select_infos(%p, %p)\n", descriptor, infos));

	select_info* info = infos;
	while (info != NULL) {
		select_sync* sync = info->sync;

		// deselect the selected events
		if (descriptor->ops->fd_deselect && info->selected_events) {
			for (uint16 event = 1; event < 16; event++) {
				if (info->selected_events & SELECT_FLAG(event)) {
					descriptor->ops->fd_deselect(descriptor, event,
						(selectsync*)info);
				}
			}
		}

		notify_select_events(info, B_EVENT_INVALID);
		info = info->next;
		put_select_sync(sync);
	}
}


status_t
select_fd(int32 fd, struct select_info* info, bool kernel)
{
	TRACE(("select_fd(fd = %ld, info = %p (%p), 0x%x)\n", fd, info,
		info->sync, info->selected_events));

	FDGetter fdGetter;
		// define before the context locker, so it will be destroyed after it

	io_context* context = get_current_io_context(kernel);
	MutexLocker locker(context->io_mutex);

	struct file_descriptor* descriptor = fdGetter.SetTo(context, fd, true);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	if (info->selected_events == 0)
		return B_OK;

	if (!descriptor->ops->fd_select) {
		// if the I/O subsystem doesn't support select(), we will
		// immediately notify the select call
		return notify_select_events(info, info->selected_events);
	}

	// add the info to the IO context
	info->next = context->select_infos[fd];
	context->select_infos[fd] = info;

	// as long as the info is in the list, we keep a reference to the sync
	// object
	atomic_add(&info->sync->ref_count, 1);

	locker.Unlock();

	// select any events asked for
	uint32 selectedEvents = 0;

	for (uint16 event = 1; event < 16; event++) {
		if (info->selected_events & SELECT_FLAG(event)
			&& descriptor->ops->fd_select(descriptor, event,
				(selectsync*)info) == B_OK) {
			selectedEvents |= SELECT_FLAG(event);
		}
	}
	info->selected_events = selectedEvents;

	// if nothing has been selected, we deselect immediately
	if (selectedEvents == 0)
		deselect_fd(fd, info, kernel);

	return B_OK;
}


status_t
deselect_fd(int32 fd, struct select_info* info, bool kernel)
{
	TRACE(("deselect_fd(fd = %ld, info = %p (%p), 0x%x)\n", fd, info,
		info->sync, info->selected_events));

	if (info->selected_events == 0)
		return B_OK;

	FDGetter fdGetter;
		// define before the context locker, so it will be destroyed after it

	io_context* context = get_current_io_context(kernel);
	MutexLocker locker(context->io_mutex);

	struct file_descriptor* descriptor = fdGetter.SetTo(context, fd, true);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	// remove the info from the IO context

	select_info** infoLocation = &context->select_infos[fd];
	while (*infoLocation != NULL && *infoLocation != info)
		infoLocation = &(*infoLocation)->next;

	// If not found, someone else beat us to it.
	if (*infoLocation != info)
		return B_OK;

	*infoLocation = info->next;

	locker.Unlock();

	// deselect the selected events
	if (descriptor->ops->fd_deselect && info->selected_events) {
		for (uint16 event = 1; event < 16; event++) {
			if (info->selected_events & SELECT_FLAG(event)) {
				descriptor->ops->fd_deselect(descriptor, event,
					(selectsync*)info);
			}
		}
	}

	put_select_sync(info->sync);

	return B_OK;
}


/** This function checks if the specified fd is valid in the current
 * context. It can be used for a quick check; the fd is not locked,
 * so it could become invalid immediately after this check.
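 * Callers that actually want to use the descriptor should acquire a
 * reference with get_fd() and release it with put_fd() when done.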
 */

bool
fd_is_valid(int fd, bool kernel)
{
	struct file_descriptor *descriptor
		= get_fd(get_current_io_context(kernel), fd);
	if (descriptor == NULL)
		return false;

	put_fd(descriptor);
	return true;
}


struct vnode *
fd_vnode(struct file_descriptor *descriptor)
{
	switch (descriptor->type) {
		case FDTYPE_FILE:
		case FDTYPE_DIR:
		case FDTYPE_ATTR_DIR:
		case FDTYPE_ATTR:
			return descriptor->u.vnode;
	}

	return NULL;
}


static status_t
common_close(int fd, bool kernel)
{
	struct io_context *io = get_current_io_context(kernel);
	struct file_descriptor *descriptor = remove_fd(io, fd);

	if (descriptor == NULL)
		return B_FILE_ERROR;

#ifdef TRACE_FD
	if (!kernel)
		TRACE(("_user_close(descriptor = %p)\n", descriptor));
#endif

	close_fd(descriptor);
	put_fd(descriptor);
		// the reference associated with the slot

	return B_OK;
}


//	#pragma mark -
//	User syscalls


ssize_t
_user_read(int fd, off_t pos, void *buffer, size_t length)
{
	struct file_descriptor *descriptor;
	ssize_t bytesRead;

	/* This is a user function, so abort if we have a kernel address */
	if (!IS_USER_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_read) {
		bytesRead = descriptor->ops->fd_read(descriptor, pos, buffer, &length);
		if (bytesRead >= B_OK) {
			if (length > SSIZE_MAX)
				bytesRead = SSIZE_MAX;
			else
				bytesRead = (ssize_t)length;

			descriptor->pos = pos + length;
		}
	} else
		bytesRead = B_BAD_VALUE;

	put_fd(descriptor);
	return bytesRead;
}


ssize_t
_user_readv(int fd, off_t pos, const iovec *userVecs, size_t count)
{
	struct file_descriptor *descriptor;
	ssize_t bytesRead = 0;
	status_t status;
	iovec *vecs;
	uint32 i;

	/* This is a user function, so abort if we have a kernel address */
	if (!IS_USER_ADDRESS(userVecs))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	/* prevent integer overflow exploit in malloc() */
	if (count > IOV_MAX)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		status = B_FILE_ERROR;
		goto err1;
	}

	vecs = (iovec*)malloc(sizeof(iovec) * count);
	if (vecs == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	if (user_memcpy(vecs, userVecs, sizeof(iovec) * count) < B_OK) {
		status = B_BAD_ADDRESS;
		goto err2;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_read) {
		for (i = 0; i < count; i++) {
			size_t length = vecs[i].iov_len;
			status = descriptor->ops->fd_read(descriptor, pos,
				vecs[i].iov_base, &length);
			if (status < B_OK) {
				bytesRead = status;
				break;
			}

			if ((uint64)bytesRead + length > SSIZE_MAX)
				bytesRead = SSIZE_MAX;
			else
				bytesRead += (ssize_t)length;

			pos += vecs[i].iov_len;
		}
	} else
		bytesRead = B_BAD_VALUE;

	status = bytesRead;
	descriptor->pos = pos;

err2:
	free(vecs);
err1:
	put_fd(descriptor);
	return status;
}


ssize_t
_user_write(int fd, off_t pos, const void *buffer, size_t length)
{
	struct file_descriptor *descriptor;
	ssize_t bytesWritten = 0;

	if (IS_KERNEL_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_RDONLY) {
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_write) {
		bytesWritten = descriptor->ops->fd_write(descriptor, pos, buffer,
			&length);
		if (bytesWritten >= B_OK) {
			if (length > SSIZE_MAX)
				bytesWritten = SSIZE_MAX;
			else
				bytesWritten = (ssize_t)length;

			descriptor->pos = pos + length;
		}
	} else
		bytesWritten = B_BAD_VALUE;

	put_fd(descriptor);
	return bytesWritten;
}


ssize_t
_user_writev(int fd, off_t pos, const iovec *userVecs, size_t count)
{
	struct file_descriptor *descriptor;
	ssize_t bytesWritten = 0;
	status_t status;
	iovec *vecs;
	uint32 i;

	/* This is a user function, so abort if we have a kernel address */
	if (!IS_USER_ADDRESS(userVecs))
		return B_BAD_ADDRESS;

	if (pos < -1)
		return B_BAD_VALUE;

	/* prevent integer overflow exploit in malloc() */
	if (count > IOV_MAX)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_RDONLY) {
		status = B_FILE_ERROR;
		goto err1;
	}

	vecs = (iovec*)malloc(sizeof(iovec) * count);
	if (vecs == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	if (user_memcpy(vecs, userVecs, sizeof(iovec) * count) < B_OK) {
		status = B_BAD_ADDRESS;
		goto err2;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_write) {
		for (i = 0; i < count; i++) {
			size_t length = vecs[i].iov_len;
			status = descriptor->ops->fd_write(descriptor, pos,
				vecs[i].iov_base, &length);
			if (status < B_OK) {
				bytesWritten = status;
				break;
			}

			if ((uint64)bytesWritten + length > SSIZE_MAX)
				bytesWritten = SSIZE_MAX;
			else
				bytesWritten += (ssize_t)length;

			pos += vecs[i].iov_len;
		}
	} else
		bytesWritten = B_BAD_VALUE;

	status = bytesWritten;
	descriptor->pos = pos;

err2:
	free(vecs);
err1:
	put_fd(descriptor);
	return status;
}


off_t
_user_seek(int fd, off_t pos, int seekType)
{
	syscall_64_bit_return_value();

	struct file_descriptor *descriptor;

	descriptor = get_fd(get_current_io_context(false), fd);
	if (!descriptor)
		return B_FILE_ERROR;

	TRACE(("user_seek(descriptor = %p)\n", descriptor));

	if (descriptor->ops->fd_seek)
		pos = descriptor->ops->fd_seek(descriptor, pos, seekType);
	else
		pos = ESPIPE;

	put_fd(descriptor);
	return pos;
}


status_t
_user_ioctl(int fd, ulong op, void *buffer, size_t length)
{
	if (IS_KERNEL_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	TRACE(("user_ioctl: fd %d\n", fd));

	return fd_ioctl(false, fd, op, buffer, length);
}
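

/* A note on the position argument of the read/write wrappers above and of
 * their _kern_*() counterparts below: a non-negative "pos" requests a
 * positional transfer at that offset, while pos == -1 means "use the
 * descriptor's current position" (which is then advanced); pos < -1 is
 * rejected with B_BAD_VALUE. For example (illustrative only):
 *
 *	_kern_read(fd, 0, buffer, size);	// read from offset 0
 *	_kern_read(fd, -1, buffer, size);	// read at the current position
 */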

ssize_t
_user_read_dir(int fd, struct dirent *buffer, size_t bufferSize,
	uint32 maxCount)
{
	struct file_descriptor *descriptor;
	ssize_t retval;

	if (IS_KERNEL_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	TRACE(("user_read_dir(fd = %d, buffer = %p, bufferSize = %ld, "
		"count = %lu)\n", fd, buffer, bufferSize, maxCount));

	descriptor = get_fd(get_current_io_context(false), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_read_dir) {
		uint32 count = maxCount;
		retval = descriptor->ops->fd_read_dir(descriptor, buffer, bufferSize,
			&count);
		if (retval >= 0)
			retval = count;
	} else
		retval = EOPNOTSUPP;

	put_fd(descriptor);
	return retval;
}


status_t
_user_rewind_dir(int fd)
{
	struct file_descriptor *descriptor;
	status_t status;

	TRACE(("user_rewind_dir(fd = %d)\n", fd));

	descriptor = get_fd(get_current_io_context(false), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_rewind_dir)
		status = descriptor->ops->fd_rewind_dir(descriptor);
	else
		status = EOPNOTSUPP;

	put_fd(descriptor);
	return status;
}


status_t
_user_close(int fd)
{
	return common_close(fd, false);
}


int
_user_dup(int fd)
{
	return dup_fd(fd, false);
}


int
_user_dup2(int ofd, int nfd)
{
	return dup2_fd(ofd, nfd, false);
}


//	#pragma mark -
//	Kernel calls


ssize_t
_kern_read(int fd, off_t pos, void *buffer, size_t length)
{
	struct file_descriptor *descriptor;
	ssize_t bytesRead;

	if (pos < -1)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_read) {
		bytesRead = descriptor->ops->fd_read(descriptor, pos, buffer, &length);
		if (bytesRead >= B_OK) {
			if (length > SSIZE_MAX)
				bytesRead = SSIZE_MAX;
			else
				bytesRead = (ssize_t)length;

			descriptor->pos = pos + length;
		}
	} else
		bytesRead = B_BAD_VALUE;

	put_fd(descriptor);
	return bytesRead;
}


ssize_t
_kern_readv(int fd, off_t pos, const iovec *vecs, size_t count)
{
	struct file_descriptor *descriptor;
	ssize_t bytesRead = 0;
	status_t status;
	uint32 i;

	if (pos < -1)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_WRONLY) {
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_read) {
		for (i = 0; i < count; i++) {
			size_t length = vecs[i].iov_len;
			status = descriptor->ops->fd_read(descriptor, pos,
				vecs[i].iov_base, &length);
			if (status < B_OK) {
				bytesRead = status;
				break;
			}

			if ((uint64)bytesRead + length > SSIZE_MAX)
				bytesRead = SSIZE_MAX;
			else
				bytesRead += (ssize_t)length;

			pos += vecs[i].iov_len;
		}
	} else
		bytesRead = B_BAD_VALUE;

	descriptor->pos = pos;
	put_fd(descriptor);
	return bytesRead;
}


ssize_t
_kern_write(int fd, off_t pos, const void *buffer, size_t length)
{
	struct file_descriptor *descriptor;
	ssize_t bytesWritten;

	if (pos < -1)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_RDONLY) {
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_write) {
		bytesWritten = descriptor->ops->fd_write(descriptor, pos, buffer,
			&length);
		if (bytesWritten >= B_OK) {
			if (length > SSIZE_MAX)
				bytesWritten = SSIZE_MAX;
			else
				bytesWritten = (ssize_t)length;

			descriptor->pos = pos + length;
		}
	} else
		bytesWritten = B_BAD_VALUE;

	put_fd(descriptor);
	return bytesWritten;
}


ssize_t
_kern_writev(int fd, off_t pos, const iovec *vecs, size_t count)
{
	struct file_descriptor *descriptor;
	ssize_t bytesWritten = 0;
	status_t status;
	uint32 i;

	if (pos < -1)
		return B_BAD_VALUE;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (!descriptor)
		return B_FILE_ERROR;
	if ((descriptor->open_mode & O_RWMASK) == O_RDONLY) {
		put_fd(descriptor);
		return B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_write) {
		for (i = 0; i < count; i++) {
			size_t length = vecs[i].iov_len;
			status = descriptor->ops->fd_write(descriptor, pos,
				vecs[i].iov_base, &length);
			if (status < B_OK) {
				bytesWritten = status;
				break;
			}

			if ((uint64)bytesWritten + length > SSIZE_MAX)
				bytesWritten = SSIZE_MAX;
			else
				bytesWritten += (ssize_t)length;

			pos += vecs[i].iov_len;
		}
	} else
		bytesWritten = B_BAD_VALUE;

	descriptor->pos = pos;
	put_fd(descriptor);
	return bytesWritten;
}


off_t
_kern_seek(int fd, off_t pos, int seekType)
{
	struct file_descriptor *descriptor;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (!descriptor)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_seek)
		pos = descriptor->ops->fd_seek(descriptor, pos, seekType);
	else
		pos = ESPIPE;

	put_fd(descriptor);
	return pos;
}


status_t
_kern_ioctl(int fd, ulong op, void *buffer, size_t length)
{
	TRACE(("kern_ioctl: fd %d\n", fd));

	return fd_ioctl(true, fd, op, buffer, length);
}


status_t
user_fd_kernel_ioctl(int fd, ulong op, void *buffer, size_t length)
{
	TRACE(("user_fd_kernel_ioctl: fd %d\n", fd));

	return fd_ioctl(false, fd, op, buffer, length);
}


ssize_t
_kern_read_dir(int fd, struct dirent *buffer, size_t bufferSize,
	uint32 maxCount)
{
	struct file_descriptor *descriptor;
	ssize_t retval;

	TRACE(("sys_read_dir(fd = %d, buffer = %p, bufferSize = %ld, "
		"count = %lu)\n", fd, buffer, bufferSize, maxCount));

	descriptor = get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_read_dir) {
		uint32 count = maxCount;
		retval = descriptor->ops->fd_read_dir(descriptor, buffer, bufferSize,
			&count);
		if (retval >= 0)
			retval = count;
	} else
		retval = EOPNOTSUPP;

	put_fd(descriptor);
	return retval;
}


status_t
_kern_rewind_dir(int fd)
{
	struct file_descriptor *descriptor;
	status_t status;

	TRACE(("sys_rewind_dir(fd = %d)\n", fd));

	descriptor = get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;

	if (descriptor->ops->fd_rewind_dir)
		status = descriptor->ops->fd_rewind_dir(descriptor);
	else
		status = EOPNOTSUPP;

	put_fd(descriptor);
	return status;
}


status_t
_kern_close(int fd)
{
	return common_close(fd, true);
}


int
_kern_dup(int fd)
{
	return dup_fd(fd, true);
}


int
_kern_dup2(int ofd, int nfd)
{
	return dup2_fd(ofd, nfd, true);
}
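

/* Usage sketch (illustrative only): how a kernel component typically
 * consumes this API. "fd", "op", "buffer", and "length" are placeholder
 * values; the descriptor's operation vector is dispatched through directly.
 *
 *	struct io_context* context = get_current_io_context(true);
 *	struct file_descriptor* descriptor = get_fd(context, fd);
 *	if (descriptor == NULL)
 *		return B_FILE_ERROR;
 *
 *	if (descriptor->ops->fd_ioctl != NULL)
 *		descriptor->ops->fd_ioctl(descriptor, op, buffer, length);
 *
 *	put_fd(descriptor);
 *		// release the reference acquired by get_fd()
 */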