/* Operations on file descriptors
 *
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */

#include "fd.h"

#include <stdlib.h>

#include "fssh_fcntl.h"
#include "fssh_kernel_export.h"
#include "fssh_kernel_priv.h"
#include "fssh_string.h"
#include "fssh_uio.h"
#include "syscalls.h"


//#define TRACE_FD
#ifdef TRACE_FD
#	define TRACE(x) dprintf x
#else
#	define TRACE(x)
#endif


namespace FSShell {


io_context* gKernelIOContext;


/*** General fd routines ***/


#ifdef DEBUG
void dump_fd(int fd, struct file_descriptor *descriptor);

void
dump_fd(int fd, struct file_descriptor *descriptor)
{
	fssh_dprintf("fd[%d] = %p: type = %d, ref_count = %d, ops = %p, u.vnode = %p, u.mount = %p, cookie = %p, open_mode = %x, pos = %Ld\n",
		fd, descriptor, (int)descriptor->type, (int)descriptor->ref_count, descriptor->ops,
		descriptor->u.vnode, descriptor->u.mount, descriptor->cookie, (int)descriptor->open_mode, descriptor->pos);
}
#endif


/** Allocates and initializes a new file_descriptor */

struct file_descriptor *
alloc_fd(void)
{
	struct file_descriptor *descriptor;

	descriptor = (file_descriptor*)malloc(sizeof(struct file_descriptor));
	if (descriptor == NULL)
		return NULL;

	descriptor->u.vnode = NULL;
	descriptor->cookie = NULL;
	descriptor->ref_count = 1;
	descriptor->open_count = 0;
	descriptor->open_mode = 0;
	descriptor->pos = 0;

	return descriptor;
}


bool
fd_close_on_exec(struct io_context *context, int fd)
{
	return CHECK_BIT(context->fds_close_on_exec[fd / 8], fd & 7) ? true : false;
}


void
fd_set_close_on_exec(struct io_context *context, int fd, bool closeFD)
{
	if (closeFD)
		context->fds_close_on_exec[fd / 8] |= (1 << (fd & 7));
	else
		context->fds_close_on_exec[fd / 8] &= ~(1 << (fd & 7));
}


/** Searches for a free slot in the FD table of the provided I/O context,
 *	and inserts the specified descriptor into it.
 */

int
new_fd_etc(struct io_context *context, struct file_descriptor *descriptor,
	int firstIndex)
{
	int fd = -1;
	uint32_t i;

	mutex_lock(&context->io_mutex);

	for (i = firstIndex; i < context->table_size; i++) {
		if (!context->fds[i]) {
			fd = i;
			break;
		}
	}
	if (fd < 0) {
		fd = FSSH_B_NO_MORE_FDS;
		goto err;
	}

	context->fds[fd] = descriptor;
	context->num_used_fds++;
	fssh_atomic_add(&descriptor->open_count, 1);

err:
	mutex_unlock(&context->io_mutex);

	return fd;
}


int
new_fd(struct io_context *context, struct file_descriptor *descriptor)
{
	return new_fd_etc(context, descriptor, 0);
}
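

/*	Illustrative sketch (comment only, not compiled): one plausible way the
 *	helpers above combine when publishing a new descriptor.  `sMyOps` and
 *	`myCookie` are hypothetical placeholders for an fd_ops table and its
 *	private state; only alloc_fd(), new_fd(), and put_fd() are real.
 *
 *		struct file_descriptor *descriptor = alloc_fd();
 *			// ref_count == 1, open_count == 0
 *		if (descriptor == NULL)
 *			return FSSH_B_NO_MEMORY;
 *
 *		descriptor->type = FDTYPE_FILE;
 *		descriptor->ops = &sMyOps;
 *		descriptor->cookie = myCookie;
 *
 *		int fd = new_fd(get_current_io_context(true), descriptor);
 *			// on success the slot holds the descriptor and its
 *			// open_count has been bumped to 1
 *		if (fd < 0) {
 *			put_fd(descriptor);
 *				// drops the allocation reference again
 *			return fd;
 *				// e.g. FSSH_B_NO_MORE_FDS
 *		}
 */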


/** Reduces the descriptor's reference counter, and frees all resources
 *	when it's no longer used.
 */

void
put_fd(struct file_descriptor *descriptor)
{
	int32_t previous = fssh_atomic_add(&descriptor->ref_count, -1);

	TRACE(("put_fd(descriptor = %p [ref = %ld, cookie = %p])\n",
		descriptor, descriptor->ref_count, descriptor->cookie));

	// free the descriptor if we don't need it anymore
	if (previous == 1) {
		// free the underlying object
		if (descriptor->ops != NULL && descriptor->ops->fd_free != NULL)
			descriptor->ops->fd_free(descriptor);

		free(descriptor);
	} else if ((descriptor->open_mode & FSSH_O_DISCONNECTED) != 0
		&& previous - 1 == descriptor->open_count
		&& descriptor->ops != NULL) {
		// the descriptor has been disconnected - it cannot
		// be accessed anymore, let's close it (no one is
		// currently accessing this descriptor)

		if (descriptor->ops->fd_close)
			descriptor->ops->fd_close(descriptor);
		if (descriptor->ops->fd_free)
			descriptor->ops->fd_free(descriptor);

		// prevent this descriptor from being closed/freed again
		descriptor->open_count = -1;
		descriptor->ref_count = -1;
		descriptor->ops = NULL;
		descriptor->u.vnode = NULL;

		// the file descriptor is kept intact, so that it's not
		// reused until someone explicitly closes it
	}
}


/** Decrements the open counter of the file descriptor and invokes
 *	its close hook when appropriate.
 */

void
close_fd(struct file_descriptor *descriptor)
{
	if (fssh_atomic_add(&descriptor->open_count, -1) == 1) {
		vfs_unlock_vnode_if_locked(descriptor);

		if (descriptor->ops != NULL && descriptor->ops->fd_close != NULL)
			descriptor->ops->fd_close(descriptor);
	}
}


/** This descriptor's underlying object will be closed and freed
 *	as soon as possible (in one of the next calls to put_fd() -
 *	get_fd() will no longer succeed on this descriptor).
 *	This is useful if the underlying object is gone, for instance
 *	when a (mounted) volume is removed unexpectedly.
 */

void
disconnect_fd(struct file_descriptor *descriptor)
{
	descriptor->open_mode |= FSSH_O_DISCONNECTED;
}


void
inc_fd_ref_count(struct file_descriptor *descriptor)
{
	fssh_atomic_add(&descriptor->ref_count, 1);
}


struct file_descriptor *
get_fd(struct io_context *context, int fd)
{
	struct file_descriptor *descriptor = NULL;

	if (fd < 0)
		return NULL;

	mutex_lock(&context->io_mutex);

	if ((uint32_t)fd < context->table_size)
		descriptor = context->fds[fd];

	if (descriptor != NULL) {
		// Disconnected descriptors cannot be accessed anymore
		if (descriptor->open_mode & FSSH_O_DISCONNECTED)
			descriptor = NULL;
		else
			inc_fd_ref_count(descriptor);
	}

	mutex_unlock(&context->io_mutex);

	return descriptor;
}
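

/*	Illustrative sketch (comment only, not compiled): every successful
 *	get_fd() holds a reference and must be balanced by a put_fd(), which is
 *	exactly the pattern the _kern_*() wrappers below follow:
 *
 *		struct file_descriptor *descriptor
 *			= get_fd(get_current_io_context(true), fd);
 *		if (descriptor == NULL)
 *			return FSSH_B_FILE_ERROR;
 *
 *		// ... use descriptor->ops and descriptor->cookie ...
 *
 *		put_fd(descriptor);
 */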


/** Removes the file descriptor from the specified slot.
 */

static struct file_descriptor *
remove_fd(struct io_context *context, int fd)
{
	struct file_descriptor *descriptor = NULL;

	if (fd < 0)
		return NULL;

	mutex_lock(&context->io_mutex);

	if ((uint32_t)fd < context->table_size)
		descriptor = context->fds[fd];

	if (descriptor) {
		// fd is valid
		context->fds[fd] = NULL;
		fd_set_close_on_exec(context, fd, false);
		context->num_used_fds--;

		if (descriptor->open_mode & FSSH_O_DISCONNECTED)
			descriptor = NULL;
	}

	mutex_unlock(&context->io_mutex);

	return descriptor;
}


static int
dup_fd(int fd, bool kernel)
{
	struct io_context *context = get_current_io_context(kernel);
	struct file_descriptor *descriptor;
	int status;

	TRACE(("dup_fd: fd = %d\n", fd));

	// Try to get the fd structure
	descriptor = get_fd(context, fd);
	if (descriptor == NULL)
		return FSSH_B_FILE_ERROR;

	// now put the fd in place
	status = new_fd(context, descriptor);
	if (status < 0)
		put_fd(descriptor);
	else {
		mutex_lock(&context->io_mutex);
		fd_set_close_on_exec(context, status, false);
		mutex_unlock(&context->io_mutex);
	}

	return status;
}


/** POSIX says this should be the same as:
 *		close(newfd);
 *		fcntl(oldfd, F_DUPFD, newfd);
 *
 *	We do dup2() directly to be thread-safe.
 */

static int
dup2_fd(int oldfd, int newfd, bool kernel)
{
	struct file_descriptor *evicted = NULL;
	struct io_context *context;

	TRACE(("dup2_fd: ofd = %d, nfd = %d\n", oldfd, newfd));

	// quick check
	if (oldfd < 0 || newfd < 0)
		return FSSH_B_FILE_ERROR;

	// Get current I/O context and lock it
	context = get_current_io_context(kernel);
	mutex_lock(&context->io_mutex);

	// Check if the fds are valid (mutex must be locked because
	// the table size could be changed)
	if ((uint32_t)oldfd >= context->table_size
		|| (uint32_t)newfd >= context->table_size
		|| context->fds[oldfd] == NULL) {
		mutex_unlock(&context->io_mutex);
		return FSSH_B_FILE_ERROR;
	}

	// Check for identity; note that this check cannot be done earlier,
	// because we always want to return an error on invalid handles
	if (oldfd != newfd) {
		// Now do the work
		evicted = context->fds[newfd];
		fssh_atomic_add(&context->fds[oldfd]->ref_count, 1);
		fssh_atomic_add(&context->fds[oldfd]->open_count, 1);
		context->fds[newfd] = context->fds[oldfd];

		if (evicted == NULL)
			context->num_used_fds++;
	}

	fd_set_close_on_exec(context, newfd, false);

	mutex_unlock(&context->io_mutex);

	// Say bye bye to the evicted fd
	if (evicted) {
		close_fd(evicted);
		put_fd(evicted);
	}

	return newfd;
}
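

/*	Illustrative note (comment only, not compiled): after a successful
 *	dup2_fd(oldfd, newfd, ...) both table slots point at the same
 *	file_descriptor, so the open mode and file position are shared; only the
 *	close-on-exec flag is per slot (and is cleared on the new slot):
 *
 *		int result = dup2_fd(oldfd, newfd, true);
 *			// result == newfd on success
 *		_kern_seek(oldfd, 0, FSSH_SEEK_SET);
 *			// also repositions newfd, because descriptor->pos lives in
 *			// the shared file_descriptor, not in the table slot
 */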


fssh_status_t
select_fd(int fd, uint8_t event, uint32_t ref, struct select_sync *sync, bool kernel)
{
//	struct file_descriptor *descriptor;
//	fssh_status_t status;
//
//	TRACE(("select_fd(fd = %d, event = %u, ref = %lu, selectsync = %p)\n", fd, event, ref, sync));
//
//	descriptor = get_fd(get_current_io_context(kernel), fd);
//	if (descriptor == NULL)
//		return FSSH_B_FILE_ERROR;
//
//	if (descriptor->ops->fd_select) {
//		status = descriptor->ops->fd_select(descriptor, event, ref, sync);
//	} else {
//		// if the I/O subsystem doesn't support select(), we will
//		// immediately notify the select call
//		status = notify_select_event((void *)sync, ref, event);
//	}
//
//	put_fd(descriptor);
//	return status;

	return FSSH_B_BAD_VALUE;
}


fssh_status_t
deselect_fd(int fd, uint8_t event, struct select_sync *sync, bool kernel)
{
//	struct file_descriptor *descriptor;
//	fssh_status_t status;
//
//	TRACE(("deselect_fd(fd = %d, event = %u, selectsync = %p)\n", fd, event, sync));
//
//	descriptor = get_fd(get_current_io_context(kernel), fd);
//	if (descriptor == NULL)
//		return FSSH_B_FILE_ERROR;
//
//	if (descriptor->ops->fd_deselect)
//		status = descriptor->ops->fd_deselect(descriptor, event, sync);
//	else
//		status = FSSH_B_OK;
//
//	put_fd(descriptor);
//	return status;

	return FSSH_B_BAD_VALUE;
}


/** This function checks if the specified fd is valid in the current
 *	context. It can be used for a quick check; the fd is not locked
 *	so it could become invalid immediately after this check.
 */

bool
fd_is_valid(int fd, bool kernel)
{
	struct file_descriptor *descriptor = get_fd(get_current_io_context(kernel), fd);
	if (descriptor == NULL)
		return false;

	put_fd(descriptor);
	return true;
}


struct vnode *
fd_vnode(struct file_descriptor *descriptor)
{
	switch (descriptor->type) {
		case FDTYPE_FILE:
		case FDTYPE_DIR:
		case FDTYPE_ATTR_DIR:
		case FDTYPE_ATTR:
			return descriptor->u.vnode;
	}

	return NULL;
}


static fssh_status_t
common_close(int fd, bool kernel)
{
	struct io_context *io = get_current_io_context(kernel);
	struct file_descriptor *descriptor = remove_fd(io, fd);

	if (descriptor == NULL)
		return FSSH_B_FILE_ERROR;

#ifdef TRACE_FD
	if (!kernel)
		TRACE(("_user_close(descriptor = %p)\n", descriptor));
#endif

	close_fd(descriptor);
	put_fd(descriptor);
		// the reference associated with the slot

	return FSSH_B_OK;
}


//	#pragma mark -
//	Kernel calls


fssh_ssize_t
_kern_read(int fd, fssh_off_t pos, void *buffer, fssh_size_t length)
{
	struct file_descriptor *descriptor;
	fssh_ssize_t bytesRead;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (!descriptor)
		return FSSH_B_FILE_ERROR;
	if ((descriptor->open_mode & FSSH_O_RWMASK) == FSSH_O_WRONLY) {
		put_fd(descriptor);
		return FSSH_B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_read) {
		bytesRead = descriptor->ops->fd_read(descriptor, pos, buffer, &length);
		if (bytesRead >= FSSH_B_OK) {
			if (length > SSIZE_MAX)
				bytesRead = SSIZE_MAX;
			else
				bytesRead = (fssh_ssize_t)length;

			descriptor->pos = pos + length;
		}
	} else
		bytesRead = FSSH_B_BAD_VALUE;

	put_fd(descriptor);
	return bytesRead;
}
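

/*	Illustrative sketch (comment only, not compiled): passing pos == -1 makes
 *	_kern_read() use and advance the descriptor's own position, much like
 *	read(); with an explicit pos it reads at that offset but, as coded above,
 *	still updates descriptor->pos to pos + length:
 *
 *		char buffer[256];
 *		fssh_ssize_t bytes = _kern_read(fd, -1, buffer, sizeof(buffer));
 *		if (bytes < 0) {
 *			// FSSH_B_FILE_ERROR, FSSH_B_BAD_VALUE, or the error returned
 *			// by the fd_read() hook
 *		}
 */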


fssh_ssize_t
_kern_readv(int fd, fssh_off_t pos, const fssh_iovec *vecs, fssh_size_t count)
{
	struct file_descriptor *descriptor;
	fssh_ssize_t bytesRead = 0;
	fssh_status_t status;
	uint32_t i;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (!descriptor)
		return FSSH_B_FILE_ERROR;
	if ((descriptor->open_mode & FSSH_O_RWMASK) == FSSH_O_WRONLY) {
		put_fd(descriptor);
		return FSSH_B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_read) {
		for (i = 0; i < count; i++) {
			fssh_size_t length = vecs[i].iov_len;
			status = descriptor->ops->fd_read(descriptor, pos, vecs[i].iov_base, &length);
			if (status < FSSH_B_OK) {
				bytesRead = status;
				break;
			}

			if ((uint32_t)bytesRead + length > SSIZE_MAX)
				bytesRead = SSIZE_MAX;
			else
				bytesRead += (fssh_ssize_t)length;

			pos += vecs[i].iov_len;
		}
	} else
		bytesRead = FSSH_B_BAD_VALUE;

	descriptor->pos = pos;
	put_fd(descriptor);
	return bytesRead;
}


fssh_ssize_t
_kern_write(int fd, fssh_off_t pos, const void *buffer, fssh_size_t length)
{
	struct file_descriptor *descriptor;
	fssh_ssize_t bytesWritten;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return FSSH_B_FILE_ERROR;
	if ((descriptor->open_mode & FSSH_O_RWMASK) == FSSH_O_RDONLY) {
		put_fd(descriptor);
		return FSSH_B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_write) {
		bytesWritten = descriptor->ops->fd_write(descriptor, pos, buffer, &length);
		if (bytesWritten >= FSSH_B_OK) {
			if (length > SSIZE_MAX)
				bytesWritten = SSIZE_MAX;
			else
				bytesWritten = (fssh_ssize_t)length;

			descriptor->pos = pos + length;
		}
	} else
		bytesWritten = FSSH_B_BAD_VALUE;

	put_fd(descriptor);
	return bytesWritten;
}


fssh_ssize_t
_kern_writev(int fd, fssh_off_t pos, const fssh_iovec *vecs, fssh_size_t count)
{
	struct file_descriptor *descriptor;
	fssh_ssize_t bytesWritten = 0;
	fssh_status_t status;
	uint32_t i;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (!descriptor)
		return FSSH_B_FILE_ERROR;
	if ((descriptor->open_mode & FSSH_O_RWMASK) == FSSH_O_RDONLY) {
		put_fd(descriptor);
		return FSSH_B_FILE_ERROR;
	}

	if (pos == -1)
		pos = descriptor->pos;

	if (descriptor->ops->fd_write) {
		for (i = 0; i < count; i++) {
			fssh_size_t length = vecs[i].iov_len;
			status = descriptor->ops->fd_write(descriptor, pos, vecs[i].iov_base, &length);
			if (status < FSSH_B_OK) {
				bytesWritten = status;
				break;
			}

			if ((uint32_t)bytesWritten + length > SSIZE_MAX)
				bytesWritten = SSIZE_MAX;
			else
				bytesWritten += (fssh_ssize_t)length;

			pos += vecs[i].iov_len;
		}
	} else
		bytesWritten = FSSH_B_BAD_VALUE;

	descriptor->pos = pos;
	put_fd(descriptor);
	return bytesWritten;
}


fssh_off_t
_kern_seek(int fd, fssh_off_t pos, int seekType)
{
	struct file_descriptor *descriptor;

	descriptor = get_fd(get_current_io_context(true), fd);
	if (!descriptor)
		return FSSH_B_FILE_ERROR;

	if (descriptor->ops->fd_seek)
		pos = descriptor->ops->fd_seek(descriptor, pos, seekType);
	else
		pos = FSSH_ESPIPE;

	put_fd(descriptor);
	return pos;
}


fssh_status_t
_kern_ioctl(int fd, uint32_t op, void *buffer, fssh_size_t length)
{
	struct file_descriptor *descriptor;
	int status;

	TRACE(("sys_ioctl: fd %d\n", fd));

	descriptor = get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return FSSH_B_FILE_ERROR;

	if (descriptor->ops->fd_ioctl)
		status = descriptor->ops->fd_ioctl(descriptor, op, buffer, length);
	else
		status = FSSH_EOPNOTSUPP;

	put_fd(descriptor);
	return status;
}
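

/*	Illustrative sketch (comment only, not compiled): the vectored calls above
 *	issue one fd_read()/fd_write() per iovec element and advance the position
 *	by each element's full iov_len.  A hypothetical two-part write, where
 *	`header`/`payload` and their sizes are placeholders:
 *
 *		fssh_iovec vecs[2];
 *		vecs[0].iov_base = header;
 *		vecs[0].iov_len = headerSize;
 *		vecs[1].iov_base = payload;
 *		vecs[1].iov_len = payloadSize;
 *
 *		fssh_ssize_t written = _kern_writev(fd, -1, vecs, 2);
 */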


fssh_ssize_t
_kern_read_dir(int fd, struct fssh_dirent *buffer, fssh_size_t bufferSize,
	uint32_t maxCount)
{
	struct file_descriptor *descriptor;
	fssh_ssize_t retval;

	TRACE(("sys_read_dir(fd = %d, buffer = %p, bufferSize = %ld, count = %lu)\n", fd, buffer, bufferSize, maxCount));

	descriptor = get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return FSSH_B_FILE_ERROR;

	if (descriptor->ops->fd_read_dir) {
		uint32_t count = maxCount;
		retval = descriptor->ops->fd_read_dir(descriptor, buffer, bufferSize, &count);
		if (retval >= 0)
			retval = count;
	} else
		retval = FSSH_EOPNOTSUPP;

	put_fd(descriptor);
	return retval;
}


fssh_status_t
_kern_rewind_dir(int fd)
{
	struct file_descriptor *descriptor;
	fssh_status_t status;

	TRACE(("sys_rewind_dir(fd = %d)\n", fd));

	descriptor = get_fd(get_current_io_context(true), fd);
	if (descriptor == NULL)
		return FSSH_B_FILE_ERROR;

	if (descriptor->ops->fd_rewind_dir)
		status = descriptor->ops->fd_rewind_dir(descriptor);
	else
		status = FSSH_EOPNOTSUPP;

	put_fd(descriptor);
	return status;
}


fssh_status_t
_kern_close(int fd)
{
	return common_close(fd, true);
}


int
_kern_dup(int fd)
{
	return dup_fd(fd, true);
}


int
_kern_dup2(int ofd, int nfd)
{
	return dup2_fd(ofd, nfd, true);
}

}	// namespace FSShell
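

/*	Illustrative sketch (comment only, not compiled): _kern_read_dir() returns
 *	the number of entries copied into the buffer (0 once the directory is
 *	exhausted) or a negative error, and _kern_rewind_dir() restarts the walk.
 *	Reading one entry at a time into a buffer with extra room for the name:
 *
 *		char buffer[sizeof(struct fssh_dirent) + 256];
 *		struct fssh_dirent *entry = (struct fssh_dirent *)buffer;
 *		while (_kern_read_dir(fd, entry, sizeof(buffer), 1) > 0)
 *			fssh_dprintf("%s\n", entry->d_name);
 */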