/*
 * Copyright 2006-2007, François Revol. All rights reserved.
 * Distributed under the terms of the MIT License.
 */

/*
 * nbd driver for Haiku
 *
 * Maps a Network Block Device as virtual partitions.
 */


#include <ByteOrder.h>
#include <KernelExport.h>
#include <Drivers.h>
#include <driver_settings.h>
#include <Errors.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <ksocket.h>
#include <netinet/in.h>

//#define DEBUG 1

/* On the first open(), open ourselves for some seconds,
 * to avoid trying to reconnect and failing on a 2nd open,
 * as happens with the python server.
 */
//#define MOUNT_KLUDGE


/* names, ohh names... */
#ifndef SHUT_RDWR
#define SHUT_RDWR SHUTDOWN_BOTH
#endif

/* locking support */
#ifdef __HAIKU__
#include <kernel/lock.h>
#else
/* wrappers for R5 */
#ifndef _IMPEXP_KERNEL
#define _IMPEXP_KERNEL
#endif
#include "lock.h"
#define mutex lock
#define mutex_init new_lock
#define mutex_destroy free_lock
#define mutex_lock LOCK
#define mutex_unlock UNLOCK
#endif

#define DEBUG 1

#include "nbd.h"

#define DRV "nbd"
#define DP "nbd:"
#define MAX_NBDS 4
#define DEVICE_PREFIX "disk/virtual/nbd/"
#define DEVICE_FMT DEVICE_PREFIX "%d/raw"
#define DEVICE_NAME_MAX 32
#define MAX_REQ_SIZE (32*1024*1024)
#define BLKSIZE 512

/* debugging */
#if DEBUG
#define PRINT(a) dprintf a
#define WHICH(dev) ((int)(dev - nbd_devices))
#else
#define PRINT(a)
#endif

struct nbd_request_entry {
	struct nbd_request_entry *next;
	struct nbd_request req; /* net byte order */
	struct nbd_reply reply; /* net byte order */
	sem_id sem;
	bool replied;
	bool discard;
	uint64 handle;
	uint32 type;
	uint64 from;
	size_t len;
	void *buffer; /* write: ptr to passed buffer; read: ptr to malloc()ed extra */
};

struct nbd_device {
	bool valid;
	bool readonly;
	struct sockaddr_in server;
	mutex ben;
	vint32 refcnt;
	uint64 req; /* next ID for requests */
	int sock;
	thread_id postoffice;
	uint64 size;
	struct nbd_request_entry *reqs;
#ifdef MOUNT_KLUDGE
	int kludge;
#endif
};

typedef struct cookie {
	struct nbd_device *dev;
} cookie_t;

/* data=NULL on read */
status_t nbd_alloc_request(struct nbd_device *dev, struct nbd_request_entry **req, uint32 type, off_t from, size_t len, const char *data);
status_t nbd_queue_request(struct nbd_device *dev, struct nbd_request_entry *req);
status_t nbd_dequeue_request(struct nbd_device *dev, uint64 handle, struct nbd_request_entry **req);
status_t nbd_free_request(struct nbd_device *dev, struct nbd_request_entry *req);

struct nbd_device *nbd_find_device(const char* name);

int32 nbd_postoffice(void *arg);
status_t nbd_connect(struct nbd_device *dev);
status_t nbd_teardown(struct nbd_device *dev);
status_t nbd_post_request(struct nbd_device *dev, struct nbd_request_entry *req);

status_t nbd_open(const char *name, uint32 flags, cookie_t **cookie);
status_t nbd_close(cookie_t *cookie);
status_t nbd_free(cookie_t *cookie);
status_t nbd_control(cookie_t *cookie, uint32 op, void *data, size_t len);
status_t nbd_read(cookie_t *cookie, off_t position, void *data, size_t *numbytes);
status_t nbd_write(cookie_t *cookie, off_t position, const void *data, size_t *numbytes);
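
/* How a request flows through the driver (summary of the code below):
 * nbd_read()/nbd_write() build an entry with nbd_alloc_request(), send it
 * over the socket with nbd_post_request() (which also queues it), then
 * block on the entry's semaphore. The postoffice thread reads replies off
 * the socket, matches them to queued entries by handle, and releases the
 * semaphore. If the waiter already gave up, the entry is flagged 'discard'
 * and the postoffice frees it instead.
 */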

KSOCKET_MODULE_DECL;

/* HACK:
 * In BONE at least, a connect() that fails (EINTR or ETIMEDOUT)
 * keeps locked pages around (likely a bone_data,
 * until TCP gets the last ACK). If that happens, we snooze()
 * in unload_driver() to let TCP time out before the kernel
 * tries to delete the image. */
bool gDelayUnload = false;
#define BONE_TEARDOWN_DELAY 60000000

#if 0
#pragma mark ==== support ====
#endif

// move that to ksocket inlined
static int kinet_aton(const char *in, struct in_addr *addr)
{
	int i;
	unsigned long a;
	uint32 inaddr = 0L;
	char *p = (char *)in;
	for (i = 0; i < 4; i++) {
		a = strtoul(p, &p, 10);
		if (!p)
			return -1;
		/* shift each octet in from the top: the first octet ends up in the
		 * low byte, which is network byte order on little-endian hosts. */
		inaddr = (inaddr >> 8) | ((a & 0x0ff) << 24);
		*(uint32 *)addr = inaddr;
		if (!*p)
			return 0;
		p++;
	}
	return 0;
}
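
/* Usage sketch (illustrative, little-endian host assumed):
 *
 *	struct in_addr a;
 *	kinet_aton("192.168.0.1", &a);
 *	// a now holds 192.168.0.1 in network byte order, suitable for a
 *	// struct sockaddr_in's sin_addr.
 *
 * Note that only dotted-quad strings are accepted; there is no hostname
 * resolution in the kernel.
 */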

#if 0
#pragma mark ==== request manager ====
#endif

status_t nbd_alloc_request(struct nbd_device *dev, struct nbd_request_entry **req, uint32 type, off_t from, size_t len, const char *data)
{
	bool w = (type == NBD_CMD_WRITE);
	struct nbd_request_entry *r;
	status_t err = EINVAL;
	uint64 handle;
	PRINT((DP ">%s(%" B_PRIu32 ", %" B_PRIdOFF ", %ld)\n", __FUNCTION__, type,
		from, len));

	if (type != NBD_CMD_READ && type != NBD_CMD_WRITE && type != NBD_CMD_DISC)
		return err;
	if (!dev || !req || from < 0)
		return err;

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		return err;

	// atomic
	handle = dev->req++;

	//UNLOCK
	mutex_unlock(&dev->ben);

	err = ENOMEM;
	/* for reads, the data buffer is allocated along with the entry itself */
	r = malloc(sizeof(struct nbd_request_entry) + (w ? 0 : len));
	if (r == NULL)
		goto err0;
	r->next = NULL;
	err = r->sem = create_sem(0, "nbd request sem");
	if (err < 0)
		goto err1;

	r->replied = false;
	r->discard = false;
	r->handle = handle;
	r->type = type;
	r->from = from;
	r->len = len;

	r->req.magic = B_HOST_TO_BENDIAN_INT32(NBD_REQUEST_MAGIC);
	r->req.type = B_HOST_TO_BENDIAN_INT32(type);
	r->req.handle = B_HOST_TO_BENDIAN_INT64(r->handle);
	r->req.from = B_HOST_TO_BENDIAN_INT64(r->from);
	r->req.len = B_HOST_TO_BENDIAN_INT32(len);

	r->buffer = (void *)(w ? data : (((char *)r) + sizeof(struct nbd_request_entry)));

	*req = r;
	return B_OK;

err1:
	free(r);
err0:
	dprintf(DP " %s: error 0x%08" B_PRIx32 "\n", __FUNCTION__, err);
	return err;
}


status_t nbd_queue_request(struct nbd_device *dev, struct nbd_request_entry *req)
{
	PRINT((DP ">%s(handle:%" B_PRIu64 ")\n", __FUNCTION__, req->handle));
	req->next = dev->reqs;
	dev->reqs = req;
	return B_OK;
}


status_t nbd_dequeue_request(struct nbd_device *dev, uint64 handle, struct nbd_request_entry **req)
{
	struct nbd_request_entry *r, *prev;
	PRINT((DP ">%s(handle:%" B_PRIu64 ")\n", __FUNCTION__, handle));
	r = dev->reqs;
	prev = NULL;
	while (r && r->handle != handle) {
		prev = r;
		r = r->next;
	}
	if (!r)
		return ENOENT;

	if (prev)
		prev->next = r->next;
	else
		dev->reqs = r->next;

	*req = r;
	return B_OK;
}


status_t nbd_free_request(struct nbd_device *dev, struct nbd_request_entry *req)
{
	PRINT((DP ">%s(handle:%" B_PRIu64 ")\n", __FUNCTION__, req->handle));
	delete_sem(req->sem);
	free(req);
	return B_OK;
}


#if 0
#pragma mark ==== nbd handler ====
#endif
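
/* The postoffice thread below is the only reader of the socket. It relies
 * on the server sending one nbd_reply per request, in any order; replies
 * are matched back to their entry by the 64-bit handle. A minimal sketch
 * of the reply record it expects (see nbd.h for the authoritative layout):
 *
 *	struct nbd_reply {
 *		uint32 magic;	// NBD_REPLY_MAGIC, big-endian
 *		uint32 error;	// 0 on success, errno-style otherwise
 *		uint64 handle;	// echoed from the request
 *	};			// READ payload follows on success
 */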

int32 nbd_postoffice(void *arg)
{
	struct nbd_device *dev = (struct nbd_device *)arg;
	struct nbd_request_entry *req = NULL;
	struct nbd_reply reply;
	status_t err;
	const char *reason;
	PRINT((DP ">%s()\n", __FUNCTION__));

	for (;;) {
		reason = "recv";
		err = krecv(dev->sock, &reply, sizeof(reply), 0);
		if (err == -1 && errno < 0)
			err = errno;
		if (err < 0)
			goto err;
		reason = "recv:size";
		if (err < (status_t)sizeof(reply))
			err = EINVAL;
		if (err < 0)
			goto err;
		reason = "magic";
		err = EINVAL;
		if (B_BENDIAN_TO_HOST_INT32(reply.magic) != NBD_REPLY_MAGIC)
			goto err;

		reason = "lock";
		//LOCK
		err = mutex_lock(&dev->ben);
		if (err)
			goto err;

		reason = "dequeue_request";
		err = nbd_dequeue_request(dev, B_BENDIAN_TO_HOST_INT64(reply.handle), &req);

		//UNLOCK
		mutex_unlock(&dev->ben);

		if (!err && !req) {
			dprintf(DP "nbd_dequeue_request found NULL!\n");
			err = ENOENT;
		}

		if (err == B_OK) {
			memcpy(&req->reply, &reply, sizeof(reply));
			if (req->type == NBD_CMD_READ) {
				err = 0;
				reason = "recv(data)";
				if (reply.error == 0)
					err = krecv(dev->sock, req->buffer, req->len, 0);
				if (err < 0)
					goto err;
				/* tell back how much we've got (?) */
				req->len = err;
			} else {
				if (reply.error)
					req->len = 0;
			}

			reason = "lock";
			//LOCK
			err = mutex_lock(&dev->ben);
			if (err)
				goto err;

			// this also must be atomic!
			release_sem(req->sem);
			req->replied = true;
			if (req->discard)
				nbd_free_request(dev, req);

			//UNLOCK
			mutex_unlock(&dev->ben);
		}
	}

	PRINT((DP "<%s\n", __FUNCTION__));
	return 0;

err:
	dprintf(DP "%s: %s: error 0x%08" B_PRIx32 "\n", __FUNCTION__, reason, err);
	return err;
}


status_t nbd_connect(struct nbd_device *dev)
{
	struct nbd_init_packet initpkt;
	status_t err;
	PRINT((DP ">%s()\n", __FUNCTION__));

	PRINT((DP " %s: socket()\n", __FUNCTION__));
	err = dev->sock = ksocket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (err == -1 && errno < 0)
		err = errno;
	if (err < 0)
		goto err0;

	PRINT((DP " %s: connect()\n", __FUNCTION__));
	err = kconnect(dev->sock, (struct sockaddr *)&dev->server, sizeof(dev->server));
	//err = ENOSYS;
	if (err == -1 && errno < 0)
		err = errno;
	/* HACK: avoid the kernel unloading us with locked pages from TCP */
	if (err)
		gDelayUnload = true;
	if (err)
		goto err1;

	PRINT((DP " %s: recv(initpkt)\n", __FUNCTION__));
	err = krecv(dev->sock, &initpkt, sizeof(initpkt), 0);
	if (err == -1 && errno < 0)
		err = errno;
	if (err >= 0 && err < (status_t)sizeof(initpkt))
		err = EINVAL; /* short read; don't let the unsigned comparison hide it */
	if (err < 0)
		goto err2;
	err = EINVAL;//EPROTO;
	if (memcmp(initpkt.passwd, NBD_INIT_PASSWD, sizeof(initpkt.passwd)))
		goto err3;
	if (B_BENDIAN_TO_HOST_INT64(initpkt.magic) != NBD_INIT_MAGIC)
		goto err3;

	dev->size = B_BENDIAN_TO_HOST_INT64(initpkt.device_size);

	dprintf(DP " %s: connected, device size %" B_PRIu64 " bytes.\n",
		__FUNCTION__, dev->size);

	err = dev->postoffice = spawn_kernel_thread(nbd_postoffice, "nbd postoffice", B_REAL_TIME_PRIORITY, dev);
	if (err < B_OK)
		goto err4;
	resume_thread(dev->postoffice);

	PRINT((DP "<%s\n", __FUNCTION__));
	return B_OK;

err4:
	dev->postoffice = -1;
err3:
err2:
err1:
	kclosesocket(dev->sock);
	dev->sock = -1;
err0:
	dprintf(DP "<%s: error 0x%08" B_PRIx32 "\n", __FUNCTION__, err);
	return err;
}


status_t nbd_teardown(struct nbd_device *dev)
{
	status_t ret;
	PRINT((DP ">%s()\n", __FUNCTION__));
	kshutdown(dev->sock, SHUT_RDWR);
	kclosesocket(dev->sock);
	dev->sock = -1;
	wait_for_thread(dev->postoffice, &ret);
	return B_OK;
}


status_t nbd_post_request(struct nbd_device *dev, struct nbd_request_entry *req)
{
	status_t err;
	PRINT((DP ">%s(handle:%" B_PRIu64 ")\n", __FUNCTION__, req->handle));

	err = ksend(dev->sock, &req->req, sizeof(req->req), 0);
	if (err < 0)
		return err;

	if (req->type == NBD_CMD_WRITE) {
		err = ksend(dev->sock, req->buffer, req->len, 0);
		if (err < 0)
			return err;
		/* only writes may update len here: for reads, len must keep the
		 * requested size so the postoffice knows how much to recv(). */
		req->len = err;
	}

	err = nbd_queue_request(dev, req);
	return err;
}


#if 0
#pragma mark ==== device hooks ====
#endif

static struct nbd_device nbd_devices[MAX_NBDS];
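
/* Userland usage sketch (hypothetical client code, not part of the driver):
 * once a device is configured and the server is reachable, it behaves like
 * any raw disk device.
 *
 *	int fd = open("/dev/disk/virtual/nbd/0/raw", O_RDONLY);
 *	char block[BLKSIZE];
 *	if (fd >= 0 && read(fd, block, sizeof(block)) == (ssize_t)sizeof(block))
 *		; // first 512 bytes of the exported device
 *	close(fd);
 */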

status_t nbd_open(const char *name, uint32 flags, cookie_t **cookie) {
	status_t err;
#ifdef MOUNT_KLUDGE
	int32 refcnt;
	int kfd;
#endif
	struct nbd_device *dev = NULL;
	PRINT((DP ">%s(%s, %" B_PRIx32 ", )\n", __FUNCTION__, name, flags));
	(void)flags;
	dev = nbd_find_device(name);
	if (!dev || !dev->valid)
		return ENOENT;
	err = ENOMEM;
	*cookie = (void*)malloc(sizeof(cookie_t));
	if (*cookie == NULL)
		goto err0;
	memset(*cookie, 0, sizeof(cookie_t));
	(*cookie)->dev = dev;
	err = mutex_lock(&dev->ben);
	if (err)
		goto err1;
	/* connect lazily, on the first open */
	if (dev->sock < 0)
		err = nbd_connect(dev);
	if (err)
		goto err2;
	dev->refcnt++;
#ifdef MOUNT_KLUDGE
	refcnt = dev->refcnt - 1; /* value before this open */
	kfd = dev->kludge;
	dev->kludge = -1;
#endif
	mutex_unlock(&dev->ben);

#ifdef MOUNT_KLUDGE
	if (refcnt == 0) {
		char buf[32];
		sprintf(buf, "/dev/%s", name);
		dev->kludge = open(buf, O_RDONLY);
	} else if (kfd > -1) {
		close(kfd);
	}
#endif

	return B_OK;

err2:
	mutex_unlock(&dev->ben);
err1:
	free(*cookie);
err0:
	dprintf(DP " %s: error 0x%08" B_PRIx32 "\n", __FUNCTION__, err);
	return err;
}


status_t nbd_close(cookie_t *cookie) {
	struct nbd_device *dev = cookie->dev;
	status_t err;
#ifdef MOUNT_KLUDGE
	int kfd = -1;
#endif
	PRINT((DP ">%s(%d)\n", __FUNCTION__, WHICH(cookie->dev)));

	err = mutex_lock(&dev->ben);
	if (err)
		return err;

	// XXX: do something ?
#ifdef MOUNT_KLUDGE
	kfd = dev->kludge;
	dev->kludge = -1;
#endif

	mutex_unlock(&dev->ben);

#ifdef MOUNT_KLUDGE
	if (kfd > -1) {
		close(kfd);
	}
#endif
	return B_OK;
}


status_t nbd_free(cookie_t *cookie) {
	struct nbd_device *dev = cookie->dev;
	status_t err;
	PRINT((DP ">%s(%d)\n", __FUNCTION__, WHICH(cookie->dev)));

	err = mutex_lock(&dev->ben);
	if (err)
		return err;

	if (--dev->refcnt == 0) {
		err = nbd_teardown(dev);
	}

	mutex_unlock(&dev->ben);

	free(cookie);
	return err;
}
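
/* nbd_control() below fakes a geometry for partitioning code: 1 head,
 * 1 sector per track, and one cylinder per 512-byte block. For example a
 * 1 GiB export reports cylinder_count = 2^30 / 512 = 2097152. Assumption:
 * callers only care about the total size implied by the geometry.
 */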

status_t nbd_control(cookie_t *cookie, uint32 op, void *data, size_t len) {
	PRINT((DP ">%s(%d, %" B_PRIu32 ", , %ld)\n", __FUNCTION__,
		WHICH(cookie->dev), op, len));
	switch (op) {
	case B_GET_DEVICE_SIZE: /* this one is broken anyway... */
		if (data) {
			*(size_t *)data = (size_t)cookie->dev->size;
			return B_OK;
		}
		return EINVAL;
	case B_SET_DEVICE_SIZE: /* broken */
		return EINVAL;
	case B_SET_NONBLOCKING_IO:
		return EINVAL;
	case B_SET_BLOCKING_IO:
		return B_OK;
	case B_GET_READ_STATUS:
	case B_GET_WRITE_STATUS:
		if (data) {
			*(bool *)data = false;
			return B_OK;
		}
		return EINVAL;
	case B_GET_GEOMETRY:
	case B_GET_BIOS_GEOMETRY:
		if (data) {
			device_geometry *geom = (device_geometry *)data;
			geom->bytes_per_sector = BLKSIZE;
			geom->sectors_per_track = 1;
			geom->cylinder_count = cookie->dev->size / BLKSIZE;
			geom->head_count = 1;
			geom->device_type = B_DISK;
			geom->removable = false;
			geom->read_only = cookie->dev->readonly;
			geom->write_once = false;
			return B_OK;
		}
		return EINVAL;
	case B_GET_MEDIA_STATUS:
		if (data) {
			*(status_t *)data = B_OK;
			return B_OK;
		}
		return EINVAL;

	case B_EJECT_DEVICE:
	case B_LOAD_MEDIA:
		return B_BAD_VALUE;
	case B_FLUSH_DRIVE_CACHE: /* wait for the request list to drain? */
		return B_OK;
	default:
		return B_BAD_VALUE;
	}
	return B_NOT_ALLOWED;
}


status_t nbd_read(cookie_t *cookie, off_t position, void *data, size_t *numbytes) {
	struct nbd_device *dev = cookie->dev;
	struct nbd_request_entry *req;
	status_t err, semerr;
	PRINT((DP ">%s(%d, %" B_PRIdOFF ", , )\n", __FUNCTION__,
		WHICH(cookie->dev), position));

	if (position < 0)
		return EINVAL;
	if (!data)
		return EINVAL;

	err = nbd_alloc_request(dev, &req, NBD_CMD_READ, position, *numbytes, NULL);
	if (err)
		goto err0;

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		goto err1;

	err = nbd_post_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (err)
		goto err2;

	semerr = acquire_sem(req->sem);

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		goto err3;

	/* bad scenarios */
	if (!req->replied)
		req->discard = true;
	else if (semerr)
		nbd_free_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (semerr == B_OK) {
		*numbytes = req->len;
		memcpy(data, req->buffer, req->len);
		err = B_OK;
		if (*numbytes == 0 && req->reply.error)
			err = EIO;
		nbd_free_request(dev, req);
		return err;
	}

	*numbytes = 0;
	return semerr;

err3:
err2:
err1:
	nbd_free_request(dev, req);
err0:
	*numbytes = 0;
	return err;
}


status_t nbd_write(cookie_t *cookie, off_t position, const void *data, size_t *numbytes) {
	struct nbd_device *dev = cookie->dev;
	struct nbd_request_entry *req;
	status_t err, semerr;
	PRINT((DP ">%s(%d, %" B_PRIdOFF ", %ld, )\n", __FUNCTION__,
		WHICH(cookie->dev), position, *numbytes));

	if (position < 0)
		return EINVAL;
	if (!data)
		return EINVAL;
	err = B_NOT_ALLOWED;
	if (dev->readonly)
		goto err0;

	err = nbd_alloc_request(dev, &req, NBD_CMD_WRITE, position, *numbytes, data);
	if (err)
		goto err0;

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		goto err1;

	/* sending request+data must be atomic */
	err = nbd_post_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (err)
		goto err2;

	semerr = acquire_sem(req->sem);

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		goto err3;

	/* bad scenarios */
	if (!req->replied)
		req->discard = true;
	else if (semerr)
		nbd_free_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (semerr == B_OK) {
		*numbytes = req->len;
		err = B_OK;
		if (*numbytes == 0 && req->reply.error)
			err = EIO;
		nbd_free_request(dev, req);
		return err;
	}

	*numbytes = 0;
	return semerr;

err3:
err2:
err1:
	nbd_free_request(dev, req);
err0:
	*numbytes = 0;
	return err;
}


device_hooks nbd_hooks = {
	(device_open_hook)nbd_open,
	(device_close_hook)nbd_close,
	(device_free_hook)nbd_free,
	(device_control_hook)nbd_control,
	(device_read_hook)nbd_read,
	(device_write_hook)nbd_write,
	NULL,
	NULL,
	NULL,
	NULL
};

#if 0
#pragma mark ==== driver hooks ====
#endif

int32 api_version = B_CUR_DRIVER_API_VERSION;

static char *nbd_name[MAX_NBDS+1] = {
	NULL
};


status_t
init_hardware (void)
{
	PRINT((DP ">%s()\n", __FUNCTION__));
	return B_OK;
}
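
/* Settings are read from the driver settings file named after DRV ("nbd"),
 * i.e. /boot/home/config/settings/kernel/drivers/nbd on a default install.
 * A sketch of what init_driver() below parses (addresses and ports are
 * illustrative; the server address must be a dotted-quad, see kinet_aton()):
 *
 *	# export 0: read-write
 *	0 {
 *		server 192.168.0.1 1234
 *	}
 *	# export 1: read-only
 *	1 {
 *		server 192.168.0.2 5678
 *		readonly
 *	}
 */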

status_t
init_driver (void)
{
	status_t err;
	int i, j;
	// XXX: load settings
	void *handle;
	char **names = nbd_name;
	PRINT((DP ">%s()\n", __FUNCTION__));

	handle = load_driver_settings(DRV);
	if (handle == NULL)
		return ENOENT;
	// XXX: test for boot args ?

	err = ksocket_init();
	if (err < B_OK) {
		unload_driver_settings(handle);
		return err;
	}

	for (i = 0; i < MAX_NBDS; i++) {
		nbd_devices[i].valid = false;
		nbd_devices[i].readonly = false;
		mutex_init(&nbd_devices[i].ben, "nbd lock");
		nbd_devices[i].refcnt = 0;
		nbd_devices[i].req = 0LL; /* next ID for requests */
		nbd_devices[i].sock = -1;
		nbd_devices[i].postoffice = -1;
		nbd_devices[i].size = 0LL;
		nbd_devices[i].reqs = NULL;
#ifdef MOUNT_KLUDGE
		nbd_devices[i].kludge = -1;
#endif
		nbd_name[i] = NULL;
	}

	for (i = 0; i < MAX_NBDS; i++) {
		const driver_settings *settings = get_driver_settings(handle);
		driver_parameter *p = NULL;
		char keyname[10];
		sprintf(keyname, "%d", i);
		for (j = 0; j < settings->parameter_count; j++)
			if (!strcmp(settings->parameters[j].name, keyname))
				p = &settings->parameters[j];
		if (!p)
			continue;
		for (j = 0; j < p->parameter_count; j++) {
			if (!strcmp(p->parameters[j].name, "readonly"))
				nbd_devices[i].readonly = true;
			if (!strcmp(p->parameters[j].name, "server")) {
				if (p->parameters[j].value_count < 2)
					continue;
				nbd_devices[i].server.sin_len = sizeof(struct sockaddr_in);
				nbd_devices[i].server.sin_family = AF_INET;
				kinet_aton(p->parameters[j].values[0], &nbd_devices[i].server.sin_addr);
				nbd_devices[i].server.sin_port = htons(atoi(p->parameters[j].values[1]));
				dprintf(DP " configured [%d]\n", i);
				*(names) = malloc(DEVICE_NAME_MAX);
				if (*(names) == NULL) {
					unload_driver_settings(handle);
					return ENOMEM;
				}
				sprintf(*(names++), DEVICE_FMT, i);
				nbd_devices[i].valid = true;
			}
		}
	}
	*names = NULL;

	unload_driver_settings(handle);
	return B_OK;
}


void
uninit_driver (void)
{
	int i;
	PRINT((DP ">%s()\n", __FUNCTION__));
	for (i = 0; i < MAX_NBDS; i++) {
		free(nbd_name[i]);
		mutex_destroy(&nbd_devices[i].ben);
	}
	ksocket_cleanup();
	/* HACK */
	if (gDelayUnload)
		snooze(BONE_TEARDOWN_DELAY);
}


const char**
publish_devices()
{
	PRINT((DP ">%s()\n", __FUNCTION__));
	return (const char **)nbd_name;
}


device_hooks*
find_device(const char* name)
{
	PRINT((DP ">%s(%s)\n", __FUNCTION__, name));
	return &nbd_hooks;
}


struct nbd_device*
nbd_find_device(const char* name)
{
	int i;
	PRINT((DP ">%s(%s)\n", __FUNCTION__, name));
	for (i = 0; i < MAX_NBDS; i++) {
		char buf[DEVICE_NAME_MAX];
		sprintf(buf, DEVICE_FMT, i);
		if (!strcmp(buf, name))
			return &nbd_devices[i];
	}
	return NULL;
}