xref: /haiku/src/add-ons/kernel/drivers/disk/virtual/nbd/nbd.c (revision 6dcd0ccf238263a3e5eb2e2a44e2ed0da1617a42)
1 /*
2  * Copyright 2006-2007, François Revol. All rights reserved.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 /*
7  * nbd driver for Haiku
8  *
9  * Maps a Network Block Device as virtual partitions.
10  */
11 
12 #include <ByteOrder.h>
13 #include <KernelExport.h>
14 #include <Drivers.h>
15 #include <driver_settings.h>
16 #include <Errors.h>
17 #include <errno.h>
18 #include <ksocket.h>
19 #include <netinet/in.h>
20 
21 //#define DEBUG 1
22 
23 /* on the first open(), open ourselves for some seconds,
24  * to avoid trying to reconnect and failing on a 2nd open,
25  * as it happens with the python server.
26  */
27 //#define MOUNT_KLUDGE
28 
29 
30 /* names, ohh names... */
31 #ifndef SHUT_RDWR
32 #define SHUT_RDWR SHUTDOWN_BOTH
33 #endif
34 
35 /* locking support */
36 #ifdef __HAIKU__
37 #include <kernel/lock.h>
38 #else
39 /* wrappers for R5 */
40 #ifndef _IMPEXP_KERNEL
41 #define _IMPEXP_KERNEL
42 #endif
43 #include "lock.h"
44 #define benaphore lock
45 #define benaphore_init new_lock
46 #define benaphore_destroy free_lock
47 #define benaphore_lock LOCK
48 #define benaphore_unlock UNLOCK
49 #endif
50 
51 #define DEBUG 1
52 
53 #include "nbd.h"
54 
55 #define DRV "nbd"
56 #define DP "nbd:"
57 #define MAX_NBDS 4
58 #define DEVICE_PREFIX "disk/virtual/nbd/"
59 #define DEVICE_FMT DEVICE_PREFIX "%d/raw"
60 #define DEVICE_NAME_MAX 32
61 #define MAX_REQ_SIZE (32*1024*1024)
62 #define BLKSIZE 512
63 
64 /* debugging */
65 #if DEBUG
66 #define PRINT(a) dprintf a
67 #define WHICH(dev) ((int)(dev - nbd_devices))
68 #else
69 #define PRINT(a)
70 #endif
71 
/* One in-flight request, kept on nbd_device.reqs until its reply arrives.
 * Created by nbd_alloc_request(), matched to a reply by the postoffice
 * thread via `handle`, released with nbd_free_request(). */
struct nbd_request_entry {
	struct nbd_request_entry *next;	/* singly-linked pending list */
	struct nbd_request req; /* net byte order */
	struct nbd_reply reply; /* net byte order */
	sem_id sem;	/* released by the postoffice thread when the reply is in */
	bool replied;	/* reply has been received and copied in */
	bool discard;	/* requester gave up; postoffice frees the entry */
	uint64 handle;	/* host-order copy of req.handle (per-device counter) */
	uint32 type;	/* NBD_CMD_* (host byte order) */
	uint64 from;	/* byte offset on the device (host byte order) */
	size_t len;	/* requested length; updated to the actual transfer size */
	void *buffer; /* write: ptr to passed buffer; read: ptr to malloc()ed extra */
};
85 
/* Per-device state; one slot per possible device (see nbd_devices[]). */
struct nbd_device {
	bool valid;	/* configured via driver settings in init_driver() */
	bool readonly;	/* "readonly" flag from the settings file */
	struct sockaddr_in server;	/* server address from the settings file */
	benaphore ben;	/* protects the mutable fields below */
	vint32 refcnt;	/* open() count; connection torn down when it hits 0 */
	uint64 req; /* next ID for requests */
	int sock;	/* TCP socket to the server, -1 when disconnected */
	thread_id postoffice;	/* reply-reader thread, -1 when not running */
	uint64 size;	/* device size in bytes, from the NBD handshake */
	struct nbd_request_entry *reqs;	/* pending requests (LIFO list) */
#ifdef MOUNT_KLUDGE
	int kludge;	/* fd we hold open on ourselves; see MOUNT_KLUDGE above */
#endif
};
101 
/* Per-open() cookie: just a back-pointer to the device slot. */
typedef struct cookie {
	struct nbd_device *dev;

} cookie_t;
106 
/* request bookkeeping; definitions below */
/* data=NULL on read */
status_t nbd_alloc_request(struct nbd_device *dev, struct nbd_request_entry **req, uint32 type, off_t from, size_t len, const char *data);
status_t nbd_queue_request(struct nbd_device *dev, struct nbd_request_entry *req);
status_t nbd_dequeue_request(struct nbd_device *dev, uint64 handle, struct nbd_request_entry **req);
status_t nbd_free_request(struct nbd_device *dev, struct nbd_request_entry *req);

/* maps a published device name back to its slot in nbd_devices[] */
struct nbd_device *nbd_find_device(const char* name);

KSOCKET_MODULE_DECL;
116 
/* HACK:
 * In BONE at least, if connect() fails (EINTR or ETIMEDOUT) it
 * keeps locked pages around (likely a bone_data) until TCP gets
 * the last ACK. If that happens, we snooze() in uninit_driver()
 * to let TCP time out before the kernel tries to delete the image. */
bool gDelayUnload = false;
#define BONE_TEARDOWN_DELAY 60000000	/* 60 s, in microseconds */
125 
126 #pragma mark ==== support ====
127 
128 // move that to ksocket inlined
// move that to ksocket inlined
/*
 * Minimal inet_aton() for kernel use: parse a dotted-quad IPv4 string
 * ("a.b.c.d", each component decimal, truncated to 8 bits) into *addr
 * in network byte order.
 * Returns 0 on success, -1 on malformed input (no digits where a
 * component is expected).
 * Note: like the historical version, a string that ends early (e.g.
 * "1.2") is accepted, leaving only the parsed octets shifted in.
 * NOTE(review): the shift scheme yields network order only on
 * little-endian hosts — fine for the platforms this driver targets.
 */
static int kinet_aton(const char *in, struct in_addr *addr)
{
	int i;
	unsigned long octet;
	in_addr_t inaddr = 0;
	const char *p = in;
	char *end;
	for (i = 0; i < 4; i++) {
		octet = strtoul(p, &end, 10);
		/* strtoul never returns a NULL end pointer; "no digits
		 * consumed" is the real failure condition */
		if (end == p)
			return -1;
		inaddr = (inaddr >> 8) | ((in_addr_t)(octet & 0x0ff) << 24);
		addr->s_addr = inaddr;
		if (!*end)
			return 0;
		p = end + 1;	/* skip the separator */
	}
	return 0;
}
147 
148 #pragma mark ==== request manager ====
149 
150 status_t nbd_alloc_request(struct nbd_device *dev, struct nbd_request_entry **req, uint32 type, off_t from, size_t len, const char *data)
151 {
152 	bool w = (type == NBD_CMD_WRITE);
153 	struct nbd_request_entry *r;
154 	status_t err = EINVAL;
155 	uint64 handle;
156 	PRINT((DP ">%s(%ld, %Ld, %ld)\n", __FUNCTION__, type, from, len));
157 
158 	if (type != NBD_CMD_READ && type != NBD_CMD_WRITE && type != NBD_CMD_DISC)
159 		return err;
160 	if (!dev || !req || from < 0)
161 		return err;
162 
163 	//LOCK
164 	err = benaphore_lock(&dev->ben);
165 	if (err)
166 		return err;
167 
168 	// atomic
169 	handle = dev->req++;
170 
171 
172 	//UNLOCK
173 	benaphore_unlock(&dev->ben);
174 
175 	err = ENOMEM;
176 	r = malloc(sizeof(struct nbd_request_entry) + (w ? 0 : len));
177 	if (r == NULL)
178 		goto err0;
179 	r->next = NULL;
180 	err = r->sem = create_sem(0, "nbd request sem");
181 	if (err < 0)
182 		goto err1;
183 
184 	r->replied = false;
185 	r->discard = false;
186 	r->handle = handle;
187 	r->type = type;
188 	r->from = from;
189 	r->len = len;
190 
191 	r->req.magic = B_HOST_TO_BENDIAN_INT32(NBD_REQUEST_MAGIC);
192 	r->req.type = B_HOST_TO_BENDIAN_INT32(type);
193 	r->req.handle = B_HOST_TO_BENDIAN_INT64(r->handle);
194 	r->req.from = B_HOST_TO_BENDIAN_INT64(r->from);
195 	r->req.len = B_HOST_TO_BENDIAN_INT32(len);
196 
197 	r->buffer = w ? data : (((char *)r) + sizeof(struct nbd_request_entry));
198 
199 	*req = r;
200 	return B_OK;
201 
202 err1:
203 	free(r);
204 err0:
205 	dprintf(DP " %s: error 0x%08lx\n", __FUNCTION__, err);
206 	return err;
207 }
208 
/* Prepend req to the device's pending-request list.
 * Callers hold dev->ben: the list is also walked by the postoffice
 * thread (see nbd_post_request() call sites in nbd_read/nbd_write). */
status_t nbd_queue_request(struct nbd_device *dev, struct nbd_request_entry *req)
{
	PRINT((DP ">%s(handle:%Ld)\n", __FUNCTION__, req->handle));
	req->next = dev->reqs;
	dev->reqs = req;
	return B_OK;
}
216 
217 status_t nbd_dequeue_request(struct nbd_device *dev, uint64 handle, struct nbd_request_entry **req)
218 {
219 	struct nbd_request_entry *r, *prev;
220 	PRINT((DP ">%s(handle:%Ld)\n", __FUNCTION__, handle));
221 	r = dev->reqs;
222 	prev = NULL;
223 	while (r && r->handle != handle) {
224 		prev = r;
225 		r = r->next;
226 	}
227 	if (!r)
228 		return ENOENT;
229 
230 	if (prev)
231 		prev->next = r->next;
232 	else
233 		dev->reqs = r->next;
234 
235 	*req = r;
236 	return B_OK;
237 }
238 
/* Release a request entry and its semaphore.
 * A write's caller-owned buffer is untouched; a read's data area was
 * allocated together with the entry, so one free() releases both. */
status_t nbd_free_request(struct nbd_device *dev, struct nbd_request_entry *req)
{
	PRINT((DP ">%s(handle:%Ld)\n", __FUNCTION__, req->handle));
	delete_sem(req->sem);
	free(req);
	return B_OK;
}
246 
247 
248 
249 #pragma mark ==== nbd handler ====
250 
251 int32 nbd_postoffice(void *arg)
252 {
253 	struct nbd_device *dev = (struct nbd_device *)arg;
254 	struct nbd_request_entry *req = NULL;
255 	struct nbd_reply reply;
256 	int sock = dev->sock;
257 	status_t err;
258 	const char *reason;
259 	PRINT((DP ">%s()\n", __FUNCTION__));
260 
261 	for (;;) {
262 		reason = "recv";
263 		err = krecv(dev->sock, &reply, sizeof(reply), 0);
264 		if (err == -1 && errno < 0)
265 			err = errno;
266 		if (err < 0)
267 			goto err;
268 		reason = "recv:size";
269 		if (err < sizeof(reply))
270 			err = EINVAL;
271 		if (err < 0)
272 			goto err;
273 		reason = "magic";
274 		err = EINVAL;
275 		if (B_BENDIAN_TO_HOST_INT32(reply.magic) != NBD_REPLY_MAGIC)
276 			goto err;
277 
278 		reason = "lock";
279 		//LOCK
280 		err = benaphore_lock(&dev->ben);
281 		if (err)
282 			goto err;
283 
284 		reason = "dequeue_request";
285 		err = nbd_dequeue_request(dev, B_BENDIAN_TO_HOST_INT64(reply.handle), &req);
286 
287 		//UNLOCK
288 		benaphore_unlock(&dev->ben);
289 
290 		if (!err && !req) {
291 			dprintf(DP "nbd_dequeue_rquest found NULL!\n");
292 			err = ENOENT;
293 		}
294 
295 		if (err == B_OK) {
296 			memcpy(&req->reply, &reply, sizeof(reply));
297 			if (req->type == NBD_CMD_READ) {
298 				err = 0;
299 				reason = "recv(data)";
300 				if (reply.error == 0)
301 					err = krecv(dev->sock, req->buffer, req->len, 0);
302 				if (err < 0)
303 					goto err;
304 				/* tell back how much we've got (?) */
305 				req->len = err;
306 			} else {
307 				if (reply.error)
308 					req->len = 0;
309 			}
310 
311 			reason = "lock";
312 			//LOCK
313 			err = benaphore_lock(&dev->ben);
314 			if (err)
315 				goto err;
316 
317 			// this also must be atomic!
318 			release_sem(req->sem);
319 			req->replied = true;
320 			if (req->discard)
321 				nbd_free_request(dev, req);
322 
323 			//UNLOCK
324 			benaphore_unlock(&dev->ben);
325 		}
326 
327 	}
328 
329 	PRINT((DP "<%s\n", __FUNCTION__));
330 	return 0;
331 
332 err:
333 	dprintf(DP "%s: %s: error 0x%08lx\n", __FUNCTION__, reason, err);
334 	return err;
335 }
336 
337 status_t nbd_connect(struct nbd_device *dev)
338 {
339 	struct nbd_init_packet initpkt;
340 	status_t err;
341 	PRINT((DP ">%s()\n", __FUNCTION__));
342 
343 	PRINT((DP " %s: socket()\n", __FUNCTION__));
344 	err = dev->sock = ksocket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
345 	if (err == -1 && errno < 0)
346 		err = errno;
347 	if (err < 0)
348 		goto err0;
349 
350 	PRINT((DP " %s: connect()\n", __FUNCTION__));
351 	err = kconnect(dev->sock, (struct sockaddr *)&dev->server, sizeof(dev->server));
352 	//err = ENOSYS;
353 	if (err == -1 && errno < 0)
354 		err = errno;
355 	/* HACK: avoid the kernel unlading us with locked pages from TCP */
356 	if (err)
357 		gDelayUnload = true;
358 	if (err)
359 		goto err1;
360 
361 	PRINT((DP " %s: recv(initpkt)\n", __FUNCTION__));
362 	err = krecv(dev->sock, &initpkt, sizeof(initpkt), 0);
363 	if (err == -1 && errno < 0)
364 		err = errno;
365 	if (err < sizeof(initpkt))
366 		goto err2;
367 	err = EINVAL;//EPROTO;
368 	if (memcmp(initpkt.passwd, NBD_INIT_PASSWD, sizeof(initpkt.passwd)))
369 		goto err3;
370 	if (B_BENDIAN_TO_HOST_INT64(initpkt.magic) != NBD_INIT_MAGIC)
371 		goto err3;
372 
373 	dev->size = B_BENDIAN_TO_HOST_INT64(initpkt.device_size);
374 
375 	dprintf(DP " %s: connected, device size %Ld bytes.\n", __FUNCTION__, dev->size);
376 
377 	err = dev->postoffice = spawn_kernel_thread(nbd_postoffice, "nbd postoffice", B_REAL_TIME_PRIORITY, dev);
378 	if (err < B_OK)
379 		goto err4;
380 	resume_thread(dev->postoffice);
381 
382 	PRINT((DP "<%s\n", __FUNCTION__));
383 	return B_OK;
384 
385 err4:
386 	dev->postoffice = -1;
387 err3:
388 err2:
389 err1:
390 	kclosesocket(dev->sock);
391 	dev->sock = -1;
392 err0:
393 	dprintf(DP "<%s: error 0x%08lx\n", __FUNCTION__, err);
394 	return err;
395 }
396 
397 status_t nbd_teardown(struct nbd_device *dev)
398 {
399 	status_t err, ret;
400 	PRINT((DP ">%s()\n", __FUNCTION__));
401 	kshutdown(dev->sock, SHUT_RDWR);
402 	kclosesocket(dev->sock);
403 	dev->sock = -1;
404 	err = wait_for_thread(dev->postoffice, &ret);
405 	return B_OK;
406 }
407 
408 status_t nbd_post_request(struct nbd_device *dev, struct nbd_request_entry *req)
409 {
410 	status_t err;
411 	PRINT((DP ">%s(handle:%Ld)\n", __FUNCTION__, req->handle));
412 
413 	err = ksend(dev->sock, &req->req, sizeof(req->req), 0);
414 	if (err < 0)
415 		return err;
416 
417 	if (req->type == NBD_CMD_WRITE)
418 		err = ksend(dev->sock, req->buffer, req->len, 0);
419 	if (err < 0)
420 		return err;
421 	else
422 		req->len = err;
423 
424 	err = nbd_queue_request(dev, req);
425 	return err;
426 }
427 
428 
429 #pragma mark ==== device hooks ====
430 
431 static struct nbd_device nbd_devices[MAX_NBDS];
432 
433 status_t nbd_open(const char *name, uint32 flags, cookie_t **cookie) {
434 	status_t err;
435 	int32 refcnt;
436 #ifdef MOUNT_KLUDGE
437 	int kfd;
438 #endif
439 	struct nbd_device *dev = NULL;
440 	PRINT((DP ">%s(%s, %x, )\n", __FUNCTION__, name, flags));
441 	(void)name; (void)flags;
442 	dev = nbd_find_device(name);
443 	if (!dev || !dev->valid)
444 		return ENOENT;
445 	err = ENOMEM;
446 	*cookie = (void*)malloc(sizeof(cookie_t));
447 	if (*cookie == NULL)
448 		goto err0;
449 	memset(*cookie, 0, sizeof(cookie_t));
450 	(*cookie)->dev = dev;
451 	err = benaphore_lock(&dev->ben);
452 	if (err)
453 		goto err1;
454 	/*  */
455 	if (dev->sock < 0)
456 		err = nbd_connect(dev);
457 	if (err)
458 		goto err2;
459 	refcnt = dev->refcnt++;
460 #ifdef MOUNT_KLUDGE
461 	kfd = dev->kludge;
462 	dev->kludge = -1;
463 #endif
464 	benaphore_unlock(&dev->ben);
465 
466 #ifdef MOUNT_KLUDGE
467 	if (refcnt == 0) {
468 		char buf[32];
469 		sprintf(buf, "/dev/%s", name);
470 		dev->kludge = open(buf, O_RDONLY);
471 	} else if (kfd) {
472 		close(kfd);
473 	}
474 #endif
475 
476 	return B_OK;
477 
478 err2:
479 	benaphore_unlock(&dev->ben);
480 err1:
481 	free(*cookie);
482 err0:
483 	dprintf(DP " %s: error 0x%08lx\n", __FUNCTION__, err);
484 	return err;
485 }
486 
487 status_t nbd_close(cookie_t *cookie) {
488 	struct nbd_device *dev = cookie->dev;
489 	status_t err;
490 #ifdef MOUNT_KLUDGE
491 	int kfd = -1;
492 #endif
493 	PRINT((DP ">%s(%d)\n", __FUNCTION__, WHICH(cookie->dev)));
494 
495 	err = benaphore_lock(&dev->ben);
496 	if (err)
497 		return err;
498 
499 	// XXX: do something ?
500 #ifdef MOUNT_KLUDGE
501 	kfd = dev->kludge;
502 	dev->kludge = -1;
503 #endif
504 
505 	benaphore_unlock(&dev->ben);
506 
507 #ifdef MOUNT_KLUDGE
508 	if (kfd > -1) {
509 		close(kfd);
510 	}
511 #endif
512 	return B_OK;
513 }
514 
515 status_t nbd_free(cookie_t *cookie) {
516 	struct nbd_device *dev = cookie->dev;
517 	status_t err;
518 	PRINT((DP ">%s(%d)\n", __FUNCTION__, WHICH(cookie->dev)));
519 
520 	err = benaphore_lock(&dev->ben);
521 	if (err)
522 		return err;
523 
524 	if (--dev->refcnt == 0) {
525 		err = nbd_teardown(dev);
526 	}
527 
528 	benaphore_unlock(&dev->ben);
529 
530 	free(cookie);
531 	return err;
532 }
533 
534 status_t nbd_control(cookie_t *cookie, uint32 op, void *data, size_t len) {
535 	PRINT((DP ">%s(%d, %lu, , %d)\n", __FUNCTION__, WHICH(cookie->dev), op, len));
536 	switch (op) {
537 	case B_GET_DEVICE_SIZE: /* this one is broken anyway... */
538 		if (data) {
539 			*(size_t *)data = (size_t)cookie->dev->size;
540 			return B_OK;
541 		}
542 		return EINVAL;
543 	case B_SET_DEVICE_SIZE: /* broken */
544 		return EINVAL;
545 	case B_SET_NONBLOCKING_IO:
546 		return EINVAL;
547 	case B_SET_BLOCKING_IO:
548 		return B_OK;
549 	case B_GET_READ_STATUS:
550 	case B_GET_WRITE_STATUS:
551 		if (data) {
552 			*(bool *)data = false;
553 			return B_OK;
554 		}
555 		return EINVAL;
556 	case B_GET_GEOMETRY:
557 	case B_GET_BIOS_GEOMETRY:
558 		if (data) {
559 			device_geometry *geom = (device_geometry *)data;
560 			geom->bytes_per_sector = BLKSIZE;
561 			geom->sectors_per_track = 1;
562 			geom->cylinder_count = cookie->dev->size / BLKSIZE;
563 			geom->head_count = 1;
564 			geom->device_type = B_DISK;
565 			geom->removable = false;
566 			geom->read_only = cookie->dev->readonly;
567 			geom->write_once = false;
568 			return B_OK;
569 		}
570 		return EINVAL;
571 	case B_GET_MEDIA_STATUS:
572 		if (data) {
573 			*(status_t *)data = B_OK;
574 			return B_OK;
575 		}
576 		return EINVAL;
577 
578 	case B_EJECT_DEVICE:
579 	case B_LOAD_MEDIA:
580 		return B_BAD_VALUE;
581 	case B_FLUSH_DRIVE_CACHE: /* wait for request list to be empty ? */
582 		return B_OK;
583 	default:
584 		return B_BAD_VALUE;
585 	}
586 	return B_NOT_ALLOWED;
587 }
588 
/*
 * read() hook: post an NBD_CMD_READ and block until the postoffice
 * thread matches the reply. On success *numbytes is the number of
 * bytes actually received (may be less than requested).
 */
status_t nbd_read(cookie_t *cookie, off_t position, void *data, size_t *numbytes) {
	struct nbd_device *dev = cookie->dev;
	struct nbd_request_entry *req;
	status_t err, semerr;
	PRINT((DP ">%s(%d, %Ld, , )\n", __FUNCTION__, WHICH(cookie->dev), position));

	if (position < 0)
		return EINVAL;
	if (!data)
		return EINVAL;

	err = nbd_alloc_request(dev, &req, NBD_CMD_READ, position, *numbytes, NULL);
	if (err)
		goto err0;

	//LOCK
	err = benaphore_lock(&dev->ben);
	if (err)
		goto err1;

	/* send + queue must be atomic wrt other requests */
	err = nbd_post_request(dev, req);

	//UNLOCK
	benaphore_unlock(&dev->ben);

	if (err)
		goto err2;

	/* block until the postoffice thread releases the request's sem */
	semerr = acquire_sem(req->sem);

	//LOCK
	err = benaphore_lock(&dev->ben);
	if(err)
		goto err3;

	/* bad scenarii:
	 * - no reply yet: mark the entry discarded so the postoffice
	 *   thread frees it when the reply eventually arrives;
	 * - reply arrived but the sem acquisition failed: free it here */
	if (!req->replied)
		req->discard = true;
	else if (semerr)
		nbd_free_request(dev, req);

	//UNLOCK
	benaphore_unlock(&dev->ben);

	if (semerr == B_OK) {
		/* normal case: copy the received data out to the caller */
		*numbytes = req->len;
		memcpy(data, req->buffer, req->len);
		err = B_OK;
		if (*numbytes == 0 && req->reply.error)
			err = EIO;
		nbd_free_request(dev, req);
		return err;
	}

	*numbytes = 0;
	return semerr;


err3:
err2:
err1:
	nbd_free_request(dev, req);
err0:
	*numbytes = 0;
	return err;
}
656 
/*
 * write() hook: post an NBD_CMD_WRITE (header and payload sent
 * back-to-back under the device lock) and block until the reply
 * arrives. On success *numbytes is the number of bytes written.
 */
status_t nbd_write(cookie_t *cookie, off_t position, const void *data, size_t *numbytes) {
	struct nbd_device *dev = cookie->dev;
	struct nbd_request_entry *req;
	status_t err, semerr;
	PRINT((DP ">%s(%d, %Ld, %ld, )\n", __FUNCTION__, WHICH(cookie->dev), position, *numbytes));

	if (position < 0)
		return EINVAL;
	if (!data)
		return EINVAL;
	err = B_NOT_ALLOWED;
	if (dev->readonly)
		goto err0;

	err = nbd_alloc_request(dev, &req, NBD_CMD_WRITE, position, *numbytes, data);
	if (err)
		goto err0;

	//LOCK
	err = benaphore_lock(&dev->ben);
	if (err)
		goto err1;

	/* sending request+data must be atomic */
	err = nbd_post_request(dev, req);

	//UNLOCK
	benaphore_unlock(&dev->ben);

	if (err)
		goto err2;

	/* block until the postoffice thread releases the request's sem */
	semerr = acquire_sem(req->sem);

	//LOCK
	err = benaphore_lock(&dev->ben);
	if(err)
		goto err3;

	/* bad scenarii:
	 * - no reply yet: mark the entry discarded so the postoffice
	 *   thread frees it when the reply eventually arrives;
	 * - reply arrived but the sem acquisition failed: free it here */
	if (!req->replied)
		req->discard = true;
	else if (semerr)
		nbd_free_request(dev, req);

	//UNLOCK
	benaphore_unlock(&dev->ben);

	if (semerr == B_OK) {
		/* normal case: report how much went out on the wire */
		*numbytes = req->len;
		err = B_OK;
		if (*numbytes == 0 && req->reply.error)
			err = EIO;
		nbd_free_request(dev, req);
		return err;
	}

	*numbytes = 0;
	return semerr;


err3:
err2:
err1:
	nbd_free_request(dev, req);
err0:
	*numbytes = 0;
	return err;
}
727 
/* hook table handed to devfs by find_device(); the trailing NULLs are
 * the optional hooks (select/deselect/scattered I/O) we don't implement */
device_hooks nbd_hooks={
	(device_open_hook)nbd_open,
	(device_close_hook)nbd_close,
	(device_free_hook)nbd_free,
	(device_control_hook)nbd_control,
	(device_read_hook)nbd_read,
	(device_write_hook)nbd_write,
	NULL,
	NULL,
	NULL,
	NULL
};
740 
741 #pragma mark ==== driver hooks ====
742 
int32 api_version = B_CUR_DRIVER_API_VERSION;

/* NULL-terminated list of device names published to devfs;
 * one entry per configured device, filled in by init_driver() */
static char *nbd_name[MAX_NBDS+1] = {
	NULL
};
748 
749 status_t
750 init_hardware (void)
751 {
752 	PRINT((DP ">%s()\n", __FUNCTION__));
753 	return B_OK;
754 }
755 
756 status_t
757 init_driver (void)
758 {
759 	status_t err;
760 	int i, j;
761 	// XXX: load settings
762 	void *handle;
763 	char **names = nbd_name;
764 	PRINT((DP ">%s()\n", __FUNCTION__));
765 
766 	handle = load_driver_settings(DRV);
767 	if (handle == NULL)
768 		return ENOENT;
769 	// XXX: test for boot args ?
770 
771 
772 	err = ksocket_init();
773 	if (err < B_OK)
774 		return err;
775 
776 	for (i = 0; i < MAX_NBDS; i++) {
777 		nbd_devices[i].valid = false;
778 		nbd_devices[i].readonly = false;
779 		err = benaphore_init(&nbd_devices[i].ben, "nbd lock");
780 		if (err < B_OK)
781 			return err; // XXX
782 		nbd_devices[i].refcnt = 0;
783 		nbd_devices[i].req = 0LL; /* next ID for requests */
784 		nbd_devices[i].sock = -1;
785 		nbd_devices[i].postoffice = -1;
786 		nbd_devices[i].size = 0LL;
787 		nbd_devices[i].reqs = NULL;
788 #ifdef MOUNT_KLUDGE
789 		nbd_devices[i].kludge = -1;
790 #endif
791 		nbd_name[i] = NULL;
792 	}
793 
794 	for (i = 0; i < MAX_NBDS; i++) {
795 		const driver_settings *settings = get_driver_settings(handle);
796 		driver_parameter *p = NULL;
797 		char keyname[10];
798 		sprintf(keyname, "%d", i);
799 		for (j = 0; j < settings->parameter_count; j++)
800 			if (!strcmp(settings->parameters[j].name, keyname))
801 				p = &settings->parameters[j];
802 		if (!p)
803 			continue;
804 		for (j = 0; j < p->parameter_count; j++) {
805 			if (!strcmp(p->parameters[j].name, "readonly"))
806 				nbd_devices[i].readonly = true;
807 			if (!strcmp(p->parameters[j].name, "server")) {
808 				if (p->parameters[j].value_count < 2)
809 					continue;
810 				nbd_devices[i].server.sin_len = sizeof(struct sockaddr_in);
811 				nbd_devices[i].server.sin_family = AF_INET;
812 				kinet_aton(p->parameters[j].values[0], &nbd_devices[i].server.sin_addr);
813 				nbd_devices[i].server.sin_port = htons(atoi(p->parameters[j].values[1]));
814 				dprintf(DP " configured [%d]\n", i);
815 				*(names) = malloc(DEVICE_NAME_MAX);
816 				if (*(names) == NULL)
817 					return ENOMEM;
818 				sprintf(*(names++), DEVICE_FMT, i);
819 				nbd_devices[i].valid = true;
820 			}
821 		}
822 	}
823 	*names = NULL;
824 
825 	unload_driver_settings(handle);
826 	return B_OK;
827 }
828 
829 void
830 uninit_driver (void)
831 {
832 	status_t err;
833 	int i;
834 	PRINT((DP ">%s()\n", __FUNCTION__));
835 	for (i = 0; i < MAX_NBDS; i++) {
836 		free(nbd_name[i]);
837 		benaphore_destroy(&nbd_devices[i].ben);
838 	}
839 	err = ksocket_cleanup();
840 	/* HACK */
841 	if (gDelayUnload)
842 		snooze(BONE_TEARDOWN_DELAY);
843 }
844 
845 const char**
846 publish_devices()
847 {
848 	PRINT((DP ">%s()\n", __FUNCTION__));
849 	return (const char **)nbd_name;
850 }
851 
852 device_hooks*
853 find_device(const char* name)
854 {
855 	PRINT((DP ">%s(%s)\n", __FUNCTION__, name));
856 	return &nbd_hooks;
857 }
858 
859 struct nbd_device*
860 nbd_find_device(const char* name)
861 {
862 	int i;
863 	PRINT((DP ">%s(%s)\n", __FUNCTION__, name));
864 	for (i = 0; i < MAX_NBDS; i++) {
865 		char buf[DEVICE_NAME_MAX];
866 		sprintf(buf, DEVICE_FMT, i);
867 		if (!strcmp(buf, name))
868 			return &nbd_devices[i];
869 	}
870 	return NULL;
871 }
872