xref: /haiku/src/add-ons/kernel/drivers/disk/virtual/nbd/nbd.c (revision 02354704729d38c3b078c696adc1bbbd33cbcf72)
1 /*
2  * Copyright 2006-2007, François Revol. All rights reserved.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 /*
7  * nbd driver for Haiku
8  *
9  * Maps a Network Block Device as virtual partitions.
10  */
11 
12 
#include <ByteOrder.h>
#include <KernelExport.h>
#include <Drivers.h>
#include <driver_settings.h>
#include <Errors.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <ksocket.h>
#include <netinet/in.h>
25 
26 //#define DEBUG 1
27 
28 /* on the first open(), open ourselves for some seconds,
29  * to avoid trying to reconnect and failing on a 2nd open,
30  * as it happens with the python server.
31  */
32 //#define MOUNT_KLUDGE
33 
34 
35 /* names, ohh names... */
36 #ifndef SHUT_RDWR
37 #define SHUT_RDWR SHUTDOWN_BOTH
38 #endif
39 
40 /* locking support */
41 #ifdef __HAIKU__
42 #include <kernel/lock.h>
43 #else
44 /* wrappers for R5 */
45 #ifndef _IMPEXP_KERNEL
46 #define _IMPEXP_KERNEL
47 #endif
48 #include "lock.h"
49 #define mutex lock
50 #define mutex_init new_lock
51 #define mutex_destroy free_lock
52 #define mutex_lock LOCK
53 #define mutex_unlock UNLOCK
54 #endif
55 
56 #include "nbd.h"
57 
58 #define DRV "nbd"
59 #define DP "nbd:"
60 #define MAX_NBDS 4
61 #define DEVICE_PREFIX "disk/virtual/nbd/"
62 #define DEVICE_FMT DEVICE_PREFIX "%d/raw"
63 #define DEVICE_NAME_MAX 32
64 #define MAX_REQ_SIZE (32*1024*1024)
65 #define BLKSIZE 512
66 
67 /* debugging */
68 #if DEBUG
69 #define PRINT(a) dprintf a
70 #define WHICH(dev) ((int)(dev - nbd_devices))
71 #else
72 #define PRINT(a)
73 #endif
74 
/* One in-flight request, matched to its reply by `handle`.
 * Ownership: normally freed by the requester (nbd_read/nbd_write);
 * if the requester gave up (`discard`), the postoffice frees it. */
struct nbd_request_entry {
	struct nbd_request_entry *next; /* singly-linked pending list (dev->reqs) */
	struct nbd_request req; /* net byte order */
	struct nbd_reply reply; /* net byte order */
	sem_id sem;     /* released by the postoffice when the reply arrived */
	bool replied;   /* reply received and processed */
	bool discard;   /* requester gave up; postoffice must free the entry */
	uint64 handle;  /* host-order request id (copied into req.handle) */
	uint32 type;    /* NBD_CMD_* in host order */
	uint64 from;    /* byte offset on the device, host order */
	size_t len;     /* payload length; updated with the actual transfer size */
	void *buffer; /* write: ptr to passed buffer; read: ptr to malloc()ed extra */
};
88 
/* Per-device state; one static slot per possible NBD device. */
struct nbd_device {
	bool valid;                 /* configured from driver settings */
	bool readonly;              /* refuse writes when set */
	struct sockaddr_in server;  /* server address from settings */
	mutex ben;                  /* protects refcnt, req, reqs and socket sends */
	vint32 refcnt;              /* open count; teardown when it reaches 0 */
	uint64 req; /* next ID for requests */
	int sock;                   /* connected socket, or -1 */
	thread_id postoffice;       /* reply reader thread, or -1 */
	uint64 size;                /* device size in bytes, from the handshake */
	struct nbd_request_entry *reqs; /* pending (unanswered) requests */
#ifdef MOUNT_KLUDGE
	int kludge;                 /* see the MOUNT_KLUDGE comment at the top */
#endif
};
104 
/* Per-open cookie: just remembers which device was opened. */
typedef struct cookie {
	struct nbd_device *dev;

} cookie_t;
109 
110 /* data=NULL on read */
111 status_t nbd_alloc_request(struct nbd_device *dev, struct nbd_request_entry **req, uint32 type, off_t from, size_t len, const char *data);
112 status_t nbd_queue_request(struct nbd_device *dev, struct nbd_request_entry *req);
113 status_t nbd_dequeue_request(struct nbd_device *dev, uint64 handle, struct nbd_request_entry **req);
114 status_t nbd_free_request(struct nbd_device *dev, struct nbd_request_entry *req);
115 
116 struct nbd_device *nbd_find_device(const char* name);
117 
118 int32 nbd_postoffice(void *arg);
119 status_t nbd_connect(struct nbd_device *dev);
120 status_t nbd_teardown(struct nbd_device *dev);
121 status_t nbd_post_request(struct nbd_device *dev, struct nbd_request_entry *req);
122 
123 status_t nbd_open(const char *name, uint32 flags, cookie_t **cookie);
124 status_t nbd_close(cookie_t *cookie);
125 status_t nbd_free(cookie_t *cookie);
126 status_t nbd_control(cookie_t *cookie, uint32 op, void *data, size_t len);
127 status_t nbd_read(cookie_t *cookie, off_t position, void *data, size_t *numbytes);
128 status_t nbd_write(cookie_t *cookie, off_t position, const void *data, size_t *numbytes);
129 
130 KSOCKET_MODULE_DECL;
131 
132 /* HACK:
133  * In BONE at least, if connect() fails (EINTR or ETIMEDOUT)
134  * keeps locked pages around (likely a bone_data,
135  * until TCP gets the last ACK). If that happens, we snooze()
136  * in unload_driver() to let TCP timeout before the kernel
137  * tries to delete the image. */
/* set when connect() failed; checked in uninit_driver() (see HACK above) */
bool gDelayUnload = false;
#define BONE_TEARDOWN_DELAY 60000000
140 
141 #if 0
142 #pragma mark ==== support ====
143 #endif
144 
/* TODO: move this into ksocket as an inline helper.
 * Minimal inet_aton() for dotted-quad strings ("a.b.c.d"): parses up
 * to four decimal octets and assembles them least-significant-first,
 * so the stored integer holds the address in network byte order on a
 * little-endian host (NOTE(review): big-endian hosts would get the
 * bytes reversed — confirm if that ever matters for Haiku).
 * Returns 0 on success, -1 when an octet contains no digits
 * (the old `if (!p)` check could never fire: strtoul never NULLs
 * its end pointer, so malformed input silently "succeeded"). */
static int kinet_aton(const char *in, struct in_addr *addr)
{
	int i;
	unsigned long a;
	uint32_t inaddr = 0;
	char *p = (char *)in;
	char *octet;
	for (i = 0; i < 4; i++) {
		octet = p;
		a = strtoul(p, &p, 10);
		if (p == octet) /* no digits parsed: malformed input */
			return -1;
		inaddr = (inaddr >> 8) | ((a & 0x0ff) << 24);
		*(uint32_t *)addr = inaddr;
		if (!*p)
			return 0;
		p++;
	}
	return 0;
}
164 
165 #if 0
166 #pragma mark ==== request manager ====
167 #endif
168 
169 status_t nbd_alloc_request(struct nbd_device *dev, struct nbd_request_entry **req, uint32 type, off_t from, size_t len, const char *data)
170 {
171 	bool w = (type == NBD_CMD_WRITE);
172 	struct nbd_request_entry *r;
173 	status_t err = EINVAL;
174 	uint64 handle;
175 	PRINT((DP ">%s(%" B_PRIu32 ", %" B_PRIdOFF ", %ld)\n", __FUNCTION__, type,
176 		from, len));
177 
178 	if (type != NBD_CMD_READ && type != NBD_CMD_WRITE && type != NBD_CMD_DISC)
179 		return err;
180 	if (!dev || !req || from < 0)
181 		return err;
182 
183 	//LOCK
184 	err = mutex_lock(&dev->ben);
185 	if (err)
186 		return err;
187 
188 	// atomic
189 	handle = dev->req++;
190 
191 
192 	//UNLOCK
193 	mutex_unlock(&dev->ben);
194 
195 	err = ENOMEM;
196 	r = malloc(sizeof(struct nbd_request_entry) + (w ? 0 : len));
197 	if (r == NULL)
198 		goto err0;
199 	r->next = NULL;
200 	err = r->sem = create_sem(0, "nbd request sem");
201 	if (err < 0)
202 		goto err1;
203 
204 	r->replied = false;
205 	r->discard = false;
206 	r->handle = handle;
207 	r->type = type;
208 	r->from = from;
209 	r->len = len;
210 
211 	r->req.magic = B_HOST_TO_BENDIAN_INT32(NBD_REQUEST_MAGIC);
212 	r->req.type = B_HOST_TO_BENDIAN_INT32(type);
213 	r->req.handle = B_HOST_TO_BENDIAN_INT64(r->handle);
214 	r->req.from = B_HOST_TO_BENDIAN_INT64(r->from);
215 	r->req.len = B_HOST_TO_BENDIAN_INT32(len);
216 
217 	r->buffer = (void *)(w ? data : (((char *)r) + sizeof(struct nbd_request_entry)));
218 
219 	*req = r;
220 	return B_OK;
221 
222 err1:
223 	free(r);
224 err0:
225 	dprintf(DP " %s: error 0x%08" B_PRIx32 "\n", __FUNCTION__, err);
226 	return err;
227 }
228 
229 
230 status_t nbd_queue_request(struct nbd_device *dev, struct nbd_request_entry *req)
231 {
232 	PRINT((DP ">%s(handle:%" B_PRIu64 ")\n", __FUNCTION__, req->handle));
233 	req->next = dev->reqs;
234 	dev->reqs = req;
235 	return B_OK;
236 }
237 
238 
239 status_t nbd_dequeue_request(struct nbd_device *dev, uint64 handle, struct nbd_request_entry **req)
240 {
241 	struct nbd_request_entry *r, *prev;
242 	PRINT((DP ">%s(handle:%" B_PRIu64 ")\n", __FUNCTION__, handle));
243 	r = dev->reqs;
244 	prev = NULL;
245 	while (r && r->handle != handle) {
246 		prev = r;
247 		r = r->next;
248 	}
249 	if (!r)
250 		return ENOENT;
251 
252 	if (prev)
253 		prev->next = r->next;
254 	else
255 		dev->reqs = r->next;
256 
257 	*req = r;
258 	return B_OK;
259 }
260 
261 
262 status_t nbd_free_request(struct nbd_device *dev, struct nbd_request_entry *req)
263 {
264 	PRINT((DP ">%s(handle:%" B_PRIu64 ")\n", __FUNCTION__, req->handle));
265 	delete_sem(req->sem);
266 	free(req);
267 	return B_OK;
268 }
269 
270 
271 #if 0
272 #pragma mark ==== nbd handler ====
273 #endif
274 
275 int32 nbd_postoffice(void *arg)
276 {
277 	struct nbd_device *dev = (struct nbd_device *)arg;
278 	struct nbd_request_entry *req = NULL;
279 	struct nbd_reply reply;
280 	status_t err;
281 	const char *reason;
282 	PRINT((DP ">%s()\n", __FUNCTION__));
283 
284 	for (;;) {
285 		reason = "recv";
286 		err = krecv(dev->sock, &reply, sizeof(reply), 0);
287 		if (err == -1 && errno < 0)
288 			err = errno;
289 		if (err < 0)
290 			goto err;
291 		reason = "recv:size";
292 		if (err < sizeof(reply))
293 			err = EINVAL;
294 		if (err < 0)
295 			goto err;
296 		reason = "magic";
297 		err = EINVAL;
298 		if (B_BENDIAN_TO_HOST_INT32(reply.magic) != NBD_REPLY_MAGIC)
299 			goto err;
300 
301 		reason = "lock";
302 		//LOCK
303 		err = mutex_lock(&dev->ben);
304 		if (err)
305 			goto err;
306 
307 		reason = "dequeue_request";
308 		err = nbd_dequeue_request(dev, B_BENDIAN_TO_HOST_INT64(reply.handle), &req);
309 
310 		//UNLOCK
311 		mutex_unlock(&dev->ben);
312 
313 		if (!err && !req) {
314 			dprintf(DP "nbd_dequeue_rquest found NULL!\n");
315 			err = ENOENT;
316 		}
317 
318 		if (err == B_OK) {
319 			memcpy(&req->reply, &reply, sizeof(reply));
320 			if (req->type == NBD_CMD_READ) {
321 				err = 0;
322 				reason = "recv(data)";
323 				if (reply.error == 0)
324 					err = krecv(dev->sock, req->buffer, req->len, 0);
325 				if (err < 0)
326 					goto err;
327 				/* tell back how much we've got (?) */
328 				req->len = err;
329 			} else {
330 				if (reply.error)
331 					req->len = 0;
332 			}
333 
334 			reason = "lock";
335 			//LOCK
336 			err = mutex_lock(&dev->ben);
337 			if (err)
338 				goto err;
339 
340 			// this also must be atomic!
341 			release_sem(req->sem);
342 			req->replied = true;
343 			if (req->discard)
344 				nbd_free_request(dev, req);
345 
346 			//UNLOCK
347 			mutex_unlock(&dev->ben);
348 		}
349 
350 	}
351 
352 	PRINT((DP "<%s\n", __FUNCTION__));
353 	return 0;
354 
355 err:
356 	dprintf(DP "%s: %s: error 0x%08" B_PRIx32 "\n", __FUNCTION__, reason, err);
357 	return err;
358 }
359 
360 
361 status_t nbd_connect(struct nbd_device *dev)
362 {
363 	struct nbd_init_packet initpkt;
364 	status_t err;
365 	PRINT((DP ">%s()\n", __FUNCTION__));
366 
367 	PRINT((DP " %s: socket()\n", __FUNCTION__));
368 	err = dev->sock = ksocket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
369 	if (err == -1 && errno < 0)
370 		err = errno;
371 	if (err < 0)
372 		goto err0;
373 
374 	PRINT((DP " %s: connect()\n", __FUNCTION__));
375 	err = kconnect(dev->sock, (struct sockaddr *)&dev->server, sizeof(dev->server));
376 	//err = ENOSYS;
377 	if (err == -1 && errno < 0)
378 		err = errno;
379 	/* HACK: avoid the kernel unloading us with locked pages from TCP */
380 	if (err)
381 		gDelayUnload = true;
382 	if (err)
383 		goto err1;
384 
385 	PRINT((DP " %s: recv(initpkt)\n", __FUNCTION__));
386 	err = krecv(dev->sock, &initpkt, sizeof(initpkt), 0);
387 	if (err == -1 && errno < 0)
388 		err = errno;
389 	if (err < sizeof(initpkt))
390 		goto err2;
391 	err = EINVAL;//EPROTO;
392 	if (memcmp(initpkt.passwd, NBD_INIT_PASSWD, sizeof(initpkt.passwd)))
393 		goto err3;
394 	if (B_BENDIAN_TO_HOST_INT64(initpkt.magic) != NBD_INIT_MAGIC)
395 		goto err3;
396 
397 	dev->size = B_BENDIAN_TO_HOST_INT64(initpkt.device_size);
398 
399 	dprintf(DP " %s: connected, device size %" B_PRIu64 " bytes.\n",
400 		__FUNCTION__, dev->size);
401 
402 	err = dev->postoffice = spawn_kernel_thread(nbd_postoffice, "nbd postoffice", B_REAL_TIME_PRIORITY, dev);
403 	if (err < B_OK)
404 		goto err4;
405 	resume_thread(dev->postoffice);
406 
407 	PRINT((DP "<%s\n", __FUNCTION__));
408 	return B_OK;
409 
410 err4:
411 	dev->postoffice = -1;
412 err3:
413 err2:
414 err1:
415 	kclosesocket(dev->sock);
416 	dev->sock = -1;
417 err0:
418 	dprintf(DP "<%s: error 0x%08" B_PRIx32 "\n", __FUNCTION__, err);
419 	return err;
420 }
421 
422 
423 status_t nbd_teardown(struct nbd_device *dev)
424 {
425 	status_t ret;
426 	PRINT((DP ">%s()\n", __FUNCTION__));
427 	kshutdown(dev->sock, SHUT_RDWR);
428 	kclosesocket(dev->sock);
429 	dev->sock = -1;
430 	wait_for_thread(dev->postoffice, &ret);
431 	return B_OK;
432 }
433 
434 
435 status_t nbd_post_request(struct nbd_device *dev, struct nbd_request_entry *req)
436 {
437 	status_t err;
438 	PRINT((DP ">%s(handle:%" B_PRIu64 ")\n", __FUNCTION__, req->handle));
439 
440 	err = ksend(dev->sock, &req->req, sizeof(req->req), 0);
441 	if (err < 0)
442 		return err;
443 
444 	if (req->type == NBD_CMD_WRITE)
445 		err = ksend(dev->sock, req->buffer, req->len, 0);
446 	if (err < 0)
447 		return err;
448 	else
449 		req->len = err;
450 
451 	err = nbd_queue_request(dev, req);
452 	return err;
453 }
454 
455 
456 #if 0
457 #pragma mark ==== device hooks ====
458 #endif
459 
460 static struct nbd_device nbd_devices[MAX_NBDS];
461 
462 status_t nbd_open(const char *name, uint32 flags, cookie_t **cookie) {
463 	status_t err;
464 #ifdef MOUNT_KLUDGE
465 	int32 refcnt;
466 	int kfd;
467 #endif
468 	struct nbd_device *dev = NULL;
469 	PRINT((DP ">%s(%s, %" B_PRIx32 ", )\n", __FUNCTION__, name, flags));
470 	(void)name; (void)flags;
471 	dev = nbd_find_device(name);
472 	if (!dev || !dev->valid)
473 		return ENOENT;
474 	err = ENOMEM;
475 	*cookie = (void*)malloc(sizeof(cookie_t));
476 	if (*cookie == NULL)
477 		goto err0;
478 	memset(*cookie, 0, sizeof(cookie_t));
479 	(*cookie)->dev = dev;
480 	err = mutex_lock(&dev->ben);
481 	if (err)
482 		goto err1;
483 	/*  */
484 	if (dev->sock < 0)
485 		err = nbd_connect(dev);
486 	if (err)
487 		goto err2;
488 #ifdef MOUNT_KLUDGE
489 	refcnt = dev->refcnt++;
490 	kfd = dev->kludge;
491 	dev->kludge = -1;
492 #endif
493 	mutex_unlock(&dev->ben);
494 
495 #ifdef MOUNT_KLUDGE
496 	if (refcnt == 0) {
497 		char buf[32];
498 		sprintf(buf, "/dev/%s", name);
499 		dev->kludge = open(buf, O_RDONLY);
500 	} else if (kfd) {
501 		close(kfd);
502 	}
503 #endif
504 
505 	return B_OK;
506 
507 err2:
508 	mutex_unlock(&dev->ben);
509 err1:
510 	free(*cookie);
511 err0:
512 	dprintf(DP " %s: error 0x%08" B_PRIx32 "\n", __FUNCTION__, err);
513 	return err;
514 }
515 
516 
517 status_t nbd_close(cookie_t *cookie) {
518 	struct nbd_device *dev = cookie->dev;
519 	status_t err;
520 #ifdef MOUNT_KLUDGE
521 	int kfd = -1;
522 #endif
523 	PRINT((DP ">%s(%d)\n", __FUNCTION__, WHICH(cookie->dev)));
524 
525 	err = mutex_lock(&dev->ben);
526 	if (err)
527 		return err;
528 
529 	// XXX: do something ?
530 #ifdef MOUNT_KLUDGE
531 	kfd = dev->kludge;
532 	dev->kludge = -1;
533 #endif
534 
535 	mutex_unlock(&dev->ben);
536 
537 #ifdef MOUNT_KLUDGE
538 	if (kfd > -1) {
539 		close(kfd);
540 	}
541 #endif
542 	return B_OK;
543 }
544 
545 
546 status_t nbd_free(cookie_t *cookie) {
547 	struct nbd_device *dev = cookie->dev;
548 	status_t err;
549 	PRINT((DP ">%s(%d)\n", __FUNCTION__, WHICH(cookie->dev)));
550 
551 	err = mutex_lock(&dev->ben);
552 	if (err)
553 		return err;
554 
555 	if (--dev->refcnt == 0) {
556 		err = nbd_teardown(dev);
557 	}
558 
559 	mutex_unlock(&dev->ben);
560 
561 	free(cookie);
562 	return err;
563 }
564 
565 
566 status_t nbd_control(cookie_t *cookie, uint32 op, void *data, size_t len) {
567 	PRINT((DP ">%s(%d, %" B_PRIu32 ", , %ld)\n", __FUNCTION__,
568 		WHICH(cookie->dev), op, len));
569 	switch (op) {
570 	case B_GET_DEVICE_SIZE: /* this one is broken anyway... */
571 		if (data) {
572 			*(size_t *)data = (size_t)cookie->dev->size;
573 			return B_OK;
574 		}
575 		return EINVAL;
576 	case B_SET_DEVICE_SIZE: /* broken */
577 		return EINVAL;
578 	case B_SET_NONBLOCKING_IO:
579 		return EINVAL;
580 	case B_SET_BLOCKING_IO:
581 		return B_OK;
582 	case B_GET_READ_STATUS:
583 	case B_GET_WRITE_STATUS:
584 		if (data) {
585 			*(bool *)data = false;
586 			return B_OK;
587 		}
588 		return EINVAL;
589 	case B_GET_GEOMETRY:
590 	case B_GET_BIOS_GEOMETRY:
591 		if (data) {
592 			device_geometry *geom = (device_geometry *)data;
593 			geom->bytes_per_sector = BLKSIZE;
594 			geom->sectors_per_track = 1;
595 			geom->cylinder_count = cookie->dev->size / BLKSIZE;
596 			geom->head_count = 1;
597 			geom->device_type = B_DISK;
598 			geom->removable = false;
599 			geom->read_only = cookie->dev->readonly;
600 			geom->write_once = false;
601 			return B_OK;
602 		}
603 		return EINVAL;
604 	case B_GET_MEDIA_STATUS:
605 		if (data) {
606 			*(status_t *)data = B_OK;
607 			return B_OK;
608 		}
609 		return EINVAL;
610 
611 	case B_EJECT_DEVICE:
612 	case B_LOAD_MEDIA:
613 		return B_BAD_VALUE;
614 	case B_FLUSH_DRIVE_CACHE: /* wait for request list to be empty ? */
615 		return B_OK;
616 	default:
617 		return B_BAD_VALUE;
618 	}
619 	return B_NOT_ALLOWED;
620 }
621 
622 
/* read hook: allocate a READ request, post it (under the device mutex,
 * so header+queueing are atomic w.r.t. other requesters), then block on
 * the request semaphore until the postoffice delivers the reply.
 * On success *numbytes is the number of bytes actually received. */
status_t nbd_read(cookie_t *cookie, off_t position, void *data, size_t *numbytes) {
	struct nbd_device *dev = cookie->dev;
	struct nbd_request_entry *req;
	status_t err, semerr;
	PRINT((DP ">%s(%d, %" B_PRIdOFF ", , )\n", __FUNCTION__,
		WHICH(cookie->dev), position));

	if (position < 0)
		return EINVAL;
	if (!data)
		return EINVAL;

	err = nbd_alloc_request(dev, &req, NBD_CMD_READ, position, *numbytes, NULL);
	if (err)
		goto err0;

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		goto err1;

	err = nbd_post_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (err)
		goto err2;


	/* wait for the postoffice to release the semaphore on reply */
	semerr = acquire_sem(req->sem);

	//LOCK
	err = mutex_lock(&dev->ben);
	if(err)
		goto err3;

	/* failure cases: if no reply arrived yet, hand ownership of the
	 * entry to the postoffice via `discard`; if a reply did arrive but
	 * the wait itself failed, free the entry ourselves */
	if (!req->replied)
		req->discard = true;
	else if (semerr)
		nbd_free_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (semerr == B_OK) {
		/* req->len was updated by the postoffice to the received size */
		*numbytes = req->len;
		memcpy(data, req->buffer, req->len);
		err = B_OK;
		if (*numbytes == 0 && req->reply.error)
			err = EIO;
		nbd_free_request(dev, req);
		return err;
	}

	*numbytes = 0;
	return semerr;


err3:
err2:
err1:
	nbd_free_request(dev, req);
err0:
	*numbytes = 0;
	return err;
}
691 
692 
/* write hook: mirror of nbd_read() for NBD_CMD_WRITE. The caller's
 * buffer is referenced (not copied) by the request entry, so it must
 * stay valid until the reply arrives. Rejected on read-only devices. */
status_t nbd_write(cookie_t *cookie, off_t position, const void *data, size_t *numbytes) {
	struct nbd_device *dev = cookie->dev;
	struct nbd_request_entry *req;
	status_t err, semerr;
	PRINT((DP ">%s(%d, %" B_PRIdOFF ", %ld, )\n", __FUNCTION__,
		WHICH(cookie->dev), position, *numbytes));

	if (position < 0)
		return EINVAL;
	if (!data)
		return EINVAL;
	err = B_NOT_ALLOWED;
	if (dev->readonly)
		goto err0;

	err = nbd_alloc_request(dev, &req, NBD_CMD_WRITE, position, *numbytes, data);
	if (err)
		goto err0;

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		goto err1;

	/* sending request+data must be atomic */
	err = nbd_post_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (err)
		goto err2;


	/* wait for the postoffice to release the semaphore on reply */
	semerr = acquire_sem(req->sem);

	//LOCK
	err = mutex_lock(&dev->ben);
	if(err)
		goto err3;

	/* failure cases: if no reply arrived yet, hand ownership of the
	 * entry to the postoffice via `discard`; if a reply did arrive but
	 * the wait itself failed, free the entry ourselves */
	if (!req->replied)
		req->discard = true;
	else if (semerr)
		nbd_free_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (semerr == B_OK) {
		/* req->len reflects how much was actually sent */
		*numbytes = req->len;
		err = B_OK;
		if (*numbytes == 0 && req->reply.error)
			err = EIO;
		nbd_free_request(dev, req);
		return err;
	}

	*numbytes = 0;
	return semerr;


err3:
err2:
err1:
	nbd_free_request(dev, req);
err0:
	*numbytes = 0;
	return err;
}
764 
765 
/* hook table shared by all published nbd devices */
device_hooks nbd_hooks={
	(device_open_hook)nbd_open,
	(device_close_hook)nbd_close,
	(device_free_hook)nbd_free,
	(device_control_hook)nbd_control,
	(device_read_hook)nbd_read,
	(device_write_hook)nbd_write,
	NULL, /* select — unsupported */
	NULL, /* deselect */
	NULL, /* presumably readv — confirm against Drivers.h */
	NULL  /* presumably writev */
};
778 
779 #if 0
780 #pragma mark ==== driver hooks ====
781 #endif
782 
int32 api_version = B_CUR_DRIVER_API_VERSION;

/* published device names, filled by init_driver(); NULL-terminated,
 * one slot per possible device plus the terminator */
static char *nbd_name[MAX_NBDS+1] = {
	NULL
};
788 
789 
790 status_t
791 init_hardware (void)
792 {
793 	PRINT((DP ">%s()\n", __FUNCTION__));
794 	return B_OK;
795 }
796 
797 
798 status_t
799 init_driver (void)
800 {
801 	status_t err;
802 	int i, j;
803 	// XXX: load settings
804 	void *handle;
805 	char **names = nbd_name;
806 	PRINT((DP ">%s()\n", __FUNCTION__));
807 
808 	handle = load_driver_settings(DRV);
809 	if (handle == NULL)
810 		return ENOENT;
811 	// XXX: test for boot args ?
812 
813 
814 	err = ksocket_init();
815 	if (err < B_OK)
816 		return err;
817 
818 	for (i = 0; i < MAX_NBDS; i++) {
819 		nbd_devices[i].valid = false;
820 		nbd_devices[i].readonly = false;
821 		mutex_init(&nbd_devices[i].ben, "nbd lock");
822 		nbd_devices[i].refcnt = 0;
823 		nbd_devices[i].req = 0LL; /* next ID for requests */
824 		nbd_devices[i].sock = -1;
825 		nbd_devices[i].postoffice = -1;
826 		nbd_devices[i].size = 0LL;
827 		nbd_devices[i].reqs = NULL;
828 #ifdef MOUNT_KLUDGE
829 		nbd_devices[i].kludge = -1;
830 #endif
831 		nbd_name[i] = NULL;
832 	}
833 
834 	for (i = 0; i < MAX_NBDS; i++) {
835 		const driver_settings *settings = get_driver_settings(handle);
836 		driver_parameter *p = NULL;
837 		char keyname[10];
838 		sprintf(keyname, "%d", i);
839 		for (j = 0; j < settings->parameter_count; j++)
840 			if (!strcmp(settings->parameters[j].name, keyname))
841 				p = &settings->parameters[j];
842 		if (!p)
843 			continue;
844 		for (j = 0; j < p->parameter_count; j++) {
845 			if (!strcmp(p->parameters[j].name, "readonly"))
846 				nbd_devices[i].readonly = true;
847 			if (!strcmp(p->parameters[j].name, "server")) {
848 				if (p->parameters[j].value_count < 2)
849 					continue;
850 				nbd_devices[i].server.sin_len = sizeof(struct sockaddr_in);
851 				nbd_devices[i].server.sin_family = AF_INET;
852 				kinet_aton(p->parameters[j].values[0], &nbd_devices[i].server.sin_addr);
853 				nbd_devices[i].server.sin_port = htons(atoi(p->parameters[j].values[1]));
854 				dprintf(DP " configured [%d]\n", i);
855 				*(names) = malloc(DEVICE_NAME_MAX);
856 				if (*(names) == NULL)
857 					return ENOMEM;
858 				sprintf(*(names++), DEVICE_FMT, i);
859 				nbd_devices[i].valid = true;
860 			}
861 		}
862 	}
863 	*names = NULL;
864 
865 	unload_driver_settings(handle);
866 	return B_OK;
867 }
868 
869 
870 void
871 uninit_driver (void)
872 {
873 	int i;
874 	PRINT((DP ">%s()\n", __FUNCTION__));
875 	for (i = 0; i < MAX_NBDS; i++) {
876 		free(nbd_name[i]);
877 		mutex_destroy(&nbd_devices[i].ben);
878 	}
879 	ksocket_cleanup();
880 	/* HACK */
881 	if (gDelayUnload)
882 		snooze(BONE_TEARDOWN_DELAY);
883 }
884 
885 
886 const char**
887 publish_devices()
888 {
889 	PRINT((DP ">%s()\n", __FUNCTION__));
890 	return (const char **)nbd_name;
891 }
892 
893 
894 device_hooks*
895 find_device(const char* name)
896 {
897 	PRINT((DP ">%s(%s)\n", __FUNCTION__, name));
898 	return &nbd_hooks;
899 }
900 
901 
902 struct nbd_device*
903 nbd_find_device(const char* name)
904 {
905 	int i;
906 	PRINT((DP ">%s(%s)\n", __FUNCTION__, name));
907 	for (i = 0; i < MAX_NBDS; i++) {
908 		char buf[DEVICE_NAME_MAX];
909 		sprintf(buf, DEVICE_FMT, i);
910 		if (!strcmp(buf, name))
911 			return &nbd_devices[i];
912 	}
913 	return NULL;
914 }
915 
916