/* xref: /haiku/src/add-ons/kernel/drivers/disk/virtual/nbd/nbd.c (revision 0f609eb2902eb8d945af61802e6f3d3de68cda5b) */
1 /*
2  * Copyright 2006-2007, François Revol. All rights reserved.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 /*
7  * nbd driver for Haiku
8  *
9  * Maps a Network Block Device as virtual partitions.
10  */
11 
12 #include <ByteOrder.h>
13 #include <KernelExport.h>
14 #include <Drivers.h>
15 #include <driver_settings.h>
16 #include <Errors.h>
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <unistd.h>
21 #include <ksocket.h>
22 #include <netinet/in.h>
23 
24 //#define DEBUG 1
25 
26 /* on the first open(), open ourselves for some seconds,
27  * to avoid trying to reconnect and failing on a 2nd open,
28  * as it happens with the python server.
29  */
30 //#define MOUNT_KLUDGE
31 
32 
33 /* names, ohh names... */
34 #ifndef SHUT_RDWR
35 #define SHUT_RDWR SHUTDOWN_BOTH
36 #endif
37 
38 /* locking support */
39 #ifdef __HAIKU__
40 #include <kernel/lock.h>
41 #else
42 /* wrappers for R5 */
43 #ifndef _IMPEXP_KERNEL
44 #define _IMPEXP_KERNEL
45 #endif
46 #include "lock.h"
47 #define benaphore lock
48 #define benaphore_init new_lock
49 #define benaphore_destroy free_lock
50 #define benaphore_lock LOCK
51 #define benaphore_unlock UNLOCK
52 #endif
53 
54 #define DEBUG 1
55 
56 #include "nbd.h"
57 
58 #define DRV "nbd"
59 #define DP "nbd:"
60 #define MAX_NBDS 4
61 #define DEVICE_PREFIX "disk/virtual/nbd/"
62 #define DEVICE_FMT DEVICE_PREFIX "%d/raw"
63 #define DEVICE_NAME_MAX 32
64 #define MAX_REQ_SIZE (32*1024*1024)
65 #define BLKSIZE 512
66 
67 /* debugging */
68 #if DEBUG
69 #define PRINT(a) dprintf a
70 #define WHICH(dev) ((int)(dev - nbd_devices))
71 #else
72 #define PRINT(a)
73 #endif
74 
/* One in-flight NBD transaction: the on-wire request/reply pair plus the
 * bookkeeping needed to hand the result back to the waiting caller. */
struct nbd_request_entry {
	struct nbd_request_entry *next;	/* singly-linked pending list (nbd_device.reqs) */
	struct nbd_request req; /* net byte order */
	struct nbd_reply reply; /* net byte order */
	sem_id sem;	/* released by the postoffice thread when the reply arrives */
	bool replied;	/* set by the postoffice once 'reply' has been filled in */
	bool discard;	/* set by a caller that gave up; postoffice then frees the entry */
	uint64 handle;	/* host-order request handle; key used to match replies */
	uint32 type;	/* NBD_CMD_* in host order */
	uint64 from;	/* byte offset on the device, host order */
	size_t len;	/* requested length; updated to the actual transfer size */
	void *buffer; /* write: ptr to passed buffer; read: ptr to malloc()ed extra */
};
88 
/* Per-device state: one entry per device configured in the driver settings,
 * each mapping to one TCP connection to an NBD server. */
struct nbd_device {
	bool valid;	/* configured via the settings file (see init_driver) */
	bool readonly;	/* "readonly" flag from the settings file */
	struct sockaddr_in server;	/* NBD server address (settings: "server addr port") */
	benaphore ben;	/* guards refcnt, req counter, reqs list and connect/teardown */
	vint32 refcnt;	/* number of open cookies referencing this device */
	uint64 req; /* next ID for requests */
	int sock;	/* TCP socket to the server; -1 when disconnected */
	thread_id postoffice;	/* reply-reader thread (nbd_postoffice); -1 when not running */
	uint64 size;	/* device size in bytes, as announced by the server */
	struct nbd_request_entry *reqs;	/* requests sent and awaiting their reply */
#ifdef MOUNT_KLUDGE
	int kludge;	/* self-held fd; see MOUNT_KLUDGE comment at top of file */
#endif
};
104 
/* Per-open() cookie: currently just a back-pointer to the device. */
typedef struct cookie {
	struct nbd_device *dev;

} cookie_t;
109 
110 /* data=NULL on read */
111 status_t nbd_alloc_request(struct nbd_device *dev, struct nbd_request_entry **req, uint32 type, off_t from, size_t len, const char *data);
112 status_t nbd_queue_request(struct nbd_device *dev, struct nbd_request_entry *req);
113 status_t nbd_dequeue_request(struct nbd_device *dev, uint64 handle, struct nbd_request_entry **req);
114 status_t nbd_free_request(struct nbd_device *dev, struct nbd_request_entry *req);
115 
116 struct nbd_device *nbd_find_device(const char* name);
117 
118 int32 nbd_postoffice(void *arg);
119 status_t nbd_connect(struct nbd_device *dev);
120 status_t nbd_teardown(struct nbd_device *dev);
121 status_t nbd_post_request(struct nbd_device *dev, struct nbd_request_entry *req);
122 
123 status_t nbd_open(const char *name, uint32 flags, cookie_t **cookie);
124 status_t nbd_close(cookie_t *cookie);
125 status_t nbd_free(cookie_t *cookie);
126 status_t nbd_control(cookie_t *cookie, uint32 op, void *data, size_t len);
127 status_t nbd_read(cookie_t *cookie, off_t position, void *data, size_t *numbytes);
128 status_t nbd_write(cookie_t *cookie, off_t position, const void *data, size_t *numbytes);
129 
130 KSOCKET_MODULE_DECL;
131 
132 /* HACK:
133  * In BONE at least, if connect() fails (EINTR or ETIMEDOUT)
134  * keeps locked pages around (likely a bone_data,
135  * until TCP gets the last ACK). If that happens, we snooze()
136  * in unload_driver() to let TCP timeout before the kernel
137  * tries to delete the image. */
138 bool gDelayUnload = false;
139 #define BONE_TEARDOWN_DELAY 60000000
140 
141 #if 0
142 #pragma mark ==== support ====
143 #endif
144 
// move that to ksocket inlined
/*
 * Minimal inet_aton() replacement: parse a dotted-quad "a.b.c.d" string
 * into *addr (network byte order). Returns 0 on success, -1 on a malformed
 * string (missing digits or a separator other than '.').
 * Like the original, octet values are masked to 8 bits rather than
 * rejected, and fewer than four octets fill only the low bytes.
 */
static int kinet_aton(const char *in, struct in_addr *addr)
{
	int i;
	unsigned long octet;
	unsigned long inaddr = 0;
	const char *p = in;
	char *end;
	for (i = 0; i < 4; i++) {
		octet = strtoul(p, &end, 10);
		/* strtoul() never returns a NULL end pointer; the real "bad
		 * input" signal is that no digits were consumed at all */
		if (end == p)
			return -1;
		inaddr = (inaddr << 8) | (octet & 0x0ff);
		p = end;
		if (!*p)
			break;
		if (*p != '.')
			return -1;
		p++;
	}
	/* store in network byte order; the old direct uint32 store was only
	 * correct on little-endian hosts and violated strict aliasing */
	addr->s_addr = htonl(inaddr & 0xffffffff);
	return 0;
}
164 
165 #if 0
166 #pragma mark ==== request manager ====
167 #endif
168 
169 status_t nbd_alloc_request(struct nbd_device *dev, struct nbd_request_entry **req, uint32 type, off_t from, size_t len, const char *data)
170 {
171 	bool w = (type == NBD_CMD_WRITE);
172 	struct nbd_request_entry *r;
173 	status_t err = EINVAL;
174 	uint64 handle;
175 	PRINT((DP ">%s(%ld, %Ld, %ld)\n", __FUNCTION__, type, from, len));
176 
177 	if (type != NBD_CMD_READ && type != NBD_CMD_WRITE && type != NBD_CMD_DISC)
178 		return err;
179 	if (!dev || !req || from < 0)
180 		return err;
181 
182 	//LOCK
183 	err = benaphore_lock(&dev->ben);
184 	if (err)
185 		return err;
186 
187 	// atomic
188 	handle = dev->req++;
189 
190 
191 	//UNLOCK
192 	benaphore_unlock(&dev->ben);
193 
194 	err = ENOMEM;
195 	r = malloc(sizeof(struct nbd_request_entry) + (w ? 0 : len));
196 	if (r == NULL)
197 		goto err0;
198 	r->next = NULL;
199 	err = r->sem = create_sem(0, "nbd request sem");
200 	if (err < 0)
201 		goto err1;
202 
203 	r->replied = false;
204 	r->discard = false;
205 	r->handle = handle;
206 	r->type = type;
207 	r->from = from;
208 	r->len = len;
209 
210 	r->req.magic = B_HOST_TO_BENDIAN_INT32(NBD_REQUEST_MAGIC);
211 	r->req.type = B_HOST_TO_BENDIAN_INT32(type);
212 	r->req.handle = B_HOST_TO_BENDIAN_INT64(r->handle);
213 	r->req.from = B_HOST_TO_BENDIAN_INT64(r->from);
214 	r->req.len = B_HOST_TO_BENDIAN_INT32(len);
215 
216 	r->buffer = (void *)(w ? data : (((char *)r) + sizeof(struct nbd_request_entry)));
217 
218 	*req = r;
219 	return B_OK;
220 
221 err1:
222 	free(r);
223 err0:
224 	dprintf(DP " %s: error 0x%08lx\n", __FUNCTION__, err);
225 	return err;
226 }
227 
228 status_t nbd_queue_request(struct nbd_device *dev, struct nbd_request_entry *req)
229 {
230 	PRINT((DP ">%s(handle:%Ld)\n", __FUNCTION__, req->handle));
231 	req->next = dev->reqs;
232 	dev->reqs = req;
233 	return B_OK;
234 }
235 
236 status_t nbd_dequeue_request(struct nbd_device *dev, uint64 handle, struct nbd_request_entry **req)
237 {
238 	struct nbd_request_entry *r, *prev;
239 	PRINT((DP ">%s(handle:%Ld)\n", __FUNCTION__, handle));
240 	r = dev->reqs;
241 	prev = NULL;
242 	while (r && r->handle != handle) {
243 		prev = r;
244 		r = r->next;
245 	}
246 	if (!r)
247 		return ENOENT;
248 
249 	if (prev)
250 		prev->next = r->next;
251 	else
252 		dev->reqs = r->next;
253 
254 	*req = r;
255 	return B_OK;
256 }
257 
258 status_t nbd_free_request(struct nbd_device *dev, struct nbd_request_entry *req)
259 {
260 	PRINT((DP ">%s(handle:%Ld)\n", __FUNCTION__, req->handle));
261 	delete_sem(req->sem);
262 	free(req);
263 	return B_OK;
264 }
265 
266 
267 
268 #if 0
269 #pragma mark ==== nbd handler ====
270 #endif
271 
/*
 * Reply-reader ("postoffice") thread: one per connected device.
 * Loops forever receiving nbd_reply headers from the server, matches each
 * reply to its pending request by handle, pulls in the read payload, and
 * wakes the waiting caller via the request's semaphore. Exits with an
 * error log on any socket or protocol error; nbd_teardown() provokes that
 * exit by shutting down and closing the socket.
 */
int32 nbd_postoffice(void *arg)
{
	struct nbd_device *dev = (struct nbd_device *)arg;
	struct nbd_request_entry *req = NULL;
	struct nbd_reply reply;
	status_t err;
	const char *reason;	/* name of the failing step, for the error log */
	PRINT((DP ">%s()\n", __FUNCTION__));

	for (;;) {
		reason = "recv";
		err = krecv(dev->sock, &reply, sizeof(reply), 0);
		if (err == -1 && errno < 0)
			err = errno;
		if (err < 0)
			goto err;
		reason = "recv:size";
		/* NOTE(review): a short read is mapped to EINVAL; the following
		 * test only fires if EINVAL is negative (Haiku error codes) --
		 * confirm, otherwise short replies are silently accepted. */
		if (err < sizeof(reply))
			err = EINVAL;
		if (err < 0)
			goto err;
		reason = "magic";
		err = EINVAL;
		if (B_BENDIAN_TO_HOST_INT32(reply.magic) != NBD_REPLY_MAGIC)
			goto err;

		reason = "lock";
		//LOCK
		err = benaphore_lock(&dev->ben);
		if (err)
			goto err;

		/* match the reply to its pending request by handle */
		reason = "dequeue_request";
		err = nbd_dequeue_request(dev, B_BENDIAN_TO_HOST_INT64(reply.handle), &req);

		//UNLOCK
		benaphore_unlock(&dev->ben);

		if (!err && !req) {
			dprintf(DP "nbd_dequeue_rquest found NULL!\n");
			err = ENOENT;
		}

		if (err == B_OK) {
			memcpy(&req->reply, &reply, sizeof(reply));
			if (req->type == NBD_CMD_READ) {
				err = 0;
				reason = "recv(data)";
				/* payload follows the reply header only on success */
				if (reply.error == 0)
					err = krecv(dev->sock, req->buffer, req->len, 0);
				if (err < 0)
					goto err;
				/* tell back how much we've got (?) */
				req->len = err;
			} else {
				if (reply.error)
					req->len = 0;
			}

			reason = "lock";
			//LOCK
			err = benaphore_lock(&dev->ben);
			if (err)
				goto err;

			// this also must be atomic!
			/* wake the waiting caller; if it already gave up (discard set
			 * in nbd_read/nbd_write), dispose of the request for it */
			release_sem(req->sem);
			req->replied = true;
			if (req->discard)
				nbd_free_request(dev, req);

			//UNLOCK
			benaphore_unlock(&dev->ben);
		}

	}

	PRINT((DP "<%s\n", __FUNCTION__));
	return 0;

err:
	dprintf(DP "%s: %s: error 0x%08lx\n", __FUNCTION__, reason, err);
	return err;
}
356 
357 status_t nbd_connect(struct nbd_device *dev)
358 {
359 	struct nbd_init_packet initpkt;
360 	status_t err;
361 	PRINT((DP ">%s()\n", __FUNCTION__));
362 
363 	PRINT((DP " %s: socket()\n", __FUNCTION__));
364 	err = dev->sock = ksocket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
365 	if (err == -1 && errno < 0)
366 		err = errno;
367 	if (err < 0)
368 		goto err0;
369 
370 	PRINT((DP " %s: connect()\n", __FUNCTION__));
371 	err = kconnect(dev->sock, (struct sockaddr *)&dev->server, sizeof(dev->server));
372 	//err = ENOSYS;
373 	if (err == -1 && errno < 0)
374 		err = errno;
375 	/* HACK: avoid the kernel unloading us with locked pages from TCP */
376 	if (err)
377 		gDelayUnload = true;
378 	if (err)
379 		goto err1;
380 
381 	PRINT((DP " %s: recv(initpkt)\n", __FUNCTION__));
382 	err = krecv(dev->sock, &initpkt, sizeof(initpkt), 0);
383 	if (err == -1 && errno < 0)
384 		err = errno;
385 	if (err < sizeof(initpkt))
386 		goto err2;
387 	err = EINVAL;//EPROTO;
388 	if (memcmp(initpkt.passwd, NBD_INIT_PASSWD, sizeof(initpkt.passwd)))
389 		goto err3;
390 	if (B_BENDIAN_TO_HOST_INT64(initpkt.magic) != NBD_INIT_MAGIC)
391 		goto err3;
392 
393 	dev->size = B_BENDIAN_TO_HOST_INT64(initpkt.device_size);
394 
395 	dprintf(DP " %s: connected, device size %Ld bytes.\n", __FUNCTION__, dev->size);
396 
397 	err = dev->postoffice = spawn_kernel_thread(nbd_postoffice, "nbd postoffice", B_REAL_TIME_PRIORITY, dev);
398 	if (err < B_OK)
399 		goto err4;
400 	resume_thread(dev->postoffice);
401 
402 	PRINT((DP "<%s\n", __FUNCTION__));
403 	return B_OK;
404 
405 err4:
406 	dev->postoffice = -1;
407 err3:
408 err2:
409 err1:
410 	kclosesocket(dev->sock);
411 	dev->sock = -1;
412 err0:
413 	dprintf(DP "<%s: error 0x%08lx\n", __FUNCTION__, err);
414 	return err;
415 }
416 
417 status_t nbd_teardown(struct nbd_device *dev)
418 {
419 	status_t err, ret;
420 	PRINT((DP ">%s()\n", __FUNCTION__));
421 	kshutdown(dev->sock, SHUT_RDWR);
422 	kclosesocket(dev->sock);
423 	dev->sock = -1;
424 	err = wait_for_thread(dev->postoffice, &ret);
425 	return B_OK;
426 }
427 
428 status_t nbd_post_request(struct nbd_device *dev, struct nbd_request_entry *req)
429 {
430 	status_t err;
431 	PRINT((DP ">%s(handle:%Ld)\n", __FUNCTION__, req->handle));
432 
433 	err = ksend(dev->sock, &req->req, sizeof(req->req), 0);
434 	if (err < 0)
435 		return err;
436 
437 	if (req->type == NBD_CMD_WRITE)
438 		err = ksend(dev->sock, req->buffer, req->len, 0);
439 	if (err < 0)
440 		return err;
441 	else
442 		req->len = err;
443 
444 	err = nbd_queue_request(dev, req);
445 	return err;
446 }
447 
448 
449 #if 0
450 #pragma mark ==== device hooks ====
451 #endif
452 
453 static struct nbd_device nbd_devices[MAX_NBDS];
454 
455 status_t nbd_open(const char *name, uint32 flags, cookie_t **cookie) {
456 	status_t err;
457 	int32 refcnt;
458 #ifdef MOUNT_KLUDGE
459 	int kfd;
460 #endif
461 	struct nbd_device *dev = NULL;
462 	PRINT((DP ">%s(%s, %lx, )\n", __FUNCTION__, name, flags));
463 	(void)name; (void)flags;
464 	dev = nbd_find_device(name);
465 	if (!dev || !dev->valid)
466 		return ENOENT;
467 	err = ENOMEM;
468 	*cookie = (void*)malloc(sizeof(cookie_t));
469 	if (*cookie == NULL)
470 		goto err0;
471 	memset(*cookie, 0, sizeof(cookie_t));
472 	(*cookie)->dev = dev;
473 	err = benaphore_lock(&dev->ben);
474 	if (err)
475 		goto err1;
476 	/*  */
477 	if (dev->sock < 0)
478 		err = nbd_connect(dev);
479 	if (err)
480 		goto err2;
481 	refcnt = dev->refcnt++;
482 #ifdef MOUNT_KLUDGE
483 	kfd = dev->kludge;
484 	dev->kludge = -1;
485 #endif
486 	benaphore_unlock(&dev->ben);
487 
488 #ifdef MOUNT_KLUDGE
489 	if (refcnt == 0) {
490 		char buf[32];
491 		sprintf(buf, "/dev/%s", name);
492 		dev->kludge = open(buf, O_RDONLY);
493 	} else if (kfd) {
494 		close(kfd);
495 	}
496 #endif
497 
498 	return B_OK;
499 
500 err2:
501 	benaphore_unlock(&dev->ben);
502 err1:
503 	free(*cookie);
504 err0:
505 	dprintf(DP " %s: error 0x%08lx\n", __FUNCTION__, err);
506 	return err;
507 }
508 
509 status_t nbd_close(cookie_t *cookie) {
510 	struct nbd_device *dev = cookie->dev;
511 	status_t err;
512 #ifdef MOUNT_KLUDGE
513 	int kfd = -1;
514 #endif
515 	PRINT((DP ">%s(%d)\n", __FUNCTION__, WHICH(cookie->dev)));
516 
517 	err = benaphore_lock(&dev->ben);
518 	if (err)
519 		return err;
520 
521 	// XXX: do something ?
522 #ifdef MOUNT_KLUDGE
523 	kfd = dev->kludge;
524 	dev->kludge = -1;
525 #endif
526 
527 	benaphore_unlock(&dev->ben);
528 
529 #ifdef MOUNT_KLUDGE
530 	if (kfd > -1) {
531 		close(kfd);
532 	}
533 #endif
534 	return B_OK;
535 }
536 
537 status_t nbd_free(cookie_t *cookie) {
538 	struct nbd_device *dev = cookie->dev;
539 	status_t err;
540 	PRINT((DP ">%s(%d)\n", __FUNCTION__, WHICH(cookie->dev)));
541 
542 	err = benaphore_lock(&dev->ben);
543 	if (err)
544 		return err;
545 
546 	if (--dev->refcnt == 0) {
547 		err = nbd_teardown(dev);
548 	}
549 
550 	benaphore_unlock(&dev->ben);
551 
552 	free(cookie);
553 	return err;
554 }
555 
556 status_t nbd_control(cookie_t *cookie, uint32 op, void *data, size_t len) {
557 	PRINT((DP ">%s(%d, %lu, , %ld)\n", __FUNCTION__, WHICH(cookie->dev), op, len));
558 	switch (op) {
559 	case B_GET_DEVICE_SIZE: /* this one is broken anyway... */
560 		if (data) {
561 			*(size_t *)data = (size_t)cookie->dev->size;
562 			return B_OK;
563 		}
564 		return EINVAL;
565 	case B_SET_DEVICE_SIZE: /* broken */
566 		return EINVAL;
567 	case B_SET_NONBLOCKING_IO:
568 		return EINVAL;
569 	case B_SET_BLOCKING_IO:
570 		return B_OK;
571 	case B_GET_READ_STATUS:
572 	case B_GET_WRITE_STATUS:
573 		if (data) {
574 			*(bool *)data = false;
575 			return B_OK;
576 		}
577 		return EINVAL;
578 	case B_GET_GEOMETRY:
579 	case B_GET_BIOS_GEOMETRY:
580 		if (data) {
581 			device_geometry *geom = (device_geometry *)data;
582 			geom->bytes_per_sector = BLKSIZE;
583 			geom->sectors_per_track = 1;
584 			geom->cylinder_count = cookie->dev->size / BLKSIZE;
585 			geom->head_count = 1;
586 			geom->device_type = B_DISK;
587 			geom->removable = false;
588 			geom->read_only = cookie->dev->readonly;
589 			geom->write_once = false;
590 			return B_OK;
591 		}
592 		return EINVAL;
593 	case B_GET_MEDIA_STATUS:
594 		if (data) {
595 			*(status_t *)data = B_OK;
596 			return B_OK;
597 		}
598 		return EINVAL;
599 
600 	case B_EJECT_DEVICE:
601 	case B_LOAD_MEDIA:
602 		return B_BAD_VALUE;
603 	case B_FLUSH_DRIVE_CACHE: /* wait for request list to be empty ? */
604 		return B_OK;
605 	default:
606 		return B_BAD_VALUE;
607 	}
608 	return B_NOT_ALLOWED;
609 }
610 
/*
 * read() hook: allocate a READ request, post it to the server (under the
 * lock, so header sends don't interleave), then block on the request's
 * semaphore until the postoffice thread delivers the reply. On success
 * copies the payload out and reports the transferred size in *numbytes.
 */
status_t nbd_read(cookie_t *cookie, off_t position, void *data, size_t *numbytes) {
	struct nbd_device *dev = cookie->dev;
	struct nbd_request_entry *req;
	status_t err, semerr;
	PRINT((DP ">%s(%d, %Ld, , )\n", __FUNCTION__, WHICH(cookie->dev), position));

	if (position < 0)
		return EINVAL;
	if (!data)
		return EINVAL;

	err = nbd_alloc_request(dev, &req, NBD_CMD_READ, position, *numbytes, NULL);
	if (err)
		goto err0;

	//LOCK
	err = benaphore_lock(&dev->ben);
	if (err)
		goto err1;

	err = nbd_post_request(dev, req);

	//UNLOCK
	benaphore_unlock(&dev->ben);

	if (err)
		goto err2;


	/* wait for the postoffice thread to release the semaphore */
	semerr = acquire_sem(req->sem);

	//LOCK
	err = benaphore_lock(&dev->ben);
	if(err)
		goto err3;

	/* bad scenarii */
	/* sem failed but no reply yet: hand ownership to the postoffice,
	 * which frees discarded requests when the reply finally arrives;
	 * sem failed after the reply: nobody else will free it, do it here */
	if (!req->replied)
		req->discard = true;
	else if (semerr)
		nbd_free_request(dev, req);

	//UNLOCK
	benaphore_unlock(&dev->ben);

	if (semerr == B_OK) {
		/* req->len was updated by the postoffice to the received size */
		*numbytes = req->len;
		memcpy(data, req->buffer, req->len);
		err = B_OK;
		if (*numbytes == 0 && req->reply.error)
			err = EIO;
		nbd_free_request(dev, req);
		return err;
	}

	*numbytes = 0;
	return semerr;


err3:
err2:
err1:
	nbd_free_request(dev, req);
err0:
	*numbytes = 0;
	return err;
}
678 
/*
 * write() hook: like nbd_read() but posts a WRITE request whose payload is
 * the caller's buffer; header and payload are sent back to back under the
 * lock. Blocks on the request semaphore until the postoffice delivers the
 * reply, then reports the transferred size in *numbytes.
 */
status_t nbd_write(cookie_t *cookie, off_t position, const void *data, size_t *numbytes) {
	struct nbd_device *dev = cookie->dev;
	struct nbd_request_entry *req;
	status_t err, semerr;
	PRINT((DP ">%s(%d, %Ld, %ld, )\n", __FUNCTION__, WHICH(cookie->dev), position, *numbytes));

	if (position < 0)
		return EINVAL;
	if (!data)
		return EINVAL;
	err = B_NOT_ALLOWED;
	if (dev->readonly)
		goto err0;

	err = nbd_alloc_request(dev, &req, NBD_CMD_WRITE, position, *numbytes, data);
	if (err)
		goto err0;

	//LOCK
	err = benaphore_lock(&dev->ben);
	if (err)
		goto err1;

	/* sending request+data must be atomic */
	err = nbd_post_request(dev, req);

	//UNLOCK
	benaphore_unlock(&dev->ben);

	if (err)
		goto err2;


	/* wait for the postoffice thread to release the semaphore */
	semerr = acquire_sem(req->sem);

	//LOCK
	err = benaphore_lock(&dev->ben);
	if(err)
		goto err3;

	/* bad scenarii */
	/* sem failed but no reply yet: hand ownership to the postoffice,
	 * which frees discarded requests when the reply finally arrives;
	 * sem failed after the reply: nobody else will free it, do it here */
	if (!req->replied)
		req->discard = true;
	else if (semerr)
		nbd_free_request(dev, req);

	//UNLOCK
	benaphore_unlock(&dev->ben);

	if (semerr == B_OK) {
		*numbytes = req->len;
		err = B_OK;
		if (*numbytes == 0 && req->reply.error)
			err = EIO;
		nbd_free_request(dev, req);
		return err;
	}

	*numbytes = 0;
	return semerr;


err3:
err2:
err1:
	nbd_free_request(dev, req);
err0:
	*numbytes = 0;
	return err;
}
749 
/* Hook table handed to devfs via find_device(); only the basic
 * open/close/free/control/read/write entries are implemented. */
device_hooks nbd_hooks={
	(device_open_hook)nbd_open,
	(device_close_hook)nbd_close,
	(device_free_hook)nbd_free,
	(device_control_hook)nbd_control,
	(device_read_hook)nbd_read,
	(device_write_hook)nbd_write,
	NULL,	/* select */
	NULL,	/* deselect */
	NULL,	/* readv */
	NULL	/* writev */
};
762 
763 #if 0
764 #pragma mark ==== driver hooks ====
765 #endif
766 
767 int32 api_version = B_CUR_DRIVER_API_VERSION;
768 
769 static char *nbd_name[MAX_NBDS+1] = {
770 	NULL
771 };
772 
773 status_t
774 init_hardware (void)
775 {
776 	PRINT((DP ">%s()\n", __FUNCTION__));
777 	return B_OK;
778 }
779 
780 status_t
781 init_driver (void)
782 {
783 	status_t err;
784 	int i, j;
785 	// XXX: load settings
786 	void *handle;
787 	char **names = nbd_name;
788 	PRINT((DP ">%s()\n", __FUNCTION__));
789 
790 	handle = load_driver_settings(DRV);
791 	if (handle == NULL)
792 		return ENOENT;
793 	// XXX: test for boot args ?
794 
795 
796 	err = ksocket_init();
797 	if (err < B_OK)
798 		return err;
799 
800 	for (i = 0; i < MAX_NBDS; i++) {
801 		nbd_devices[i].valid = false;
802 		nbd_devices[i].readonly = false;
803 		err = benaphore_init(&nbd_devices[i].ben, "nbd lock");
804 		if (err < B_OK)
805 			return err; // XXX
806 		nbd_devices[i].refcnt = 0;
807 		nbd_devices[i].req = 0LL; /* next ID for requests */
808 		nbd_devices[i].sock = -1;
809 		nbd_devices[i].postoffice = -1;
810 		nbd_devices[i].size = 0LL;
811 		nbd_devices[i].reqs = NULL;
812 #ifdef MOUNT_KLUDGE
813 		nbd_devices[i].kludge = -1;
814 #endif
815 		nbd_name[i] = NULL;
816 	}
817 
818 	for (i = 0; i < MAX_NBDS; i++) {
819 		const driver_settings *settings = get_driver_settings(handle);
820 		driver_parameter *p = NULL;
821 		char keyname[10];
822 		sprintf(keyname, "%d", i);
823 		for (j = 0; j < settings->parameter_count; j++)
824 			if (!strcmp(settings->parameters[j].name, keyname))
825 				p = &settings->parameters[j];
826 		if (!p)
827 			continue;
828 		for (j = 0; j < p->parameter_count; j++) {
829 			if (!strcmp(p->parameters[j].name, "readonly"))
830 				nbd_devices[i].readonly = true;
831 			if (!strcmp(p->parameters[j].name, "server")) {
832 				if (p->parameters[j].value_count < 2)
833 					continue;
834 				nbd_devices[i].server.sin_len = sizeof(struct sockaddr_in);
835 				nbd_devices[i].server.sin_family = AF_INET;
836 				kinet_aton(p->parameters[j].values[0], &nbd_devices[i].server.sin_addr);
837 				nbd_devices[i].server.sin_port = htons(atoi(p->parameters[j].values[1]));
838 				dprintf(DP " configured [%d]\n", i);
839 				*(names) = malloc(DEVICE_NAME_MAX);
840 				if (*(names) == NULL)
841 					return ENOMEM;
842 				sprintf(*(names++), DEVICE_FMT, i);
843 				nbd_devices[i].valid = true;
844 			}
845 		}
846 	}
847 	*names = NULL;
848 
849 	unload_driver_settings(handle);
850 	return B_OK;
851 }
852 
853 void
854 uninit_driver (void)
855 {
856 	status_t err;
857 	int i;
858 	PRINT((DP ">%s()\n", __FUNCTION__));
859 	for (i = 0; i < MAX_NBDS; i++) {
860 		free(nbd_name[i]);
861 		benaphore_destroy(&nbd_devices[i].ben);
862 	}
863 	err = ksocket_cleanup();
864 	/* HACK */
865 	if (gDelayUnload)
866 		snooze(BONE_TEARDOWN_DELAY);
867 }
868 
869 const char**
870 publish_devices()
871 {
872 	PRINT((DP ">%s()\n", __FUNCTION__));
873 	return (const char **)nbd_name;
874 }
875 
876 device_hooks*
877 find_device(const char* name)
878 {
879 	PRINT((DP ">%s(%s)\n", __FUNCTION__, name));
880 	return &nbd_hooks;
881 }
882 
883 struct nbd_device*
884 nbd_find_device(const char* name)
885 {
886 	int i;
887 	PRINT((DP ">%s(%s)\n", __FUNCTION__, name));
888 	for (i = 0; i < MAX_NBDS; i++) {
889 		char buf[DEVICE_NAME_MAX];
890 		sprintf(buf, DEVICE_FMT, i);
891 		if (!strcmp(buf, name))
892 			return &nbd_devices[i];
893 	}
894 	return NULL;
895 }
896