xref: /haiku/src/add-ons/kernel/drivers/disk/virtual/nbd/nbd.c (revision 2600324b57fa31cdea1627d584d314f2a579c4a8)
/*
 * Copyright 2006-2007, François Revol. All rights reserved.
 * Distributed under the terms of the MIT License.
 */

/*
 * nbd driver for Haiku
 *
 * Maps a Network Block Device as virtual partitions.
 */

#include <ByteOrder.h>
#include <KernelExport.h>
#include <Drivers.h>
#include <driver_settings.h>
#include <Errors.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <ksocket.h>
#include <netinet/in.h>

//#define DEBUG 1

/* On the first open(), open ourselves for a few seconds,
 * so that a second open() does not trigger a reconnect attempt
 * that fails, as happens with the Python server.
 */
//#define MOUNT_KLUDGE


/* names, ohh names... */
#ifndef SHUT_RDWR
#define SHUT_RDWR SHUTDOWN_BOTH
#endif

/* locking support */
#ifdef __HAIKU__
#include <kernel/lock.h>
#else
/* wrappers for R5 */
#ifndef _IMPEXP_KERNEL
#define _IMPEXP_KERNEL
#endif
#include "lock.h"
#define mutex lock
#define mutex_init new_lock
#define mutex_destroy free_lock
#define mutex_lock LOCK
#define mutex_unlock UNLOCK
#endif

#define DEBUG 1

#include "nbd.h"

#define DRV "nbd"
#define DP "nbd:"
#define MAX_NBDS 4
#define DEVICE_PREFIX "disk/virtual/nbd/"
#define DEVICE_FMT DEVICE_PREFIX "%d/raw"
#define DEVICE_NAME_MAX 32
#define MAX_REQ_SIZE (32*1024*1024)
#define BLKSIZE 512

/* debugging */
#if DEBUG
#define PRINT(a) dprintf a
#define WHICH(dev) ((int)(dev - nbd_devices))
#else
#define PRINT(a)
#endif

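/* One in-flight request: the wire-format request/reply headers plus
 * the bookkeeping the postoffice thread needs to match a reply to
 * its request and wake the waiting caller. */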
struct nbd_request_entry {
	struct nbd_request_entry *next;
	struct nbd_request req; /* net byte order */
	struct nbd_reply reply; /* net byte order */
	sem_id sem;
	bool replied;
	bool discard;
	uint64 handle;
	uint32 type;
	uint64 from;
	size_t len;
	void *buffer; /* write: ptr to passed buffer; read: ptr to malloc()ed extra */
};

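/* Per-device state: one socket and one postoffice thread per device;
 * the "ben" mutex serializes the socket, the request list and the
 * counters. */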
struct nbd_device {
	bool valid;
	bool readonly;
	struct sockaddr_in server;
	mutex ben;
	vint32 refcnt;
	uint64 req; /* next ID for requests */
	int sock;
	thread_id postoffice;
	uint64 size;
	struct nbd_request_entry *reqs;
#ifdef MOUNT_KLUDGE
	int kludge;
#endif
};

typedef struct cookie {
	struct nbd_device *dev;
} cookie_t;

/* data=NULL on read */
status_t nbd_alloc_request(struct nbd_device *dev, struct nbd_request_entry **req, uint32 type, off_t from, size_t len, const char *data);
status_t nbd_queue_request(struct nbd_device *dev, struct nbd_request_entry *req);
status_t nbd_dequeue_request(struct nbd_device *dev, uint64 handle, struct nbd_request_entry **req);
status_t nbd_free_request(struct nbd_device *dev, struct nbd_request_entry *req);

struct nbd_device *nbd_find_device(const char* name);

int32 nbd_postoffice(void *arg);
status_t nbd_connect(struct nbd_device *dev);
status_t nbd_teardown(struct nbd_device *dev);
status_t nbd_post_request(struct nbd_device *dev, struct nbd_request_entry *req);

status_t nbd_open(const char *name, uint32 flags, cookie_t **cookie);
status_t nbd_close(cookie_t *cookie);
status_t nbd_free(cookie_t *cookie);
status_t nbd_control(cookie_t *cookie, uint32 op, void *data, size_t len);
status_t nbd_read(cookie_t *cookie, off_t position, void *data, size_t *numbytes);
status_t nbd_write(cookie_t *cookie, off_t position, const void *data, size_t *numbytes);

KSOCKET_MODULE_DECL;

/* HACK:
 * In BONE at least, a connect() that fails (EINTR or ETIMEDOUT)
 * keeps locked pages around (likely a bone_data) until TCP gets
 * the last ACK. If that happens, we snooze() in uninit_driver()
 * to let TCP time out before the kernel tries to delete the
 * driver image. */
bool gDelayUnload = false;
#define BONE_TEARDOWN_DELAY 60000000

#if 0
#pragma mark ==== support ====
#endif

// move that to ksocket inlined
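/* Minimal dotted-quad parser. Octets are shifted in so the first one
 * ends up in the low byte; stored through a uint32, that gives
 * network byte order on little-endian hosts like x86 (a big-endian
 * host would need the shifts reversed). */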
static int kinet_aton(const char *in, struct in_addr *addr)
{
	int i;
	unsigned long a;
	uint32 inaddr = 0L;
	char *p = (char *)in;
	for (i = 0; i < 4; i++) {
		a = strtoul(p, &p, 10);
		if (!p)
			return -1;
		inaddr = (inaddr >> 8) | ((a & 0x0ff) << 24);
		*(uint32 *)addr = inaddr;
		if (!*p)
			return 0;
		p++;
	}
	return 0;
}

#if 0
#pragma mark ==== request manager ====
#endif

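/* Allocate a request entry and prefill the on-wire header in network
 * byte order. For reads the data buffer is allocated right behind the
 * entry; for writes "buffer" aliases the caller's data. */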
status_t nbd_alloc_request(struct nbd_device *dev, struct nbd_request_entry **req, uint32 type, off_t from, size_t len, const char *data)
{
	bool w = (type == NBD_CMD_WRITE);
	struct nbd_request_entry *r;
	status_t err = EINVAL;
	uint64 handle;
	PRINT((DP ">%s(%ld, %Ld, %ld)\n", __FUNCTION__, type, from, len));

	if (type != NBD_CMD_READ && type != NBD_CMD_WRITE && type != NBD_CMD_DISC)
		return err;
	if (!dev || !req || from < 0)
		return err;

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		return err;

	// take the next request ID; must happen under the lock
	handle = dev->req++;

	//UNLOCK
	mutex_unlock(&dev->ben);

	err = ENOMEM;
	r = malloc(sizeof(struct nbd_request_entry) + (w ? 0 : len));
	if (r == NULL)
		goto err0;
	r->next = NULL;
	err = r->sem = create_sem(0, "nbd request sem");
	if (err < 0)
		goto err1;

	r->replied = false;
	r->discard = false;
	r->handle = handle;
	r->type = type;
	r->from = from;
	r->len = len;

	r->req.magic = B_HOST_TO_BENDIAN_INT32(NBD_REQUEST_MAGIC);
	r->req.type = B_HOST_TO_BENDIAN_INT32(type);
	r->req.handle = B_HOST_TO_BENDIAN_INT64(r->handle);
	r->req.from = B_HOST_TO_BENDIAN_INT64(r->from);
	r->req.len = B_HOST_TO_BENDIAN_INT32(len);

	r->buffer = (void *)(w ? data : (((char *)r) + sizeof(struct nbd_request_entry)));

	*req = r;
	return B_OK;

err1:
	free(r);
err0:
	dprintf(DP " %s: error 0x%08lx\n", __FUNCTION__, err);
	return err;
}

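/* Pending-request list helpers; the caller must hold dev->ben. */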
status_t nbd_queue_request(struct nbd_device *dev, struct nbd_request_entry *req)
{
	PRINT((DP ">%s(handle:%Ld)\n", __FUNCTION__, req->handle));
	req->next = dev->reqs;
	dev->reqs = req;
	return B_OK;
}

status_t nbd_dequeue_request(struct nbd_device *dev, uint64 handle, struct nbd_request_entry **req)
{
	struct nbd_request_entry *r, *prev;
	PRINT((DP ">%s(handle:%Ld)\n", __FUNCTION__, handle));
	r = dev->reqs;
	prev = NULL;
	while (r && r->handle != handle) {
		prev = r;
		r = r->next;
	}
	if (!r)
		return ENOENT;

	if (prev)
		prev->next = r->next;
	else
		dev->reqs = r->next;

	*req = r;
	return B_OK;
}

status_t nbd_free_request(struct nbd_device *dev, struct nbd_request_entry *req)
{
	PRINT((DP ">%s(handle:%Ld)\n", __FUNCTION__, req->handle));
	delete_sem(req->sem);
	free(req);
	return B_OK;
}

#if 0
#pragma mark ==== nbd handler ====
#endif

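/* Reply reader thread: blocks in krecv() on the socket, matches each
 * reply to its pending request by handle, pulls in the payload for
 * reads, then releases the caller's semaphore. Exits on socket errors
 * or teardown. */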
int32 nbd_postoffice(void *arg)
{
	struct nbd_device *dev = (struct nbd_device *)arg;
	struct nbd_request_entry *req = NULL;
	struct nbd_reply reply;
	status_t err;
	const char *reason;
	PRINT((DP ">%s()\n", __FUNCTION__));

	for (;;) {
		reason = "recv";
		err = krecv(dev->sock, &reply, sizeof(reply), 0);
		if (err == -1 && errno < 0)
			err = errno;
		if (err < 0)
			goto err;
		reason = "recv:size";
		if (err < sizeof(reply))
			err = EINVAL;
		if (err < 0)
			goto err;
		reason = "magic";
		err = EINVAL;
		if (B_BENDIAN_TO_HOST_INT32(reply.magic) != NBD_REPLY_MAGIC)
			goto err;

		reason = "lock";
		//LOCK
		err = mutex_lock(&dev->ben);
		if (err)
			goto err;

		reason = "dequeue_request";
		err = nbd_dequeue_request(dev, B_BENDIAN_TO_HOST_INT64(reply.handle), &req);

		//UNLOCK
		mutex_unlock(&dev->ben);

		if (!err && !req) {
			dprintf(DP "nbd_dequeue_request found NULL!\n");
			err = ENOENT;
		}

		if (err == B_OK) {
			memcpy(&req->reply, &reply, sizeof(reply));
			if (req->type == NBD_CMD_READ) {
				err = 0;
				reason = "recv(data)";
				if (reply.error == 0)
					err = krecv(dev->sock, req->buffer, req->len, 0);
				if (err < 0)
					goto err;
				/* tell back how much we've got (?) */
				req->len = err;
			} else {
				if (reply.error)
					req->len = 0;
			}

			reason = "lock";
			//LOCK
			err = mutex_lock(&dev->ben);
			if (err)
				goto err;

			// the waiter may want to discard the request; flag it under the lock
			release_sem(req->sem);
			req->replied = true;
			if (req->discard)
				nbd_free_request(dev, req);

			//UNLOCK
			mutex_unlock(&dev->ben);
		}
	}

	PRINT((DP "<%s\n", __FUNCTION__));
	return 0;

err:
	dprintf(DP "%s: %s: error 0x%08lx\n", __FUNCTION__, reason, err);
	return err;
}

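/* Connect to the server and do the old-style NBD handshake: the
 * server greets with a fixed passwd string, a magic number and the
 * exported device size, with no option negotiation. On success the
 * postoffice thread is spawned. */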
status_t nbd_connect(struct nbd_device *dev)
{
	struct nbd_init_packet initpkt;
	status_t err;
	PRINT((DP ">%s()\n", __FUNCTION__));

	PRINT((DP " %s: socket()\n", __FUNCTION__));
	err = dev->sock = ksocket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (err == -1 && errno < 0)
		err = errno;
	if (err < 0)
		goto err0;

	PRINT((DP " %s: connect()\n", __FUNCTION__));
	err = kconnect(dev->sock, (struct sockaddr *)&dev->server, sizeof(dev->server));
	//err = ENOSYS;
	if (err == -1 && errno < 0)
		err = errno;
	/* HACK: avoid the kernel unloading us with locked pages from TCP */
	if (err)
		gDelayUnload = true;
	if (err)
		goto err1;

	PRINT((DP " %s: recv(initpkt)\n", __FUNCTION__));
	err = krecv(dev->sock, &initpkt, sizeof(initpkt), 0);
	if (err == -1 && errno < 0)
		err = errno;
	if (err < 0)
		goto err2;
	if ((size_t)err < sizeof(initpkt)) {
		err = EINVAL; /* short read on the handshake */
		goto err2;
	}
	err = EINVAL;//EPROTO;
	if (memcmp(initpkt.passwd, NBD_INIT_PASSWD, sizeof(initpkt.passwd)))
		goto err3;
	if (B_BENDIAN_TO_HOST_INT64(initpkt.magic) != NBD_INIT_MAGIC)
		goto err3;

	dev->size = B_BENDIAN_TO_HOST_INT64(initpkt.device_size);

	dprintf(DP " %s: connected, device size %Ld bytes.\n", __FUNCTION__, dev->size);

	err = dev->postoffice = spawn_kernel_thread(nbd_postoffice, "nbd postoffice", B_REAL_TIME_PRIORITY, dev);
	if (err < B_OK)
		goto err4;
	resume_thread(dev->postoffice);

	PRINT((DP "<%s\n", __FUNCTION__));
	return B_OK;

err4:
	dev->postoffice = -1;
err3:
err2:
err1:
	kclosesocket(dev->sock);
	dev->sock = -1;
err0:
	dprintf(DP "<%s: error 0x%08lx\n", __FUNCTION__, err);
	return err;
}

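/* Shut down and close the socket, then wait for the postoffice
 * thread to notice and exit. */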
status_t nbd_teardown(struct nbd_device *dev)
{
	status_t ret;
	PRINT((DP ">%s()\n", __FUNCTION__));
	kshutdown(dev->sock, SHUT_RDWR);
	kclosesocket(dev->sock);
	dev->sock = -1;
	wait_for_thread(dev->postoffice, &ret);
	return B_OK;
}

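/* Send the request header (plus the payload for writes) and put the
 * entry on the pending list. The caller must hold dev->ben so header
 * and payload go out back to back. */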
status_t nbd_post_request(struct nbd_device *dev, struct nbd_request_entry *req)
{
	status_t err;
	PRINT((DP ">%s(handle:%Ld)\n", __FUNCTION__, req->handle));

	err = ksend(dev->sock, &req->req, sizeof(req->req), 0);
	if (err < 0)
		return err;

	if (req->type == NBD_CMD_WRITE) {
		err = ksend(dev->sock, req->buffer, req->len, 0);
		if (err < 0)
			return err;
		/* remember how much of the payload actually went out */
		req->len = err;
	}

	err = nbd_queue_request(dev, req);
	return err;
}

#if 0
#pragma mark ==== device hooks ====
#endif

static struct nbd_device nbd_devices[MAX_NBDS];

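/* The first open() connects to the server lazily; later opens just
 * bump the reference count. */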
status_t nbd_open(const char *name, uint32 flags, cookie_t **cookie) {
	status_t err;
	int32 refcnt;
#ifdef MOUNT_KLUDGE
	int kfd;
#endif
	struct nbd_device *dev = NULL;
	PRINT((DP ">%s(%s, %lx, )\n", __FUNCTION__, name, flags));
	(void)flags;
	dev = nbd_find_device(name);
	if (!dev || !dev->valid)
		return ENOENT;
	err = ENOMEM;
	*cookie = (void*)malloc(sizeof(cookie_t));
	if (*cookie == NULL)
		goto err0;
	memset(*cookie, 0, sizeof(cookie_t));
	(*cookie)->dev = dev;
	err = mutex_lock(&dev->ben);
	if (err)
		goto err1;
	/* connect on first open */
	if (dev->sock < 0)
		err = nbd_connect(dev);
	if (err)
		goto err2;
	refcnt = dev->refcnt++;
#ifdef MOUNT_KLUDGE
	kfd = dev->kludge;
	dev->kludge = -1;
#endif
	mutex_unlock(&dev->ben);

#ifdef MOUNT_KLUDGE
	if (refcnt == 0) {
		char buf[DEVICE_NAME_MAX + 6]; /* "/dev/" + name */
		sprintf(buf, "/dev/%s", name);
		dev->kludge = open(buf, O_RDONLY);
	} else if (kfd > -1) {
		close(kfd);
	}
#endif

	return B_OK;

err2:
	mutex_unlock(&dev->ben);
err1:
	free(*cookie);
err0:
	dprintf(DP " %s: error 0x%08lx\n", __FUNCTION__, err);
	return err;
}

status_t nbd_close(cookie_t *cookie) {
	struct nbd_device *dev = cookie->dev;
	status_t err;
#ifdef MOUNT_KLUDGE
	int kfd = -1;
#endif
	PRINT((DP ">%s(%d)\n", __FUNCTION__, WHICH(cookie->dev)));

	err = mutex_lock(&dev->ben);
	if (err)
		return err;

	// XXX: do something ?
#ifdef MOUNT_KLUDGE
	kfd = dev->kludge;
	dev->kludge = -1;
#endif

	mutex_unlock(&dev->ben);

#ifdef MOUNT_KLUDGE
	if (kfd > -1) {
		close(kfd);
	}
#endif
	return B_OK;
}

status_t nbd_free(cookie_t *cookie) {
	struct nbd_device *dev = cookie->dev;
	status_t err;
	PRINT((DP ">%s(%d)\n", __FUNCTION__, WHICH(cookie->dev)));

	err = mutex_lock(&dev->ben);
	if (err)
		return err;

	if (--dev->refcnt == 0) {
		err = nbd_teardown(dev);
	}

	mutex_unlock(&dev->ben);

	free(cookie);
	return err;
}

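/* The device has no real geometry; fake one head and one sector per
 * track, so cylinder_count is simply the size in 512-byte blocks. */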
status_t nbd_control(cookie_t *cookie, uint32 op, void *data, size_t len) {
	PRINT((DP ">%s(%d, %lu, , %ld)\n", __FUNCTION__, WHICH(cookie->dev), op, len));
	switch (op) {
	case B_GET_DEVICE_SIZE: /* this one is broken anyway... */
		if (data) {
			*(size_t *)data = (size_t)cookie->dev->size;
			return B_OK;
		}
		return EINVAL;
	case B_SET_DEVICE_SIZE: /* broken */
		return EINVAL;
	case B_SET_NONBLOCKING_IO:
		return EINVAL;
	case B_SET_BLOCKING_IO:
		return B_OK;
	case B_GET_READ_STATUS:
	case B_GET_WRITE_STATUS:
		if (data) {
			*(bool *)data = false;
			return B_OK;
		}
		return EINVAL;
	case B_GET_GEOMETRY:
	case B_GET_BIOS_GEOMETRY:
		if (data) {
			device_geometry *geom = (device_geometry *)data;
			geom->bytes_per_sector = BLKSIZE;
			geom->sectors_per_track = 1;
			geom->cylinder_count = cookie->dev->size / BLKSIZE;
			geom->head_count = 1;
			geom->device_type = B_DISK;
			geom->removable = false;
			geom->read_only = cookie->dev->readonly;
			geom->write_once = false;
			return B_OK;
		}
		return EINVAL;
	case B_GET_MEDIA_STATUS:
		if (data) {
			*(status_t *)data = B_OK;
			return B_OK;
		}
		return EINVAL;

	case B_EJECT_DEVICE:
	case B_LOAD_MEDIA:
		return B_BAD_VALUE;
	case B_FLUSH_DRIVE_CACHE: /* wait for request list to be empty ? */
		return B_OK;
	default:
		return B_BAD_VALUE;
	}
	return B_NOT_ALLOWED;
}

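/* Post a read request, then block on its semaphore until the
 * postoffice thread has filled the buffer. If the wait is interrupted
 * before the reply arrives, the request is flagged for the postoffice
 * to discard later. */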
status_t nbd_read(cookie_t *cookie, off_t position, void *data, size_t *numbytes) {
	struct nbd_device *dev = cookie->dev;
	struct nbd_request_entry *req;
	status_t err, semerr;
	PRINT((DP ">%s(%d, %Ld, , )\n", __FUNCTION__, WHICH(cookie->dev), position));

	if (position < 0)
		return EINVAL;
	if (!data)
		return EINVAL;

	err = nbd_alloc_request(dev, &req, NBD_CMD_READ, position, *numbytes, NULL);
	if (err)
		goto err0;

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		goto err1;

	err = nbd_post_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (err)
		goto err2;

	semerr = acquire_sem(req->sem);

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		goto err3;

	/* bad scenarios */
	if (!req->replied)
		req->discard = true;
	else if (semerr)
		nbd_free_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (semerr == B_OK) {
		*numbytes = req->len;
		memcpy(data, req->buffer, req->len);
		err = B_OK;
		if (*numbytes == 0 && req->reply.error)
			err = EIO;
		nbd_free_request(dev, req);
		return err;
	}

	*numbytes = 0;
	return semerr;

err3:
err2:
err1:
	nbd_free_request(dev, req);
err0:
	*numbytes = 0;
	return err;
}

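/* Same flow as nbd_read(), except the payload travels with the
 * request and the reply carries only a status. */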
status_t nbd_write(cookie_t *cookie, off_t position, const void *data, size_t *numbytes) {
	struct nbd_device *dev = cookie->dev;
	struct nbd_request_entry *req;
	status_t err, semerr;
	PRINT((DP ">%s(%d, %Ld, %ld, )\n", __FUNCTION__, WHICH(cookie->dev), position, *numbytes));

	if (position < 0)
		return EINVAL;
	if (!data)
		return EINVAL;
	err = B_NOT_ALLOWED;
	if (dev->readonly)
		goto err0;

	err = nbd_alloc_request(dev, &req, NBD_CMD_WRITE, position, *numbytes, data);
	if (err)
		goto err0;

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		goto err1;

	/* sending request+data must be atomic */
	err = nbd_post_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (err)
		goto err2;

	semerr = acquire_sem(req->sem);

	//LOCK
	err = mutex_lock(&dev->ben);
	if (err)
		goto err3;

	/* bad scenarios */
	if (!req->replied)
		req->discard = true;
	else if (semerr)
		nbd_free_request(dev, req);

	//UNLOCK
	mutex_unlock(&dev->ben);

	if (semerr == B_OK) {
		*numbytes = req->len;
		err = B_OK;
		if (*numbytes == 0 && req->reply.error)
			err = EIO;
		nbd_free_request(dev, req);
		return err;
	}

	*numbytes = 0;
	return semerr;

err3:
err2:
err1:
	nbd_free_request(dev, req);
err0:
	*numbytes = 0;
	return err;
}

device_hooks nbd_hooks = {
	(device_open_hook)nbd_open,
	(device_close_hook)nbd_close,
	(device_free_hook)nbd_free,
	(device_control_hook)nbd_control,
	(device_read_hook)nbd_read,
	(device_write_hook)nbd_write,
	NULL,
	NULL,
	NULL,
	NULL
};

#if 0
#pragma mark ==== driver hooks ====
#endif

int32 api_version = B_CUR_DRIVER_API_VERSION;

static char *nbd_name[MAX_NBDS+1] = {
	NULL
};

status_t
init_hardware (void)
{
	PRINT((DP ">%s()\n", __FUNCTION__));
	return B_OK;
}

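/* Devices are configured from the "nbd" driver settings file, one
 * numbered section per device. A sketch of the expected format (the
 * path and server address are illustrative only):
 *
 *	# /boot/home/config/settings/kernel/drivers/nbd
 *	0 {
 *		server 192.168.0.2 1234
 *		readonly
 *	}
 */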
status_t
init_driver (void)
{
	status_t err;
	int i, j;
	// XXX: load settings
	void *handle;
	char **names = nbd_name;
	PRINT((DP ">%s()\n", __FUNCTION__));

	handle = load_driver_settings(DRV);
	if (handle == NULL)
		return ENOENT;
	// XXX: test for boot args ?

	err = ksocket_init();
	if (err < B_OK) {
		unload_driver_settings(handle);
		return err;
	}

	for (i = 0; i < MAX_NBDS; i++) {
		nbd_devices[i].valid = false;
		nbd_devices[i].readonly = false;
		mutex_init(&nbd_devices[i].ben, "nbd lock");
		nbd_devices[i].refcnt = 0;
		nbd_devices[i].req = 0LL; /* next ID for requests */
		nbd_devices[i].sock = -1;
		nbd_devices[i].postoffice = -1;
		nbd_devices[i].size = 0LL;
		nbd_devices[i].reqs = NULL;
#ifdef MOUNT_KLUDGE
		nbd_devices[i].kludge = -1;
#endif
		nbd_name[i] = NULL;
	}

	for (i = 0; i < MAX_NBDS; i++) {
		const driver_settings *settings = get_driver_settings(handle);
		driver_parameter *p = NULL;
		char keyname[10];
		sprintf(keyname, "%d", i);
		for (j = 0; j < settings->parameter_count; j++)
			if (!strcmp(settings->parameters[j].name, keyname))
				p = &settings->parameters[j];
		if (!p)
			continue;
		for (j = 0; j < p->parameter_count; j++) {
			if (!strcmp(p->parameters[j].name, "readonly"))
				nbd_devices[i].readonly = true;
			if (!strcmp(p->parameters[j].name, "server")) {
				if (p->parameters[j].value_count < 2)
					continue;
				nbd_devices[i].server.sin_len = sizeof(struct sockaddr_in);
				nbd_devices[i].server.sin_family = AF_INET;
				kinet_aton(p->parameters[j].values[0], &nbd_devices[i].server.sin_addr);
				nbd_devices[i].server.sin_port = htons(atoi(p->parameters[j].values[1]));
				dprintf(DP " configured [%d]\n", i);
				*(names) = malloc(DEVICE_NAME_MAX);
				if (*(names) == NULL) {
					unload_driver_settings(handle);
					return ENOMEM;
				}
				sprintf(*(names++), DEVICE_FMT, i);
				nbd_devices[i].valid = true;
			}
		}
	}
	*names = NULL;

	unload_driver_settings(handle);
	return B_OK;
}

void
uninit_driver (void)
{
	status_t err;
	int i;
	PRINT((DP ">%s()\n", __FUNCTION__));
	for (i = 0; i < MAX_NBDS; i++) {
		free(nbd_name[i]);
		mutex_destroy(&nbd_devices[i].ben);
	}
	err = ksocket_cleanup();
	/* HACK */
	if (gDelayUnload)
		snooze(BONE_TEARDOWN_DELAY);
}

const char**
publish_devices(void)
{
	PRINT((DP ">%s()\n", __FUNCTION__));
	return (const char **)nbd_name;
}

device_hooks*
find_device(const char* name)
{
	PRINT((DP ">%s(%s)\n", __FUNCTION__, name));
	return &nbd_hooks;
}

struct nbd_device*
nbd_find_device(const char* name)
{
	int i;
	PRINT((DP ">%s(%s)\n", __FUNCTION__, name));
	for (i = 0; i < MAX_NBDS; i++) {
		char buf[DEVICE_NAME_MAX];
		sprintf(buf, DEVICE_FMT, i);
		if (!strcmp(buf, name))
			return &nbd_devices[i];
	}
	return NULL;
}
895