xref: /haiku/src/add-ons/kernel/network/devices/tunnel/tunnel.cpp (revision 82bfaa954dcfd90582fb2c1a0e918971eea57091)
1 /*
2  * Copyright 2023, Haiku, Inc. All rights reserved.
3  * Distributed under the terms of the MIT License.
4  *
5  * Authors:
6  *		Augustin Cavalier <waddlesplash>
7  *		Axel Dörfler, axeld@pinc-software.de
8  *		Sean Brady, swangeon@gmail.com
9  */
10 
11 #include <new>
12 #include <string.h>
13 
14 #include <fs/select_sync_pool.h>
15 #include <fs/devfs.h>
16 #include <util/AutoLock.h>
17 #include <util/Random.h>
18 
19 #include <net_buffer.h>
20 #include <net_device.h>
21 #include <net_stack.h>
22 #include <NetBufferUtilities.h>
23 
24 #include <net/if.h>
25 #include <net/if_dl.h>
26 #include <net/if_media.h>
27 #include <net/if_types.h>
28 #include <net/if_tun.h>
29 #include <netinet/in.h>
30 #include <ethernet.h>
31 
32 
33 //#define TRACE_TUNNEL
34 #ifdef TRACE_TUNNEL
35 #	define TRACE(x...) dprintf("network/tunnel: " x)
36 #else
37 #   define TRACE(x...)
38 #endif
39 
40 #define CALLED(x...)			TRACE("CALLED %s\n", __PRETTY_FUNCTION__)
41 #define TRACE_ALWAYS(x...)		dprintf("network/tunnel: " x)
42 
43 
// Per-device state for one tun/* or tap/* interface, shared between the
// network-stack side (net_device hooks) and the devfs character-device side.
struct tunnel_device : net_device {
	bool				is_tap;		// true for "tap/*" (ethernet), false for "tun/*" (IP)

	// send_queue: frames from the network stack, drained by devfs read().
	// receive_queue: frames written via devfs, drained by receive_data().
	net_fifo			send_queue, receive_queue;

	int32				open_count;	// 0 or 1: devfs opens are exclusive

	mutex				select_lock;	// guards select_pool
	select_sync_pool*	select_pool;
};
54 
// Upper bound (in bytes) for each per-device FIFO.
#define TUNNEL_QUEUE_MAX (ETHER_MAX_FRAME_SIZE * 32)


struct net_buffer_module_info* gBufferModule;
static net_stack_module_info* gStackModule;


//	#pragma mark - devices array


// All live tunnel devices; unused slots are NULL. Guarded by gDevicesLock.
static tunnel_device* gDevices[10] = {};
static mutex gDevicesLock = MUTEX_INITIALIZER("tunnel devices");
67 
68 
69 static tunnel_device*
70 find_tunnel_device(const char* name)
71 {
72 	ASSERT_LOCKED_MUTEX(&gDevicesLock);
73 	for (size_t i = 0; i < B_COUNT_OF(gDevices); i++) {
74 		if (gDevices[i] == NULL)
75 			continue;
76 
77 		if (strcmp(gDevices[i]->name, name) == 0)
78 			return gDevices[i];
79 	}
80 	return NULL;
81 }
82 
83 
84 //	#pragma mark - devfs device
85 
86 
// Per-open state for the devfs device node.
struct tunnel_cookie {
	tunnel_device*	device;
	uint32		flags;	// open flags; O_NONBLOCK is toggled via ioctl
};
91 
92 
93 status_t
94 tunnel_open(const char* name, uint32 flags, void** _cookie)
95 {
96 	MutexLocker devicesLocker(gDevicesLock);
97 	tunnel_device* device = find_tunnel_device(name);
98 	if (device == NULL)
99 		return ENODEV;
100 	if (atomic_or(&device->open_count, 1) != 0)
101 		return EBUSY;
102 
103 	tunnel_cookie* cookie = new(std::nothrow) tunnel_cookie;
104 	if (cookie == NULL)
105 		return B_NO_MEMORY;
106 
107 	cookie->device = device;
108 	cookie->flags = flags;
109 
110 	*_cookie = cookie;
111 	return B_OK;
112 }
113 
114 
115 status_t
116 tunnel_close(void* _cookie)
117 {
118 	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;
119 
120 	// Wake up the send queue, so that any threads waiting to read return at once.
121 	release_sem_etc(cookie->device->send_queue.notify, B_INTERRUPTED, B_RELEASE_ALL);
122 
123 	return B_OK;
124 }
125 
126 
127 status_t
128 tunnel_free(void* _cookie)
129 {
130 	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;
131 	atomic_and(&cookie->device->open_count, 0);
132 	delete cookie;
133 	return B_OK;
134 }
135 
136 
137 status_t
138 tunnel_control(void* _cookie, uint32 op, void* data, size_t len)
139 {
140 	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;
141 
142 	switch (op) {
143 		case B_SET_NONBLOCKING_IO:
144 			cookie->flags |= O_NONBLOCK;
145 			return B_OK;
146 		case B_SET_BLOCKING_IO:
147 			cookie->flags &= ~O_NONBLOCK;
148 			return B_OK;
149 	}
150 
151 	return B_DEV_INVALID_IOCTL;
152 }
153 
154 
155 status_t
156 tunnel_read(void* _cookie, off_t position, void* data, size_t* _length)
157 {
158 	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;
159 
160 	net_buffer* buffer = NULL;
161 	status_t status = gStackModule->fifo_dequeue_buffer(
162 		&cookie->device->send_queue, 0, B_INFINITE_TIMEOUT, &buffer);
163 	if (status != B_OK)
164 		return status;
165 
166 	size_t offset = 0;
167 	if (!cookie->device->is_tap) {
168 		// TUN: Skip ethernet header.
169 		offset = ETHER_HEADER_LENGTH;
170 	}
171 
172 	const size_t length = min_c(*_length, buffer->size - offset);
173 	status = gBufferModule->read(buffer, offset, data, length);
174 	if (status != B_OK)
175 		return status;
176 	*_length = length;
177 
178 	gBufferModule->free(buffer);
179 	return B_OK;
180 }
181 
182 
183 status_t
184 tunnel_write(void* _cookie, off_t position, const void* data, size_t* _length)
185 {
186 	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;
187 
188 	net_buffer* buffer = gBufferModule->create(256);
189 	if (buffer == NULL)
190 		return B_NO_MEMORY;
191 
192 	status_t status = gBufferModule->append(buffer, data, *_length);
193 	if (status != B_OK) {
194 		gBufferModule->free(buffer);
195 		return status;
196 	}
197 
198 	if (!cookie->device->is_tap) {
199 		// TUN: Detect packet type and prepend ethernet header.
200 		uint8 version;
201 		status = gBufferModule->read(buffer, 0, &version, 1);
202 		if (status != B_OK) {
203 			gBufferModule->free(buffer);
204 			return status;
205 		}
206 
207 		version = (version & 0xF0) >> 4;
208 		if (version != 4 && version != 6) {
209 			// Not any IP packet we recognize.
210 			gBufferModule->free(buffer);
211 			return B_BAD_DATA;
212 		}
213 		buffer->type = (version == 6) ? B_NET_FRAME_TYPE_IPV6
214 			: B_NET_FRAME_TYPE_IPV4;
215 
216 		NetBufferPrepend<ether_header> bufferHeader(buffer);
217 		if (bufferHeader.Status() != B_OK) {
218 			gBufferModule->free(buffer);
219 			return bufferHeader.Status();
220 		}
221 
222 		ether_header &header = bufferHeader.Data();
223 		header.type = (version == 6) ? htons(ETHER_TYPE_IPV6)
224 			: htons(ETHER_TYPE_IP);
225 
226 		memset(header.source, 0, ETHER_ADDRESS_LENGTH);
227 		memset(header.destination, 0, ETHER_ADDRESS_LENGTH);
228 		bufferHeader.Sync();
229 
230 		// At least sdl_type and sdl_e_type must be set.
231 		struct sockaddr_dl& src = *(struct sockaddr_dl*)buffer->source;
232 		struct sockaddr_dl& dst = *(struct sockaddr_dl*)buffer->destination;
233 		src.sdl_len		= dst.sdl_len		= sizeof(sockaddr_dl);
234 		src.sdl_family	= dst.sdl_family	= AF_LINK;
235 		src.sdl_index	= dst.sdl_index		= cookie->device->index;
236 		src.sdl_type	= dst.sdl_type		= IFT_ETHER;
237 		src.sdl_e_type	= dst.sdl_e_type	= header.type;
238 		src.sdl_nlen	= src.sdl_slen = dst.sdl_nlen = dst.sdl_slen = 0;
239 		src.sdl_alen	= dst.sdl_alen = 0;
240 	}
241 
242 	// We use a queue and the receive_data() hook instead of device_enqueue_buffer()
243 	// for two reasons: 1. listeners (e.g. packet capture) are only processed by the
244 	// reader thread that calls receive_data(), and 2. device_enqueue_buffer() has
245 	// to look up the device interface every time, which is inefficient.
246 	status = gStackModule->fifo_enqueue_buffer(&cookie->device->receive_queue, buffer);
247 	if (status != B_OK)
248 		gBufferModule->free(buffer);
249 
250 	if (status == B_OK) {
251 		atomic_add((int32*)&cookie->device->stats.receive.packets, 1);
252 		atomic_add64((int64*)&cookie->device->stats.receive.bytes, buffer->size);
253 	} else {
254 		atomic_add((int32*)&cookie->device->stats.receive.errors, 1);
255 	}
256 
257 	return status;
258 }
259 
260 
261 status_t
262 tunnel_select(void* _cookie, uint8 event, uint32 ref, selectsync* sync)
263 {
264 	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;
265 
266 	if (event != B_SELECT_READ && event != B_SELECT_WRITE)
267 		return B_BAD_VALUE;
268 
269 	MutexLocker selectLocker(cookie->device->select_lock);
270 	status_t status = add_select_sync_pool_entry(&cookie->device->select_pool, sync, event);
271 	if (status != B_OK)
272 		return B_BAD_VALUE;
273 	selectLocker.Unlock();
274 
275 	MutexLocker fifoLocker(cookie->device->send_queue.lock);
276 	if (event == B_SELECT_READ && cookie->device->send_queue.current_bytes != 0)
277 		notify_select_event(sync, event);
278 	if (event == B_SELECT_WRITE)
279 		notify_select_event(sync, event);
280 
281 	return B_OK;
282 }
283 
284 
285 status_t
286 tunnel_deselect(void* _cookie, uint8 event, selectsync* sync)
287 {
288 	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;
289 
290 	MutexLocker selectLocker(cookie->device->select_lock);
291 	if (event != B_SELECT_READ && event != B_SELECT_WRITE)
292 		return B_BAD_VALUE;
293 	return remove_select_sync_pool_entry(&cookie->device->select_pool, sync, event);
294 }
295 
296 
// Hooks for the /dev/net/{tun,tap}/* character devices published below.
static device_hooks sDeviceHooks = {
	tunnel_open,
	tunnel_close,
	tunnel_free,
	tunnel_control,
	tunnel_read,
	tunnel_write,
	tunnel_select,
	tunnel_deselect,
};
307 
308 
309 //	#pragma mark - network stack device
310 
311 
312 status_t
313 tunnel_init(const char* name, net_device** _device)
314 {
315 	const bool isTAP = strncmp(name, "tap/", 4) == 0;
316 	if (!isTAP && strncmp(name, "tun/", 4) != 0)
317 		return B_BAD_VALUE;
318 	if (strlen(name) >= sizeof(tunnel_device::name))
319 		return ENAMETOOLONG;
320 
321 	// Make sure this device doesn't already exist.
322 	MutexLocker devicesLocker(gDevicesLock);
323 	if (find_tunnel_device(name) != NULL)
324 		return EEXIST;
325 
326 	tunnel_device* device = new(std::nothrow) tunnel_device;
327 	if (device == NULL)
328 		return B_NO_MEMORY;
329 
330 	ssize_t index = -1;
331 	for (size_t i = 0; i < B_COUNT_OF(gDevices); i++) {
332 		if (gDevices[i] != NULL)
333 			continue;
334 
335 		gDevices[i] = device;
336 		index = i;
337 		break;
338 	}
339 	if (index < 0) {
340 		delete device;
341 		return ENOSPC;
342 	}
343 	devicesLocker.Unlock();
344 
345 	memset(device, 0, sizeof(tunnel_device));
346 	strcpy(device->name, name);
347 
348 	device->mtu = ETHER_MAX_FRAME_SIZE;
349 	device->media = IFM_ACTIVE;
350 
351 	device->is_tap = isTAP;
352 	if (device->is_tap) {
353 		device->flags = IFF_BROADCAST | IFF_ALLMULTI | IFF_LINK;
354 		device->type = IFT_ETHER;
355 
356 		// Generate a random MAC address.
357 		for (int i = 0; i < ETHER_ADDRESS_LENGTH; i++)
358 			device->address.data[i] = secure_get_random<uint8>();
359 		device->address.data[0] &= 0xFE; // multicast
360 		device->address.data[0] |= 0x02; // local assignment
361 
362 		device->address.length = ETHER_ADDRESS_LENGTH;
363 	} else {
364 		device->flags = IFF_POINTOPOINT | IFF_LINK;
365 		device->type = IFT_TUNNEL;
366 	}
367 
368 	status_t status = gStackModule->init_fifo(&device->send_queue,
369 		"tunnel send queue", TUNNEL_QUEUE_MAX);
370 	if (status != B_OK) {
371 		delete device;
372 		return status;
373 	}
374 
375 	status = gStackModule->init_fifo(&device->receive_queue,
376 		"tunnel receive queue", TUNNEL_QUEUE_MAX);
377 	if (status != B_OK) {
378 		gStackModule->uninit_fifo(&device->send_queue);
379 		delete device;
380 		return status;
381 	}
382 
383 	mutex_init(&device->select_lock, "tunnel select lock");
384 
385 	status = devfs_publish_device(name, &sDeviceHooks);
386 	if (status != B_OK) {
387 		gStackModule->uninit_fifo(&device->send_queue);
388 		gStackModule->uninit_fifo(&device->receive_queue);
389 		delete device;
390 		return status;
391 	}
392 
393 	*_device = device;
394 	return B_OK;
395 }
396 
397 
398 status_t
399 tunnel_uninit(net_device* _device)
400 {
401 	tunnel_device* device = (tunnel_device*)_device;
402 
403 	MutexLocker devicesLocker(gDevicesLock);
404 	if (atomic_get(&device->open_count) != 0)
405 		return EBUSY;
406 
407 	for (size_t i = 0; i < B_COUNT_OF(gDevices); i++) {
408 		if (gDevices[i] != device)
409 			continue;
410 
411 		gDevices[i] = NULL;
412 		break;
413 	}
414 	status_t status = devfs_unpublish_device(device->name, false);
415 	if (status != B_OK)
416 		panic("devfs_unpublish_device failed: %" B_PRId32, status);
417 
418 	gStackModule->uninit_fifo(&device->send_queue);
419 	gStackModule->uninit_fifo(&device->receive_queue);
420 	mutex_destroy(&device->select_lock);
421 	delete device;
422 	return B_OK;
423 }
424 
425 
// net_device up hook: nothing to do, tunnel devices are always ready.
status_t
tunnel_up(net_device* _device)
{
	return B_OK;
}
431 
432 
433 void
434 tunnel_down(net_device* _device)
435 {
436 	tunnel_device* device = (tunnel_device*)_device;
437 
438 	// Wake up the receive queue, so that the reader thread returns at once.
439 	release_sem_etc(device->receive_queue.notify, B_INTERRUPTED, B_RELEASE_ALL);
440 }
441 
442 
// net_device control hook: no device-level ioctls are supported.
status_t
tunnel_control(net_device* device, int32 op, void* argument, size_t length)
{
	return B_BAD_VALUE;
}
448 
449 
450 status_t
451 tunnel_send_data(net_device* _device, net_buffer* buffer)
452 {
453 	tunnel_device* device = (tunnel_device*)_device;
454 
455 	status_t status = B_OK;
456 	if (!device->is_tap) {
457 		// Ensure this is an IP frame.
458 		uint16 type;
459 		status = gBufferModule->read(buffer, offsetof(ether_header, type),
460 			&type, sizeof(type));
461 		if (status != B_OK)
462 			return status;
463 
464 		if (type != htons(ETHER_TYPE_IP) && type != htons(ETHER_TYPE_IPV6))
465 			return B_BAD_DATA;
466 	}
467 
468 	status = gStackModule->fifo_enqueue_buffer(
469 		&device->send_queue, buffer);
470 	if (status == B_OK) {
471 		atomic_add((int32*)&device->stats.send.packets, 1);
472 		atomic_add64((int64*)&device->stats.send.bytes, buffer->size);
473 	} else {
474 		atomic_add((int32*)&device->stats.send.errors, 1);
475 	}
476 
477 	MutexLocker selectLocker(device->select_lock);
478 	notify_select_event_pool(device->select_pool, B_SELECT_READ);
479 	return status;
480 }
481 
482 
483 status_t
484 tunnel_receive_data(net_device* _device, net_buffer** _buffer)
485 {
486 	tunnel_device* device = (tunnel_device*)_device;
487 	return gStackModule->fifo_dequeue_buffer(&device->receive_queue,
488 		0, B_INFINITE_TIMEOUT, _buffer);
489 }
490 
491 
492 status_t
493 tunnel_set_mtu(net_device* device, size_t mtu)
494 {
495 	if (mtu > 65536 || mtu < 16)
496 		return B_BAD_VALUE;
497 
498 	device->mtu = mtu;
499 	return B_OK;
500 }
501 
502 
// net_device hook: promiscuous mode is not supported for tunnels.
status_t
tunnel_set_promiscuous(net_device* device, bool promiscuous)
{
	return EOPNOTSUPP;
}
508 
509 
// net_device hook: media cannot be changed on a virtual device.
status_t
tunnel_set_media(net_device* device, uint32 media)
{
	return EOPNOTSUPP;
}
515 
516 
// net_device hook: multicast registrations are accepted but ignored;
// tunnels deliver every queued frame regardless.
status_t
tunnel_add_multicast(net_device* device, const sockaddr* address)
{
	return B_OK;
}
522 
523 
// net_device hook: counterpart to tunnel_add_multicast(); a no-op.
status_t
tunnel_remove_multicast(net_device* device, const sockaddr* address)
{
	return B_OK;
}
529 
530 
// net_device module published to the network stack.
net_device_module_info sTunModule = {
	{
		"network/devices/tunnel/v1",
		0,
		NULL
	},
	tunnel_init,
	tunnel_uninit,
	tunnel_up,
	tunnel_down,
	tunnel_control,
	tunnel_send_data,
	tunnel_receive_data,
	tunnel_set_mtu,
	tunnel_set_promiscuous,
	tunnel_set_media,
	tunnel_add_multicast,
	tunnel_remove_multicast,
};
550 
// Modules this driver depends on; the pointers are filled in on load.
module_dependency module_dependencies[] = {
	{NET_STACK_MODULE_NAME, (module_info**)&gStackModule},
	{NET_BUFFER_MODULE_NAME, (module_info**)&gBufferModule},
	{}
};

// Modules exported by this add-on.
module_info* modules[] = {
	(module_info*)&sTunModule,
	NULL
};
561