/*
 * Copyright 2023, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Augustin Cavalier <waddlesplash>
 *		Axel Dörfler, axeld@pinc-software.de
 *		Sean Brady, swangeon@gmail.com
 */

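/*!	TUN/TAP tunnel network devices.

	The network stack creates a device through tunnel_init() (for names
	beginning with "tun/" or "tap/"), and the driver then publishes a
	matching devfs node (e.g. /dev/tap/0) for userland to read and write
	packets. TAP devices carry whole Ethernet frames, while TUN devices
	carry raw IPv4/IPv6 packets.

	Packets the stack wants to transmit go into "send_queue" and are handed
	to userland through the devfs read hook; packets written by userland go
	into "receive_queue" and are picked up by the stack's reader thread
	through the receive_data() hook.

	A minimal userland sketch (assuming the interface was already created
	and configured):

		int fd = open("/dev/tap/0", O_RDWR);
		uint8 frame[ETHER_MAX_FRAME_SIZE];
		ssize_t length = read(fd, frame, sizeof(frame));	// one frame per read()
		write(fd, frame, length);							// inject one frame
*/
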
#include <new>
#include <string.h>

#include <fs/select_sync_pool.h>
#include <fs/devfs.h>
#include <util/AutoLock.h>
#include <util/Random.h>

#include <net_buffer.h>
#include <net_device.h>
#include <net_stack.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_tun.h>
#include <netinet/in.h>
#include <ethernet.h>


//#define TRACE_TUNNEL
#ifdef TRACE_TUNNEL
#	define TRACE(x...) dprintf("network/tunnel: " x)
#else
#	define TRACE(x...)
#endif

#define CALLED(x...)		TRACE("CALLED %s\n", __PRETTY_FUNCTION__)
#define TRACE_ALWAYS(x...)	dprintf("network/tunnel: " x)


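// One of these per tunnel interface. "send_queue" buffers packets headed
// from the network stack to userland; "receive_queue" buffers packets
// injected by userland for the stack to receive.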
struct tunnel_device : net_device {
	bool				is_tap;

	net_fifo			send_queue, receive_queue;

	int32				open_count;

	mutex				select_lock;
	select_sync_pool*	select_pool;
};

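// Cap on the bytes buffered in each queue: about 32 maximum-size Ethernet
// frames per direction.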
#define TUNNEL_QUEUE_MAX (ETHER_MAX_FRAME_SIZE * 32)


struct net_buffer_module_info* gBufferModule;
static net_stack_module_info* gStackModule;


// #pragma mark - devices array


static tunnel_device* gDevices[10] = {};
static mutex gDevicesLock = MUTEX_INITIALIZER("tunnel devices");


static tunnel_device*
find_tunnel_device(const char* name)
{
	ASSERT_LOCKED_MUTEX(&gDevicesLock);
	for (size_t i = 0; i < B_COUNT_OF(gDevices); i++) {
		if (gDevices[i] == NULL)
			continue;

		if (strcmp(gDevices[i]->name, name) == 0)
			return gDevices[i];
	}
	return NULL;
}


// #pragma mark - devfs device


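// State for a single open() of the devfs node.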
struct tunnel_cookie {
	tunnel_device*	device;
	uint32			flags;
};


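// Only one concurrent open() is allowed per device; the stored flags let
// B_SET_NONBLOCKING_IO/B_SET_BLOCKING_IO toggle O_NONBLOCK per descriptor.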
status_t
tunnel_open(const char* name, uint32 flags, void** _cookie)
{
	MutexLocker devicesLocker(gDevicesLock);
	tunnel_device* device = find_tunnel_device(name);
	if (device == NULL)
		return ENODEV;
	if (atomic_or(&device->open_count, 1) != 0)
		return EBUSY;

	tunnel_cookie* cookie = new(std::nothrow) tunnel_cookie;
	if (cookie == NULL) {
		// Release the open slot we just claimed, or the device would
		// remain busy forever.
		atomic_and(&device->open_count, 0);
		return B_NO_MEMORY;
	}

	cookie->device = device;
	cookie->flags = flags;

	*_cookie = cookie;
	return B_OK;
}


status_t
tunnel_close(void* _cookie)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	// Wake up the send queue, so that any threads waiting to read return at once.
	release_sem_etc(cookie->device->send_queue.notify, B_INTERRUPTED, B_RELEASE_ALL);

	return B_OK;
}


status_t
tunnel_free(void* _cookie)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;
	atomic_and(&cookie->device->open_count, 0);
	delete cookie;
	return B_OK;
}


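// devfs ioctl hook: currently only supports switching between blocking
// and non-blocking I/O.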
status_t
tunnel_control(void* _cookie, uint32 op, void* data, size_t len)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	switch (op) {
		case B_SET_NONBLOCKING_IO:
			cookie->flags |= O_NONBLOCK;
			return B_OK;
		case B_SET_BLOCKING_IO:
			cookie->flags &= ~O_NONBLOCK;
			return B_OK;
	}

	return B_DEV_INVALID_IOCTL;
}


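// devfs read hook: hands the next packet queued by the network stack to
// userland, one packet per read().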
status_t
tunnel_read(void* _cookie, off_t position, void* data, size_t* _length)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	// In non-blocking mode, return immediately when no packet is queued.
	uint32 flags = 0;
	bigtime_t timeout = B_INFINITE_TIMEOUT;
	if ((cookie->flags & O_NONBLOCK) != 0) {
		flags = B_RELATIVE_TIMEOUT;
		timeout = 0;
	}

	net_buffer* buffer = NULL;
	status_t status = gStackModule->fifo_dequeue_buffer(
		&cookie->device->send_queue, flags, timeout, &buffer);
	if (status != B_OK)
		return status;

	const size_t length = min_c(*_length, buffer->size);
	status = gBufferModule->read(buffer, 0, data, length);
	if (status != B_OK) {
		// Don't leak the dequeued buffer on failure.
		gBufferModule->free(buffer);
		return status;
	}
	*_length = length;

	gBufferModule->free(buffer);
	return B_OK;
}


status_t
tunnel_write(void* _cookie, off_t position, const void* data, size_t* _length)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	net_buffer* buffer = gBufferModule->create(256);
	if (buffer == NULL)
		return B_NO_MEMORY;

	status_t status = gBufferModule->append(buffer, data, *_length);
	if (status != B_OK) {
		gBufferModule->free(buffer);
		return status;
	}

	if (!cookie->device->is_tap) {
		// TUN: Detect packet type.
		uint8 version;
		status = gBufferModule->read(buffer, 0, &version, 1);
		if (status != B_OK) {
			gBufferModule->free(buffer);
			return status;
		}

		version = (version & 0xF0) >> 4;
		if (version != 4 && version != 6) {
			// Not any IP packet we recognize.
			gBufferModule->free(buffer);
			return B_BAD_DATA;
		}
		buffer->type = (version == 6) ? B_NET_FRAME_TYPE_IPV6
			: B_NET_FRAME_TYPE_IPV4;

		struct sockaddr_in& src = *(struct sockaddr_in*)buffer->source;
		struct sockaddr_in& dst = *(struct sockaddr_in*)buffer->destination;
		src.sin_len = dst.sin_len = sizeof(sockaddr_in);
		src.sin_family = dst.sin_family = (version == 6) ? AF_INET6 : AF_INET;
		src.sin_port = dst.sin_port = 0;
		src.sin_addr.s_addr = dst.sin_addr.s_addr = 0;
	}

	// We use a queue and the receive_data() hook instead of device_enqueue_buffer()
	// for two reasons: 1. listeners (e.g. packet capture) are only processed by the
	// reader thread that calls receive_data(), and 2. device_enqueue_buffer() has
	// to look up the device interface every time, which is inefficient.
	status = gStackModule->fifo_enqueue_buffer(&cookie->device->receive_queue, buffer);
	if (status != B_OK)
		gBufferModule->free(buffer);

	return status;
}


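// select() support: the device is readable whenever the stack has queued
// outgoing packets, and is always considered writable.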
status_t
tunnel_select(void* _cookie, uint8 event, uint32 ref, selectsync* sync)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	if (event != B_SELECT_READ && event != B_SELECT_WRITE)
		return B_BAD_VALUE;

	MutexLocker selectLocker(cookie->device->select_lock);
	status_t status = add_select_sync_pool_entry(&cookie->device->select_pool, sync, event);
	if (status != B_OK)
		return B_BAD_VALUE;
	selectLocker.Unlock();

	MutexLocker fifoLocker(cookie->device->send_queue.lock);
	if (event == B_SELECT_READ && cookie->device->send_queue.current_bytes != 0)
		notify_select_event(sync, event);
	if (event == B_SELECT_WRITE)
		notify_select_event(sync, event);

	return B_OK;
}


status_t
tunnel_deselect(void* _cookie, uint8 event, selectsync* sync)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	MutexLocker selectLocker(cookie->device->select_lock);
	if (event != B_SELECT_READ && event != B_SELECT_WRITE)
		return B_BAD_VALUE;
	return remove_select_sync_pool_entry(&cookie->device->select_pool, sync, event);
}


static device_hooks sDeviceHooks = {
	tunnel_open,
	tunnel_close,
	tunnel_free,
	tunnel_control,
	tunnel_read,
	tunnel_write,
	tunnel_select,
	tunnel_deselect,
};


// #pragma mark - network stack device


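// The hooks below implement the net_device interface that the network
// stack uses to drive the tunnel like any other network device.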
status_t
tunnel_init(const char* name, net_device** _device)
{
	const bool isTAP = strncmp(name, "tap/", 4) == 0;
	if (!isTAP && strncmp(name, "tun/", 4) != 0)
		return B_BAD_VALUE;
	if (strlen(name) >= sizeof(tunnel_device::name))
		return ENAMETOOLONG;

	// Make sure this device doesn't already exist.
	MutexLocker devicesLocker(gDevicesLock);
	if (find_tunnel_device(name) != NULL)
		return EEXIST;

	tunnel_device* device = new(std::nothrow) tunnel_device;
	if (device == NULL)
		return B_NO_MEMORY;

	// Initialize the device before it becomes visible in gDevices, so that
	// concurrent find_tunnel_device() calls never see an invalid name.
	memset(device, 0, sizeof(tunnel_device));
	strcpy(device->name, name);

	ssize_t index = -1;
	for (size_t i = 0; i < B_COUNT_OF(gDevices); i++) {
		if (gDevices[i] != NULL)
			continue;

		gDevices[i] = device;
		index = i;
		break;
	}
	if (index < 0) {
		delete device;
		return ENOSPC;
	}
	devicesLocker.Unlock();

	device->mtu = ETHER_MAX_FRAME_SIZE;
	device->media = IFM_ACTIVE;

	device->is_tap = isTAP;
	if (device->is_tap) {
		device->flags = IFF_BROADCAST | IFF_ALLMULTI | IFF_LINK;
		device->type = IFT_ETHER;

		// Generate a random MAC address.
		for (int i = 0; i < ETHER_ADDRESS_LENGTH; i++)
			device->address.data[i] = secure_get_random<uint8>();
		device->address.data[0] &= 0xFE; // clear the multicast bit
		device->address.data[0] |= 0x02; // set the locally-administered bit

		device->address.length = ETHER_ADDRESS_LENGTH;
	} else {
		device->flags = IFF_POINTOPOINT | IFF_LINK;
		device->type = IFT_TUNNEL;
	}

	status_t status = gStackModule->init_fifo(&device->send_queue,
		"tunnel send queue", TUNNEL_QUEUE_MAX);
	if (status != B_OK) {
		// Remove the device from gDevices again, or we would leave a
		// dangling pointer behind.
		devicesLocker.Lock();
		gDevices[index] = NULL;
		delete device;
		return status;
	}

	status = gStackModule->init_fifo(&device->receive_queue,
		"tunnel receive queue", TUNNEL_QUEUE_MAX);
	if (status != B_OK) {
		gStackModule->uninit_fifo(&device->send_queue);
		devicesLocker.Lock();
		gDevices[index] = NULL;
		delete device;
		return status;
	}

	mutex_init(&device->select_lock, "tunnel select lock");

	status = devfs_publish_device(name, &sDeviceHooks);
	if (status != B_OK) {
		gStackModule->uninit_fifo(&device->send_queue);
		gStackModule->uninit_fifo(&device->receive_queue);
		mutex_destroy(&device->select_lock);
		devicesLocker.Lock();
		gDevices[index] = NULL;
		delete device;
		return status;
	}

	*_device = device;
	return B_OK;
}


status_t
tunnel_uninit(net_device* _device)
{
	tunnel_device* device = (tunnel_device*)_device;

	MutexLocker devicesLocker(gDevicesLock);
	if (atomic_get(&device->open_count) != 0)
		return EBUSY;

	for (size_t i = 0; i < B_COUNT_OF(gDevices); i++) {
		if (gDevices[i] != device)
			continue;

		gDevices[i] = NULL;
		break;
	}
	status_t status = devfs_unpublish_device(device->name, false);
	if (status != B_OK)
		panic("devfs_unpublish_device failed: %" B_PRId32, status);

	gStackModule->uninit_fifo(&device->send_queue);
	gStackModule->uninit_fifo(&device->receive_queue);
	mutex_destroy(&device->select_lock);
	delete device;
	return B_OK;
}


status_t
tunnel_up(net_device* _device)
{
	return B_OK;
}


void
tunnel_down(net_device* _device)
{
	tunnel_device* device = (tunnel_device*)_device;

	// Wake up the receive queue, so that the reader thread returns at once.
	release_sem_etc(device->receive_queue.notify, B_INTERRUPTED, B_RELEASE_ALL);
}


status_t
tunnel_control(net_device* device, int32 op, void* argument, size_t length)
{
	return B_BAD_VALUE;
}


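// Called by the stack to transmit a packet: queue it up for userland to
// read from the devfs node, and wake up any waiting select()ers.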
status_t
tunnel_send_data(net_device* _device, net_buffer* buffer)
{
	tunnel_device* device = (tunnel_device*)_device;

	if (!device->is_tap) {
		// Ensure this is an IP frame.
		struct sockaddr_in& dst = *(struct sockaddr_in*)buffer->destination;
		if (dst.sin_family != AF_INET && dst.sin_family != AF_INET6)
			return B_BAD_DATA;
	}

	status_t status = gStackModule->fifo_enqueue_buffer(
		&device->send_queue, buffer);
	if (status == B_OK) {
		MutexLocker selectLocker(device->select_lock);
		notify_select_event_pool(device->select_pool, B_SELECT_READ);
	}

	return status;
}


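// Called by the stack's reader thread; blocks until userland writes a
// packet into the devfs node (or the device goes down).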
status_t
tunnel_receive_data(net_device* _device, net_buffer** _buffer)
{
	tunnel_device* device = (tunnel_device*)_device;
	return gStackModule->fifo_dequeue_buffer(&device->receive_queue,
		0, B_INFINITE_TIMEOUT, _buffer);
}


status_t
tunnel_set_mtu(net_device* device, size_t mtu)
{
	if (mtu > 65536 || mtu < 16)
		return B_BAD_VALUE;

	device->mtu = mtu;
	return B_OK;
}


status_t
tunnel_set_promiscuous(net_device* device, bool promiscuous)
{
	return EOPNOTSUPP;
}


status_t
tunnel_set_media(net_device* device, uint32 media)
{
	return EOPNOTSUPP;
}


status_t
tunnel_add_multicast(net_device* device, const sockaddr* address)
{
	return B_OK;
}


status_t
tunnel_remove_multicast(net_device* device, const sockaddr* address)
{
	return B_OK;
}


net_device_module_info sTunModule = {
	{
		"network/devices/tunnel/v1",
		0,
		NULL
	},
	tunnel_init,
	tunnel_uninit,
	tunnel_up,
	tunnel_down,
	tunnel_control,
	tunnel_send_data,
	tunnel_receive_data,
	tunnel_set_mtu,
	tunnel_set_promiscuous,
	tunnel_set_media,
	tunnel_add_multicast,
	tunnel_remove_multicast,
};

module_dependency module_dependencies[] = {
	{NET_STACK_MODULE_NAME, (module_info**)&gStackModule},
	{NET_BUFFER_MODULE_NAME, (module_info**)&gBufferModule},
	{}
};

module_info* modules[] = {
	(module_info*)&sTunModule,
	NULL
};