/*
 * Copyright 2023, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Augustin Cavalier <waddlesplash>
 *		Axel Dörfler, axeld@pinc-software.de
 *		Sean Brady, swangeon@gmail.com
 */

#include <new>
#include <string.h>

#include <fs/select_sync_pool.h>
#include <fs/devfs.h>
#include <util/AutoLock.h>
#include <util/Random.h>

#include <net_buffer.h>
#include <net_device.h>
#include <net_stack.h>
#include <NetBufferUtilities.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_tun.h>
#include <netinet/in.h>
#include <ethernet.h>


//#define TRACE_TUNNEL
#ifdef TRACE_TUNNEL
#	define TRACE(x...) dprintf("network/tunnel: " x)
#else
#	define TRACE(x...)
#endif

#define CALLED(x...)		TRACE("CALLED %s\n", __PRETTY_FUNCTION__)
#define TRACE_ALWAYS(x...)	dprintf("network/tunnel: " x)


struct tunnel_device : net_device {
	bool				is_tap;

	net_fifo			send_queue, receive_queue;

	int32				open_count;

	mutex				select_lock;
	select_sync_pool*	select_pool;
};

#define TUNNEL_QUEUE_MAX (ETHER_MAX_FRAME_SIZE * 32)


struct net_buffer_module_info* gBufferModule;
static net_stack_module_info* gStackModule;


// #pragma mark - devices array


static tunnel_device* gDevices[10] = {};
static mutex gDevicesLock = MUTEX_INITIALIZER("tunnel devices");


static tunnel_device*
find_tunnel_device(const char* name)
{
	ASSERT_LOCKED_MUTEX(&gDevicesLock);
	for (size_t i = 0; i < B_COUNT_OF(gDevices); i++) {
		if (gDevices[i] == NULL)
			continue;

		if (strcmp(gDevices[i]->name, name) == 0)
			return gDevices[i];
	}
	return NULL;
}


// #pragma mark - devfs device


struct tunnel_cookie {
	tunnel_device*	device;
	uint32			flags;
};


status_t
tunnel_open(const char* name, uint32 flags, void** _cookie)
{
	MutexLocker devicesLocker(gDevicesLock);
	tunnel_device* device = find_tunnel_device(name);
	if (device == NULL)
		return ENODEV;
	if (atomic_or(&device->open_count, 1) != 0)
		return EBUSY;

	tunnel_cookie* cookie = new(std::nothrow) tunnel_cookie;
	if (cookie == NULL)
		return B_NO_MEMORY;

	cookie->device = device;
	cookie->flags = flags;

	*_cookie = cookie;
	return B_OK;
}


status_t
tunnel_close(void* _cookie)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	// Wake up the send queue, so that any threads waiting to read return at once.
	release_sem_etc(cookie->device->send_queue.notify, B_INTERRUPTED, B_RELEASE_ALL);

	return B_OK;
}
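

/*
 * Illustrative only: a minimal userland sketch of the single-open semantics
 * implemented by tunnel_open() above. It assumes a device published as
 * "tun/0", which devfs exposes at /dev/tun/0; the path is an example, not a
 * guarantee that such a device exists.
 *
 *		int fd = open("/dev/tun/0", O_RDWR);
 *			// first opener succeeds
 *		int fd2 = open("/dev/tun/0", O_RDWR);
 *			// fails with EBUSY: open_count is already nonzero
 *		close(fd);
 *			// runs tunnel_close() (interrupting blocked readers) and then
 *			// tunnel_free() below, which resets open_count for reopening
 */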


status_t
tunnel_free(void* _cookie)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;
	atomic_and(&cookie->device->open_count, 0);
	delete cookie;
	return B_OK;
}


status_t
tunnel_control(void* _cookie, uint32 op, void* data, size_t len)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	switch (op) {
		case B_SET_NONBLOCKING_IO:
			cookie->flags |= O_NONBLOCK;
			return B_OK;
		case B_SET_BLOCKING_IO:
			cookie->flags &= ~O_NONBLOCK;
			return B_OK;
	}

	return B_DEV_INVALID_IOCTL;
}


status_t
tunnel_read(void* _cookie, off_t position, void* data, size_t* _length)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	net_buffer* buffer = NULL;
	status_t status = gStackModule->fifo_dequeue_buffer(
		&cookie->device->send_queue, 0, B_INFINITE_TIMEOUT, &buffer);
	if (status != B_OK)
		return status;

	size_t offset = 0;
	if (!cookie->device->is_tap) {
		// TUN: Skip ethernet header.
		offset = ETHER_HEADER_LENGTH;
	}

	const size_t length = min_c(*_length, buffer->size - offset);
	status = gBufferModule->read(buffer, offset, data, length);
	if (status != B_OK) {
		// Free the dequeued buffer even on failure, or it would be leaked.
		gBufferModule->free(buffer);
		return status;
	}
	*_length = length;

	gBufferModule->free(buffer);
	return B_OK;
}


status_t
tunnel_write(void* _cookie, off_t position, const void* data, size_t* _length)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	net_buffer* buffer = gBufferModule->create(256);
	if (buffer == NULL)
		return B_NO_MEMORY;

	status_t status = gBufferModule->append(buffer, data, *_length);
	if (status != B_OK) {
		gBufferModule->free(buffer);
		return status;
	}

	if (!cookie->device->is_tap) {
		// TUN: Detect packet type and prepend ethernet header.
		uint8 version;
		status = gBufferModule->read(buffer, 0, &version, 1);
		if (status != B_OK) {
			gBufferModule->free(buffer);
			return status;
		}

		version = (version & 0xF0) >> 4;
		if (version != 4 && version != 6) {
			// Not any IP packet we recognize.
			gBufferModule->free(buffer);
			return B_BAD_DATA;
		}
		buffer->type = (version == 6) ? B_NET_FRAME_TYPE_IPV6
			: B_NET_FRAME_TYPE_IPV4;

		NetBufferPrepend<ether_header> bufferHeader(buffer);
		if (bufferHeader.Status() != B_OK) {
			gBufferModule->free(buffer);
			return bufferHeader.Status();
		}

		ether_header& header = bufferHeader.Data();
		header.type = (version == 6) ? htons(ETHER_TYPE_IPV6)
			: htons(ETHER_TYPE_IP);

		memset(header.source, 0, ETHER_ADDRESS_LENGTH);
		memset(header.destination, 0, ETHER_ADDRESS_LENGTH);
		bufferHeader.Sync();

		// At least sdl_type and sdl_e_type must be set.
		struct sockaddr_dl& src = *(struct sockaddr_dl*)buffer->source;
		struct sockaddr_dl& dst = *(struct sockaddr_dl*)buffer->destination;
		src.sdl_len = dst.sdl_len = sizeof(sockaddr_dl);
		src.sdl_family = dst.sdl_family = AF_LINK;
		src.sdl_index = dst.sdl_index = cookie->device->index;
		src.sdl_type = dst.sdl_type = IFT_ETHER;
		src.sdl_e_type = dst.sdl_e_type = header.type;
		src.sdl_nlen = src.sdl_slen = dst.sdl_nlen = dst.sdl_slen = 0;
		src.sdl_alen = dst.sdl_alen = 0;
	}

	// We use a queue and the receive_data() hook instead of device_enqueue_buffer()
	// for two reasons: 1. listeners (e.g. packet capture) are only processed by the
	// reader thread that calls receive_data(), and 2. device_enqueue_buffer() has
	// to look up the device interface every time, which is inefficient.
	status = gStackModule->fifo_enqueue_buffer(&cookie->device->receive_queue, buffer);
	if (status != B_OK)
		gBufferModule->free(buffer);

	return status;
}
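

/*
 * Worked example of the TUN framing in tunnel_write() above (descriptive
 * comment, not compiled): a raw IP packet written from userland carries no
 * link-level header, so the frame type is inferred from the IP version
 * nibble. An IPv4 header begins with the version/IHL byte, typically 0x45:
 * (0x45 & 0xF0) >> 4 == 4, so ETHER_TYPE_IP is chosen. An IPv6 header begins
 * with 0x60..0x6F, yielding 6 and ETHER_TYPE_IPV6. Any other first byte
 * (say 0x00) produces an unrecognized version and the write fails with
 * B_BAD_DATA.
 */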


status_t
tunnel_select(void* _cookie, uint8 event, uint32 ref, selectsync* sync)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	if (event != B_SELECT_READ && event != B_SELECT_WRITE)
		return B_BAD_VALUE;

	MutexLocker selectLocker(cookie->device->select_lock);
	status_t status = add_select_sync_pool_entry(&cookie->device->select_pool, sync, event);
	if (status != B_OK)
		return B_BAD_VALUE;
	selectLocker.Unlock();

	MutexLocker fifoLocker(cookie->device->send_queue.lock);
	if (event == B_SELECT_READ && cookie->device->send_queue.current_bytes != 0)
		notify_select_event(sync, event);
	if (event == B_SELECT_WRITE)
		notify_select_event(sync, event);

	return B_OK;
}


status_t
tunnel_deselect(void* _cookie, uint8 event, selectsync* sync)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	MutexLocker selectLocker(cookie->device->select_lock);
	if (event != B_SELECT_READ && event != B_SELECT_WRITE)
		return B_BAD_VALUE;
	return remove_select_sync_pool_entry(&cookie->device->select_pool, sync, event);
}


static device_hooks sDeviceHooks = {
	tunnel_open,
	tunnel_close,
	tunnel_free,
	tunnel_control,
	tunnel_read,
	tunnel_write,
	tunnel_select,
	tunnel_deselect,
};


// #pragma mark - network stack device


status_t
tunnel_init(const char* name, net_device** _device)
{
	const bool isTAP = strncmp(name, "tap/", 4) == 0;
	if (!isTAP && strncmp(name, "tun/", 4) != 0)
		return B_BAD_VALUE;
	if (strlen(name) >= sizeof(tunnel_device::name))
		return ENAMETOOLONG;

	// Make sure this device doesn't already exist.
	MutexLocker devicesLocker(gDevicesLock);
	if (find_tunnel_device(name) != NULL)
		return EEXIST;

	tunnel_device* device = new(std::nothrow) tunnel_device;
	if (device == NULL)
		return B_NO_MEMORY;

	ssize_t index = -1;
	for (size_t i = 0; i < B_COUNT_OF(gDevices); i++) {
		if (gDevices[i] != NULL)
			continue;

		gDevices[i] = device;
		index = i;
		break;
	}
	if (index < 0) {
		delete device;
		return ENOSPC;
	}
	devicesLocker.Unlock();

	memset(device, 0, sizeof(tunnel_device));
	strcpy(device->name, name);

	device->mtu = ETHER_MAX_FRAME_SIZE;
	device->media = IFM_ACTIVE;

	device->is_tap = isTAP;
	if (device->is_tap) {
		device->flags = IFF_BROADCAST | IFF_ALLMULTI | IFF_LINK;
		device->type = IFT_ETHER;

		// Generate a random MAC address.
		for (int i = 0; i < ETHER_ADDRESS_LENGTH; i++)
			device->address.data[i] = secure_get_random<uint8>();
		device->address.data[0] &= 0xFE;	// clear the multicast bit
		device->address.data[0] |= 0x02;	// set the locally-administered bit

		device->address.length = ETHER_ADDRESS_LENGTH;
	} else {
		device->flags = IFF_POINTOPOINT | IFF_LINK;
		device->type = IFT_TUNNEL;
	}

	status_t status = gStackModule->init_fifo(&device->send_queue,
		"tunnel send queue", TUNNEL_QUEUE_MAX);
	if (status != B_OK) {
		delete device;
		return status;
	}

	status = gStackModule->init_fifo(&device->receive_queue,
		"tunnel receive queue", TUNNEL_QUEUE_MAX);
	if (status != B_OK) {
		gStackModule->uninit_fifo(&device->send_queue);
		delete device;
		return status;
	}

	mutex_init(&device->select_lock, "tunnel select lock");

	status = devfs_publish_device(name, &sDeviceHooks);
	if (status != B_OK) {
		gStackModule->uninit_fifo(&device->send_queue);
		gStackModule->uninit_fifo(&device->receive_queue);
		delete device;
		return status;
	}

	*_device = device;
	return B_OK;
}
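

/*
 * Illustrative userland sketch of the select support implemented by
 * tunnel_select() above (assumes the fd from the earlier /dev/tun/0 sketch;
 * error handling omitted):
 *
 *		#include <poll.h>
 *		#include <unistd.h>
 *
 *		struct pollfd pfd = { fd, POLLIN, 0 };
 *		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN) != 0) {
 *			char packet[65536];
 *			ssize_t length = read(fd, packet, sizeof(packet));
 *			// B_SELECT_READ is only signalled when the send queue holds
 *			// data, so this read should return promptly.
 *		}
 */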


status_t
tunnel_uninit(net_device* _device)
{
	tunnel_device* device = (tunnel_device*)_device;

	MutexLocker devicesLocker(gDevicesLock);
	if (atomic_get(&device->open_count) != 0)
		return EBUSY;

	for (size_t i = 0; i < B_COUNT_OF(gDevices); i++) {
		if (gDevices[i] != device)
			continue;

		gDevices[i] = NULL;
		break;
	}
	status_t status = devfs_unpublish_device(device->name, false);
	if (status != B_OK)
		panic("devfs_unpublish_device failed: %" B_PRId32, status);

	gStackModule->uninit_fifo(&device->send_queue);
	gStackModule->uninit_fifo(&device->receive_queue);
	mutex_destroy(&device->select_lock);
	delete device;
	return B_OK;
}


status_t
tunnel_up(net_device* _device)
{
	return B_OK;
}


void
tunnel_down(net_device* _device)
{
	tunnel_device* device = (tunnel_device*)_device;

	// Wake up the receive queue, so that the reader thread returns at once.
	release_sem_etc(device->receive_queue.notify, B_INTERRUPTED, B_RELEASE_ALL);
}


status_t
tunnel_control(net_device* device, int32 op, void* argument, size_t length)
{
	return B_BAD_VALUE;
}


status_t
tunnel_send_data(net_device* _device, net_buffer* buffer)
{
	tunnel_device* device = (tunnel_device*)_device;

	status_t status = B_OK;
	if (!device->is_tap) {
		// Ensure this is an IP frame.
		uint16 type;
		status = gBufferModule->read(buffer, offsetof(ether_header, type),
			&type, sizeof(type));
		if (status != B_OK)
			return status;

		if (type != htons(ETHER_TYPE_IP) && type != htons(ETHER_TYPE_IPV6))
			return B_BAD_DATA;
	}

	status = gStackModule->fifo_enqueue_buffer(
		&device->send_queue, buffer);
	if (status == B_OK) {
		MutexLocker selectLocker(device->select_lock);
		notify_select_event_pool(device->select_pool, B_SELECT_READ);
	}

	return status;
}
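

/*
 * Data path summary (descriptive comment added for orientation):
 *
 *	outgoing: stack -> tunnel_send_data() -> send_queue -> tunnel_read() -> userland
 *	incoming: userland -> tunnel_write() -> receive_queue -> tunnel_receive_data() -> stack
 *
 * tunnel_send_data() also notifies the select pool, so userland pollers wake
 * up as soon as the stack enqueues a frame.
 */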


status_t
tunnel_receive_data(net_device* _device, net_buffer** _buffer)
{
	tunnel_device* device = (tunnel_device*)_device;
	return gStackModule->fifo_dequeue_buffer(&device->receive_queue,
		0, B_INFINITE_TIMEOUT, _buffer);
}


status_t
tunnel_set_mtu(net_device* device, size_t mtu)
{
	if (mtu > 65536 || mtu < 16)
		return B_BAD_VALUE;

	device->mtu = mtu;
	return B_OK;
}


status_t
tunnel_set_promiscuous(net_device* device, bool promiscuous)
{
	return EOPNOTSUPP;
}


status_t
tunnel_set_media(net_device* device, uint32 media)
{
	return EOPNOTSUPP;
}


status_t
tunnel_add_multicast(net_device* device, const sockaddr* address)
{
	return B_OK;
}


status_t
tunnel_remove_multicast(net_device* device, const sockaddr* address)
{
	return B_OK;
}


net_device_module_info sTunModule = {
	{
		"network/devices/tunnel/v1",
		0,
		NULL
	},
	tunnel_init,
	tunnel_uninit,
	tunnel_up,
	tunnel_down,
	tunnel_control,
	tunnel_send_data,
	tunnel_receive_data,
	tunnel_set_mtu,
	tunnel_set_promiscuous,
	tunnel_set_media,
	tunnel_add_multicast,
	tunnel_remove_multicast,
};

module_dependency module_dependencies[] = {
	{NET_STACK_MODULE_NAME, (module_info**)&gStackModule},
	{NET_BUFFER_MODULE_NAME, (module_info**)&gBufferModule},
	{}
};

module_info* modules[] = {
	(module_info*)&sTunModule,
	NULL
};
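

/*
 * Illustrative sketch of how a consumer might instantiate this module
 * (hypothetical caller, for orientation only; assumes the init_device and
 * uninit_device hook names from net_device.h):
 *
 *		net_device_module_info* module;
 *		if (get_module("network/devices/tunnel/v1",
 *				(module_info**)&module) == B_OK) {
 *			net_device* device;
 *			if (module->init_device("tun/0", &device) == B_OK) {
 *				// ... use the device, then:
 *				module->uninit_device(device);
 *			}
 *			put_module("network/devices/tunnel/v1");
 *		}
 */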