/*
 * Copyright 2006-2007, Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Axel Dörfler, axeld@pinc-software.de
 */


#include "utility.h"

#include <net_buffer.h>
#include <slab/Slab.h>
#include <util/list.h>

#include <ByteOrder.h>
#include <debug.h>
#include <KernelExport.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>

#include <algorithm>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>


//#define TRACE_BUFFER
#ifdef TRACE_BUFFER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#define BUFFER_SIZE 2048
	// maximum implementation derived buffer size is 65536

//#define ENABLE_DEBUGGER_COMMANDS 1

struct data_node {
	struct list_link link;
	struct data_header *header;
	struct data_header *located;
	size_t offset;			// the net_buffer-wide offset of this node
	uint8 *start;			// points to the start of the data
	uint16 used;			// defines how much memory is used by this node
	uint16 header_space;
	uint16 tail_space;
};

struct free_data {
	struct free_data *next;
	uint16 size;
};

struct data_header {
	int32 ref_count;
	addr_t physical_address;
	free_data *first_free;
	uint8 *data_end;
	size_t data_space;
	data_node *first_node;
};

#define MAX_ANCILLARY_DATA_SIZE 128

struct ancillary_data : DoublyLinkedListLinkImpl<ancillary_data> {
	void* Data()
	{
		return (char*)this + _ALIGN(sizeof(ancillary_data));
	}

	static ancillary_data* FromData(void* data)
	{
		return (ancillary_data*)((char*)data - _ALIGN(sizeof(ancillary_data)));
	}

	ancillary_data_header header;
	void (*destructor)(const ancillary_data_header*, void*);
};

typedef DoublyLinkedList<ancillary_data> ancillary_data_list;

#define MAX_FREE_BUFFER_SIZE (BUFFER_SIZE - sizeof(data_header))

struct net_buffer_private : net_buffer {
	struct list buffers;
	data_node first_node;
	ancillary_data_list ancillary_data;

	struct {
		struct sockaddr_storage source;
		struct sockaddr_storage destination;
	} storage;
};


static object_cache *sNetBufferCache;
static object_cache *sDataNodeCache;


static status_t append_data(net_buffer *buffer, const void *data, size_t size);
static status_t trim_data(net_buffer *_buffer, size_t newSize);
static status_t remove_header(net_buffer *_buffer, size_t bytes);
static status_t remove_trailer(net_buffer *_buffer, size_t bytes);

#if ENABLE_DEBUGGER_COMMANDS
static vint32 sAllocatedDataHeaderCount = 0;
static vint32 sAllocatedNetBufferCount = 0;
static vint32 sEverAllocatedDataHeaderCount = 0;
static vint32 sEverAllocatedNetBufferCount = 0;
#endif


#if 1
static void
dump_buffer(net_buffer *_buffer)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	dprintf("buffer %p, size %ld\n", buffer, buffer->size);
	data_node *node = NULL;
	while ((node = (data_node *)list_get_next_item(&buffer->buffers, node)) != NULL) {
		dprintf("  node %p, offset %lu, used %u, header %u, tail %u, header %p\n",
			node, node->offset, node->used, node->header_space, node->tail_space, node->header);
		//dump_block((char *)node->start, node->used, "    ");
		dump_block((char *)node->start, min_c(node->used, 32), "    ");
	}
}
#endif


#if ENABLE_DEBUGGER_COMMANDS

static int
dump_net_buffer_stats(int argc, char** argv)
{
	kprintf("allocated data headers: %7ld / %7ld\n", sAllocatedDataHeaderCount,
		sEverAllocatedDataHeaderCount);
	kprintf("allocated net buffers: %7ld / %7ld\n", sAllocatedNetBufferCount,
		sEverAllocatedNetBufferCount);
	return 0;
}

#endif	// ENABLE_DEBUGGER_COMMANDS


static inline data_header *
allocate_data_header()
{
#if ENABLE_DEBUGGER_COMMANDS
	atomic_add(&sAllocatedDataHeaderCount, 1);
	atomic_add(&sEverAllocatedDataHeaderCount, 1);
#endif
	return (data_header *)object_cache_alloc(sDataNodeCache, CACHE_DONT_SLEEP);
}


static inline net_buffer_private *
allocate_net_buffer()
{
#if ENABLE_DEBUGGER_COMMANDS
	atomic_add(&sAllocatedNetBufferCount, 1);
	atomic_add(&sEverAllocatedNetBufferCount, 1);
#endif
	return (net_buffer_private *)object_cache_alloc(sNetBufferCache,
		CACHE_DONT_SLEEP);
}


static inline void
free_data_header(data_header *header)
{
#if ENABLE_DEBUGGER_COMMANDS
	if (header != NULL)
		atomic_add(&sAllocatedDataHeaderCount, -1);
#endif
	object_cache_free(sDataNodeCache, header);
}


static inline void
free_net_buffer(net_buffer_private *buffer)
{
#if ENABLE_DEBUGGER_COMMANDS
	if (buffer != NULL)
		atomic_add(&sAllocatedNetBufferCount, -1);
#endif
	object_cache_free(sNetBufferCache, buffer);
}


static data_header *
create_data_header(size_t headerSpace)
{
	data_header *header = allocate_data_header();
	if (header == NULL)
		return NULL;

	header->ref_count = 1;
	header->physical_address = 0;
		// TODO: initialize this correctly
	header->data_space = headerSpace;
	header->data_end = (uint8 *)header + sizeof(struct data_header);
	header->first_free = NULL;
	header->first_node = NULL;

	TRACE(("%ld: create new data header %p\n", find_thread(NULL), header));
	return header;
}


static void
release_data_header(data_header *header)
{
	if (atomic_add(&header->ref_count, -1) != 1)
		return;

	TRACE(("%ld: free header %p\n", find_thread(NULL), header));
	free_data_header(header);
}


inline void
acquire_data_header(data_header *header)
{
	atomic_add(&header->ref_count, 1);
}


static void
free_data_header_space(data_header *header, uint8 *data, size_t size)
{
	if (size < sizeof(free_data))
		size = sizeof(free_data);

	free_data *freeData = (free_data *)data;
	freeData->next = header->first_free;
	freeData->size = size;

	header->first_free = freeData;
	header->data_space += size;
		// TODO: the first node's header space could grow again
}


/*!
	Tries to allocate \a size bytes from the free space in the header.
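	Allocations smaller than sizeof(free_data) are rounded up, so that the
	space can later be linked back into the free list. This is, for example,
	how add_data_node() below places a data_node inside a header's buffer
	(illustrative excerpt):
		data_node *node = (data_node *)alloc_data_header_space(header,
			sizeof(data_node));
	A \c NULL return means that neither the free list nor the remaining data
	space could satisfy the request.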
*/
static uint8 *
alloc_data_header_space(data_header *header, size_t size)
{
	if (size < sizeof(free_data))
		size = sizeof(free_data);

	if (header->first_free != NULL && header->first_free->size >= size) {
		// the first entry of the header space matches the allocation's needs
		uint8 *data = (uint8 *)header->first_free;
		header->first_free = header->first_free->next;
		return data;
	}

	if (header->data_space < size) {
		// there is no free space left, search free list
		free_data *freeData = header->first_free;
		free_data *last = NULL;
		while (freeData != NULL) {
			if (last != NULL && freeData->size >= size) {
				// take this one
				last->next = freeData->next;
				return (uint8 *)freeData;
			}

			last = freeData;
			freeData = freeData->next;
		}

		return NULL;
	}

	// allocate new space

	uint8 *data = header->data_end;
	header->data_end += size;
	header->data_space -= size;

	if (header->first_node != NULL)
		header->first_node->header_space -= size;

	return data;
}


/*!
	Initializes the first data_node of a data_header.
	The node must have been assigned to the header already.
*/
static void
init_first_data_node(data_node *node)
{
	data_header *header = node->header;

	node->offset = 0;
	node->start = header->data_end + header->data_space;
	node->used = 0;
	node->header_space = header->data_space;
	node->tail_space = (uint8 *)header + BUFFER_SIZE - node->start;

	header->first_node = node;
}


static data_node *
add_data_node(data_header *header, data_header *located = NULL)
{
	if (located == NULL)
		located = header;

	data_node *node = (data_node *)alloc_data_header_space(located,
		sizeof(data_node));
	if (node == NULL)
		return NULL;

	TRACE(("%ld: add data node %p to header %p\n", find_thread(NULL), node,
		header));

	acquire_data_header(header);
	if (located != header)
		acquire_data_header(located);

	memset(node, 0, sizeof(struct data_node));
	node->located = located;
	node->header = header;
	return node;
}


void
remove_data_node(data_node *node)
{
	data_header *located = node->located;

	TRACE(("%ld: remove data node %p from header %p (located %p)\n",
		find_thread(NULL), node, node->header, located));

	if (located != node->header)
		release_data_header(node->header);

	if (located == NULL)
		return;

	free_data_header_space(located, (uint8 *)node, sizeof(data_node));
	if (located->first_node == node) {
		located->first_node = NULL;
		located->data_space = 0;
	}

	release_data_header(located);
}


static inline data_node *
get_node_at_offset(net_buffer_private *buffer, size_t offset)
{
	data_node *node = (data_node *)list_get_first_item(&buffer->buffers);
	while (node->offset + node->used <= offset) {
		node = (data_node *)list_get_next_item(&buffer->buffers, node);
		if (node == NULL)
			return NULL;
	}

	return node;
}


static void
copy_metadata(net_buffer *destination, const net_buffer *source)
{
	memcpy(destination->source, source->source,
		min_c(source->source->sa_len, sizeof(sockaddr_storage)));
	memcpy(destination->destination, source->destination,
		min_c(source->destination->sa_len, sizeof(sockaddr_storage)));

	destination->flags = source->flags;
	destination->interface = source->interface;
	destination->offset = source->offset;
	destination->size = source->size;
	destination->protocol = source->protocol;
	destination->type = source->type;
}


// #pragma mark - module API


static net_buffer *
create_buffer(size_t headerSpace)
{
	net_buffer_private *buffer = allocate_net_buffer();
	if (buffer == NULL)
		return NULL;

	TRACE(("%ld: create buffer %p\n", find_thread(NULL), buffer));

	data_header *header = create_data_header(headerSpace);
	if (header == NULL) {
		free_net_buffer(buffer);
		return NULL;
	}

	buffer->first_node.header = header;
	buffer->first_node.located = NULL;
	init_first_data_node(&buffer->first_node);

	list_init(&buffer->buffers);
	list_add_item(&buffer->buffers, &buffer->first_node);

	new(&buffer->ancillary_data) ancillary_data_list;

	buffer->source = (sockaddr *)&buffer->storage.source;
	buffer->destination = (sockaddr *)&buffer->storage.destination;

	buffer->storage.source.ss_len = 0;
	buffer->storage.destination.ss_len = 0;

	buffer->interface = NULL;
	buffer->offset = 0;
	buffer->flags = 0;
	buffer->size = 0;

	buffer->type = -1;

	return buffer;
}


static void
free_buffer(net_buffer *_buffer)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	TRACE(("%ld: free buffer %p\n", find_thread(NULL), buffer));

	data_node *node;
	while ((node = (data_node *)list_remove_head_item(&buffer->buffers)) != NULL) {
		remove_data_node(node);
	}

	while (ancillary_data* data = buffer->ancillary_data.RemoveHead()) {
		if (data->destructor != NULL)
			data->destructor(&data->header, data->Data());
		free(data);
	}

	free_net_buffer(buffer);
}


/*! Creates a duplicate of the \a buffer. The new buffer does not share
	internal storage with the original; the two are completely independent
	of each other.
*/
static net_buffer *
duplicate_buffer(net_buffer *_buffer)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	TRACE(("%ld: duplicate_buffer(buffer %p)\n", find_thread(NULL), buffer));

	net_buffer *duplicate = create_buffer(buffer->first_node.header_space);
	if (duplicate == NULL)
		return NULL;

	TRACE(("%ld: duplicate: %p)\n", find_thread(NULL), duplicate));

	// copy the data from the source buffer

	data_node *node = (data_node *)list_get_first_item(&buffer->buffers);
	while (true) {
		if (append_data(duplicate, node->start, node->used) < B_OK) {
			free_buffer(duplicate);
			return NULL;
		}

		node = (data_node *)list_get_next_item(&buffer->buffers, node);
		if (node == NULL)
			break;
	}

	copy_metadata(duplicate, buffer);

	return duplicate;
}


/*! Clones the buffer by grabbing another reference to the underlying data.
	If that data changes, it will be changed in the clone as well.

	If \a shareFreeSpace is \c true, the cloned buffer may claim the free
	space in the original buffer, just as the original buffer still can. If
	you use this, it is your responsibility to make sure that only one of
	the two buffers actually does so.
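	A minimal (illustrative) call that leaves all free space with the
	original could look like this:
		net_buffer *clone = clone_buffer(buffer, false);
	Writing to the data through either buffer is visible in both, since the
	underlying data_headers are shared.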
*/
static net_buffer *
clone_buffer(net_buffer *_buffer, bool shareFreeSpace)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	TRACE(("%ld: clone_buffer(buffer %p)\n", find_thread(NULL), buffer));

	net_buffer_private *clone = allocate_net_buffer();
	if (clone == NULL)
		return NULL;

	TRACE(("%ld: clone: %p\n", find_thread(NULL), clone));

	data_node *sourceNode = (data_node *)list_get_first_item(&buffer->buffers);
	if (sourceNode == NULL) {
		free_net_buffer(clone);
		return NULL;
	}

	clone->source = (sockaddr *)&clone->storage.source;
	clone->destination = (sockaddr *)&clone->storage.destination;

	list_init(&clone->buffers);

	// grab reference to this buffer - all additional nodes will get
	// theirs in add_data_node()
	acquire_data_header(sourceNode->header);
	data_node *node = &clone->first_node;
	node->header = sourceNode->header;
	node->located = NULL;

	while (sourceNode != NULL) {
		node->start = sourceNode->start;
		node->used = sourceNode->used;
		node->offset = sourceNode->offset;

		if (shareFreeSpace) {
			// both buffers could claim the free space - note that this option
			// has to be used carefully
			node->header_space = sourceNode->header_space;
			node->tail_space = sourceNode->tail_space;
		} else {
			// the free space stays with the original buffer
			node->header_space = 0;
			node->tail_space = 0;
		}

		// add node to clone's list of buffers
		list_add_item(&clone->buffers, node);

		sourceNode = (data_node *)list_get_next_item(&buffer->buffers, sourceNode);
		if (sourceNode == NULL)
			break;

		node = add_data_node(sourceNode->header);
		if (node == NULL) {
			// There was not enough space left for another node in this buffer
			// TODO: handle this case!
			panic("clone buffer hits size limit... (fix me)");
			free_net_buffer(clone);
			return NULL;
		}
	}

	copy_metadata(clone, buffer);

	return clone;
}


/*!
	Splits the buffer at the given offset; the data preceding the offset is
	returned as a new buffer.
	TODO: optimize and avoid making a copy.
*/
static net_buffer *
split_buffer(net_buffer *from, uint32 offset)
{
	net_buffer *buffer = duplicate_buffer(from);
	if (buffer == NULL)
		return NULL;

	TRACE(("%ld: split_buffer(buffer %p -> %p, offset %ld)\n",
		find_thread(NULL), from, buffer, offset));

	if (trim_data(buffer, offset) == B_OK) {
		if (remove_header(from, offset) == B_OK)
			return buffer;
	}

	free_buffer(buffer);
	return NULL;
}


/*!
	Merges the second buffer with the first. If \a after is \c true, the
	second buffer's contents will be appended to the first one's, else they
	will be prepended.
	The second buffer will be freed if this function succeeds.
*/
static status_t
merge_buffer(net_buffer *_buffer, net_buffer *_with, bool after)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;
	net_buffer_private *with = (net_buffer_private *)_with;
	if (with == NULL)
		return B_BAD_VALUE;

	TRACE(("%ld: merge buffer %p with %p (%s)\n", find_thread(NULL), buffer,
		with, after ? "after" : "before"));
	//dump_buffer(buffer);
	//dprintf("with:\n");
	//dump_buffer(with);

	// TODO: this is currently very simplistic, I really need to finish the
	// harder part of this implementation (data_node management per header)

	data_node *before = NULL;

	if (!after) {
		// change offset of all nodes already in the buffer
		data_node *node = NULL;
		while (true) {
			node = (data_node *)list_get_next_item(&buffer->buffers, node);
			if (node == NULL)
				break;

			node->offset += with->size;
			if (before == NULL)
				before = node;
		}
	}

	data_node *last = NULL;

	while (true) {
		data_node *node = (data_node *)list_get_next_item(&with->buffers, last);
		if (node == NULL)
			break;

		if ((uint8 *)node > (uint8 *)node->header
			&& (uint8 *)node < (uint8 *)node->header + BUFFER_SIZE) {
			// The node is already in the buffer, we can just move it
			// over to the new owner
			list_remove_item(&with->buffers, node);
		} else {
			// we need a new place for this node
			data_node *newNode = add_data_node(node->header);
			if (newNode == NULL) {
				// try again on the buffer's own header
				newNode = add_data_node(node->header, buffer->first_node.header);
				if (newNode == NULL)
					// TODO: try to revert buffers to their initial state!!
					return ENOBUFS;
			}

			last = node;
			*newNode = *node;
			node = newNode;
				// the old node will get freed with its buffer
		}

		if (after) {
			list_add_item(&buffer->buffers, node);
			node->offset = buffer->size;
		} else
			list_insert_item_before(&buffer->buffers, before, node);

		buffer->size += node->used;
	}

	// the data has been merged completely at this point
	free_buffer(with);

	//dprintf(" merge result:\n");
	//dump_buffer(buffer);
	return B_OK;
}


/*! Writes into existing allocated memory.
	\return B_BAD_VALUE if you write outside of the buffer's current
		bounds.
*/
static status_t
write_data(net_buffer *_buffer, size_t offset, const void *data, size_t size)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	if (offset + size > buffer->size)
		return B_BAD_VALUE;
	if (size == 0)
		return B_OK;

	// find first node to write into
	data_node *node = get_node_at_offset(buffer, offset);
	if (node == NULL)
		return B_BAD_VALUE;

	offset -= node->offset;

	while (true) {
		size_t written = min_c(size, node->used - offset);
		memcpy(node->start + offset, data, written);

		size -= written;
		if (size == 0)
			break;

		offset = 0;
		data = (void *)((uint8 *)data + written);

		node = (data_node *)list_get_next_item(&buffer->buffers, node);
		if (node == NULL)
			return B_BAD_VALUE;
	}

	return B_OK;
}


static status_t
read_data(net_buffer *_buffer, size_t offset, void *data, size_t size)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	if (offset + size > buffer->size)
		return B_BAD_VALUE;
	if (size == 0)
		return B_OK;

	// find first node to read from
	data_node *node = get_node_at_offset(buffer, offset);
	if (node == NULL)
		return B_BAD_VALUE;

	offset -= node->offset;

	while (true) {
		size_t bytesRead = min_c(size, node->used - offset);
		memcpy(data, node->start + offset, bytesRead);

		size -= bytesRead;
		if (size == 0)
			break;

		offset = 0;
		data = (void *)((uint8 *)data + bytesRead);

		node = (data_node *)list_get_next_item(&buffer->buffers, node);
		if (node == NULL)
			return B_BAD_VALUE;
	}

	return B_OK;
}


static status_t
prepend_size(net_buffer *_buffer, size_t size, void **_contiguousBuffer)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;
	data_node *node = (data_node *)list_get_first_item(&buffer->buffers);

	TRACE(("%ld: prepend_size(buffer %p, size %ld) [has %u]\n",
		find_thread(NULL), buffer, size, node->header_space));
	//dump_buffer(buffer);

	if (node->header_space < size) {
		// we need to prepend new buffers

		size_t bytesLeft = size;
		do {
			if (node->header_space == 0) {
				size_t headerSpace = MAX_FREE_BUFFER_SIZE;
				data_header *header = create_data_header(headerSpace);
				if (header == NULL) {
					// TODO: free up headers we already allocated!
					return B_NO_MEMORY;
				}

				data_node *previous = node;

				node = (data_node *)add_data_node(header);
				init_first_data_node(node);

				list_insert_item_before(&buffer->buffers, previous, node);

				// Release the initial reference to the header, so that it will
				// be deleted when the node is removed.
				release_data_header(header);
			}

			size_t willConsume = min_c(bytesLeft, node->header_space);

			node->header_space -= willConsume;
			node->start -= willConsume;
			node->used += willConsume;
			bytesLeft -= willConsume;
		} while (bytesLeft > 0);

		// correct data offset in all nodes

		size_t offset = 0;
		node = NULL;
		while ((node = (data_node *)list_get_next_item(&buffer->buffers,
				node)) != NULL) {
			node->offset = offset;
			offset += node->used;
		}

		if (_contiguousBuffer)
			*_contiguousBuffer = NULL;
	} else {
		// the data fits into this buffer
		node->header_space -= size;
		node->start -= size;
		node->used += size;

		if (_contiguousBuffer)
			*_contiguousBuffer = node->start;

		// adjust offset of following nodes
		while ((node = (data_node *)list_get_next_item(&buffer->buffers, node)) != NULL) {
			node->offset += size;
		}
	}

	buffer->size += size;

	//dprintf(" prepend_size result:\n");
	//dump_buffer(buffer);
	return B_OK;
}


static status_t
prepend_data(net_buffer *buffer, const void *data, size_t size)
{
	void *contiguousBuffer;
	status_t status = prepend_size(buffer, size, &contiguousBuffer);
	if (status < B_OK)
		return status;

	if (contiguousBuffer)
		memcpy(contiguousBuffer, data, size);
	else
		write_data(buffer, 0, data, size);

	//dprintf(" prepend result:\n");
	//dump_buffer(buffer);

	return B_OK;
}


static status_t
append_size(net_buffer *_buffer, size_t size, void **_contiguousBuffer)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;
	data_node *node = (data_node *)list_get_last_item(&buffer->buffers);

	TRACE(("%ld: append_size(buffer %p, size %ld)\n", find_thread(NULL),
		buffer, size));
	//dump_buffer(buffer);

	if (node->tail_space < size) {
		// we need to append a new buffer

		// compute how many buffers we're going to need
		// TODO: this doesn't leave any tail space, if that should be desired...
		uint32 previousTailSpace = node->tail_space;
		uint32 minimalHeaderSpace = sizeof(data_header) + 3 * sizeof(data_node);
		uint32 sizeNeeded = size - previousTailSpace;
		uint32 count = (sizeNeeded + BUFFER_SIZE - minimalHeaderSpace - 1)
			/ (BUFFER_SIZE - minimalHeaderSpace);
		uint32 headerSpace = BUFFER_SIZE - sizeNeeded / count - sizeof(data_header);
		uint32 sizeUsed = MAX_FREE_BUFFER_SIZE - headerSpace;
		uint32 sizeAdded = previousTailSpace;

		// allocate space left in the node
		node->tail_space = 0;
		node->used += previousTailSpace;
		buffer->size += previousTailSpace;

		// allocate all buffers

		for (uint32 i = 0; i < count; i++) {
			if (i == count - 1) {
				// last data_header - compensate rounding errors
				sizeUsed = size - sizeAdded;
				headerSpace = MAX_FREE_BUFFER_SIZE - sizeUsed;
			}

			data_header *header = create_data_header(headerSpace);
			if (header == NULL) {
				// TODO: free up headers we already allocated!
				return B_NO_MEMORY;
			}

			node = (data_node *)add_data_node(header);
				// this can't fail as we made sure there will be enough header space

			init_first_data_node(node);
			node->tail_space -= sizeUsed;
			node->used = sizeUsed;
			node->offset = buffer->size;

			buffer->size += sizeUsed;
			sizeAdded += sizeUsed;

			list_add_item(&buffer->buffers, node);

			// Release the initial reference to the header, so that it will
			// be deleted when the node is removed.
			release_data_header(header);
		}

		if (_contiguousBuffer)
			*_contiguousBuffer = NULL;

		//dprintf(" append result 1:\n");
		//dump_buffer(buffer);
		return B_OK;
	}

	// the data fits into this buffer
	node->tail_space -= size;

	if (_contiguousBuffer)
		*_contiguousBuffer = node->start + node->used;

	node->used += size;
	buffer->size += size;

	//dprintf(" append result 2:\n");
	//dump_buffer(buffer);
	return B_OK;
}


static status_t
append_data(net_buffer *buffer, const void *data, size_t size)
{
	size_t used = buffer->size;

	void *contiguousBuffer;
	status_t status = append_size(buffer, size, &contiguousBuffer);
	if (status < B_OK)
		return status;

	if (contiguousBuffer)
		memcpy(contiguousBuffer, data, size);
	else
		write_data(buffer, used, data, size);

	return B_OK;
}


/*!
	Removes bytes from the beginning of the buffer.
*/
static status_t
remove_header(net_buffer *_buffer, size_t bytes)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	if (bytes > buffer->size)
		return B_BAD_VALUE;

	TRACE(("%ld: remove_header(buffer %p, %ld bytes)\n", find_thread(NULL),
		buffer, bytes));
	//dump_buffer(buffer);

	size_t left = bytes;
	data_node *node = NULL;

	while (left >= 0) {
		node = (data_node *)list_get_first_item(&buffer->buffers);
		if (node == NULL) {
			if (left == 0)
				break;
			return B_ERROR;
		}

		if (node->used > left)
			break;

		// node will be removed completely
		list_remove_item(&buffer->buffers, node);
		left -= node->used;
		remove_data_node(node);
		node = NULL;
	}

	// cut remaining node, if any

	if (node != NULL) {
		size_t cut = min_c(node->used, left);
		node->offset = 0;
		node->start += cut;
		node->header_space += cut;
		node->used -= cut;

		node = (data_node *)list_get_next_item(&buffer->buffers, node);
	}

	// adjust offset of following nodes
	while (node != NULL) {
		node->offset -= bytes;
		node = (data_node *)list_get_next_item(&buffer->buffers, node);
	}

	buffer->size -= bytes;

	//dprintf(" remove result:\n");
	//dump_buffer(buffer);
	return B_OK;
}


/*!
	Removes bytes from the end of the buffer.
*/
static status_t
remove_trailer(net_buffer *buffer, size_t bytes)
{
	return trim_data(buffer, buffer->size - bytes);
}


/*!
	Trims the buffer to the specified \a newSize by removing space from
	the end of the buffer.
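	remove_trailer() above is just a thin wrapper around this; for example,
	stripping a 4 byte checksum via remove_trailer(buffer, 4) is equivalent
	to trim_data(buffer, buffer->size - 4).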
*/
static status_t
trim_data(net_buffer *_buffer, size_t newSize)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;
	TRACE(("%ld: trim_data(buffer %p, newSize = %ld, buffer size = %ld)\n",
		find_thread(NULL), buffer, newSize, buffer->size));
	//dump_buffer(buffer);

	if (newSize > buffer->size)
		return B_BAD_VALUE;
	if (newSize == buffer->size)
		return B_OK;

	data_node *node = get_node_at_offset(buffer, newSize);
	if (node == NULL) {
		// trim size greater than buffer size
		return B_BAD_VALUE;
	}

	int32 diff = node->used + node->offset - newSize;
	node->tail_space += diff;
	node->used -= diff;

	if (node->used > 0)
		node = (data_node *)list_get_next_item(&buffer->buffers, node);

	while (node != NULL) {
		data_node *next = (data_node *)list_get_next_item(&buffer->buffers, node);
		list_remove_item(&buffer->buffers, node);
		remove_data_node(node);

		node = next;
	}

	buffer->size = newSize;

	//dprintf(" trim result:\n");
	//dump_buffer(buffer);
	return B_OK;
}


/*!
	Appends data coming from buffer \a source to the buffer \a buffer. It only
	clones the data, though; that is, the data is not copied, just referenced.
*/
status_t
append_cloned_data(net_buffer *_buffer, net_buffer *_source, uint32 offset,
	size_t bytes)
{
	if (bytes == 0)
		return B_OK;

	net_buffer_private *buffer = (net_buffer_private *)_buffer;
	net_buffer_private *source = (net_buffer_private *)_source;
	TRACE(("%ld: append_cloned_data(buffer %p, source %p, offset = %ld, "
		"bytes = %ld)\n", find_thread(NULL), buffer, source, offset, bytes));

	if (source->size < offset + bytes || source->size < offset)
		return B_BAD_VALUE;

	// find data_node to start with from the source buffer
	data_node *node = get_node_at_offset(source, offset);
	if (node == NULL) {
		// the offset is beyond the end of the source buffer
		return B_BAD_VALUE;
	}

	while (node != NULL && bytes > 0) {
		data_node *clone = add_data_node(node->header, buffer->first_node.header);
		if (clone == NULL)
			clone = add_data_node(node->header);
		if (clone == NULL) {
			// There is not enough space in the buffer for another node
			// TODO: handle this case!
			dump_buffer(buffer);
			dprintf("SOURCE:\n");
			dump_buffer(source);
			panic("appending clone buffer in new header not implemented\n");
			return ENOBUFS;
		}

		if (offset)
			offset -= node->offset;

		clone->offset = buffer->size;
		clone->start = node->start + offset;
		clone->used = min_c(bytes, node->used - offset);
		clone->header_space = 0;
		clone->tail_space = 0;

		list_add_item(&buffer->buffers, clone);

		offset = 0;
		bytes -= clone->used;
		buffer->size += clone->used;
		node = (data_node *)list_get_next_item(&source->buffers, node);
	}

	if (bytes != 0)
		panic("append_cloned_data() failed, bytes != 0!\n");

	//dprintf(" append cloned result:\n");
	//dump_buffer(buffer);
	return B_OK;
}


/*!
	Attaches ancillary data to the given buffer. The data are completely
	orthogonal to the data the buffer stores.

	\param buffer The buffer.
	\param header Description of the data.
	\param data If not \c NULL, the data are copied into the allocated storage.
	\param destructor If not \c NULL, this function will be invoked with the
		data as parameter when the buffer is destroyed.
	\param _allocatedData Will be set to the storage allocated for the data.
	\return \c B_OK when everything goes well, another error code otherwise.
*/
static status_t
attach_ancillary_data(net_buffer *_buffer, const ancillary_data_header *header,
	const void *data, void (*destructor)(const ancillary_data_header*, void*),
	void **_allocatedData)
{
	// TODO: Obviously it would be nice to allocate the memory for the
	// ancillary data in the buffer.
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	// check parameters
	if (header == NULL)
		return B_BAD_VALUE;

	if (header->len > MAX_ANCILLARY_DATA_SIZE)
		return ENOBUFS;

	// allocate buffer
	void *dataBuffer = malloc(_ALIGN(sizeof(ancillary_data)) + header->len);
	if (dataBuffer == NULL)
		return B_NO_MEMORY;

	// init and attach the structure
	ancillary_data *ancillaryData = new(dataBuffer) ancillary_data;
	ancillaryData->header = *header;
	ancillaryData->destructor = destructor;

	buffer->ancillary_data.Add(ancillaryData);

	if (data != NULL)
		memcpy(ancillaryData->Data(), data, header->len);

	if (_allocatedData != NULL)
		*_allocatedData = ancillaryData->Data();

	return B_OK;
}


/*!
	Detaches ancillary data from the given buffer. The associated memory is
	freed, i.e. the \a data pointer must no longer be used after calling this
	function. Depending on \a destroy, the destructor is invoked before freeing
	the data.

	\param buffer The buffer.
	\param data Pointer to the data to be removed (as returned by
		attach_ancillary_data() or next_ancillary_data()).
	\param destroy If \c true, the destructor, if one was passed to
		attach_ancillary_data(), is invoked for the data.
	\return \c B_OK when everything goes well, another error code otherwise.
*/
static status_t
detach_ancillary_data(net_buffer *_buffer, void *data, bool destroy)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	if (data == NULL)
		return B_BAD_VALUE;

	ancillary_data *ancillaryData = ancillary_data::FromData(data);

	buffer->ancillary_data.Remove(ancillaryData);

	if (destroy && ancillaryData->destructor != NULL) {
		ancillaryData->destructor(&ancillaryData->header,
			ancillaryData->Data());
	}

	free(ancillaryData);

	return B_OK;
}


/*!
	Moves all ancillary data from buffer \c from to the end of the list of
	ancillary data of buffer \c to. Note that this is the only function that
	transfers or copies ancillary data from one buffer to another.

	\param from The buffer from which to remove the ancillary data.
	\param to The buffer to which to add the ancillary data.
	\return A pointer to the first of the moved ancillary data, if any, \c NULL
		otherwise.
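	An illustrative way to move the data and then walk it on the target
	buffer:
		void *data = transfer_ancillary_data(from, to);
		while (data != NULL) {
			// ... inspect the item ...
			data = next_ancillary_data(to, data, NULL);
		}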
*/
static void *
transfer_ancillary_data(net_buffer *_from, net_buffer *_to)
{
	net_buffer_private *from = (net_buffer_private *)_from;
	net_buffer_private *to = (net_buffer_private *)_to;

	if (from == NULL || to == NULL)
		return NULL;

	ancillary_data *ancillaryData = from->ancillary_data.Head();
	to->ancillary_data.MoveFrom(&from->ancillary_data);

	return ancillaryData != NULL ? ancillaryData->Data() : NULL;
}


/*!
	Returns the next ancillary data. When iterating over the data, initially
	a \c NULL pointer shall be passed as \a previousData, subsequently the
	previously returned data pointer. After the last item, \c NULL is returned.

	Note that it is not safe to call detach_ancillary_data() for a data item
	and then pass that pointer to this function. First get the next item, then
	detach the previous one.

	\param buffer The buffer.
	\param previousData The pointer to the previous data returned by this
		function. Initially \c NULL shall be passed.
	\param header Pointer to allocated storage into which the data description
		is written. May be \c NULL.
	\return A pointer to the next ancillary data in the buffer. \c NULL after
		the last one.
*/
static void*
next_ancillary_data(net_buffer *_buffer, void *previousData,
	ancillary_data_header *_header)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	ancillary_data *ancillaryData;

	if (previousData == NULL) {
		ancillaryData = buffer->ancillary_data.Head();
	} else {
		ancillaryData = ancillary_data::FromData(previousData);
		ancillaryData = buffer->ancillary_data.GetNext(ancillaryData);
	}

	if (ancillaryData == NULL)
		return NULL;

	if (_header != NULL)
		*_header = ancillaryData->header;

	return ancillaryData->Data();
}


/*!
	Tries to directly access the requested space in the buffer.
	If the space is contiguous, the function will succeed and place a pointer
	to that space into \a _contiguousBuffer.

	\return B_BAD_VALUE if the offset is outside of the buffer's bounds.
	\return B_ERROR in case the buffer is not contiguous at that location.
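	A typical caller tries this first and falls back to copying when the
	requested range spans more than one data_node, for example (sketch;
	"my_header" is hypothetical):
		my_header *header;
		if (direct_access(buffer, 0, sizeof(my_header),
				(void **)&header) != B_OK) {
			// not contiguous - copy into a local structure instead
			my_header localHeader;
			read_data(buffer, 0, &localHeader, sizeof(my_header));
			header = &localHeader;
		}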
*/
static status_t
direct_access(net_buffer *_buffer, uint32 offset, size_t size,
	void **_contiguousBuffer)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	//TRACE(("direct_access(buffer %p, offset %ld, size %ld)\n", buffer, offset, size));

	if (offset + size > buffer->size)
		return B_BAD_VALUE;

	// find node to access
	data_node *node = get_node_at_offset(buffer, offset);
	if (node == NULL)
		return B_BAD_VALUE;

	offset -= node->offset;

	if (size > node->used - offset)
		return B_ERROR;

	*_contiguousBuffer = node->start + offset;
	return B_OK;
}


static int32
checksum_data(net_buffer *_buffer, uint32 offset, size_t size, bool finalize)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	if (offset + size > buffer->size || size == 0)
		return B_BAD_VALUE;

	// find first node to read from
	data_node *node = get_node_at_offset(buffer, offset);
	if (node == NULL)
		return B_ERROR;

	offset -= node->offset;

	// Since the maximum buffer size is 65536 bytes, it's impossible
	// to overflow 32 bits - we don't need to handle this overflow in
	// the loop, we can safely do it afterwards
	uint32 sum = 0;

	while (true) {
		size_t bytes = min_c(size, node->used - offset);
		if ((offset + node->offset) & 1) {
			// if we're at an odd offset, we have to swap the checksum
			sum += __swap_int16(compute_checksum(node->start + offset, bytes));
		} else
			sum += compute_checksum(node->start + offset, bytes);

		size -= bytes;
		if (size == 0)
			break;

		offset = 0;

		node = (data_node *)list_get_next_item(&buffer->buffers, node);
		if (node == NULL)
			return B_ERROR;
	}

	while (sum >> 16) {
		sum = (sum & 0xffff) + (sum >> 16);
	}

	if (!finalize)
		return (uint16)sum;

	return (uint16)~sum;
}


static uint32
get_iovecs(net_buffer *_buffer, struct iovec *iovecs, uint32 vecCount)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;
	data_node *node = (data_node *)list_get_first_item(&buffer->buffers);
	uint32 count = 0;

	while (node != NULL && count < vecCount) {
		if (node->used > 0) {
			iovecs[count].iov_base = node->start;
			iovecs[count].iov_len = node->used;
			count++;
		}

		node = (data_node *)list_get_next_item(&buffer->buffers, node);
	}

	return count;
}


static uint32
count_iovecs(net_buffer *_buffer)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;
	data_node *node = (data_node *)list_get_first_item(&buffer->buffers);
	uint32 count = 0;

	while (node != NULL) {
		if (node->used > 0)
			count++;

		node = (data_node *)list_get_next_item(&buffer->buffers, node);
	}

	return count;
}


static void
swap_addresses(net_buffer *buffer)
{
	std::swap(buffer->source, buffer->destination);
}


static status_t
std_ops(int32 op, ...)
{
	switch (op) {
		case B_MODULE_INIT:
			// TODO: improve our code a bit so we can add constructors
			// and keep around half-constructed buffers in the slab

			sNetBufferCache = create_object_cache("net buffer cache",
				sizeof(net_buffer_private), 8, NULL, NULL, NULL);
			if (sNetBufferCache == NULL)
				return B_NO_MEMORY;

			sDataNodeCache = create_object_cache("data node cache", BUFFER_SIZE,
				0, NULL, NULL, NULL);
			if (sDataNodeCache == NULL) {
				delete_object_cache(sNetBufferCache);
				return B_NO_MEMORY;
			}

#if ENABLE_DEBUGGER_COMMANDS
			add_debugger_command_etc("net_buffer_stats", &dump_net_buffer_stats,
				"Print net buffer statistics",
				"\nPrint net buffer statistics.\n", 0);
#endif

			return B_OK;

		case B_MODULE_UNINIT:
#if ENABLE_DEBUGGER_COMMANDS
			remove_debugger_command("net_buffer_stats", &dump_net_buffer_stats);
#endif
			delete_object_cache(sNetBufferCache);
			delete_object_cache(sDataNodeCache);
			return B_OK;

		default:
			return B_ERROR;
	}
}


net_buffer_module_info gNetBufferModule = {
	{
		NET_BUFFER_MODULE_NAME,
		0,
		std_ops
	},
	create_buffer,
	free_buffer,

	duplicate_buffer,
	clone_buffer,
	split_buffer,
	merge_buffer,

	prepend_size,
	prepend_data,
	append_size,
	append_data,
	NULL,		// insert
	NULL,		// remove
	remove_header,
	remove_trailer,
	trim_data,
	append_cloned_data,

	NULL,		// associate_data

	attach_ancillary_data,
	detach_ancillary_data,
	transfer_ancillary_data,
	next_ancillary_data,

	direct_access,
	read_data,
	write_data,

	checksum_data,

	NULL,		// get_memory_map
	get_iovecs,
	count_iovecs,

	swap_addresses,

	dump_buffer,	// dump
};

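
/*
	Usage sketch (illustrative only, not part of the module): a protocol
	could build an outgoing packet with the functions above - exported
	through gNetBufferModule - roughly like this. "my_header", "payload",
	and "payloadSize" are hypothetical.

		net_buffer *buffer = create_buffer(256);
			// reserve 256 bytes of header space for lower layers
		if (buffer == NULL)
			return B_NO_MEMORY;

		if (append_data(buffer, payload, payloadSize) != B_OK) {
			free_buffer(buffer);
			return B_NO_MEMORY;
		}

		my_header *header;
		status_t status = prepend_size(buffer, sizeof(my_header),
			(void **)&header);
		if (status != B_OK || header == NULL) {
			// out of memory, or the header space was not contiguous
			free_buffer(buffer);
			return status != B_OK ? status : B_ERROR;
		}

		header->length = B_HOST_TO_BENDIAN_INT16((uint16)buffer->size);
		header->checksum = 0;
		header->checksum = checksum_data(buffer, 0, buffer->size, true);

		// ...hand the buffer to the next lower level, or free it again:
		free_buffer(buffer);
*/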