/*
 * Copyright 2006-2007, Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Axel Dörfler, axeld@pinc-software.de
 */


#include "utility.h"

#include <net_buffer.h>
#include <util/list.h>

#include <ByteOrder.h>
#include <KernelExport.h>

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>


//#define TRACE_BUFFER
#ifdef TRACE_BUFFER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#define BUFFER_SIZE 2048
	// maximum implementation derived buffer size is 65536

struct data_node {
	struct list_link link;
	struct data_header *header;
	struct data_header *located;
	size_t offset;			// the net_buffer-wide offset of this node
	uint8 *start;			// points to the start of the data
	uint16 used;			// defines how much memory is used by this node
	uint16 header_space;
	uint16 tail_space;
};

struct free_data {
	struct free_data *next;
	uint16 size;
};

struct data_header {
	int32 ref_count;
	addr_t physical_address;
	free_data *first_free;
	uint8 *data_end;
	size_t data_space;
	data_node *first_node;
};

#define MAX_FREE_BUFFER_SIZE (BUFFER_SIZE - sizeof(data_header))

struct net_buffer_private : net_buffer {
	struct list buffers;
	data_node first_node;
};


static status_t append_data(net_buffer *buffer, const void *data, size_t size);
static status_t trim_data(net_buffer *_buffer, size_t newSize);
static status_t remove_header(net_buffer *_buffer, size_t bytes);
static status_t remove_trailer(net_buffer *_buffer, size_t bytes);


#if 1
static void
dump_buffer(net_buffer *_buffer)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	dprintf("buffer %p, size %ld\n", buffer, buffer->size);
	data_node *node = NULL;
	while ((node = (data_node *)list_get_next_item(&buffer->buffers, node)) != NULL) {
		dprintf("  node %p, offset %lu, used %u, header %u, tail %u, header %p\n",
			node, node->offset, node->used, node->header_space,
			node->tail_space, node->header);
		//dump_block((char *)node->start, node->used, "    ");
		dump_block((char *)node->start, min_c(node->used, 32), "    ");
	}
}
#endif


static data_header *
create_data_header(size_t headerSpace)
{
	// TODO: don't use malloc!
	data_header *header = (data_header *)malloc(BUFFER_SIZE);
	if (header == NULL)
		return NULL;

	header->ref_count = 1;
	header->physical_address = 0;
		// TODO: initialize this correctly
	header->data_space = headerSpace;
	header->data_end = (uint8 *)header + sizeof(struct data_header);
	header->first_free = NULL;
	header->first_node = NULL;

	TRACE(("  create new data header %p\n", header));
	return header;
}


static void
release_data_header(data_header *header)
{
	if (atomic_add(&header->ref_count, -1) != 1)
		return;

	TRACE(("  free header %p\n", header));
	free(header);
}


inline void
acquire_data_header(data_header *header)
{
	atomic_add(&header->ref_count, 1);
}


static void
free_data_header_space(data_header *header, uint8 *data, size_t size)
{
	if (size < sizeof(free_data))
		size = sizeof(free_data);

	free_data *freeData = (free_data *)data;
	freeData->next = header->first_free;
	freeData->size = size;

	header->first_free = freeData;
	header->data_space += size;
	// TODO: the first node's header space could grow again
}


/*!
	Tries to allocate \a size bytes from the free space in the header.
*/
static uint8 *
alloc_data_header_space(data_header *header, size_t size)
{
	if (size < sizeof(free_data))
		size = sizeof(free_data);

	if (header->first_free != NULL && header->first_free->size >= size) {
		// the first entry of the free list is large enough for this allocation
		uint8 *data = (uint8 *)header->first_free;
		header->first_free = header->first_free->next;
		return data;
	}

	if (header->data_space < size) {
		// there is no untouched space left, search the free list
		free_data *freeData = header->first_free;
		free_data *last = NULL;
		while (freeData != NULL) {
			if (last != NULL && freeData->size >= size) {
				// take this one
				last->next = freeData->next;
				return (uint8 *)freeData;
			}

			last = freeData;
			freeData = freeData->next;
		}

		return NULL;
	}

	// allocate new space

	uint8 *data = header->data_end;
	header->data_end += size;
	header->data_space -= size;

	if (header->first_node != NULL)
		header->first_node->header_space -= size;

	return data;
}
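

/* Illustrative sketch, not part of the module (kept out of the build by the
   "#if 0" guard): how the per-slab allocator above is meant to be used. The
   example_data_header_allocation() name is made up for this sketch; the
   functions it calls are the ones defined above. */
#if 0
static void
example_data_header_allocation()
{
	// one BUFFER_SIZE slab with 128 bytes reserved as header space
	data_header *header = create_data_header(128);
	if (header == NULL)
		return;

	// carve two chunks out of the slab; every allocation is at least
	// sizeof(free_data) bytes so it can be threaded into the free list later
	uint8 *first = alloc_data_header_space(header, sizeof(data_node));
	uint8 *second = alloc_data_header_space(header, sizeof(data_node));

	// returning a chunk puts it at the head of the free list; the next
	// allocation of the same size reuses it, so "third" equals "first" here
	free_data_header_space(header, first, sizeof(data_node));
	uint8 *third = alloc_data_header_space(header, sizeof(data_node));

	(void)second;
	(void)third;
	release_data_header(header);
}
#endif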


/*!
	Initializes the first data_node of a data_header.
	The node must have been assigned to the header already.
*/
static void
init_first_data_node(data_node *node)
{
	data_header *header = node->header;

	node->offset = 0;
	node->start = header->data_end + header->data_space;
	node->used = 0;
	node->header_space = header->data_space;
	node->tail_space = MAX_FREE_BUFFER_SIZE - header->data_space;

	header->first_node = node;
}


static data_node *
add_data_node(data_header *header, data_header *located = NULL)
{
	if (located == NULL)
		located = header;

	data_node *node = (data_node *)alloc_data_header_space(located,
		sizeof(data_node));
	if (node == NULL)
		return NULL;

	TRACE(("  add data node %p to header %p\n", node, header));
	acquire_data_header(header);
	if (located != header)
		acquire_data_header(located);

	memset(node, 0, sizeof(struct data_node));
	node->located = located;
	node->header = header;
	return node;
}


void
remove_data_node(data_node *node)
{
	data_header *located = node->located;

	TRACE(("  remove data node %p from header %p (located %p)\n",
		node, node->header, located));

	if (located != node->header)
		release_data_header(node->header);

	if (located == NULL)
		return;

	free_data_header_space(located, (uint8 *)node, sizeof(data_node));
	if (located->first_node == node) {
		located->first_node = NULL;
		located->data_space = 0;
	}

	release_data_header(located);
}


static inline data_node *
get_node_at_offset(net_buffer_private *buffer, size_t offset)
{
	data_node *node = (data_node *)list_get_first_item(&buffer->buffers);
	while (node->offset + node->used < offset) {
		node = (data_node *)list_get_next_item(&buffer->buffers, node);
		if (node == NULL)
			return NULL;
	}

	return node;
}


//	#pragma mark -


static net_buffer *
create_buffer(size_t headerSpace)
{
	net_buffer_private *buffer = (net_buffer_private *)malloc(
		sizeof(struct net_buffer_private));
	if (buffer == NULL)
		return NULL;

	TRACE(("create buffer %p\n", buffer));

	data_header *header = create_data_header(headerSpace);
	if (header == NULL) {
		free(buffer);
		return NULL;
	}

	buffer->first_node.header = header;
	buffer->first_node.located = NULL;
	init_first_data_node(&buffer->first_node);

	list_init(&buffer->buffers);
	list_add_item(&buffer->buffers, &buffer->first_node);

	buffer->source.ss_len = 0;
	buffer->destination.ss_len = 0;
	buffer->interface = NULL;
	buffer->offset = 0;
	buffer->flags = 0;
	buffer->size = 0;

	return buffer;
}


static void
free_buffer(net_buffer *_buffer)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	TRACE(("free buffer %p\n", buffer));

	data_node *node;
	while ((node = (data_node *)list_remove_head_item(&buffer->buffers)) != NULL) {
		remove_data_node(node);
	}

	free(buffer);
}
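

/* Illustrative sketch, not part of the module (kept out of the build by the
   "#if 0" guard): the basic life cycle of a net_buffer as implemented above -
   create it with some reserved header space, append payload, free it. The
   example_create_append_free() name is made up for this sketch. */
#if 0
static void
example_create_append_free()
{
	// reserve 64 bytes of header space for protocol headers that may be
	// prepended later
	net_buffer *buffer = create_buffer(64);
	if (buffer == NULL)
		return;

	uint8 payload[16] = { 0 };
	if (append_data(buffer, payload, sizeof(payload)) == B_OK) {
		dprintf("buffer size after append: %ld\n", buffer->size);
			// prints 16 - the buffer reports the appended bytes as its size
	}

	free_buffer(buffer);
}
#endif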


/*!	Creates a duplicate of the \a buffer. The new buffer does not share internal
	storage; the two buffers are completely independent of each other.
*/
static net_buffer *
duplicate_buffer(net_buffer *_buffer)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	net_buffer *duplicate = create_buffer(buffer->first_node.header_space);
	if (duplicate == NULL)
		return NULL;

	// copy the data from the source buffer

	data_node *node = (data_node *)list_get_first_item(&buffer->buffers);
	while (true) {
		if (append_data(duplicate, node->start, node->used) < B_OK) {
			free_buffer(duplicate);
			return NULL;
		}

		node = (data_node *)list_get_next_item(&buffer->buffers, node);
		if (node == NULL)
			break;
	}

	// copy meta data from source buffer

	memcpy(&duplicate->source, &buffer->source,
		min_c(buffer->source.ss_len, sizeof(sockaddr_storage)));
	memcpy(&duplicate->destination, &buffer->destination,
		min_c(buffer->destination.ss_len, sizeof(sockaddr_storage)));

	duplicate->flags = buffer->flags;
	duplicate->interface = buffer->interface;
	duplicate->offset = buffer->offset;
	duplicate->size = buffer->size;
	duplicate->protocol = buffer->protocol;

	return duplicate;
}


/*!	Clones the buffer by grabbing another reference to the underlying data.
	If that data changes, it will be changed in the clone as well.

	If \a shareFreeSpace is \c true, the cloned buffer may claim the free
	space in the original buffer, just as the original buffer still can. If
	you use this, it is your responsibility to make sure that only one of the
	two buffers actually does so.
*/
static net_buffer *
clone_buffer(net_buffer *_buffer, bool shareFreeSpace)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	TRACE(("clone_buffer(buffer %p)\n", buffer));

	net_buffer_private *clone = (net_buffer_private *)malloc(
		sizeof(struct net_buffer_private));
	if (clone == NULL)
		return NULL;

	data_node *sourceNode = (data_node *)list_get_first_item(&buffer->buffers);
	if (sourceNode == NULL) {
		free(clone);
		return NULL;
	}

	list_init(&clone->buffers);

	// grab a reference to this buffer - all additional nodes will get
	// theirs in add_data_node()
	acquire_data_header(sourceNode->header);
	data_node *node = &clone->first_node;
	node->header = sourceNode->header;
	node->located = NULL;

	while (sourceNode != NULL) {
		node->start = sourceNode->start;
		node->used = sourceNode->used;
		node->offset = sourceNode->offset;

		if (shareFreeSpace) {
			// both buffers could claim the free space - note that this option
			// has to be used carefully
			node->header_space = sourceNode->header_space;
			node->tail_space = sourceNode->tail_space;
		} else {
			// the free space stays with the original buffer
			node->header_space = 0;
			node->tail_space = 0;
		}

		// add node to the clone's list of buffers
		list_add_item(&clone->buffers, node);

		sourceNode = (data_node *)list_get_next_item(&buffer->buffers, sourceNode);
		if (sourceNode == NULL)
			break;

		node = add_data_node(sourceNode->header);
		if (node == NULL) {
			// There was not enough space left for another node in this buffer
			// TODO: handle this case!
			panic("clone buffer hits size limit... (fix me)");
			free(clone);
			return NULL;
		}
	}

	// copy meta data from source buffer

	memcpy(&clone->source, &buffer->source,
		min_c(buffer->source.ss_len, sizeof(sockaddr_storage)));
	memcpy(&clone->destination, &buffer->destination,
		min_c(buffer->destination.ss_len, sizeof(sockaddr_storage)));

	clone->flags = buffer->flags;
	clone->interface = buffer->interface;
	clone->offset = buffer->offset;
	clone->size = buffer->size;
	clone->protocol = buffer->protocol;

	return clone;
}


/*!
	Splits the buffer at the given offset; the leading part (the data before
	\a offset) is returned as a new buffer.
	TODO: optimize and avoid making a copy.
*/
static net_buffer *
split_buffer(net_buffer *from, uint32 offset)
{
	net_buffer *buffer = duplicate_buffer(from);
	if (buffer == NULL)
		return NULL;

	TRACE(("split_buffer(buffer %p -> %p, offset %ld)\n", from, buffer, offset));

	if (trim_data(buffer, offset) == B_OK) {
		if (remove_header(from, offset) == B_OK)
			return buffer;
	}

	free_buffer(buffer);
	return NULL;
}
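

/* Illustrative sketch, not part of the module (kept out of the build by the
   "#if 0" guard): what split_buffer() does with a 10 byte buffer. The
   example_split() name and the sample data are made up for this sketch. */
#if 0
static void
example_split()
{
	net_buffer *buffer = create_buffer(0);
	if (buffer == NULL)
		return;

	uint8 data[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
	if (append_data(buffer, data, sizeof(data)) != B_OK) {
		free_buffer(buffer);
		return;
	}

	// split off the first 4 bytes: "head" ends up with bytes 0-3, while
	// "buffer" keeps bytes 4-9
	net_buffer *head = split_buffer(buffer, 4);
	if (head != NULL) {
		dprintf("head %ld bytes, remainder %ld bytes\n", head->size,
			buffer->size);
			// prints "head 4 bytes, remainder 6 bytes"
		free_buffer(head);
	}

	free_buffer(buffer);
}
#endif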


/*!
	Merges the second buffer with the first. If \a after is \c true, the
	second buffer's contents will be appended to those of the first one,
	otherwise they will be prepended.
	The second buffer will be freed if this function succeeds.
*/
static status_t
merge_buffer(net_buffer *_buffer, net_buffer *_with, bool after)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;
	net_buffer_private *with = (net_buffer_private *)_with;
	if (with == NULL)
		return B_BAD_VALUE;

	TRACE(("merge buffer %p with %p (%s)\n", buffer, with,
		after ? "after" : "before"));
	//dump_buffer(buffer);
	//dprintf("with:\n");
	//dump_buffer(with);

	// TODO: this is currently very simplistic, I really need to finish the
	//	harder part of this implementation (data_node management per header)

	data_node *before = NULL;

	if (!after) {
		// change offset of all nodes already in the buffer
		data_node *node = NULL;
		while (true) {
			node = (data_node *)list_get_next_item(&buffer->buffers, node);
			if (node == NULL)
				break;

			node->offset += with->size;
			if (before == NULL)
				before = node;
		}
	}

	data_node *last = NULL;

	while (true) {
		data_node *node = (data_node *)list_get_next_item(&with->buffers, last);
		if (node == NULL)
			break;

		if ((uint8 *)node > (uint8 *)node->header
			&& (uint8 *)node < (uint8 *)node->header + BUFFER_SIZE) {
			// The node is already stored in its own data header, we can just
			// move it over to the new owner
			list_remove_item(&with->buffers, node);
		} else {
			// we need a new place for this node
			data_node *newNode = add_data_node(node->header);
			if (newNode == NULL) {
				// try again on the buffer's own header
				newNode = add_data_node(node->header, buffer->first_node.header);
				if (newNode == NULL)
					// TODO: try to revert the buffers to their initial state!!
					return ENOBUFS;
			}

			last = node;
			*newNode = *node;
			node = newNode;
				// the old node will get freed with its buffer
		}

		if (after) {
			list_add_item(&buffer->buffers, node);
			node->offset = buffer->size;
		} else
			list_insert_item_before(&buffer->buffers, before, node);

		buffer->size += node->used;
	}

	// the data has been merged completely at this point
	free_buffer(with);

	//dprintf(" merge result:\n");
	//dump_buffer(buffer);
	return B_OK;
}


/*!	Writes into existing allocated memory.
	\return B_BAD_VALUE if you write outside of the buffer's current
		bounds.
*/
static status_t
write_data(net_buffer *_buffer, size_t offset, const void *data, size_t size)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	if (offset + size > buffer->size)
		return B_BAD_VALUE;
	if (size == 0)
		return B_OK;

	// find the first node to write into
	data_node *node = get_node_at_offset(buffer, offset);
	if (node == NULL)
		return B_BAD_VALUE;

	offset -= node->offset;

	while (true) {
		size_t written = min_c(size, node->used - offset);
		memcpy(node->start + offset, data, written);

		size -= written;
		if (size == 0)
			break;

		offset = 0;
		data = (void *)((uint8 *)data + written);

		node = (data_node *)list_get_next_item(&buffer->buffers, node);
		if (node == NULL)
			return B_BAD_VALUE;
	}

	return B_OK;
}


static status_t
read_data(net_buffer *_buffer, size_t offset, void *data, size_t size)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	if (offset + size > buffer->size)
		return B_BAD_VALUE;
	if (size == 0)
		return B_OK;

	// find the first node to read from
	data_node *node = get_node_at_offset(buffer, offset);
	if (node == NULL)
		return B_BAD_VALUE;

	offset -= node->offset;

	while (true) {
		size_t bytesRead = min_c(size, node->used - offset);
		memcpy(data, node->start + offset, bytesRead);

		size -= bytesRead;
		if (size == 0)
			break;

		offset = 0;
		data = (void *)((uint8 *)data + bytesRead);

		node = (data_node *)list_get_next_item(&buffer->buffers, node);
		if (node == NULL)
			return B_BAD_VALUE;
	}

	return B_OK;
}


static status_t
prepend_size(net_buffer *_buffer, size_t size, void **_contiguousBuffer)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;
	data_node *node = (data_node *)list_get_first_item(&buffer->buffers);

	TRACE(("prepend_size(buffer %p, size %ld) [has %ld]\n", buffer, size,
		node->header_space));
	//dump_buffer(buffer);

	if (node->header_space < size) {
		// we need to prepend new buffers

		size_t bytesLeft = size;
		do {
			if (node->header_space == 0) {
				size_t headerSpace = MAX_FREE_BUFFER_SIZE;
				data_header *header = create_data_header(headerSpace);
				if (header == NULL) {
					// TODO: free up headers we already allocated!
					return B_NO_MEMORY;
				}

				data_node *previous = node;

				node = (data_node *)add_data_node(header);
				init_first_data_node(node);

				list_insert_item_before(&buffer->buffers, previous, node);
			}

			size_t willConsume = min_c(bytesLeft, node->header_space);

			node->header_space -= willConsume;
			node->start -= willConsume;
			node->used += willConsume;
			bytesLeft -= willConsume;
		} while (bytesLeft > 0);

		// correct data offset in all nodes

		size_t offset = 0;
		node = NULL;
		while ((node = (data_node *)list_get_next_item(&buffer->buffers,
				node)) != NULL) {
			node->offset = offset;
			offset += node->used;
		}

		if (_contiguousBuffer)
			*_contiguousBuffer = NULL;
	} else {
		// the data fits into this buffer
		node->header_space -= size;
		node->start -= size;
		node->used += size;

		if (_contiguousBuffer)
			*_contiguousBuffer = node->start;

		// adjust offset of following nodes
		while ((node = (data_node *)list_get_next_item(&buffer->buffers, node)) != NULL) {
			node->offset += size;
		}
	}

	buffer->size += size;

	//dprintf(" prepend_size result:\n");
	//dump_buffer(buffer);
	return B_OK;
}


static status_t
prepend_data(net_buffer *buffer, const void *data, size_t size)
{
	void *contiguousBuffer;
	status_t status = prepend_size(buffer, size, &contiguousBuffer);
	if (status < B_OK)
		return status;

	if (contiguousBuffer)
		memcpy(contiguousBuffer, data, size);
	else
		write_data(buffer, 0, data, size);

	//dprintf(" prepend result:\n");
	//dump_buffer(buffer);

	return B_OK;
}
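

/* Illustrative sketch, not part of the module (kept out of the build by the
   "#if 0" guard): the typical send-path pattern the functions above support -
   reserve header space up front, append the payload, then prepend the
   protocol header in front of it. The example_prepend_header() name and the
   sizes used are made up for this sketch. */
#if 0
static void
example_prepend_header()
{
	net_buffer *buffer = create_buffer(32);
	if (buffer == NULL)
		return;

	uint8 payload[8] = { 0 };
	uint8 protocolHeader[4] = { 0 };

	if (append_data(buffer, payload, sizeof(payload)) == B_OK
		&& prepend_data(buffer, protocolHeader, sizeof(protocolHeader)) == B_OK) {
		// the header fits into the reserved header space, so no additional
		// data_header had to be allocated; the header bytes now start at
		// offset 0 and the payload follows at offset 4
		dprintf("size after prepend: %ld\n", buffer->size);
			// prints 12
	}

	free_buffer(buffer);
}
#endif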


static status_t
append_size(net_buffer *_buffer, size_t size, void **_contiguousBuffer)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;
	data_node *node = (data_node *)list_get_last_item(&buffer->buffers);

	TRACE(("append_size(buffer %p, size %ld)\n", buffer, size));
	//dump_buffer(buffer);

	if (node->tail_space < size) {
		// we need to append new buffers

		// compute how many buffers we're going to need
		// TODO: this doesn't leave any tail space, if that should be desired...
		uint32 tailSpace = node->tail_space;
		uint32 minimalHeaderSpace = sizeof(data_header) + 3 * sizeof(data_node);
		uint32 sizeNeeded = size - tailSpace;
		uint32 count = (sizeNeeded + BUFFER_SIZE - minimalHeaderSpace - 1)
			/ (BUFFER_SIZE - minimalHeaderSpace);
		uint32 headerSpace = BUFFER_SIZE - sizeNeeded / count - sizeof(data_header);
		uint32 sizeUsed = MAX_FREE_BUFFER_SIZE - headerSpace;
		uint32 sizeAdded = tailSpace;

		// use up the tail space left in the last node
		node->tail_space -= tailSpace;
		node->used += tailSpace;
		buffer->size += tailSpace;

		// allocate all buffers

		for (uint32 i = 0; i < count; i++) {
			if (i == count - 1) {
				// last data_header - compensate rounding errors
				sizeUsed = size - sizeAdded;
				headerSpace = MAX_FREE_BUFFER_SIZE - sizeUsed;
			}

			data_header *header = create_data_header(headerSpace);
			if (header == NULL) {
				// TODO: free up headers we already allocated!
				return B_NO_MEMORY;
			}

			node = (data_node *)add_data_node(header);
				// this can't fail as we made sure there will be enough header space

			init_first_data_node(node);
			node->tail_space -= sizeUsed;
			node->used = sizeUsed;
			node->offset = buffer->size;

			buffer->size += sizeUsed;
			sizeAdded += sizeUsed;

			list_add_item(&buffer->buffers, node);
		}

		if (_contiguousBuffer)
			*_contiguousBuffer = NULL;

		//dprintf(" append result 1:\n");
		//dump_buffer(buffer);
		return B_OK;
	}

	// the data fits into this buffer
	node->tail_space -= size;

	if (_contiguousBuffer)
		*_contiguousBuffer = node->start + node->used;

	node->used += size;
	buffer->size += size;

	//dprintf(" append result 2:\n");
	//dump_buffer(buffer);
	return B_OK;
}


static status_t
append_data(net_buffer *buffer, const void *data, size_t size)
{
	size_t used = buffer->size;

	void *contiguousBuffer;
	status_t status = append_size(buffer, size, &contiguousBuffer);
	if (status < B_OK)
		return status;

	if (contiguousBuffer)
		memcpy(contiguousBuffer, data, size);
	else
		write_data(buffer, used, data, size);

	return B_OK;
}


/*!
	Removes bytes from the beginning of the buffer.
*/
static status_t
remove_header(net_buffer *_buffer, size_t bytes)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	if (bytes > buffer->size)
		return B_BAD_VALUE;

	TRACE(("remove_header(buffer %p, %ld bytes)\n", buffer, bytes));
	//dump_buffer(buffer);

	size_t left = bytes;
	data_node *node = NULL;

	while (true) {
		node = (data_node *)list_get_first_item(&buffer->buffers);
		if (node == NULL) {
			if (left == 0)
				break;
			return B_ERROR;
		}

		if (node->used > left)
			break;

		// node will be removed completely
		list_remove_item(&buffer->buffers, node);
		left -= node->used;
		remove_data_node(node);
		node = NULL;
	}

	// cut remaining node, if any

	if (node != NULL) {
		size_t cut = min_c(node->used, left);
		node->offset = 0;
		node->start += cut;
		node->header_space += cut;
		node->used -= cut;

		node = (data_node *)list_get_next_item(&buffer->buffers, node);
	}

	// adjust offset of following nodes
	while (node != NULL) {
		node->offset -= bytes;
		node = (data_node *)list_get_next_item(&buffer->buffers, node);
	}

	buffer->size -= bytes;

	//dprintf(" remove result:\n");
	//dump_buffer(buffer);
	return B_OK;
}


/*!
	Removes bytes from the end of the buffer.
*/
static status_t
remove_trailer(net_buffer *buffer, size_t bytes)
{
	return trim_data(buffer, buffer->size - bytes);
}
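

/* Illustrative sketch, not part of the module (kept out of the build by the
   "#if 0" guard): stripping a header and a trailer from a received frame with
   the functions above. The example_remove() name and the sizes are made up
   for this sketch. */
#if 0
static void
example_remove()
{
	net_buffer *buffer = create_buffer(0);
	if (buffer == NULL)
		return;

	uint8 frame[12] = { 0 };
	if (append_data(buffer, frame, sizeof(frame)) != B_OK) {
		free_buffer(buffer);
		return;
	}

	// drop a 4 byte header from the front and a 2 byte trailer from the end;
	// 6 bytes of payload remain and their offsets start at 0 again
	if (remove_header(buffer, 4) == B_OK && remove_trailer(buffer, 2) == B_OK)
		dprintf("payload left: %ld bytes\n", buffer->size);
			// prints 6

	free_buffer(buffer);
}
#endif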


/*!
	Trims the buffer to the specified \a newSize by removing space from
	the end of the buffer.
*/
static status_t
trim_data(net_buffer *_buffer, size_t newSize)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;
	TRACE(("trim_data(buffer %p, newSize = %ld, buffer size = %ld)\n",
		buffer, newSize, buffer->size));
	//dump_buffer(buffer);

	if (newSize > buffer->size)
		return B_BAD_VALUE;
	if (newSize == buffer->size)
		return B_OK;

	data_node *node = get_node_at_offset(buffer, newSize);
	if (node == NULL) {
		// trim size greater than buffer size
		return B_BAD_VALUE;
	}

	int32 diff = node->used + node->offset - newSize;
	node->tail_space += diff;
	node->used -= diff;

	if (node->used > 0)
		node = (data_node *)list_get_next_item(&buffer->buffers, node);

	while (node != NULL) {
		data_node *next = (data_node *)list_get_next_item(&buffer->buffers, node);
		list_remove_item(&buffer->buffers, node);
		remove_data_node(node);

		node = next;
	}

	buffer->size = newSize;

	//dprintf(" trim result:\n");
	//dump_buffer(buffer);
	return B_OK;
}


/*!
	Appends data coming from buffer \a source to the buffer \a buffer. The
	data is only cloned, that is, it is not copied, just referenced.
*/
status_t
append_cloned_data(net_buffer *_buffer, net_buffer *_source, uint32 offset,
	size_t bytes)
{
	if (bytes == 0)
		return B_OK;

	net_buffer_private *buffer = (net_buffer_private *)_buffer;
	net_buffer_private *source = (net_buffer_private *)_source;
	TRACE(("append_cloned_data(buffer %p, source %p, offset = %ld, bytes = %ld)\n",
		buffer, source, offset, bytes));

	if (source->size < offset + bytes || source->size < offset)
		return B_BAD_VALUE;

	// find the data_node to start with from the source buffer
	data_node *node = get_node_at_offset(source, offset);
	if (node == NULL) {
		// the offset is outside of the source buffer
		return B_BAD_VALUE;
	}

	while (node != NULL && bytes > 0) {
		data_node *clone = add_data_node(node->header, buffer->first_node.header);
		if (clone == NULL)
			clone = add_data_node(node->header);
		if (clone == NULL) {
			// There is not enough space in the buffer for another node
			// TODO: handle this case!
			dump_buffer(buffer);
			dprintf("SOURCE:\n");
			dump_buffer(source);
			panic("appending clone buffer in new header not implemented\n");
			return ENOBUFS;
		}

		if (offset)
			offset -= node->offset;

		clone->offset = buffer->size;
		clone->start = node->start + offset;
		clone->used = min_c(bytes, node->used - offset);
		clone->header_space = 0;
		clone->tail_space = 0;

		list_add_item(&buffer->buffers, clone);

		offset = 0;
		bytes -= clone->used;
		buffer->size += clone->used;
		node = (data_node *)list_get_next_item(&source->buffers, node);
	}

	if (bytes != 0)
		panic("add_cloned_data() failed, bytes != 0!\n");

	//dprintf(" append cloned result:\n");
	//dump_buffer(buffer);
	return B_OK;
}


/*!
	Tries to directly access the requested space in the buffer.
	If the space is contiguous, the function will succeed and place a pointer
	to that space into \a _contiguousBuffer.

	\return B_BAD_VALUE if the offset is outside of the buffer's bounds.
	\return B_ERROR in case the buffer is not contiguous at that location.
*/
static status_t
direct_access(net_buffer *_buffer, uint32 offset, size_t size,
	void **_contiguousBuffer)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	//TRACE(("direct_access(buffer %p, offset %ld, size %ld)\n", buffer, offset, size));

	if (offset + size > buffer->size)
		return B_BAD_VALUE;

	// find the node to access
	data_node *node = get_node_at_offset(buffer, offset);
	if (node == NULL)
		return B_BAD_VALUE;

	offset -= node->offset;

	if (size > node->used - offset)
		return B_ERROR;

	*_contiguousBuffer = node->start + offset;
	return B_OK;
}


static int32
checksum_data(net_buffer *_buffer, uint32 offset, size_t size, bool finalize)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;

	if (offset + size > buffer->size || size == 0)
		return B_BAD_VALUE;

	// find the first node to read from
	data_node *node = get_node_at_offset(buffer, offset);
	if (node == NULL)
		return B_ERROR;

	offset -= node->offset;

	// Since the maximum buffer size is 65536 bytes, the sum cannot overflow
	// 32 bits - we don't need to handle overflow in the loop and can safely
	// fold the sum afterwards
	uint32 sum = 0;

	while (true) {
		size_t bytes = min_c(size, node->used - offset);
		if ((offset + node->offset) & 1) {
			// if we start at an odd offset, we have to swap the partial checksum
			sum += __swap_int16(compute_checksum(node->start + offset, bytes));
		} else
			sum += compute_checksum(node->start + offset, bytes);

		size -= bytes;
		if (size == 0)
			break;

		offset = 0;

		node = (data_node *)list_get_next_item(&buffer->buffers, node);
		if (node == NULL)
			return B_ERROR;
	}

	while (sum >> 16) {
		sum = (sum & 0xffff) + (sum >> 16);
	}

	if (!finalize)
		return (uint16)sum;

	return (uint16)~sum;
}


static uint32
get_iovecs(net_buffer *_buffer, struct iovec *iovecs, uint32 vecCount)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;
	data_node *node = (data_node *)list_get_first_item(&buffer->buffers);
	uint32 count = 0;

	while (node != NULL && count < vecCount) {
		if (node->used > 0) {
			iovecs[count].iov_base = node->start;
			iovecs[count].iov_len = node->used;
			count++;
		}

		node = (data_node *)list_get_next_item(&buffer->buffers, node);
	}

	return count;
}


static uint32
count_iovecs(net_buffer *_buffer)
{
	net_buffer_private *buffer = (net_buffer_private *)_buffer;
	data_node *node = (data_node *)list_get_first_item(&buffer->buffers);
	uint32 count = 0;

	while (node != NULL) {
		if (node->used > 0)
			count++;

		node = (data_node *)list_get_next_item(&buffer->buffers, node);
	}

	return count;
}
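

/* Illustrative sketch, not part of the module (kept out of the build by the
   "#if 0" guard): checksum_data() computes the 16 bit one's complement sum
   used by IP, TCP and UDP; carries are folded back into the low 16 bits, and
   partial sums that start at an odd buffer offset are byte-swapped so the
   per-node contributions stay word aligned. The example_checksum() name and
   the sample bytes are made up for this sketch; the exact numeric result
   depends on the host byte order. */
#if 0
static void
example_checksum()
{
	net_buffer *buffer = create_buffer(0);
	if (buffer == NULL)
		return;

	const uint8 data[4] = { 0x45, 0x00, 0x00, 0x54 };
	if (append_data(buffer, data, sizeof(data)) != B_OK) {
		free_buffer(buffer);
		return;
	}

	// with finalize == false the folded sum is returned, with finalize == true
	// its one's complement - the value a protocol header's checksum field
	// would be set to
	int32 sum = checksum_data(buffer, 0, sizeof(data), false);
	int32 checksum = checksum_data(buffer, 0, sizeof(data), true);
	dprintf("sum %#lx, checksum %#lx\n", (long)sum, (long)checksum);

	free_buffer(buffer);
}
#endif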


static status_t
std_ops(int32 op, ...)
{
	switch (op) {
		case B_MODULE_INIT:
		case B_MODULE_UNINIT:
			return B_OK;

		default:
			return B_ERROR;
	}
}


net_buffer_module_info gNetBufferModule = {
	{
		NET_BUFFER_MODULE_NAME,
		0,
		std_ops
	},
	create_buffer,
	free_buffer,

	duplicate_buffer,
	clone_buffer,
	split_buffer,
	merge_buffer,

	prepend_size,
	prepend_data,
	append_size,
	append_data,
	NULL,		// insert
	NULL,		// remove
	remove_header,
	remove_trailer,
	trim_data,
	append_cloned_data,

	NULL,		// associate_data

	direct_access,
	read_data,
	write_data,

	checksum_data,

	NULL,		// get_memory_map
	get_iovecs,
	count_iovecs,

	dump_buffer,	// dump
};