1 /*
2 * Copyright 2006-2016, Haiku, Inc. All Rights Reserved.
3 * Distributed under the terms of the MIT License.
4 *
5 * Authors:
6 * Axel Dörfler, axeld@pinc-software.de
7 * Ingo Weinhold, ingo_weinhold@gmx.de
8 */
9
10
11 #include "utility.h"
12
13 #include <net_buffer.h>
14 #include <slab/Slab.h>
15 #include <tracing.h>
16 #include <util/list.h>
17
18 #include <ByteOrder.h>
19 #include <debug.h>
20 #include <kernel.h>
21 #include <KernelExport.h>
22 #include <util/DoublyLinkedList.h>
23
24 #include <algorithm>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <sys/param.h>
28 #include <sys/uio.h>
29
30 #include "ancillary_data.h"
31 #include "interfaces.h"
32
33 #include "paranoia_config.h"
34
35
36 //#define TRACE_BUFFER
37 #ifdef TRACE_BUFFER
38 # define TRACE(x) dprintf x
39 #else
40 # define TRACE(x) ;
41 #endif
42
43 #define BUFFER_SIZE 2048
44 // the maximum implementation-derived buffer size is 65536
45
46 #define ENABLE_DEBUGGER_COMMANDS 1
47 #define ENABLE_STATS 1
48 #define PARANOID_BUFFER_CHECK NET_BUFFER_PARANOIA
49
50 #define COMPONENT_PARANOIA_LEVEL NET_BUFFER_PARANOIA
51 #include <debug_paranoia.h>
52
53 #define DATA_NODE_READ_ONLY 0x1
54 #define DATA_NODE_STORED_HEADER 0x2
55
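// Buffer data is kept in BUFFER_SIZE sized slab objects. Each object starts
// with a data_header, followed by the reserved header space (from which
// data_nodes are allocated and into which data may later be prepended), the
// buffer data itself, and finally the remaining tail space.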
56 struct header_space {
57 uint16 size;
58 uint16 free;
59 };
60
61 struct free_data {
62 struct free_data* next;
63 uint16 size;
64 };
65
66 struct data_header {
67 int32 ref_count;
68 addr_t physical_address;
69 free_data* first_free;
70 uint8* data_end;
71 header_space space;
72 uint16 tail_space;
73 };
74
75 struct data_node {
76 struct list_link link;
77 struct data_header* header;
78 struct data_header* located;
79 size_t offset; // the net_buffer-wide offset of this node
80 uint8* start; // points to the start of the data
81 uint16 flags;
82 uint16 used; // defines how much memory is used by this node
83
84 uint16 HeaderSpace() const
85 {
86 if ((flags & DATA_NODE_READ_ONLY) != 0)
87 return 0;
88 return header->space.free;
89 }
90
91 void AddHeaderSpace(uint16 toAdd)
92 {
93 if ((flags & DATA_NODE_READ_ONLY) == 0) {
94 header->space.size += toAdd;
95 header->space.free += toAdd;
96 }
97 }
98
99 void SubtractHeaderSpace(uint16 toSubtract)
100 {
101 if ((flags & DATA_NODE_READ_ONLY) == 0) {
102 header->space.size -= toSubtract;
103 header->space.free -= toSubtract;
104 }
105 }
106
107 uint16 TailSpace() const
108 {
109 if ((flags & DATA_NODE_READ_ONLY) != 0)
110 return 0;
111 return header->tail_space;
112 }
113
114 void SetTailSpace(uint16 space)
115 {
116 if ((flags & DATA_NODE_READ_ONLY) == 0)
117 header->tail_space = space;
118 }
119
120 void FreeSpace()
121 {
122 if ((flags & DATA_NODE_READ_ONLY) == 0) {
123 uint16 space = used + header->tail_space;
124 header->space.size += space;
125 header->space.free += space;
126 header->tail_space = 0;
127 }
128 }
129 };
130
131
132 // TODO: we should think about moving the address fields into the buffer
133 // data itself via associated data or something like this. Or this
134 // structure as a whole, too...
135 struct net_buffer_private : net_buffer {
136 struct list buffers;
137 data_header* allocation_header;
138 // the current place where we allocate header space (nodes, ...)
139 ancillary_data_container* ancillary_data;
140 size_t stored_header_length;
141
142 struct {
143 struct sockaddr_storage source;
144 struct sockaddr_storage destination;
145 } storage;
146 };
147
148
149 #define DATA_HEADER_SIZE _ALIGN(sizeof(data_header))
150 #define DATA_NODE_SIZE _ALIGN(sizeof(data_node))
151 #define MAX_FREE_BUFFER_SIZE (BUFFER_SIZE - DATA_HEADER_SIZE)
152
153
154 static object_cache* sNetBufferCache;
155 static object_cache* sDataNodeCache;
156
157
158 static status_t append_data(net_buffer* buffer, const void* data, size_t size);
159 static status_t trim_data(net_buffer* _buffer, size_t newSize);
160 static status_t remove_header(net_buffer* _buffer, size_t bytes);
161 static status_t remove_trailer(net_buffer* _buffer, size_t bytes);
162 static status_t append_cloned_data(net_buffer* _buffer, net_buffer* _source,
163 uint32 offset, size_t bytes);
164 static status_t read_data(net_buffer* _buffer, size_t offset, void* data,
165 size_t size);
166
167
168 #if ENABLE_STATS
169 static int32 sAllocatedDataHeaderCount = 0;
170 static int32 sAllocatedNetBufferCount = 0;
171 static int32 sEverAllocatedDataHeaderCount = 0;
172 static int32 sEverAllocatedNetBufferCount = 0;
173 static int32 sMaxAllocatedDataHeaderCount = 0;
174 static int32 sMaxAllocatedNetBufferCount = 0;
175 #endif
176
177
178 #if NET_BUFFER_TRACING
179
180
181 namespace NetBufferTracing {
182
183
184 class NetBufferTraceEntry : public AbstractTraceEntry {
185 public:
186 NetBufferTraceEntry(net_buffer* buffer)
187 :
188 fBuffer(buffer)
189 {
190 #if NET_BUFFER_TRACING_STACK_TRACE
191 fStackTrace = capture_tracing_stack_trace(
192 NET_BUFFER_TRACING_STACK_TRACE, 0, false);
193 #endif
194 }
195
196 #if NET_BUFFER_TRACING_STACK_TRACE
197 virtual void DumpStackTrace(TraceOutput& out)
198 {
199 out.PrintStackTrace(fStackTrace);
200 }
201 #endif
202
203 protected:
204 net_buffer* fBuffer;
205 #if NET_BUFFER_TRACING_STACK_TRACE
206 tracing_stack_trace* fStackTrace;
207 #endif
208 };
209
210
211 class Create : public NetBufferTraceEntry {
212 public:
213 Create(size_t headerSpace, net_buffer* buffer)
214 :
215 NetBufferTraceEntry(buffer),
216 fHeaderSpace(headerSpace)
217 {
218 Initialized();
219 }
220
221 virtual void AddDump(TraceOutput& out)
222 {
223 out.Print("net buffer create: header space: %lu -> buffer: %p",
224 fHeaderSpace, fBuffer);
225 }
226
227 private:
228 size_t fHeaderSpace;
229 };
230
231
232 class Free : public NetBufferTraceEntry {
233 public:
234 Free(net_buffer* buffer)
235 :
236 NetBufferTraceEntry(buffer)
237 {
238 Initialized();
239 }
240
241 virtual void AddDump(TraceOutput& out)
242 {
243 out.Print("net buffer free: buffer: %p", fBuffer);
244 }
245 };
246
247
248 class Duplicate : public NetBufferTraceEntry {
249 public:
250 Duplicate(net_buffer* buffer, net_buffer* clone)
251 :
252 NetBufferTraceEntry(buffer),
253 fClone(clone)
254 {
255 Initialized();
256 }
257
258 virtual void AddDump(TraceOutput& out)
259 {
260 out.Print("net buffer dup: buffer: %p -> %p", fBuffer, fClone);
261 }
262
263 private:
264 net_buffer* fClone;
265 };
266
267
268 class Clone : public NetBufferTraceEntry {
269 public:
270 Clone(net_buffer* buffer, bool shareFreeSpace, net_buffer* clone)
271 :
272 NetBufferTraceEntry(buffer),
273 fClone(clone),
274 fShareFreeSpace(shareFreeSpace)
275 {
276 Initialized();
277 }
278
279 virtual void AddDump(TraceOutput& out)
280 {
281 out.Print("net buffer clone: buffer: %p, share free space: %s "
282 "-> %p", fBuffer, fShareFreeSpace ? "true" : "false", fClone);
283 }
284
285 private:
286 net_buffer* fClone;
287 bool fShareFreeSpace;
288 };
289
290
291 class Split : public NetBufferTraceEntry {
292 public:
293 Split(net_buffer* buffer, uint32 offset, net_buffer* newBuffer)
294 :
295 NetBufferTraceEntry(buffer),
296 fNewBuffer(newBuffer),
297 fOffset(offset)
298 {
299 Initialized();
300 }
301
302 virtual void AddDump(TraceOutput& out)
303 {
304 out.Print("net buffer split: buffer: %p, offset: %lu "
305 "-> %p", fBuffer, fOffset, fNewBuffer);
306 }
307
308 private:
309 net_buffer* fNewBuffer;
310 uint32 fOffset;
311 };
312
313
314 class Merge : public NetBufferTraceEntry {
315 public:
316 Merge(net_buffer* buffer, net_buffer* otherBuffer, bool after)
317 :
318 NetBufferTraceEntry(buffer),
319 fOtherBuffer(otherBuffer),
320 fAfter(after)
321 {
322 Initialized();
323 }
324
325 virtual void AddDump(TraceOutput& out)
326 {
327 out.Print("net buffer merge: buffers: %p + %p, after: %s "
328 "-> %p", fBuffer, fOtherBuffer, fAfter ? "true" : "false",
329 fOtherBuffer);
330 }
331
332 private:
333 net_buffer* fOtherBuffer;
334 bool fAfter;
335 };
336
337
338 class AppendCloned : public NetBufferTraceEntry {
339 public:
340 AppendCloned(net_buffer* buffer, net_buffer* source, uint32 offset,
341 size_t size)
342 :
343 NetBufferTraceEntry(buffer),
344 fSource(source),
345 fOffset(offset),
346 fSize(size)
347 {
348 Initialized();
349 }
350
351 virtual void AddDump(TraceOutput& out)
352 {
353 out.Print("net buffer append cloned: buffer: %p, from: %p, "
354 "offset: %lu, size: %lu", fBuffer, fSource, fOffset, fSize);
355 }
356
357 private:
358 net_buffer* fSource;
359 uint32 fOffset;
360 size_t fSize;
361 };
362
363
364 class PrependSize : public NetBufferTraceEntry {
365 public:
366 PrependSize(net_buffer* buffer, size_t size)
367 :
368 NetBufferTraceEntry(buffer),
369 fSize(size)
370 {
371 Initialized();
372 }
373
374 virtual void AddDump(TraceOutput& out)
375 {
376 out.Print("net buffer prepend size: buffer: %p, size: %lu", fBuffer,
377 fSize);
378 }
379
380 private:
381 size_t fSize;
382 };
383
384
385 class AppendSize : public NetBufferTraceEntry {
386 public:
387 AppendSize(net_buffer* buffer, size_t size)
388 :
389 NetBufferTraceEntry(buffer),
390 fSize(size)
391 {
392 Initialized();
393 }
394
395 virtual void AddDump(TraceOutput& out)
396 {
397 out.Print("net buffer append size: buffer: %p, size: %lu", fBuffer,
398 fSize);
399 }
400
401 private:
402 size_t fSize;
403 };
404
405
406 class RemoveHeader : public NetBufferTraceEntry {
407 public:
408 RemoveHeader(net_buffer* buffer, size_t size)
409 :
410 NetBufferTraceEntry(buffer),
411 fSize(size)
412 {
413 Initialized();
414 }
415
416 virtual void AddDump(TraceOutput& out)
417 {
418 out.Print("net buffer remove header: buffer: %p, size: %lu",
419 fBuffer, fSize);
420 }
421
422 private:
423 size_t fSize;
424 };
425
426
427 class Trim : public NetBufferTraceEntry {
428 public:
429 Trim(net_buffer* buffer, size_t size)
430 :
431 NetBufferTraceEntry(buffer),
432 fSize(size)
433 {
434 Initialized();
435 }
436
437 virtual void AddDump(TraceOutput& out)
438 {
439 out.Print("net buffer trim: buffer: %p, size: %lu",
440 fBuffer, fSize);
441 }
442
443 private:
444 size_t fSize;
445 };
446
447
448 class Read : public NetBufferTraceEntry {
449 public:
450 Read(net_buffer* buffer, uint32 offset, void* data, size_t size)
451 :
452 NetBufferTraceEntry(buffer),
453 fData(data),
454 fOffset(offset),
455 fSize(size)
456 {
457 Initialized();
458 }
459
460 virtual void AddDump(TraceOutput& out)
461 {
462 out.Print("net buffer read: buffer: %p, offset: %lu, size: %lu, "
463 "data: %p", fBuffer, fOffset, fSize, fData);
464 }
465
466 private:
467 void* fData;
468 uint32 fOffset;
469 size_t fSize;
470 };
471
472
473 class Write : public NetBufferTraceEntry {
474 public:
475 Write(net_buffer* buffer, uint32 offset, const void* data, size_t size)
476 :
477 NetBufferTraceEntry(buffer),
478 fData(data),
479 fOffset(offset),
480 fSize(size)
481 {
482 Initialized();
483 }
484
485 virtual void AddDump(TraceOutput& out)
486 {
487 out.Print("net buffer write: buffer: %p, offset: %lu, size: %lu, "
488 "data: %p", fBuffer, fOffset, fSize, fData);
489 }
490
491 private:
492 const void* fData;
493 uint32 fOffset;
494 size_t fSize;
495 };
496
497
498 #if NET_BUFFER_TRACING >= 2
499
500 class DataHeaderTraceEntry : public AbstractTraceEntry {
501 public:
502 DataHeaderTraceEntry(data_header* header)
503 :
504 fHeader(header)
505 {
506 }
507
508 protected:
509 data_header* fHeader;
510 };
511
512
513 class CreateDataHeader : public DataHeaderTraceEntry {
514 public:
515 CreateDataHeader(data_header* header)
516 :
517 DataHeaderTraceEntry(header)
518 {
519 Initialized();
520 }
521
522 virtual void AddDump(TraceOutput& out)
523 {
524 out.Print("net buffer data header create: header: %p", fHeader);
525 }
526 };
527
528
529 class AcquireDataHeader : public DataHeaderTraceEntry {
530 public:
531 AcquireDataHeader(data_header* header, int32 refCount)
532 :
533 DataHeaderTraceEntry(header),
534 fRefCount(refCount)
535 {
536 Initialized();
537 }
538
539 virtual void AddDump(TraceOutput& out)
540 {
541 out.Print("net buffer data header acquire: header: %p "
542 "-> ref count: %ld", fHeader, fRefCount);
543 }
544
545 private:
546 int32 fRefCount;
547 };
548
549
550 class ReleaseDataHeader : public DataHeaderTraceEntry {
551 public:
552 ReleaseDataHeader(data_header* header, int32 refCount)
553 :
554 DataHeaderTraceEntry(header),
555 fRefCount(refCount)
556 {
557 Initialized();
558 }
559
560 virtual void AddDump(TraceOutput& out)
561 {
562 out.Print("net buffer data header release: header: %p "
563 "-> ref count: %ld", fHeader, fRefCount);
564 }
565
566 private:
567 int32 fRefCount;
568 };
569
570 # define T2(x) new(std::nothrow) NetBufferTracing::x
571 #else
572 # define T2(x)
573 #endif // NET_BUFFER_TRACING >= 2
574
575 } // namespace NetBufferTracing
576
577 # define T(x) new(std::nothrow) NetBufferTracing::x
578
579 #else
580 # define T(x)
581 # define T2(x)
582 #endif // NET_BUFFER_TRACING
583
584
585 static void
586 dump_address(const char* prefix, sockaddr* address,
587 net_interface_address* interfaceAddress)
588 {
589 if (address == NULL || address->sa_len == 0)
590 return;
591
592 if (interfaceAddress == NULL || interfaceAddress->domain == NULL) {
593 dprintf(" %s: length %u, family %u\n", prefix, address->sa_len,
594 address->sa_family);
595
596 dump_block((char*)address + 2, address->sa_len - 2, " ");
597 } else {
598 char buffer[64];
599 interfaceAddress->domain->address_module->print_address_buffer(address,
600 buffer, sizeof(buffer), true);
601
602 dprintf(" %s: %s\n", prefix, buffer);
603 }
604 }
605
606
607 static void
608 dump_buffer(net_buffer* _buffer)
609 {
610 net_buffer_private* buffer = (net_buffer_private*)_buffer;
611
612 dprintf("buffer %p, size %" B_PRIu32 ", msg_flags %" B_PRIx32 ", buffer_flags %" B_PRIx16
613 ", stored header %" B_PRIuSIZE ", interface address %p\n", buffer, buffer->size,
614 buffer->msg_flags, buffer->buffer_flags, buffer->stored_header_length,
615 buffer->interface_address);
616
617 dump_address("source", buffer->source, buffer->interface_address);
618 dump_address("destination", buffer->destination, buffer->interface_address);
619
620 data_node* node = NULL;
621 while ((node = (data_node*)list_get_next_item(&buffer->buffers, node))
622 != NULL) {
623 dprintf(" node %p, offset %lu, used %u, header %u, tail %u, "
624 "header %p\n", node, node->offset, node->used, node->HeaderSpace(),
625 node->TailSpace(), node->header);
626
627 if ((node->flags & DATA_NODE_STORED_HEADER) != 0) {
628 dump_block((char*)node->start - buffer->stored_header_length,
629 min_c(buffer->stored_header_length, 64), " s ");
630 }
631 dump_block((char*)node->start, min_c(node->used, 64), " ");
632 }
633 }
634
635 #if ENABLE_DEBUGGER_COMMANDS
636
637 static int
638 dump_net_buffer(int argc, char** argv)
639 {
640 if (argc != 2) {
641 kprintf("usage: %s [address]\n", argv[0]);
642 return 0;
643 }
644
645 dump_buffer((net_buffer*)parse_expression(argv[1]));
646 return 0;
647 }
648
649 #endif // ENABLE_DEBUGGER_COMMANDS
650
651 #if ENABLE_STATS
652
653 static int
654 dump_net_buffer_stats(int argc, char** argv)
655 {
656 kprintf("allocated data headers: %7" B_PRId32 " / %7" B_PRId32 ", peak %7"
657 B_PRId32 "\n", sAllocatedDataHeaderCount, sEverAllocatedDataHeaderCount,
658 sMaxAllocatedDataHeaderCount);
659 kprintf("allocated net buffers: %7" B_PRId32 " / %7" B_PRId32 ", peak %7"
660 B_PRId32 "\n", sAllocatedNetBufferCount, sEverAllocatedNetBufferCount,
661 sMaxAllocatedNetBufferCount);
662 return 0;
663 }
664
665 #endif // ENABLE_STATS
666
667 #if PARANOID_BUFFER_CHECK
668
669 static void
670 check_buffer(net_buffer* _buffer)
671 {
672 net_buffer_private* buffer = (net_buffer_private*)_buffer;
673
674 // sum up the size of all nodes
675 size_t size = 0;
676
677 data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
678 while (node != NULL) {
679 if (node->offset != size) {
680 panic("net_buffer %p: bad node %p offset (%lu vs. %lu)",
681 buffer, node, node->offset, size);
682 return;
683 }
684 size += node->used;
685 node = (data_node*)list_get_next_item(&buffer->buffers, node);
686 }
687
688 if (size != buffer->size) {
689 panic("net_buffer %p size != sum of its data node sizes (%lu vs. %lu)",
690 buffer, buffer->size, size);
691 return;
692 }
693 }
694
695
696 #if 0
697 static void
698 check_buffer_contents(net_buffer* buffer, size_t offset, const void* data,
699 size_t size)
700 {
701 void* bufferData = malloc(size);
702 if (bufferData == NULL)
703 return;
704
705 if (read_data(buffer, offset, bufferData, size) == B_OK) {
706 if (memcmp(bufferData, data, size) != 0) {
707 int32 index = 0;
708 while (((uint8*)data)[index] == ((uint8*)bufferData)[index])
709 index++;
710 panic("check_buffer_contents(): contents check failed at index "
711 "%ld, buffer: %p, offset: %lu, size: %lu", index, buffer,
712 offset, size);
713 }
714 } else {
715 panic("failed to read from buffer %p, offset: %lu, size: %lu",
716 buffer, offset, size);
717 }
718
719 free(bufferData);
720 }
721
722
723 static void
724 check_buffer_contents(net_buffer* buffer, size_t offset, net_buffer* source,
725 size_t sourceOffset, size_t size)
726 {
727 void* bufferData = malloc(size);
728 if (bufferData == NULL)
729 return;
730
731 if (read_data(source, sourceOffset, bufferData, size) == B_OK) {
732 check_buffer_contents(buffer, offset, bufferData, size);
733 } else {
734 panic("failed to read from source buffer %p, offset: %lu, size: %lu",
735 source, sourceOffset, size);
736 }
737
738 free(bufferData);
739 }
740 #endif
741
742
743 # define CHECK_BUFFER(buffer) check_buffer(buffer)
744 #else
745 # define CHECK_BUFFER(buffer) do {} while (false)
746 #endif // !PARANOID_BUFFER_CHECK
747
748
749 static inline data_header*
750 allocate_data_header()
751 {
752 #if ENABLE_STATS
753 int32 current = atomic_add(&sAllocatedDataHeaderCount, 1) + 1;
754 int32 max = atomic_get(&sMaxAllocatedDataHeaderCount);
755 if (current > max)
756 atomic_test_and_set(&sMaxAllocatedDataHeaderCount, current, max);
757
758 atomic_add(&sEverAllocatedDataHeaderCount, 1);
759 #endif
760 return (data_header*)object_cache_alloc(sDataNodeCache, 0);
761 }
762
763
764 static inline net_buffer_private*
765 allocate_net_buffer()
766 {
767 #if ENABLE_STATS
768 int32 current = atomic_add(&sAllocatedNetBufferCount, 1) + 1;
769 int32 max = atomic_get(&sMaxAllocatedNetBufferCount);
770 if (current > max)
771 atomic_test_and_set(&sMaxAllocatedNetBufferCount, current, max);
772
773 atomic_add(&sEverAllocatedNetBufferCount, 1);
774 #endif
775 return (net_buffer_private*)object_cache_alloc(sNetBufferCache, 0);
776 }
777
778
779 static inline void
780 free_data_header(data_header* header)
781 {
782 #if ENABLE_STATS
783 if (header != NULL)
784 atomic_add(&sAllocatedDataHeaderCount, -1);
785 #endif
786 object_cache_free(sDataNodeCache, header, 0);
787 }
788
789
790 static inline void
791 free_net_buffer(net_buffer_private* buffer)
792 {
793 #if ENABLE_STATS
794 if (buffer != NULL)
795 atomic_add(&sAllocatedNetBufferCount, -1);
796 #endif
797 object_cache_free(sNetBufferCache, buffer, 0);
798 }
799
800
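/*! Allocates a new data_header from the object cache and initializes it
with \a headerSpace bytes of reserved header space. The rest of the
BUFFER_SIZE slab object is accounted as tail space.
*/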
801 static data_header*
802 create_data_header(size_t headerSpace)
803 {
804 data_header* header = allocate_data_header();
805 if (header == NULL)
806 return NULL;
807
808 header->ref_count = 1;
809 header->physical_address = 0;
810 // TODO: initialize this correctly
811 header->space.size = headerSpace;
812 header->space.free = headerSpace;
813 header->data_end = (uint8*)header + DATA_HEADER_SIZE;
814 header->tail_space = (uint8*)header + BUFFER_SIZE - header->data_end
815 - headerSpace;
816 header->first_free = NULL;
817
818 TRACE(("%d: create new data header %p\n", find_thread(NULL), header));
819 T2(CreateDataHeader(header));
820 return header;
821 }
822
823
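/*! Releases one reference to the \a header; releasing the last reference
returns the header to the object cache.
*/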
824 static void
825 release_data_header(data_header* header)
826 {
827 int32 refCount = atomic_add(&header->ref_count, -1);
828 T2(ReleaseDataHeader(header, refCount - 1));
829 if (refCount != 1)
830 return;
831
832 TRACE(("%d: free header %p\n", find_thread(NULL), header));
833 free_data_header(header);
834 }
835
836
837 inline void
838 acquire_data_header(data_header* header)
839 {
840 int32 refCount = atomic_add(&header->ref_count, 1);
841 (void)refCount;
842 T2(AcquireDataHeader(header, refCount + 1));
843 }
844
845
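/*! Returns \a size bytes at \a data to the header's free list, so that the
space can be reused by a later alloc_data_header_space().
*/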
846 static void
847 free_data_header_space(data_header* header, uint8* data, size_t size)
848 {
849 if (size < sizeof(free_data))
850 size = sizeof(free_data);
851
852 free_data* freeData = (free_data*)data;
853 freeData->next = header->first_free;
854 freeData->size = size;
855
856 header->first_free = freeData;
857 }
858
859
860 /*! Tries to allocate \a size bytes from the free space in the header.
861 */
862 static uint8*
863 alloc_data_header_space(data_header* header, size_t size)
864 {
865 if (size < sizeof(free_data))
866 size = sizeof(free_data);
867 size = _ALIGN(size);
868
869 if (header->first_free != NULL && header->first_free->size >= size) {
870 // the first entry of the header space matches the allocation's needs
871
872 // TODO: If the free space is greater than what shall be allocated, we
873 // leak the remainder of the space. We should only allocate multiples of
874 // _ALIGN(sizeof(free_data)) and split free space in this case. It's not
875 // that pressing, since the only thing allocated ATM are data_nodes, and
876 // thus the free space entries will always have the right size.
877 uint8* data = (uint8*)header->first_free;
878 header->first_free = header->first_free->next;
879 return data;
880 }
881
882 if (header->space.free < size) {
883 // not enough free space left, search the free list
884 free_data* freeData = header->first_free;
885 free_data* last = NULL;
886 while (freeData != NULL) {
887 if (last != NULL && freeData->size >= size) {
888 // take this one
889 last->next = freeData->next;
890 return (uint8*)freeData;
891 }
892
893 last = freeData;
894 freeData = freeData->next;
895 }
896
897 return NULL;
898 }
899
900 // allocate new space
901
902 uint8* data = header->data_end;
903 header->data_end += size;
904 header->space.free -= size;
905
906 return data;
907 }
908
909
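/*! Allocates \a size bytes of header space for the \a buffer, using its
current allocation header if possible, and creating a fresh one otherwise.
On success, \a _header is set to the header the space was taken from.
*/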
910 static uint8*
911 alloc_data_header_space(net_buffer_private* buffer, size_t size,
912 data_header** _header = NULL)
913 {
914 // try to allocate in our current allocation header
915 uint8* allocated = alloc_data_header_space(buffer->allocation_header, size);
916 if (allocated == NULL) {
917 // not enough header space left -- create a fresh buffer for headers
918 data_header* header = create_data_header(MAX_FREE_BUFFER_SIZE);
919 if (header == NULL)
920 return NULL;
921
922 // release our reference to the old header -- it will stay around
923 // until the last reference to it is released
924 release_data_header(buffer->allocation_header);
925 buffer->allocation_header = header;
926 // We keep the initial reference.
927
928 // now the allocation can only fail, if size is too big
929 allocated = alloc_data_header_space(buffer->allocation_header, size);
930 }
931
932 if (_header != NULL)
933 *_header = buffer->allocation_header;
934
935 return allocated;
936 }
937
938
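/*! Allocates and initializes the first data_node of the given \a header from
the header's own space. The node's data starts directly behind the remaining
free header space, so that data can be both appended and prepended.
*/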
939 static data_node*
940 add_first_data_node(data_header* header)
941 {
942 data_node* node = (data_node*)alloc_data_header_space(header,
943 sizeof(data_node));
944 if (node == NULL)
945 return NULL;
946
947 TRACE(("%d: add first data node %p to header %p\n", find_thread(NULL),
948 node, header));
949
950 acquire_data_header(header);
951
952 memset(node, 0, sizeof(struct data_node));
953 node->located = header;
954 node->header = header;
955 node->offset = 0;
956 node->start = header->data_end + header->space.free;
957 node->used = 0;
958 node->flags = 0;
959
960 return node;
961 }
962
963
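/*! Allocates a data_node for \a buffer that will reference data stored in
\a header. The node itself may be placed in a different header (the one it
is "located" in); references to both headers are acquired.
*/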
964 static data_node*
965 add_data_node(net_buffer_private* buffer, data_header* header)
966 {
967 data_header* located;
968 data_node* node = (data_node*)alloc_data_header_space(buffer,
969 sizeof(data_node), &located);
970 if (node == NULL)
971 return NULL;
972
973 TRACE(("%d: add data node %p to header %p\n", find_thread(NULL), node,
974 header));
975
976 acquire_data_header(header);
977 if (located != header)
978 acquire_data_header(located);
979
980 memset(node, 0, sizeof(struct data_node));
981 node->located = located;
982 node->header = header;
983 node->flags = 0;
984 return node;
985 }
986
987
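/*! Removes a data_node: its used and tail space are returned to its data
header, the node's own memory is put back on the free list of the header it
is located in, and the references to both headers are released.
*/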
988 void
989 remove_data_node(data_node* node)
990 {
991 data_header* located = node->located;
992
993 TRACE(("%d: remove data node %p from header %p (located %p)\n",
994 find_thread(NULL), node, node->header, located));
995
996 // Move all used and tail space to the header space, which is useful in case
997 // this is the first node of a buffer (i.e. the header is an allocation
998 // header).
999 node->FreeSpace();
1000
1001 if (located != node->header)
1002 release_data_header(node->header);
1003
1004 if (located == NULL)
1005 return;
1006
1007 free_data_header_space(located, (uint8*)node, sizeof(data_node));
1008
1009 release_data_header(located);
1010 }
1011
1012
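/*! Returns the data_node that contains the byte at \a offset, or \c NULL if
\a offset lies beyond the buffer's data.
*/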
1013 static inline data_node*
1014 get_node_at_offset(net_buffer_private* buffer, size_t offset)
1015 {
1016 data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
1017 while (node != NULL && node->offset + node->used <= offset)
1018 node = (data_node*)list_get_next_item(&buffer->buffers, node);
1019
1020 return node;
1021 }
1022
1023
1024 /*! Appends up to \a size bytes from the data of the \a from net_buffer to the
1025 \a to net_buffer. The source buffer will remain unchanged.
1026 */
1027 static status_t
1028 append_data_from_buffer(net_buffer* to, const net_buffer* from, size_t size)
1029 {
1030 net_buffer_private* source = (net_buffer_private*)from;
1031 net_buffer_private* dest = (net_buffer_private*)to;
1032
1033 if (size > from->size)
1034 return B_BAD_VALUE;
1035 if (size == 0)
1036 return B_OK;
1037
1038 data_node* nodeTo = get_node_at_offset(source, size);
1039 if (nodeTo == NULL)
1040 return B_BAD_VALUE;
1041
1042 data_node* node = (data_node*)list_get_first_item(&source->buffers);
1043 if (node == NULL) {
1044 CHECK_BUFFER(source);
1045 return B_ERROR;
1046 }
1047
1048 while (node != nodeTo) {
1049 if (append_data(dest, node->start, node->used) < B_OK) {
1050 CHECK_BUFFER(dest);
1051 return B_ERROR;
1052 }
1053
1054 node = (data_node*)list_get_next_item(&source->buffers, node);
1055 }
1056
1057 int32 diff = node->offset + node->used - size;
1058 if (append_data(dest, node->start, node->used - diff) < B_OK) {
1059 CHECK_BUFFER(dest);
1060 return B_ERROR;
1061 }
1062
1063 CHECK_BUFFER(dest);
1064
1065 return B_OK;
1066 }
1067
1068
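/*! Copies the metadata (addresses, flags, interface address, offset,
protocol, and type) from \a source to \a destination. A reference to the
interface address is acquired; the buffer data itself is not touched.
*/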
1069 static void
1070 copy_metadata(net_buffer* destination, const net_buffer* source)
1071 {
1072 memcpy(destination->source, source->source,
1073 min_c(source->source->sa_len, sizeof(sockaddr_storage)));
1074 memcpy(destination->destination, source->destination,
1075 min_c(source->destination->sa_len, sizeof(sockaddr_storage)));
1076
1077 destination->msg_flags = source->msg_flags;
1078 destination->buffer_flags = source->buffer_flags;
1079 destination->interface_address = source->interface_address;
1080 if (destination->interface_address != NULL)
1081 ((InterfaceAddress*)destination->interface_address)->AcquireReference();
1082
1083 destination->offset = source->offset;
1084 destination->protocol = source->protocol;
1085 destination->type = source->type;
1086 }
1087
1088
1089 // #pragma mark - module API
1090
1091
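/*! Creates a new, empty net_buffer. \a headerSpace (clamped to a valid
range) bytes are reserved for headers to be prepended later.
*/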
1092 static net_buffer*
1093 create_buffer(size_t headerSpace)
1094 {
1095 net_buffer_private* buffer = allocate_net_buffer();
1096 if (buffer == NULL)
1097 return NULL;
1098
1099 TRACE(("%d: create buffer %p\n", find_thread(NULL), buffer));
1100
1101 // Make sure headerSpace is valid and at least the initial node fits.
1102 headerSpace = _ALIGN(headerSpace);
1103 if (headerSpace < DATA_NODE_SIZE)
1104 headerSpace = DATA_NODE_SIZE;
1105 else if (headerSpace > MAX_FREE_BUFFER_SIZE)
1106 headerSpace = MAX_FREE_BUFFER_SIZE;
1107
1108 data_header* header = create_data_header(headerSpace);
1109 if (header == NULL) {
1110 free_net_buffer(buffer);
1111 return NULL;
1112 }
1113 buffer->allocation_header = header;
1114
1115 data_node* node = add_first_data_node(header);
1116
1117 list_init(&buffer->buffers);
1118 list_add_item(&buffer->buffers, node);
1119
1120 buffer->ancillary_data = NULL;
1121 buffer->stored_header_length = 0;
1122
1123 buffer->source = (sockaddr*)&buffer->storage.source;
1124 buffer->destination = (sockaddr*)&buffer->storage.destination;
1125
1126 buffer->storage.source.ss_len = 0;
1127 buffer->storage.destination.ss_len = 0;
1128
1129 buffer->interface_address = NULL;
1130 buffer->offset = 0;
1131 buffer->msg_flags = 0;
1132 buffer->buffer_flags = 0;
1133 buffer->size = 0;
1134
1135 CHECK_BUFFER(buffer);
1136 CREATE_PARANOIA_CHECK_SET(buffer, "net_buffer");
1137 SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1138 sizeof(buffer->size));
1139
1140 T(Create(headerSpace, buffer));
1141
1142 return buffer;
1143 }
1144
1145
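/*! Frees the \a buffer: all of its data nodes, its ancillary data, and its
references to data headers and the interface address are released.
*/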
1146 static void
1147 free_buffer(net_buffer* _buffer)
1148 {
1149 net_buffer_private* buffer = (net_buffer_private*)_buffer;
1150
1151 TRACE(("%d: free buffer %p\n", find_thread(NULL), buffer));
1152 T(Free(buffer));
1153
1154 CHECK_BUFFER(buffer);
1155 DELETE_PARANOIA_CHECK_SET(buffer);
1156
1157 while (data_node* node
1158 = (data_node*)list_remove_head_item(&buffer->buffers)) {
1159 remove_data_node(node);
1160 }
1161
1162 delete_ancillary_data_container(buffer->ancillary_data);
1163
1164 release_data_header(buffer->allocation_header);
1165
1166 if (buffer->interface_address != NULL)
1167 ((InterfaceAddress*)buffer->interface_address)->ReleaseReference();
1168
1169 free_net_buffer(buffer);
1170 }
1171
1172
1173 /*! Creates a duplicate of the \a buffer. The new buffer does not share internal
1174 storage; the two buffers are completely independent of each other.
1175 */
1176 static net_buffer*
1177 duplicate_buffer(net_buffer* _buffer)
1178 {
1179 net_buffer_private* buffer = (net_buffer_private*)_buffer;
1180
1181 ParanoiaChecker _(buffer);
1182
1183 TRACE(("%d: duplicate_buffer(buffer %p)\n", find_thread(NULL), buffer));
1184
1185 // TODO: We might want to choose a better header space. The minimal
1186 // one doesn't allow prepending any data without allocating a new header.
1187 // The same holds for appending cloned data.
1188 net_buffer* duplicate = create_buffer(DATA_NODE_SIZE);
1189 if (duplicate == NULL)
1190 return NULL;
1191
1192 TRACE(("%d: duplicate: %p)\n", find_thread(NULL), duplicate));
1193
1194 // copy the data from the source buffer
1195
1196 data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
1197 while (node != NULL) {
1198 if (append_data(duplicate, node->start, node->used) < B_OK) {
1199 free_buffer(duplicate);
1200 CHECK_BUFFER(buffer);
1201 return NULL;
1202 }
1203
1204 node = (data_node*)list_get_next_item(&buffer->buffers, node);
1205 }
1206
1207 copy_metadata(duplicate, buffer);
1208
1209 ASSERT(duplicate->size == buffer->size);
1210 CHECK_BUFFER(buffer);
1211 CHECK_BUFFER(duplicate);
1212 RUN_PARANOIA_CHECKS(duplicate);
1213
1214 T(Duplicate(buffer, duplicate));
1215
1216 return duplicate;
1217 }
1218
1219
1220 /*! Clones the buffer by grabbing another reference to the underlying data.
1221 If that data changes, it will be changed in the clone as well.
1222
1223 If \a shareFreeSpace is \c true, the cloned buffer may claim the free
1224 space of the original buffer, just as the original buffer still can. If
1225 you use this, it is your responsibility to ensure that only one of the
1226 buffers actually does so.
1227 */
1228 static net_buffer*
1229 clone_buffer(net_buffer* _buffer, bool shareFreeSpace)
1230 {
1231 // TODO: See if the commented-out code can be fixed in a safe way. We could
1232 // probably place cloned nodes on a header not belonging to our buffer, if
1233 // we don't free the header space for the node when removing it. Otherwise we
1234 // mess with the header's free list which might at the same time be accessed
1235 // by another thread.
1236 net_buffer_private* buffer = (net_buffer_private*)_buffer;
1237
1238 net_buffer* clone = create_buffer(MAX_FREE_BUFFER_SIZE);
1239 if (clone == NULL)
1240 return NULL;
1241
1242 if (append_cloned_data(clone, buffer, 0, buffer->size) != B_OK) {
1243 free_buffer(clone);
1244 return NULL;
1245 }
1246
1247 copy_metadata(clone, buffer);
1248 ASSERT(clone->size == buffer->size);
1249
1250 return clone;
1251
1252 #if 0
1253 ParanoiaChecker _(buffer);
1254
1255 TRACE(("%d: clone_buffer(buffer %p)\n", find_thread(NULL), buffer));
1256
1257 net_buffer_private* clone = allocate_net_buffer();
1258 if (clone == NULL)
1259 return NULL;
1260
1261 TRACE(("%d: clone: %p\n", find_thread(NULL), buffer));
1262
1263 data_node* sourceNode = (data_node*)list_get_first_item(&buffer->buffers);
1264 if (sourceNode == NULL) {
1265 free_net_buffer(clone);
1266 return NULL;
1267 }
1268
1269 clone->source = (sockaddr*)&clone->storage.source;
1270 clone->destination = (sockaddr*)&clone->storage.destination;
1271
1272 list_init(&clone->buffers);
1273
1274 // grab reference to this buffer - all additional nodes will get
1275 // theirs in add_data_node()
1276 acquire_data_header(sourceNode->header);
1277 data_node* node = &clone->first_node;
1278 node->header = sourceNode->header;
1279 node->located = NULL;
1280 node->used_header_space = &node->own_header_space;
1281
1282 while (sourceNode != NULL) {
1283 node->start = sourceNode->start;
1284 node->used = sourceNode->used;
1285 node->offset = sourceNode->offset;
1286
1287 if (shareFreeSpace) {
1288 // both buffers could claim the free space - note that this option
1289 // has to be used carefully
1290 node->used_header_space = &sourceNode->header->space;
1291 node->tail_space = sourceNode->tail_space;
1292 } else {
1293 // the free space stays with the original buffer
1294 node->used_header_space->size = 0;
1295 node->used_header_space->free = 0;
1296 node->tail_space = 0;
1297 }
1298
1299 // add node to clone's list of buffers
1300 list_add_item(&clone->buffers, node);
1301
1302 sourceNode = (data_node*)list_get_next_item(&buffer->buffers,
1303 sourceNode);
1304 if (sourceNode == NULL)
1305 break;
1306
1307 node = add_data_node(sourceNode->header);
1308 if (node == NULL) {
1309 // There was not enough space left for another node in this buffer
1310 // TODO: handle this case!
1311 panic("clone buffer hits size limit... (fix me)");
1312 free_net_buffer(clone);
1313 return NULL;
1314 }
1315 }
1316
1317 copy_metadata(clone, buffer);
1318
1319 ASSERT(clone->size == buffer->size);
1320 CREATE_PARANOIA_CHECK_SET(clone, "net_buffer");
1321 SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, clone, &clone->size,
1322 sizeof(clone->size));
1323 CHECK_BUFFER(buffer);
1324 CHECK_BUFFER(clone);
1325
1326 T(Clone(buffer, shareFreeSpace, clone));
1327
1328 return clone;
1329 #endif
1330 }
1331
1332
1333 /*! Splits the buffer at \a offset; the data in front of the offset (the
1334 header data) is returned as a new buffer.
1335 */
1336 static net_buffer*
1337 split_buffer(net_buffer* from, uint32 offset)
1338 {
1339 net_buffer* buffer = create_buffer(DATA_NODE_SIZE);
1340 if (buffer == NULL)
1341 return NULL;
1342
1343 copy_metadata(buffer, from);
1344
1345 ParanoiaChecker _(from);
1346 ParanoiaChecker _2(buffer);
1347
1348 TRACE(("%d: split_buffer(buffer %p -> %p, offset %" B_PRIu32 ")\n",
1349 find_thread(NULL), from, buffer, offset));
1350
1351 if (append_data_from_buffer(buffer, from, offset) == B_OK) {
1352 if (remove_header(from, offset) == B_OK) {
1353 CHECK_BUFFER(from);
1354 CHECK_BUFFER(buffer);
1355 T(Split(from, offset, buffer));
1356 return buffer;
1357 }
1358 }
1359
1360 free_buffer(buffer);
1361 CHECK_BUFFER(from);
1362 return NULL;
1363 }
1364
1365
1366 /*! Merges the second buffer with the first. If \a after is \c true, the
1367 second buffer's contents will be appended to the first one's, else they
1368 will be prepended.
1369 The second buffer will be freed if this function succeeds.
1370 */
1371 static status_t
1372 merge_buffer(net_buffer* _buffer, net_buffer* _with, bool after)
1373 {
1374 net_buffer_private* buffer = (net_buffer_private*)_buffer;
1375 net_buffer_private* with = (net_buffer_private*)_with;
1376 if (with == NULL)
1377 return B_BAD_VALUE;
1378
1379 TRACE(("%d: merge buffer %p with %p (%s)\n", find_thread(NULL), buffer,
1380 with, after ? "after" : "before"));
1381 T(Merge(buffer, with, after));
1382 //dump_buffer(buffer);
1383 //dprintf("with:\n");
1384 //dump_buffer(with);
1385
1386 ParanoiaChecker _(buffer);
1387 CHECK_BUFFER(buffer);
1388 CHECK_BUFFER(with);
1389
1390 // TODO: this is currently very simplistic, I really need to finish the
1391 // harder part of this implementation (data_node management per header)
1392
1393 data_node* before = NULL;
1394
1395 // TODO: Do allocating nodes (the only part that can fail) upfront. Put them
1396 // in a list, so we can easily clean up, if necessary.
1397
1398 if (!after) {
1399 // change offset of all nodes already in the buffer
1400 data_node* node = NULL;
1401 while (true) {
1402 node = (data_node*)list_get_next_item(&buffer->buffers, node);
1403 if (node == NULL)
1404 break;
1405
1406 node->offset += with->size;
1407 if (before == NULL)
1408 before = node;
1409 }
1410 }
1411
1412 data_node* last = NULL;
1413
1414 while (true) {
1415 data_node* node = (data_node*)list_get_next_item(&with->buffers, last);
1416 if (node == NULL)
1417 break;
1418
1419 if ((uint8*)node > (uint8*)node->header
1420 && (uint8*)node < (uint8*)node->header + BUFFER_SIZE) {
1421 // The node is already in the buffer, we can just move it
1422 // over to the new owner
1423 list_remove_item(&with->buffers, node);
1424 with->size -= node->used;
1425 } else {
1426 // we need a new place for this node
1427 data_node* newNode = add_data_node(buffer, node->header);
1428 if (newNode == NULL) {
1429 // TODO: try to revert buffers to their initial state!!
1430 return ENOBUFS;
1431 }
1432
1433 last = node;
1434 *newNode = *node;
1435 node = newNode;
1436 // the old node will get freed with its buffer
1437 }
1438
1439 if (after) {
1440 list_add_item(&buffer->buffers, node);
1441 node->offset = buffer->size;
1442 } else
1443 list_insert_item_before(&buffer->buffers, before, node);
1444
1445 buffer->size += node->used;
1446 }
1447
1448 SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1449 sizeof(buffer->size));
1450
1451 // the data has been merged completely at this point
1452 free_buffer(with);
1453
1454 //dprintf(" merge result:\n");
1455 //dump_buffer(buffer);
1456 CHECK_BUFFER(buffer);
1457
1458 return B_OK;
1459 }
1460
1461
1462 /*! Writes into existing allocated memory.
1463 \return B_BAD_VALUE if you write outside of the buffer's current
1464 bounds.
1465 */
1466 static status_t
1467 write_data(net_buffer* _buffer, size_t offset, const void* data, size_t size)
1468 {
1469 net_buffer_private* buffer = (net_buffer_private*)_buffer;
1470
1471 T(Write(buffer, offset, data, size));
1472
1473 ParanoiaChecker _(buffer);
1474
1475 if (offset + size > buffer->size)
1476 return B_BAD_VALUE;
1477 if (size == 0)
1478 return B_OK;
1479
1480 // find first node to write into
1481 data_node* node = get_node_at_offset(buffer, offset);
1482 if (node == NULL)
1483 return B_BAD_VALUE;
1484
1485 offset -= node->offset;
1486
1487 while (true) {
1488 size_t written = min_c(size, node->used - offset);
1489 if (IS_USER_ADDRESS(data)) {
1490 if (user_memcpy(node->start + offset, data, written) != B_OK)
1491 return B_BAD_ADDRESS;
1492 } else
1493 memcpy(node->start + offset, data, written);
1494
1495 size -= written;
1496 if (size == 0)
1497 break;
1498
1499 offset = 0;
1500 data = (void*)((uint8*)data + written);
1501
1502 node = (data_node*)list_get_next_item(&buffer->buffers, node);
1503 if (node == NULL)
1504 return B_BAD_VALUE;
1505 }
1506
1507 CHECK_BUFFER(buffer);
1508
1509 return B_OK;
1510 }
1511
1512
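/*! Copies \a size bytes starting at \a offset from the buffer into \a data,
which may be a userland address.
\return B_BAD_VALUE if the requested range is outside the buffer's bounds.
*/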
1513 static status_t
1514 read_data(net_buffer* _buffer, size_t offset, void* data, size_t size)
1515 {
1516 net_buffer_private* buffer = (net_buffer_private*)_buffer;
1517
1518 T(Read(buffer, offset, data, size));
1519
1520 ParanoiaChecker _(buffer);
1521
1522 if (offset + size > buffer->size)
1523 return B_BAD_VALUE;
1524 if (size == 0)
1525 return B_OK;
1526
1527 // find first node to read from
1528 data_node* node = get_node_at_offset(buffer, offset);
1529 if (node == NULL)
1530 return B_BAD_VALUE;
1531
1532 offset -= node->offset;
1533
1534 while (true) {
1535 size_t bytesRead = min_c(size, node->used - offset);
1536 if (IS_USER_ADDRESS(data)) {
1537 if (user_memcpy(data, node->start + offset, bytesRead) != B_OK)
1538 return B_BAD_ADDRESS;
1539 } else
1540 memcpy(data, node->start + offset, bytesRead);
1541
1542 size -= bytesRead;
1543 if (size == 0)
1544 break;
1545
1546 offset = 0;
1547 data = (void*)((uint8*)data + bytesRead);
1548
1549 node = (data_node*)list_get_next_item(&buffer->buffers, node);
1550 if (node == NULL)
1551 return B_BAD_VALUE;
1552 }
1553
1554 CHECK_BUFFER(buffer);
1555
1556 return B_OK;
1557 }
1558
1559
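/*! Reserves \a size bytes in front of the buffer's current data, allocating
additional data headers if needed. If the new space is contiguous, a pointer
to it is returned in \a _contiguousBuffer; otherwise \c NULL is stored there
and the data has to be written via write_data().
*/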
1560 static status_t
1561 prepend_size(net_buffer* _buffer, size_t size, void** _contiguousBuffer)
1562 {
1563 net_buffer_private* buffer = (net_buffer_private*)_buffer;
1564 data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
1565 if (node == NULL) {
1566 node = add_first_data_node(buffer->allocation_header);
1567 if (node == NULL)
1568 return B_NO_MEMORY;
1569 }
1570
1571 T(PrependSize(buffer, size));
1572
1573 ParanoiaChecker _(buffer);
1574
1575 TRACE(("%d: prepend_size(buffer %p, size %ld) [has %u]\n",
1576 find_thread(NULL), buffer, size, node->HeaderSpace()));
1577 //dump_buffer(buffer);
1578
1579 if ((node->flags & DATA_NODE_STORED_HEADER) != 0) {
1580 // throw any stored headers away
1581 node->AddHeaderSpace(buffer->stored_header_length);
1582 node->flags &= ~DATA_NODE_STORED_HEADER;
1583 buffer->stored_header_length = 0;
1584 }
1585
1586 if (node->HeaderSpace() < size) {
1587 // we need to prepend new buffers
1588
1589 size_t bytesLeft = size;
1590 size_t sizePrepended = 0;
1591 do {
1592 if (node->HeaderSpace() == 0) {
1593 size_t headerSpace = MAX_FREE_BUFFER_SIZE;
1594 data_header* header = create_data_header(headerSpace);
1595 if (header == NULL) {
1596 remove_header(buffer, sizePrepended);
1597 return B_NO_MEMORY;
1598 }
1599
1600 data_node* previous = node;
1601
1602 node = (data_node*)add_first_data_node(header);
1603
1604 list_insert_item_before(&buffer->buffers, previous, node);
1605
1606 // Release the initial reference to the header, so that it will
1607 // be deleted when the node is removed.
1608 release_data_header(header);
1609 }
1610
1611 size_t willConsume = min_c(bytesLeft, node->HeaderSpace());
1612
1613 node->SubtractHeaderSpace(willConsume);
1614 node->start -= willConsume;
1615 node->used += willConsume;
1616 bytesLeft -= willConsume;
1617 sizePrepended += willConsume;
1618 } while (bytesLeft > 0);
1619
1620 // correct data offset in all nodes
1621
1622 size_t offset = 0;
1623 node = NULL;
1624 while ((node = (data_node*)list_get_next_item(&buffer->buffers,
1625 node)) != NULL) {
1626 node->offset = offset;
1627 offset += node->used;
1628 }
1629
1630 if (_contiguousBuffer)
1631 *_contiguousBuffer = NULL;
1632 } else {
1633 // the data fits into this buffer
1634 node->SubtractHeaderSpace(size);
1635 node->start -= size;
1636 node->used += size;
1637
1638 if (_contiguousBuffer)
1639 *_contiguousBuffer = node->start;
1640
1641 // adjust offset of following nodes
1642 while ((node = (data_node*)list_get_next_item(&buffer->buffers, node))
1643 != NULL) {
1644 node->offset += size;
1645 }
1646 }
1647
1648 buffer->size += size;
1649
1650 SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1651 sizeof(buffer->size));
1652
1653 //dprintf(" prepend_size result:\n");
1654 //dump_buffer(buffer);
1655 CHECK_BUFFER(buffer);
1656 return B_OK;
1657 }
1658
1659
1660 static status_t
1661 prepend_data(net_buffer* buffer, const void* data, size_t size)
1662 {
1663 void* contiguousBuffer;
1664 status_t status = prepend_size(buffer, size, &contiguousBuffer);
1665 if (status < B_OK)
1666 return status;
1667
1668 if (contiguousBuffer) {
1669 if (IS_USER_ADDRESS(data)) {
1670 if (user_memcpy(contiguousBuffer, data, size) != B_OK)
1671 return B_BAD_ADDRESS;
1672 } else
1673 memcpy(contiguousBuffer, data, size);
1674 } else
1675 write_data(buffer, 0, data, size);
1676
1677 //dprintf(" prepend result:\n");
1678 //dump_buffer(buffer);
1679
1680 return B_OK;
1681 }
1682
1683
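/*! Reserves \a size bytes at the end of the buffer, allocating additional
data headers if needed. As with prepend_size(), \a _contiguousBuffer is only
set to the new space if it is contiguous, and to \c NULL otherwise.
*/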
1684 static status_t
1685 append_size(net_buffer* _buffer, size_t size, void** _contiguousBuffer)
1686 {
1687 net_buffer_private* buffer = (net_buffer_private*)_buffer;
1688 data_node* node = (data_node*)list_get_last_item(&buffer->buffers);
1689 if (node == NULL) {
1690 node = add_first_data_node(buffer->allocation_header);
1691 if (node == NULL)
1692 return B_NO_MEMORY;
1693 }
1694
1695 T(AppendSize(buffer, size));
1696
1697 ParanoiaChecker _(buffer);
1698
1699 TRACE(("%d: append_size(buffer %p, size %ld)\n", find_thread(NULL),
1700 buffer, size));
1701 //dump_buffer(buffer);
1702
1703 if (node->TailSpace() < size) {
1704 // we need to append at least one new buffer
1705 uint32 previousTailSpace = node->TailSpace();
1706 uint32 headerSpace = DATA_NODE_SIZE;
1707 uint32 sizeUsed = MAX_FREE_BUFFER_SIZE - headerSpace;
1708
1709 // allocate space left in the node
1710 node->SetTailSpace(0);
1711 node->used += previousTailSpace;
1712 buffer->size += previousTailSpace;
1713 uint32 sizeAdded = previousTailSpace;
1714 SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1715 sizeof(buffer->size));
1716
1717 // allocate all buffers
1718
1719 while (sizeAdded < size) {
1720 if (sizeAdded + sizeUsed > size) {
1721 // last data_header and not all available space is used
1722 sizeUsed = size - sizeAdded;
1723 }
1724
1725 data_header* header = create_data_header(headerSpace);
1726 if (header == NULL) {
1727 remove_trailer(buffer, sizeAdded);
1728 return B_NO_MEMORY;
1729 }
1730
1731 node = add_first_data_node(header);
1732 if (node == NULL) {
1733 release_data_header(header);
1734 return B_NO_MEMORY;
1735 }
1736
1737 node->SetTailSpace(node->TailSpace() - sizeUsed);
1738 node->used = sizeUsed;
1739 node->offset = buffer->size;
1740
1741 buffer->size += sizeUsed;
1742 sizeAdded += sizeUsed;
1743 SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1744 sizeof(buffer->size));
1745
1746 list_add_item(&buffer->buffers, node);
1747
1748 // Release the initial reference to the header, so that it will
1749 // be deleted when the node is removed.
1750 release_data_header(header);
1751 }
1752
1753 if (_contiguousBuffer)
1754 *_contiguousBuffer = NULL;
1755
1756 //dprintf(" append result 1:\n");
1757 //dump_buffer(buffer);
1758 CHECK_BUFFER(buffer);
1759
1760 return B_OK;
1761 }
1762
1763 // the data fits into this buffer
1764 node->SetTailSpace(node->TailSpace() - size);
1765
1766 if (_contiguousBuffer)
1767 *_contiguousBuffer = node->start + node->used;
1768
1769 node->used += size;
1770 buffer->size += size;
1771 SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1772 sizeof(buffer->size));
1773
1774 //dprintf(" append result 2:\n");
1775 //dump_buffer(buffer);
1776 CHECK_BUFFER(buffer);
1777
1778 return B_OK;
1779 }
1780
1781
1782 static status_t
1783 append_data(net_buffer* buffer, const void* data, size_t size)
1784 {
1785 size_t used = buffer->size;
1786
1787 void* contiguousBuffer;
1788 status_t status = append_size(buffer, size, &contiguousBuffer);
1789 if (status < B_OK)
1790 return status;
1791
1792 if (contiguousBuffer) {
1793 if (IS_USER_ADDRESS(data)) {
1794 if (user_memcpy(contiguousBuffer, data, size) != B_OK)
1795 return B_BAD_ADDRESS;
1796 } else
1797 memcpy(contiguousBuffer, data, size);
1798 } else
1799 write_data(buffer, used, data, size);
1800
1801 return B_OK;
1802 }
1803
1804
1805 /*! Removes bytes from the beginning of the buffer.
1806 */
1807 static status_t
1808 remove_header(net_buffer* _buffer, size_t bytes)
1809 {
1810 net_buffer_private* buffer = (net_buffer_private*)_buffer;
1811
1812 T(RemoveHeader(buffer, bytes));
1813
1814 ParanoiaChecker _(buffer);
1815
1816 if (bytes > buffer->size)
1817 return B_BAD_VALUE;
1818
1819 TRACE(("%d: remove_header(buffer %p, %ld bytes)\n", find_thread(NULL),
1820 buffer, bytes));
1821 //dump_buffer(buffer);
1822
1823 size_t left = bytes;
1824 data_node* node = NULL;
1825
1826 while (true) {
1827 node = (data_node*)list_get_first_item(&buffer->buffers);
1828 if (node == NULL) {
1829 if (left == 0)
1830 break;
1831 CHECK_BUFFER(buffer);
1832 return B_ERROR;
1833 }
1834
1835 if (node->used > left)
1836 break;
1837
1838 // node will be removed completely
1839 list_remove_item(&buffer->buffers, node);
1840 left -= node->used;
1841 remove_data_node(node);
1842 node = NULL;
1843 buffer->stored_header_length = 0;
1844 }
1845
1846 // cut remaining node, if any
1847
1848 if (node != NULL) {
1849 size_t cut = min_c(node->used, left);
1850 node->offset = 0;
1851 node->start += cut;
1852 if ((node->flags & DATA_NODE_STORED_HEADER) != 0)
1853 buffer->stored_header_length += cut;
1854 else
1855 node->AddHeaderSpace(cut);
1856 node->used -= cut;
1857
1858 node = (data_node*)list_get_next_item(&buffer->buffers, node);
1859 }
1860
1861 // adjust offset of following nodes
1862 while (node != NULL) {
1863 node->offset -= bytes;
1864 node = (data_node*)list_get_next_item(&buffer->buffers, node);
1865 }
1866
1867 buffer->size -= bytes;
1868 SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1869 sizeof(buffer->size));
1870
1871 //dprintf(" remove result:\n");
1872 //dump_buffer(buffer);
1873 CHECK_BUFFER(buffer);
1874
1875 return B_OK;
1876 }
1877
1878
1879 /*! Removes bytes from the end of the buffer.
1880 */
1881 static status_t
1882 remove_trailer(net_buffer* buffer, size_t bytes)
1883 {
1884 return trim_data(buffer, buffer->size - bytes);
1885 }
1886
1887
1888 /*! Trims the buffer to the specified \a newSize by removing space from
1889 the end of the buffer.
1890 */
1891 static status_t
1892 trim_data(net_buffer* _buffer, size_t newSize)
1893 {
1894 net_buffer_private* buffer = (net_buffer_private*)_buffer;
1895 TRACE(("%d: trim_data(buffer %p, newSize = %ld, buffer size = %" B_PRIu32 ")\n",
1896 find_thread(NULL), buffer, newSize, buffer->size));
1897 T(Trim(buffer, newSize));
1898 //dump_buffer(buffer);
1899
1900 ParanoiaChecker _(buffer);
1901
1902 if (newSize > buffer->size)
1903 return B_BAD_VALUE;
1904 if (newSize == buffer->size)
1905 return B_OK;
1906
1907 data_node* node = get_node_at_offset(buffer, newSize);
1908 if (node == NULL) {
1909 // trim size greater than buffer size
1910 return B_BAD_VALUE;
1911 }
1912
1913 int32 diff = node->used + node->offset - newSize;
1914 node->SetTailSpace(node->TailSpace() + diff);
1915 node->used -= diff;
1916
1917 if (node->used > 0)
1918 node = (data_node*)list_get_next_item(&buffer->buffers, node);
1919
1920 while (node != NULL) {
1921 data_node* next = (data_node*)list_get_next_item(&buffer->buffers, node);
1922 list_remove_item(&buffer->buffers, node);
1923 remove_data_node(node);
1924
1925 node = next;
1926 }
1927
1928 buffer->size = newSize;
1929 SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1930 sizeof(buffer->size));
1931
1932 //dprintf(" trim result:\n");
1933 //dump_buffer(buffer);
1934 CHECK_BUFFER(buffer);
1935
1936 return B_OK;
1937 }
1938
1939
1940 /*! Appends data coming from buffer \a source to the buffer \a buffer. It only
1941 clones the data, though; that is, the data is not copied, just referenced.
1942 */
1943 static status_t
1944 append_cloned_data(net_buffer* _buffer, net_buffer* _source, uint32 offset,
1945 size_t bytes)
1946 {
1947 if (bytes == 0)
1948 return B_OK;
1949
1950 net_buffer_private* buffer = (net_buffer_private*)_buffer;
1951 net_buffer_private* source = (net_buffer_private*)_source;
1952 TRACE(("%d: append_cloned_data(buffer %p, source %p, offset = %" B_PRIu32 ", "
1953 "bytes = %ld)\n", find_thread(NULL), buffer, source, offset, bytes));
1954 T(AppendCloned(buffer, source, offset, bytes));
1955
1956 ParanoiaChecker _(buffer);
1957 ParanoiaChecker _2(source);
1958
1959 if (source->size < offset + bytes || source->size < offset)
1960 return B_BAD_VALUE;
1961
1962 // find data_node to start with from the source buffer
1963 data_node* node = get_node_at_offset(source, offset);
1964 if (node == NULL) {
1965 // offset lies beyond the source buffer's data
1966 return B_BAD_VALUE;
1967 }
1968
1969 size_t sizeAppended = 0;
1970
1971 while (node != NULL && bytes > 0) {
1972 data_node* clone = add_data_node(buffer, node->header);
1973 if (clone == NULL) {
1974 remove_trailer(buffer, sizeAppended);
1975 return ENOBUFS;
1976 }
1977
1978 if (offset)
1979 offset -= node->offset;
1980
1981 clone->offset = buffer->size;
1982 clone->start = node->start + offset;
1983 clone->used = min_c(bytes, node->used - offset);
1984 if (list_is_empty(&buffer->buffers)) {
1985 // take over stored offset
1986 buffer->stored_header_length = source->stored_header_length;
1987 clone->flags = node->flags | DATA_NODE_READ_ONLY;
1988 } else
1989 clone->flags = DATA_NODE_READ_ONLY;
1990
1991 list_add_item(&buffer->buffers, clone);
1992
1993 offset = 0;
1994 bytes -= clone->used;
1995 buffer->size += clone->used;
1996 sizeAppended += clone->used;
1997 node = (data_node*)list_get_next_item(&source->buffers, node);
1998 }
1999
2000 if (bytes != 0)
2001 panic("add_cloned_data() failed, bytes != 0!\n");
2002
2003 //dprintf(" append cloned result:\n");
2004 //dump_buffer(buffer);
2005 CHECK_BUFFER(source);
2006 CHECK_BUFFER(buffer);
2007 SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
2008 sizeof(buffer->size));
2009
2010 return B_OK;
2011 }
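
/*	Usage sketch (editorial, not part of the original sources): cloning a byte
	range out of a received buffer into a freshly created one without copying
	the payload. create_buffer() is assumed here to take the amount of header
	space to reserve, as the module's create hook does; all other names are
	illustrative.

		static net_buffer*
		clone_payload_range(net_buffer* source, uint32 offset, size_t length)
		{
			net_buffer* clone = create_buffer(256);
				// 256 bytes of header space, chosen arbitrarily here
			if (clone == NULL)
				return NULL;

			if (append_cloned_data(clone, source, offset, length) != B_OK) {
				free_buffer(clone);
				return NULL;
			}
			return clone;
		}
*/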


void
set_ancillary_data(net_buffer* buffer, ancillary_data_container* container)
{
	((net_buffer_private*)buffer)->ancillary_data = container;
}


ancillary_data_container*
get_ancillary_data(net_buffer* buffer)
{
	return ((net_buffer_private*)buffer)->ancillary_data;
}


/*!	Moves all ancillary data from buffer \a from to the end of the list of
	ancillary data of buffer \a to. Note that this is the only function that
	transfers or copies ancillary data from one buffer to another.

	\param from The buffer from which to remove the ancillary data.
	\param to The buffer to which to add the ancillary data.
	\return A pointer to the first of the moved ancillary data, if any, \c NULL
		otherwise.
*/
static void*
transfer_ancillary_data(net_buffer* _from, net_buffer* _to)
{
	net_buffer_private* from = (net_buffer_private*)_from;
	net_buffer_private* to = (net_buffer_private*)_to;

	if (from == NULL || to == NULL)
		return NULL;

	if (from->ancillary_data == NULL)
		return NULL;

	if (to->ancillary_data == NULL) {
		// no ancillary data in the target buffer
		to->ancillary_data = from->ancillary_data;
		from->ancillary_data = NULL;
		return next_ancillary_data(to->ancillary_data, NULL, NULL);
	}

	// both have ancillary data
	void* data = move_ancillary_data(from->ancillary_data,
		to->ancillary_data);
	delete_ancillary_data_container(from->ancillary_data);
	from->ancillary_data = NULL;

	return data;
}
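
/*	Usage sketch (editorial, not part of the original sources): when the
	payload of one buffer is merged into another, its control messages can be
	carried over in a single step. The helper name is illustrative.

		static bool
		merge_control_messages(net_buffer* from, net_buffer* to)
		{
			// after this call, "from" no longer owns any ancillary data;
			// the result is NULL if "from" had none to begin with
			return transfer_ancillary_data(from, to) != NULL;
		}
*/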


/*!	Stores the current header position; even if the header is removed with
	remove_header(), you can still reclaim it later using restore_header(),
	unless you prepended different data (in which case restoring will fail).
*/
status_t
store_header(net_buffer* _buffer)
{
	net_buffer_private* buffer = (net_buffer_private*)_buffer;
	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
	if (node == NULL)
		return B_ERROR;

	if ((node->flags & DATA_NODE_STORED_HEADER) != 0) {
		// Someone else already stored the header - since we cannot
		// differentiate between them, we throw away everything
		node->AddHeaderSpace(buffer->stored_header_length);
		node->flags &= ~DATA_NODE_STORED_HEADER;
		buffer->stored_header_length = 0;

		return B_ERROR;
	}

	buffer->stored_header_length = 0;
	node->flags |= DATA_NODE_STORED_HEADER;

	return B_OK;
}
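
/*	Usage sketch (editorial, not part of the original sources): a datalink
	level receiver could remember the frame header before handing the buffer
	to upper layers, so that it can later be replayed via restore_header() or
	append_restored_header(). Pairing store_header() with remove_header(), and
	the "frameHeaderLength" parameter, are illustrative assumptions.

		static status_t
		strip_frame_header(net_buffer* buffer, size_t frameHeaderLength)
		{
			// remember where the header currently is ...
			status_t status = store_header(buffer);
			if (status != B_OK)
				return status;

			// ... then remove it from the visible data
			return remove_header(buffer, frameHeaderLength);
		}
*/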


ssize_t
stored_header_length(net_buffer* _buffer)
{
	net_buffer_private* buffer = (net_buffer_private*)_buffer;
	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
	if (node == NULL || (node->flags & DATA_NODE_STORED_HEADER) == 0)
		return B_BAD_VALUE;

	return buffer->stored_header_length;
}


/*!	Reads from the complete buffer, including a possibly stored header.
	This function does not care whether or not there is a stored header at
	all - you have to use the stored_header_length() function to find out.
*/
status_t
restore_header(net_buffer* _buffer, uint32 offset, void* data, size_t bytes)
{
	net_buffer_private* buffer = (net_buffer_private*)_buffer;

	if (offset < buffer->stored_header_length) {
		data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
		if (node == NULL
			|| offset + bytes > buffer->stored_header_length + buffer->size)
			return B_BAD_VALUE;

		// We have the data, so copy it out

		size_t copied = std::min(bytes, buffer->stored_header_length - offset);
		memcpy(data, node->start + offset - buffer->stored_header_length,
			copied);

		if (copied == bytes)
			return B_OK;

		data = (uint8*)data + copied;
		bytes -= copied;
		offset = 0;
	} else
		offset -= buffer->stored_header_length;

	return read_data(_buffer, offset, data, bytes);
}
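
/*	Usage sketch (editorial, not part of the original sources): reading the
	complete frame, including a previously stored (and since removed) header,
	into a flat destination buffer supplied by the caller. The helper and its
	parameters are illustrative.

		static status_t
		copy_whole_frame(net_buffer* buffer, uint8* out, size_t outSize)
		{
			ssize_t headerLength = stored_header_length(buffer);
			if (headerLength < 0)
				headerLength = 0;
					// no stored header; fall back to the plain data

			size_t total = headerLength + buffer->size;
			if (total > outSize)
				return B_BUFFER_OVERFLOW;

			return restore_header(buffer, 0, out, total);
		}
*/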


/*!	Copies from the complete \a source buffer, including a possibly stored
	header, to the specified target \a buffer.
	This function does not care whether or not there is a stored header at
	all - you have to use the stored_header_length() function to find out.
*/
status_t
append_restored_header(net_buffer* buffer, net_buffer* _source, uint32 offset,
	size_t bytes)
{
	net_buffer_private* source = (net_buffer_private*)_source;

	if (offset < source->stored_header_length) {
		data_node* node = (data_node*)list_get_first_item(&source->buffers);
		if (node == NULL
			|| offset + bytes > source->stored_header_length + source->size)
			return B_BAD_VALUE;

		// We have the data, so copy it out

		size_t appended = std::min(bytes,
			source->stored_header_length - offset);
		status_t status = append_data(buffer,
			node->start + offset - source->stored_header_length, appended);
		if (status != B_OK)
			return status;

		if (appended == bytes)
			return B_OK;

		bytes -= appended;
		offset = 0;
	} else
		offset -= source->stored_header_length;

	return append_cloned_data(buffer, source, offset, bytes);
}
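
/*	Usage sketch (editorial, not part of the original sources): handing a copy
	of the complete frame, including the stored header, to another consumer
	(for example a raw or packet capture socket). All names are illustrative.

		static status_t
		append_whole_frame(net_buffer* target, net_buffer* source)
		{
			ssize_t headerLength = stored_header_length(source);
			if (headerLength < 0)
				headerLength = 0;

			return append_restored_header(target, source, 0,
				headerLength + source->size);
		}
*/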


/*!	Tries to directly access the requested space in the buffer.
	If the space is contiguous, the function will succeed and place a pointer
	to that space into \a _contiguousBuffer.

	\return B_BAD_VALUE if the offset is outside of the buffer's bounds.
	\return B_ERROR in case the buffer is not contiguous at that location.
*/
static status_t
direct_access(net_buffer* _buffer, uint32 offset, size_t size,
	void** _contiguousBuffer)
{
	net_buffer_private* buffer = (net_buffer_private*)_buffer;

	ParanoiaChecker _(buffer);

	//TRACE(("direct_access(buffer %p, offset %ld, size %ld)\n", buffer, offset,
	//	size));

	if (offset + size > buffer->size)
		return B_BAD_VALUE;

	// find node to access
	data_node* node = get_node_at_offset(buffer, offset);
	if (node == NULL)
		return B_BAD_VALUE;

	offset -= node->offset;

	if (size > node->used - offset)
		return B_ERROR;

	*_contiguousBuffer = node->start + offset;
	return B_OK;
}
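
/*	Usage sketch (editorial, not part of the original sources): a protocol
	header usually lies contiguously in the first data node, so direct_access()
	avoids a copy in the common case, with read_data() as the fallback. The
	"my_header" type and helper are placeholders.

		struct my_header { uint16 length; uint16 checksum; };

		static status_t
		peek_header(net_buffer* buffer, my_header* storage,
			my_header** _header)
		{
			void* data;
			if (direct_access(buffer, 0, sizeof(my_header), &data) == B_OK) {
				*_header = (my_header*)data;
				return B_OK;
			}

			// not contiguous at offset 0 - copy into the caller's storage
			status_t status = read_data(buffer, 0, storage,
				sizeof(my_header));
			if (status != B_OK)
				return status;

			*_header = storage;
			return B_OK;
		}
*/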


static int32
checksum_data(net_buffer* _buffer, uint32 offset, size_t size, bool finalize)
{
	net_buffer_private* buffer = (net_buffer_private*)_buffer;

	if (offset + size > buffer->size || size == 0)
		return B_BAD_VALUE;

	// find first node to read from
	data_node* node = get_node_at_offset(buffer, offset);
	if (node == NULL)
		return B_ERROR;

	offset -= node->offset;

	// Since the maximum buffer size is 65536 bytes, the sum of all 16-bit
	// words cannot overflow the 32 bit accumulator - we don't need to fold
	// the carry in the loop, we can safely do it afterwards
	uint32 sum = 0;

	while (true) {
		size_t bytes = min_c(size, node->used - offset);
		if ((offset + node->offset) & 1) {
			// if we start at an odd offset, we have to byte-swap the partial
			// checksum to keep the 16-bit words aligned
			sum += __swap_int16(compute_checksum(node->start + offset, bytes));
		} else
			sum += compute_checksum(node->start + offset, bytes);

		size -= bytes;
		if (size == 0)
			break;

		offset = 0;

		node = (data_node*)list_get_next_item(&buffer->buffers, node);
		if (node == NULL)
			return B_ERROR;
	}

	while (sum >> 16) {
		sum = (sum & 0xffff) + (sum >> 16);
	}

	if (!finalize)
		return (uint16)sum;

	return (uint16)~sum;
}
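
/*	Usage sketch (editorial, not part of the original sources): verifying an
	Internet-style one's complement checksum over the first "headerLength"
	bytes of a buffer. A header whose checksum field is correct finalizes to
	zero. The helper name is illustrative.

		static bool
		verify_header_checksum(net_buffer* buffer, size_t headerLength)
		{
			int32 checksum = checksum_data(buffer, 0, headerLength, true);
			if (checksum < 0)
				return false;
					// out of bounds or inconsistent buffer

			return checksum == 0;
		}
*/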


static uint32
get_iovecs(net_buffer* _buffer, struct iovec* iovecs, uint32 vecCount)
{
	net_buffer_private* buffer = (net_buffer_private*)_buffer;
	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
	uint32 count = 0;

	while (node != NULL && count < vecCount) {
		if (node->used > 0) {
			iovecs[count].iov_base = node->start;
			iovecs[count].iov_len = node->used;
			count++;
		}

		node = (data_node*)list_get_next_item(&buffer->buffers, node);
	}

	return count;
}


static uint32
count_iovecs(net_buffer* _buffer)
{
	net_buffer_private* buffer = (net_buffer_private*)_buffer;
	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
	uint32 count = 0;

	while (node != NULL) {
		if (node->used > 0)
			count++;

		node = (data_node*)list_get_next_item(&buffer->buffers, node);
	}

	return count;
}
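
/*	Usage sketch (editorial, not part of the original sources): building a
	scatter/gather list so that a driver could send the buffer without
	flattening it first. The fixed-size vector array is an illustrative
	assumption; count_iovecs() tells how many entries would really be needed.

		static status_t
		send_scattered(net_buffer* buffer)
		{
			const uint32 kMaxVecs = 16;
			struct iovec vecs[kMaxVecs];

			if (count_iovecs(buffer) > kMaxVecs)
				return B_BUFFER_OVERFLOW;

			uint32 count = get_iovecs(buffer, vecs, kMaxVecs);
			// hand "vecs"/"count" to the device's send routine here
			(void)count;
			return B_OK;
		}
*/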


static void
swap_addresses(net_buffer* buffer)
{
	std::swap(buffer->source, buffer->destination);
}


static status_t
std_ops(int32 op, ...)
{
	switch (op) {
		case B_MODULE_INIT:
			// TODO: improve our code a bit so we can add constructors
			//	and keep around half-constructed buffers in the slab

			sNetBufferCache = create_object_cache("net buffer cache",
				sizeof(net_buffer_private), 8, NULL, NULL, NULL);
			if (sNetBufferCache == NULL)
				return B_NO_MEMORY;

			sDataNodeCache = create_object_cache("data node cache", BUFFER_SIZE,
				0, NULL, NULL, NULL);
			if (sDataNodeCache == NULL) {
				delete_object_cache(sNetBufferCache);
				return B_NO_MEMORY;
			}

#if ENABLE_STATS
			add_debugger_command_etc("net_buffer_stats", &dump_net_buffer_stats,
				"Print net buffer statistics",
				"\nPrint net buffer statistics.\n", 0);
#endif
#if ENABLE_DEBUGGER_COMMANDS
			add_debugger_command_etc("net_buffer", &dump_net_buffer,
				"Dump net buffer",
				"\nDump the net buffer's internal structures.\n", 0);
#endif
			return B_OK;

		case B_MODULE_UNINIT:
#if ENABLE_STATS
			remove_debugger_command("net_buffer_stats", &dump_net_buffer_stats);
#endif
#if ENABLE_DEBUGGER_COMMANDS
			remove_debugger_command("net_buffer", &dump_net_buffer);
#endif
			delete_object_cache(sNetBufferCache);
			delete_object_cache(sDataNodeCache);
			return B_OK;

		default:
			return B_ERROR;
	}
}


net_buffer_module_info gNetBufferModule = {
	{
		NET_BUFFER_MODULE_NAME,
		0,
		std_ops
	},
	create_buffer,
	free_buffer,

	duplicate_buffer,
	clone_buffer,
	split_buffer,
	merge_buffer,

	prepend_size,
	prepend_data,
	append_size,
	append_data,
	NULL,	// insert
	NULL,	// remove
	remove_header,
	remove_trailer,
	trim_data,
	append_cloned_data,

	NULL,	// associate_data

	set_ancillary_data,
	get_ancillary_data,
	transfer_ancillary_data,

	store_header,
	stored_header_length,
	restore_header,
	append_restored_header,

	direct_access,
	read_data,
	write_data,

	checksum_data,

	NULL,	// get_memory_map
	get_iovecs,
	count_iovecs,

	swap_addresses,

	dump_buffer,	// dump
};
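
/*	Usage sketch (editorial, not part of the original sources): other network
	modules do not link against these functions directly but resolve the
	module by name through the kernel module API, roughly like this during
	their own initialization. The helper names are illustrative.

		static net_buffer_module_info* sBufferModule;

		static status_t
		init_consumer()
		{
			return get_module(NET_BUFFER_MODULE_NAME,
				(module_info**)&sBufferModule);
		}

		static void
		uninit_consumer()
		{
			put_module(NET_BUFFER_MODULE_NAME);
		}
*/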