// xref: /haiku/src/add-ons/kernel/network/stack/net_buffer.cpp (revision e277f0be5755a37e30f098deb6fb7542ac850a47)
1 /*
2  * Copyright 2006-2010, Haiku, Inc. All Rights Reserved.
3  * Distributed under the terms of the MIT License.
4  *
5  * Authors:
6  *		Axel Dörfler, axeld@pinc-software.de
7  *		Ingo Weinhold, ingo_weinhold@gmx.de
8  */
9 
10 
11 #include "utility.h"
12 
13 #include <net_buffer.h>
14 #include <slab/Slab.h>
15 #include <tracing.h>
16 #include <util/list.h>
17 
18 #include <ByteOrder.h>
19 #include <debug.h>
20 #include <kernel.h>
21 #include <KernelExport.h>
22 #include <util/DoublyLinkedList.h>
23 
24 #include <algorithm>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <sys/param.h>
28 #include <sys/uio.h>
29 
30 #include "ancillary_data.h"
31 #include "interfaces.h"
32 
33 #include "paranoia_config.h"
34 
35 
36 //#define TRACE_BUFFER
37 #ifdef TRACE_BUFFER
38 #	define TRACE(x) dprintf x
39 #else
40 #	define TRACE(x) ;
41 #endif
42 
43 #define BUFFER_SIZE 2048
44 	// maximum implementation derived buffer size is 65536
45 
46 #define ENABLE_DEBUGGER_COMMANDS	1
47 #define ENABLE_STATS				1
48 #define PARANOID_BUFFER_CHECK		NET_BUFFER_PARANOIA
49 
50 #define COMPONENT_PARANOIA_LEVEL	NET_BUFFER_PARANOIA
51 #include <debug_paranoia.h>
52 
53 #define DATA_NODE_READ_ONLY		0x1
54 #define DATA_NODE_STORED_HEADER	0x2
55 
// Bookkeeping for the reserved "header space" of a data buffer: the region
// from which node structures and prepended protocol headers are allocated.
struct header_space {
	uint16	size;	// total bytes currently reserved as header space
	uint16	free;	// unused part of that reservation
};

// Entry in a data_header's singly linked list of freed allocations, stored
// in-place in the freed memory itself.
struct free_data {
	struct free_data* next;
	uint16			size;	// size of this free chunk in bytes
};

// Management structure at the start of every BUFFER_SIZE data buffer. It is
// reference counted, since several data_nodes -- possibly belonging to
// different net_buffers -- may point into the same buffer.
struct data_header {
	int32			ref_count;			// manipulated via atomic_add()
	addr_t			physical_address;	// TODO: never initialized correctly (see create_data_header())
	free_data*		first_free;			// free list of released allocations
	uint8*			data_end;			// end of the area allocated so far
	header_space	space;				// header space accounting
	uint16			tail_space;			// free bytes behind the data
};
74 
// A data_node describes one contiguous slice of payload inside a
// data_header's buffer. Since several nodes may share one header, all space
// accounting is delegated to the header; nodes flagged DATA_NODE_READ_ONLY
// must not touch the shared space and report none available.
struct data_node {
	struct list_link link;
	struct data_header* header;		// header owning the data this node points into
	struct data_header* located;	// header the node structure itself was allocated in
	size_t			offset;		// the net_buffer-wide offset of this node
	uint8*			start;		// points to the start of the data
	uint16			flags;		// DATA_NODE_* flags
	uint16			used;		// defines how much memory is used by this node

	// Free bytes available in front of the data; 0 for read-only nodes.
	uint16 HeaderSpace() const
	{
		if ((flags & DATA_NODE_READ_ONLY) != 0)
			return 0;
		return header->space.free;
	}

	// Grows the header space reservation (no-op on read-only nodes).
	void AddHeaderSpace(uint16 toAdd)
	{
		if ((flags & DATA_NODE_READ_ONLY) == 0) {
			header->space.size += toAdd;
			header->space.free += toAdd;
		}
	}

	// Shrinks the header space reservation (no-op on read-only nodes).
	void SubtractHeaderSpace(uint16 toSubtract)
	{
		if ((flags & DATA_NODE_READ_ONLY) == 0) {
			header->space.size -= toSubtract;
			header->space.free -= toSubtract;
		}
	}

	// Free bytes available behind the data; 0 for read-only nodes.
	uint16 TailSpace() const
	{
		if ((flags & DATA_NODE_READ_ONLY) != 0)
			return 0;
		return header->tail_space;
	}

	void SetTailSpace(uint16 space)
	{
		if ((flags & DATA_NODE_READ_ONLY) == 0)
			header->tail_space = space;
	}

	// Returns the node's used bytes plus the tail space to the header
	// space accounting. Note: does not reset 'used' itself; callers are
	// expected to discard the node afterwards (cf. remove_data_node()).
	void FreeSpace()
	{
		if ((flags & DATA_NODE_READ_ONLY) == 0) {
			uint16 space = used + header->tail_space;
			header->space.size += space;
			header->space.free += space;
			header->tail_space = 0;
		}
	}
};
130 
131 
// TODO: we should think about moving the address fields into the buffer
// data itself via associated data or something like this. Or this
// structure as a whole, too...
struct net_buffer_private : net_buffer {
	struct list					buffers;		// list of data_nodes holding the payload
	data_header*				allocation_header;
		// the current place where we allocate header space (nodes, ...)
	ancillary_data_container*	ancillary_data;	// may be NULL
	size_t						stored_header_length;

	// Backing storage for the inherited source/destination sockaddr
	// pointers (set up by create_buffer()).
	struct {
		struct sockaddr_storage	source;
		struct sockaddr_storage	destination;
	} storage;
};
147 
148 
149 #define DATA_HEADER_SIZE				_ALIGN(sizeof(data_header))
150 #define DATA_NODE_SIZE					_ALIGN(sizeof(data_node))
151 #define MAX_FREE_BUFFER_SIZE			(BUFFER_SIZE - DATA_HEADER_SIZE)
152 
153 
154 static object_cache* sNetBufferCache;
155 static object_cache* sDataNodeCache;
156 
157 
158 static status_t append_data(net_buffer* buffer, const void* data, size_t size);
159 static status_t trim_data(net_buffer* _buffer, size_t newSize);
160 static status_t remove_header(net_buffer* _buffer, size_t bytes);
161 static status_t remove_trailer(net_buffer* _buffer, size_t bytes);
162 static status_t append_cloned_data(net_buffer* _buffer, net_buffer* _source,
163 					uint32 offset, size_t bytes);
164 static status_t read_data(net_buffer* _buffer, size_t offset, void* data,
165 					size_t size);
166 
167 
168 #if ENABLE_STATS
169 static vint32 sAllocatedDataHeaderCount = 0;
170 static vint32 sAllocatedNetBufferCount = 0;
171 static vint32 sEverAllocatedDataHeaderCount = 0;
172 static vint32 sEverAllocatedNetBufferCount = 0;
173 static vint32 sMaxAllocatedDataHeaderCount = 0;
174 static vint32 sMaxAllocatedNetBufferCount = 0;
175 #endif
176 
177 
178 #if NET_BUFFER_TRACING
179 
180 
// Trace entry classes for the net_buffer tracing facility (compiled in when
// NET_BUFFER_TRACING is set). Each class captures the parameters of one
// buffer operation at construction time; AddDump() renders the entry later
// for the tracing output.
namespace NetBufferTracing {


// Common base class: remembers the affected buffer and, optionally, a
// stack trace of the call site.
class NetBufferTraceEntry : public AbstractTraceEntry {
public:
	NetBufferTraceEntry(net_buffer* buffer)
		:
		fBuffer(buffer)
	{
#if NET_BUFFER_TRACING_STACK_TRACE
	fStackTrace = capture_tracing_stack_trace(
		NET_BUFFER_TRACING_STACK_TRACE, 0, false);
#endif
	}

#if NET_BUFFER_TRACING_STACK_TRACE
	virtual void DumpStackTrace(TraceOutput& out)
	{
		out.PrintStackTrace(fStackTrace);
	}
#endif

protected:
	net_buffer*	fBuffer;
#if NET_BUFFER_TRACING_STACK_TRACE
	tracing_stack_trace* fStackTrace;
#endif
};


// Records create_buffer().
class Create : public NetBufferTraceEntry {
public:
	Create(size_t headerSpace, net_buffer* buffer)
		:
		NetBufferTraceEntry(buffer),
		fHeaderSpace(headerSpace)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("net buffer create: header space: %lu -> buffer: %p",
			fHeaderSpace, fBuffer);
	}

private:
	size_t		fHeaderSpace;
};


// Records free_buffer().
class Free : public NetBufferTraceEntry {
public:
	Free(net_buffer* buffer)
		:
		NetBufferTraceEntry(buffer)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("net buffer free: buffer: %p", fBuffer);
	}
};


// Records duplicate_buffer().
class Duplicate : public NetBufferTraceEntry {
public:
	Duplicate(net_buffer* buffer, net_buffer* clone)
		:
		NetBufferTraceEntry(buffer),
		fClone(clone)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("net buffer dup: buffer: %p -> %p", fBuffer, fClone);
	}

private:
	net_buffer*		fClone;
};


// Records clone_buffer().
class Clone : public NetBufferTraceEntry {
public:
	Clone(net_buffer* buffer, bool shareFreeSpace, net_buffer* clone)
		:
		NetBufferTraceEntry(buffer),
		fClone(clone),
		fShareFreeSpace(shareFreeSpace)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("net buffer clone: buffer: %p, share free space: %s "
			"-> %p", fBuffer, fShareFreeSpace ? "true" : "false", fClone);
	}

private:
	net_buffer*		fClone;
	bool			fShareFreeSpace;
};


// Records split_buffer().
class Split : public NetBufferTraceEntry {
public:
	Split(net_buffer* buffer, uint32 offset, net_buffer* newBuffer)
		:
		NetBufferTraceEntry(buffer),
		fNewBuffer(newBuffer),
		fOffset(offset)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("net buffer split: buffer: %p, offset: %lu "
			"-> %p", fBuffer, fOffset, fNewBuffer);
	}

private:
	net_buffer*		fNewBuffer;
	uint32			fOffset;
};


// Records merge_buffers().
class Merge : public NetBufferTraceEntry {
public:
	Merge(net_buffer* buffer, net_buffer* otherBuffer, bool after)
		:
		NetBufferTraceEntry(buffer),
		fOtherBuffer(otherBuffer),
		fAfter(after)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		// NOTE(review): fOtherBuffer is printed both as operand and as
		// result here -- presumably the result should be fBuffer; confirm.
		out.Print("net buffer merge: buffers: %p + %p, after: %s "
			"-> %p", fBuffer, fOtherBuffer, fAfter ? "true" : "false",
			fOtherBuffer);
	}

private:
	net_buffer*		fOtherBuffer;
	bool			fAfter;
};


// Records append_cloned_data().
class AppendCloned : public NetBufferTraceEntry {
public:
	AppendCloned(net_buffer* buffer, net_buffer* source, uint32 offset,
		size_t size)
		:
		NetBufferTraceEntry(buffer),
		fSource(source),
		fOffset(offset),
		fSize(size)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("net buffer append cloned: buffer: %p, from: %p, "
			"offset: %lu, size: %lu", fBuffer, fSource, fOffset, fSize);
	}

private:
	net_buffer*		fSource;
	uint32			fOffset;
	size_t			fSize;
};


// Records prepend_size().
class PrependSize : public NetBufferTraceEntry {
public:
	PrependSize(net_buffer* buffer, size_t size)
		:
		NetBufferTraceEntry(buffer),
		fSize(size)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("net buffer prepend size: buffer: %p, size: %lu", fBuffer,
			fSize);
	}

private:
	size_t			fSize;
};


// Records append_size().
class AppendSize : public NetBufferTraceEntry {
public:
	AppendSize(net_buffer* buffer, size_t size)
		:
		NetBufferTraceEntry(buffer),
		fSize(size)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("net buffer append size: buffer: %p, size: %lu", fBuffer,
			fSize);
	}

private:
	size_t			fSize;
};


// Records remove_header().
class RemoveHeader : public NetBufferTraceEntry {
public:
	RemoveHeader(net_buffer* buffer, size_t size)
		:
		NetBufferTraceEntry(buffer),
		fSize(size)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("net buffer remove header: buffer: %p, size: %lu",
			fBuffer, fSize);
	}

private:
	size_t			fSize;
};


// Records trim_data().
class Trim : public NetBufferTraceEntry {
public:
	Trim(net_buffer* buffer, size_t size)
		:
		NetBufferTraceEntry(buffer),
		fSize(size)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("net buffer trim: buffer: %p, size: %lu",
			fBuffer, fSize);
	}

private:
	size_t			fSize;
};


// Records read_data().
class Read : public NetBufferTraceEntry {
public:
	Read(net_buffer* buffer, uint32 offset, void* data, size_t size)
		:
		NetBufferTraceEntry(buffer),
		fData(data),
		fOffset(offset),
		fSize(size)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("net buffer read: buffer: %p, offset: %lu, size: %lu, "
			"data: %p", fBuffer, fOffset, fSize, fData);
	}

private:
	void*			fData;
	uint32			fOffset;
	size_t			fSize;
};


// Records write_data().
class Write : public NetBufferTraceEntry {
public:
	Write(net_buffer* buffer, uint32 offset, const void* data, size_t size)
		:
		NetBufferTraceEntry(buffer),
		fData(data),
		fOffset(offset),
		fSize(size)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("net buffer write: buffer: %p, offset: %lu, size: %lu, "
			"data: %p", fBuffer, fOffset, fSize, fData);
	}

private:
	const void*		fData;
	uint32			fOffset;
	size_t			fSize;
};


// More detailed tracing of data_header life cycle, enabled at tracing
// level 2 and above.
#if NET_BUFFER_TRACING >= 2

class DataHeaderTraceEntry : public AbstractTraceEntry {
public:
	DataHeaderTraceEntry(data_header* header)
		:
		fHeader(header)
	{
	}

protected:
	data_header*	fHeader;
};


// Records create_data_header().
class CreateDataHeader : public DataHeaderTraceEntry {
public:
	CreateDataHeader(data_header* header)
		:
		DataHeaderTraceEntry(header)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("net buffer data header create:  header: %p", fHeader);
	}
};


// Records acquire_data_header().
class AcquireDataHeader : public DataHeaderTraceEntry {
public:
	AcquireDataHeader(data_header* header, int32 refCount)
		:
		DataHeaderTraceEntry(header),
		fRefCount(refCount)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("net buffer data header acquire: header: %p "
			"-> ref count: %ld", fHeader, fRefCount);
	}

private:
	int32			fRefCount;
};


// Records release_data_header().
class ReleaseDataHeader : public DataHeaderTraceEntry {
public:
	ReleaseDataHeader(data_header* header, int32 refCount)
		:
		DataHeaderTraceEntry(header),
		fRefCount(refCount)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("net buffer data header release: header: %p "
			"-> ref count: %ld", fHeader, fRefCount);
	}

private:
	int32			fRefCount;
};

#	define T2(x)	new(std::nothrow) NetBufferTracing::x
#else
#	define T2(x)
#endif	// NET_BUFFER_TRACING >= 2

}	// namespace NetBufferTracing
576 
577 #	define T(x)	new(std::nothrow) NetBufferTracing::x
578 
579 #else
580 #	define T(x)
581 #	define T2(x)
582 #endif	// NET_BUFFER_TRACING
583 
584 
585 static void
586 dump_address(const char* prefix, sockaddr* address,
587 	net_interface_address* interfaceAddress)
588 {
589 	if (address == NULL || address->sa_len == 0)
590 		return;
591 
592 	if (interfaceAddress == NULL || interfaceAddress->domain == NULL) {
593 		dprintf("  %s: length %u, family %u\n", prefix, address->sa_len,
594 			address->sa_family);
595 
596 		dump_block((char*)address + 2, address->sa_len - 2, "    ");
597 	} else {
598 		char buffer[64];
599 		interfaceAddress->domain->address_module->print_address_buffer(address,
600 			buffer, sizeof(buffer), true);
601 
602 		dprintf("  %s: %s\n", prefix, buffer);
603 	}
604 }
605 
606 
607 static void
608 dump_buffer(net_buffer* _buffer)
609 {
610 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
611 
612 	dprintf("buffer %p, size %" B_PRIu32 ", flags %" B_PRIx32 ", stored header "
613 		"%" B_PRIu32 ", interface address %p\n", buffer, buffer->size,
614 		buffer->flags, buffer->stored_header_length, buffer->interface_address);
615 
616 	dump_address("source", buffer->source, buffer->interface_address);
617 	dump_address("destination", buffer->destination, buffer->interface_address);
618 
619 	data_node* node = NULL;
620 	while ((node = (data_node*)list_get_next_item(&buffer->buffers, node))
621 			!= NULL) {
622 		dprintf("  node %p, offset %lu, used %u, header %u, tail %u, "
623 			"header %p\n", node, node->offset, node->used, node->HeaderSpace(),
624 			node->TailSpace(), node->header);
625 
626 		if ((node->flags & DATA_NODE_STORED_HEADER) != 0) {
627 			dump_block((char*)node->start - buffer->stored_header_length,
628 				min_c(buffer->stored_header_length, 64), "  s ");
629 		}
630 		dump_block((char*)node->start, min_c(node->used, 64), "    ");
631 	}
632 }
633 
634 #if ENABLE_DEBUGGER_COMMANDS
635 
636 static int
637 dump_net_buffer(int argc, char** argv)
638 {
639 	if (argc != 2) {
640 		kprintf("usage: %s [address]\n", argv[0]);
641 		return 0;
642 	}
643 
644 	dump_buffer((net_buffer*)parse_expression(argv[1]));
645 	return 0;
646 }
647 
648 #endif	// ENABLE_DEBUGGER_COMMANDS
649 
650 #if ENABLE_STATS
651 
652 static int
653 dump_net_buffer_stats(int argc, char** argv)
654 {
655 	kprintf("allocated data headers: %7ld / %7ld, peak %7ld\n",
656 		sAllocatedDataHeaderCount, sEverAllocatedDataHeaderCount,
657 		sMaxAllocatedDataHeaderCount);
658 	kprintf("allocated net buffers:  %7ld / %7ld, peak %7ld\n",
659 		sAllocatedNetBufferCount, sEverAllocatedNetBufferCount,
660 		sMaxAllocatedNetBufferCount);
661 	return 0;
662 }
663 
664 #endif	// ENABLE_STATS
665 
666 #if PARANOID_BUFFER_CHECK
667 
/*!	Paranoia check: verifies that the data nodes of \a _buffer form a
	contiguous sequence of offsets and that their sizes add up to the
	buffer's total size. Panics on any inconsistency.
*/
static void
check_buffer(net_buffer* _buffer)
{
	net_buffer_private* buffer = (net_buffer_private*)_buffer;

	// sum up the size of all nodes
	size_t size = 0;

	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
	while (node != NULL) {
		// Each node must start exactly where the previous one ended.
		if (node->offset != size) {
			panic("net_buffer %p: bad node %p offset (%lu vs. %lu)",
				buffer, node, node->offset, size);
			return;
		}
		size += node->used;
		node = (data_node*)list_get_next_item(&buffer->buffers, node);
	}

	if (size != buffer->size) {
		panic("net_buffer %p size != sum of its data node sizes (%lu vs. %lu)",
			buffer, buffer->size, size);
		return;
	}
}
693 
694 
695 #if 0
696 static void
697 check_buffer_contents(net_buffer* buffer, size_t offset, const void* data,
698 	size_t size)
699 {
700 	void* bufferData = malloc(size);
701 	if (bufferData == NULL)
702 		return;
703 
704 	if (read_data(buffer, offset, bufferData, size) == B_OK) {
705 		if (memcmp(bufferData, data, size) != 0) {
706 			int32 index = 0;
707 			while (((uint8*)data)[index] == ((uint8*)bufferData)[index])
708 				index++;
709 			panic("check_buffer_contents(): contents check failed at index "
710 				"%ld, buffer: %p, offset: %lu, size: %lu", index, buffer,
711 				offset, size);
712 		}
713 	} else {
714 		panic("failed to read from buffer %p, offset: %lu, size: %lu",
715 			buffer, offset, size);
716 	}
717 
718 	free(bufferData);
719 }
720 
721 
722 static void
723 check_buffer_contents(net_buffer* buffer, size_t offset, net_buffer* source,
724 	size_t sourceOffset, size_t size)
725 {
726 	void* bufferData = malloc(size);
727 	if (bufferData == NULL)
728 		return;
729 
730 	if (read_data(source, sourceOffset, bufferData, size) == B_OK) {
731 		check_buffer_contents(buffer, offset, bufferData, size);
732 	} else {
733 		panic("failed to read from source buffer %p, offset: %lu, size: %lu",
734 			source, sourceOffset, size);
735 	}
736 
737 	free(bufferData);
738 }
739 #endif
740 
741 
742 # 	define CHECK_BUFFER(buffer)	check_buffer(buffer)
743 #else
744 # 	define CHECK_BUFFER(buffer)	do {} while (false)
745 #endif	// !PARANOID_BUFFER_CHECK
746 
747 
748 static inline data_header*
749 allocate_data_header()
750 {
751 #if ENABLE_STATS
752 	int32 current = atomic_add(&sAllocatedDataHeaderCount, 1) + 1;
753 	int32 max = atomic_get(&sMaxAllocatedDataHeaderCount);
754 	if (current > max)
755 		atomic_test_and_set(&sMaxAllocatedDataHeaderCount, current, max);
756 
757 	atomic_add(&sEverAllocatedDataHeaderCount, 1);
758 #endif
759 	return (data_header*)object_cache_alloc(sDataNodeCache, 0);
760 }
761 
762 
763 static inline net_buffer_private*
764 allocate_net_buffer()
765 {
766 #if ENABLE_STATS
767 	int32 current = atomic_add(&sAllocatedNetBufferCount, 1) + 1;
768 	int32 max = atomic_get(&sMaxAllocatedNetBufferCount);
769 	if (current > max)
770 		atomic_test_and_set(&sMaxAllocatedNetBufferCount, current, max);
771 
772 	atomic_add(&sEverAllocatedNetBufferCount, 1);
773 #endif
774 	return (net_buffer_private*)object_cache_alloc(sNetBufferCache, 0);
775 }
776 
777 
/*!	Returns a data_header's buffer to the slab cache and updates the
	statistics. A NULL \a header only skips the statistics update; it is
	still handed to the cache (assumed to tolerate NULL -- TODO confirm).
*/
static inline void
free_data_header(data_header* header)
{
#if ENABLE_STATS
	if (header != NULL)
		atomic_add(&sAllocatedDataHeaderCount, -1);
#endif
	object_cache_free(sDataNodeCache, header, 0);
}
787 
788 
/*!	Returns a net_buffer_private structure to the slab cache and updates
	the statistics. A NULL \a buffer only skips the statistics update.
*/
static inline void
free_net_buffer(net_buffer_private* buffer)
{
#if ENABLE_STATS
	if (buffer != NULL)
		atomic_add(&sAllocatedNetBufferCount, -1);
#endif
	object_cache_free(sNetBufferCache, buffer, 0);
}
798 
799 
/*!	Allocates and initializes a new data_header with \a headerSpace bytes
	reserved as header space. The caller receives the initial reference.
	Returns NULL if out of memory.
*/
static data_header*
create_data_header(size_t headerSpace)
{
	data_header* header = allocate_data_header();
	if (header == NULL)
		return NULL;

	header->ref_count = 1;
	header->physical_address = 0;
		// TODO: initialize this correctly
	header->space.size = headerSpace;
	header->space.free = headerSpace;
	// Allocations grow upwards from directly behind the header structure;
	// tail_space is whatever of the BUFFER_SIZE chunk remains after the
	// structure itself and the reserved header space.
	header->data_end = (uint8*)header + DATA_HEADER_SIZE;
	header->tail_space = (uint8*)header + BUFFER_SIZE - header->data_end
		- headerSpace;
	header->first_free = NULL;

	TRACE(("%ld:   create new data header %p\n", find_thread(NULL), header));
	T2(CreateDataHeader(header));
	return header;
}
821 
822 
823 static void
824 release_data_header(data_header* header)
825 {
826 	int32 refCount = atomic_add(&header->ref_count, -1);
827 	T2(ReleaseDataHeader(header, refCount - 1));
828 	if (refCount != 1)
829 		return;
830 
831 	TRACE(("%ld:   free header %p\n", find_thread(NULL), header));
832 	free_data_header(header);
833 }
834 
835 
836 inline void
837 acquire_data_header(data_header* header)
838 {
839 	int32 refCount = atomic_add(&header->ref_count, 1);
840 	(void)refCount;
841 	T2(AcquireDataHeader(header, refCount + 1));
842 }
843 
844 
845 static void
846 free_data_header_space(data_header* header, uint8* data, size_t size)
847 {
848 	if (size < sizeof(free_data))
849 		size = sizeof(free_data);
850 
851 	free_data* freeData = (free_data*)data;
852 	freeData->next = header->first_free;
853 	freeData->size = size;
854 
855 	header->first_free = freeData;
856 }
857 
858 
/*!	Tries to allocate \a size bytes from the free space in the header.
	First the head of the free list is tried, then -- if the unallocated
	header space is exhausted -- the rest of the free list, and finally
	fresh space at data_end. Returns NULL if nothing fits.
*/
static uint8*
alloc_data_header_space(data_header* header, size_t size)
{
	// Enforce the free list's minimum chunk size, and align like
	// free_data_header_space() does.
	if (size < sizeof(free_data))
		size = sizeof(free_data);
	size = _ALIGN(size);

	if (header->first_free != NULL && header->first_free->size >= size) {
		// the first entry of the header space matches the allocation's needs

		// TODO: If the free space is greater than what shall be allocated, we
		// leak the remainder of the space. We should only allocate multiples of
		// _ALIGN(sizeof(free_data)) and split free space in this case. It's not
		// that pressing, since the only thing allocated ATM are data_nodes, and
		// thus the free space entries will always have the right size.
		uint8* data = (uint8*)header->first_free;
		header->first_free = header->first_free->next;
		return data;
	}

	if (header->space.free < size) {
		// there is no free space left, search free list
		// (the head entry was already rejected above, which is why the
		// last != NULL check below may safely skip it)
		free_data* freeData = header->first_free;
		free_data* last = NULL;
		while (freeData != NULL) {
			if (last != NULL && freeData->size >= size) {
				// take this one
				last->next = freeData->next;
				return (uint8*)freeData;
			}

			last = freeData;
			freeData = freeData->next;
		}

		return NULL;
	}

	// allocate new space

	uint8* data = header->data_end;
	header->data_end += size;
	header->space.free -= size;

	return data;
}
907 
908 
/*!	Allocates \a size bytes of header space for \a buffer, replacing the
	buffer's allocation header with a fresh one if the current one is full.
	If \a _header is given, it is set to the header the space was allocated
	in. Returns NULL if out of memory (or \a size is too large to ever fit).
*/
static uint8*
alloc_data_header_space(net_buffer_private* buffer, size_t size,
	data_header** _header = NULL)
{
	// try to allocate in our current allocation header
	uint8* allocated = alloc_data_header_space(buffer->allocation_header, size);
	if (allocated == NULL) {
		// not enough header space left -- create a fresh buffer for headers
		data_header* header = create_data_header(MAX_FREE_BUFFER_SIZE);
		if (header == NULL)
			return NULL;

		// release our reference to the old header -- it will stay around
		// until the last reference to it is released
		release_data_header(buffer->allocation_header);
		buffer->allocation_header = header;
			// We keep the initial reference.

		// now the allocation can only fail, if size is too big
		allocated = alloc_data_header_space(buffer->allocation_header, size);
	}

	if (_header != NULL)
		*_header = buffer->allocation_header;

	return allocated;
}
936 
937 
/*!	Creates the first data_node of a fresh \a header. Both the node
	structure and its data live in this header; the node's data starts
	directly behind the reserved header space. Acquires a reference to
	\a header on success; returns NULL if the node doesn't fit.
*/
static data_node*
add_first_data_node(data_header* header)
{
	data_node* node = (data_node*)alloc_data_header_space(header,
		sizeof(data_node));
	if (node == NULL)
		return NULL;

	TRACE(("%ld:   add first data node %p to header %p\n", find_thread(NULL),
		node, header));

	acquire_data_header(header);

	memset(node, 0, sizeof(struct data_node));
	node->located = header;
	node->header = header;
	node->offset = 0;
	// The data area begins after whatever header space is still free.
	node->start = header->data_end + header->space.free;
	node->used = 0;
	node->flags = 0;

	return node;
}
961 
962 
/*!	Creates a data_node referencing data in \a header. Unlike
	add_first_data_node(), the node structure itself is allocated from
	\a buffer's current allocation header ("located"), which may differ
	from \a header. References to both headers are acquired. The caller is
	responsible for initializing offset/start/used. Returns NULL if out of
	memory.
*/
static data_node*
add_data_node(net_buffer_private* buffer, data_header* header)
{
	data_header* located;
	data_node* node = (data_node*)alloc_data_header_space(buffer,
		sizeof(data_node), &located);
	if (node == NULL)
		return NULL;

	TRACE(("%ld:   add data node %p to header %p\n", find_thread(NULL), node,
		header));

	acquire_data_header(header);
	if (located != header)
		acquire_data_header(located);

	memset(node, 0, sizeof(struct data_node));
	node->located = located;
	node->header = header;
	node->flags = 0;
	return node;
}
985 
986 
/*!	Destroys \a node: returns its data to the header space accounting,
	frees the node structure in the header it was allocated in, and drops
	the references to both the data header and the "located" header.
	Note: deliberately not static -- presumably declared for use elsewhere
	in the stack (TODO: confirm against utility.h).
*/
void
remove_data_node(data_node* node)
{
	data_header* located = node->located;

	TRACE(("%ld:   remove data node %p from header %p (located %p)\n",
		find_thread(NULL), node, node->header, located));

	// Move all used and tail space to the header space, which is useful in case
	// this is the first node of a buffer (i.e. the header is an allocation
	// header).
	node->FreeSpace();

	if (located != node->header)
		release_data_header(node->header);

	if (located == NULL)
		return;

	free_data_header_space(located, (uint8*)node, sizeof(data_node));

	release_data_header(located);
}
1010 
1011 
1012 static inline data_node*
1013 get_node_at_offset(net_buffer_private* buffer, size_t offset)
1014 {
1015 	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
1016 	while (node != NULL && node->offset + node->used <= offset)
1017 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
1018 
1019 	return node;
1020 }
1021 
1022 
1023 /*!	Appends up to \a size bytes from the data of the \a from net_buffer to the
1024 	\a to net_buffer. The source buffer will remain unchanged.
1025 */
1026 static status_t
1027 append_data_from_buffer(net_buffer* to, const net_buffer* from, size_t size)
1028 {
1029 	net_buffer_private* source = (net_buffer_private*)from;
1030 	net_buffer_private* dest = (net_buffer_private*)to;
1031 
1032 	if (size > from->size)
1033 		return B_BAD_VALUE;
1034 	if (size == 0)
1035 		return B_OK;
1036 
1037 	data_node* nodeTo = get_node_at_offset(source, size);
1038 	if (nodeTo == NULL)
1039 		return B_BAD_VALUE;
1040 
1041 	data_node* node = (data_node*)list_get_first_item(&source->buffers);
1042 	if (node == NULL) {
1043 		CHECK_BUFFER(source);
1044 		return B_ERROR;
1045 	}
1046 
1047 	while (node != nodeTo) {
1048 		if (append_data(dest, node->start, node->used) < B_OK) {
1049 			CHECK_BUFFER(dest);
1050 			return B_ERROR;
1051 		}
1052 
1053 		node = (data_node*)list_get_next_item(&source->buffers, node);
1054 	}
1055 
1056 	int32 diff = node->offset + node->used - size;
1057 	if (append_data(dest, node->start, node->used - diff) < B_OK) {
1058 		CHECK_BUFFER(dest);
1059 		return B_ERROR;
1060 	}
1061 
1062 	CHECK_BUFFER(dest);
1063 
1064 	return B_OK;
1065 }
1066 
1067 
/*!	Copies the meta data (addresses, flags, interface address, offset,
	protocol, and type) -- but not the payload -- from \a source to
	\a destination. Assumes the destination's address pointers refer to
	sockaddr_storage-sized memory, as set up by create_buffer().
*/
static void
copy_metadata(net_buffer* destination, const net_buffer* source)
{
	memcpy(destination->source, source->source,
		min_c(source->source->sa_len, sizeof(sockaddr_storage)));
	memcpy(destination->destination, source->destination,
		min_c(source->destination->sa_len, sizeof(sockaddr_storage)));

	destination->flags = source->flags;
	destination->interface_address = source->interface_address;
	destination->offset = source->offset;
	destination->protocol = source->protocol;
	destination->type = source->type;
}
1082 
1083 
1084 //	#pragma mark - module API
1085 
1086 
1087 static net_buffer*
1088 create_buffer(size_t headerSpace)
1089 {
1090 	net_buffer_private* buffer = allocate_net_buffer();
1091 	if (buffer == NULL)
1092 		return NULL;
1093 
1094 	TRACE(("%ld: create buffer %p\n", find_thread(NULL), buffer));
1095 
1096 	// Make sure headerSpace is valid and at least the initial node fits.
1097 	headerSpace = _ALIGN(headerSpace);
1098 	if (headerSpace < DATA_NODE_SIZE)
1099 		headerSpace = DATA_NODE_SIZE;
1100 	else if (headerSpace > MAX_FREE_BUFFER_SIZE)
1101 		headerSpace = MAX_FREE_BUFFER_SIZE;
1102 
1103 	data_header* header = create_data_header(headerSpace);
1104 	if (header == NULL) {
1105 		free_net_buffer(buffer);
1106 		return NULL;
1107 	}
1108 	buffer->allocation_header = header;
1109 
1110 	data_node* node = add_first_data_node(header);
1111 
1112 	list_init(&buffer->buffers);
1113 	list_add_item(&buffer->buffers, node);
1114 
1115 	buffer->ancillary_data = NULL;
1116 	buffer->stored_header_length = 0;
1117 
1118 	buffer->source = (sockaddr*)&buffer->storage.source;
1119 	buffer->destination = (sockaddr*)&buffer->storage.destination;
1120 
1121 	buffer->storage.source.ss_len = 0;
1122 	buffer->storage.destination.ss_len = 0;
1123 
1124 	buffer->interface_address = NULL;
1125 	buffer->offset = 0;
1126 	buffer->flags = 0;
1127 	buffer->size = 0;
1128 
1129 	CHECK_BUFFER(buffer);
1130 	CREATE_PARANOIA_CHECK_SET(buffer, "net_buffer");
1131 	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1132 		sizeof(buffer->size));
1133 
1134 	T(Create(headerSpace, buffer));
1135 
1136 	return buffer;
1137 }
1138 
1139 
/*!	Frees all resources of \a _buffer: every data node, the ancillary data
	container, the reference to the allocation header, and finally the
	buffer structure itself.
*/
static void
free_buffer(net_buffer* _buffer)
{
	net_buffer_private* buffer = (net_buffer_private*)_buffer;

	TRACE(("%ld: free buffer %p\n", find_thread(NULL), buffer));
	T(Free(buffer));

	CHECK_BUFFER(buffer);
	DELETE_PARANOIA_CHECK_SET(buffer);

	while (data_node* node
			= (data_node*)list_remove_head_item(&buffer->buffers)) {
		remove_data_node(node);
	}

	delete_ancillary_data_container(buffer->ancillary_data);

	release_data_header(buffer->allocation_header);

	free_net_buffer(buffer);
}
1162 
1163 
/*!	Creates a duplicate of the \a buffer. The new buffer does not share internal
	storage; they are completely independent from each other.
	The payload and meta data are copied; ancillary data is not.
	\return the duplicate, or NULL if out of memory.
*/
static net_buffer*
duplicate_buffer(net_buffer* _buffer)
{
	net_buffer_private* buffer = (net_buffer_private*)_buffer;

	ParanoiaChecker _(buffer);

	TRACE(("%ld: duplicate_buffer(buffer %p)\n", find_thread(NULL), buffer));

	// TODO: We might want to choose a better header space. The minimal
	// one doesn't allow to prepend any data without allocating a new header.
	// The same holds for appending cloned data.
	net_buffer* duplicate = create_buffer(DATA_NODE_SIZE);
	if (duplicate == NULL)
		return NULL;

	TRACE(("%ld:   duplicate: %p)\n", find_thread(NULL), duplicate));

	// copy the data from the source buffer

	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
	while (node != NULL) {
		if (append_data(duplicate, node->start, node->used) < B_OK) {
			free_buffer(duplicate);
			CHECK_BUFFER(buffer);
			return NULL;
		}

		node = (data_node*)list_get_next_item(&buffer->buffers, node);
	}

	copy_metadata(duplicate, buffer);

	ASSERT(duplicate->size == buffer->size);
	CHECK_BUFFER(buffer);
	CHECK_BUFFER(duplicate);
	RUN_PARANOIA_CHECKS(duplicate);

	T(Duplicate(buffer, duplicate));

	return duplicate;
}
1209 
1210 
/*!	Clones the buffer by grabbing another reference to the underlying data.
	If that data changes, it will be changed in the clone as well.

	If \a shareFreeSpace is \c true, the cloned buffer may claim the free
	space in the original buffer as the original buffer can still do. If you
	are using this, it's your responsibility that only one of the buffers
	will do this.

	NOTE(review): the active implementation below never reads
	\a shareFreeSpace; only the disabled (#if 0) implementation does.
*/
static net_buffer*
clone_buffer(net_buffer* _buffer, bool shareFreeSpace)
{
	// TODO: See, if the commented out code can be fixed in a safe way. We could
	// probably place cloned nodes on a header not belonging to our buffer, if
	// we don't free the header space for the node when removing it. Otherwise we
	// mess with the header's free list which might at the same time be accessed
	// by another thread.
	net_buffer_private* buffer = (net_buffer_private*)_buffer;

	// Create an empty buffer and reference-clone the whole payload into it.
	net_buffer* clone = create_buffer(MAX_FREE_BUFFER_SIZE);
	if (clone == NULL)
		return NULL;

	if (append_cloned_data(clone, buffer, 0, buffer->size) != B_OK) {
		free_buffer(clone);
		return NULL;
	}

	// Addresses, flags, etc. are copied; the data itself stays shared.
	copy_metadata(clone, buffer);
	ASSERT(clone->size == buffer->size);

	return clone;

	// The code below is the original, currently unsafe implementation that
	// placed the cloned nodes directly on the source buffer's headers (see
	// the TODO above). It is kept for reference only.
#if 0
	ParanoiaChecker _(buffer);

	TRACE(("%ld: clone_buffer(buffer %p)\n", find_thread(NULL), buffer));

	net_buffer_private* clone = allocate_net_buffer();
	if (clone == NULL)
		return NULL;

	TRACE(("%ld:   clone: %p\n", find_thread(NULL), buffer));

	data_node* sourceNode = (data_node*)list_get_first_item(&buffer->buffers);
	if (sourceNode == NULL) {
		free_net_buffer(clone);
		return NULL;
	}

	clone->source = (sockaddr*)&clone->storage.source;
	clone->destination = (sockaddr*)&clone->storage.destination;

	list_init(&clone->buffers);

	// grab reference to this buffer - all additional nodes will get
	// theirs in add_data_node()
	acquire_data_header(sourceNode->header);
	data_node* node = &clone->first_node;
	node->header = sourceNode->header;
	node->located = NULL;
	node->used_header_space = &node->own_header_space;

	while (sourceNode != NULL) {
		node->start = sourceNode->start;
		node->used = sourceNode->used;
		node->offset = sourceNode->offset;

		if (shareFreeSpace) {
			// both buffers could claim the free space - note that this option
			// has to be used carefully
			node->used_header_space = &sourceNode->header->space;
			node->tail_space = sourceNode->tail_space;
		} else {
			// the free space stays with the original buffer
			node->used_header_space->size = 0;
			node->used_header_space->free = 0;
			node->tail_space = 0;
		}

		// add node to clone's list of buffers
		list_add_item(&clone->buffers, node);

		sourceNode = (data_node*)list_get_next_item(&buffer->buffers,
			sourceNode);
		if (sourceNode == NULL)
			break;

		node = add_data_node(sourceNode->header);
		if (node == NULL) {
			// There was not enough space left for another node in this buffer
			// TODO: handle this case!
			panic("clone buffer hits size limit... (fix me)");
			free_net_buffer(clone);
			return NULL;
		}
	}

	copy_metadata(clone, buffer);

	ASSERT(clone->size == buffer->size);
	CREATE_PARANOIA_CHECK_SET(clone, "net_buffer");
	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, clone, &clone->size,
		sizeof(clone->size));
	CHECK_BUFFER(buffer);
	CHECK_BUFFER(clone);

	T(Clone(buffer, shareFreeSpace, clone));

	return clone;
#endif
}
1322 
1323 
1324 /*!	Split the buffer at offset, the header data
1325 	is returned as new buffer.
1326 */
1327 static net_buffer*
1328 split_buffer(net_buffer* from, uint32 offset)
1329 {
1330 	net_buffer* buffer = create_buffer(DATA_NODE_SIZE);
1331 	if (buffer == NULL)
1332 		return NULL;
1333 
1334 	copy_metadata(buffer, from);
1335 
1336 	ParanoiaChecker _(from);
1337 	ParanoiaChecker _2(buffer);
1338 
1339 	TRACE(("%ld: split_buffer(buffer %p -> %p, offset %ld)\n",
1340 		find_thread(NULL), from, buffer, offset));
1341 
1342 	if (append_data_from_buffer(buffer, from, offset) == B_OK) {
1343 		if (remove_header(from, offset) == B_OK) {
1344 			CHECK_BUFFER(from);
1345 			CHECK_BUFFER(buffer);
1346 			T(Split(from, offset, buffer));
1347 			return buffer;
1348 		}
1349 	}
1350 
1351 	free_buffer(buffer);
1352 	CHECK_BUFFER(from);
1353 	return NULL;
1354 }
1355 
1356 
/*!	Merges the second buffer with the first. If \a after is \c true, the
	second buffer's contents will be appended to the first ones, else they
	will be prepended.
	The second buffer will be freed if this function succeeds.
	\return B_BAD_VALUE if \a _with is \c NULL, ENOBUFS if a node could
		not be re-homed into \a _buffer (in which case both buffers may be
		left in an inconsistent state, see TODO below).
*/
static status_t
merge_buffer(net_buffer* _buffer, net_buffer* _with, bool after)
{
	net_buffer_private* buffer = (net_buffer_private*)_buffer;
	net_buffer_private* with = (net_buffer_private*)_with;
	if (with == NULL)
		return B_BAD_VALUE;

	TRACE(("%ld: merge buffer %p with %p (%s)\n", find_thread(NULL), buffer,
		with, after ? "after" : "before"));
	T(Merge(buffer, with, after));
	//dump_buffer(buffer);
	//dprintf("with:\n");
	//dump_buffer(with);

	ParanoiaChecker _(buffer);
	CHECK_BUFFER(buffer);
	CHECK_BUFFER(with);

	// TODO: this is currently very simplistic, I really need to finish the
	//	harder part of this implementation (data_node management per header)

	data_node* before = NULL;

	// TODO: Do allocating nodes (the only part that can fail) upfront. Put them
	// in a list, so we can easily clean up, if necessary.

	if (!after) {
		// When prepending, shift the offsets of the nodes already in the
		// buffer by the size of the incoming data, and remember the first
		// node so the new ones can be inserted in front of it.
		// change offset of all nodes already in the buffer
		data_node* node = NULL;
		while (true) {
			node = (data_node*)list_get_next_item(&buffer->buffers, node);
			if (node == NULL)
				break;

			node->offset += with->size;
			if (before == NULL)
				before = node;
		}
	}

	data_node* last = NULL;

	while (true) {
		data_node* node = (data_node*)list_get_next_item(&with->buffers, last);
		if (node == NULL)
			break;

		if ((uint8*)node > (uint8*)node->header
			&& (uint8*)node < (uint8*)node->header + BUFFER_SIZE) {
			// The node structure lives inside its own data header's buffer,
			// so it survives freeing "with" and can simply change owner.
			// The node is already in the buffer, we can just move it
			// over to the new owner
			list_remove_item(&with->buffers, node);
			with->size -= node->used;
		} else {
			// we need a new place for this node
			data_node* newNode = add_data_node(buffer, node->header);
			if (newNode == NULL) {
				// TODO: try to revert buffers to their initial state!!
				return ENOBUFS;
			}

			// Note: "last" is only advanced here; a moved node was removed
			// from the list above, so iteration restarts from "last" either
			// way without skipping entries.
			last = node;
			*newNode = *node;
			node = newNode;
				// the old node will get freed with its buffer
		}

		if (after) {
			list_add_item(&buffer->buffers, node);
			node->offset = buffer->size;
		} else
			list_insert_item_before(&buffer->buffers, before, node);

		buffer->size += node->used;
	}

	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
		sizeof(buffer->size));

	// the data has been merged completely at this point
	free_buffer(with);

	//dprintf(" merge result:\n");
	//dump_buffer(buffer);
	CHECK_BUFFER(buffer);

	return B_OK;
}
1451 
1452 
1453 /*!	Writes into existing allocated memory.
1454 	\return B_BAD_VALUE if you write outside of the buffers current
1455 		bounds.
1456 */
1457 static status_t
1458 write_data(net_buffer* _buffer, size_t offset, const void* data, size_t size)
1459 {
1460 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1461 
1462 	T(Write(buffer, offset, data, size));
1463 
1464 	ParanoiaChecker _(buffer);
1465 
1466 	if (offset + size > buffer->size)
1467 		return B_BAD_VALUE;
1468 	if (size == 0)
1469 		return B_OK;
1470 
1471 	// find first node to write into
1472 	data_node* node = get_node_at_offset(buffer, offset);
1473 	if (node == NULL)
1474 		return B_BAD_VALUE;
1475 
1476 	offset -= node->offset;
1477 
1478 	while (true) {
1479 		size_t written = min_c(size, node->used - offset);
1480 		if (IS_USER_ADDRESS(data)) {
1481 			if (user_memcpy(node->start + offset, data, written) != B_OK)
1482 				return B_BAD_ADDRESS;
1483 		} else
1484 			memcpy(node->start + offset, data, written);
1485 
1486 		size -= written;
1487 		if (size == 0)
1488 			break;
1489 
1490 		offset = 0;
1491 		data = (void*)((uint8*)data + written);
1492 
1493 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
1494 		if (node == NULL)
1495 			return B_BAD_VALUE;
1496 	}
1497 
1498 	CHECK_BUFFER(buffer);
1499 
1500 	return B_OK;
1501 }
1502 
1503 
1504 static status_t
1505 read_data(net_buffer* _buffer, size_t offset, void* data, size_t size)
1506 {
1507 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1508 
1509 	T(Read(buffer, offset, data, size));
1510 
1511 	ParanoiaChecker _(buffer);
1512 
1513 	if (offset + size > buffer->size)
1514 		return B_BAD_VALUE;
1515 	if (size == 0)
1516 		return B_OK;
1517 
1518 	// find first node to read from
1519 	data_node* node = get_node_at_offset(buffer, offset);
1520 	if (node == NULL)
1521 		return B_BAD_VALUE;
1522 
1523 	offset -= node->offset;
1524 
1525 	while (true) {
1526 		size_t bytesRead = min_c(size, node->used - offset);
1527 		if (IS_USER_ADDRESS(data)) {
1528 			if (user_memcpy(data, node->start + offset, bytesRead) != B_OK)
1529 				return B_BAD_ADDRESS;
1530 		} else
1531 			memcpy(data, node->start + offset, bytesRead);
1532 
1533 		size -= bytesRead;
1534 		if (size == 0)
1535 			break;
1536 
1537 		offset = 0;
1538 		data = (void*)((uint8*)data + bytesRead);
1539 
1540 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
1541 		if (node == NULL)
1542 			return B_BAD_VALUE;
1543 	}
1544 
1545 	CHECK_BUFFER(buffer);
1546 
1547 	return B_OK;
1548 }
1549 
1550 
1551 static status_t
1552 prepend_size(net_buffer* _buffer, size_t size, void** _contiguousBuffer)
1553 {
1554 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1555 	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
1556 	if (node == NULL) {
1557 		node = add_first_data_node(buffer->allocation_header);
1558 		if (node == NULL)
1559 			return B_NO_MEMORY;
1560 	}
1561 
1562 	T(PrependSize(buffer, size));
1563 
1564 	ParanoiaChecker _(buffer);
1565 
1566 	TRACE(("%ld: prepend_size(buffer %p, size %ld) [has %u]\n",
1567 		find_thread(NULL), buffer, size, node->HeaderSpace()));
1568 	//dump_buffer(buffer);
1569 
1570 	if ((node->flags & DATA_NODE_STORED_HEADER) != 0) {
1571 		// throw any stored headers away
1572 		node->AddHeaderSpace(buffer->stored_header_length);
1573 		node->flags &= ~DATA_NODE_STORED_HEADER;
1574 		buffer->stored_header_length = 0;
1575 	}
1576 
1577 	if (node->HeaderSpace() < size) {
1578 		// we need to prepend new buffers
1579 
1580 		size_t bytesLeft = size;
1581 		size_t sizePrepended = 0;
1582 		do {
1583 			if (node->HeaderSpace() == 0) {
1584 				size_t headerSpace = MAX_FREE_BUFFER_SIZE;
1585 				data_header* header = create_data_header(headerSpace);
1586 				if (header == NULL) {
1587 					remove_header(buffer, sizePrepended);
1588 					return B_NO_MEMORY;
1589 				}
1590 
1591 				data_node* previous = node;
1592 
1593 				node = (data_node*)add_first_data_node(header);
1594 
1595 				list_insert_item_before(&buffer->buffers, previous, node);
1596 
1597 				// Release the initial reference to the header, so that it will
1598 				// be deleted when the node is removed.
1599 				release_data_header(header);
1600 			}
1601 
1602 			size_t willConsume = min_c(bytesLeft, node->HeaderSpace());
1603 
1604 			node->SubtractHeaderSpace(willConsume);
1605 			node->start -= willConsume;
1606 			node->used += willConsume;
1607 			bytesLeft -= willConsume;
1608 			sizePrepended += willConsume;
1609 		} while (bytesLeft > 0);
1610 
1611 		// correct data offset in all nodes
1612 
1613 		size_t offset = 0;
1614 		node = NULL;
1615 		while ((node = (data_node*)list_get_next_item(&buffer->buffers,
1616 				node)) != NULL) {
1617 			node->offset = offset;
1618 			offset += node->used;
1619 		}
1620 
1621 		if (_contiguousBuffer)
1622 			*_contiguousBuffer = NULL;
1623 	} else {
1624 		// the data fits into this buffer
1625 		node->SubtractHeaderSpace(size);
1626 		node->start -= size;
1627 		node->used += size;
1628 
1629 		if (_contiguousBuffer)
1630 			*_contiguousBuffer = node->start;
1631 
1632 		// adjust offset of following nodes
1633 		while ((node = (data_node*)list_get_next_item(&buffer->buffers, node))
1634 				!= NULL) {
1635 			node->offset += size;
1636 		}
1637 	}
1638 
1639 	buffer->size += size;
1640 
1641 	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1642 		sizeof(buffer->size));
1643 
1644 	//dprintf(" prepend_size result:\n");
1645 	//dump_buffer(buffer);
1646 	CHECK_BUFFER(buffer);
1647 	return B_OK;
1648 }
1649 
1650 
1651 static status_t
1652 prepend_data(net_buffer* buffer, const void* data, size_t size)
1653 {
1654 	void* contiguousBuffer;
1655 	status_t status = prepend_size(buffer, size, &contiguousBuffer);
1656 	if (status < B_OK)
1657 		return status;
1658 
1659 	if (contiguousBuffer) {
1660 		if (IS_USER_ADDRESS(data)) {
1661 			if (user_memcpy(contiguousBuffer, data, size) != B_OK)
1662 				return B_BAD_ADDRESS;
1663 		} else
1664 			memcpy(contiguousBuffer, data, size);
1665 	} else
1666 		write_data(buffer, 0, data, size);
1667 
1668 	//dprintf(" prepend result:\n");
1669 	//dump_buffer(buffer);
1670 
1671 	return B_OK;
1672 }
1673 
1674 
/*!	Appends \a size bytes of uninitialized space to the buffer. If the
	request fits into the tail space of the last node, a pointer to the
	contiguous space is stored in \a _contiguousBuffer; otherwise one or
	more new data headers are allocated and \c NULL is stored there.
	\return B_NO_MEMORY if a needed node or header could not be allocated
		(the buffer is reverted via remove_trailer() in that case).
*/
static status_t
append_size(net_buffer* _buffer, size_t size, void** _contiguousBuffer)
{
	net_buffer_private* buffer = (net_buffer_private*)_buffer;
	data_node* node = (data_node*)list_get_last_item(&buffer->buffers);
	if (node == NULL) {
		node = add_first_data_node(buffer->allocation_header);
		if (node == NULL)
			return B_NO_MEMORY;
	}

	T(AppendSize(buffer, size));

	ParanoiaChecker _(buffer);

	TRACE(("%ld: append_size(buffer %p, size %ld)\n", find_thread(NULL),
		buffer, size));
	//dump_buffer(buffer);

	if (node->TailSpace() < size) {
		// we need to append at least one new buffer
		uint32 previousTailSpace = node->TailSpace();
		uint32 headerSpace = DATA_NODE_SIZE;
		uint32 sizeUsed = MAX_FREE_BUFFER_SIZE - headerSpace;
			// payload capacity per newly created header

		// allocate space left in the node
		node->SetTailSpace(0);
		node->used += previousTailSpace;
		buffer->size += previousTailSpace;
		uint32 sizeAdded = previousTailSpace;
		SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
			sizeof(buffer->size));

		// allocate all buffers

		while (sizeAdded < size) {
			if (sizeAdded + sizeUsed > size) {
				// last data_header and not all available space is used
				sizeUsed = size - sizeAdded;
			}

			data_header* header = create_data_header(headerSpace);
			if (header == NULL) {
				// roll back everything appended so far
				remove_trailer(buffer, sizeAdded);
				return B_NO_MEMORY;
			}

			node = add_first_data_node(header);
			if (node == NULL) {
				release_data_header(header);
				return B_NO_MEMORY;
			}

			node->SetTailSpace(node->TailSpace() - sizeUsed);
			node->used = sizeUsed;
			node->offset = buffer->size;

			buffer->size += sizeUsed;
			sizeAdded += sizeUsed;
			SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
				sizeof(buffer->size));

			list_add_item(&buffer->buffers, node);

			// Release the initial reference to the header, so that it will
			// be deleted when the node is removed.
			release_data_header(header);
		}

		if (_contiguousBuffer)
			*_contiguousBuffer = NULL;

		//dprintf(" append result 1:\n");
		//dump_buffer(buffer);
		CHECK_BUFFER(buffer);

		return B_OK;
	}

	// the data fits into this buffer
	node->SetTailSpace(node->TailSpace() - size);

	if (_contiguousBuffer)
		*_contiguousBuffer = node->start + node->used;

	node->used += size;
	buffer->size += size;
	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
		sizeof(buffer->size));

	//dprintf(" append result 2:\n");
	//dump_buffer(buffer);
	CHECK_BUFFER(buffer);

	return B_OK;
}
1771 
1772 
1773 static status_t
1774 append_data(net_buffer* buffer, const void* data, size_t size)
1775 {
1776 	size_t used = buffer->size;
1777 
1778 	void* contiguousBuffer;
1779 	status_t status = append_size(buffer, size, &contiguousBuffer);
1780 	if (status < B_OK)
1781 		return status;
1782 
1783 	if (contiguousBuffer) {
1784 		if (IS_USER_ADDRESS(data)) {
1785 			if (user_memcpy(contiguousBuffer, data, size) != B_OK)
1786 				return B_BAD_ADDRESS;
1787 		} else
1788 			memcpy(contiguousBuffer, data, size);
1789 	} else
1790 		write_data(buffer, used, data, size);
1791 
1792 	return B_OK;
1793 }
1794 
1795 
1796 /*!	Removes bytes from the beginning of the buffer.
1797 */
1798 static status_t
1799 remove_header(net_buffer* _buffer, size_t bytes)
1800 {
1801 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1802 
1803 	T(RemoveHeader(buffer, bytes));
1804 
1805 	ParanoiaChecker _(buffer);
1806 
1807 	if (bytes > buffer->size)
1808 		return B_BAD_VALUE;
1809 
1810 	TRACE(("%ld: remove_header(buffer %p, %ld bytes)\n", find_thread(NULL),
1811 		buffer, bytes));
1812 	//dump_buffer(buffer);
1813 
1814 	size_t left = bytes;
1815 	data_node* node = NULL;
1816 
1817 	while (left >= 0) {
1818 		node = (data_node*)list_get_first_item(&buffer->buffers);
1819 		if (node == NULL) {
1820 			if (left == 0)
1821 				break;
1822 			CHECK_BUFFER(buffer);
1823 			return B_ERROR;
1824 		}
1825 
1826 		if (node->used > left)
1827 			break;
1828 
1829 		// node will be removed completely
1830 		list_remove_item(&buffer->buffers, node);
1831 		left -= node->used;
1832 		remove_data_node(node);
1833 		node = NULL;
1834 		buffer->stored_header_length = 0;
1835 	}
1836 
1837 	// cut remaining node, if any
1838 
1839 	if (node != NULL) {
1840 		size_t cut = min_c(node->used, left);
1841 		node->offset = 0;
1842 		node->start += cut;
1843 		if ((node->flags & DATA_NODE_STORED_HEADER) != 0)
1844 			buffer->stored_header_length += cut;
1845 		else
1846 			node->AddHeaderSpace(cut);
1847 		node->used -= cut;
1848 
1849 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
1850 	}
1851 
1852 	// adjust offset of following nodes
1853 	while (node != NULL) {
1854 		node->offset -= bytes;
1855 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
1856 	}
1857 
1858 	buffer->size -= bytes;
1859 	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1860 		sizeof(buffer->size));
1861 
1862 	//dprintf(" remove result:\n");
1863 	//dump_buffer(buffer);
1864 	CHECK_BUFFER(buffer);
1865 
1866 	return B_OK;
1867 }
1868 
1869 
1870 /*!	Removes bytes from the end of the buffer.
1871 */
1872 static status_t
1873 remove_trailer(net_buffer* buffer, size_t bytes)
1874 {
1875 	return trim_data(buffer, buffer->size - bytes);
1876 }
1877 
1878 
/*!	Trims the buffer to the specified \a newSize by removing space from
	the end of the buffer. The node containing the new end is shrunk in
	place; all nodes after it are removed entirely.
	\return B_BAD_VALUE if \a newSize exceeds the current buffer size.
*/
static status_t
trim_data(net_buffer* _buffer, size_t newSize)
{
	net_buffer_private* buffer = (net_buffer_private*)_buffer;
	TRACE(("%ld: trim_data(buffer %p, newSize = %ld, buffer size = %ld)\n",
		find_thread(NULL), buffer, newSize, buffer->size));
	T(Trim(buffer, newSize));
	//dump_buffer(buffer);

	ParanoiaChecker _(buffer);

	if (newSize > buffer->size)
		return B_BAD_VALUE;
	if (newSize == buffer->size)
		return B_OK;

	// find the node that contains the new end of the buffer
	data_node* node = get_node_at_offset(buffer, newSize);
	if (node == NULL) {
		// trim size greater than buffer size
		return B_BAD_VALUE;
	}

	// give the cut-off part of this node back as tail space
	int32 diff = node->used + node->offset - newSize;
	node->SetTailSpace(node->TailSpace() + diff);
	node->used -= diff;

	// If this node still holds data, it stays; deletion starts with the
	// next node. (If it was emptied completely, it is removed as well.)
	if (node->used > 0)
		node = (data_node*)list_get_next_item(&buffer->buffers, node);

	// drop all remaining nodes; fetch the successor before the removal
	// invalidates the current node
	while (node != NULL) {
		data_node* next = (data_node*)list_get_next_item(&buffer->buffers, node);
		list_remove_item(&buffer->buffers, node);
		remove_data_node(node);

		node = next;
	}

	buffer->size = newSize;
	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
		sizeof(buffer->size));

	//dprintf(" trim result:\n");
	//dump_buffer(buffer);
	CHECK_BUFFER(buffer);

	return B_OK;
}
1929 
1930 
1931 /*!	Appends data coming from buffer \a source to the buffer \a buffer. It only
1932 	clones the data, though, that is the data is not copied, just referenced.
1933 */
1934 static status_t
1935 append_cloned_data(net_buffer* _buffer, net_buffer* _source, uint32 offset,
1936 	size_t bytes)
1937 {
1938 	if (bytes == 0)
1939 		return B_OK;
1940 
1941 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1942 	net_buffer_private* source = (net_buffer_private*)_source;
1943 	TRACE(("%ld: append_cloned_data(buffer %p, source %p, offset = %ld, "
1944 		"bytes = %ld)\n", find_thread(NULL), buffer, source, offset, bytes));
1945 	T(AppendCloned(buffer, source, offset, bytes));
1946 
1947 	ParanoiaChecker _(buffer);
1948 	ParanoiaChecker _2(source);
1949 
1950 	if (source->size < offset + bytes || source->size < offset)
1951 		return B_BAD_VALUE;
1952 
1953 	// find data_node to start with from the source buffer
1954 	data_node* node = get_node_at_offset(source, offset);
1955 	if (node == NULL) {
1956 		// trim size greater than buffer size
1957 		return B_BAD_VALUE;
1958 	}
1959 
1960 	size_t sizeAppended = 0;
1961 
1962 	while (node != NULL && bytes > 0) {
1963 		data_node* clone = add_data_node(buffer, node->header);
1964 		if (clone == NULL) {
1965 			remove_trailer(buffer, sizeAppended);
1966 			return ENOBUFS;
1967 		}
1968 
1969 		if (offset)
1970 			offset -= node->offset;
1971 
1972 		clone->offset = buffer->size;
1973 		clone->start = node->start + offset;
1974 		clone->used = min_c(bytes, node->used - offset);
1975 		clone->flags |= DATA_NODE_READ_ONLY;
1976 
1977 		list_add_item(&buffer->buffers, clone);
1978 
1979 		offset = 0;
1980 		bytes -= clone->used;
1981 		buffer->size += clone->used;
1982 		sizeAppended += clone->used;
1983 		node = (data_node*)list_get_next_item(&source->buffers, node);
1984 	}
1985 
1986 	if (bytes != 0)
1987 		panic("add_cloned_data() failed, bytes != 0!\n");
1988 
1989 	//dprintf(" append cloned result:\n");
1990 	//dump_buffer(buffer);
1991 	CHECK_BUFFER(source);
1992 	CHECK_BUFFER(buffer);
1993 	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1994 		sizeof(buffer->size));
1995 
1996 	return B_OK;
1997 }
1998 
1999 
2000 void
2001 set_ancillary_data(net_buffer* buffer, ancillary_data_container* container)
2002 {
2003 	((net_buffer_private*)buffer)->ancillary_data = container;
2004 }
2005 
2006 
2007 ancillary_data_container*
2008 get_ancillary_data(net_buffer* buffer)
2009 {
2010 	return ((net_buffer_private*)buffer)->ancillary_data;
2011 }
2012 
2013 
2014 /*!	Moves all ancillary data from buffer \c from to the end of the list of
2015 	ancillary data of buffer \c to. Note, that this is the only function that
2016 	transfers or copies ancillary data from one buffer to another.
2017 
2018 	\param from The buffer from which to remove the ancillary data.
2019 	\param to The buffer to which to add the ancillary data.
2020 	\return A pointer to the first of the moved ancillary data, if any, \c NULL
2021 		otherwise.
2022 */
2023 static void*
2024 transfer_ancillary_data(net_buffer* _from, net_buffer* _to)
2025 {
2026 	net_buffer_private* from = (net_buffer_private*)_from;
2027 	net_buffer_private* to = (net_buffer_private*)_to;
2028 
2029 	if (from == NULL || to == NULL)
2030 		return NULL;
2031 
2032 	if (from->ancillary_data == NULL)
2033 		return NULL;
2034 
2035 	if (to->ancillary_data == NULL) {
2036 		// no ancillary data in the target buffer
2037 		to->ancillary_data = from->ancillary_data;
2038 		from->ancillary_data = NULL;
2039 		return next_ancillary_data(to->ancillary_data, NULL, NULL);
2040 	}
2041 
2042 	// both have ancillary data
2043 	void* data = move_ancillary_data(from->ancillary_data,
2044 		to->ancillary_data);
2045 	delete_ancillary_data_container(from->ancillary_data);
2046 	from->ancillary_data = NULL;
2047 
2048 	return data;
2049 }
2050 
2051 
2052 /*!	Stores the current header position; even if the header is removed with
2053 	remove_header(), you can still reclaim it later using restore_header(),
2054 	unless you prepended different data (in which case restoring will fail).
2055 */
2056 status_t
2057 store_header(net_buffer* _buffer)
2058 {
2059 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
2060 	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
2061 	if (node == NULL)
2062 		return B_ERROR;
2063 
2064 	if ((node->flags & DATA_NODE_STORED_HEADER) != 0) {
2065 		// Someone else already stored the header - since we cannot
2066 		// differentiate between them, we throw away everything
2067 		node->AddHeaderSpace(buffer->stored_header_length);
2068 		node->flags &= ~DATA_NODE_STORED_HEADER;
2069 		buffer->stored_header_length = 0;
2070 
2071 		return B_ERROR;
2072 	}
2073 
2074 	buffer->stored_header_length = 0;
2075 	node->flags |= DATA_NODE_STORED_HEADER;
2076 
2077 	return B_OK;
2078 }
2079 
2080 
2081 ssize_t
2082 stored_header_length(net_buffer* _buffer)
2083 {
2084 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
2085 	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
2086 	if (node == NULL || (node->flags & DATA_NODE_STORED_HEADER) == 0)
2087 		return B_BAD_VALUE;
2088 
2089 	return buffer->stored_header_length;
2090 }
2091 
2092 
/*!	Reads from the complete buffer with an eventually stored header.
	This function does not care whether or not there is a stored header at
	all - you have to use the stored_header_length() function to find out.
	Offsets below stored_header_length address the stored header; the
	remainder is read from the regular buffer contents via read_data().
*/
status_t
restore_header(net_buffer* _buffer, uint32 offset, void* data, size_t bytes)
{
	net_buffer_private* buffer = (net_buffer_private*)_buffer;

	if (offset < buffer->stored_header_length) {
		data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
		if (node == NULL
			|| offset + bytes > buffer->stored_header_length + buffer->size)
			return B_BAD_VALUE;

		// We have the data, so copy it out

		// The stored header bytes still live directly in front of
		// node->start (remove_header() only advanced the start pointer
		// without reclaiming that space), hence the negative offset here.
		size_t copied = std::min(bytes, buffer->stored_header_length - offset);
		memcpy(data, node->start + offset - buffer->stored_header_length,
			copied);

		if (copied == bytes)
			return B_OK;

		// continue with the regular buffer contents
		data = (uint8*)data + copied;
		bytes -= copied;
		offset = 0;
	} else
		offset -= buffer->stored_header_length;

	return read_data(_buffer, offset, data, bytes);
}
2125 
2126 
/*!	Copies from the complete \a source buffer with an eventually stored header
	to the specified target \a buffer.
	This function does not care whether or not there is a stored header at
	all - you have to use the stored_header_length() function to find out.
	The stored header part is copied; the rest is cloned (referenced) via
	append_cloned_data().
*/
status_t
append_restored_header(net_buffer* buffer, net_buffer* _source, uint32 offset,
	size_t bytes)
{
	net_buffer_private* source = (net_buffer_private*)_source;

	if (offset < source->stored_header_length) {
		data_node* node = (data_node*)list_get_first_item(&source->buffers);
		if (node == NULL
			|| offset + bytes > source->stored_header_length + source->size)
			return B_BAD_VALUE;

		// We have the data, so copy it out

		// The stored header bytes live directly in front of node->start,
		// hence the negative offset into the node here.
		size_t appended = std::min(bytes, source->stored_header_length - offset);
		status_t status = append_data(buffer,
			node->start + offset - source->stored_header_length, appended);
		if (status != B_OK)
			return status;

		if (appended == bytes)
			return B_OK;

		// continue with the regular buffer contents
		bytes -= appended;
		offset = 0;
	} else
		offset -= source->stored_header_length;

	return append_cloned_data(buffer, source, offset, bytes);
}
2162 
2163 
2164 /*!	Tries to directly access the requested space in the buffer.
2165 	If the space is contiguous, the function will succeed and place a pointer
2166 	to that space into \a _contiguousBuffer.
2167 
2168 	\return B_BAD_VALUE if the offset is outside of the buffer's bounds.
2169 	\return B_ERROR in case the buffer is not contiguous at that location.
2170 */
2171 static status_t
2172 direct_access(net_buffer* _buffer, uint32 offset, size_t size,
2173 	void** _contiguousBuffer)
2174 {
2175 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
2176 
2177 	ParanoiaChecker _(buffer);
2178 
2179 	//TRACE(("direct_access(buffer %p, offset %ld, size %ld)\n", buffer, offset,
2180 	//	size));
2181 
2182 	if (offset + size > buffer->size)
2183 		return B_BAD_VALUE;
2184 
2185 	// find node to access
2186 	data_node* node = get_node_at_offset(buffer, offset);
2187 	if (node == NULL)
2188 		return B_BAD_VALUE;
2189 
2190 	offset -= node->offset;
2191 
2192 	if (size > node->used - offset)
2193 		return B_ERROR;
2194 
2195 	*_contiguousBuffer = node->start + offset;
2196 	return B_OK;
2197 }
2198 
2199 
2200 static int32
2201 checksum_data(net_buffer* _buffer, uint32 offset, size_t size, bool finalize)
2202 {
2203 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
2204 
2205 	if (offset + size > buffer->size || size == 0)
2206 		return B_BAD_VALUE;
2207 
2208 	// find first node to read from
2209 	data_node* node = get_node_at_offset(buffer, offset);
2210 	if (node == NULL)
2211 		return B_ERROR;
2212 
2213 	offset -= node->offset;
2214 
2215 	// Since the maximum buffer size is 65536 bytes, it's impossible
2216 	// to overlap 32 bit - we don't need to handle this overlap in
2217 	// the loop, we can safely do it afterwards
2218 	uint32 sum = 0;
2219 
2220 	while (true) {
2221 		size_t bytes = min_c(size, node->used - offset);
2222 		if ((offset + node->offset) & 1) {
2223 			// if we're at an uneven offset, we have to swap the checksum
2224 			sum += __swap_int16(compute_checksum(node->start + offset, bytes));
2225 		} else
2226 			sum += compute_checksum(node->start + offset, bytes);
2227 
2228 		size -= bytes;
2229 		if (size == 0)
2230 			break;
2231 
2232 		offset = 0;
2233 
2234 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
2235 		if (node == NULL)
2236 			return B_ERROR;
2237 	}
2238 
2239 	while (sum >> 16) {
2240 		sum = (sum & 0xffff) + (sum >> 16);
2241 	}
2242 
2243 	if (!finalize)
2244 		return (uint16)sum;
2245 
2246 	return (uint16)~sum;
2247 }
2248 
2249 
2250 static uint32
2251 get_iovecs(net_buffer* _buffer, struct iovec* iovecs, uint32 vecCount)
2252 {
2253 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
2254 	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
2255 	uint32 count = 0;
2256 
2257 	while (node != NULL && count < vecCount) {
2258 		if (node->used > 0) {
2259 			iovecs[count].iov_base = node->start;
2260 			iovecs[count].iov_len = node->used;
2261 			count++;
2262 		}
2263 
2264 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
2265 	}
2266 
2267 	return count;
2268 }
2269 
2270 
2271 static uint32
2272 count_iovecs(net_buffer* _buffer)
2273 {
2274 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
2275 	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
2276 	uint32 count = 0;
2277 
2278 	while (node != NULL) {
2279 		if (node->used > 0)
2280 			count++;
2281 
2282 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
2283 	}
2284 
2285 	return count;
2286 }
2287 
2288 
2289 static void
2290 swap_addresses(net_buffer* buffer)
2291 {
2292 	std::swap(buffer->source, buffer->destination);
2293 }
2294 
2295 
2296 static status_t
2297 std_ops(int32 op, ...)
2298 {
2299 	switch (op) {
2300 		case B_MODULE_INIT:
2301 			// TODO: improve our code a bit so we can add constructors
2302 			//	and keep around half-constructed buffers in the slab
2303 
2304 			sNetBufferCache = create_object_cache("net buffer cache",
2305 				sizeof(net_buffer_private), 8, NULL, NULL, NULL);
2306 			if (sNetBufferCache == NULL)
2307 				return B_NO_MEMORY;
2308 
2309 			sDataNodeCache = create_object_cache("data node cache", BUFFER_SIZE,
2310 				0, NULL, NULL, NULL);
2311 			if (sDataNodeCache == NULL) {
2312 				delete_object_cache(sNetBufferCache);
2313 				return B_NO_MEMORY;
2314 			}
2315 
2316 #if ENABLE_STATS
2317 			add_debugger_command_etc("net_buffer_stats", &dump_net_buffer_stats,
2318 				"Print net buffer statistics",
2319 				"\nPrint net buffer statistics.\n", 0);
2320 #endif
2321 #if ENABLE_DEBUGGER_COMMANDS
2322 			add_debugger_command_etc("net_buffer", &dump_net_buffer,
2323 				"Dump net buffer",
2324 				"\nDump the net buffer's internal structures.\n", 0);
2325 #endif
2326 			return B_OK;
2327 
2328 		case B_MODULE_UNINIT:
2329 #if ENABLE_STATS
2330 			remove_debugger_command("net_buffer_stats", &dump_net_buffer_stats);
2331 #endif
2332 #if ENABLE_DEBUGGER_COMMANDS
2333 			remove_debugger_command("net_buffer", &dump_net_buffer);
2334 #endif
2335 			delete_object_cache(sNetBufferCache);
2336 			delete_object_cache(sDataNodeCache);
2337 			return B_OK;
2338 
2339 		default:
2340 			return B_ERROR;
2341 	}
2342 }
2343 
2344 
/*!	The module's exported function table; published under
	NET_BUFFER_MODULE_NAME so the rest of the networking stack can use
	these operations. Entries set to NULL are not implemented.
*/
net_buffer_module_info gNetBufferModule = {
	{
		NET_BUFFER_MODULE_NAME,
		0,
		std_ops
	},
	create_buffer,
	free_buffer,

	duplicate_buffer,
	clone_buffer,
	split_buffer,
	merge_buffer,

	prepend_size,
	prepend_data,
	append_size,
	append_data,
	NULL,	// insert
	NULL,	// remove
	remove_header,
	remove_trailer,
	trim_data,
	append_cloned_data,

	NULL,	// associate_data

	set_ancillary_data,
	get_ancillary_data,
	transfer_ancillary_data,

	store_header,
	stored_header_length,
	restore_header,
	append_restored_header,

	direct_access,
	read_data,
	write_data,

	checksum_data,

	NULL,	// get_memory_map
	get_iovecs,
	count_iovecs,

	swap_addresses,

	dump_buffer,	// dump
};
2395 
2396