xref: /haiku/src/add-ons/kernel/network/stack/net_buffer.cpp (revision ed24eb5ff12640d052171c6a7feba37fab8a75d1)
1 /*
2  * Copyright 2006-2016, Haiku, Inc. All Rights Reserved.
3  * Distributed under the terms of the MIT License.
4  *
5  * Authors:
6  *		Axel Dörfler, axeld@pinc-software.de
7  *		Ingo Weinhold, ingo_weinhold@gmx.de
8  */
9 
10 
11 #include "utility.h"
12 
13 #include <net_buffer.h>
14 #include <slab/Slab.h>
15 #include <tracing.h>
16 #include <util/list.h>
17 
18 #include <ByteOrder.h>
19 #include <debug.h>
20 #include <kernel.h>
21 #include <KernelExport.h>
22 #include <util/DoublyLinkedList.h>
23 
24 #include <algorithm>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <sys/param.h>
28 #include <sys/uio.h>
29 
30 #include "ancillary_data.h"
31 #include "interfaces.h"
32 
33 #include "paranoia_config.h"
34 
35 
36 //#define TRACE_BUFFER
37 #ifdef TRACE_BUFFER
38 #	define TRACE(x) dprintf x
39 #else
40 #	define TRACE(x) ;
41 #endif
42 
43 #define BUFFER_SIZE 2048
44 	// maximum implementation derived buffer size is 65536
45 
46 #define ENABLE_DEBUGGER_COMMANDS	1
47 #define ENABLE_STATS				1
48 #define PARANOID_BUFFER_CHECK		NET_BUFFER_PARANOIA
49 
50 #define COMPONENT_PARANOIA_LEVEL	NET_BUFFER_PARANOIA
51 #include <debug_paranoia.h>
52 
53 #define DATA_NODE_READ_ONLY		0x1
54 #define DATA_NODE_STORED_HEADER	0x2
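// DATA_NODE_READ_ONLY marks nodes that merely reference data owned by another
// buffer's header (see append_cloned_data()); such nodes must not claim any
// header or tail space. DATA_NODE_STORED_HEADER marks the first node of a
// buffer whose removed header bytes are still kept in front of node->start
// (see store_header()/restore_header()).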
55 
56 struct header_space {
57 	uint16	size;
58 	uint16	free;
59 };
60 
61 struct free_data {
62 	struct free_data* next;
63 	uint16			size;
64 };
65 
66 struct data_header {
67 	int32			ref_count;
68 	addr_t			physical_address;
69 	free_data*		first_free;
70 	uint8*			data_end;
71 	header_space	space;
72 	uint16			tail_space;
73 };
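// A data_header sits at the start of each BUFFER_SIZE (2048 byte) chunk
// allocated from the object cache. Rough layout of such a chunk:
//
//	data_header | node structs and other metadata (growing upwards from
//	data_end) | free header space | packet data (node->start .. + used) |
//	tail space
//
// Prepended data grows downwards into the free header space; appended data
// grows into the tail space.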
74 
75 struct data_node {
76 	struct list_link link;
77 	struct data_header* header;
78 	struct data_header* located;
79 	size_t			offset;		// the net_buffer-wide offset of this node
80 	uint8*			start;		// points to the start of the data
81 	uint16			flags;
82 	uint16			used;		// defines how much memory is used by this node
83 
84 	uint16 HeaderSpace() const
85 	{
86 		if ((flags & DATA_NODE_READ_ONLY) != 0)
87 			return 0;
88 		return header->space.free;
89 	}
90 
91 	void AddHeaderSpace(uint16 toAdd)
92 	{
93 		if ((flags & DATA_NODE_READ_ONLY) == 0) {
94 			header->space.size += toAdd;
95 			header->space.free += toAdd;
96 		}
97 	}
98 
99 	void SubtractHeaderSpace(uint16 toSubtract)
100 	{
101 		if ((flags & DATA_NODE_READ_ONLY) == 0) {
102 			header->space.size -= toSubtract;
103 			header->space.free -= toSubtract;
104 		}
105 	}
106 
107 	uint16 TailSpace() const
108 	{
109 		if ((flags & DATA_NODE_READ_ONLY) != 0)
110 			return 0;
111 		return header->tail_space;
112 	}
113 
114 	void SetTailSpace(uint16 space)
115 	{
116 		if ((flags & DATA_NODE_READ_ONLY) == 0)
117 			header->tail_space = space;
118 	}
119 
120 	void FreeSpace()
121 	{
122 		if ((flags & DATA_NODE_READ_ONLY) == 0) {
123 			uint16 space = used + header->tail_space;
124 			header->space.size += space;
125 			header->space.free += space;
126 			header->tail_space = 0;
127 		}
128 	}
129 };
130 
131 
132 // TODO: we should think about moving the address fields into the buffer
133 // data itself via associated data or something like this. Or this
134 // structure as a whole, too...
135 struct net_buffer_private : net_buffer {
136 	struct list					buffers;
137 	data_header*				allocation_header;
138 		// the current place where we allocate header space (nodes, ...)
139 	ancillary_data_container*	ancillary_data;
140 	size_t						stored_header_length;
141 
142 	struct {
143 		struct sockaddr_storage	source;
144 		struct sockaddr_storage	destination;
145 	} storage;
146 };
147 
148 
149 #define DATA_HEADER_SIZE				_ALIGN(sizeof(data_header))
150 #define DATA_NODE_SIZE					_ALIGN(sizeof(data_node))
151 #define MAX_FREE_BUFFER_SIZE			(BUFFER_SIZE - DATA_HEADER_SIZE)
152 
153 
154 static object_cache* sNetBufferCache;
155 static object_cache* sDataNodeCache;
156 
157 
158 static status_t append_data(net_buffer* buffer, const void* data, size_t size);
159 static status_t trim_data(net_buffer* _buffer, size_t newSize);
160 static status_t remove_header(net_buffer* _buffer, size_t bytes);
161 static status_t remove_trailer(net_buffer* _buffer, size_t bytes);
162 static status_t append_cloned_data(net_buffer* _buffer, net_buffer* _source,
163 					uint32 offset, size_t bytes);
164 static status_t read_data(net_buffer* _buffer, size_t offset, void* data,
165 					size_t size);
166 
167 
168 #if ENABLE_STATS
169 static int32 sAllocatedDataHeaderCount = 0;
170 static int32 sAllocatedNetBufferCount = 0;
171 static int32 sEverAllocatedDataHeaderCount = 0;
172 static int32 sEverAllocatedNetBufferCount = 0;
173 static int32 sMaxAllocatedDataHeaderCount = 0;
174 static int32 sMaxAllocatedNetBufferCount = 0;
175 #endif
176 
177 
178 #if NET_BUFFER_TRACING
179 
180 
181 namespace NetBufferTracing {
182 
183 
184 class NetBufferTraceEntry : public AbstractTraceEntry {
185 public:
186 	NetBufferTraceEntry(net_buffer* buffer)
187 		:
188 		fBuffer(buffer)
189 	{
190 #if NET_BUFFER_TRACING_STACK_TRACE
191 		fStackTrace = capture_tracing_stack_trace(
192 			NET_BUFFER_TRACING_STACK_TRACE, 0, false);
193 #endif
194 	}
195 
196 #if NET_BUFFER_TRACING_STACK_TRACE
197 	virtual void DumpStackTrace(TraceOutput& out)
198 	{
199 		out.PrintStackTrace(fStackTrace);
200 	}
201 #endif
202 
203 protected:
204 	net_buffer*	fBuffer;
205 #if NET_BUFFER_TRACING_STACK_TRACE
206 	tracing_stack_trace* fStackTrace;
207 #endif
208 };
209 
210 
211 class Create : public NetBufferTraceEntry {
212 public:
213 	Create(size_t headerSpace, net_buffer* buffer)
214 		:
215 		NetBufferTraceEntry(buffer),
216 		fHeaderSpace(headerSpace)
217 	{
218 		Initialized();
219 	}
220 
221 	virtual void AddDump(TraceOutput& out)
222 	{
223 		out.Print("net buffer create: header space: %lu -> buffer: %p",
224 			fHeaderSpace, fBuffer);
225 	}
226 
227 private:
228 	size_t		fHeaderSpace;
229 };
230 
231 
232 class Free : public NetBufferTraceEntry {
233 public:
234 	Free(net_buffer* buffer)
235 		:
236 		NetBufferTraceEntry(buffer)
237 	{
238 		Initialized();
239 	}
240 
241 	virtual void AddDump(TraceOutput& out)
242 	{
243 		out.Print("net buffer free: buffer: %p", fBuffer);
244 	}
245 };
246 
247 
248 class Duplicate : public NetBufferTraceEntry {
249 public:
250 	Duplicate(net_buffer* buffer, net_buffer* clone)
251 		:
252 		NetBufferTraceEntry(buffer),
253 		fClone(clone)
254 	{
255 		Initialized();
256 	}
257 
258 	virtual void AddDump(TraceOutput& out)
259 	{
260 		out.Print("net buffer dup: buffer: %p -> %p", fBuffer, fClone);
261 	}
262 
263 private:
264 	net_buffer*		fClone;
265 };
266 
267 
268 class Clone : public NetBufferTraceEntry {
269 public:
270 	Clone(net_buffer* buffer, bool shareFreeSpace, net_buffer* clone)
271 		:
272 		NetBufferTraceEntry(buffer),
273 		fClone(clone),
274 		fShareFreeSpace(shareFreeSpace)
275 	{
276 		Initialized();
277 	}
278 
279 	virtual void AddDump(TraceOutput& out)
280 	{
281 		out.Print("net buffer clone: buffer: %p, share free space: %s "
282 			"-> %p", fBuffer, fShareFreeSpace ? "true" : "false", fClone);
283 	}
284 
285 private:
286 	net_buffer*		fClone;
287 	bool			fShareFreeSpace;
288 };
289 
290 
291 class Split : public NetBufferTraceEntry {
292 public:
293 	Split(net_buffer* buffer, uint32 offset, net_buffer* newBuffer)
294 		:
295 		NetBufferTraceEntry(buffer),
296 		fNewBuffer(newBuffer),
297 		fOffset(offset)
298 	{
299 		Initialized();
300 	}
301 
302 	virtual void AddDump(TraceOutput& out)
303 	{
304 		out.Print("net buffer split: buffer: %p, offset: %lu "
305 			"-> %p", fBuffer, fOffset, fNewBuffer);
306 	}
307 
308 private:
309 	net_buffer*		fNewBuffer;
310 	uint32			fOffset;
311 };
312 
313 
314 class Merge : public NetBufferTraceEntry {
315 public:
316 	Merge(net_buffer* buffer, net_buffer* otherBuffer, bool after)
317 		:
318 		NetBufferTraceEntry(buffer),
319 		fOtherBuffer(otherBuffer),
320 		fAfter(after)
321 	{
322 		Initialized();
323 	}
324 
325 	virtual void AddDump(TraceOutput& out)
326 	{
327 		out.Print("net buffer merge: buffers: %p + %p, after: %s "
328 			"-> %p", fBuffer, fOtherBuffer, fAfter ? "true" : "false",
329 			fBuffer);
330 	}
331 
332 private:
333 	net_buffer*		fOtherBuffer;
334 	bool			fAfter;
335 };
336 
337 
338 class AppendCloned : public NetBufferTraceEntry {
339 public:
340 	AppendCloned(net_buffer* buffer, net_buffer* source, uint32 offset,
341 		size_t size)
342 		:
343 		NetBufferTraceEntry(buffer),
344 		fSource(source),
345 		fOffset(offset),
346 		fSize(size)
347 	{
348 		Initialized();
349 	}
350 
351 	virtual void AddDump(TraceOutput& out)
352 	{
353 		out.Print("net buffer append cloned: buffer: %p, from: %p, "
354 			"offset: %lu, size: %lu", fBuffer, fSource, fOffset, fSize);
355 	}
356 
357 private:
358 	net_buffer*		fSource;
359 	uint32			fOffset;
360 	size_t			fSize;
361 };
362 
363 
364 class PrependSize : public NetBufferTraceEntry {
365 public:
366 	PrependSize(net_buffer* buffer, size_t size)
367 		:
368 		NetBufferTraceEntry(buffer),
369 		fSize(size)
370 	{
371 		Initialized();
372 	}
373 
374 	virtual void AddDump(TraceOutput& out)
375 	{
376 		out.Print("net buffer prepend size: buffer: %p, size: %lu", fBuffer,
377 			fSize);
378 	}
379 
380 private:
381 	size_t			fSize;
382 };
383 
384 
385 class AppendSize : public NetBufferTraceEntry {
386 public:
387 	AppendSize(net_buffer* buffer, size_t size)
388 		:
389 		NetBufferTraceEntry(buffer),
390 		fSize(size)
391 	{
392 		Initialized();
393 	}
394 
395 	virtual void AddDump(TraceOutput& out)
396 	{
397 		out.Print("net buffer append size: buffer: %p, size: %lu", fBuffer,
398 			fSize);
399 	}
400 
401 private:
402 	size_t			fSize;
403 };
404 
405 
406 class RemoveHeader : public NetBufferTraceEntry {
407 public:
408 	RemoveHeader(net_buffer* buffer, size_t size)
409 		:
410 		NetBufferTraceEntry(buffer),
411 		fSize(size)
412 	{
413 		Initialized();
414 	}
415 
416 	virtual void AddDump(TraceOutput& out)
417 	{
418 		out.Print("net buffer remove header: buffer: %p, size: %lu",
419 			fBuffer, fSize);
420 	}
421 
422 private:
423 	size_t			fSize;
424 };
425 
426 
427 class Trim : public NetBufferTraceEntry {
428 public:
429 	Trim(net_buffer* buffer, size_t size)
430 		:
431 		NetBufferTraceEntry(buffer),
432 		fSize(size)
433 	{
434 		Initialized();
435 	}
436 
437 	virtual void AddDump(TraceOutput& out)
438 	{
439 		out.Print("net buffer trim: buffer: %p, size: %lu",
440 			fBuffer, fSize);
441 	}
442 
443 private:
444 	size_t			fSize;
445 };
446 
447 
448 class Read : public NetBufferTraceEntry {
449 public:
450 	Read(net_buffer* buffer, uint32 offset, void* data, size_t size)
451 		:
452 		NetBufferTraceEntry(buffer),
453 		fData(data),
454 		fOffset(offset),
455 		fSize(size)
456 	{
457 		Initialized();
458 	}
459 
460 	virtual void AddDump(TraceOutput& out)
461 	{
462 		out.Print("net buffer read: buffer: %p, offset: %lu, size: %lu, "
463 			"data: %p", fBuffer, fOffset, fSize, fData);
464 	}
465 
466 private:
467 	void*			fData;
468 	uint32			fOffset;
469 	size_t			fSize;
470 };
471 
472 
473 class Write : public NetBufferTraceEntry {
474 public:
475 	Write(net_buffer* buffer, uint32 offset, const void* data, size_t size)
476 		:
477 		NetBufferTraceEntry(buffer),
478 		fData(data),
479 		fOffset(offset),
480 		fSize(size)
481 	{
482 		Initialized();
483 	}
484 
485 	virtual void AddDump(TraceOutput& out)
486 	{
487 		out.Print("net buffer write: buffer: %p, offset: %lu, size: %lu, "
488 			"data: %p", fBuffer, fOffset, fSize, fData);
489 	}
490 
491 private:
492 	const void*		fData;
493 	uint32			fOffset;
494 	size_t			fSize;
495 };
496 
497 
498 #if NET_BUFFER_TRACING >= 2
499 
500 class DataHeaderTraceEntry : public AbstractTraceEntry {
501 public:
502 	DataHeaderTraceEntry(data_header* header)
503 		:
504 		fHeader(header)
505 	{
506 	}
507 
508 protected:
509 	data_header*	fHeader;
510 };
511 
512 
513 class CreateDataHeader : public DataHeaderTraceEntry {
514 public:
515 	CreateDataHeader(data_header* header)
516 		:
517 		DataHeaderTraceEntry(header)
518 	{
519 		Initialized();
520 	}
521 
522 	virtual void AddDump(TraceOutput& out)
523 	{
524 		out.Print("net buffer data header create:  header: %p", fHeader);
525 	}
526 };
527 
528 
529 class AcquireDataHeader : public DataHeaderTraceEntry {
530 public:
531 	AcquireDataHeader(data_header* header, int32 refCount)
532 		:
533 		DataHeaderTraceEntry(header),
534 		fRefCount(refCount)
535 	{
536 		Initialized();
537 	}
538 
539 	virtual void AddDump(TraceOutput& out)
540 	{
541 		out.Print("net buffer data header acquire: header: %p "
542 			"-> ref count: %ld", fHeader, fRefCount);
543 	}
544 
545 private:
546 	int32			fRefCount;
547 };
548 
549 
550 class ReleaseDataHeader : public DataHeaderTraceEntry {
551 public:
552 	ReleaseDataHeader(data_header* header, int32 refCount)
553 		:
554 		DataHeaderTraceEntry(header),
555 		fRefCount(refCount)
556 	{
557 		Initialized();
558 	}
559 
560 	virtual void AddDump(TraceOutput& out)
561 	{
562 		out.Print("net buffer data header release: header: %p "
563 			"-> ref count: %ld", fHeader, fRefCount);
564 	}
565 
566 private:
567 	int32			fRefCount;
568 };
569 
570 #	define T2(x)	new(std::nothrow) NetBufferTracing::x
571 #else
572 #	define T2(x)
573 #endif	// NET_BUFFER_TRACING >= 2
574 
575 }	// namespace NetBufferTracing
576 
577 #	define T(x)	new(std::nothrow) NetBufferTracing::x
578 
579 #else
580 #	define T(x)
581 #	define T2(x)
582 #endif	// NET_BUFFER_TRACING
583 
584 
585 static void
586 dump_address(const char* prefix, sockaddr* address,
587 	net_interface_address* interfaceAddress)
588 {
589 	if (address == NULL || address->sa_len == 0)
590 		return;
591 
592 	if (interfaceAddress == NULL || interfaceAddress->domain == NULL) {
593 		dprintf("  %s: length %u, family %u\n", prefix, address->sa_len,
594 			address->sa_family);
595 
596 		dump_block((char*)address + 2, address->sa_len - 2, "    ");
597 	} else {
598 		char buffer[64];
599 		interfaceAddress->domain->address_module->print_address_buffer(address,
600 			buffer, sizeof(buffer), true);
601 
602 		dprintf("  %s: %s\n", prefix, buffer);
603 	}
604 }
605 
606 
607 static void
608 dump_buffer(net_buffer* _buffer)
609 {
610 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
611 
612 	dprintf("buffer %p, size %" B_PRIu32 ", flags %" B_PRIx32 ", stored header "
613 		"%" B_PRIuSIZE ", interface address %p\n", buffer, buffer->size,
614 		buffer->flags, buffer->stored_header_length, buffer->interface_address);
615 
616 	dump_address("source", buffer->source, buffer->interface_address);
617 	dump_address("destination", buffer->destination, buffer->interface_address);
618 
619 	data_node* node = NULL;
620 	while ((node = (data_node*)list_get_next_item(&buffer->buffers, node))
621 			!= NULL) {
622 		dprintf("  node %p, offset %lu, used %u, header %u, tail %u, "
623 			"header %p\n", node, node->offset, node->used, node->HeaderSpace(),
624 			node->TailSpace(), node->header);
625 
626 		if ((node->flags & DATA_NODE_STORED_HEADER) != 0) {
627 			dump_block((char*)node->start - buffer->stored_header_length,
628 				min_c(buffer->stored_header_length, 64), "  s ");
629 		}
630 		dump_block((char*)node->start, min_c(node->used, 64), "    ");
631 	}
632 }
633 
634 #if ENABLE_DEBUGGER_COMMANDS
635 
636 static int
637 dump_net_buffer(int argc, char** argv)
638 {
639 	if (argc != 2) {
640 		kprintf("usage: %s [address]\n", argv[0]);
641 		return 0;
642 	}
643 
644 	dump_buffer((net_buffer*)parse_expression(argv[1]));
645 	return 0;
646 }
647 
648 #endif	// ENABLE_DEBUGGER_COMMANDS
649 
650 #if ENABLE_STATS
651 
652 static int
653 dump_net_buffer_stats(int argc, char** argv)
654 {
655 	kprintf("allocated data headers: %7" B_PRId32 " / %7" B_PRId32 ", peak %7"
656 		B_PRId32 "\n", sAllocatedDataHeaderCount, sEverAllocatedDataHeaderCount,
657 		sMaxAllocatedDataHeaderCount);
658 	kprintf("allocated net buffers:  %7" B_PRId32 " / %7" B_PRId32 ", peak %7"
659 		B_PRId32 "\n", sAllocatedNetBufferCount, sEverAllocatedNetBufferCount,
660 		sMaxAllocatedNetBufferCount);
661 	return 0;
662 }
663 
664 #endif	// ENABLE_STATS
665 
666 #if PARANOID_BUFFER_CHECK
667 
668 static void
669 check_buffer(net_buffer* _buffer)
670 {
671 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
672 
673 	// sum up the size of all nodes
674 	size_t size = 0;
675 
676 	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
677 	while (node != NULL) {
678 		if (node->offset != size) {
679 			panic("net_buffer %p: bad node %p offset (%lu vs. %lu)",
680 				buffer, node, node->offset, size);
681 			return;
682 		}
683 		size += node->used;
684 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
685 	}
686 
687 	if (size != buffer->size) {
688 		panic("net_buffer %p size != sum of its data node sizes (%lu vs. %lu)",
689 			buffer, buffer->size, size);
690 		return;
691 	}
692 }
693 
694 
695 #if 0
696 static void
697 check_buffer_contents(net_buffer* buffer, size_t offset, const void* data,
698 	size_t size)
699 {
700 	void* bufferData = malloc(size);
701 	if (bufferData == NULL)
702 		return;
703 
704 	if (read_data(buffer, offset, bufferData, size) == B_OK) {
705 		if (memcmp(bufferData, data, size) != 0) {
706 			int32 index = 0;
707 			while (((uint8*)data)[index] == ((uint8*)bufferData)[index])
708 				index++;
709 			panic("check_buffer_contents(): contents check failed at index "
710 				"%ld, buffer: %p, offset: %lu, size: %lu", index, buffer,
711 				offset, size);
712 		}
713 	} else {
714 		panic("failed to read from buffer %p, offset: %lu, size: %lu",
715 			buffer, offset, size);
716 	}
717 
718 	free(bufferData);
719 }
720 
721 
722 static void
723 check_buffer_contents(net_buffer* buffer, size_t offset, net_buffer* source,
724 	size_t sourceOffset, size_t size)
725 {
726 	void* bufferData = malloc(size);
727 	if (bufferData == NULL)
728 		return;
729 
730 	if (read_data(source, sourceOffset, bufferData, size) == B_OK) {
731 		check_buffer_contents(buffer, offset, bufferData, size);
732 	} else {
733 		panic("failed to read from source buffer %p, offset: %lu, size: %lu",
734 			source, sourceOffset, size);
735 	}
736 
737 	free(bufferData);
738 }
739 #endif
740 
741 
742 # 	define CHECK_BUFFER(buffer)	check_buffer(buffer)
743 #else
744 # 	define CHECK_BUFFER(buffer)	do {} while (false)
745 #endif	// PARANOID_BUFFER_CHECK
746 
747 
748 static inline data_header*
749 allocate_data_header()
750 {
751 #if ENABLE_STATS
752 	int32 current = atomic_add(&sAllocatedDataHeaderCount, 1) + 1;
753 	int32 max = atomic_get(&sMaxAllocatedDataHeaderCount);
754 	if (current > max)
755 		atomic_test_and_set(&sMaxAllocatedDataHeaderCount, current, max);
756 
757 	atomic_add(&sEverAllocatedDataHeaderCount, 1);
758 #endif
759 	return (data_header*)object_cache_alloc(sDataNodeCache, 0);
760 }
761 
762 
763 static inline net_buffer_private*
764 allocate_net_buffer()
765 {
766 #if ENABLE_STATS
767 	int32 current = atomic_add(&sAllocatedNetBufferCount, 1) + 1;
768 	int32 max = atomic_get(&sMaxAllocatedNetBufferCount);
769 	if (current > max)
770 		atomic_test_and_set(&sMaxAllocatedNetBufferCount, current, max);
771 
772 	atomic_add(&sEverAllocatedNetBufferCount, 1);
773 #endif
774 	return (net_buffer_private*)object_cache_alloc(sNetBufferCache, 0);
775 }
776 
777 
778 static inline void
779 free_data_header(data_header* header)
780 {
781 #if ENABLE_STATS
782 	if (header != NULL)
783 		atomic_add(&sAllocatedDataHeaderCount, -1);
784 #endif
785 	object_cache_free(sDataNodeCache, header, 0);
786 }
787 
788 
789 static inline void
790 free_net_buffer(net_buffer_private* buffer)
791 {
792 #if ENABLE_STATS
793 	if (buffer != NULL)
794 		atomic_add(&sAllocatedNetBufferCount, -1);
795 #endif
796 	object_cache_free(sNetBufferCache, buffer, 0);
797 }
798 
799 
800 static data_header*
801 create_data_header(size_t headerSpace)
802 {
803 	data_header* header = allocate_data_header();
804 	if (header == NULL)
805 		return NULL;
806 
807 	header->ref_count = 1;
808 	header->physical_address = 0;
809 		// TODO: initialize this correctly
810 	header->space.size = headerSpace;
811 	header->space.free = headerSpace;
812 	header->data_end = (uint8*)header + DATA_HEADER_SIZE;
813 	header->tail_space = (uint8*)header + BUFFER_SIZE - header->data_end
814 		- headerSpace;
815 	header->first_free = NULL;
816 
817 	TRACE(("%d:   create new data header %p\n", find_thread(NULL), header));
818 	T2(CreateDataHeader(header));
819 	return header;
820 }
821 
822 
823 static void
824 release_data_header(data_header* header)
825 {
826 	int32 refCount = atomic_add(&header->ref_count, -1);
827 	T2(ReleaseDataHeader(header, refCount - 1));
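	// Note: atomic_add() returns the previous value, so a result of 1 means
	// this was the last reference and the header can be freed below.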
828 	if (refCount != 1)
829 		return;
830 
831 	TRACE(("%d:   free header %p\n", find_thread(NULL), header));
832 	free_data_header(header);
833 }
834 
835 
836 inline void
837 acquire_data_header(data_header* header)
838 {
839 	int32 refCount = atomic_add(&header->ref_count, 1);
840 	(void)refCount;
841 	T2(AcquireDataHeader(header, refCount + 1));
842 }
843 
844 
845 static void
846 free_data_header_space(data_header* header, uint8* data, size_t size)
847 {
848 	if (size < sizeof(free_data))
849 		size = sizeof(free_data);
850 
851 	free_data* freeData = (free_data*)data;
852 	freeData->next = header->first_free;
853 	freeData->size = size;
854 
855 	header->first_free = freeData;
856 }
857 
858 
859 /*!	Tries to allocate \a size bytes from the free space in the header.
860 */
861 static uint8*
862 alloc_data_header_space(data_header* header, size_t size)
863 {
864 	if (size < sizeof(free_data))
865 		size = sizeof(free_data);
866 	size = _ALIGN(size);
867 
868 	if (header->first_free != NULL && header->first_free->size >= size) {
869 		// the first entry of the free list matches the allocation's needs
870 
871 		// TODO: If the free space is greater than what shall be allocated, we
872 		// leak the remainder of the space. We should only allocate multiples of
873 		// _ALIGN(sizeof(free_data)) and split free space in this case. It's not
874 		// that pressing, since the only thing allocated ATM are data_nodes, and
875 		// thus the free space entries will always have the right size.
876 		uint8* data = (uint8*)header->first_free;
877 		header->first_free = header->first_free->next;
878 		return data;
879 	}
880 
881 	if (header->space.free < size) {
882 		// not enough contiguous space left -- search the free list
883 		free_data* freeData = header->first_free;
884 		free_data* last = NULL;
885 		while (freeData != NULL) {
886 			if (last != NULL && freeData->size >= size) {
887 				// take this one
888 				last->next = freeData->next;
889 				return (uint8*)freeData;
890 			}
891 
892 			last = freeData;
893 			freeData = freeData->next;
894 		}
895 
896 		return NULL;
897 	}
898 
899 	// allocate new space
900 
901 	uint8* data = header->data_end;
902 	header->data_end += size;
903 	header->space.free -= size;
904 
905 	return data;
906 }
907 
908 
909 static uint8*
910 alloc_data_header_space(net_buffer_private* buffer, size_t size,
911 	data_header** _header = NULL)
912 {
913 	// try to allocate in our current allocation header
914 	uint8* allocated = alloc_data_header_space(buffer->allocation_header, size);
915 	if (allocated == NULL) {
916 		// not enough header space left -- create a fresh buffer for headers
917 		data_header* header = create_data_header(MAX_FREE_BUFFER_SIZE);
918 		if (header == NULL)
919 			return NULL;
920 
921 		// release our reference to the old header -- it will stay around
922 		// until the last reference to it is released
923 		release_data_header(buffer->allocation_header);
924 		buffer->allocation_header = header;
925 			// We keep the initial reference.
926 
927 		// now the allocation can only fail if size is too big
928 		allocated = alloc_data_header_space(buffer->allocation_header, size);
929 	}
930 
931 	if (_header != NULL)
932 		*_header = buffer->allocation_header;
933 
934 	return allocated;
935 }
936 
937 
938 static data_node*
939 add_first_data_node(data_header* header)
940 {
941 	data_node* node = (data_node*)alloc_data_header_space(header,
942 		sizeof(data_node));
943 	if (node == NULL)
944 		return NULL;
945 
946 	TRACE(("%d:   add first data node %p to header %p\n", find_thread(NULL),
947 		node, header));
948 
949 	acquire_data_header(header);
950 
951 	memset(node, 0, sizeof(struct data_node));
952 	node->located = header;
953 	node->header = header;
954 	node->offset = 0;
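	// The node's data area starts right after the reserved header space, so
	// that prepended data can later grow downwards into that space.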
955 	node->start = header->data_end + header->space.free;
956 	node->used = 0;
957 	node->flags = 0;
958 
959 	return node;
960 }
961 
962 
963 static data_node*
964 add_data_node(net_buffer_private* buffer, data_header* header)
965 {
966 	data_header* located;
967 	data_node* node = (data_node*)alloc_data_header_space(buffer,
968 		sizeof(data_node), &located);
969 	if (node == NULL)
970 		return NULL;
971 
972 	TRACE(("%d:   add data node %p to header %p\n", find_thread(NULL), node,
973 		header));
974 
975 	acquire_data_header(header);
976 	if (located != header)
977 		acquire_data_header(located);
978 
979 	memset(node, 0, sizeof(struct data_node));
980 	node->located = located;
981 	node->header = header;
982 	node->flags = 0;
983 	return node;
984 }
985 
986 
987 void
988 remove_data_node(data_node* node)
989 {
990 	data_header* located = node->located;
991 
992 	TRACE(("%d:   remove data node %p from header %p (located %p)\n",
993 		find_thread(NULL), node, node->header, located));
994 
995 	// Move all used and tail space to the header space, which is useful in case
996 	// this is the first node of a buffer (i.e. the header is an allocation
997 	// header).
998 	node->FreeSpace();
999 
1000 	if (located != node->header)
1001 		release_data_header(node->header);
1002 
1003 	if (located == NULL)
1004 		return;
1005 
1006 	free_data_header_space(located, (uint8*)node, sizeof(data_node));
1007 
1008 	release_data_header(located);
1009 }
1010 
1011 
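/*!	Returns the data node containing the byte at \a offset, or \c NULL if
	\a offset lies beyond the buffer's data.
*/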
1012 static inline data_node*
1013 get_node_at_offset(net_buffer_private* buffer, size_t offset)
1014 {
1015 	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
1016 	while (node != NULL && node->offset + node->used <= offset)
1017 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
1018 
1019 	return node;
1020 }
1021 
1022 
1023 /*!	Appends the first \a size bytes of the \a from net_buffer's data to the
1024 	\a to net_buffer. The source buffer remains unchanged.
1025 */
1026 static status_t
1027 append_data_from_buffer(net_buffer* to, const net_buffer* from, size_t size)
1028 {
1029 	net_buffer_private* source = (net_buffer_private*)from;
1030 	net_buffer_private* dest = (net_buffer_private*)to;
1031 
1032 	if (size > from->size)
1033 		return B_BAD_VALUE;
1034 	if (size == 0)
1035 		return B_OK;
1036 
1037 	data_node* nodeTo = get_node_at_offset(source, size);
1038 	if (nodeTo == NULL)
1039 		return B_BAD_VALUE;
1040 
1041 	data_node* node = (data_node*)list_get_first_item(&source->buffers);
1042 	if (node == NULL) {
1043 		CHECK_BUFFER(source);
1044 		return B_ERROR;
1045 	}
1046 
1047 	while (node != nodeTo) {
1048 		if (append_data(dest, node->start, node->used) < B_OK) {
1049 			CHECK_BUFFER(dest);
1050 			return B_ERROR;
1051 		}
1052 
1053 		node = (data_node*)list_get_next_item(&source->buffers, node);
1054 	}
1055 
1056 	int32 diff = node->offset + node->used - size;
1057 	if (append_data(dest, node->start, node->used - diff) < B_OK) {
1058 		CHECK_BUFFER(dest);
1059 		return B_ERROR;
1060 	}
1061 
1062 	CHECK_BUFFER(dest);
1063 
1064 	return B_OK;
1065 }
1066 
1067 
1068 static void
1069 copy_metadata(net_buffer* destination, const net_buffer* source)
1070 {
1071 	memcpy(destination->source, source->source,
1072 		min_c(source->source->sa_len, sizeof(sockaddr_storage)));
1073 	memcpy(destination->destination, source->destination,
1074 		min_c(source->destination->sa_len, sizeof(sockaddr_storage)));
1075 
1076 	destination->flags = source->flags;
1077 	destination->interface_address = source->interface_address;
1078 	if (destination->interface_address != NULL)
1079 		((InterfaceAddress*)destination->interface_address)->AcquireReference();
1080 
1081 	destination->offset = source->offset;
1082 	destination->protocol = source->protocol;
1083 	destination->type = source->type;
1084 }
1085 
1086 
1087 //	#pragma mark - module API
1088 
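// A rough usage sketch of the module API implemented below, expressed with
// this file's static helpers (callers normally reach them through the
// module's function table instead); "payload" and "payloadSize" are
// placeholders:
//
//	net_buffer* buffer = create_buffer(256);
//	if (buffer != NULL) {
//		if (append_data(buffer, payload, payloadSize) == B_OK) {
//			uint8 peek[4];
//			read_data(buffer, 0, peek, sizeof(peek));
//		}
//		free_buffer(buffer);
//	}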
1089 
1090 static net_buffer*
1091 create_buffer(size_t headerSpace)
1092 {
1093 	net_buffer_private* buffer = allocate_net_buffer();
1094 	if (buffer == NULL)
1095 		return NULL;
1096 
1097 	TRACE(("%d: create buffer %p\n", find_thread(NULL), buffer));
1098 
1099 	// Make sure headerSpace is valid and at least the initial node fits.
1100 	headerSpace = _ALIGN(headerSpace);
1101 	if (headerSpace < DATA_NODE_SIZE)
1102 		headerSpace = DATA_NODE_SIZE;
1103 	else if (headerSpace > MAX_FREE_BUFFER_SIZE)
1104 		headerSpace = MAX_FREE_BUFFER_SIZE;
1105 
1106 	data_header* header = create_data_header(headerSpace);
1107 	if (header == NULL) {
1108 		free_net_buffer(buffer);
1109 		return NULL;
1110 	}
1111 	buffer->allocation_header = header;
1112 
1113 	data_node* node = add_first_data_node(header);
1114 
1115 	list_init(&buffer->buffers);
1116 	list_add_item(&buffer->buffers, node);
1117 
1118 	buffer->ancillary_data = NULL;
1119 	buffer->stored_header_length = 0;
1120 
1121 	buffer->source = (sockaddr*)&buffer->storage.source;
1122 	buffer->destination = (sockaddr*)&buffer->storage.destination;
1123 
1124 	buffer->storage.source.ss_len = 0;
1125 	buffer->storage.destination.ss_len = 0;
1126 
1127 	buffer->interface_address = NULL;
1128 	buffer->offset = 0;
1129 	buffer->flags = 0;
1130 	buffer->size = 0;
1131 
1132 	CHECK_BUFFER(buffer);
1133 	CREATE_PARANOIA_CHECK_SET(buffer, "net_buffer");
1134 	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1135 		sizeof(buffer->size));
1136 
1137 	T(Create(headerSpace, buffer));
1138 
1139 	return buffer;
1140 }
1141 
1142 
1143 static void
1144 free_buffer(net_buffer* _buffer)
1145 {
1146 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1147 
1148 	TRACE(("%d: free buffer %p\n", find_thread(NULL), buffer));
1149 	T(Free(buffer));
1150 
1151 	CHECK_BUFFER(buffer);
1152 	DELETE_PARANOIA_CHECK_SET(buffer);
1153 
1154 	while (data_node* node
1155 			= (data_node*)list_remove_head_item(&buffer->buffers)) {
1156 		remove_data_node(node);
1157 	}
1158 
1159 	delete_ancillary_data_container(buffer->ancillary_data);
1160 
1161 	release_data_header(buffer->allocation_header);
1162 
1163 	if (buffer->interface_address != NULL)
1164 		((InterfaceAddress*)buffer->interface_address)->ReleaseReference();
1165 
1166 	free_net_buffer(buffer);
1167 }
1168 
1169 
1170 /*!	Creates a duplicate of the \a buffer. The new buffer does not share internal
1171 	storage; the two buffers are completely independent of each other.
1172 */
1173 static net_buffer*
1174 duplicate_buffer(net_buffer* _buffer)
1175 {
1176 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1177 
1178 	ParanoiaChecker _(buffer);
1179 
1180 	TRACE(("%d: duplicate_buffer(buffer %p)\n", find_thread(NULL), buffer));
1181 
1182 	// TODO: We might want to choose a better header space. The minimal
1183 	// one doesn't allow prepending any data without allocating a new header.
1184 	// The same holds for appending cloned data.
1185 	net_buffer* duplicate = create_buffer(DATA_NODE_SIZE);
1186 	if (duplicate == NULL)
1187 		return NULL;
1188 
1189 	TRACE(("%d:   duplicate: %p)\n", find_thread(NULL), duplicate));
1190 
1191 	// copy the data from the source buffer
1192 
1193 	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
1194 	while (node != NULL) {
1195 		if (append_data(duplicate, node->start, node->used) < B_OK) {
1196 			free_buffer(duplicate);
1197 			CHECK_BUFFER(buffer);
1198 			return NULL;
1199 		}
1200 
1201 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
1202 	}
1203 
1204 	copy_metadata(duplicate, buffer);
1205 
1206 	ASSERT(duplicate->size == buffer->size);
1207 	CHECK_BUFFER(buffer);
1208 	CHECK_BUFFER(duplicate);
1209 	RUN_PARANOIA_CHECKS(duplicate);
1210 
1211 	T(Duplicate(buffer, duplicate));
1212 
1213 	return duplicate;
1214 }
1215 
1216 
1217 /*!	Clones the buffer by grabbing another reference to the underlying data.
1218 	If that data changes, it will be changed in the clone as well.
1219 
1220 	If \a shareFreeSpace is \c true, the cloned buffer may claim the free
1221 	space in the original buffer, just as the original buffer still can. If
1222 	you use this, it is your responsibility to ensure that only one of the
1223 	buffers actually does so.
1224 */
1225 static net_buffer*
1226 clone_buffer(net_buffer* _buffer, bool shareFreeSpace)
1227 {
1228 	// TODO: See if the commented-out code can be fixed in a safe way. We could
1229 	// probably place cloned nodes on a header not belonging to our buffer, if
1230 	// we don't free the header space for the node when removing it. Otherwise we
1231 	// mess with the header's free list which might at the same time be accessed
1232 	// by another thread.
1233 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1234 
1235 	net_buffer* clone = create_buffer(MAX_FREE_BUFFER_SIZE);
1236 	if (clone == NULL)
1237 		return NULL;
1238 
1239 	if (append_cloned_data(clone, buffer, 0, buffer->size) != B_OK) {
1240 		free_buffer(clone);
1241 		return NULL;
1242 	}
1243 
1244 	copy_metadata(clone, buffer);
1245 	ASSERT(clone->size == buffer->size);
1246 
1247 	return clone;
1248 
1249 #if 0
1250 	ParanoiaChecker _(buffer);
1251 
1252 	TRACE(("%d: clone_buffer(buffer %p)\n", find_thread(NULL), buffer));
1253 
1254 	net_buffer_private* clone = allocate_net_buffer();
1255 	if (clone == NULL)
1256 		return NULL;
1257 
1258 	TRACE(("%d:   clone: %p\n", find_thread(NULL), buffer));
1259 
1260 	data_node* sourceNode = (data_node*)list_get_first_item(&buffer->buffers);
1261 	if (sourceNode == NULL) {
1262 		free_net_buffer(clone);
1263 		return NULL;
1264 	}
1265 
1266 	clone->source = (sockaddr*)&clone->storage.source;
1267 	clone->destination = (sockaddr*)&clone->storage.destination;
1268 
1269 	list_init(&clone->buffers);
1270 
1271 	// grab reference to this buffer - all additional nodes will get
1272 	// theirs in add_data_node()
1273 	acquire_data_header(sourceNode->header);
1274 	data_node* node = &clone->first_node;
1275 	node->header = sourceNode->header;
1276 	node->located = NULL;
1277 	node->used_header_space = &node->own_header_space;
1278 
1279 	while (sourceNode != NULL) {
1280 		node->start = sourceNode->start;
1281 		node->used = sourceNode->used;
1282 		node->offset = sourceNode->offset;
1283 
1284 		if (shareFreeSpace) {
1285 			// both buffers could claim the free space - note that this option
1286 			// has to be used carefully
1287 			node->used_header_space = &sourceNode->header->space;
1288 			node->tail_space = sourceNode->tail_space;
1289 		} else {
1290 			// the free space stays with the original buffer
1291 			node->used_header_space->size = 0;
1292 			node->used_header_space->free = 0;
1293 			node->tail_space = 0;
1294 		}
1295 
1296 		// add node to clone's list of buffers
1297 		list_add_item(&clone->buffers, node);
1298 
1299 		sourceNode = (data_node*)list_get_next_item(&buffer->buffers,
1300 			sourceNode);
1301 		if (sourceNode == NULL)
1302 			break;
1303 
1304 		node = add_data_node(sourceNode->header);
1305 		if (node == NULL) {
1306 			// There was not enough space left for another node in this buffer
1307 			// TODO: handle this case!
1308 			panic("clone buffer hits size limit... (fix me)");
1309 			free_net_buffer(clone);
1310 			return NULL;
1311 		}
1312 	}
1313 
1314 	copy_metadata(clone, buffer);
1315 
1316 	ASSERT(clone->size == buffer->size);
1317 	CREATE_PARANOIA_CHECK_SET(clone, "net_buffer");
1318 	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, clone, &clone->size,
1319 		sizeof(clone->size));
1320 	CHECK_BUFFER(buffer);
1321 	CHECK_BUFFER(clone);
1322 
1323 	T(Clone(buffer, shareFreeSpace, clone));
1324 
1325 	return clone;
1326 #endif
1327 }
1328 
1329 
1330 /*!	Splits the buffer at the given offset; the data up to that offset is
1331 	returned as a new buffer.
1332 */
1333 static net_buffer*
1334 split_buffer(net_buffer* from, uint32 offset)
1335 {
1336 	net_buffer* buffer = create_buffer(DATA_NODE_SIZE);
1337 	if (buffer == NULL)
1338 		return NULL;
1339 
1340 	copy_metadata(buffer, from);
1341 
1342 	ParanoiaChecker _(from);
1343 	ParanoiaChecker _2(buffer);
1344 
1345 	TRACE(("%d: split_buffer(buffer %p -> %p, offset %" B_PRIu32 ")\n",
1346 		find_thread(NULL), from, buffer, offset));
1347 
1348 	if (append_data_from_buffer(buffer, from, offset) == B_OK) {
1349 		if (remove_header(from, offset) == B_OK) {
1350 			CHECK_BUFFER(from);
1351 			CHECK_BUFFER(buffer);
1352 			T(Split(from, offset, buffer));
1353 			return buffer;
1354 		}
1355 	}
1356 
1357 	free_buffer(buffer);
1358 	CHECK_BUFFER(from);
1359 	return NULL;
1360 }
1361 
1362 
1363 /*!	Merges the second buffer with the first. If \a after is \c true, the
1364 	second buffer's contents will be appended to the first one's, else they
1365 	will be prepended.
1366 	The second buffer will be freed if this function succeeds.
1367 */
1368 static status_t
1369 merge_buffer(net_buffer* _buffer, net_buffer* _with, bool after)
1370 {
1371 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1372 	net_buffer_private* with = (net_buffer_private*)_with;
1373 	if (with == NULL)
1374 		return B_BAD_VALUE;
1375 
1376 	TRACE(("%d: merge buffer %p with %p (%s)\n", find_thread(NULL), buffer,
1377 		with, after ? "after" : "before"));
1378 	T(Merge(buffer, with, after));
1379 	//dump_buffer(buffer);
1380 	//dprintf("with:\n");
1381 	//dump_buffer(with);
1382 
1383 	ParanoiaChecker _(buffer);
1384 	CHECK_BUFFER(buffer);
1385 	CHECK_BUFFER(with);
1386 
1387 	// TODO: this is currently very simplistic; I really need to finish the
1388 	//	harder part of this implementation (data_node management per header)
1389 
1390 	data_node* before = NULL;
1391 
1392 	// TODO: Allocate the nodes (the only part that can fail) upfront. Put them
1393 	// in a list, so we can easily clean up, if necessary.
1394 
1395 	if (!after) {
1396 		// change offset of all nodes already in the buffer
1397 		data_node* node = NULL;
1398 		while (true) {
1399 			node = (data_node*)list_get_next_item(&buffer->buffers, node);
1400 			if (node == NULL)
1401 				break;
1402 
1403 			node->offset += with->size;
1404 			if (before == NULL)
1405 				before = node;
1406 		}
1407 	}
1408 
1409 	data_node* last = NULL;
1410 
1411 	while (true) {
1412 		data_node* node = (data_node*)list_get_next_item(&with->buffers, last);
1413 		if (node == NULL)
1414 			break;
1415 
1416 		if ((uint8*)node > (uint8*)node->header
1417 			&& (uint8*)node < (uint8*)node->header + BUFFER_SIZE) {
1418 			// The node is already in the buffer, so we can just move it
1419 			// over to the new owner
1420 			list_remove_item(&with->buffers, node);
1421 			with->size -= node->used;
1422 		} else {
1423 			// we need a new place for this node
1424 			data_node* newNode = add_data_node(buffer, node->header);
1425 			if (newNode == NULL) {
1426 				// TODO: try to revert buffers to their initial state!!
1427 				return ENOBUFS;
1428 			}
1429 
1430 			last = node;
1431 			*newNode = *node;
1432 			node = newNode;
1433 				// the old node will get freed with its buffer
1434 		}
1435 
1436 		if (after) {
1437 			list_add_item(&buffer->buffers, node);
1438 			node->offset = buffer->size;
1439 		} else
1440 			list_insert_item_before(&buffer->buffers, before, node);
1441 
1442 		buffer->size += node->used;
1443 	}
1444 
1445 	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1446 		sizeof(buffer->size));
1447 
1448 	// the data has been merged completely at this point
1449 	free_buffer(with);
1450 
1451 	//dprintf(" merge result:\n");
1452 	//dump_buffer(buffer);
1453 	CHECK_BUFFER(buffer);
1454 
1455 	return B_OK;
1456 }
1457 
1458 
1459 /*!	Writes into existing allocated memory.
1460 	\return B_BAD_VALUE if you write outside of the buffer's current
1461 		bounds.
1462 */
1463 static status_t
1464 write_data(net_buffer* _buffer, size_t offset, const void* data, size_t size)
1465 {
1466 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1467 
1468 	T(Write(buffer, offset, data, size));
1469 
1470 	ParanoiaChecker _(buffer);
1471 
1472 	if (offset + size > buffer->size)
1473 		return B_BAD_VALUE;
1474 	if (size == 0)
1475 		return B_OK;
1476 
1477 	// find first node to write into
1478 	data_node* node = get_node_at_offset(buffer, offset);
1479 	if (node == NULL)
1480 		return B_BAD_VALUE;
1481 
1482 	offset -= node->offset;
1483 
1484 	while (true) {
1485 		size_t written = min_c(size, node->used - offset);
1486 		if (IS_USER_ADDRESS(data)) {
1487 			if (user_memcpy(node->start + offset, data, written) != B_OK)
1488 				return B_BAD_ADDRESS;
1489 		} else
1490 			memcpy(node->start + offset, data, written);
1491 
1492 		size -= written;
1493 		if (size == 0)
1494 			break;
1495 
1496 		offset = 0;
1497 		data = (void*)((uint8*)data + written);
1498 
1499 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
1500 		if (node == NULL)
1501 			return B_BAD_VALUE;
1502 	}
1503 
1504 	CHECK_BUFFER(buffer);
1505 
1506 	return B_OK;
1507 }
1508 
1509 
1510 static status_t
1511 read_data(net_buffer* _buffer, size_t offset, void* data, size_t size)
1512 {
1513 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1514 
1515 	T(Read(buffer, offset, data, size));
1516 
1517 	ParanoiaChecker _(buffer);
1518 
1519 	if (offset + size > buffer->size)
1520 		return B_BAD_VALUE;
1521 	if (size == 0)
1522 		return B_OK;
1523 
1524 	// find first node to read from
1525 	data_node* node = get_node_at_offset(buffer, offset);
1526 	if (node == NULL)
1527 		return B_BAD_VALUE;
1528 
1529 	offset -= node->offset;
1530 
1531 	while (true) {
1532 		size_t bytesRead = min_c(size, node->used - offset);
1533 		if (IS_USER_ADDRESS(data)) {
1534 			if (user_memcpy(data, node->start + offset, bytesRead) != B_OK)
1535 				return B_BAD_ADDRESS;
1536 		} else
1537 			memcpy(data, node->start + offset, bytesRead);
1538 
1539 		size -= bytesRead;
1540 		if (size == 0)
1541 			break;
1542 
1543 		offset = 0;
1544 		data = (void*)((uint8*)data + bytesRead);
1545 
1546 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
1547 		if (node == NULL)
1548 			return B_BAD_VALUE;
1549 	}
1550 
1551 	CHECK_BUFFER(buffer);
1552 
1553 	return B_OK;
1554 }
1555 
1556 
1557 static status_t
1558 prepend_size(net_buffer* _buffer, size_t size, void** _contiguousBuffer)
1559 {
1560 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1561 	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
1562 	if (node == NULL) {
1563 		node = add_first_data_node(buffer->allocation_header);
1564 		if (node == NULL)
1565 			return B_NO_MEMORY;
1566 	}
1567 
1568 	T(PrependSize(buffer, size));
1569 
1570 	ParanoiaChecker _(buffer);
1571 
1572 	TRACE(("%d: prepend_size(buffer %p, size %ld) [has %u]\n",
1573 		find_thread(NULL), buffer, size, node->HeaderSpace()));
1574 	//dump_buffer(buffer);
1575 
1576 	if ((node->flags & DATA_NODE_STORED_HEADER) != 0) {
1577 		// throw any stored headers away
1578 		node->AddHeaderSpace(buffer->stored_header_length);
1579 		node->flags &= ~DATA_NODE_STORED_HEADER;
1580 		buffer->stored_header_length = 0;
1581 	}
1582 
1583 	if (node->HeaderSpace() < size) {
1584 		// we need to prepend new buffers
1585 
1586 		size_t bytesLeft = size;
1587 		size_t sizePrepended = 0;
1588 		do {
1589 			if (node->HeaderSpace() == 0) {
1590 				size_t headerSpace = MAX_FREE_BUFFER_SIZE;
1591 				data_header* header = create_data_header(headerSpace);
1592 				if (header == NULL) {
1593 					remove_header(buffer, sizePrepended);
1594 					return B_NO_MEMORY;
1595 				}
1596 
1597 				data_node* previous = node;
1598 
1599 				node = (data_node*)add_first_data_node(header);
1600 
1601 				list_insert_item_before(&buffer->buffers, previous, node);
1602 
1603 				// Release the initial reference to the header, so that it will
1604 				// be deleted when the node is removed.
1605 				release_data_header(header);
1606 			}
1607 
1608 			size_t willConsume = min_c(bytesLeft, node->HeaderSpace());
1609 
1610 			node->SubtractHeaderSpace(willConsume);
1611 			node->start -= willConsume;
1612 			node->used += willConsume;
1613 			bytesLeft -= willConsume;
1614 			sizePrepended += willConsume;
1615 		} while (bytesLeft > 0);
1616 
1617 		// correct data offset in all nodes
1618 
1619 		size_t offset = 0;
1620 		node = NULL;
1621 		while ((node = (data_node*)list_get_next_item(&buffer->buffers,
1622 				node)) != NULL) {
1623 			node->offset = offset;
1624 			offset += node->used;
1625 		}
1626 
1627 		if (_contiguousBuffer)
1628 			*_contiguousBuffer = NULL;
1629 	} else {
1630 		// the data fits into this buffer
1631 		node->SubtractHeaderSpace(size);
1632 		node->start -= size;
1633 		node->used += size;
1634 
1635 		if (_contiguousBuffer)
1636 			*_contiguousBuffer = node->start;
1637 
1638 		// adjust offset of following nodes
1639 		while ((node = (data_node*)list_get_next_item(&buffer->buffers, node))
1640 				!= NULL) {
1641 			node->offset += size;
1642 		}
1643 	}
1644 
1645 	buffer->size += size;
1646 
1647 	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1648 		sizeof(buffer->size));
1649 
1650 	//dprintf(" prepend_size result:\n");
1651 	//dump_buffer(buffer);
1652 	CHECK_BUFFER(buffer);
1653 	return B_OK;
1654 }
1655 
1656 
1657 static status_t
1658 prepend_data(net_buffer* buffer, const void* data, size_t size)
1659 {
1660 	void* contiguousBuffer;
1661 	status_t status = prepend_size(buffer, size, &contiguousBuffer);
1662 	if (status < B_OK)
1663 		return status;
1664 
1665 	if (contiguousBuffer) {
1666 		if (IS_USER_ADDRESS(data)) {
1667 			if (user_memcpy(contiguousBuffer, data, size) != B_OK)
1668 				return B_BAD_ADDRESS;
1669 		} else
1670 			memcpy(contiguousBuffer, data, size);
1671 	} else
1672 		write_data(buffer, 0, data, size);
1673 
1674 	//dprintf(" prepend result:\n");
1675 	//dump_buffer(buffer);
1676 
1677 	return B_OK;
1678 }
1679 
1680 
1681 static status_t
1682 append_size(net_buffer* _buffer, size_t size, void** _contiguousBuffer)
1683 {
1684 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1685 	data_node* node = (data_node*)list_get_last_item(&buffer->buffers);
1686 	if (node == NULL) {
1687 		node = add_first_data_node(buffer->allocation_header);
1688 		if (node == NULL)
1689 			return B_NO_MEMORY;
1690 	}
1691 
1692 	T(AppendSize(buffer, size));
1693 
1694 	ParanoiaChecker _(buffer);
1695 
1696 	TRACE(("%d: append_size(buffer %p, size %ld)\n", find_thread(NULL),
1697 		buffer, size));
1698 	//dump_buffer(buffer);
1699 
1700 	if (node->TailSpace() < size) {
1701 		// we need to append at least one new buffer
1702 		uint32 previousTailSpace = node->TailSpace();
1703 		uint32 headerSpace = DATA_NODE_SIZE;
1704 		uint32 sizeUsed = MAX_FREE_BUFFER_SIZE - headerSpace;
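		// Each newly appended chunk reserves header space only for its own
		// data_node, so nearly the whole BUFFER_SIZE chunk is usable as data.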
1705 
1706 		// allocate space left in the node
1707 		node->SetTailSpace(0);
1708 		node->used += previousTailSpace;
1709 		buffer->size += previousTailSpace;
1710 		uint32 sizeAdded = previousTailSpace;
1711 		SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1712 			sizeof(buffer->size));
1713 
1714 		// allocate all buffers
1715 
1716 		while (sizeAdded < size) {
1717 			if (sizeAdded + sizeUsed > size) {
1718 				// last data_header and not all available space is used
1719 				sizeUsed = size - sizeAdded;
1720 			}
1721 
1722 			data_header* header = create_data_header(headerSpace);
1723 			if (header == NULL) {
1724 				remove_trailer(buffer, sizeAdded);
1725 				return B_NO_MEMORY;
1726 			}
1727 
1728 			node = add_first_data_node(header);
1729 			if (node == NULL) {
1730 				release_data_header(header);
1731 				return B_NO_MEMORY;
1732 			}
1733 
1734 			node->SetTailSpace(node->TailSpace() - sizeUsed);
1735 			node->used = sizeUsed;
1736 			node->offset = buffer->size;
1737 
1738 			buffer->size += sizeUsed;
1739 			sizeAdded += sizeUsed;
1740 			SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1741 				sizeof(buffer->size));
1742 
1743 			list_add_item(&buffer->buffers, node);
1744 
1745 			// Release the initial reference to the header, so that it will
1746 			// be deleted when the node is removed.
1747 			release_data_header(header);
1748 		}
1749 
1750 		if (_contiguousBuffer)
1751 			*_contiguousBuffer = NULL;
1752 
1753 		//dprintf(" append result 1:\n");
1754 		//dump_buffer(buffer);
1755 		CHECK_BUFFER(buffer);
1756 
1757 		return B_OK;
1758 	}
1759 
1760 	// the data fits into this buffer
1761 	node->SetTailSpace(node->TailSpace() - size);
1762 
1763 	if (_contiguousBuffer)
1764 		*_contiguousBuffer = node->start + node->used;
1765 
1766 	node->used += size;
1767 	buffer->size += size;
1768 	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1769 		sizeof(buffer->size));
1770 
1771 	//dprintf(" append result 2:\n");
1772 	//dump_buffer(buffer);
1773 	CHECK_BUFFER(buffer);
1774 
1775 	return B_OK;
1776 }
1777 
1778 
1779 static status_t
1780 append_data(net_buffer* buffer, const void* data, size_t size)
1781 {
1782 	size_t used = buffer->size;
1783 
1784 	void* contiguousBuffer;
1785 	status_t status = append_size(buffer, size, &contiguousBuffer);
1786 	if (status < B_OK)
1787 		return status;
1788 
1789 	if (contiguousBuffer) {
1790 		if (IS_USER_ADDRESS(data)) {
1791 			if (user_memcpy(contiguousBuffer, data, size) != B_OK)
1792 				return B_BAD_ADDRESS;
1793 		} else
1794 			memcpy(contiguousBuffer, data, size);
1795 	} else
1796 		write_data(buffer, used, data, size);
1797 
1798 	return B_OK;
1799 }
1800 
1801 
1802 /*!	Removes bytes from the beginning of the buffer.
1803 */
1804 static status_t
1805 remove_header(net_buffer* _buffer, size_t bytes)
1806 {
1807 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1808 
1809 	T(RemoveHeader(buffer, bytes));
1810 
1811 	ParanoiaChecker _(buffer);
1812 
1813 	if (bytes > buffer->size)
1814 		return B_BAD_VALUE;
1815 
1816 	TRACE(("%d: remove_header(buffer %p, %ld bytes)\n", find_thread(NULL),
1817 		buffer, bytes));
1818 	//dump_buffer(buffer);
1819 
1820 	size_t left = bytes;
1821 	data_node* node = NULL;
1822 
1823 	while (true) {
1824 		node = (data_node*)list_get_first_item(&buffer->buffers);
1825 		if (node == NULL) {
1826 			if (left == 0)
1827 				break;
1828 			CHECK_BUFFER(buffer);
1829 			return B_ERROR;
1830 		}
1831 
1832 		if (node->used > left)
1833 			break;
1834 
1835 		// node will be removed completely
1836 		list_remove_item(&buffer->buffers, node);
1837 		left -= node->used;
1838 		remove_data_node(node);
1839 		node = NULL;
1840 		buffer->stored_header_length = 0;
1841 	}
1842 
1843 	// cut remaining node, if any
1844 
1845 	if (node != NULL) {
1846 		size_t cut = min_c(node->used, left);
1847 		node->offset = 0;
1848 		node->start += cut;
1849 		if ((node->flags & DATA_NODE_STORED_HEADER) != 0)
1850 			buffer->stored_header_length += cut;
1851 		else
1852 			node->AddHeaderSpace(cut);
1853 		node->used -= cut;
1854 
1855 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
1856 	}
1857 
1858 	// adjust offset of following nodes
1859 	while (node != NULL) {
1860 		node->offset -= bytes;
1861 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
1862 	}
1863 
1864 	buffer->size -= bytes;
1865 	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1866 		sizeof(buffer->size));
1867 
1868 	//dprintf(" remove result:\n");
1869 	//dump_buffer(buffer);
1870 	CHECK_BUFFER(buffer);
1871 
1872 	return B_OK;
1873 }
1874 
1875 
1876 /*!	Removes bytes from the end of the buffer.
1877 */
1878 static status_t
1879 remove_trailer(net_buffer* buffer, size_t bytes)
1880 {
1881 	return trim_data(buffer, buffer->size - bytes);
1882 }
1883 
1884 
1885 /*!	Trims the buffer to the specified \a newSize by removing space from
1886 	the end of the buffer.
1887 */
1888 static status_t
1889 trim_data(net_buffer* _buffer, size_t newSize)
1890 {
1891 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1892 	TRACE(("%d: trim_data(buffer %p, newSize = %ld, buffer size = %" B_PRIu32 ")\n",
1893 		find_thread(NULL), buffer, newSize, buffer->size));
1894 	T(Trim(buffer, newSize));
1895 	//dump_buffer(buffer);
1896 
1897 	ParanoiaChecker _(buffer);
1898 
1899 	if (newSize > buffer->size)
1900 		return B_BAD_VALUE;
1901 	if (newSize == buffer->size)
1902 		return B_OK;
1903 
1904 	data_node* node = get_node_at_offset(buffer, newSize);
1905 	if (node == NULL) {
1906 		// trim size greater than buffer size
1907 		return B_BAD_VALUE;
1908 	}
1909 
1910 	int32 diff = node->used + node->offset - newSize;
1911 	node->SetTailSpace(node->TailSpace() + diff);
1912 	node->used -= diff;
1913 
1914 	if (node->used > 0)
1915 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
1916 
1917 	while (node != NULL) {
1918 		data_node* next = (data_node*)list_get_next_item(&buffer->buffers, node);
1919 		list_remove_item(&buffer->buffers, node);
1920 		remove_data_node(node);
1921 
1922 		node = next;
1923 	}
1924 
1925 	buffer->size = newSize;
1926 	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
1927 		sizeof(buffer->size));
1928 
1929 	//dprintf(" trim result:\n");
1930 	//dump_buffer(buffer);
1931 	CHECK_BUFFER(buffer);
1932 
1933 	return B_OK;
1934 }
1935 
1936 
1937 /*!	Appends data coming from buffer \a source to the buffer \a buffer. It only
1938 	clones the data, though; that is, the data is not copied, just referenced.
1939 */
1940 static status_t
1941 append_cloned_data(net_buffer* _buffer, net_buffer* _source, uint32 offset,
1942 	size_t bytes)
1943 {
1944 	if (bytes == 0)
1945 		return B_OK;
1946 
1947 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
1948 	net_buffer_private* source = (net_buffer_private*)_source;
1949 	TRACE(("%d: append_cloned_data(buffer %p, source %p, offset = %" B_PRIu32 ", "
1950 		"bytes = %ld)\n", find_thread(NULL), buffer, source, offset, bytes));
1951 	T(AppendCloned(buffer, source, offset, bytes));
1952 
1953 	ParanoiaChecker _(buffer);
1954 	ParanoiaChecker _2(source);
1955 
1956 	if (source->size < offset + bytes || source->size < offset)
1957 		return B_BAD_VALUE;
1958 
1959 	// find data_node to start with from the source buffer
1960 	data_node* node = get_node_at_offset(source, offset);
1961 	if (node == NULL) {
1962 		// the offset lies beyond the source buffer's data
1963 		return B_BAD_VALUE;
1964 	}
1965 
1966 	size_t sizeAppended = 0;
1967 
1968 	while (node != NULL && bytes > 0) {
1969 		data_node* clone = add_data_node(buffer, node->header);
1970 		if (clone == NULL) {
1971 			remove_trailer(buffer, sizeAppended);
1972 			return ENOBUFS;
1973 		}
1974 
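		// Only the first cloned node honors the remaining requested offset;
		// all following source nodes are referenced from their beginning.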
1975 		if (offset)
1976 			offset -= node->offset;
1977 
1978 		clone->offset = buffer->size;
1979 		clone->start = node->start + offset;
1980 		clone->used = min_c(bytes, node->used - offset);
1981 		if (list_is_empty(&buffer->buffers)) {
1982 			// take over the stored header length
1983 			buffer->stored_header_length = source->stored_header_length;
1984 			clone->flags = node->flags | DATA_NODE_READ_ONLY;
1985 		} else
1986 			clone->flags = DATA_NODE_READ_ONLY;
1987 
1988 		list_add_item(&buffer->buffers, clone);
1989 
1990 		offset = 0;
1991 		bytes -= clone->used;
1992 		buffer->size += clone->used;
1993 		sizeAppended += clone->used;
1994 		node = (data_node*)list_get_next_item(&source->buffers, node);
1995 	}
1996 
1997 	if (bytes != 0)
1998 		panic("append_cloned_data() failed, bytes != 0!\n");
1999 
2000 	//dprintf(" append cloned result:\n");
2001 	//dump_buffer(buffer);
2002 	CHECK_BUFFER(source);
2003 	CHECK_BUFFER(buffer);
2004 	SET_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, buffer, &buffer->size,
2005 		sizeof(buffer->size));
2006 
2007 	return B_OK;
2008 }
2009 
2010 
2011 void
2012 set_ancillary_data(net_buffer* buffer, ancillary_data_container* container)
2013 {
2014 	((net_buffer_private*)buffer)->ancillary_data = container;
2015 }
2016 
2017 
2018 ancillary_data_container*
2019 get_ancillary_data(net_buffer* buffer)
2020 {
2021 	return ((net_buffer_private*)buffer)->ancillary_data;
2022 }
2023 
2024 
2025 /*!	Moves all ancillary data from buffer \c from to the end of the list of
2026 	ancillary data of buffer \c to. Note that this is the only function that
2027 	transfers or copies ancillary data from one buffer to another.
2028 
2029 	\param from The buffer from which to remove the ancillary data.
2030 	\param to The buffer to which to add the ancillary data.
2031 	\return A pointer to the first of the moved ancillary data, if any, \c NULL
2032 		otherwise.
2033 */
2034 static void*
2035 transfer_ancillary_data(net_buffer* _from, net_buffer* _to)
2036 {
2037 	net_buffer_private* from = (net_buffer_private*)_from;
2038 	net_buffer_private* to = (net_buffer_private*)_to;
2039 
2040 	if (from == NULL || to == NULL)
2041 		return NULL;
2042 
2043 	if (from->ancillary_data == NULL)
2044 		return NULL;
2045 
2046 	if (to->ancillary_data == NULL) {
2047 		// no ancillary data in the target buffer
2048 		to->ancillary_data = from->ancillary_data;
2049 		from->ancillary_data = NULL;
2050 		return next_ancillary_data(to->ancillary_data, NULL, NULL);
2051 	}
2052 
2053 	// both have ancillary data
2054 	void* data = move_ancillary_data(from->ancillary_data,
2055 		to->ancillary_data);
2056 	delete_ancillary_data_container(from->ancillary_data);
2057 	from->ancillary_data = NULL;
2058 
2059 	return data;
2060 }
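

/*	Illustrative sketch (not part of the module): handing all ancillary data
	over to another buffer and walking the moved items afterwards. The helper
	name is made up; next_ancillary_data() comes from ancillary_data.h.
*/
#if 0
static void
example_take_over_ancillary_data(net_buffer* from, net_buffer* to)
{
	// afterwards "from" no longer owns an ancillary data container
	void* item = transfer_ancillary_data(from, to);

	// the moved items are now part of "to"'s container
	while (item != NULL)
		item = next_ancillary_data(get_ancillary_data(to), item, NULL);
}
#endif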
2061 
2062 
2063 /*!	Stores the current header position; even if the header is removed with
2064 	remove_header(), you can still reclaim it later using restore_header(),
2065 	unless you prepended different data (in which case restoring will fail).
2066 */
2067 status_t
2068 store_header(net_buffer* _buffer)
2069 {
2070 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
2071 	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
2072 	if (node == NULL)
2073 		return B_ERROR;
2074 
2075 	if ((node->flags & DATA_NODE_STORED_HEADER) != 0) {
2076 		// Someone else already stored the header - since we cannot
2077 		// differentiate between them, we throw away everything
2078 		node->AddHeaderSpace(buffer->stored_header_length);
2079 		node->flags &= ~DATA_NODE_STORED_HEADER;
2080 		buffer->stored_header_length = 0;
2081 
2082 		return B_ERROR;
2083 	}
2084 
2085 	buffer->stored_header_length = 0;
2086 	node->flags |= DATA_NODE_STORED_HEADER;
2087 
2088 	return B_OK;
2089 }
2090 
2091 
2092 ssize_t
2093 stored_header_length(net_buffer* _buffer)
2094 {
2095 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
2096 	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
2097 	if (node == NULL || (node->flags & DATA_NODE_STORED_HEADER) == 0)
2098 		return B_BAD_VALUE;
2099 
2100 	return buffer->stored_header_length;
2101 }
2102 
2103 
2104 /*!	Reads from the complete buffer, including a previously stored header,
2105 	if any. This function does not check whether a header has actually been
2106 	stored - use the stored_header_length() function to find out.
2107 */
2108 status_t
2109 restore_header(net_buffer* _buffer, uint32 offset, void* data, size_t bytes)
2110 {
2111 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
2112 
2113 	if (offset < buffer->stored_header_length) {
2114 		data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
2115 		if (node == NULL
2116 			|| offset + bytes > buffer->stored_header_length + buffer->size)
2117 			return B_BAD_VALUE;
2118 
2119 		// We have the data, so copy it out
2120 
2121 		size_t copied = std::min(bytes, buffer->stored_header_length - offset);
2122 		memcpy(data, node->start + offset - buffer->stored_header_length,
2123 			copied);
2124 
2125 		if (copied == bytes)
2126 			return B_OK;
2127 
2128 		data = (uint8*)data + copied;
2129 		bytes -= copied;
2130 		offset = 0;
2131 	} else
2132 		offset -= buffer->stored_header_length;
2133 
2134 	return read_data(_buffer, offset, data, bytes);
2135 }
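

/*	Illustrative sketch (not part of the module): the intended
	store_header()/restore_header() flow. A protocol first stores the header
	position, then strips the header via remove_header(); the original bytes
	can still be read back later. The 20 byte header size and the helper name
	are made-up examples.
*/
#if 0
static status_t
example_strip_and_reclaim_header(net_buffer* buffer, void* copy)
{
	const size_t kHeaderSize = 20;

	// remember where the header currently starts
	if (store_header(buffer) != B_OK)
		return B_ERROR;

	// strip it for further processing; the bytes remain reclaimable
	status_t status = remove_header(buffer, kHeaderSize);
	if (status != B_OK)
		return status;

	// later: read the stored header bytes back into "copy"
	if (stored_header_length(buffer) < (ssize_t)kHeaderSize)
		return B_ERROR;

	return restore_header(buffer, 0, copy, kHeaderSize);
}
#endif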
2136 
2137 
2138 /*!	Copies from the complete \a source buffer, including a previously stored
2139 	header, if any, to the specified target \a buffer.
2140 	This function does not check whether a header has actually been stored -
2141 	use the stored_header_length() function to find out.
2142 */
2143 status_t
2144 append_restored_header(net_buffer* buffer, net_buffer* _source, uint32 offset,
2145 	size_t bytes)
2146 {
2147 	net_buffer_private* source = (net_buffer_private*)_source;
2148 
2149 	if (offset < source->stored_header_length) {
2150 		data_node* node = (data_node*)list_get_first_item(&source->buffers);
2151 		if (node == NULL
2152 			|| offset + bytes > source->stored_header_length + source->size)
2153 			return B_BAD_VALUE;
2154 
2155 		// We have the data, so copy it out
2156 
2157 		size_t appended = std::min(bytes, source->stored_header_length - offset);
2158 		status_t status = append_data(buffer,
2159 			node->start + offset - source->stored_header_length, appended);
2160 		if (status != B_OK)
2161 			return status;
2162 
2163 		if (appended == bytes)
2164 			return B_OK;
2165 
2166 		bytes -= appended;
2167 		offset = 0;
2168 	} else
2169 		offset -= source->stored_header_length;
2170 
2171 	return append_cloned_data(buffer, source, offset, bytes);
2172 }
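

/*	Illustrative sketch (not part of the module): quoting an incoming buffer,
	including its already stripped but stored header, into a newly created
	reply buffer - the way an ICMP style error reply quotes the offending
	datagram. The quoted length and the helper name are made-up examples.
*/
#if 0
static status_t
example_quote_original_datagram(net_buffer* source, net_buffer** _reply)
{
	net_buffer* reply = create_buffer(0);
	if (reply == NULL)
		return B_NO_MEMORY;

	// quote the stored header (if any) plus at most 64 bytes of payload
	ssize_t stored = stored_header_length(source);
	if (stored < 0)
		stored = 0;
	size_t bytes = (size_t)stored + min_c(source->size, (uint32)64);

	status_t status = append_restored_header(reply, source, 0, bytes);
	if (status != B_OK) {
		free_buffer(reply);
		return status;
	}

	*_reply = reply;
	return B_OK;
}
#endif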
2173 
2174 
2175 /*!	Tries to directly access the requested space in the buffer.
2176 	If the space is contiguous, the function will succeed and place a pointer
2177 	to that space into \a _contiguousBuffer.
2178 
2179 	\return B_BAD_VALUE if the offset is outside of the buffer's bounds.
2180 	\return B_ERROR in case the buffer is not contiguous at that location.
2181 */
2182 static status_t
2183 direct_access(net_buffer* _buffer, uint32 offset, size_t size,
2184 	void** _contiguousBuffer)
2185 {
2186 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
2187 
2188 	ParanoiaChecker _(buffer);
2189 
2190 	//TRACE(("direct_access(buffer %p, offset %ld, size %ld)\n", buffer, offset,
2191 	//	size));
2192 
2193 	if (offset + size > buffer->size)
2194 		return B_BAD_VALUE;
2195 
2196 	// find node to access
2197 	data_node* node = get_node_at_offset(buffer, offset);
2198 	if (node == NULL)
2199 		return B_BAD_VALUE;
2200 
2201 	offset -= node->offset;
2202 
2203 	if (size > node->used - offset)
2204 		return B_ERROR;
2205 
2206 	*_contiguousBuffer = node->start + offset;
2207 	return B_OK;
2208 }
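

/*	Illustrative sketch (not part of the module): the usual access pattern
	built on direct_access() - use the data in place when the requested range
	is contiguous, otherwise fall back to copying it out with read_data().
	The example_header type and the helper name are made-up placeholders.
*/
#if 0
struct example_header {
	uint16	length;
	uint16	checksum;
};

static status_t
example_peek_header(net_buffer* buffer, example_header* copy)
{
	void* pointer;
	status_t status = direct_access(buffer, 0, sizeof(example_header),
		&pointer);
	if (status == B_OK) {
		// the header lies within a single data node - no copy needed
		memcpy(copy, pointer, sizeof(example_header));
		return B_OK;
	}
	if (status != B_ERROR)
		return status;

	// the header spans more than one data node - copy it out instead
	return read_data(buffer, 0, copy, sizeof(example_header));
}
#endif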
2209 
2210 
2211 static int32
2212 checksum_data(net_buffer* _buffer, uint32 offset, size_t size, bool finalize)
2213 {
2214 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
2215 
2216 	if (offset + size > buffer->size || size == 0)
2217 		return B_BAD_VALUE;
2218 
2219 	// find first node to read from
2220 	data_node* node = get_node_at_offset(buffer, offset);
2221 	if (node == NULL)
2222 		return B_ERROR;
2223 
2224 	offset -= node->offset;
2225 
2226 	// Since the maximum buffer size is 65536 bytes, the 32 bit sum cannot
2227 	// overflow - we do not need to fold the carry within the loop, we can
2228 	// safely do it afterwards
2229 	uint32 sum = 0;
2230 
2231 	while (true) {
2232 		size_t bytes = min_c(size, node->used - offset);
2233 		if ((offset + node->offset) & 1) {
2234 			// at an odd offset, this node's checksum has to be byte swapped
2235 			sum += __swap_int16(compute_checksum(node->start + offset, bytes));
2236 		} else
2237 			sum += compute_checksum(node->start + offset, bytes);
2238 
2239 		size -= bytes;
2240 		if (size == 0)
2241 			break;
2242 
2243 		offset = 0;
2244 
2245 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
2246 		if (node == NULL)
2247 			return B_ERROR;
2248 	}
2249 
2250 	while (sum >> 16) {
2251 		sum = (sum & 0xffff) + (sum >> 16);
2252 	}
2253 
2254 	if (!finalize)
2255 		return (uint16)sum;
2256 
2257 	return (uint16)~sum;
2258 }
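

/*	Illustrative sketch (not part of the module): combining a non-finalized
	checksum_data() result with a separately computed pseudo header sum, as a
	transport protocol would. The pseudo header handling is reduced to an
	already computed 32 bit sum; the helper name is made up.
*/
#if 0
static uint16
example_checksum_with_pseudo_header(net_buffer* buffer, uint32 pseudoHeaderSum)
{
	int32 partial = checksum_data(buffer, 0, buffer->size, false);
	if (partial < 0)
		return 0;

	// one's complement addition: fold the carries, then invert
	uint32 sum = pseudoHeaderSum + (uint16)partial;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16)~sum;
}
#endif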
2259 
2260 
2261 static uint32
2262 get_iovecs(net_buffer* _buffer, struct iovec* iovecs, uint32 vecCount)
2263 {
2264 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
2265 	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
2266 	uint32 count = 0;
2267 
2268 	while (node != NULL && count < vecCount) {
2269 		if (node->used > 0) {
2270 			iovecs[count].iov_base = node->start;
2271 			iovecs[count].iov_len = node->used;
2272 			count++;
2273 		}
2274 
2275 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
2276 	}
2277 
2278 	return count;
2279 }
2280 
2281 
2282 static uint32
2283 count_iovecs(net_buffer* _buffer)
2284 {
2285 	net_buffer_private* buffer = (net_buffer_private*)_buffer;
2286 	data_node* node = (data_node*)list_get_first_item(&buffer->buffers);
2287 	uint32 count = 0;
2288 
2289 	while (node != NULL) {
2290 		if (node->used > 0)
2291 			count++;
2292 
2293 		node = (data_node*)list_get_next_item(&buffer->buffers, node);
2294 	}
2295 
2296 	return count;
2297 }
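

/*	Illustrative sketch (not part of the module): gathering a buffer's data
	nodes into an iovec array, as a driver doing scatter/gather I/O would.
	The consumer of the vectors is left out; the helper name is made up.
*/
#if 0
static status_t
example_gather_iovecs(net_buffer* buffer)
{
	uint32 count = count_iovecs(buffer);
	if (count == 0)
		return B_OK;

	iovec* vecs = (iovec*)malloc(count * sizeof(iovec));
	if (vecs == NULL)
		return B_NO_MEMORY;

	// one iovec per non-empty data node, in buffer order
	uint32 filled = get_iovecs(buffer, vecs, count);
	// ... hand "vecs" and "filled" to the actual consumer here ...
	(void)filled;

	free(vecs);
	return B_OK;
}
#endif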
2298 
2299 
2300 static void
2301 swap_addresses(net_buffer* buffer)
2302 {
2303 	std::swap(buffer->source, buffer->destination);
2304 }
2305 
2306 
2307 static status_t
2308 std_ops(int32 op, ...)
2309 {
2310 	switch (op) {
2311 		case B_MODULE_INIT:
2312 			// TODO: improve our code a bit so we can add constructors
2313 			//	and keep around half-constructed buffers in the slab
2314 
2315 			sNetBufferCache = create_object_cache("net buffer cache",
2316 				sizeof(net_buffer_private), 8, NULL, NULL, NULL);
2317 			if (sNetBufferCache == NULL)
2318 				return B_NO_MEMORY;
2319 
2320 			sDataNodeCache = create_object_cache("data node cache", BUFFER_SIZE,
2321 				0, NULL, NULL, NULL);
2322 			if (sDataNodeCache == NULL) {
2323 				delete_object_cache(sNetBufferCache);
2324 				return B_NO_MEMORY;
2325 			}
2326 
2327 #if ENABLE_STATS
2328 			add_debugger_command_etc("net_buffer_stats", &dump_net_buffer_stats,
2329 				"Print net buffer statistics",
2330 				"\nPrint net buffer statistics.\n", 0);
2331 #endif
2332 #if ENABLE_DEBUGGER_COMMANDS
2333 			add_debugger_command_etc("net_buffer", &dump_net_buffer,
2334 				"Dump net buffer",
2335 				"\nDump the net buffer's internal structures.\n", 0);
2336 #endif
2337 			return B_OK;
2338 
2339 		case B_MODULE_UNINIT:
2340 #if ENABLE_STATS
2341 			remove_debugger_command("net_buffer_stats", &dump_net_buffer_stats);
2342 #endif
2343 #if ENABLE_DEBUGGER_COMMANDS
2344 			remove_debugger_command("net_buffer", &dump_net_buffer);
2345 #endif
2346 			delete_object_cache(sNetBufferCache);
2347 			delete_object_cache(sDataNodeCache);
2348 			return B_OK;
2349 
2350 		default:
2351 			return B_ERROR;
2352 	}
2353 }
2354 
2355 
2356 net_buffer_module_info gNetBufferModule = {
2357 	{
2358 		NET_BUFFER_MODULE_NAME,
2359 		0,
2360 		std_ops
2361 	},
2362 	create_buffer,
2363 	free_buffer,
2364 
2365 	duplicate_buffer,
2366 	clone_buffer,
2367 	split_buffer,
2368 	merge_buffer,
2369 
2370 	prepend_size,
2371 	prepend_data,
2372 	append_size,
2373 	append_data,
2374 	NULL,	// insert
2375 	NULL,	// remove
2376 	remove_header,
2377 	remove_trailer,
2378 	trim_data,
2379 	append_cloned_data,
2380 
2381 	NULL,	// associate_data
2382 
2383 	set_ancillary_data,
2384 	get_ancillary_data,
2385 	transfer_ancillary_data,
2386 
2387 	store_header,
2388 	stored_header_length,
2389 	restore_header,
2390 	append_restored_header,
2391 
2392 	direct_access,
2393 	read_data,
2394 	write_data,
2395 
2396 	checksum_data,
2397 
2398 	NULL,	// get_memory_map
2399 	get_iovecs,
2400 	count_iovecs,
2401 
2402 	swap_addresses,
2403 
2404 	dump_buffer,	// dump
2405 };
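

/*	Illustrative sketch (not part of this file): how another component of the
	stack obtains and releases this module. get_module()/put_module() belong
	to the kernel module interface; error handling is reduced to the minimum.
*/
#if 0
static net_buffer_module_info* sBufferModule;

static status_t
example_init_buffer_module()
{
	return get_module(NET_BUFFER_MODULE_NAME,
		(module_info**)&sBufferModule);
}

static void
example_uninit_buffer_module()
{
	put_module(NET_BUFFER_MODULE_NAME);
}
#endif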
2406 
2407