xref: /haiku/src/system/kernel/port.cpp (revision 97901ec593ec4dd50ac115c1c35a6d72f6e489a5)
1 /*
2  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001, Mark-Jan Bastian. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 
10 /*!	Ports for IPC */
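/*!	A short usage sketch of the public API implemented in this file (the
	name, message code, and buffer sizes below are only placeholders):

	\code
	port_id port = create_port(16, "example port");
		// the queue holds up to 16 pending messages
	write_port(port, 0x1234, "hello", 6);
		// enqueues one message with code 0x1234
	int32 code;
	char buffer[64];
	ssize_t bytesRead = read_port(port, &code, buffer, sizeof(buffer));
		// blocks until a message is available, returns the payload size
	delete_port(port);
	\endcode
*/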
11 
12 
13 #include <port.h>
14 
15 #include <ctype.h>
16 #include <iovec.h>
17 #include <stdlib.h>
18 #include <string.h>
19 
20 #include <OS.h>
21 
22 #include <arch/int.h>
23 #include <heap.h>
24 #include <kernel.h>
25 #include <Notifications.h>
26 #include <sem.h>
27 #include <syscall_restart.h>
28 #include <team.h>
29 #include <tracing.h>
30 #include <util/AutoLock.h>
31 #include <util/list.h>
32 #include <vm/vm.h>
33 #include <wait_for_objects.h>
34 
35 
36 //#define TRACE_PORTS
37 #ifdef TRACE_PORTS
38 #	define TRACE(x) dprintf x
39 #else
40 #	define TRACE(x)
41 #endif
42 
43 
44 struct port_message : DoublyLinkedListLinkImpl<port_message> {
45 	int32				code;
46 	size_t				size;
47 	uid_t				sender;
48 	gid_t				sender_group;
49 	team_id				sender_team;
50 	char				buffer[0];
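		// the payload is stored right after this header in the same heap
		// allocation; get_port_message() allocates
		// sizeof(port_message) + size bytes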
51 };
52 
53 typedef DoublyLinkedList<port_message> MessageList;
54 
55 struct port_entry {
56 	struct list_link	team_link;
57 	port_id				id;
58 	team_id				owner;
59 	int32		 		capacity;
60 	mutex				lock;
61 	uint32				read_count;
62 	int32				write_count;
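		// read_count is the number of messages currently queued for reading;
		// write_count is the number of free queue slots left before writers
		// block (see read_port_etc() and writev_port_etc())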
63 	ConditionVariable	read_condition;
64 	ConditionVariable	write_condition;
65 	int32				total_count;
66 		// messages read from port since creation
67 	select_info*		select_infos;
68 	MessageList			messages;
69 };
70 
71 class PortNotificationService : public DefaultNotificationService {
72 public:
73 							PortNotificationService();
74 
75 			void			Notify(uint32 opcode, port_id port);
76 };
77 
78 
79 #if PORT_TRACING
80 namespace PortTracing {
81 
82 class Create : public AbstractTraceEntry {
83 public:
84 	Create(port_entry& port)
85 		:
86 		fID(port.id),
87 		fOwner(port.owner),
88 		fCapacity(port.capacity)
89 	{
90 		fName = alloc_tracing_buffer_strcpy(port.lock.name, B_OS_NAME_LENGTH,
91 			false);
92 
93 		Initialized();
94 	}
95 
96 	virtual void AddDump(TraceOutput& out)
97 	{
98 		out.Print("port %ld created, name \"%s\", owner %ld, capacity %ld",
99 			fID, fName, fOwner, fCapacity);
100 	}
101 
102 private:
103 	port_id				fID;
104 	char*				fName;
105 	team_id				fOwner;
106 	int32		 		fCapacity;
107 };
108 
109 
110 class Delete : public AbstractTraceEntry {
111 public:
112 	Delete(port_entry& port)
113 		:
114 		fID(port.id)
115 	{
116 		Initialized();
117 	}
118 
119 	virtual void AddDump(TraceOutput& out)
120 	{
121 		out.Print("port %ld deleted", fID);
122 	}
123 
124 private:
125 	port_id				fID;
126 };
127 
128 
129 class Read : public AbstractTraceEntry {
130 public:
131 	Read(port_entry& port, int32 code, ssize_t result)
132 		:
133 		fID(port.id),
134 		fReadCount(port.read_count),
135 		fWriteCount(port.write_count),
136 		fCode(code),
137 		fResult(result)
138 	{
139 		Initialized();
140 	}
141 
142 	virtual void AddDump(TraceOutput& out)
143 	{
144 		out.Print("port %ld read, read %ld, write %ld, code %lx: %ld",
145 			fID, fReadCount, fWriteCount, fCode, fResult);
146 	}
147 
148 private:
149 	port_id				fID;
150 	int32				fReadCount;
151 	int32				fWriteCount;
152 	int32				fCode;
153 	ssize_t				fResult;
154 };
155 
156 
157 class Write : public AbstractTraceEntry {
158 public:
159 	Write(port_entry& port, int32 code, size_t bufferSize, ssize_t result)
160 		:
161 		fID(port.id),
162 		fReadCount(port.read_count),
163 		fWriteCount(port.write_count),
164 		fCode(code),
165 		fBufferSize(bufferSize),
166 		fResult(result)
167 	{
168 		Initialized();
169 	}
170 
171 	virtual void AddDump(TraceOutput& out)
172 	{
173 		out.Print("port %ld write, read %ld, write %ld, code %lx, size %ld: %ld",
174 			fID, fReadCount, fWriteCount, fCode, fBufferSize, fResult);
175 	}
176 
177 private:
178 	port_id				fID;
179 	int32				fReadCount;
180 	int32				fWriteCount;
181 	int32				fCode;
182 	size_t				fBufferSize;
183 	ssize_t				fResult;
184 };
185 
186 
187 class Info : public AbstractTraceEntry {
188 public:
189 	Info(port_entry& port, int32 code, ssize_t result)
190 		:
191 		fID(port.id),
192 		fReadCount(port.read_count),
193 		fWriteCount(port.write_count),
194 		fCode(code),
195 		fResult(result)
196 	{
197 		Initialized();
198 	}
199 
200 	virtual void AddDump(TraceOutput& out)
201 	{
202 		out.Print("port %ld info, read %ld, write %ld, code %lx: %ld",
203 			fID, fReadCount, fWriteCount, fCode, fResult);
204 	}
205 
206 private:
207 	port_id				fID;
208 	int32				fReadCount;
209 	int32				fWriteCount;
210 	int32				fCode;
211 	ssize_t				fResult;
212 };
213 
214 
215 class OwnerChange : public AbstractTraceEntry {
216 public:
217 	OwnerChange(port_entry& port, team_id newOwner, status_t status)
218 		:
219 		fID(port.id),
220 		fOldOwner(port.owner),
221 		fNewOwner(newOwner),
222 		fStatus(status)
223 	{
224 		Initialized();
225 	}
226 
227 	virtual void AddDump(TraceOutput& out)
228 	{
229 		out.Print("port %ld owner change from %ld to %ld: %s", fID, fOldOwner,
230 			fNewOwner, strerror(fStatus));
231 	}
232 
233 private:
234 	port_id				fID;
235 	team_id				fOldOwner;
236 	team_id				fNewOwner;
237 	status_t	 		fStatus;
238 };
239 
240 }	// namespace PortTracing
241 
242 #	define T(x) new(std::nothrow) PortTracing::x;
243 #else
244 #	define T(x) ;
245 #endif
246 
247 
248 static const size_t kInitialPortBufferSize = 4 * 1024 * 1024;
249 static const size_t kTotalSpaceLimit = 64 * 1024 * 1024;
250 static const size_t kTeamSpaceLimit = 8 * 1024 * 1024;
251 static const size_t kBufferGrowRate = kInitialPortBufferSize;
252 
253 #define MAX_QUEUE_LENGTH 4096
254 #define PORT_MAX_MESSAGE_SIZE (256 * 1024)
255 
256 // sMaxPorts must be a power of 2
257 static int32 sMaxPorts = 4096;
258 static int32 sUsedPorts = 0;
259 
260 static struct port_entry* sPorts;
261 static area_id sPortArea;
262 static heap_allocator* sPortAllocator;
263 static ConditionVariable sNoSpaceCondition;
264 static vint32 sTotalSpaceInUse;
265 static vint32 sAreaChangeCounter;
266 static vint32 sAllocatingArea;
267 static bool sPortsActive = false;
268 static port_id sNextPort = 1;
269 static int32 sFirstFreeSlot = 1;
270 static mutex sPortsLock = MUTEX_INITIALIZER("ports list");
271 
272 static PortNotificationService sNotificationService;
273 
274 
275 //	#pragma mark - PortNotificationService
276 
277 
278 PortNotificationService::PortNotificationService()
279 	:
280 	DefaultNotificationService("ports")
281 {
282 }
283 
284 
285 void
286 PortNotificationService::Notify(uint32 opcode, port_id port)
287 {
288 	char eventBuffer[64];
289 	KMessage event;
290 	event.SetTo(eventBuffer, sizeof(eventBuffer), PORT_MONITOR);
291 	event.AddInt32("event", opcode);
292 	event.AddInt32("port", port);
293 
294 	DefaultNotificationService::Notify(event, opcode);
295 }
296 
297 
298 //	#pragma mark -
299 
300 
301 static int
302 dump_port_list(int argc, char** argv)
303 {
304 	const char* name = NULL;
305 	team_id owner = -1;
306 	int32 i;
307 
308 	if (argc > 2) {
309 		if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
310 			owner = strtoul(argv[2], NULL, 0);
311 		else if (!strcmp(argv[1], "name"))
312 			name = argv[2];
313 	} else if (argc > 1)
314 		owner = strtoul(argv[1], NULL, 0);
315 
316 	kprintf("port             id  cap  read-cnt  write-cnt   total   team  "
317 		"name\n");
318 
319 	for (i = 0; i < sMaxPorts; i++) {
320 		struct port_entry* port = &sPorts[i];
321 		if (port->id < 0
322 			|| (owner != -1 && port->owner != owner)
323 			|| (name != NULL && strstr(port->lock.name, name) == NULL))
324 			continue;
325 
326 		kprintf("%p %8ld %4ld %9ld %9ld %8ld %6ld  %s\n", port,
327 			port->id, port->capacity, port->read_count, port->write_count,
328 			port->total_count, port->owner, port->lock.name);
329 	}
330 
331 	return 0;
332 }
333 
334 
335 static void
336 _dump_port_info(struct port_entry* port)
337 {
338 	kprintf("PORT: %p\n", port);
339 	kprintf(" id:              %ld\n", port->id);
340 	kprintf(" name:            \"%s\"\n", port->lock.name);
341 	kprintf(" owner:           %ld\n", port->owner);
342 	kprintf(" capacity:        %ld\n", port->capacity);
343 	kprintf(" read_count:      %ld\n", port->read_count);
344 	kprintf(" write_count:     %ld\n", port->write_count);
345 	kprintf(" total count:     %ld\n", port->total_count);
346 
347 	if (!port->messages.IsEmpty()) {
348 		kprintf("messages:\n");
349 
350 		MessageList::Iterator iterator = port->messages.GetIterator();
351 		while (port_message* message = iterator.Next()) {
352 			kprintf(" %p  %08lx  %ld\n", message, message->code, message->size);
353 		}
354 	}
355 
356 	set_debug_variable("_port", (addr_t)port);
357 	set_debug_variable("_portID", port->id);
358 	set_debug_variable("_owner", port->owner);
359 }
360 
361 
362 static int
363 dump_port_info(int argc, char** argv)
364 {
365 	ConditionVariable* condition = NULL;
366 	const char* name = NULL;
367 
368 	if (argc < 2) {
369 		print_debugger_command_usage(argv[0]);
370 		return 0;
371 	}
372 
373 	if (argc > 2) {
374 		if (!strcmp(argv[1], "address")) {
375 			_dump_port_info((struct port_entry*)parse_expression(argv[2]));
376 			return 0;
377 		} else if (!strcmp(argv[1], "condition"))
378 			condition = (ConditionVariable*)parse_expression(argv[2]);
379 		else if (!strcmp(argv[1], "name"))
380 			name = argv[2];
381 	} else if (parse_expression(argv[1]) > 0) {
382 		// if the argument looks like a number, treat it as such
383 		int32 num = parse_expression(argv[1]);
384 		int32 slot = num % sMaxPorts;
385 		if (sPorts[slot].id != num) {
386 			kprintf("port %ld (%#lx) doesn't exist!\n", num, num);
387 			return 0;
388 		}
389 		_dump_port_info(&sPorts[slot]);
390 		return 0;
391 	} else
392 		name = argv[1];
393 
394 	// walk through the ports list, trying to match name
395 	for (int32 i = 0; i < sMaxPorts; i++) {
396 		if ((name != NULL && sPorts[i].lock.name != NULL
397 				&& !strcmp(name, sPorts[i].lock.name))
398 			|| (condition != NULL && (&sPorts[i].read_condition == condition
399 				|| &sPorts[i].write_condition == condition))) {
400 			_dump_port_info(&sPorts[i]);
401 			return 0;
402 		}
403 	}
404 
405 	return 0;
406 }
407 
408 
409 static void
410 notify_port_select_events(int slot, uint16 events)
411 {
412 	if (sPorts[slot].select_infos)
413 		notify_select_events_list(sPorts[slot].select_infos, events);
414 }
415 
416 
417 static void
418 put_port_message(port_message* message)
419 {
420 	size_t size = sizeof(port_message) + message->size;
421 	heap_free(sPortAllocator, message);
422 
423 	atomic_add(&sTotalSpaceInUse, -size);
424 	sNoSpaceCondition.NotifyAll();
425 }
426 
427 
428 static status_t
429 get_port_message(int32 code, size_t bufferSize, uint32 flags, bigtime_t timeout,
430 	port_message** _message)
431 {
432 	size_t size = sizeof(port_message) + bufferSize;
433 	bool limitReached = false;
434 
435 	while (true) {
436 		if (atomic_add(&sTotalSpaceInUse, size)
437 				> int32(kTotalSpaceLimit - size)) {
438 			// TODO: add per team limit
439 			// We are not allowed to create another heap area, as our
440 			// space limit has been reached - just wait until we get
441 			// some free space again.
442 			limitReached = true;
443 
444 		wait:
445 			MutexLocker locker(sPortsLock);
446 
447 			atomic_add(&sTotalSpaceInUse, -size);
448 
449 			// TODO: we don't want to wait - but does that also mean we
450 			// shouldn't wait for the area creation?
451 			if (limitReached && (flags & B_RELATIVE_TIMEOUT) != 0
452 				&& timeout <= 0)
453 				return B_WOULD_BLOCK;
454 
455 			ConditionVariableEntry entry;
456 			sNoSpaceCondition.Add(&entry);
457 
458 			locker.Unlock();
459 
460 			status_t status = entry.Wait(flags, timeout);
461 			if (status == B_TIMED_OUT)
462 				return B_TIMED_OUT;
463 
464 			// just try again
465 			limitReached = false;
466 			continue;
467 		}
468 
469 		int32 areaChangeCounter = atomic_get(&sAreaChangeCounter);
470 
471 		// We are within the space quota, so try to allocate the buffer
472 
473 		port_message* message
474 			= (port_message*)heap_memalign(sPortAllocator, 0, size);
475 		if (message != NULL) {
476 			message->code = code;
477 			message->size = bufferSize;
478 
479 			*_message = message;
480 			return B_OK;
481 		}
482 
483 		if (atomic_or(&sAllocatingArea, 1) != 0) {
484 			// Just wait for someone else to create an area for us
485 			goto wait;
486 		}
487 
488 		if (areaChangeCounter != atomic_get(&sAreaChangeCounter)) {
489 			atomic_add(&sTotalSpaceInUse, -size);
490 			continue;
491 		}
492 
493 		// Create a new area for the heap to use
494 
495 		addr_t base;
496 		area_id area = create_area("port grown buffer", (void**)&base,
497 			B_ANY_KERNEL_ADDRESS, kBufferGrowRate, B_NO_LOCK,
498 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
499 		if (area < 0) {
500 			// it's time to let the userland feel our pain
501 			sNoSpaceCondition.NotifyAll();
502 			return B_NO_MEMORY;
503 		}
504 
505 		heap_add_area(sPortAllocator, area, base, kBufferGrowRate);
506 
507 		atomic_add(&sAreaChangeCounter, 1);
508 		sNoSpaceCondition.NotifyAll();
509 		atomic_and(&sAllocatingArea, 0);
510 	}
511 }
512 
513 
514 /*!	You need to own the port's lock when calling this function */
515 static bool
516 is_port_closed(int32 slot)
517 {
518 	return sPorts[slot].capacity == 0;
519 }
520 
521 
522 /*!	Fills the port_info structure with information from the specified
523 	port.
524 	The port lock must be held when called.
525 */
526 static void
527 fill_port_info(struct port_entry* port, port_info* info, size_t size)
528 {
529 	info->port = port->id;
530 	info->team = port->owner;
531 	info->capacity = port->capacity;
532 
533 	info->queue_count = port->read_count;
534 	info->total_count = port->total_count;
535 
536 	strlcpy(info->name, port->lock.name, B_OS_NAME_LENGTH);
537 }
538 
539 
540 static ssize_t
541 copy_port_message(port_message* message, int32* _code, void* buffer,
542 	size_t bufferSize, bool userCopy)
543 {
544 	// check output buffer size
545 	size_t size = min_c(bufferSize, message->size);
546 
547 	// copy message
548 	if (_code != NULL)
549 		*_code = message->code;
550 
551 	if (size > 0) {
552 		if (userCopy) {
553 			status_t status = user_memcpy(buffer, message->buffer, size);
554 			if (status != B_OK)
555 				return status;
556 		} else
557 			memcpy(buffer, message->buffer, size);
558 	}
559 
560 	return size;
561 }
562 
563 
564 static void
565 uninit_port_locked(struct port_entry& port)
566 {
567 	int32 id = port.id;
568 
569 	// mark port as invalid
570 	port.id = -1;
571 	free((char*)port.lock.name);
572 	port.lock.name = NULL;
573 
574 	while (port_message* message = port.messages.RemoveHead()) {
575 		put_port_message(message);
576 	}
577 
578 	notify_port_select_events(id % sMaxPorts, B_EVENT_INVALID);
579 	port.select_infos = NULL;
580 
581 	// Release the threads that were blocking on this port.
582 	// read_port() will see the B_BAD_PORT_ID return value, and act accordingly
583 	port.read_condition.NotifyAll(false, B_BAD_PORT_ID);
584 	port.write_condition.NotifyAll(false, B_BAD_PORT_ID);
585 	sNotificationService.Notify(PORT_REMOVED, id);
586 }
587 
588 
589 //	#pragma mark - private kernel API
590 
591 
592 /*!	This function deletes all the ports that are owned by the passed team.
593 */
594 void
595 delete_owned_ports(struct team* team)
596 {
597 	TRACE(("delete_owned_ports(owner = %ld)\n", team->id));
598 
599 	struct list queue;
600 
601 	{
602 		InterruptsSpinLocker locker(gTeamSpinlock);
603 		list_move_to_list(&team->port_list, &queue);
604 	}
605 
606 	int32 firstSlot = sMaxPorts;
607 	int32 count = 0;
608 
609 	while (port_entry* port = (port_entry*)list_remove_head_item(&queue)) {
610 		if (firstSlot > port->id % sMaxPorts)
611 			firstSlot = port->id % sMaxPorts;
612 		count++;
613 
614 		MutexLocker locker(port->lock);
615 		uninit_port_locked(*port);
616 	}
617 
618 	MutexLocker _(sPortsLock);
619 
620 	// update the first free slot hint in the array
621 	if (firstSlot < sFirstFreeSlot)
622 		sFirstFreeSlot = firstSlot;
623 
624 	sUsedPorts -= count;
625 }
626 
627 
628 int32
629 port_max_ports(void)
630 {
631 	return sMaxPorts;
632 }
633 
634 
635 int32
636 port_used_ports(void)
637 {
638 	return sUsedPorts;
639 }
640 
641 
642 status_t
643 port_init(kernel_args *args)
644 {
645 	size_t size = sizeof(struct port_entry) * sMaxPorts;
646 
647 	// create and initialize ports table
648 	sPortArea = create_area_etc(B_SYSTEM_TEAM, "port_table", (void**)&sPorts,
649 		B_ANY_KERNEL_ADDRESS, size, B_FULL_LOCK,
650 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT);
651 	if (sPortArea < 0) {
652 		panic("unable to allocate kernel port table!\n");
653 		return sPortArea;
654 	}
655 
656 	memset(sPorts, 0, size);
657 	for (int32 i = 0; i < sMaxPorts; i++) {
658 		mutex_init(&sPorts[i].lock, NULL);
659 		sPorts[i].id = -1;
660 		sPorts[i].read_condition.Init(&sPorts[i], "port read");
661 		sPorts[i].write_condition.Init(&sPorts[i], "port write");
662 	}
663 
664 	addr_t base;
665 	if (create_area("port heap", (void**)&base, B_ANY_KERNEL_ADDRESS,
666 			kInitialPortBufferSize, B_NO_LOCK,
667 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA) < 0) {
668 			// TODO: Since port_init() is invoked before the boot partition is
669 			// mounted, the underlying VMAnonymousCache cannot commit swap space
670 			// upon creation and thus the pages aren't swappable after all. This
671 			// makes the area essentially B_LAZY_LOCK with additional overhead.
672 		panic("unable to allocate port area!\n");
673 		return B_ERROR;
674 	}
675 
676 	static const heap_class kBufferHeapClass = {"default", 100,
677 		PORT_MAX_MESSAGE_SIZE + sizeof(port_message), 2 * 1024,
678 		sizeof(port_message), 8, 4, 64};
679 	sPortAllocator = heap_create_allocator("port buffer", base,
680 		kInitialPortBufferSize, &kBufferHeapClass, true);
681 	if (sPortAllocator == NULL) {
682 		panic("unable to create port heap");
683 		return B_NO_MEMORY;
684 	}
685 
686 	sNoSpaceCondition.Init(sPorts, "port space");
687 
688 	// add debugger commands
689 	add_debugger_command_etc("ports", &dump_port_list,
690 		"Dump a list of all active ports (for team, with name, etc.)",
691 		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]\n"
692 		"Prints a list of all active ports meeting the given\n"
693 		"requirement. If no argument is given, all ports are listed.\n"
694 		"  <team>             - The team owning the ports.\n"
695 		"  <name>             - Part of the name of the ports.\n", 0);
696 	add_debugger_command_etc("port", &dump_port_info,
697 		"Dump info about a particular port",
698 		"(<id> | [ \"address\" ] <address>) | ([ \"name\" ] <name>) "
699 			"| (\"condition\" <address>)\n"
700 		"Prints info about the specified port.\n"
701 		"  <address>   - Pointer to the port structure.\n"
702 		"  <name>      - Name of the port.\n"
703 		"  <condition> - address of the port's read or write condition.\n", 0);
704 
705 	new(&sNotificationService) PortNotificationService();
706 	sPortsActive = true;
707 	return B_OK;
708 }
709 
710 
711 //	#pragma mark - public kernel API
712 
713 
714 port_id
715 create_port(int32 queueLength, const char* name)
716 {
717 	TRACE(("create_port(queueLength = %ld, name = \"%s\")\n", queueLength,
718 		name));
719 
720 	if (!sPortsActive) {
721 		panic("ports used too early!\n");
722 		return B_BAD_PORT_ID;
723 	}
724 	if (queueLength < 1 || queueLength > MAX_QUEUE_LENGTH)
725 		return B_BAD_VALUE;
726 
727 	struct team* team = thread_get_current_thread()->team;
728 	if (team == NULL)
729 		return B_BAD_TEAM_ID;
730 
731 	MutexLocker locker(sPortsLock);
732 
733 	// check early on if there are any free port slots to use
734 	if (sUsedPorts >= sMaxPorts)
735 		return B_NO_MORE_PORTS;
736 
737 	// check & dup name
738 	char* nameBuffer = strdup(name != NULL ? name : "unnamed port");
739 	if (nameBuffer == NULL)
740 		return B_NO_MEMORY;
741 
742 	sUsedPorts++;
743 
744 	// find the first empty spot
745 	for (int32 slot = 0; slot < sMaxPorts; slot++) {
746 		int32 i = (slot + sFirstFreeSlot) % sMaxPorts;
747 
748 		if (sPorts[i].id == -1) {
749 			// make the port_id map to the slot it is in (id % sMaxPorts == i)
750 			if (i >= sNextPort % sMaxPorts)
751 				sNextPort += i - sNextPort % sMaxPorts;
752 			else
753 				sNextPort += sMaxPorts - (sNextPort % sMaxPorts - i);
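			// For example (illustrative numbers): with sMaxPorts == 4096,
			// i == 5 and sNextPort == 8200, 8200 % 4096 == 8 has already
			// passed slot 5, so sNextPort is advanced by 4096 - (8 - 5)
			// == 4093 to 12293, and 12293 % 4096 == 5 == i again.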
754 			sFirstFreeSlot = slot + 1;
755 
756 			MutexLocker portLocker(sPorts[i].lock);
757 			sPorts[i].id = sNextPort++;
758 			locker.Unlock();
759 
760 			sPorts[i].capacity = queueLength;
761 			sPorts[i].owner = team_get_current_team_id();
762 			sPorts[i].lock.name = nameBuffer;
763 			sPorts[i].read_count = 0;
764 			sPorts[i].write_count = queueLength;
765 			sPorts[i].total_count = 0;
766 			sPorts[i].select_infos = NULL;
767 
768 			{
769 				InterruptsSpinLocker teamLocker(gTeamSpinlock);
770 				list_add_item(&team->port_list, &sPorts[i].team_link);
771 			}
772 
773 			port_id id = sPorts[i].id;
774 
775 			T(Create(sPorts[i]));
776 			portLocker.Unlock();
777 
778 			TRACE(("create_port() done: port created %ld\n", id));
779 
780 			sNotificationService.Notify(PORT_ADDED, id);
781 			return id;
782 		}
783 	}
784 
785 	// Still not enough ports... - due to sUsedPorts, this cannot really
786 	// happen anymore.
787 	panic("out of ports, but sUsedPorts is broken");
788 	return B_NO_MORE_PORTS;
789 }
790 
791 
792 status_t
793 close_port(port_id id)
794 {
795 	TRACE(("close_port(id = %ld)\n", id));
796 
797 	if (!sPortsActive || id < 0)
798 		return B_BAD_PORT_ID;
799 
800 	int32 slot = id % sMaxPorts;
801 
802 	// lock the port and check whether it is still valid
803 	MutexLocker locker(sPorts[slot].lock);
804 
805 	if (sPorts[slot].id != id) {
806 		TRACE(("close_port: invalid port_id %ld\n", id));
807 		return B_BAD_PORT_ID;
808 	}
809 
810 	// mark the port as closed to disable writing; notifying the condition
811 	// variables below wakes up any waiting readers and writers
812 	sPorts[slot].capacity = 0;
813 
814 	notify_port_select_events(slot, B_EVENT_INVALID);
815 	sPorts[slot].select_infos = NULL;
816 
817 	sPorts[slot].read_condition.NotifyAll(false, B_BAD_PORT_ID);
818 	sPorts[slot].write_condition.NotifyAll(false, B_BAD_PORT_ID);
819 
820 	return B_OK;
821 }
822 
823 
824 status_t
825 delete_port(port_id id)
826 {
827 	TRACE(("delete_port(id = %ld)\n", id));
828 
829 	if (!sPortsActive || id < 0)
830 		return B_BAD_PORT_ID;
831 
832 	int32 slot = id % sMaxPorts;
833 
834 	MutexLocker locker(sPorts[slot].lock);
835 
836 	if (sPorts[slot].id != id) {
837 		TRACE(("delete_port: invalid port_id %ld\n", id));
838 		return B_BAD_PORT_ID;
839 	}
840 
841 	T(Delete(sPorts[slot]));
842 
843 	{
844 		InterruptsSpinLocker teamLocker(gTeamSpinlock);
845 		list_remove_link(&sPorts[slot].team_link);
846 	}
847 
848 	uninit_port_locked(sPorts[slot]);
849 
850 	locker.Unlock();
851 
852 	MutexLocker _(sPortsLock);
853 
854 	// update the first free slot hint in the array
855 	if (slot < sFirstFreeSlot)
856 		sFirstFreeSlot = slot;
857 
858 	sUsedPorts--;
859 	return B_OK;
860 }
861 
862 
863 status_t
864 select_port(int32 id, struct select_info* info, bool kernel)
865 {
866 	if (id < 0)
867 		return B_BAD_PORT_ID;
868 
869 	int32 slot = id % sMaxPorts;
870 
871 	MutexLocker locker(sPorts[slot].lock);
872 
873 	if (sPorts[slot].id != id || is_port_closed(slot))
874 		return B_BAD_PORT_ID;
875 	if (!kernel && sPorts[slot].owner == team_get_kernel_team_id()) {
876 		// kernel port, but the call comes from userland
877 		return B_NOT_ALLOWED;
878 	}
879 
880 	info->selected_events &= B_EVENT_READ | B_EVENT_WRITE | B_EVENT_INVALID;
881 
882 	if (info->selected_events != 0) {
883 		uint16 events = 0;
884 
885 		info->next = sPorts[slot].select_infos;
886 		sPorts[slot].select_infos = info;
887 
888 		// check for events
889 		if ((info->selected_events & B_EVENT_READ) != 0
890 			&& !sPorts[slot].messages.IsEmpty()) {
891 			events |= B_EVENT_READ;
892 		}
893 
894 		if (sPorts[slot].write_count > 0)
895 			events |= B_EVENT_WRITE;
896 
897 		if (events != 0)
898 			notify_select_events(info, events);
899 	}
900 
901 	return B_OK;
902 }
903 
904 
905 status_t
906 deselect_port(int32 id, struct select_info* info, bool kernel)
907 {
908 	if (id < 0)
909 		return B_BAD_PORT_ID;
910 	if (info->selected_events == 0)
911 		return B_OK;
912 
913 	int32 slot = id % sMaxPorts;
914 
915 	MutexLocker locker(sPorts[slot].lock);
916 
917 	if (sPorts[slot].id == id) {
918 		select_info** infoLocation = &sPorts[slot].select_infos;
919 		while (*infoLocation != NULL && *infoLocation != info)
920 			infoLocation = &(*infoLocation)->next;
921 
922 		if (*infoLocation == info)
923 			*infoLocation = info->next;
924 	}
925 
926 	return B_OK;
927 }
928 
929 
930 port_id
931 find_port(const char* name)
932 {
933 	TRACE(("find_port(name = \"%s\")\n", name));
934 
935 	if (!sPortsActive) {
936 		panic("ports used too early!\n");
937 		return B_NAME_NOT_FOUND;
938 	}
939 	if (name == NULL)
940 		return B_BAD_VALUE;
941 
942 	// Since we have to check every single port, and we don't
943 	// care if it goes away at any point, we're only grabbing
944 	// the port lock in question, not the port list lock
945 
946 	// loop over list
947 	for (int32 i = 0; i < sMaxPorts; i++) {
948 		// lock every individual port before comparing
949 		MutexLocker _(sPorts[i].lock);
950 
951 		if (sPorts[i].id >= 0 && !strcmp(name, sPorts[i].lock.name))
952 			return sPorts[i].id;
953 	}
954 
955 	return B_NAME_NOT_FOUND;
956 }
957 
958 
959 status_t
960 _get_port_info(port_id id, port_info* info, size_t size)
961 {
962 	TRACE(("get_port_info(id = %ld)\n", id));
963 
964 	if (info == NULL || size != sizeof(port_info))
965 		return B_BAD_VALUE;
966 	if (!sPortsActive || id < 0)
967 		return B_BAD_PORT_ID;
968 
969 	int32 slot = id % sMaxPorts;
970 
971 	MutexLocker locker(sPorts[slot].lock);
972 
973 	if (sPorts[slot].id != id || sPorts[slot].capacity == 0) {
974 		TRACE(("get_port_info: invalid port_id %ld\n", id));
975 		return B_BAD_PORT_ID;
976 	}
977 
978 	// fill a port_info struct with info
979 	fill_port_info(&sPorts[slot], info, size);
980 	return B_OK;
981 }
982 
983 
984 status_t
985 _get_next_port_info(team_id team, int32* _cookie, struct port_info* info,
986 	size_t size)
987 {
988 	TRACE(("get_next_port_info(team = %ld)\n", team));
989 
990 	if (info == NULL || size != sizeof(port_info) || _cookie == NULL
991 		|| team < B_OK)
992 		return B_BAD_VALUE;
993 	if (!sPortsActive)
994 		return B_BAD_PORT_ID;
995 
996 	int32 slot = *_cookie;
997 	if (slot >= sMaxPorts)
998 		return B_BAD_PORT_ID;
999 
1000 	if (team == B_CURRENT_TEAM)
1001 		team = team_get_current_team_id();
1002 
1003 	info->port = -1; // used as found flag
1004 
1005 	while (slot < sMaxPorts) {
1006 		MutexLocker locker(sPorts[slot].lock);
1007 
1008 		if (sPorts[slot].id != -1 && !is_port_closed(slot)
1009 			&& sPorts[slot].owner == team) {
1010 			// found one!
1011 			fill_port_info(&sPorts[slot], info, size);
1012 			slot++;
1013 			break;
1014 		}
1015 
1016 		slot++;
1017 	}
1018 
1019 	if (info->port == -1)
1020 		return B_BAD_PORT_ID;
1021 
1022 	*_cookie = slot;
1023 	return B_OK;
1024 }
1025 
1026 
1027 ssize_t
1028 port_buffer_size(port_id id)
1029 {
1030 	return port_buffer_size_etc(id, 0, 0);
1031 }
1032 
1033 
1034 ssize_t
1035 port_buffer_size_etc(port_id id, uint32 flags, bigtime_t timeout)
1036 {
1037 	port_message_info info;
1038 	status_t error = get_port_message_info_etc(id, &info, flags, timeout);
1039 	return error != B_OK ? error : info.size;
1040 }
1041 
1042 
1043 status_t
1044 _get_port_message_info_etc(port_id id, port_message_info* info,
1045 	size_t infoSize, uint32 flags, bigtime_t timeout)
1046 {
1047 	if (info == NULL || infoSize != sizeof(port_message_info))
1048 		return B_BAD_VALUE;
1049 	if (!sPortsActive || id < 0)
1050 		return B_BAD_PORT_ID;
1051 
1052 	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
1053 		| B_ABSOLUTE_TIMEOUT;
1054 	int32 slot = id % sMaxPorts;
1055 
1056 	MutexLocker locker(sPorts[slot].lock);
1057 
1058 	if (sPorts[slot].id != id
1059 		|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
1060 		T(Info(sPorts[slot], 0, B_BAD_PORT_ID));
1061 		TRACE(("_get_port_message_info_etc(): %s port %ld\n",
1062 			sPorts[slot].id == id ? "closed" : "invalid", id));
1063 		return B_BAD_PORT_ID;
1064 	}
1065 
1066 	while (sPorts[slot].read_count == 0) {
1067 		// We need to wait for a message to appear
1068 		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
1069 			return B_WOULD_BLOCK;
1070 
1071 		ConditionVariableEntry entry;
1072 		sPorts[slot].read_condition.Add(&entry);
1073 
1074 		locker.Unlock();
1075 
1076 		// block if no message, or, if B_TIMEOUT flag set, block with timeout
1077 		status_t status = entry.Wait(flags, timeout);
1078 
1079 		if (status != B_OK) {
1080 			T(Info(sPorts[slot], 0, status));
1081 			return status;
1082 		}
1083 
1084 		locker.Lock();
1085 
1086 		if (sPorts[slot].id != id
1087 			|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
1088 			// the port is no longer there
1089 			T(Info(sPorts[slot], 0, B_BAD_PORT_ID));
1090 			return B_BAD_PORT_ID;
1091 		}
1092 	}
1093 
1094 	// get the oldest message in the queue
1095 	port_message* message = sPorts[slot].messages.Head();
1096 	if (message == NULL) {
1097 		panic("port %ld: no messages found\n", sPorts[slot].id);
1098 		return B_ERROR;
1099 	}
1100 
1101 	info->size = message->size;
1102 	info->sender = message->sender;
1103 	info->sender_group = message->sender_group;
1104 	info->sender_team = message->sender_team;
1105 
1106 	T(Info(sPorts[slot], message->code, B_OK));
1107 
1108 	// notify next one, as we haven't read from the port
1109 	sPorts[slot].read_condition.NotifyOne();
1110 
1111 	return B_OK;
1112 }
1113 
1114 
1115 ssize_t
1116 port_count(port_id id)
1117 {
1118 	if (!sPortsActive || id < 0)
1119 		return B_BAD_PORT_ID;
1120 
1121 	int32 slot = id % sMaxPorts;
1122 
1123 	MutexLocker locker(sPorts[slot].lock);
1124 
1125 	if (sPorts[slot].id != id) {
1126 		TRACE(("port_count: invalid port_id %ld\n", id));
1127 		return B_BAD_PORT_ID;
1128 	}
1129 
1130 	// return count of messages
1131 	return sPorts[slot].read_count;
1132 }
1133 
1134 
1135 ssize_t
1136 read_port(port_id port, int32* msgCode, void* buffer, size_t bufferSize)
1137 {
1138 	return read_port_etc(port, msgCode, buffer, bufferSize, 0, 0);
1139 }
1140 
1141 
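/*!	A usage sketch for read_port_etc() with a relative timeout (the port,
	buffer size, and one second timeout are illustrative only):

	\code
	int32 code;
	char buffer[256];
	ssize_t bytes = read_port_etc(somePort, &code, buffer, sizeof(buffer),
		B_RELATIVE_TIMEOUT, 1000000);
	if (bytes == B_TIMED_OUT)
		; // no message arrived within one second
	else if (bytes >= 0)
		; // 'bytes' bytes of payload were copied and 'code' was set
	\endcode
*/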
1142 ssize_t
1143 read_port_etc(port_id id, int32* _code, void* buffer, size_t bufferSize,
1144 	uint32 flags, bigtime_t timeout)
1145 {
1146 	if (!sPortsActive || id < 0)
1147 		return B_BAD_PORT_ID;
1148 	if ((buffer == NULL && bufferSize > 0) || timeout < 0)
1149 		return B_BAD_VALUE;
1150 
1151 	bool userCopy = (flags & PORT_FLAG_USE_USER_MEMCPY) != 0;
1152 	bool peekOnly = !userCopy && (flags & B_PEEK_PORT_MESSAGE) != 0;
1153 		// TODO: we could allow peeking for user apps now
1154 
1155 	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
1156 		| B_ABSOLUTE_TIMEOUT;
1157 
1158 	int32 slot = id % sMaxPorts;
1159 
1160 	MutexLocker locker(sPorts[slot].lock);
1161 
1162 	if (sPorts[slot].id != id
1163 		|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
1164 		T(Read(sPorts[slot], 0, B_BAD_PORT_ID));
1165 		TRACE(("read_port_etc(): %s port %ld\n",
1166 			sPorts[slot].id == id ? "closed" : "invalid", id));
1167 		return B_BAD_PORT_ID;
1168 	}
1169 
1170 	while (sPorts[slot].read_count == 0) {
1171 		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
1172 			return B_WOULD_BLOCK;
1173 
1174 		// We need to wait for a message to appear
1175 		ConditionVariableEntry entry;
1176 		sPorts[slot].read_condition.Add(&entry);
1177 
1178 		locker.Unlock();
1179 
1180 		// block if no message, or, if B_TIMEOUT flag set, block with timeout
1181 		status_t status = entry.Wait(flags, timeout);
1182 
1183 		locker.Lock();
1184 
1185 		if (sPorts[slot].id != id
1186 			|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
1187 			// the port is no longer there
1188 			T(Read(sPorts[slot], 0, B_BAD_PORT_ID));
1189 			return B_BAD_PORT_ID;
1190 		}
1191 
1192 		if (status != B_OK) {
1193 			T(Read(sPorts[slot], 0, status));
1194 			return status;
1195 		}
1196 	}
1197 
1198 	// get the oldest message in the queue
1199 	port_message* message = sPorts[slot].messages.Head();
1200 	if (message == NULL) {
1201 		panic("port %ld: no messages found\n", sPorts[slot].id);
1202 		return B_ERROR;
1203 	}
1204 
1205 	if (peekOnly) {
1206 		size_t size = copy_port_message(message, _code, buffer, bufferSize,
1207 			userCopy);
1208 
1209 		T(Read(sPorts[slot], message->code, size));
1210 
1211 		sPorts[slot].read_condition.NotifyOne();
1212 			// we only peeked, but didn't grab the message
1213 		return size;
1214 	}
1215 
1216 	sPorts[slot].messages.RemoveHead();
1217 	sPorts[slot].total_count++;
1218 	sPorts[slot].write_count++;
1219 	sPorts[slot].read_count--;
1220 
1221 	notify_port_select_events(slot, B_EVENT_WRITE);
1222 	sPorts[slot].write_condition.NotifyOne();
1223 		// make one spot in queue available again for write
1224 
1225 	locker.Unlock();
1226 
1227 	size_t size = copy_port_message(message, _code, buffer, bufferSize,
1228 		userCopy);
1229 	T(Read(sPorts[slot], message->code, size));
1230 
1231 	put_port_message(message);
1232 	return size;
1233 }
1234 
1235 
1236 status_t
1237 write_port(port_id id, int32 msgCode, const void* buffer, size_t bufferSize)
1238 {
1239 	iovec vec = { (void*)buffer, bufferSize };
1240 
1241 	return writev_port_etc(id, msgCode, &vec, 1, bufferSize, 0, 0);
1242 }
1243 
1244 
1245 status_t
1246 write_port_etc(port_id id, int32 msgCode, const void* buffer,
1247 	size_t bufferSize, uint32 flags, bigtime_t timeout)
1248 {
1249 	iovec vec = { (void*)buffer, bufferSize };
1250 
1251 	return writev_port_etc(id, msgCode, &vec, 1, bufferSize, flags, timeout);
1252 }
1253 
1254 
1255 status_t
1256 writev_port_etc(port_id id, int32 msgCode, const iovec* msgVecs,
1257 	size_t vecCount, size_t bufferSize, uint32 flags, bigtime_t timeout)
1258 {
1259 	if (!sPortsActive || id < 0)
1260 		return B_BAD_PORT_ID;
1261 	if (bufferSize > PORT_MAX_MESSAGE_SIZE)
1262 		return B_BAD_VALUE;
1263 
1264 	bool userCopy = (flags & PORT_FLAG_USE_USER_MEMCPY) != 0;
1265 		// evaluated before the mask below strips this flag
1266 	// mask irrelevant flags; only the wait and timeout flags are used below
1267 	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
1268 		| B_ABSOLUTE_TIMEOUT;
1269 	if ((flags & B_RELATIVE_TIMEOUT) != 0
1270 		&& timeout != B_INFINITE_TIMEOUT && timeout > 0) {
1271 		// Make the timeout absolute, since we have more than one step where
1272 		// we might have to wait
1273 		flags = (flags & ~B_RELATIVE_TIMEOUT) | B_ABSOLUTE_TIMEOUT;
1274 		timeout += system_time();
1275 	}
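	// For example, a relative timeout of 500000 (0.5 s) becomes the absolute
	// deadline system_time() + 500000, so the wait for a free queue slot and
	// the wait inside get_port_message() share one deadline instead of each
	// getting the full 0.5 s.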
1276 
1277 	int32 slot = id % sMaxPorts;
1278 	status_t status;
1279 	port_message* message = NULL;
1280 
1281 	MutexLocker locker(sPorts[slot].lock);
1282 
1283 	if (sPorts[slot].id != id) {
1284 		TRACE(("write_port_etc: invalid port_id %ld\n", id));
1285 		return B_BAD_PORT_ID;
1286 	}
1287 	if (is_port_closed(slot)) {
1288 		TRACE(("write_port_etc: port %ld closed\n", id));
1289 		return B_BAD_PORT_ID;
1290 	}
1291 
1292 	if (sPorts[slot].write_count <= 0) {
1293 		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
1294 			return B_WOULD_BLOCK;
1295 
1296 		sPorts[slot].write_count--;
1297 
1298 		// We need to block in order to wait for a free message slot
1299 		ConditionVariableEntry entry;
1300 		sPorts[slot].write_condition.Add(&entry);
1301 
1302 		locker.Unlock();
1303 
1304 		status = entry.Wait(flags, timeout);
1305 
1306 		locker.Lock();
1307 
1308 		if (sPorts[slot].id != id || is_port_closed(slot)) {
1309 			// the port is no longer there
1310 			T(Write(sPorts[slot], 0, 0, B_BAD_PORT_ID));
1311 			return B_BAD_PORT_ID;
1312 		}
1313 
1314 		if (status != B_OK)
1315 			goto error;
1316 	} else
1317 		sPorts[slot].write_count--;
1318 
1319 	status = get_port_message(msgCode, bufferSize, flags, timeout,
1320 		&message);
1321 	if (status != B_OK)
1322 		goto error;
1323 
1324 	// sender credentials
1325 	message->sender = geteuid();
1326 	message->sender_group = getegid();
1327 	message->sender_team = team_get_current_team_id();
1328 
1329 	if (bufferSize > 0) {
1330 		uint32 i;
1331 		if (userCopy) {
1332 			// copy from user memory
1333 			for (i = 0; i < vecCount; i++) {
1334 				size_t bytes = msgVecs[i].iov_len;
1335 				if (bytes > bufferSize)
1336 					bytes = bufferSize;
1337 
1338 				status_t status = user_memcpy(message->buffer,
1339 					msgVecs[i].iov_base, bytes);
1340 				if (status != B_OK) {
1341 					put_port_message(message);
1342 					goto error;
1343 				}
1344 
1345 				bufferSize -= bytes;
1346 				if (bufferSize == 0)
1347 					break;
1348 			}
1349 		} else {
1350 			// copy from kernel memory
1351 			for (i = 0; i < vecCount; i++) {
1352 				size_t bytes = msgVecs[i].iov_len;
1353 				if (bytes > bufferSize)
1354 					bytes = bufferSize;
1355 
1356 				memcpy(message->buffer, msgVecs[i].iov_base, bytes);
1357 
1358 				bufferSize -= bytes;
1359 				if (bufferSize == 0)
1360 					break;
1361 			}
1362 		}
1363 	}
1364 
1365 	sPorts[slot].messages.Add(message);
1366 	sPorts[slot].read_count++;
1367 
1368 	T(Write(sPorts[slot], message->code, message->size, B_OK));
1369 
1370 	notify_port_select_events(slot, B_EVENT_READ);
1371 	sPorts[slot].read_condition.NotifyOne();
1372 	return B_OK;
1373 
1374 error:
1375 	// Give up our slot in the queue again, and let someone else
1376 	// try and fail
1377 	T(Write(sPorts[slot], 0, 0, status));
1378 	sPorts[slot].write_count++;
1379 	notify_port_select_events(slot, B_EVENT_WRITE);
1380 	sPorts[slot].write_condition.NotifyOne();
1381 
1382 	return status;
1383 }
1384 
1385 
1386 status_t
1387 set_port_owner(port_id id, team_id newTeamID)
1388 {
1389 	TRACE(("set_port_owner(id = %ld, team = %ld)\n", id, newTeamID));
1390 
1391 	if (id < 0)
1392 		return B_BAD_PORT_ID;
1393 
1394 	int32 slot = id % sMaxPorts;
1395 
1396 	MutexLocker locker(sPorts[slot].lock);
1397 
1398 	if (sPorts[slot].id != id) {
1399 		TRACE(("set_port_owner: invalid port_id %ld\n", id));
1400 		return B_BAD_PORT_ID;
1401 	}
1402 
1403 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
1404 
1405 	struct team* team = team_get_team_struct_locked(newTeamID);
1406 	if (team == NULL) {
1407 		T(OwnerChange(sPorts[slot], newTeamID, B_BAD_TEAM_ID));
1408 		return B_BAD_TEAM_ID;
1409 	}
1410 
1411 	// transfer ownership to other team
1412 	list_remove_link(&sPorts[slot].team_link);
1413 	list_add_item(&team->port_list, &sPorts[slot].team_link);
1414 	sPorts[slot].owner = newTeamID;
1415 
1416 	T(OwnerChange(sPorts[slot], newTeamID, B_OK));
1417 	return B_OK;
1418 }
1419 
1420 
1421 //	#pragma mark - syscalls
1422 
1423 
1424 port_id
1425 _user_create_port(int32 queueLength, const char *userName)
1426 {
1427 	char name[B_OS_NAME_LENGTH];
1428 
1429 	if (userName == NULL)
1430 		return create_port(queueLength, NULL);
1431 
1432 	if (!IS_USER_ADDRESS(userName)
1433 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
1434 		return B_BAD_ADDRESS;
1435 
1436 	return create_port(queueLength, name);
1437 }
1438 
1439 
1440 status_t
1441 _user_close_port(port_id id)
1442 {
1443 	return close_port(id);
1444 }
1445 
1446 
1447 status_t
1448 _user_delete_port(port_id id)
1449 {
1450 	return delete_port(id);
1451 }
1452 
1453 
1454 port_id
1455 _user_find_port(const char *userName)
1456 {
1457 	char name[B_OS_NAME_LENGTH];
1458 
1459 	if (userName == NULL)
1460 		return B_BAD_VALUE;
1461 	if (!IS_USER_ADDRESS(userName)
1462 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
1463 		return B_BAD_ADDRESS;
1464 
1465 	return find_port(name);
1466 }
1467 
1468 
1469 status_t
1470 _user_get_port_info(port_id id, struct port_info *userInfo)
1471 {
1472 	struct port_info info;
1473 	status_t status;
1474 
1475 	if (userInfo == NULL)
1476 		return B_BAD_VALUE;
1477 	if (!IS_USER_ADDRESS(userInfo))
1478 		return B_BAD_ADDRESS;
1479 
1480 	status = get_port_info(id, &info);
1481 
1482 	// copy back to user space
1483 	if (status == B_OK
1484 		&& user_memcpy(userInfo, &info, sizeof(struct port_info)) < B_OK)
1485 		return B_BAD_ADDRESS;
1486 
1487 	return status;
1488 }
1489 
1490 
1491 status_t
1492 _user_get_next_port_info(team_id team, int32 *userCookie,
1493 	struct port_info *userInfo)
1494 {
1495 	struct port_info info;
1496 	status_t status;
1497 	int32 cookie;
1498 
1499 	if (userCookie == NULL || userInfo == NULL)
1500 		return B_BAD_VALUE;
1501 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
1502 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
1503 		return B_BAD_ADDRESS;
1504 
1505 	status = get_next_port_info(team, &cookie, &info);
1506 
1507 	// copy back to user space
1508 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
1509 		|| (status == B_OK && user_memcpy(userInfo, &info,
1510 				sizeof(struct port_info)) < B_OK))
1511 		return B_BAD_ADDRESS;
1512 
1513 	return status;
1514 }
1515 
1516 
1517 ssize_t
1518 _user_port_buffer_size_etc(port_id port, uint32 flags, bigtime_t timeout)
1519 {
1520 	syscall_restart_handle_timeout_pre(flags, timeout);
1521 
1522 	status_t status = port_buffer_size_etc(port, flags | B_CAN_INTERRUPT,
1523 		timeout);
1524 
1525 	return syscall_restart_handle_timeout_post(status, timeout);
1526 }
1527 
1528 
1529 ssize_t
1530 _user_port_count(port_id port)
1531 {
1532 	return port_count(port);
1533 }
1534 
1535 
1536 status_t
1537 _user_set_port_owner(port_id port, team_id team)
1538 {
1539 	return set_port_owner(port, team);
1540 }
1541 
1542 
1543 ssize_t
1544 _user_read_port_etc(port_id port, int32 *userCode, void *userBuffer,
1545 	size_t bufferSize, uint32 flags, bigtime_t timeout)
1546 {
1547 	int32 messageCode;
1548 	ssize_t	bytesRead;
1549 
1550 	syscall_restart_handle_timeout_pre(flags, timeout);
1551 
1552 	if (userBuffer == NULL && bufferSize != 0)
1553 		return B_BAD_VALUE;
1554 	if ((userCode != NULL && !IS_USER_ADDRESS(userCode))
1555 		|| (userBuffer != NULL && !IS_USER_ADDRESS(userBuffer)))
1556 		return B_BAD_ADDRESS;
1557 
1558 	bytesRead = read_port_etc(port, &messageCode, userBuffer, bufferSize,
1559 		flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT, timeout);
1560 
1561 	if (bytesRead >= 0 && userCode != NULL
1562 		&& user_memcpy(userCode, &messageCode, sizeof(int32)) < B_OK)
1563 		return B_BAD_ADDRESS;
1564 
1565 	return syscall_restart_handle_timeout_post(bytesRead, timeout);
1566 }
1567 
1568 
1569 status_t
1570 _user_write_port_etc(port_id port, int32 messageCode, const void *userBuffer,
1571 	size_t bufferSize, uint32 flags, bigtime_t timeout)
1572 {
1573 	iovec vec = { (void *)userBuffer, bufferSize };
1574 
1575 	syscall_restart_handle_timeout_pre(flags, timeout);
1576 
1577 	if (userBuffer == NULL && bufferSize != 0)
1578 		return B_BAD_VALUE;
1579 	if (userBuffer != NULL && !IS_USER_ADDRESS(userBuffer))
1580 		return B_BAD_ADDRESS;
1581 
1582 	status_t status = writev_port_etc(port, messageCode, &vec, 1, bufferSize,
1583 		flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT, timeout);
1584 
1585 	return syscall_restart_handle_timeout_post(status, timeout);
1586 }
1587 
1588 
1589 status_t
1590 _user_writev_port_etc(port_id port, int32 messageCode, const iovec *userVecs,
1591 	size_t vecCount, size_t bufferSize, uint32 flags, bigtime_t timeout)
1592 {
1593 	syscall_restart_handle_timeout_pre(flags, timeout);
1594 
1595 	if (userVecs == NULL && bufferSize != 0)
1596 		return B_BAD_VALUE;
1597 	if (userVecs != NULL && !IS_USER_ADDRESS(userVecs))
1598 		return B_BAD_ADDRESS;
1599 
1600 	iovec *vecs = NULL;
1601 	if (userVecs && vecCount != 0) {
1602 		vecs = (iovec*)malloc(sizeof(iovec) * vecCount);
1603 		if (vecs == NULL)
1604 			return B_NO_MEMORY;
1605 
1606 		if (user_memcpy(vecs, userVecs, sizeof(iovec) * vecCount) < B_OK) {
1607 			free(vecs);
1608 			return B_BAD_ADDRESS;
1609 		}
1610 	}
1611 
1612 	status_t status = writev_port_etc(port, messageCode, vecs, vecCount,
1613 		bufferSize, flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT,
1614 		timeout);
1615 
1616 	free(vecs);
1617 	return syscall_restart_handle_timeout_post(status, timeout);
1618 }
1619 
1620 
1621 status_t
1622 _user_get_port_message_info_etc(port_id port, port_message_info *userInfo,
1623 	size_t infoSize, uint32 flags, bigtime_t timeout)
1624 {
1625 	if (userInfo == NULL || infoSize != sizeof(port_message_info))
1626 		return B_BAD_VALUE;
1627 
1628 	syscall_restart_handle_timeout_pre(flags, timeout);
1629 
1630 	port_message_info info;
1631 	status_t error = _get_port_message_info_etc(port, &info, sizeof(info),
1632 		flags | B_CAN_INTERRUPT, timeout);
1633 
1634 	// copy info to userland
1635 	if (error == B_OK && (!IS_USER_ADDRESS(userInfo)
1636 			|| user_memcpy(userInfo, &info, sizeof(info)) != B_OK)) {
1637 		error = B_BAD_ADDRESS;
1638 	}
1639 
1640 	return syscall_restart_handle_timeout_post(error, timeout);
1641 }
1642