xref: /haiku/src/system/kernel/port.cpp (revision c9ad965c81b08802fed0827fd1dd16f45297928a)
1 /*
2  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001, Mark-Jan Bastian. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 
10 /*!	Ports for IPC */
11 
12 
13 #include <port.h>
14 
15 #include <ctype.h>
16 #include <iovec.h>
17 #include <stdlib.h>
18 #include <string.h>
19 
20 #include <OS.h>
21 
22 #include <arch/int.h>
23 #include <heap.h>
24 #include <kernel.h>
25 #include <Notifications.h>
26 #include <sem.h>
27 #include <syscall_restart.h>
28 #include <team.h>
29 #include <tracing.h>
30 #include <util/AutoLock.h>
31 #include <util/list.h>
32 #include <wait_for_objects.h>
33 
34 
35 //#define TRACE_PORTS
36 #ifdef TRACE_PORTS
37 #	define TRACE(x) dprintf x
38 #else
39 #	define TRACE(x)
40 #endif
41 
42 
43 struct port_message : DoublyLinkedListLinkImpl<port_message> {
44 	int32				code;
45 	size_t				size;
46 	uid_t				sender;
47 	gid_t				sender_group;
48 	team_id				sender_team;
49 	char				buffer[0];
50 };
51 
52 typedef DoublyLinkedList<port_message> MessageList;
53 
54 struct port_entry {
55 	struct list_link	team_link;
56 	port_id				id;
57 	team_id				owner;
58 	int32		 		capacity;
59 	mutex				lock;
60 	int32				read_count;
61 	int32				write_count;
62 	ConditionVariable	read_condition;
63 	ConditionVariable	write_condition;
64 	int32				total_count;
65 		// messages read from port since creation
66 	select_info*		select_infos;
67 	MessageList			messages;
68 };
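
// Bookkeeping conventions used throughout this file: every port_id handed
// out by create_port() satisfies id % sMaxPorts == slot, so lookups simply
// recompute the slot from the id and verify sPorts[slot].id == id to detect
// stale or reused ids. write_count counts the free message slots (it starts
// at the port's capacity), read_count counts the queued messages; either
// value may temporarily go negative while threads block on the respective
// condition variable.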
69 
70 class PortNotificationService : public DefaultNotificationService {
71 public:
72 							PortNotificationService();
73 
74 			void			Notify(uint32 opcode, port_id port);
75 };
76 
77 
78 #if PORT_TRACING
79 namespace PortTracing {
80 
81 class Create : public AbstractTraceEntry {
82 public:
83 	Create(port_entry& port)
84 		:
85 		fID(port.id),
86 		fOwner(port.owner),
87 		fCapacity(port.capacity)
88 	{
89 		fName = alloc_tracing_buffer_strcpy(port.lock.name, B_OS_NAME_LENGTH,
90 			false);
91 
92 		Initialized();
93 	}
94 
95 	virtual void AddDump(TraceOutput& out)
96 	{
97 		out.Print("port %ld created, name \"%s\", owner %ld, capacity %ld",
98 			fID, fName, fOwner, fCapacity);
99 	}
100 
101 private:
102 	port_id				fID;
103 	char*				fName;
104 	team_id				fOwner;
105 	int32		 		fCapacity;
106 };
107 
108 
109 class Delete : public AbstractTraceEntry {
110 public:
111 	Delete(port_entry& port)
112 		:
113 		fID(port.id)
114 	{
115 		Initialized();
116 	}
117 
118 	virtual void AddDump(TraceOutput& out)
119 	{
120 		out.Print("port %ld deleted", fID);
121 	}
122 
123 private:
124 	port_id				fID;
125 };
126 
127 
128 class Read : public AbstractTraceEntry {
129 public:
130 	Read(port_entry& port, int32 code, ssize_t result)
131 		:
132 		fID(port.id),
133 		fReadCount(port.read_count),
134 		fWriteCount(port.write_count),
135 		fCode(code),
136 		fResult(result)
137 	{
138 		Initialized();
139 	}
140 
141 	virtual void AddDump(TraceOutput& out)
142 	{
143 		out.Print("port %ld read, read %ld, write %ld, code %lx: %ld",
144 			fID, fReadCount, fWriteCount, fCode, fResult);
145 	}
146 
147 private:
148 	port_id				fID;
149 	int32				fReadCount;
150 	int32				fWriteCount;
151 	int32				fCode;
152 	ssize_t				fResult;
153 };
154 
155 
156 class Write : public AbstractTraceEntry {
157 public:
158 	Write(port_entry& port, int32 code, size_t bufferSize, ssize_t result)
159 		:
160 		fID(port.id),
161 		fReadCount(port.read_count),
162 		fWriteCount(port.write_count),
163 		fCode(code),
164 		fBufferSize(bufferSize),
165 		fResult(result)
166 	{
167 		Initialized();
168 	}
169 
170 	virtual void AddDump(TraceOutput& out)
171 	{
172 		out.Print("port %ld write, read %ld, write %ld, code %lx, size %ld: %ld",
173 			fID, fReadCount, fWriteCount, fCode, fBufferSize, fResult);
174 	}
175 
176 private:
177 	port_id				fID;
178 	int32				fReadCount;
179 	int32				fWriteCount;
180 	int32				fCode;
181 	size_t				fBufferSize;
182 	ssize_t				fResult;
183 };
184 
185 
186 class Info : public AbstractTraceEntry {
187 public:
188 	Info(port_entry& port, int32 code, ssize_t result)
189 		:
190 		fID(port.id),
191 		fReadCount(port.read_count),
192 		fWriteCount(port.write_count),
193 		fCode(code),
194 		fResult(result)
195 	{
196 		Initialized();
197 	}
198 
199 	virtual void AddDump(TraceOutput& out)
200 	{
201 		out.Print("port %ld info, read %ld, write %ld, code %lx: %ld",
202 			fID, fReadCount, fWriteCount, fCode, fResult);
203 	}
204 
205 private:
206 	port_id				fID;
207 	int32				fReadCount;
208 	int32				fWriteCount;
209 	int32				fCode;
210 	ssize_t				fResult;
211 };
212 
213 
214 class OwnerChange : public AbstractTraceEntry {
215 public:
216 	OwnerChange(port_entry& port, team_id newOwner, status_t status)
217 		:
218 		fID(port.id),
219 		fOldOwner(port.owner),
220 		fNewOwner(newOwner),
221 		fStatus(status)
222 	{
223 		Initialized();
224 	}
225 
226 	virtual void AddDump(TraceOutput& out)
227 	{
228 		out.Print("port %ld owner change from %ld to %ld: %s", fID, fOldOwner,
229 			fNewOwner, strerror(fStatus));
230 	}
231 
232 private:
233 	port_id				fID;
234 	team_id				fOldOwner;
235 	team_id				fNewOwner;
236 	status_t	 		fStatus;
237 };
238 
239 }	// namespace PortTracing
240 
241 #	define T(x) new(std::nothrow) PortTracing::x;
242 #else
243 #	define T(x) ;
244 #endif
245 
246 
247 static const size_t kInitialPortBufferSize = 4 * 1024 * 1024;
248 static const size_t kTotalSpaceLimit = 64 * 1024 * 1024;
249 static const size_t kTeamSpaceLimit = 8 * 1024 * 1024;
250 static const size_t kBufferGrowRate = kInitialPortBufferSize;
251 
252 #define MAX_QUEUE_LENGTH 4096
253 #define PORT_MAX_MESSAGE_SIZE (256 * 1024)
254 
255 // sMaxPorts must be a power of 2
256 static int32 sMaxPorts = 4096;
257 static int32 sUsedPorts = 0;
258 
259 static struct port_entry* sPorts;
260 static area_id sPortArea;
261 static heap_allocator* sPortAllocator;
262 static ConditionVariable sNoSpaceCondition;
263 static vint32 sTotalSpaceInUse;
264 static vint32 sAreaChangeCounter;
265 static vint32 sAllocatingArea;
266 static bool sPortsActive = false;
267 static port_id sNextPort = 1;
268 static int32 sFirstFreeSlot = 1;
269 static mutex sPortsLock = MUTEX_INITIALIZER("ports list");
270 
271 static PortNotificationService sNotificationService;
272 
273 
274 //	#pragma mark - PortNotificationService
275 
276 
277 PortNotificationService::PortNotificationService()
278 	:
279 	DefaultNotificationService("ports")
280 {
281 }
282 
283 
284 void
285 PortNotificationService::Notify(uint32 opcode, port_id port)
286 {
287 	char eventBuffer[64];
288 	KMessage event;
289 	event.SetTo(eventBuffer, sizeof(eventBuffer), PORT_MONITOR);
290 	event.AddInt32("event", opcode);
291 	event.AddInt32("port", port);
292 
293 	DefaultNotificationService::Notify(event, opcode);
294 }
295 
296 
297 //	#pragma mark -
298 
299 
300 static int
301 dump_port_list(int argc, char** argv)
302 {
303 	const char* name = NULL;
304 	team_id owner = -1;
305 	int32 i;
306 
307 	if (argc > 2) {
308 		if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
309 			owner = strtoul(argv[2], NULL, 0);
310 		else if (!strcmp(argv[1], "name"))
311 			name = argv[2];
312 	} else if (argc > 1)
313 		owner = strtoul(argv[1], NULL, 0);
314 
315 	kprintf("port             id  cap  read-cnt  write-cnt   total   team  "
316 		"name\n");
317 
318 	for (i = 0; i < sMaxPorts; i++) {
319 		struct port_entry* port = &sPorts[i];
320 		if (port->id < 0
321 			|| (owner != -1 && port->owner != owner)
322 			|| (name != NULL && strstr(port->lock.name, name) == NULL))
323 			continue;
324 
325 		kprintf("%p %8ld %4ld %9ld %9ld %8ld %6ld  %s\n", port,
326 			port->id, port->capacity, port->read_count, port->write_count,
327 			port->total_count, port->owner, port->lock.name);
328 	}
329 
330 	return 0;
331 }
332 
333 
334 static void
335 _dump_port_info(struct port_entry* port)
336 {
337 	kprintf("PORT: %p\n", port);
338 	kprintf(" id:              %ld\n", port->id);
339 	kprintf(" name:            \"%s\"\n", port->lock.name);
340 	kprintf(" owner:           %ld\n", port->owner);
341 	kprintf(" capacity:        %ld\n", port->capacity);
342 	kprintf(" read_count:      %ld\n", port->read_count);
343 	kprintf(" write_count:     %ld\n", port->write_count);
344 	kprintf(" total count:     %ld\n", port->total_count);
345 
346 	if (!port->messages.IsEmpty()) {
347 		kprintf("messages:\n");
348 
349 		MessageList::Iterator iterator = port->messages.GetIterator();
350 		while (port_message* message = iterator.Next()) {
351 			kprintf(" %p  %08lx  %ld\n", message, message->code, message->size);
352 		}
353 	}
354 
355 	set_debug_variable("_port", (addr_t)port);
356 	set_debug_variable("_portID", port->id);
357 	set_debug_variable("_owner", port->owner);
358 }
359 
360 
361 static int
362 dump_port_info(int argc, char** argv)
363 {
364 	ConditionVariable* condition = NULL;
365 	const char* name = NULL;
366 
367 	if (argc < 2) {
368 		print_debugger_command_usage(argv[0]);
369 		return 0;
370 	}
371 
372 	if (argc > 2) {
373 		if (!strcmp(argv[1], "address")) {
374 			_dump_port_info((struct port_entry*)parse_expression(argv[2]));
375 			return 0;
376 		} else if (!strcmp(argv[1], "condition"))
377 			condition = (ConditionVariable*)parse_expression(argv[2]);
378 		else if (!strcmp(argv[1], "name"))
379 			name = argv[2];
380 	} else if (parse_expression(argv[1]) > 0) {
381 		// if the argument looks like a number, treat it as such
382 		int32 num = parse_expression(argv[1]);
383 		int32 slot = num % sMaxPorts;
384 		if (sPorts[slot].id != num) {
385 			kprintf("port %ld (%#lx) doesn't exist!\n", num, num);
386 			return 0;
387 		}
388 		_dump_port_info(&sPorts[slot]);
389 		return 0;
390 	} else
391 		name = argv[1];
392 
393 	// walk through the ports list, trying to match name
394 	for (int32 i = 0; i < sMaxPorts; i++) {
395 		if ((name != NULL && sPorts[i].lock.name != NULL
396 				&& !strcmp(name, sPorts[i].lock.name))
397 			|| (condition != NULL && (&sPorts[i].read_condition == condition
398 				|| &sPorts[i].write_condition == condition))) {
399 			_dump_port_info(&sPorts[i]);
400 			return 0;
401 		}
402 	}
403 
404 	return 0;
405 }
406 
407 
408 static void
409 notify_port_select_events(int slot, uint16 events)
410 {
411 	if (sPorts[slot].select_infos)
412 		notify_select_events_list(sPorts[slot].select_infos, events);
413 }
414 
415 
416 static void
417 put_port_message(port_message* message)
418 {
419 	size_t size = sizeof(port_message) + message->size;
420 	heap_free(sPortAllocator, message);
421 
422 	atomic_add(&sTotalSpaceInUse, -size);
423 	sNoSpaceCondition.NotifyAll();
424 }
425 
426 
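/*!	Allocates a port_message with \a bufferSize bytes of payload from the
	shared port heap. If the allocation would exceed kTotalSpaceLimit, or the
	heap is currently exhausted, the caller waits on sNoSpaceCondition
	(honoring \a flags and \a timeout); the first thread to run into an
	allocation failure grows the heap by another kBufferGrowRate sized area.
*/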
427 static status_t
428 get_port_message(int32 code, size_t bufferSize, uint32 flags, bigtime_t timeout,
429 	port_message** _message)
430 {
431 	size_t size = sizeof(port_message) + bufferSize;
432 	bool limitReached = false;
433 
434 	while (true) {
435 		if (atomic_add(&sTotalSpaceInUse, size)
436 				> int32(kTotalSpaceLimit - size)) {
437 			// TODO: add per team limit
438 			// We are not allowed to create another heap area, as our
439 			// space limit has been reached - just wait until we get
440 			// some free space again.
441 			limitReached = true;
442 
443 		wait:
444 			MutexLocker locker(sPortsLock);
445 
446 			atomic_add(&sTotalSpaceInUse, -size);
447 
448 			// TODO: we don't want to wait - but does that also mean we
449 			// shouldn't wait for the area creation?
450 			if (limitReached && (flags & B_RELATIVE_TIMEOUT) != 0
451 				&& timeout <= 0)
452 				return B_WOULD_BLOCK;
453 
454 			ConditionVariableEntry entry;
455 			sNoSpaceCondition.Add(&entry);
456 
457 			locker.Unlock();
458 
459 			status_t status = entry.Wait(flags, timeout);
460 			if (status == B_TIMED_OUT)
461 				return B_TIMED_OUT;
462 
463 			// just try again
464 			limitReached = false;
465 			continue;
466 		}
467 
468 		int32 areaChangeCounter = atomic_get(&sAreaChangeCounter);
469 
470 		// Quota is fulfilled, try to allocate the buffer
471 
472 		port_message* message
473 			= (port_message*)heap_memalign(sPortAllocator, 0, size);
474 		if (message != NULL) {
475 			message->code = code;
476 			message->size = bufferSize;
477 
478 			*_message = message;
479 			return B_OK;
480 		}
481 
482 		if (atomic_or(&sAllocatingArea, 1) != 0) {
483 			// Just wait for someone else to create an area for us
484 			goto wait;
485 		}
486 
487 		if (areaChangeCounter != atomic_get(&sAreaChangeCounter)) {
488 			atomic_add(&sTotalSpaceInUse, -size);
489 			continue;
490 		}
491 
492 		// Create a new area for the heap to use
493 
494 		addr_t base;
495 		area_id area = create_area("port grown buffer", (void**)&base,
496 			B_ANY_KERNEL_ADDRESS, kBufferGrowRate, B_NO_LOCK,
497 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
498 		if (area < 0) {
499 			// it's time to let the userland feel our pain
500 			sNoSpaceCondition.NotifyAll();
501 			return B_NO_MEMORY;
502 		}
503 
504 		heap_add_area(sPortAllocator, area, base, kBufferGrowRate);
505 
506 		atomic_add(&sAreaChangeCounter, 1);
507 		sNoSpaceCondition.NotifyAll();
508 		atomic_and(&sAllocatingArea, 0);
509 	}
510 }
511 
512 
513 /*!	You need to own the port's lock when calling this function */
514 static bool
515 is_port_closed(int32 slot)
516 {
517 	return sPorts[slot].capacity == 0;
518 }
519 
520 
521 /*!	Fills the port_info structure with information from the specified
522 	port.
523 	The port lock must be held when called.
524 */
525 static void
526 fill_port_info(struct port_entry* port, port_info* info, size_t size)
527 {
528 	info->port = port->id;
529 	info->team = port->owner;
530 	info->capacity = port->capacity;
531 
532 	int32 count = port->read_count;
533 	if (count < 0)
534 		count = 0;
535 
536 	info->queue_count = count;
537 	info->total_count = port->total_count;
538 
539 	strlcpy(info->name, port->lock.name, B_OS_NAME_LENGTH);
540 }
541 
542 
543 static ssize_t
544 copy_port_message(port_message* message, int32* _code, void* buffer,
545 	size_t bufferSize, bool userCopy)
546 {
547 	// check output buffer size
548 	size_t size = min_c(bufferSize, message->size);
549 
550 	// copy message
551 	if (_code != NULL)
552 		*_code = message->code;
553 
554 	if (size > 0) {
555 		if (userCopy) {
556 			status_t status = user_memcpy(buffer, message->buffer, size);
557 			if (status != B_OK)
558 				return status;
559 		} else
560 			memcpy(buffer, message->buffer, size);
561 	}
562 
563 	return size;
564 }
565 
566 
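/*!	Marks the port slot as unused, frees all queued messages, detaches any
	select infos, and wakes every waiting reader and writer with
	B_BAD_PORT_ID. The caller must hold the port's lock.
*/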
567 static void
568 uninit_port_locked(struct port_entry& port)
569 {
570 	int32 id = port.id;
571 
572 	// mark port as invalid
573 	port.id = -1;
574 	free((char*)port.lock.name);
575 	port.lock.name = NULL;
576 
577 	while (port_message* message = port.messages.RemoveHead()) {
578 		put_port_message(message);
579 	}
580 
581 	notify_port_select_events(id % sMaxPorts, B_EVENT_INVALID);
582 	port.select_infos = NULL;
583 
584 	// Release the threads that were blocking on this port.
585 	// read_port() will see the B_BAD_PORT_ID return value, and act accordingly
586 	port.read_condition.NotifyAll(B_BAD_PORT_ID);
587 	port.write_condition.NotifyAll(B_BAD_PORT_ID);
588 	sNotificationService.Notify(PORT_REMOVED, id);
589 }
590 
591 
592 //	#pragma mark - private kernel API
593 
594 
595 /*! This function deletes all ports owned by the passed team.
596 */
597 void
598 delete_owned_ports(struct team* team)
599 {
600 	TRACE(("delete_owned_ports(owner = %ld)\n", team->id));
601 
602 	struct list queue;
603 
604 	{
605 		InterruptsSpinLocker locker(gTeamSpinlock);
606 		list_move_to_list(&team->port_list, &queue);
607 	}
608 
609 	int32 firstSlot = sMaxPorts;
610 	int32 count = 0;
611 
612 	while (port_entry* port = (port_entry*)list_remove_head_item(&queue)) {
613 		if (firstSlot > port->id % sMaxPorts)
614 			firstSlot = port->id % sMaxPorts;
615 		count++;
616 
617 		MutexLocker locker(port->lock);
618 		uninit_port_locked(*port);
619 	}
620 
621 	MutexLocker _(sPortsLock);
622 
623 	// update the first free slot hint in the array
624 	if (firstSlot < sFirstFreeSlot)
625 		sFirstFreeSlot = firstSlot;
626 
627 	sUsedPorts -= count;
628 }
629 
630 
631 int32
632 port_max_ports(void)
633 {
634 	return sMaxPorts;
635 }
636 
637 
638 int32
639 port_used_ports(void)
640 {
641 	return sUsedPorts;
642 }
643 
644 
645 status_t
646 port_init(kernel_args *args)
647 {
648 	size_t size = sizeof(struct port_entry) * sMaxPorts;
649 
650 	// create and initialize ports table
651 	sPortArea = create_area("port_table",
652 		(void**)&sPorts, B_ANY_KERNEL_ADDRESS, size, B_FULL_LOCK,
653 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
654 	if (sPortArea < 0) {
655 		panic("unable to allocate kernel port table!\n");
656 		return sPortArea;
657 	}
658 
659 	memset(sPorts, 0, size);
660 	for (int32 i = 0; i < sMaxPorts; i++) {
661 		mutex_init(&sPorts[i].lock, NULL);
662 		sPorts[i].id = -1;
663 		sPorts[i].read_condition.Init(&sPorts[i], "port read");
664 		sPorts[i].write_condition.Init(&sPorts[i], "port write");
665 	}
666 
667 	addr_t base;
668 	if (create_area("port heap", (void**)&base, B_ANY_KERNEL_ADDRESS,
669 			kInitialPortBufferSize, B_NO_LOCK,
670 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA) < 0) {
671 		panic("unable to allocate port area!\n");
672 		return B_ERROR;
673 	}
674 
675 	static const heap_class kBufferHeapClass = {"default", 100,
676 		PORT_MAX_MESSAGE_SIZE + sizeof(port_message), 2 * 1024,
677 		sizeof(port_message), 8, 4, 64};
678 	sPortAllocator = heap_create_allocator("port buffer", base,
679 		kInitialPortBufferSize, &kBufferHeapClass, true);
680 	if (sPortAllocator == NULL) {
681 		panic("unable to create port heap");
682 		return B_NO_MEMORY;
683 	}
684 
685 	sNoSpaceCondition.Init(sPorts, "port space");
686 
687 	// add debugger commands
688 	add_debugger_command_etc("ports", &dump_port_list,
689 		"Dump a list of all active ports (for team, with name, etc.)",
690 		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]\n"
691 		"Prints a list of all active ports meeting the given\n"
692 		"requirement. If no argument is given, all ports are listed.\n"
693 		"  <team>             - The team owning the ports.\n"
694 		"  <name>             - Part of the name of the ports.\n", 0);
695 	add_debugger_command_etc("port", &dump_port_info,
696 		"Dump info about a particular port",
697 		"(<id> | [ \"address\" ] <address>) | ([ \"name\" ] <name>) "
698 			"| (\"condition\" <address>)\n"
699 		"Prints info about the specified port.\n"
700 		"  <address>   - Pointer to the port structure.\n"
701 		"  <name>      - Name of the port.\n"
702 		"  <condition> - address of the port's read or write condition.\n", 0);
703 
704 	new(&sNotificationService) PortNotificationService();
705 	sPortsActive = true;
706 	return B_OK;
707 }
708 
709 
710 //	#pragma mark - public kernel API
711 
712 
713 port_id
714 create_port(int32 queueLength, const char* name)
715 {
716 	TRACE(("create_port(queueLength = %ld, name = \"%s\")\n", queueLength,
717 		name));
718 
719 	if (!sPortsActive) {
720 		panic("ports used too early!\n");
721 		return B_BAD_PORT_ID;
722 	}
723 	if (queueLength < 1 || queueLength > MAX_QUEUE_LENGTH)
724 		return B_BAD_VALUE;
725 
726 	struct team* team = thread_get_current_thread()->team;
727 	if (team == NULL)
728 		return B_BAD_TEAM_ID;
729 
730 	MutexLocker locker(sPortsLock);
731 
732 	// check early on if there are any free port slots to use
733 	if (sUsedPorts >= sMaxPorts)
734 		return B_NO_MORE_PORTS;
735 
736 	// check & dup name
737 	char* nameBuffer = strdup(name != NULL ? name : "unnamed port");
738 	if (nameBuffer == NULL)
739 		return B_NO_MEMORY;
740 
741 	sUsedPorts++;
742 
743 	// find the first empty spot
744 	for (int32 slot = 0; slot < sMaxPorts; slot++) {
745 		int32 i = (slot + sFirstFreeSlot) % sMaxPorts;
746 
747 		if (sPorts[i].id == -1) {
748 			// make the port_id map back to the slot it's in (id % sMaxPorts == i)
749 			if (i >= sNextPort % sMaxPorts)
750 				sNextPort += i - sNextPort % sMaxPorts;
751 			else
752 				sNextPort += sMaxPorts - (sNextPort % sMaxPorts - i);
753 			sFirstFreeSlot = slot + 1;
754 
755 			MutexLocker portLocker(sPorts[i].lock);
756 			sPorts[i].id = sNextPort++;
757 			locker.Unlock();
758 
759 			sPorts[i].capacity = queueLength;
760 			sPorts[i].owner = team_get_current_team_id();
761 			sPorts[i].lock.name = nameBuffer;
762 			sPorts[i].read_count = 0;
763 			sPorts[i].write_count = queueLength;
764 			sPorts[i].total_count = 0;
765 			sPorts[i].select_infos = NULL;
766 
767 			{
768 				InterruptsSpinLocker teamLocker(gTeamSpinlock);
769 				list_add_item(&team->port_list, &sPorts[i].team_link);
770 			}
771 
772 			port_id id = sPorts[i].id;
773 
774 			T(Create(sPorts[i]));
775 			portLocker.Unlock();
776 
777 			TRACE(("create_port() done: port created %ld\n", id));
778 
779 			sNotificationService.Notify(PORT_ADDED, id);
780 			return id;
781 		}
782 	}
783 
784 	// Still not enough ports... - due to sUsedPorts, this cannot really
785 	// happen anymore.
786 	panic("out of ports, but sUsedPorts is broken");
787 	return B_NO_MORE_PORTS;
788 }
789 
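// A minimal usage sketch of create_port() above and the read/write/delete
// calls below, from kernel code; the queue length, the 'tick' message code
// and the payload are illustrative only:
//
//	port_id port = create_port(16, "example port");
//	if (port >= B_OK) {
//		char buffer[64];
//		int32 code;
//		write_port(port, 'tick', "hello", 6);
//		ssize_t bytesRead = read_port(port, &code, buffer, sizeof(buffer));
//		delete_port(port);
//	}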
790 
791 status_t
792 close_port(port_id id)
793 {
794 	TRACE(("close_port(id = %ld)\n", id));
795 
796 	if (!sPortsActive || id < 0)
797 		return B_BAD_PORT_ID;
798 
799 	int32 slot = id % sMaxPorts;
800 
801 	// lock the port and check that the ID is still valid
802 	MutexLocker locker(sPorts[slot].lock);
803 
804 	if (sPorts[slot].id != id) {
805 		TRACE(("close_port: invalid port_id %ld\n", id));
806 		return B_BAD_PORT_ID;
807 	}
808 
809 	// mark the port as closed (capacity 0) to disable further writes -
810 	// notifying the condition variables below wakes up waiting reads/writes
811 	sPorts[slot].capacity = 0;
812 
813 	notify_port_select_events(slot, B_EVENT_INVALID);
814 	sPorts[slot].select_infos = NULL;
815 
816 	sPorts[slot].read_condition.NotifyAll(false, B_BAD_PORT_ID);
817 	sPorts[slot].write_condition.NotifyAll(false, B_BAD_PORT_ID);
818 
819 	return B_OK;
820 }
821 
822 
823 status_t
824 delete_port(port_id id)
825 {
826 	TRACE(("delete_port(id = %ld)\n", id));
827 
828 	if (!sPortsActive || id < 0)
829 		return B_BAD_PORT_ID;
830 
831 	int32 slot = id % sMaxPorts;
832 
833 	MutexLocker locker(sPorts[slot].lock);
834 
835 	if (sPorts[slot].id != id) {
836 		TRACE(("delete_port: invalid port_id %ld\n", id));
837 		return B_BAD_PORT_ID;
838 	}
839 
840 	T(Delete(sPorts[slot]));
841 
842 	{
843 		InterruptsSpinLocker teamLocker(gTeamSpinlock);
844 		list_remove_link(&sPorts[slot].team_link);
845 	}
846 
847 	uninit_port_locked(sPorts[slot]);
848 
849 	locker.Unlock();
850 
851 	MutexLocker _(sPortsLock);
852 
853 	// update the first free slot hint in the array
854 	if (slot < sFirstFreeSlot)
855 		sFirstFreeSlot = slot;
856 
857 	sUsedPorts--;
858 	return B_OK;
859 }
860 
861 
862 status_t
863 select_port(int32 id, struct select_info* info, bool kernel)
864 {
865 	if (id < 0)
866 		return B_BAD_PORT_ID;
867 
868 	int32 slot = id % sMaxPorts;
869 
870 	MutexLocker locker(sPorts[slot].lock);
871 
872 	if (sPorts[slot].id != id || is_port_closed(slot))
873 		return B_BAD_PORT_ID;
874 	if (!kernel && sPorts[slot].owner == team_get_kernel_team_id()) {
875 		// kernel port, but call from userland
876 		return B_NOT_ALLOWED;
877 	}
878 
879 	info->selected_events &= B_EVENT_READ | B_EVENT_WRITE | B_EVENT_INVALID;
880 
881 	if (info->selected_events != 0) {
882 		uint16 events = 0;
883 
884 		info->next = sPorts[slot].select_infos;
885 		sPorts[slot].select_infos = info;
886 
887 		// check for events
888 		if ((info->selected_events & B_EVENT_READ) != 0
889 			&& !sPorts[slot].messages.IsEmpty()) {
890 			events |= B_EVENT_READ;
891 		}
892 
893 		if (sPorts[slot].write_count > 0)
894 			events |= B_EVENT_WRITE;
895 
896 		if (events != 0)
897 			notify_select_events(info, events);
898 	}
899 
900 	return B_OK;
901 }
902 
903 
904 status_t
905 deselect_port(int32 id, struct select_info* info, bool kernel)
906 {
907 	if (id < 0)
908 		return B_BAD_PORT_ID;
909 	if (info->selected_events == 0)
910 		return B_OK;
911 
912 	int32 slot = id % sMaxPorts;
913 
914 	MutexLocker locker(sPorts[slot].lock);
915 
916 	if (sPorts[slot].id == id) {
917 		select_info** infoLocation = &sPorts[slot].select_infos;
918 		while (*infoLocation != NULL && *infoLocation != info)
919 			infoLocation = &(*infoLocation)->next;
920 
921 		if (*infoLocation == info)
922 			*infoLocation = info->next;
923 	}
924 
925 	return B_OK;
926 }
927 
928 
929 port_id
930 find_port(const char* name)
931 {
932 	TRACE(("find_port(name = \"%s\")\n", name));
933 
934 	if (!sPortsActive) {
935 		panic("ports used too early!\n");
936 		return B_NAME_NOT_FOUND;
937 	}
938 	if (name == NULL)
939 		return B_BAD_VALUE;
940 
941 	// Since we have to check every single port, and we don't care if one
942 	// goes away while we do so, we only grab the lock of the port in
943 	// question, not the port list lock
944 
945 	// loop over list
946 	for (int32 i = 0; i < sMaxPorts; i++) {
947 		// lock every individual port before comparing
948 		MutexLocker _(sPorts[i].lock);
949 
950 		if (sPorts[i].id >= 0 && !strcmp(name, sPorts[i].lock.name))
951 			return sPorts[i].id;
952 	}
953 
954 	return B_NAME_NOT_FOUND;
955 }
956 
957 
958 status_t
959 _get_port_info(port_id id, port_info* info, size_t size)
960 {
961 	TRACE(("get_port_info(id = %ld)\n", id));
962 
963 	if (info == NULL || size != sizeof(port_info))
964 		return B_BAD_VALUE;
965 	if (!sPortsActive || id < 0)
966 		return B_BAD_PORT_ID;
967 
968 	int32 slot = id % sMaxPorts;
969 
970 	MutexLocker locker(sPorts[slot].lock);
971 
972 	if (sPorts[slot].id != id || sPorts[slot].capacity == 0) {
973 		TRACE(("get_port_info: invalid port_id %ld\n", id));
974 		return B_BAD_PORT_ID;
975 	}
976 
977 	// fill a port_info struct with info
978 	fill_port_info(&sPorts[slot], info, size);
979 	return B_OK;
980 }
981 
982 
983 status_t
984 _get_next_port_info(team_id team, int32* _cookie, struct port_info* info,
985 	size_t size)
986 {
987 	TRACE(("get_next_port_info(team = %ld)\n", team));
988 
989 	if (info == NULL || size != sizeof(port_info) || _cookie == NULL
990 		|| team < B_OK)
991 		return B_BAD_VALUE;
992 	if (!sPortsActive)
993 		return B_BAD_PORT_ID;
994 
995 	int32 slot = *_cookie;
996 	if (slot >= sMaxPorts)
997 		return B_BAD_PORT_ID;
998 
999 	if (team == B_CURRENT_TEAM)
1000 		team = team_get_current_team_id();
1001 
1002 	info->port = -1; // used as found flag
1003 
1004 	while (slot < sMaxPorts) {
1005 		MutexLocker locker(sPorts[slot].lock);
1006 
1007 		if (sPorts[slot].id != -1 && !is_port_closed(slot)
1008 			&& sPorts[slot].owner == team) {
1009 			// found one!
1010 			fill_port_info(&sPorts[slot], info, size);
1011 			slot++;
1012 			break;
1013 		}
1014 
1015 		slot++;
1016 	}
1017 
1018 	if (info->port == -1)
1019 		return B_BAD_PORT_ID;
1020 
1021 	*_cookie = slot;
1022 	return B_OK;
1023 }
1024 
1025 
1026 ssize_t
1027 port_buffer_size(port_id id)
1028 {
1029 	return port_buffer_size_etc(id, 0, 0);
1030 }
1031 
1032 
1033 ssize_t
1034 port_buffer_size_etc(port_id id, uint32 flags, bigtime_t timeout)
1035 {
1036 	port_message_info info;
1037 	status_t error = get_port_message_info_etc(id, &info, flags, timeout);
1038 	return error != B_OK ? error : info.size;
1039 }
1040 
1041 
1042 status_t
1043 _get_port_message_info_etc(port_id id, port_message_info* info,
1044 	size_t infoSize, uint32 flags, bigtime_t timeout)
1045 {
1046 	if (info == NULL || infoSize != sizeof(port_message_info))
1047 		return B_BAD_VALUE;
1048 	if (!sPortsActive || id < 0)
1049 		return B_BAD_PORT_ID;
1050 
1051 	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
1052 		| B_ABSOLUTE_TIMEOUT;
1053 	int32 slot = id % sMaxPorts;
1054 
1055 	MutexLocker locker(sPorts[slot].lock);
1056 
1057 	if (sPorts[slot].id != id
1058 		|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
1059 		T(Info(sPorts[slot], 0, B_BAD_PORT_ID));
1060 		TRACE(("port_buffer_size_etc(): %s port %ld\n",
1061 			sPorts[slot].id == id ? "closed" : "invalid", id));
1062 		return B_BAD_PORT_ID;
1063 	}
1064 
1065 	if (sPorts[slot].read_count <= 0) {
1066 		// We need to wait for a message to appear
1067 		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
1068 			return B_WOULD_BLOCK;
1069 
1070 		ConditionVariableEntry entry;
1071 		sPorts[slot].read_condition.Add(&entry);
1072 
1073 		locker.Unlock();
1074 
1075 		// block if no message, or, if B_TIMEOUT flag set, block with timeout
1076 		status_t status = entry.Wait(flags, timeout);
1077 		if (status == B_OK && entry.WaitStatus() != B_OK)
1078 			status = entry.WaitStatus();
1079 
1080 		if (status != B_OK) {
1081 			T(Info(sPorts[slot], 0, status));
1082 			return status;
1083 		}
1084 
1085 		locker.Lock();
1086 	}
1087 
1088 	if (sPorts[slot].id != id) {
1089 		// the port is no longer there
1090 		return B_BAD_PORT_ID;
1091 	}
1092 
1093 	// look at the message at the head of the queue and get its length
1094 	port_message* message = sPorts[slot].messages.Head();
1095 	if (message == NULL) {
1096 		panic("port %ld: no messages found\n", sPorts[slot].id);
1097 		return B_ERROR;
1098 	}
1099 
1100 	info->size = message->size;
1101 	info->sender = message->sender;
1102 	info->sender_group = message->sender_group;
1103 	info->sender_team = message->sender_team;
1104 
1105 	T(Info(sPorts[slot], message->code, B_OK));
1106 
1107 	// notify next one, as we haven't read from the port
1108 	sPorts[slot].read_condition.NotifyOne();
1109 
1110 	return B_OK;
1111 }
1112 
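// Sketch of using the info call above to learn the size and sender of the
// next message without consuming it (the port and the zero timeout are
// illustrative):
//
//	port_message_info info;
//	if (get_port_message_info_etc(port, &info, B_RELATIVE_TIMEOUT, 0) == B_OK) {
//		// info.size, info.sender, info.sender_group, and info.sender_team
//		// now describe the message at the head of the queue; it stays queued.
//	}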
1113 
1114 ssize_t
1115 port_count(port_id id)
1116 {
1117 	if (!sPortsActive || id < 0)
1118 		return B_BAD_PORT_ID;
1119 
1120 	int32 slot = id % sMaxPorts;
1121 
1122 	MutexLocker locker(sPorts[slot].lock);
1123 
1124 	if (sPorts[slot].id != id) {
1125 		TRACE(("port_count: invalid port_id %ld\n", id));
1126 		return B_BAD_PORT_ID;
1127 	}
1128 
1129 	int32 count = sPorts[slot].read_count;
1130 	// do not return negative numbers
1131 	if (count < 0)
1132 		count = 0;
1133 
1134 	// return count of messages
1135 	return count;
1136 }
1137 
1138 
1139 ssize_t
1140 read_port(port_id port, int32* msgCode, void* buffer, size_t bufferSize)
1141 {
1142 	return read_port_etc(port, msgCode, buffer, bufferSize, 0, 0);
1143 }
1144 
1145 
1146 ssize_t
1147 read_port_etc(port_id id, int32* _code, void* buffer, size_t bufferSize,
1148 	uint32 flags, bigtime_t timeout)
1149 {
1150 	if (!sPortsActive || id < 0)
1151 		return B_BAD_PORT_ID;
1152 	if ((buffer == NULL && bufferSize > 0) || timeout < 0)
1153 		return B_BAD_VALUE;
1154 
1155 	bool userCopy = (flags & PORT_FLAG_USE_USER_MEMCPY) != 0;
1156 	bool peekOnly = !userCopy && (flags & B_PEEK_PORT_MESSAGE) != 0;
1157 		// TODO: we could allow peeking for user apps now
1158 
1159 	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
1160 		| B_ABSOLUTE_TIMEOUT;
1161 
1162 	int32 slot = id % sMaxPorts;
1163 
1164 	MutexLocker locker(sPorts[slot].lock);
1165 
1166 	if (sPorts[slot].id != id
1167 		|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
1168 		T(Read(sPorts[slot], 0, B_BAD_PORT_ID));
1169 		TRACE(("read_port_etc(): %s port %ld\n",
1170 			sPorts[slot].id == id ? "closed" : "invalid", id));
1171 		return B_BAD_PORT_ID;
1172 	}
1173 
1174 	if (sPorts[slot].read_count <= 0) {
1175 		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
1176 			return B_WOULD_BLOCK;
1177 
1178 		sPorts[slot].read_count--;
1179 
1180 		// We need to wait for a message to appear
1181 		ConditionVariableEntry entry;
1182 		sPorts[slot].read_condition.Add(&entry);
1183 
1184 		locker.Unlock();
1185 
1186 		// block if no message, or, if B_TIMEOUT flag set, block with timeout
1187 		status_t status = entry.Wait(flags, timeout);
1188 
1189 		locker.Lock();
1190 
1191 		if (sPorts[slot].id != id) {
1192 			// the port is no longer there
1193 			T(Read(sPorts[slot], 0, B_BAD_PORT_ID));
1194 			return B_BAD_PORT_ID;
1195 		}
1196 
1197 		if (status != B_OK || entry.WaitStatus() != B_OK) {
1198 			T(Read(sPorts[slot], 0,
1199 				status != B_OK ? status : entry.WaitStatus()));
1200 			sPorts[slot].read_count++;
1201 			return status != B_OK ? status : entry.WaitStatus();
1202 		}
1203 	} else
1204 		sPorts[slot].read_count--;
1205 
1206 	// get the message at the head of the queue and determine its length
1207 	port_message* message = sPorts[slot].messages.Head();
1208 	if (message == NULL) {
1209 		panic("port %ld: no messages found\n", sPorts[slot].id);
1210 		return B_ERROR;
1211 	}
1212 
1213 	if (peekOnly) {
1214 		size_t size = copy_port_message(message, _code, buffer, bufferSize,
1215 			userCopy);
1216 
1217 		T(Read(sPorts[slot], message->code, size));
1218 
1219 		sPorts[slot].read_count++;
1220 		sPorts[slot].read_condition.NotifyOne();
1221 			// we only peeked, but didn't grab the message
1222 		return size;
1223 	}
1224 
1225 	sPorts[slot].messages.RemoveHead();
1226 	sPorts[slot].total_count++;
1227 	sPorts[slot].write_count++;
1228 
1229 	notify_port_select_events(slot, B_EVENT_WRITE);
1230 	sPorts[slot].write_condition.NotifyOne();
1231 		// make one spot in queue available again for write
1232 
1233 	locker.Unlock();
1234 
1235 	size_t size = copy_port_message(message, _code, buffer, bufferSize,
1236 		userCopy);
1237 	T(Read(sPorts[slot], message->code, size));
1238 
1239 	put_port_message(message);
1240 	return size;
1241 }
1242 
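// Timeout behavior sketch for read_port_etc(); port, code and buffer are
// assumed to be set up as in the create_port() sketch above:
//
//	// non-blocking: returns B_WOULD_BLOCK when no message is queued
//	ssize_t result = read_port_etc(port, &code, buffer, sizeof(buffer),
//		B_RELATIVE_TIMEOUT, 0);
//
//	// wait at most one second, then give up with B_TIMED_OUT
//	result = read_port_etc(port, &code, buffer, sizeof(buffer),
//		B_RELATIVE_TIMEOUT, 1000000);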
1243 
1244 status_t
1245 write_port(port_id id, int32 msgCode, const void* buffer, size_t bufferSize)
1246 {
1247 	iovec vec = { (void*)buffer, bufferSize };
1248 
1249 	return writev_port_etc(id, msgCode, &vec, 1, bufferSize, 0, 0);
1250 }
1251 
1252 
1253 status_t
1254 write_port_etc(port_id id, int32 msgCode, const void* buffer,
1255 	size_t bufferSize, uint32 flags, bigtime_t timeout)
1256 {
1257 	iovec vec = { (void*)buffer, bufferSize };
1258 
1259 	return writev_port_etc(id, msgCode, &vec, 1, bufferSize, flags, timeout);
1260 }
1261 
1262 
1263 status_t
1264 writev_port_etc(port_id id, int32 msgCode, const iovec* msgVecs,
1265 	size_t vecCount, size_t bufferSize, uint32 flags, bigtime_t timeout)
1266 {
1267 	if (!sPortsActive || id < 0)
1268 		return B_BAD_PORT_ID;
1269 	if (bufferSize > PORT_MAX_MESSAGE_SIZE)
1270 		return B_BAD_VALUE;
1271 
1272 	// mask irrelevant flags (for acquire_sem() usage)
1273 	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
1274 		| B_ABSOLUTE_TIMEOUT;
1275 	if ((flags & B_RELATIVE_TIMEOUT) != 0
1276 		&& timeout != B_INFINITE_TIMEOUT && timeout > 0) {
1277 		// Make the timeout absolute, since we have more than one step where
1278 		// we might have to wait
1279 		flags = (flags & ~B_RELATIVE_TIMEOUT) | B_ABSOLUTE_TIMEOUT;
1280 		timeout += system_time();
1281 	}
1282 
1283 	bool userCopy = (flags & PORT_FLAG_USE_USER_MEMCPY) > 0;
1284 
1285 	int32 slot = id % sMaxPorts;
1286 	status_t status;
1287 
1288 	MutexLocker locker(sPorts[slot].lock);
1289 
1290 	if (sPorts[slot].id != id) {
1291 		TRACE(("write_port_etc: invalid port_id %ld\n", id));
1292 		return B_BAD_PORT_ID;
1293 	}
1294 	if (is_port_closed(slot)) {
1295 		TRACE(("write_port_etc: port %ld closed\n", id));
1296 		return B_BAD_PORT_ID;
1297 	}
1298 
1299 	if (sPorts[slot].write_count <= 0) {
1300 		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
1301 			return B_WOULD_BLOCK;
1302 
1303 		sPorts[slot].write_count--;
1304 
1305 		// We need to block in order to wait for a free message slot
1306 		ConditionVariableEntry entry;
1307 		sPorts[slot].write_condition.Add(&entry);
1308 
1309 		locker.Unlock();
1310 
1311 		status = entry.Wait(flags, timeout);
1312 
1313 		locker.Lock();
1314 
1315 		if (sPorts[slot].id != id) {
1316 			// the port is no longer there
1317 			T(Write(sPorts[slot], 0, 0, B_BAD_PORT_ID));
1318 			return B_BAD_PORT_ID;
1319 		}
1320 
1321 		if (status != B_OK || entry.WaitStatus() != B_OK) {
1322 			if (status == B_OK)
1323 				status = entry.WaitStatus();
1324 			goto error;
1325 		}
1326 	} else
1327 		sPorts[slot].write_count--;
1328 
1329 	port_message* message;
1330 	status = get_port_message(msgCode, bufferSize, flags, timeout,
1331 		&message);
1332 	if (status != B_OK)
1333 		goto error;
1334 
1335 	// sender credentials
1336 	message->sender = geteuid();
1337 	message->sender_group = getegid();
1338 	message->sender_team = team_get_current_team_id();
1339 
1340 	if (bufferSize > 0) {
1341 		uint32 i;
1342 		if (userCopy) {
1343 			// copy from user memory
1344 			for (i = 0; i < vecCount; i++) {
1345 				size_t bytes = msgVecs[i].iov_len;
1346 				if (bytes > bufferSize)
1347 					bytes = bufferSize;
1348 
1349 				status_t status = user_memcpy(message->buffer,
1350 					msgVecs[i].iov_base, bytes);
1351 				if (status != B_OK) {
1352 					put_port_message(message);
1353 					goto error;
1354 				}
1355 
1356 				bufferSize -= bytes;
1357 				if (bufferSize == 0)
1358 					break;
1359 			}
1360 		} else {
1361 			// copy from kernel memory
1362 			for (i = 0; i < vecCount; i++) {
1363 				size_t bytes = msgVecs[i].iov_len;
1364 				if (bytes > bufferSize)
1365 					bytes = bufferSize;
1366 
1367 				memcpy(message->buffer, msgVecs[i].iov_base, bytes);
1368 
1369 				bufferSize -= bytes;
1370 				if (bufferSize == 0)
1371 					break;
1372 			}
1373 		}
1374 	}
1375 
1376 	sPorts[slot].messages.Add(message);
1377 	sPorts[slot].read_count++;
1378 
1379 	T(Write(sPorts[slot], message->code, message->size, B_OK));
1380 
1381 	notify_port_select_events(slot, B_EVENT_READ);
1382 	sPorts[slot].read_condition.NotifyOne();
1383 	return B_OK;
1384 
1385 error:
1386 	// Give up our slot in the queue again, and let someone else
1387 	// try and fail
1388 	T(Write(sPorts[slot], 0, 0, status));
1389 	sPorts[slot].write_count++;
1390 	notify_port_select_events(slot, B_EVENT_WRITE);
1391 	sPorts[slot].write_condition.NotifyOne();
1392 
1393 	return status;
1394 }
1395 
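// Scatter-gather write sketch for writev_port_etc(); the header/body split
// and the one-second timeout are illustrative only:
//
//	iovec vecs[2] = {
//		{ &header, sizeof(header) },
//		{ body, bodySize }
//	};
//	status_t status = writev_port_etc(port, 'data', vecs, 2,
//		sizeof(header) + bodySize, B_RELATIVE_TIMEOUT, 1000000);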
1396 
1397 status_t
1398 set_port_owner(port_id id, team_id newTeamID)
1399 {
1400 	TRACE(("set_port_owner(id = %ld, team = %ld)\n", id, newTeamID));
1401 
1402 	if (id < 0)
1403 		return B_BAD_PORT_ID;
1404 
1405 	int32 slot = id % sMaxPorts;
1406 
1407 	MutexLocker locker(sPorts[slot].lock);
1408 
1409 	if (sPorts[slot].id != id) {
1410 		TRACE(("set_port_owner: invalid port_id %ld\n", id));
1411 		return B_BAD_PORT_ID;
1412 	}
1413 
1414 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
1415 
1416 	struct team* team = team_get_team_struct_locked(newTeamID);
1417 	if (team == NULL) {
1418 		T(OwnerChange(sPorts[slot], newTeamID, B_BAD_TEAM_ID));
1419 		return B_BAD_TEAM_ID;
1420 	}
1421 
1422 	// transfer ownership to other team
1423 	list_remove_link(&sPorts[slot].team_link);
1424 	list_add_item(&team->port_list, &sPorts[slot].team_link);
1425 	sPorts[slot].owner = newTeamID;
1426 
1427 	T(OwnerChange(sPorts[slot], newTeamID, B_OK));
1428 	return B_OK;
1429 }
1430 
1431 
1432 //	#pragma mark - syscalls
1433 
1434 
1435 port_id
1436 _user_create_port(int32 queueLength, const char *userName)
1437 {
1438 	char name[B_OS_NAME_LENGTH];
1439 
1440 	if (userName == NULL)
1441 		return create_port(queueLength, NULL);
1442 
1443 	if (!IS_USER_ADDRESS(userName)
1444 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
1445 		return B_BAD_ADDRESS;
1446 
1447 	return create_port(queueLength, name);
1448 }
1449 
1450 
1451 status_t
1452 _user_close_port(port_id id)
1453 {
1454 	return close_port(id);
1455 }
1456 
1457 
1458 status_t
1459 _user_delete_port(port_id id)
1460 {
1461 	return delete_port(id);
1462 }
1463 
1464 
1465 port_id
1466 _user_find_port(const char *userName)
1467 {
1468 	char name[B_OS_NAME_LENGTH];
1469 
1470 	if (userName == NULL)
1471 		return B_BAD_VALUE;
1472 	if (!IS_USER_ADDRESS(userName)
1473 		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
1474 		return B_BAD_ADDRESS;
1475 
1476 	return find_port(name);
1477 }
1478 
1479 
1480 status_t
1481 _user_get_port_info(port_id id, struct port_info *userInfo)
1482 {
1483 	struct port_info info;
1484 	status_t status;
1485 
1486 	if (userInfo == NULL)
1487 		return B_BAD_VALUE;
1488 	if (!IS_USER_ADDRESS(userInfo))
1489 		return B_BAD_ADDRESS;
1490 
1491 	status = get_port_info(id, &info);
1492 
1493 	// copy back to user space
1494 	if (status == B_OK
1495 		&& user_memcpy(userInfo, &info, sizeof(struct port_info)) < B_OK)
1496 		return B_BAD_ADDRESS;
1497 
1498 	return status;
1499 }
1500 
1501 
1502 status_t
1503 _user_get_next_port_info(team_id team, int32 *userCookie,
1504 	struct port_info *userInfo)
1505 {
1506 	struct port_info info;
1507 	status_t status;
1508 	int32 cookie;
1509 
1510 	if (userCookie == NULL || userInfo == NULL)
1511 		return B_BAD_VALUE;
1512 	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
1513 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
1514 		return B_BAD_ADDRESS;
1515 
1516 	status = get_next_port_info(team, &cookie, &info);
1517 
1518 	// copy back to user space
1519 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
1520 		|| (status == B_OK && user_memcpy(userInfo, &info,
1521 				sizeof(struct port_info)) < B_OK))
1522 		return B_BAD_ADDRESS;
1523 
1524 	return status;
1525 }
1526 
1527 
1528 ssize_t
1529 _user_port_buffer_size_etc(port_id port, uint32 flags, bigtime_t timeout)
1530 {
1531 	syscall_restart_handle_timeout_pre(flags, timeout);
1532 
1533 	status_t status = port_buffer_size_etc(port, flags | B_CAN_INTERRUPT,
1534 		timeout);
1535 
1536 	return syscall_restart_handle_timeout_post(status, timeout);
1537 }
1538 
1539 
1540 ssize_t
1541 _user_port_count(port_id port)
1542 {
1543 	return port_count(port);
1544 }
1545 
1546 
1547 status_t
1548 _user_set_port_owner(port_id port, team_id team)
1549 {
1550 	return set_port_owner(port, team);
1551 }
1552 
1553 
1554 ssize_t
1555 _user_read_port_etc(port_id port, int32 *userCode, void *userBuffer,
1556 	size_t bufferSize, uint32 flags, bigtime_t timeout)
1557 {
1558 	int32 messageCode;
1559 	ssize_t	bytesRead;
1560 
1561 	syscall_restart_handle_timeout_pre(flags, timeout);
1562 
1563 	if (userBuffer == NULL && bufferSize != 0)
1564 		return B_BAD_VALUE;
1565 	if ((userCode != NULL && !IS_USER_ADDRESS(userCode))
1566 		|| (userBuffer != NULL && !IS_USER_ADDRESS(userBuffer)))
1567 		return B_BAD_ADDRESS;
1568 
1569 	bytesRead = read_port_etc(port, &messageCode, userBuffer, bufferSize,
1570 		flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT, timeout);
1571 
1572 	if (bytesRead >= 0 && userCode != NULL
1573 		&& user_memcpy(userCode, &messageCode, sizeof(int32)) < B_OK)
1574 		return B_BAD_ADDRESS;
1575 
1576 	return syscall_restart_handle_timeout_post(bytesRead, timeout);
1577 }
1578 
1579 
1580 status_t
1581 _user_write_port_etc(port_id port, int32 messageCode, const void *userBuffer,
1582 	size_t bufferSize, uint32 flags, bigtime_t timeout)
1583 {
1584 	iovec vec = { (void *)userBuffer, bufferSize };
1585 
1586 	syscall_restart_handle_timeout_pre(flags, timeout);
1587 
1588 	if (userBuffer == NULL && bufferSize != 0)
1589 		return B_BAD_VALUE;
1590 	if (userBuffer != NULL && !IS_USER_ADDRESS(userBuffer))
1591 		return B_BAD_ADDRESS;
1592 
1593 	status_t status = writev_port_etc(port, messageCode, &vec, 1, bufferSize,
1594 		flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT, timeout);
1595 
1596 	return syscall_restart_handle_timeout_post(status, timeout);
1597 }
1598 
1599 
1600 status_t
1601 _user_writev_port_etc(port_id port, int32 messageCode, const iovec *userVecs,
1602 	size_t vecCount, size_t bufferSize, uint32 flags, bigtime_t timeout)
1603 {
1604 	syscall_restart_handle_timeout_pre(flags, timeout);
1605 
1606 	if (userVecs == NULL && bufferSize != 0)
1607 		return B_BAD_VALUE;
1608 	if (userVecs != NULL && !IS_USER_ADDRESS(userVecs))
1609 		return B_BAD_ADDRESS;
1610 
1611 	iovec *vecs = NULL;
1612 	if (userVecs && vecCount != 0) {
1613 		vecs = (iovec*)malloc(sizeof(iovec) * vecCount);
1614 		if (vecs == NULL)
1615 			return B_NO_MEMORY;
1616 
1617 		if (user_memcpy(vecs, userVecs, sizeof(iovec) * vecCount) < B_OK) {
1618 			free(vecs);
1619 			return B_BAD_ADDRESS;
1620 		}
1621 	}
1622 
1623 	status_t status = writev_port_etc(port, messageCode, vecs, vecCount,
1624 		bufferSize, flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT,
1625 		timeout);
1626 
1627 	free(vecs);
1628 	return syscall_restart_handle_timeout_post(status, timeout);
1629 }
1630 
1631 
1632 status_t
1633 _user_get_port_message_info_etc(port_id port, port_message_info *userInfo,
1634 	size_t infoSize, uint32 flags, bigtime_t timeout)
1635 {
1636 	if (userInfo == NULL || infoSize != sizeof(port_message_info))
1637 		return B_BAD_VALUE;
1638 
1639 	syscall_restart_handle_timeout_pre(flags, timeout);
1640 
1641 	port_message_info info;
1642 	status_t error = _get_port_message_info_etc(port, &info, sizeof(info),
1643 		flags | B_CAN_INTERRUPT, timeout);
1644 
1645 	// copy info to userland
1646 	if (error == B_OK && (!IS_USER_ADDRESS(userInfo)
1647 			|| user_memcpy(userInfo, &info, sizeof(info)) != B_OK)) {
1648 		error = B_BAD_ADDRESS;
1649 	}
1650 
1651 	return syscall_restart_handle_timeout_post(error, timeout);
1652 }
1653