/*
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Mark-Jan Bastian. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Ports for IPC */


#include <port.h>

#include <ctype.h>
#include <iovec.h>
#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <arch/int.h>
#include <heap.h>
#include <kernel.h>
#include <Notifications.h>
#include <sem.h>
#include <syscall_restart.h>
#include <team.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <util/list.h>
#include <vm/vm.h>
#include <wait_for_objects.h>


//#define TRACE_PORTS
#ifdef TRACE_PORTS
#	define TRACE(x) dprintf x
#else
#	define TRACE(x)
#endif


// A message queued on a port. The payload directly follows this header
// ("buffer" is a zero-length trailing array); the whole message is
// allocated in one piece from the port heap (sPortAllocator).
struct port_message : DoublyLinkedListLinkImpl<port_message> {
	int32		code;
	size_t		size;
	uid_t		sender;
	gid_t		sender_group;
	team_id		sender_team;
	char		buffer[0];
};

typedef DoublyLinkedList<port_message> MessageList;

// One slot of the global sPorts table. A slot is free when id == -1;
// the port's name is stored as the mutex name (port.lock.name).
struct port_entry {
	struct list_link	team_link;		// linkage in the owner team's port_list
	port_id				id;				// -1 when the slot is unused
	team_id				owner;
	int32				capacity;		// 0 marks the port as closed
	mutex				lock;
	uint32				read_count;		// messages currently queued
		// NOTE(review): declared uint32, but traced/printed as int32 elsewhere
	int32				write_count;	// free queue slots left for writers
	ConditionVariable	read_condition;
	ConditionVariable	write_condition;
	int32				total_count;
		// messages read from port since creation
	select_info*		select_infos;
	MessageList			messages;
};

// Broadcasts port lifecycle events (PORT_ADDED/PORT_REMOVED) to
// registered listeners via the kernel notification framework.
class PortNotificationService : public DefaultNotificationService {
public:
							PortNotificationService();

			void			Notify(uint32 opcode, port_id team);
};


#if PORT_TRACING
namespace PortTracing {

class Create : public AbstractTraceEntry {
public:
	Create(port_entry& port)
		:
		fID(port.id),
		fOwner(port.owner),
		fCapacity(port.capacity)
	{
		fName = alloc_tracing_buffer_strcpy(port.lock.name, B_OS_NAME_LENGTH,
			false);

		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("port %ld created, name \"%s\", owner %ld, capacity %ld",
			fID, fName, fOwner, fCapacity);
	}

private:
	port_id		fID;
	char*		fName;
	team_id		fOwner;
	int32		fCapacity;
};


class Delete : public AbstractTraceEntry {
public:
	Delete(port_entry& port)
		:
		fID(port.id)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("port %ld deleted", fID);
	}

private:
	port_id		fID;
};


class Read : public AbstractTraceEntry {
public:
	Read(port_entry& port, int32 code, ssize_t result)
		:
		fID(port.id),
		fReadCount(port.read_count),
		fWriteCount(port.write_count),
		fCode(code),
		fResult(result)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("port %ld read, read %ld, write %ld, code %lx: %ld",
			fID, fReadCount, fWriteCount, fCode, fResult);
	}

private:
	port_id		fID;
	int32		fReadCount;
	int32		fWriteCount;
	int32		fCode;
	ssize_t		fResult;
};


class Write : public AbstractTraceEntry {
public:
	Write(port_entry& port, int32 code, size_t bufferSize, ssize_t result)
		:
		fID(port.id),
		fReadCount(port.read_count),
		fWriteCount(port.write_count),
		fCode(code),
		fBufferSize(bufferSize),
		fResult(result)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("port %ld write, read %ld, write %ld, code %lx, size %ld: %ld",
			fID, fReadCount, fWriteCount, fCode, fBufferSize, fResult);
	}

private:
	port_id		fID;
	int32		fReadCount;
	int32		fWriteCount;
	int32		fCode;
	size_t		fBufferSize;
	ssize_t		fResult;
};


class Info : public AbstractTraceEntry {
public:
	Info(port_entry& port, int32 code, ssize_t result)
		:
		fID(port.id),
		fReadCount(port.read_count),
		fWriteCount(port.write_count),
		fCode(code),
		fResult(result)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("port %ld info, read %ld, write %ld, code %lx: %ld",
			fID, fReadCount, fWriteCount, fCode, fResult);
	}

private:
	port_id		fID;
	int32		fReadCount;
	int32		fWriteCount;
	int32		fCode;
	ssize_t		fResult;
};


class OwnerChange : public AbstractTraceEntry {
public:
	OwnerChange(port_entry& port, team_id newOwner, status_t status)
		:
		fID(port.id),
		fOldOwner(port.owner),
		fNewOwner(newOwner),
		fStatus(status)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("port %ld owner change from %ld to %ld: %s", fID, fOldOwner,
			fNewOwner, strerror(fStatus));
	}

private:
	port_id		fID;
	team_id		fOldOwner;
	team_id		fNewOwner;
	status_t	fStatus;
};

}	// namespace PortTracing

#	define T(x) new(std::nothrow) PortTracing::x;
#else
#	define T(x) ;
#endif


static const size_t kInitialPortBufferSize = 4 * 1024 * 1024;
static const size_t kTotalSpaceLimit = 64 * 1024 * 1024;
static const size_t kTeamSpaceLimit = 8 * 1024 * 1024;
static const size_t kBufferGrowRate = kInitialPortBufferSize;

#define MAX_QUEUE_LENGTH 4096
#define PORT_MAX_MESSAGE_SIZE (256 * 1024)

// sMaxPorts must be power of 2
static int32 sMaxPorts = 4096;
static int32 sUsedPorts = 0;

static struct port_entry* sPorts;
static area_id sPortArea;
static heap_allocator* sPortAllocator;
static ConditionVariable sNoSpaceCondition;
static vint32 sTotalSpaceInUse;
static vint32 sAreaChangeCounter;
static vint32 sAllocatingArea;
static bool sPortsActive = false;
static port_id sNextPort = 1;
static int32 sFirstFreeSlot = 1;
static mutex sPortsLock = MUTEX_INITIALIZER("ports list");

static PortNotificationService sNotificationService;


// #pragma mark - TeamNotificationService


PortNotificationService::PortNotificationService()
	:
	DefaultNotificationService("ports")
{
}


void
PortNotificationService::Notify(uint32 opcode, port_id port)
{
	// Build the event message on the stack and hand it to the generic
	// notification dispatcher.
	char eventBuffer[64];
	KMessage event;
	event.SetTo(eventBuffer, sizeof(eventBuffer), PORT_MONITOR);
	event.AddInt32("event", opcode);
	event.AddInt32("port", port);

	DefaultNotificationService::Notify(event, opcode);
}


// #pragma mark -


/*!	Debugger command: lists all active ports, optionally filtered by
	owning team ("team"/"owner" <id>) or by name substring ("name" <name>).
*/
static int
dump_port_list(int argc, char** argv)
{
	const char* name = NULL;
	team_id owner = -1;
	int32 i;

	if (argc > 2) {
		if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
			owner = strtoul(argv[2], NULL, 0);
		else if (!strcmp(argv[1], "name"))
			name = argv[2];
	} else if (argc > 1)
		owner = strtoul(argv[1], NULL, 0);

	kprintf("port id cap read-cnt write-cnt total team "
		"name\n");

	for (i = 0; i < sMaxPorts; i++) {
		struct port_entry* port = &sPorts[i];
		if (port->id < 0
			|| (owner != -1 && port->owner != owner)
			|| (name != NULL && strstr(port->lock.name, name) == NULL))
			continue;

		kprintf("%p %8ld %4ld %9ld %9ld %8ld %6ld %s\n", port,
			port->id, port->capacity, port->read_count, port->write_count,
			port->total_count, port->owner, port->lock.name);
	}

	return 0;
}


/*!	Prints detailed information about one port slot (debugger context,
	no locking). Also sets the "_port"/"_portID"/"_owner" debug variables.
*/
static void
_dump_port_info(struct port_entry* port)
{
	kprintf("PORT: %p\n", port);
	kprintf(" id: %ld\n", port->id);
	kprintf(" name: \"%s\"\n", port->lock.name);
	kprintf(" owner: %ld\n", port->owner);
	kprintf(" capacity: %ld\n", port->capacity);
	kprintf(" read_count: %ld\n", port->read_count);
	kprintf(" write_count: %ld\n", port->write_count);
	kprintf(" total count: %ld\n", port->total_count);

	if (!port->messages.IsEmpty()) {
		kprintf("messages:\n");

		MessageList::Iterator iterator = port->messages.GetIterator();
		while (port_message* message = iterator.Next()) {
			kprintf(" %p %08lx %ld\n", message, message->code, message->size);
		}
	}

	set_debug_variable("_port", (addr_t)port);
	set_debug_variable("_portID", port->id);
	set_debug_variable("_owner", port->owner);
}


/*!	Debugger command: dumps a single port, looked up by id, structure
	address, name, or by the address of one of its condition variables.
*/
static int
dump_port_info(int argc, char** argv)
{
	ConditionVariable* condition = NULL;
	const char* name = NULL;

	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (argc > 2) {
		if (!strcmp(argv[1], "address")) {
			_dump_port_info((struct port_entry*)parse_expression(argv[2]));
			return 0;
		} else if (!strcmp(argv[1], "condition"))
			condition = (ConditionVariable*)parse_expression(argv[2]);
		else if (!strcmp(argv[1], "name"))
			name = argv[2];
	} else if (parse_expression(argv[1]) > 0) {
		// if the argument looks like a number, treat it as such
		int32 num = parse_expression(argv[1]);
		int32 slot = num % sMaxPorts;
		if (sPorts[slot].id != num) {
			kprintf("port %ld (%#lx) doesn't exist!\n", num, num);
			return 0;
		}
		_dump_port_info(&sPorts[slot]);
		return 0;
	} else
		name = argv[1];

	// walk through the ports list, trying to match name
	for (int32 i = 0; i < sMaxPorts; i++) {
		if ((name != NULL && sPorts[i].lock.name != NULL
				&& !strcmp(name, sPorts[i].lock.name))
			|| (condition != NULL && (&sPorts[i].read_condition == condition
				|| &sPorts[i].write_condition == condition))) {
			_dump_port_info(&sPorts[i]);
			return 0;
		}
	}

	return 0;
}


//! Fires the given select events for everyone selecting on this slot.
static void
notify_port_select_events(int slot, uint16 events)
{
	if (sPorts[slot].select_infos)
		notify_select_events_list(sPorts[slot].select_infos, events);
}


/*!	Frees a message and returns its bytes to the global space quota,
	waking any threads that are waiting for space.
	NOTE(review): "-size" negates a size_t before the conversion to the
	atomic's operand type — verify this is intended on all targets.
*/
static void
put_port_message(port_message* message)
{
	size_t size = sizeof(port_message) + message->size;
	heap_free(sPortAllocator, message);

	atomic_add(&sTotalSpaceInUse, -size);
	sNoSpaceCondition.NotifyAll();
}


/*!	Allocates a message (header + \a bufferSize payload) from the port
	heap, respecting the global space quota. May block according to
	\a flags/\a timeout while waiting for quota, and grows the heap by a
	new area when the allocator runs out of memory. On success the new
	message (code/size initialized, payload uninitialized) is returned
	in \a _message.
*/
static status_t
get_port_message(int32 code, size_t bufferSize, uint32 flags, bigtime_t timeout,
	port_message** _message)
{
	size_t size = sizeof(port_message) + bufferSize;
	bool limitReached = false;

	while (true) {
		if (atomic_add(&sTotalSpaceInUse, size)
				> int32(kTotalSpaceLimit - size)) {
			// TODO: add per team limit
			// We are not allowed to create another heap area, as our
			// space limit has been reached - just wait until we get
			// some free space again.
			limitReached = true;

		wait:
			MutexLocker locker(sPortsLock);

			atomic_add(&sTotalSpaceInUse, -size);

			// TODO: we don't want to wait - but does that also mean we
			// shouldn't wait for the area creation?
			if (limitReached && (flags & B_RELATIVE_TIMEOUT) != 0
				&& timeout <= 0)
				return B_WOULD_BLOCK;

			ConditionVariableEntry entry;
			sNoSpaceCondition.Add(&entry);

			locker.Unlock();

			status_t status = entry.Wait(flags, timeout);
			if (status == B_TIMED_OUT)
				return B_TIMED_OUT;

			// just try again
			limitReached = false;
			continue;
		}

		int32 areaChangeCounter = atomic_get(&sAreaChangeCounter);

		// Quota is fulfilled, try to allocate the buffer

		port_message* message
			= (port_message*)heap_memalign(sPortAllocator, 0, size);
		if (message != NULL) {
			message->code = code;
			message->size = bufferSize;

			*_message = message;
			return B_OK;
		}

		if (atomic_or(&sAllocatingArea, 1) != 0) {
			// Just wait for someone else to create an area for us
			goto wait;
		}

		if (areaChangeCounter != atomic_get(&sAreaChangeCounter)) {
			// Someone else added an area since we checked - retry the
			// allocation before creating yet another area ourselves.
			atomic_add(&sTotalSpaceInUse, -size);
			continue;
		}

		// Create a new area for the heap to use

		addr_t base;
		area_id area = create_area("port grown buffer", (void**)&base,
			B_ANY_KERNEL_ADDRESS, kBufferGrowRate, B_NO_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		if (area < 0) {
			// it's time to let the userland feel our pain
			sNoSpaceCondition.NotifyAll();
			return B_NO_MEMORY;
		}

		heap_add_area(sPortAllocator, area, base, kBufferGrowRate);

		atomic_add(&sAreaChangeCounter, 1);
		sNoSpaceCondition.NotifyAll();
		atomic_and(&sAllocatingArea, 0);
	}
}


/*! You need to own the port's lock when calling this function */
static bool
is_port_closed(int32 slot)
{
	return sPorts[slot].capacity == 0;
}


/*!	Fills the port_info structure with information from the specified
	port.
	The port lock must be held when called.
	NOTE(review): the \a size parameter is currently unused.
*/
static void
fill_port_info(struct port_entry* port, port_info* info, size_t size)
{
	info->port = port->id;
	info->team = port->owner;
	info->capacity = port->capacity;

	info->queue_count = port->read_count;
	info->total_count = port->total_count;

	strlcpy(info->name, port->lock.name, B_OS_NAME_LENGTH);
}


/*!	Copies a message's code and payload into the caller's buffer (via
	user_memcpy() if \a userCopy). The payload is truncated to
	\a bufferSize; returns the number of bytes copied, or a negative
	error from user_memcpy().
*/
static ssize_t
copy_port_message(port_message* message, int32* _code, void* buffer,
	size_t bufferSize, bool userCopy)
{
	// check output buffer size
	size_t size = min_c(bufferSize, message->size);

	// copy message
	if (_code != NULL)
		*_code = message->code;

	if (size > 0) {
		if (userCopy) {
			status_t status = user_memcpy(buffer, message->buffer, size);
			if (status != B_OK)
				return status;
		} else
			memcpy(buffer, message->buffer, size);
	}

	return size;
}


/*!	Invalidates a port slot: frees its name and queued messages, fires
	B_EVENT_INVALID for selectors, wakes all blocked readers/writers with
	B_BAD_PORT_ID, and sends the PORT_REMOVED notification.
	The port's lock must be held when called.
*/
static void
uninit_port_locked(struct port_entry& port)
{
	int32 id = port.id;

	// mark port as invalid
	port.id = -1;
	free((char*)port.lock.name);
	port.lock.name = NULL;

	while (port_message* message = port.messages.RemoveHead()) {
		put_port_message(message);
	}

	notify_port_select_events(id % sMaxPorts, B_EVENT_INVALID);
	port.select_infos = NULL;

	// Release the threads that were blocking on this port.
	// read_port() will see the B_BAD_PORT_ID return value, and act accordingly
	port.read_condition.NotifyAll(false, B_BAD_PORT_ID);
	port.write_condition.NotifyAll(false, B_BAD_PORT_ID);
	sNotificationService.Notify(PORT_REMOVED, id);
}


// #pragma mark - private kernel API


/*! This function deletes all the ports that are owned by the passed team.
*/
void
delete_owned_ports(Team* team)
{
	TRACE(("delete_owned_ports(owner = %ld)\n", team->id));

	struct list queue;

	{
		// Detach the team's whole port list under the team spinlock, so
		// the ports can be torn down without holding it.
		InterruptsSpinLocker locker(gTeamSpinlock);
		list_move_to_list(&team->port_list, &queue);
	}

	int32 firstSlot = sMaxPorts;
	int32 count = 0;

	while (port_entry* port = (port_entry*)list_remove_head_item(&queue)) {
		if (firstSlot > port->id % sMaxPorts)
			firstSlot = port->id % sMaxPorts;
		count++;

		MutexLocker locker(port->lock);
		uninit_port_locked(*port);
	}

	MutexLocker _(sPortsLock);

	// update the first free slot hint in the array
	if (firstSlot < sFirstFreeSlot)
		sFirstFreeSlot = firstSlot;

	sUsedPorts -= count;
}


int32
port_max_ports(void)
{
	return sMaxPorts;
}


int32
port_used_ports(void)
{
	return sUsedPorts;
}


/*!	Initializes the port subsystem: allocates the port table, creates the
	initial port heap, and registers the debugger commands. Called once
	during kernel startup.
*/
status_t
port_init(kernel_args *args)
{
	size_t size = sizeof(struct port_entry) * sMaxPorts;

	// create and initialize ports table
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	sPortArea = create_area_etc(B_SYSTEM_TEAM, "port_table", size, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
		&virtualRestrictions, &physicalRestrictions, (void**)&sPorts);
	if (sPortArea < 0) {
		panic("unable to allocate kernel port table!\n");
		return sPortArea;
	}

	memset(sPorts, 0, size);
	for (int32 i = 0; i < sMaxPorts; i++) {
		mutex_init(&sPorts[i].lock, NULL);
		sPorts[i].id = -1;
		sPorts[i].read_condition.Init(&sPorts[i], "port read");
		sPorts[i].write_condition.Init(&sPorts[i], "port write");
	}

	addr_t base;
	if (create_area("port heap", (void**)&base, B_ANY_KERNEL_ADDRESS,
			kInitialPortBufferSize, B_NO_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA) < 0) {
		// TODO: Since port_init() is invoked before the boot partition is
		// mounted, the underlying VMAnonymousCache cannot commit swap space
		// upon creation and thus the pages aren't swappable after all. This
		// makes the area essentially B_LAZY_LOCK with additional overhead.
		panic("unable to allocate port area!\n");
		return B_ERROR;
	}

	static const heap_class kBufferHeapClass = {"default", 100,
		PORT_MAX_MESSAGE_SIZE + sizeof(port_message), 2 * 1024,
		sizeof(port_message), 8, 4, 64};
	sPortAllocator = heap_create_allocator("port buffer", base,
		kInitialPortBufferSize, &kBufferHeapClass, true);
	if (sPortAllocator == NULL) {
		panic("unable to create port heap");
		return B_NO_MEMORY;
	}

	sNoSpaceCondition.Init(sPorts, "port space");

	// add debugger commands
	add_debugger_command_etc("ports", &dump_port_list,
		"Dump a list of all active ports (for team, with name, etc.)",
		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]\n"
		"Prints a list of all active ports meeting the given\n"
		"requirement. If no argument is given, all ports are listed.\n"
		" <team> - The team owning the ports.\n"
		" <name> - Part of the name of the ports.\n", 0);
	add_debugger_command_etc("port", &dump_port_info,
		"Dump info about a particular port",
		"(<id> | [ \"address\" ] <address>) | ([ \"name\" ] <name>) "
		"| (\"condition\" <address>)\n"
		"Prints info about the specified port.\n"
		" <address> - Pointer to the port structure.\n"
		" <name> - Name of the port.\n"
		" <condition> - address of the port's read or write condition.\n", 0);

	new(&sNotificationService) PortNotificationService();
	sPortsActive = true;
	return B_OK;
}


// #pragma mark - public kernel API


port_id
create_port(int32 queueLength, const char* name)
{
	TRACE(("create_port(queueLength = %ld, name = \"%s\")\n", queueLength,
		name));

	if (!sPortsActive) {
		panic("ports used too early!\n");
		return B_BAD_PORT_ID;
	}
	if (queueLength < 1 || queueLength > MAX_QUEUE_LENGTH)
		return B_BAD_VALUE;

	Team* team = thread_get_current_thread()->team;
	if (team == NULL)
		return B_BAD_TEAM_ID;

	MutexLocker locker(sPortsLock);

	// check early on if there are any free port slots to use
	if (sUsedPorts >= sMaxPorts)
		return B_NO_MORE_PORTS;

	// check & dup name
	char* nameBuffer = strdup(name != NULL ? name : "unnamed port");
	if (nameBuffer == NULL)
		return B_NO_MEMORY;

	sUsedPorts++;

	// find the first empty spot
	for (int32 slot = 0; slot < sMaxPorts; slot++) {
		int32 i = (slot + sFirstFreeSlot) % sMaxPorts;

		if (sPorts[i].id == -1) {
			// make the port_id be a multiple of the slot it's in
			if (i >= sNextPort % sMaxPorts)
				sNextPort += i - sNextPort % sMaxPorts;
			else
				sNextPort += sMaxPorts - (sNextPort % sMaxPorts - i);
			sFirstFreeSlot = slot + 1;

			MutexLocker portLocker(sPorts[i].lock);
			sPorts[i].id = sNextPort++;
			locker.Unlock();

			sPorts[i].capacity = queueLength;
			sPorts[i].owner = team_get_current_team_id();
			sPorts[i].lock.name = nameBuffer;
			sPorts[i].read_count = 0;
			sPorts[i].write_count = queueLength;
			sPorts[i].total_count = 0;
			sPorts[i].select_infos = NULL;

			{
				InterruptsSpinLocker teamLocker(gTeamSpinlock);
				list_add_item(&team->port_list, &sPorts[i].team_link);
			}

			port_id id = sPorts[i].id;

			T(Create(sPorts[i]));
			portLocker.Unlock();

			TRACE(("create_port() done: port created %ld\n", id));

			sNotificationService.Notify(PORT_ADDED, id);
			return id;
		}
	}

	// Still not enough ports... - due to sUsedPorts, this cannot really
	// happen anymore.
	panic("out of ports, but sUsedPorts is broken");
	return B_NO_MORE_PORTS;
}


/*!	Closes a port for writing: the port stays readable until drained, but
	capacity is set to 0 and all currently blocked readers/writers are
	woken with B_BAD_PORT_ID.
*/
status_t
close_port(port_id id)
{
	TRACE(("close_port(id = %ld)\n", id));

	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;

	int32 slot = id % sMaxPorts;

	// walk through the sem list, trying to match name
	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id) {
		TRACE(("close_port: invalid port_id %ld\n", id));
		return B_BAD_PORT_ID;
	}

	// mark port to disable writing - deleting the semaphores will
	// wake up waiting read/writes
	sPorts[slot].capacity = 0;

	notify_port_select_events(slot, B_EVENT_INVALID);
	sPorts[slot].select_infos = NULL;

	sPorts[slot].read_condition.NotifyAll(false, B_BAD_PORT_ID);
	sPorts[slot].write_condition.NotifyAll(false, B_BAD_PORT_ID);

	return B_OK;
}


/*!	Deletes a port: removes it from its owner team's list, frees its
	messages and name, and releases its slot for reuse.
*/
status_t
delete_port(port_id id)
{
	TRACE(("delete_port(id = %ld)\n", id));

	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;

	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id) {
		TRACE(("delete_port: invalid port_id %ld\n", id));
		return B_BAD_PORT_ID;
	}

	T(Delete(sPorts[slot]));

	{
		InterruptsSpinLocker teamLocker(gTeamSpinlock);
		list_remove_link(&sPorts[slot].team_link);
	}

	uninit_port_locked(sPorts[slot]);

	locker.Unlock();

	MutexLocker _(sPortsLock);

	// update the first free slot hint in the array
	if (slot < sFirstFreeSlot)
		sFirstFreeSlot = slot;

	sUsedPorts--;
	return B_OK;
}


/*!	Registers \a info for select() on the port and immediately reports
	any already-pending B_EVENT_READ/B_EVENT_WRITE events.
*/
status_t
select_port(int32 id, struct select_info* info, bool kernel)
{
	if (id < 0)
		return B_BAD_PORT_ID;

	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id || is_port_closed(slot))
		return B_BAD_PORT_ID;
	if (!kernel && sPorts[slot].owner == team_get_kernel_team_id()) {
		// kernel port, but call from userland
		return B_NOT_ALLOWED;
	}

	info->selected_events &= B_EVENT_READ | B_EVENT_WRITE | B_EVENT_INVALID;

	if (info->selected_events != 0) {
		uint16 events = 0;

		info->next = sPorts[slot].select_infos;
		sPorts[slot].select_infos = info;

		// check for events
		if ((info->selected_events & B_EVENT_READ) != 0
			&& !sPorts[slot].messages.IsEmpty()) {
			events |= B_EVENT_READ;
		}

		if (sPorts[slot].write_count > 0)
			events |= B_EVENT_WRITE;

		if (events != 0)
			notify_select_events(info, events);
	}

	return B_OK;
}


//! Unregisters \a info from the port's select list (if still present).
status_t
deselect_port(int32 id, struct select_info* info, bool kernel)
{
	if (id < 0)
		return B_BAD_PORT_ID;
	if (info->selected_events == 0)
		return B_OK;

	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id == id) {
		select_info** infoLocation = &sPorts[slot].select_infos;
		while (*infoLocation != NULL && *infoLocation != info)
			infoLocation = &(*infoLocation)->next;

		if (*infoLocation == info)
			*infoLocation = info->next;
	}

	return B_OK;
}


port_id
find_port(const char* name)
{
	TRACE(("find_port(name = \"%s\")\n", name));

	if (!sPortsActive) {
		panic("ports used too early!\n");
		return B_NAME_NOT_FOUND;
	}
	if (name == NULL)
		return B_BAD_VALUE;

	// Since we have to check every single port, and we don't
	// care if it goes away at any point, we're only grabbing
	// the port lock in question, not the port list lock

	// loop over list
	for (int32 i = 0; i < sMaxPorts; i++) {
		// lock every individual port before comparing
		MutexLocker _(sPorts[i].lock);

		if (sPorts[i].id >= 0 && !strcmp(name, sPorts[i].lock.name))
			return sPorts[i].id;
	}

	return B_NAME_NOT_FOUND;
}


status_t
_get_port_info(port_id id, port_info* info, size_t size)
{
	TRACE(("get_port_info(id = %ld)\n", id));

	if (info == NULL || size != sizeof(port_info))
		return B_BAD_VALUE;
	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;

	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id || sPorts[slot].capacity == 0) {
		TRACE(("get_port_info: invalid port_id %ld\n", id));
		return B_BAD_PORT_ID;
	}

	// fill a port_info struct with info
	fill_port_info(&sPorts[slot], info, size);
	return B_OK;
}


/*!	Iterates the given team's ports: \a _cookie is the next slot index to
	examine, and is advanced past the reported port on success.
*/
status_t
_get_next_port_info(team_id team, int32* _cookie, struct port_info* info,
	size_t size)
{
	TRACE(("get_next_port_info(team = %ld)\n", team));

	if (info == NULL || size != sizeof(port_info) || _cookie == NULL
		|| team < B_OK)
		return B_BAD_VALUE;
	if (!sPortsActive)
		return B_BAD_PORT_ID;

	int32 slot = *_cookie;
	if (slot >= sMaxPorts)
		return B_BAD_PORT_ID;

	if (team == B_CURRENT_TEAM)
		team = team_get_current_team_id();

	info->port = -1; // used as found flag

	while (slot < sMaxPorts) {
		MutexLocker locker(sPorts[slot].lock);

		if (sPorts[slot].id != -1 && !is_port_closed(slot)
			&& sPorts[slot].owner == team) {
			// found one!
			fill_port_info(&sPorts[slot], info, size);
			slot++;
			break;
		}

		slot++;
	}

	if (info->port == -1)
		return B_BAD_PORT_ID;

	*_cookie = slot;
	return B_OK;
}


ssize_t
port_buffer_size(port_id id)
{
	return port_buffer_size_etc(id, 0, 0);
}


ssize_t
port_buffer_size_etc(port_id id, uint32 flags, bigtime_t timeout)
{
	port_message_info info;
	status_t error = get_port_message_info_etc(id, &info, flags, timeout);
	return error != B_OK ? error : info.size;
}


/*!	Waits (according to \a flags/\a timeout) for a message to arrive on
	the port and fills \a info with its size and sender credentials
	without dequeuing it.
*/
status_t
_get_port_message_info_etc(port_id id, port_message_info* info,
	size_t infoSize, uint32 flags, bigtime_t timeout)
{
	if (info == NULL || infoSize != sizeof(port_message_info))
		return B_BAD_VALUE;
	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;

	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
		| B_ABSOLUTE_TIMEOUT;
	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id
		|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
		T(Info(sPorts[slot], 0, B_BAD_PORT_ID));
		TRACE(("_get_port_message_info_etc(): %s port %ld\n",
			sPorts[slot].id == id ? "closed" : "invalid", id));
		return B_BAD_PORT_ID;
	}

	while (sPorts[slot].read_count == 0) {
		// We need to wait for a message to appear
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
			return B_WOULD_BLOCK;

		ConditionVariableEntry entry;
		sPorts[slot].read_condition.Add(&entry);

		locker.Unlock();

		// block if no message, or, if B_TIMEOUT flag set, block with timeout
		status_t status = entry.Wait(flags, timeout);

		if (status != B_OK) {
			T(Info(sPorts[slot], 0, status));
			return status;
		}

		locker.Lock();

		if (sPorts[slot].id != id
			|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
			// the port is no longer there
			T(Info(sPorts[slot], 0, B_BAD_PORT_ID));
			return B_BAD_PORT_ID;
		}
	}

	// determine tail & get the length of the message
	port_message* message = sPorts[slot].messages.Head();
	if (message == NULL) {
		panic("port %ld: no messages found\n", sPorts[slot].id);
		return B_ERROR;
	}

	info->size = message->size;
	info->sender = message->sender;
	info->sender_group = message->sender_group;
	info->sender_team = message->sender_team;

	T(Info(sPorts[slot], message->code, B_OK));

	// notify next one, as we haven't read from the port
	sPorts[slot].read_condition.NotifyOne();

	return B_OK;
}


ssize_t
port_count(port_id id)
{
	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;

	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id) {
		TRACE(("port_count: invalid port_id %ld\n", id));
		return B_BAD_PORT_ID;
	}

	// return count of messages
	return sPorts[slot].read_count;
}


ssize_t
read_port(port_id port, int32* msgCode, void* buffer, size_t bufferSize)
{
	return read_port_etc(port, msgCode, buffer, bufferSize, 0, 0);
}


/*!	Reads (or, with B_PEEK_PORT_MESSAGE, peeks at) the next message on
	the port, blocking according to \a flags/\a timeout when the port is
	empty. Returns the number of payload bytes copied or an error.
*/
ssize_t
read_port_etc(port_id id, int32* _code, void* buffer, size_t bufferSize,
	uint32 flags, bigtime_t timeout)
{
	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;
	if ((buffer == NULL && bufferSize > 0) || timeout < 0)
		return B_BAD_VALUE;

	bool userCopy = (flags & PORT_FLAG_USE_USER_MEMCPY) != 0;
	bool peekOnly = !userCopy && (flags & B_PEEK_PORT_MESSAGE) != 0;
		// TODO: we could allow peeking for user apps now

	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
		| B_ABSOLUTE_TIMEOUT;

	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id
		|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
		T(Read(sPorts[slot], 0, B_BAD_PORT_ID));
		TRACE(("read_port_etc(): %s port %ld\n",
			sPorts[slot].id == id ? "closed" : "invalid", id));
		return B_BAD_PORT_ID;
	}

	while (sPorts[slot].read_count == 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
			return B_WOULD_BLOCK;

		// We need to wait for a message to appear
		ConditionVariableEntry entry;
		sPorts[slot].read_condition.Add(&entry);

		locker.Unlock();

		// block if no message, or, if B_TIMEOUT flag set, block with timeout
		status_t status = entry.Wait(flags, timeout);

		locker.Lock();

		if (sPorts[slot].id != id
			|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
			// the port is no longer there
			T(Read(sPorts[slot], 0, B_BAD_PORT_ID));
			return B_BAD_PORT_ID;
		}

		if (status != B_OK) {
			T(Read(sPorts[slot], 0, status));
			return status;
		}
	}

	// determine tail & get the length of the message
	port_message* message = sPorts[slot].messages.Head();
	if (message == NULL) {
		panic("port %ld: no messages found\n", sPorts[slot].id);
		return B_ERROR;
	}

	if (peekOnly) {
		size_t size = copy_port_message(message, _code, buffer, bufferSize,
			userCopy);

		T(Read(sPorts[slot], message->code, size));

		sPorts[slot].read_condition.NotifyOne();
			// we only peeked, but didn't grab the message
		return size;
	}

	sPorts[slot].messages.RemoveHead();
	sPorts[slot].total_count++;
	sPorts[slot].write_count++;
	sPorts[slot].read_count--;

	notify_port_select_events(slot, B_EVENT_WRITE);
	sPorts[slot].write_condition.NotifyOne();
		// make one spot in queue available again for write

	locker.Unlock();

	size_t size = copy_port_message(message, _code, buffer, bufferSize,
		userCopy);
	T(Read(sPorts[slot], message->code, size));

	put_port_message(message);
	return size;
}


status_t
write_port(port_id id, int32 msgCode, const void* buffer, size_t bufferSize)
{
	iovec vec = { (void*)buffer, bufferSize };

	return writev_port_etc(id, msgCode, &vec, 1, bufferSize, 0, 0);
}


status_t
write_port_etc(port_id id, int32 msgCode, const void* buffer,
	size_t bufferSize, uint32 flags, bigtime_t timeout)
{
	iovec vec = { (void*)buffer, bufferSize };

	return writev_port_etc(id, msgCode, &vec, 1, bufferSize, flags, timeout);
}


/*!	Queues a message assembled from \a msgVecs on the port, blocking
	(per \a flags/\a timeout) while the port is full or while waiting
	for message-buffer space.
*/
status_t
writev_port_etc(port_id id, int32 msgCode, const iovec* msgVecs,
	size_t vecCount, size_t bufferSize, uint32 flags, bigtime_t timeout)
{
	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;
	if (bufferSize > PORT_MAX_MESSAGE_SIZE)
		return B_BAD_VALUE;

	// mask irrelevant flags (for acquire_sem() usage)
	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
		| B_ABSOLUTE_TIMEOUT;
	if ((flags & B_RELATIVE_TIMEOUT) != 0
		&& timeout != B_INFINITE_TIMEOUT && timeout > 0) {
		// Make the timeout absolute, since we have more than one step where
		// we might have to wait
		flags = (flags & ~B_RELATIVE_TIMEOUT) | B_ABSOLUTE_TIMEOUT;
		timeout += system_time();
	}

	bool userCopy = (flags & PORT_FLAG_USE_USER_MEMCPY) > 0;

	int32 slot = id % sMaxPorts;
	status_t status;
	port_message* message = NULL;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id) {
		TRACE(("write_port_etc: invalid port_id %ld\n", id));
		return B_BAD_PORT_ID;
	}
	if (is_port_closed(slot)) {
		TRACE(("write_port_etc: port %ld closed\n", id));
		return B_BAD_PORT_ID;
	}

	if (sPorts[slot].write_count <= 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
			return B_WOULD_BLOCK;

		sPorts[slot].write_count--;

		// We need to block in order to wait for a free message slot
		ConditionVariableEntry entry;
		sPorts[slot].write_condition.Add(&entry);

		locker.Unlock();

		status = entry.Wait(flags, timeout);

		locker.Lock();

		if (sPorts[slot].id != id || is_port_closed(slot)) {
			// the port is no longer there
			T(Write(sPorts[slot], 0, 0, B_BAD_PORT_ID));
			return B_BAD_PORT_ID;
		}

		if (status != B_OK)
			goto error;
	} else
		sPorts[slot].write_count--;

	status = get_port_message(msgCode, bufferSize, flags, timeout,
		&message);
	if (status != B_OK)
		goto error;

	// sender credentials
	message->sender = geteuid();
	message->sender_group = getegid();
	message->sender_team = team_get_current_team_id();

	if (bufferSize > 0) {
		uint32 i;
		if (userCopy) {
			// copy from user memory
			for (i = 0; i < vecCount; i++) {
				size_t bytes = msgVecs[i].iov_len;
				if (bytes > bufferSize)
					bytes = bufferSize;

				status_t status = user_memcpy(message->buffer,
					msgVecs[i].iov_base, bytes);
				if (status != B_OK) {
					put_port_message(message);
					goto error;
				}

				bufferSize -= bytes;
				if (bufferSize == 0)
					break;
			}
		} else {
			// copy from kernel memory
			for (i = 0; i < vecCount; i++) {
				size_t bytes = msgVecs[i].iov_len;
				if (bytes > bufferSize)
					bytes = bufferSize;

				memcpy(message->buffer, msgVecs[i].iov_base, bytes);

				bufferSize -= bytes;
				if (bufferSize == 0)
					break;
			}
		}
	}

	sPorts[slot].messages.Add(message);
	sPorts[slot].read_count++;

	T(Write(sPorts[slot], message->code, message->size, B_OK));

	notify_port_select_events(slot, B_EVENT_READ);
	sPorts[slot].read_condition.NotifyOne();
	return B_OK;

error:
	// Give up our slot in the queue again, and let someone else
	// try and fail
	T(Write(sPorts[slot], 0, 0, status));
	sPorts[slot].write_count++;
notify_port_select_events(slot, B_EVENT_WRITE); 1383 sPorts[slot].write_condition.NotifyOne(); 1384 1385 return status; 1386 } 1387 1388 1389 status_t 1390 set_port_owner(port_id id, team_id newTeamID) 1391 { 1392 TRACE(("set_port_owner(id = %ld, team = %ld)\n", id, newTeamID)); 1393 1394 if (id < 0) 1395 return B_BAD_PORT_ID; 1396 1397 int32 slot = id % sMaxPorts; 1398 1399 MutexLocker locker(sPorts[slot].lock); 1400 1401 if (sPorts[slot].id != id) { 1402 TRACE(("set_port_owner: invalid port_id %ld\n", id)); 1403 return B_BAD_PORT_ID; 1404 } 1405 1406 InterruptsSpinLocker teamLocker(gTeamSpinlock); 1407 1408 Team* team = team_get_team_struct_locked(newTeamID); 1409 if (team == NULL) { 1410 T(OwnerChange(sPorts[slot], newTeamID, B_BAD_TEAM_ID)); 1411 return B_BAD_TEAM_ID; 1412 } 1413 1414 // transfer ownership to other team 1415 list_remove_link(&sPorts[slot].team_link); 1416 list_add_item(&team->port_list, &sPorts[slot].team_link); 1417 sPorts[slot].owner = newTeamID; 1418 1419 T(OwnerChange(sPorts[slot], newTeamID, B_OK)); 1420 return B_OK; 1421 } 1422 1423 1424 // #pragma mark - syscalls 1425 1426 1427 port_id 1428 _user_create_port(int32 queueLength, const char *userName) 1429 { 1430 char name[B_OS_NAME_LENGTH]; 1431 1432 if (userName == NULL) 1433 return create_port(queueLength, NULL); 1434 1435 if (!IS_USER_ADDRESS(userName) 1436 || user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK) 1437 return B_BAD_ADDRESS; 1438 1439 return create_port(queueLength, name); 1440 } 1441 1442 1443 status_t 1444 _user_close_port(port_id id) 1445 { 1446 return close_port(id); 1447 } 1448 1449 1450 status_t 1451 _user_delete_port(port_id id) 1452 { 1453 return delete_port(id); 1454 } 1455 1456 1457 port_id 1458 _user_find_port(const char *userName) 1459 { 1460 char name[B_OS_NAME_LENGTH]; 1461 1462 if (userName == NULL) 1463 return B_BAD_VALUE; 1464 if (!IS_USER_ADDRESS(userName) 1465 || user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK) 1466 return B_BAD_ADDRESS; 1467 
1468 return find_port(name); 1469 } 1470 1471 1472 status_t 1473 _user_get_port_info(port_id id, struct port_info *userInfo) 1474 { 1475 struct port_info info; 1476 status_t status; 1477 1478 if (userInfo == NULL) 1479 return B_BAD_VALUE; 1480 if (!IS_USER_ADDRESS(userInfo)) 1481 return B_BAD_ADDRESS; 1482 1483 status = get_port_info(id, &info); 1484 1485 // copy back to user space 1486 if (status == B_OK 1487 && user_memcpy(userInfo, &info, sizeof(struct port_info)) < B_OK) 1488 return B_BAD_ADDRESS; 1489 1490 return status; 1491 } 1492 1493 1494 status_t 1495 _user_get_next_port_info(team_id team, int32 *userCookie, 1496 struct port_info *userInfo) 1497 { 1498 struct port_info info; 1499 status_t status; 1500 int32 cookie; 1501 1502 if (userCookie == NULL || userInfo == NULL) 1503 return B_BAD_VALUE; 1504 if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo) 1505 || user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK) 1506 return B_BAD_ADDRESS; 1507 1508 status = get_next_port_info(team, &cookie, &info); 1509 1510 // copy back to user space 1511 if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK 1512 || (status == B_OK && user_memcpy(userInfo, &info, 1513 sizeof(struct port_info)) < B_OK)) 1514 return B_BAD_ADDRESS; 1515 1516 return status; 1517 } 1518 1519 1520 ssize_t 1521 _user_port_buffer_size_etc(port_id port, uint32 flags, bigtime_t timeout) 1522 { 1523 syscall_restart_handle_timeout_pre(flags, timeout); 1524 1525 status_t status = port_buffer_size_etc(port, flags | B_CAN_INTERRUPT, 1526 timeout); 1527 1528 return syscall_restart_handle_timeout_post(status, timeout); 1529 } 1530 1531 1532 ssize_t 1533 _user_port_count(port_id port) 1534 { 1535 return port_count(port); 1536 } 1537 1538 1539 status_t 1540 _user_set_port_owner(port_id port, team_id team) 1541 { 1542 return set_port_owner(port, team); 1543 } 1544 1545 1546 ssize_t 1547 _user_read_port_etc(port_id port, int32 *userCode, void *userBuffer, 1548 size_t bufferSize, uint32 flags, 
bigtime_t timeout) 1549 { 1550 int32 messageCode; 1551 ssize_t bytesRead; 1552 1553 syscall_restart_handle_timeout_pre(flags, timeout); 1554 1555 if (userBuffer == NULL && bufferSize != 0) 1556 return B_BAD_VALUE; 1557 if ((userCode != NULL && !IS_USER_ADDRESS(userCode)) 1558 || (userBuffer != NULL && !IS_USER_ADDRESS(userBuffer))) 1559 return B_BAD_ADDRESS; 1560 1561 bytesRead = read_port_etc(port, &messageCode, userBuffer, bufferSize, 1562 flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT, timeout); 1563 1564 if (bytesRead >= 0 && userCode != NULL 1565 && user_memcpy(userCode, &messageCode, sizeof(int32)) < B_OK) 1566 return B_BAD_ADDRESS; 1567 1568 return syscall_restart_handle_timeout_post(bytesRead, timeout); 1569 } 1570 1571 1572 status_t 1573 _user_write_port_etc(port_id port, int32 messageCode, const void *userBuffer, 1574 size_t bufferSize, uint32 flags, bigtime_t timeout) 1575 { 1576 iovec vec = { (void *)userBuffer, bufferSize }; 1577 1578 syscall_restart_handle_timeout_pre(flags, timeout); 1579 1580 if (userBuffer == NULL && bufferSize != 0) 1581 return B_BAD_VALUE; 1582 if (userBuffer != NULL && !IS_USER_ADDRESS(userBuffer)) 1583 return B_BAD_ADDRESS; 1584 1585 status_t status = writev_port_etc(port, messageCode, &vec, 1, bufferSize, 1586 flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT, timeout); 1587 1588 return syscall_restart_handle_timeout_post(status, timeout); 1589 } 1590 1591 1592 status_t 1593 _user_writev_port_etc(port_id port, int32 messageCode, const iovec *userVecs, 1594 size_t vecCount, size_t bufferSize, uint32 flags, bigtime_t timeout) 1595 { 1596 syscall_restart_handle_timeout_pre(flags, timeout); 1597 1598 if (userVecs == NULL && bufferSize != 0) 1599 return B_BAD_VALUE; 1600 if (userVecs != NULL && !IS_USER_ADDRESS(userVecs)) 1601 return B_BAD_ADDRESS; 1602 1603 iovec *vecs = NULL; 1604 if (userVecs && vecCount != 0) { 1605 vecs = (iovec*)malloc(sizeof(iovec) * vecCount); 1606 if (vecs == NULL) 1607 return B_NO_MEMORY; 1608 1609 
if (user_memcpy(vecs, userVecs, sizeof(iovec) * vecCount) < B_OK) { 1610 free(vecs); 1611 return B_BAD_ADDRESS; 1612 } 1613 } 1614 1615 status_t status = writev_port_etc(port, messageCode, vecs, vecCount, 1616 bufferSize, flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT, 1617 timeout); 1618 1619 free(vecs); 1620 return syscall_restart_handle_timeout_post(status, timeout); 1621 } 1622 1623 1624 status_t 1625 _user_get_port_message_info_etc(port_id port, port_message_info *userInfo, 1626 size_t infoSize, uint32 flags, bigtime_t timeout) 1627 { 1628 if (userInfo == NULL || infoSize != sizeof(port_message_info)) 1629 return B_BAD_VALUE; 1630 1631 syscall_restart_handle_timeout_pre(flags, timeout); 1632 1633 port_message_info info; 1634 status_t error = _get_port_message_info_etc(port, &info, sizeof(info), 1635 flags | B_CAN_INTERRUPT, timeout); 1636 1637 // copy info to userland 1638 if (error == B_OK && (!IS_USER_ADDRESS(userInfo) 1639 || user_memcpy(userInfo, &info, sizeof(info)) != B_OK)) { 1640 error = B_BAD_ADDRESS; 1641 } 1642 1643 return syscall_restart_handle_timeout_post(error, timeout); 1644 } 1645