/*
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Mark-Jan Bastian. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*!	Ports for IPC */


#include <port.h>

#include <ctype.h>
#include <iovec.h>
#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <arch/int.h>
#include <heap.h>
#include <kernel.h>
#include <Notifications.h>
#include <sem.h>
#include <syscall_restart.h>
#include <team.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <util/list.h>
#include <wait_for_objects.h>


//#define TRACE_PORTS
#ifdef TRACE_PORTS
#	define TRACE(x) dprintf x
#else
#	define TRACE(x)
#endif


struct port_message : DoublyLinkedListLinkImpl<port_message> {
	int32 code;
	size_t size;
	uid_t sender;
	gid_t sender_group;
	team_id sender_team;
	char buffer[0];
};

typedef DoublyLinkedList<port_message> MessageList;

struct port_entry {
	struct list_link team_link;
	port_id id;
	team_id owner;
	int32 capacity;
	mutex lock;
	uint32 read_count;
	int32 write_count;
	ConditionVariable read_condition;
	ConditionVariable write_condition;
	int32 total_count;
		// messages read from port since creation
	select_info* select_infos;
	MessageList messages;
};

class PortNotificationService : public DefaultNotificationService {
public:
	PortNotificationService();

	void Notify(uint32 opcode, port_id port);
};


#if PORT_TRACING
namespace PortTracing {

class Create : public AbstractTraceEntry {
public:
	Create(port_entry& port)
		:
		fID(port.id),
		fOwner(port.owner),
		fCapacity(port.capacity)
	{
		fName = alloc_tracing_buffer_strcpy(port.lock.name, B_OS_NAME_LENGTH,
			false);

		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("port %ld created, name \"%s\", owner %ld, capacity %ld",
			fID, fName, fOwner, fCapacity);
	}

private:
	port_id fID;
	char* fName;
	team_id fOwner;
	int32 fCapacity;
};


class Delete : public AbstractTraceEntry {
public:
	Delete(port_entry& port)
		:
		fID(port.id)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("port %ld deleted", fID);
	}

private:
	port_id fID;
};


class Read : public AbstractTraceEntry {
public:
	Read(port_entry& port, int32 code, ssize_t result)
		:
		fID(port.id),
		fReadCount(port.read_count),
		fWriteCount(port.write_count),
		fCode(code),
		fResult(result)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("port %ld read, read %ld, write %ld, code %lx: %ld",
			fID, fReadCount, fWriteCount, fCode, fResult);
	}

private:
	port_id fID;
	int32 fReadCount;
	int32 fWriteCount;
	int32 fCode;
	ssize_t fResult;
};


class Write : public AbstractTraceEntry {
public:
	Write(port_entry& port, int32 code, size_t bufferSize, ssize_t result)
		:
		fID(port.id),
		fReadCount(port.read_count),
		fWriteCount(port.write_count),
		fCode(code),
		fBufferSize(bufferSize),
		fResult(result)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("port %ld write, read %ld, write %ld, code %lx, size %ld: %ld",
			fID, fReadCount, fWriteCount, fCode, fBufferSize, fResult);
	}

private:
	port_id fID;
	int32 fReadCount;
	int32 fWriteCount;
	int32 fCode;
	size_t fBufferSize;
	ssize_t fResult;
};


class Info : public AbstractTraceEntry {
public:
	Info(port_entry& port, int32 code, ssize_t result)
		:
		fID(port.id),
		fReadCount(port.read_count),
		fWriteCount(port.write_count),
		fCode(code),
		fResult(result)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("port %ld info, read %ld, write %ld, code %lx: %ld",
			fID, fReadCount, fWriteCount, fCode, fResult);
	}

private:
	port_id fID;
	int32 fReadCount;
	int32 fWriteCount;
	int32 fCode;
	ssize_t fResult;
};


class OwnerChange : public AbstractTraceEntry {
public:
	OwnerChange(port_entry& port, team_id newOwner, status_t status)
		:
		fID(port.id),
		fOldOwner(port.owner),
		fNewOwner(newOwner),
		fStatus(status)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("port %ld owner change from %ld to %ld: %s", fID, fOldOwner,
			fNewOwner, strerror(fStatus));
	}

private:
	port_id fID;
	team_id fOldOwner;
	team_id fNewOwner;
	status_t fStatus;
};

}	// namespace PortTracing

#	define T(x) new(std::nothrow) PortTracing::x;
#else
#	define T(x) ;
#endif


static const size_t kInitialPortBufferSize = 4 * 1024 * 1024;
static const size_t kTotalSpaceLimit = 64 * 1024 * 1024;
static const size_t kTeamSpaceLimit = 8 * 1024 * 1024;
static const size_t kBufferGrowRate = kInitialPortBufferSize;

#define MAX_QUEUE_LENGTH 4096
#define PORT_MAX_MESSAGE_SIZE (256 * 1024)

// sMaxPorts must be power of 2
static int32 sMaxPorts = 4096;
static int32 sUsedPorts = 0;

static struct port_entry* sPorts;
static area_id sPortArea;
static heap_allocator* sPortAllocator;
static ConditionVariable sNoSpaceCondition;
static vint32 sTotalSpaceInUse;
static vint32 sAreaChangeCounter;
static vint32 sAllocatingArea;
static bool sPortsActive = false;
static port_id sNextPort = 1;
static int32 sFirstFreeSlot = 1;
static mutex sPortsLock = MUTEX_INITIALIZER("ports list");

static PortNotificationService sNotificationService;


// #pragma mark - PortNotificationService


PortNotificationService::PortNotificationService()
	:
	DefaultNotificationService("ports")
{
}


void
PortNotificationService::Notify(uint32 opcode, port_id port)
{
	char eventBuffer[64];
	KMessage event;
	event.SetTo(eventBuffer, sizeof(eventBuffer), PORT_MONITOR);
	event.AddInt32("event", opcode);
	event.AddInt32("port", port);

	DefaultNotificationService::Notify(event, opcode);
}


// #pragma mark -


static int
dump_port_list(int argc, char** argv)
{
	const char* name = NULL;
	team_id owner = -1;
	int32 i;

	if (argc > 2) {
		if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
			owner = strtoul(argv[2], NULL, 0);
		else if (!strcmp(argv[1], "name"))
			name = argv[2];
	} else if (argc > 1)
		owner = strtoul(argv[1], NULL, 0);

	kprintf("port             id  cap  read-cnt  write-cnt    total   team  "
		"name\n");

	for (i = 0; i < sMaxPorts; i++) {
		struct port_entry* port = &sPorts[i];
		if (port->id < 0
			|| (owner != -1 && port->owner != owner)
			|| (name != NULL && strstr(port->lock.name, name) == NULL))
			continue;

		kprintf("%p %8ld %4ld %9ld %9ld %8ld %6ld %s\n", port,
			port->id, port->capacity, port->read_count, port->write_count,
			port->total_count, port->owner, port->lock.name);
	}

	return 0;
}


static void
_dump_port_info(struct port_entry* port)
{
	kprintf("PORT: %p\n", port);
	kprintf(" id:          %ld\n", port->id);
	kprintf(" name:        \"%s\"\n", port->lock.name);
	kprintf(" owner:       %ld\n", port->owner);
	kprintf(" capacity:    %ld\n", port->capacity);
	kprintf(" read_count:  %ld\n", port->read_count);
	kprintf(" write_count: %ld\n", port->write_count);
	kprintf(" total count: %ld\n", port->total_count);

	if (!port->messages.IsEmpty()) {
		kprintf("messages:\n");

		MessageList::Iterator iterator = port->messages.GetIterator();
		while (port_message* message = iterator.Next()) {
			kprintf(" %p %08lx %ld\n", message, message->code, message->size);
		}
	}

	set_debug_variable("_port", (addr_t)port);
	set_debug_variable("_portID", port->id);
	set_debug_variable("_owner", port->owner);
}


static int
dump_port_info(int argc, char** argv)
{
	ConditionVariable* condition = NULL;
	const char* name = NULL;

	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (argc > 2) {
		if (!strcmp(argv[1], "address")) {
			_dump_port_info((struct port_entry*)parse_expression(argv[2]));
			return 0;
		} else if (!strcmp(argv[1], "condition"))
			condition = (ConditionVariable*)parse_expression(argv[2]);
		else if (!strcmp(argv[1], "name"))
			name = argv[2];
	} else if (parse_expression(argv[1]) > 0) {
		// if the argument looks like a number, treat it as such
		int32 num = parse_expression(argv[1]);
		int32 slot = num % sMaxPorts;
		if (sPorts[slot].id != num) {
			kprintf("port %ld (%#lx) doesn't exist!\n", num, num);
			return 0;
		}
		_dump_port_info(&sPorts[slot]);
		return 0;
	} else
		name = argv[1];

	// walk through the ports list, trying to match name
	for (int32 i = 0; i < sMaxPorts; i++) {
		if ((name != NULL && sPorts[i].lock.name != NULL
				&& !strcmp(name, sPorts[i].lock.name))
			|| (condition != NULL && (&sPorts[i].read_condition == condition
				|| &sPorts[i].write_condition == condition))) {
			_dump_port_info(&sPorts[i]);
			return 0;
		}
	}

	return 0;
}


static void
notify_port_select_events(int slot, uint16 events)
{
	if (sPorts[slot].select_infos)
		notify_select_events_list(sPorts[slot].select_infos, events);
}


static void
put_port_message(port_message* message)
{
	size_t size = sizeof(port_message) + message->size;
	heap_free(sPortAllocator, message);

	atomic_add(&sTotalSpaceInUse, -size);
	sNoSpaceCondition.NotifyAll();
}


static status_t
get_port_message(int32 code, size_t bufferSize, uint32 flags, bigtime_t timeout,
	port_message** _message)
{
	size_t size = sizeof(port_message) + bufferSize;
	bool limitReached = false;

	while (true) {
		if (atomic_add(&sTotalSpaceInUse, size)
				> int32(kTotalSpaceLimit - size)) {
			// TODO: add per team limit
			// We are not allowed to create another heap area, as our
			// space limit has been reached - just wait until we get
			// some free space again.
			limitReached = true;

	wait:
			MutexLocker locker(sPortsLock);

			atomic_add(&sTotalSpaceInUse, -size);

			// TODO: we don't want to wait - but does that also mean we
			// shouldn't wait for the area creation?
			if (limitReached && (flags & B_RELATIVE_TIMEOUT) != 0
				&& timeout <= 0)
				return B_WOULD_BLOCK;

			ConditionVariableEntry entry;
			sNoSpaceCondition.Add(&entry);

			locker.Unlock();

			status_t status = entry.Wait(flags, timeout);
			if (status == B_TIMED_OUT)
				return B_TIMED_OUT;

			// just try again
			limitReached = false;
			continue;
		}

		int32 areaChangeCounter = atomic_get(&sAreaChangeCounter);

		// Quota is fulfilled, try to allocate the buffer

		port_message* message
			= (port_message*)heap_memalign(sPortAllocator, 0, size);
		if (message != NULL) {
			message->code = code;
			message->size = bufferSize;

			*_message = message;
			return B_OK;
		}

		if (atomic_or(&sAllocatingArea, 1) != 0) {
			// Just wait for someone else to create an area for us
			goto wait;
		}

		if (areaChangeCounter != atomic_get(&sAreaChangeCounter)) {
			atomic_add(&sTotalSpaceInUse, -size);
			continue;
		}

		// Create a new area for the heap to use

		addr_t base;
		area_id area = create_area("port grown buffer", (void**)&base,
			B_ANY_KERNEL_ADDRESS, kBufferGrowRate, B_NO_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		if (area < 0) {
			// it's time to let the userland feel our pain
			sNoSpaceCondition.NotifyAll();
			return B_NO_MEMORY;
		}

		heap_add_area(sPortAllocator, area, base, kBufferGrowRate);

		atomic_add(&sAreaChangeCounter, 1);
		sNoSpaceCondition.NotifyAll();
		atomic_and(&sAllocatingArea, 0);
	}
}


/*!	You need to own the port's lock when calling this function */
static bool
is_port_closed(int32 slot)
{
	return sPorts[slot].capacity == 0;
}


/*!	Fills the port_info structure with information from the specified
	port.
	The port lock must be held when called.
*/
static void
fill_port_info(struct port_entry* port, port_info* info, size_t size)
{
	info->port = port->id;
	info->team = port->owner;
	info->capacity = port->capacity;

	info->queue_count = port->read_count;
	info->total_count = port->total_count;

	strlcpy(info->name, port->lock.name, B_OS_NAME_LENGTH);
}


static ssize_t
copy_port_message(port_message* message, int32* _code, void* buffer,
	size_t bufferSize, bool userCopy)
{
	// check output buffer size
	size_t size = min_c(bufferSize, message->size);

	// copy message
	if (_code != NULL)
		*_code = message->code;

	if (size > 0) {
		if (userCopy) {
			status_t status = user_memcpy(buffer, message->buffer, size);
			if (status != B_OK)
				return status;
		} else
			memcpy(buffer, message->buffer, size);
	}

	return size;
}

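/*	Illustrative note (added for clarity, not from the original sources):
	copy_port_message() copies at most bufferSize bytes and returns the
	number of bytes actually copied, so an undersized destination simply
	truncates the message. For a hypothetical 100 byte message:

		char small[64];
		int32 code;
		ssize_t copied = copy_port_message(message, &code, small,
			sizeof(small), false);
		// copied == 64; read_port_etc() discards the rest of the message
		// once it has been dequeued.
*/
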
static void
uninit_port_locked(struct port_entry& port)
{
	int32 id = port.id;

	// mark port as invalid
	port.id = -1;
	free((char*)port.lock.name);
	port.lock.name = NULL;

	while (port_message* message = port.messages.RemoveHead()) {
		put_port_message(message);
	}

	notify_port_select_events(id % sMaxPorts, B_EVENT_INVALID);
	port.select_infos = NULL;

	// Release the threads that were blocking on this port.
	// read_port() will see the B_BAD_PORT_ID return value, and act accordingly
	port.read_condition.NotifyAll(false, B_BAD_PORT_ID);
	port.write_condition.NotifyAll(false, B_BAD_PORT_ID);
	sNotificationService.Notify(PORT_REMOVED, id);
}


// #pragma mark - private kernel API


/*!	This function deletes all the ports that are owned by the passed team.
*/
void
delete_owned_ports(struct team* team)
{
	TRACE(("delete_owned_ports(owner = %ld)\n", team->id));

	struct list queue;

	{
		InterruptsSpinLocker locker(gTeamSpinlock);
		list_move_to_list(&team->port_list, &queue);
	}

	int32 firstSlot = sMaxPorts;
	int32 count = 0;

	while (port_entry* port = (port_entry*)list_remove_head_item(&queue)) {
		if (firstSlot > port->id % sMaxPorts)
			firstSlot = port->id % sMaxPorts;
		count++;

		MutexLocker locker(port->lock);
		uninit_port_locked(*port);
	}

	MutexLocker _(sPortsLock);

	// update the first free slot hint in the array
	if (firstSlot < sFirstFreeSlot)
		sFirstFreeSlot = firstSlot;

	sUsedPorts -= count;
}


int32
port_max_ports(void)
{
	return sMaxPorts;
}


int32
port_used_ports(void)
{
	return sUsedPorts;
}


status_t
port_init(kernel_args *args)
{
	size_t size = sizeof(struct port_entry) * sMaxPorts;

	// create and initialize ports table
	sPortArea = create_area("port_table",
		(void**)&sPorts, B_ANY_KERNEL_ADDRESS, size, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (sPortArea < 0) {
		panic("unable to allocate kernel port table!\n");
		return sPortArea;
	}

	memset(sPorts, 0, size);
	for (int32 i = 0; i < sMaxPorts; i++) {
		mutex_init(&sPorts[i].lock, NULL);
		sPorts[i].id = -1;
		sPorts[i].read_condition.Init(&sPorts[i], "port read");
		sPorts[i].write_condition.Init(&sPorts[i], "port write");
	}

	addr_t base;
	if (create_area("port heap", (void**)&base, B_ANY_KERNEL_ADDRESS,
			kInitialPortBufferSize, B_NO_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA) < 0) {
		// TODO: Since port_init() is invoked before the boot partition is
		// mounted, the underlying VMAnonymousCache cannot commit swap space
		// upon creation and thus the pages aren't swappable after all. This
		// makes the area essentially B_LAZY_LOCK with additional overhead.
		panic("unable to allocate port area!\n");
		return B_ERROR;
	}

	static const heap_class kBufferHeapClass = {"default", 100,
		PORT_MAX_MESSAGE_SIZE + sizeof(port_message), 2 * 1024,
		sizeof(port_message), 8, 4, 64};
	sPortAllocator = heap_create_allocator("port buffer", base,
		kInitialPortBufferSize, &kBufferHeapClass, true);
	if (sPortAllocator == NULL) {
		panic("unable to create port heap");
		return B_NO_MEMORY;
	}

	sNoSpaceCondition.Init(sPorts, "port space");

	// add debugger commands
	add_debugger_command_etc("ports", &dump_port_list,
		"Dump a list of all active ports (for team, with name, etc.)",
		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]\n"
		"Prints a list of all active ports meeting the given\n"
		"requirement. If no argument is given, all ports are listed.\n"
		"  <team>  - The team owning the ports.\n"
		"  <name>  - Part of the name of the ports.\n", 0);
	add_debugger_command_etc("port", &dump_port_info,
		"Dump info about a particular port",
		"(<id> | [ \"address\" ] <address>) | ([ \"name\" ] <name>) "
			"| (\"condition\" <address>)\n"
		"Prints info about the specified port.\n"
		"  <address>   - Pointer to the port structure.\n"
		"  <name>      - Name of the port.\n"
		"  <condition> - address of the port's read or write condition.\n", 0);

	new(&sNotificationService) PortNotificationService();
	sPortsActive = true;
	return B_OK;
}


// #pragma mark - public kernel API


port_id
create_port(int32 queueLength, const char* name)
{
	TRACE(("create_port(queueLength = %ld, name = \"%s\")\n", queueLength,
		name));

	if (!sPortsActive) {
		panic("ports used too early!\n");
		return B_BAD_PORT_ID;
	}
	if (queueLength < 1 || queueLength > MAX_QUEUE_LENGTH)
		return B_BAD_VALUE;

	struct team* team = thread_get_current_thread()->team;
	if (team == NULL)
		return B_BAD_TEAM_ID;

	MutexLocker locker(sPortsLock);

	// check early on if there are any free port slots to use
	if (sUsedPorts >= sMaxPorts)
		return B_NO_MORE_PORTS;

	// check & dup name
	char* nameBuffer = strdup(name != NULL ? name : "unnamed port");
	if (nameBuffer == NULL)
		return B_NO_MEMORY;

	sUsedPorts++;

	// find the first empty spot
	for (int32 slot = 0; slot < sMaxPorts; slot++) {
		int32 i = (slot + sFirstFreeSlot) % sMaxPorts;

		if (sPorts[i].id == -1) {
			// make the port_id map back to its slot (id % sMaxPorts == i)
			if (i >= sNextPort % sMaxPorts)
				sNextPort += i - sNextPort % sMaxPorts;
			else
				sNextPort += sMaxPorts - (sNextPort % sMaxPorts - i);
			sFirstFreeSlot = slot + 1;

			MutexLocker portLocker(sPorts[i].lock);
			sPorts[i].id = sNextPort++;
			locker.Unlock();

			sPorts[i].capacity = queueLength;
			sPorts[i].owner = team_get_current_team_id();
			sPorts[i].lock.name = nameBuffer;
			sPorts[i].read_count = 0;
			sPorts[i].write_count = queueLength;
			sPorts[i].total_count = 0;
			sPorts[i].select_infos = NULL;

			{
				InterruptsSpinLocker teamLocker(gTeamSpinlock);
				list_add_item(&team->port_list, &sPorts[i].team_link);
			}

			port_id id = sPorts[i].id;

			T(Create(sPorts[i]));
			portLocker.Unlock();

			TRACE(("create_port() done: port created %ld\n", id));

			sNotificationService.Notify(PORT_ADDED, id);
			return id;
		}
	}

	// Still not enough ports... - due to sUsedPorts, this cannot really
	// happen anymore.
	panic("out of ports, but sUsedPorts is broken");
	return B_NO_MORE_PORTS;
}

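/*	Usage sketch (added for illustration, not part of the original file;
	error checking omitted):

		port_id port = create_port(16, "example port");

		int32 value = 42;
		write_port(port, 'exmp', &value, sizeof(value));

		int32 code;
		int32 received;
		read_port(port, &code, &received, sizeof(received));

		delete_port(port);

	The queue length passed to create_port() bounds the number of queued
	messages; once it is exhausted, writers block or time out.
*/
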
status_t
close_port(port_id id)
{
	TRACE(("close_port(id = %ld)\n", id));

	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;

	int32 slot = id % sMaxPorts;

	// get the port and check whether it is still valid
	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id) {
		TRACE(("close_port: invalid port_id %ld\n", id));
		return B_BAD_PORT_ID;
	}

	// mark the port to disable writing - the condition variable
	// notifications below wake up any waiting reads/writes
	sPorts[slot].capacity = 0;

	notify_port_select_events(slot, B_EVENT_INVALID);
	sPorts[slot].select_infos = NULL;

	sPorts[slot].read_condition.NotifyAll(false, B_BAD_PORT_ID);
	sPorts[slot].write_condition.NotifyAll(false, B_BAD_PORT_ID);

	return B_OK;
}


status_t
delete_port(port_id id)
{
	TRACE(("delete_port(id = %ld)\n", id));

	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;

	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id) {
		TRACE(("delete_port: invalid port_id %ld\n", id));
		return B_BAD_PORT_ID;
	}

	T(Delete(sPorts[slot]));

	{
		InterruptsSpinLocker teamLocker(gTeamSpinlock);
		list_remove_link(&sPorts[slot].team_link);
	}

	uninit_port_locked(sPorts[slot]);

	locker.Unlock();

	MutexLocker _(sPortsLock);

	// update the first free slot hint in the array
	if (slot < sFirstFreeSlot)
		sFirstFreeSlot = slot;

	sUsedPorts--;
	return B_OK;
}


status_t
select_port(int32 id, struct select_info* info, bool kernel)
{
	if (id < 0)
		return B_BAD_PORT_ID;

	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id || is_port_closed(slot))
		return B_BAD_PORT_ID;
	if (!kernel && sPorts[slot].owner == team_get_kernel_team_id()) {
		// kernel port, but call from userland
		return B_NOT_ALLOWED;
	}

	info->selected_events &= B_EVENT_READ | B_EVENT_WRITE | B_EVENT_INVALID;

	if (info->selected_events != 0) {
		uint16 events = 0;

		info->next = sPorts[slot].select_infos;
		sPorts[slot].select_infos = info;

		// check for events
		if ((info->selected_events & B_EVENT_READ) != 0
			&& !sPorts[slot].messages.IsEmpty()) {
			events |= B_EVENT_READ;
		}

		if (sPorts[slot].write_count > 0)
			events |= B_EVENT_WRITE;

		if (events != 0)
			notify_select_events(info, events);
	}

	return B_OK;
}


status_t
deselect_port(int32 id, struct select_info* info, bool kernel)
{
	if (id < 0)
		return B_BAD_PORT_ID;
	if (info->selected_events == 0)
		return B_OK;

	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id == id) {
		select_info** infoLocation = &sPorts[slot].select_infos;
		while (*infoLocation != NULL && *infoLocation != info)
			infoLocation = &(*infoLocation)->next;

		if (*infoLocation == info)
			*infoLocation = info->next;
	}

	return B_OK;
}

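/*	Note (added for illustration, not part of the original file):
	select_port()/deselect_port() are the kernel backend that lets a port be
	watched together with other objects, e.g. through the userland
	wait_for_objects() API. A sketch, assuming the usual object_wait_info
	layout:

		object_wait_info info;
		info.object = port;				// a port_id
		info.type = B_OBJECT_TYPE_PORT;
		info.events = B_EVENT_READ;
		wait_for_objects(&info, 1);		// returns once a message is queued

	B_EVENT_INVALID is delivered if the port is closed or deleted while it
	is being watched.
*/
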
port_id
find_port(const char* name)
{
	TRACE(("find_port(name = \"%s\")\n", name));

	if (!sPortsActive) {
		panic("ports used too early!\n");
		return B_NAME_NOT_FOUND;
	}
	if (name == NULL)
		return B_BAD_VALUE;

	// Since we have to check every single port, and we don't
	// care if it goes away at any point, we're only grabbing
	// the port lock in question, not the port list lock

	// loop over list
	for (int32 i = 0; i < sMaxPorts; i++) {
		// lock every individual port before comparing
		MutexLocker _(sPorts[i].lock);

		if (sPorts[i].id >= 0 && !strcmp(name, sPorts[i].lock.name))
			return sPorts[i].id;
	}

	return B_NAME_NOT_FOUND;
}


status_t
_get_port_info(port_id id, port_info* info, size_t size)
{
	TRACE(("get_port_info(id = %ld)\n", id));

	if (info == NULL || size != sizeof(port_info))
		return B_BAD_VALUE;
	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;

	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id || sPorts[slot].capacity == 0) {
		TRACE(("get_port_info: invalid port_id %ld\n", id));
		return B_BAD_PORT_ID;
	}

	// fill a port_info struct with info
	fill_port_info(&sPorts[slot], info, size);
	return B_OK;
}


status_t
_get_next_port_info(team_id team, int32* _cookie, struct port_info* info,
	size_t size)
{
	TRACE(("get_next_port_info(team = %ld)\n", team));

	if (info == NULL || size != sizeof(port_info) || _cookie == NULL
		|| team < B_OK)
		return B_BAD_VALUE;
	if (!sPortsActive)
		return B_BAD_PORT_ID;

	int32 slot = *_cookie;
	if (slot >= sMaxPorts)
		return B_BAD_PORT_ID;

	if (team == B_CURRENT_TEAM)
		team = team_get_current_team_id();

	info->port = -1; // used as found flag

	while (slot < sMaxPorts) {
		MutexLocker locker(sPorts[slot].lock);

		if (sPorts[slot].id != -1 && !is_port_closed(slot)
			&& sPorts[slot].owner == team) {
			// found one!
			fill_port_info(&sPorts[slot], info, size);
			slot++;
			break;
		}

		slot++;
	}

	if (info->port == -1)
		return B_BAD_PORT_ID;

	*_cookie = slot;
	return B_OK;
}


ssize_t
port_buffer_size(port_id id)
{
	return port_buffer_size_etc(id, 0, 0);
}


ssize_t
port_buffer_size_etc(port_id id, uint32 flags, bigtime_t timeout)
{
	port_message_info info;
	status_t error = get_port_message_info_etc(id, &info, flags, timeout);
	return error != B_OK ? error : info.size;
}


status_t
_get_port_message_info_etc(port_id id, port_message_info* info,
	size_t infoSize, uint32 flags, bigtime_t timeout)
{
	if (info == NULL || infoSize != sizeof(port_message_info))
		return B_BAD_VALUE;
	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;

	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
		| B_ABSOLUTE_TIMEOUT;
	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id
		|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
		T(Info(sPorts[slot], 0, B_BAD_PORT_ID));
		TRACE(("_get_port_message_info_etc(): %s port %ld\n",
			sPorts[slot].id == id ? "closed" : "invalid", id));
		return B_BAD_PORT_ID;
	}

	while (sPorts[slot].read_count == 0) {
		// We need to wait for a message to appear
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
			return B_WOULD_BLOCK;

		ConditionVariableEntry entry;
		sPorts[slot].read_condition.Add(&entry);

		locker.Unlock();

		// block if no message, or, if B_TIMEOUT flag set, block with timeout
		status_t status = entry.Wait(flags, timeout);

		if (status != B_OK) {
			T(Info(sPorts[slot], 0, status));
			return status;
		}

		locker.Lock();

		if (sPorts[slot].id != id
			|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
			// the port is no longer there
			T(Info(sPorts[slot], 0, B_BAD_PORT_ID));
			return B_BAD_PORT_ID;
		}
	}

	// look at the first message in the queue & get its length
	port_message* message = sPorts[slot].messages.Head();
	if (message == NULL) {
		panic("port %ld: no messages found\n", sPorts[slot].id);
		return B_ERROR;
	}

	info->size = message->size;
	info->sender = message->sender;
	info->sender_group = message->sender_group;
	info->sender_team = message->sender_team;

	T(Info(sPorts[slot], message->code, B_OK));

	// notify next one, as we haven't read from the port
	sPorts[slot].read_condition.NotifyOne();

	return B_OK;
}

ssize_t
port_count(port_id id)
{
	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;

	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id) {
		TRACE(("port_count: invalid port_id %ld\n", id));
		return B_BAD_PORT_ID;
	}

	// return count of messages
	return sPorts[slot].read_count;
}


ssize_t
read_port(port_id port, int32* msgCode, void* buffer, size_t bufferSize)
{
	return read_port_etc(port, msgCode, buffer, bufferSize, 0, 0);
}


ssize_t
read_port_etc(port_id id, int32* _code, void* buffer, size_t bufferSize,
	uint32 flags, bigtime_t timeout)
{
	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;
	if ((buffer == NULL && bufferSize > 0) || timeout < 0)
		return B_BAD_VALUE;

	bool userCopy = (flags & PORT_FLAG_USE_USER_MEMCPY) != 0;
	bool peekOnly = !userCopy && (flags & B_PEEK_PORT_MESSAGE) != 0;
		// TODO: we could allow peeking for user apps now

	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
		| B_ABSOLUTE_TIMEOUT;

	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id
		|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
		T(Read(sPorts[slot], 0, B_BAD_PORT_ID));
		TRACE(("read_port_etc(): %s port %ld\n",
			sPorts[slot].id == id ? "closed" : "invalid", id));
		return B_BAD_PORT_ID;
	}

	while (sPorts[slot].read_count == 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
			return B_WOULD_BLOCK;

		// We need to wait for a message to appear
		ConditionVariableEntry entry;
		sPorts[slot].read_condition.Add(&entry);

		locker.Unlock();

		// block if no message, or, if B_TIMEOUT flag set, block with timeout
		status_t status = entry.Wait(flags, timeout);

		locker.Lock();

		if (sPorts[slot].id != id
			|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
			// the port is no longer there
			T(Read(sPorts[slot], 0, B_BAD_PORT_ID));
			return B_BAD_PORT_ID;
		}

		if (status != B_OK) {
			T(Read(sPorts[slot], 0, status));
			return status;
		}
	}

	// grab the first message in the queue & get its length
	port_message* message = sPorts[slot].messages.Head();
	if (message == NULL) {
		panic("port %ld: no messages found\n", sPorts[slot].id);
		return B_ERROR;
	}

	if (peekOnly) {
		size_t size = copy_port_message(message, _code, buffer, bufferSize,
			userCopy);

		T(Read(sPorts[slot], message->code, size));

		sPorts[slot].read_condition.NotifyOne();
			// we only peeked, but didn't grab the message
		return size;
	}

	sPorts[slot].messages.RemoveHead();
	sPorts[slot].total_count++;
	sPorts[slot].write_count++;
	sPorts[slot].read_count--;

	notify_port_select_events(slot, B_EVENT_WRITE);
	sPorts[slot].write_condition.NotifyOne();
		// make one spot in queue available again for write

	locker.Unlock();

	size_t size = copy_port_message(message, _code, buffer, bufferSize,
		userCopy);
	T(Read(sPorts[slot], message->code, size));

	put_port_message(message);
	return size;
}


status_t
write_port(port_id id, int32 msgCode, const void* buffer, size_t bufferSize)
{
	iovec vec = { (void*)buffer, bufferSize };

	return writev_port_etc(id, msgCode, &vec, 1, bufferSize, 0, 0);
}


status_t
write_port_etc(port_id id, int32 msgCode, const void* buffer,
	size_t bufferSize, uint32 flags, bigtime_t timeout)
{
	iovec vec = { (void*)buffer, bufferSize };

	return writev_port_etc(id, msgCode, &vec, 1, bufferSize, flags, timeout);
}

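/*	Illustrative sketch (added for clarity, not part of the original file):
	writev_port_etc() gathers a message from several buffers, so a header
	and a payload can be sent without an intermediate copy:

		struct Header { int32 what; };
		Header header = { 1 };
		char body[16] = "payload";
		iovec vecs[2] = {
			{ &header, sizeof(header) },
			{ body, sizeof(body) }
		};
		writev_port_etc(port, 'exmp', vecs, 2, sizeof(header) + sizeof(body),
			B_RELATIVE_TIMEOUT, 1000000);
			// gives up after one second if the port's queue stays full
*/
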
status_t
writev_port_etc(port_id id, int32 msgCode, const iovec* msgVecs,
	size_t vecCount, size_t bufferSize, uint32 flags, bigtime_t timeout)
{
	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;
	if (bufferSize > PORT_MAX_MESSAGE_SIZE)
		return B_BAD_VALUE;

	// mask irrelevant flags
	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
		| B_ABSOLUTE_TIMEOUT;
	if ((flags & B_RELATIVE_TIMEOUT) != 0
		&& timeout != B_INFINITE_TIMEOUT && timeout > 0) {
		// Make the timeout absolute, since we have more than one step where
		// we might have to wait
		flags = (flags & ~B_RELATIVE_TIMEOUT) | B_ABSOLUTE_TIMEOUT;
		timeout += system_time();
	}

	bool userCopy = (flags & PORT_FLAG_USE_USER_MEMCPY) > 0;

	int32 slot = id % sMaxPorts;
	status_t status;
	port_message* message = NULL;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id) {
		TRACE(("write_port_etc: invalid port_id %ld\n", id));
		return B_BAD_PORT_ID;
	}
	if (is_port_closed(slot)) {
		TRACE(("write_port_etc: port %ld closed\n", id));
		return B_BAD_PORT_ID;
	}

	if (sPorts[slot].write_count <= 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
			return B_WOULD_BLOCK;

		sPorts[slot].write_count--;

		// We need to block in order to wait for a free message slot
		ConditionVariableEntry entry;
		sPorts[slot].write_condition.Add(&entry);

		locker.Unlock();

		status = entry.Wait(flags, timeout);

		locker.Lock();

		if (sPorts[slot].id != id || is_port_closed(slot)) {
			// the port is no longer there
			T(Write(sPorts[slot], 0, 0, B_BAD_PORT_ID));
			return B_BAD_PORT_ID;
		}

		if (status != B_OK)
			goto error;
	} else
		sPorts[slot].write_count--;

	status = get_port_message(msgCode, bufferSize, flags, timeout,
		&message);
	if (status != B_OK)
		goto error;

	// sender credentials
	message->sender = geteuid();
	message->sender_group = getegid();
	message->sender_team = team_get_current_team_id();

	if (bufferSize > 0) {
		uint32 i;
		// copy the vectors back to back into the message buffer
		char* target = message->buffer;
		if (userCopy) {
			// copy from user memory
			for (i = 0; i < vecCount; i++) {
				size_t bytes = msgVecs[i].iov_len;
				if (bytes > bufferSize)
					bytes = bufferSize;

				status = user_memcpy(target, msgVecs[i].iov_base, bytes);
				if (status != B_OK) {
					put_port_message(message);
					goto error;
				}

				target += bytes;
				bufferSize -= bytes;
				if (bufferSize == 0)
					break;
			}
		} else {
			// copy from kernel memory
			for (i = 0; i < vecCount; i++) {
				size_t bytes = msgVecs[i].iov_len;
				if (bytes > bufferSize)
					bytes = bufferSize;

				memcpy(target, msgVecs[i].iov_base, bytes);

				target += bytes;
				bufferSize -= bytes;
				if (bufferSize == 0)
					break;
			}
		}
	}

	sPorts[slot].messages.Add(message);
	sPorts[slot].read_count++;

	T(Write(sPorts[slot], message->code, message->size, B_OK));

	notify_port_select_events(slot, B_EVENT_READ);
	sPorts[slot].read_condition.NotifyOne();
	return B_OK;

error:
	// Give up our slot in the queue again, and let someone else
	// try and fail
	T(Write(sPorts[slot], 0, 0, status));
	sPorts[slot].write_count++;
	notify_port_select_events(slot, B_EVENT_WRITE);
	sPorts[slot].write_condition.NotifyOne();

	return status;
}


status_t
set_port_owner(port_id id, team_id newTeamID)
{
	TRACE(("set_port_owner(id = %ld, team = %ld)\n", id, newTeamID));

	if (id < 0)
		return B_BAD_PORT_ID;

	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id) {
		TRACE(("set_port_owner: invalid port_id %ld\n", id));
		return B_BAD_PORT_ID;
	}

	InterruptsSpinLocker teamLocker(gTeamSpinlock);

	struct team* team = team_get_team_struct_locked(newTeamID);
	if (team == NULL) {
		T(OwnerChange(sPorts[slot], newTeamID, B_BAD_TEAM_ID));
		return B_BAD_TEAM_ID;
	}

	// transfer ownership to other team
	list_remove_link(&sPorts[slot].team_link);
	list_add_item(&team->port_list, &sPorts[slot].team_link);
	sPorts[slot].owner = newTeamID;

	T(OwnerChange(sPorts[slot], newTeamID, B_OK));
	return B_OK;
}


// #pragma mark - syscalls


port_id
_user_create_port(int32 queueLength, const char *userName)
{
	char name[B_OS_NAME_LENGTH];

	if (userName == NULL)
		return create_port(queueLength, NULL);

	if (!IS_USER_ADDRESS(userName)
		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
		return B_BAD_ADDRESS;

	return create_port(queueLength, name);
}


status_t
_user_close_port(port_id id)
{
	return close_port(id);
}


status_t
_user_delete_port(port_id id)
{
	return delete_port(id);
}


port_id
_user_find_port(const char *userName)
{
	char name[B_OS_NAME_LENGTH];

	if (userName == NULL)
		return B_BAD_VALUE;
	if (!IS_USER_ADDRESS(userName)
		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
		return B_BAD_ADDRESS;

	return find_port(name);
}


status_t
_user_get_port_info(port_id id, struct port_info *userInfo)
{
	struct port_info info;
	status_t status;

	if (userInfo == NULL)
		return B_BAD_VALUE;
	if (!IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = get_port_info(id, &info);

	// copy back to user space
	if (status == B_OK
		&& user_memcpy(userInfo, &info, sizeof(struct port_info)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_next_port_info(team_id team, int32 *userCookie,
	struct port_info *userInfo)
{
	struct port_info info;
	status_t status;
	int32 cookie;

	if (userCookie == NULL || userInfo == NULL)
		return B_BAD_VALUE;
	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = get_next_port_info(team, &cookie, &info);

	// copy back to user space
	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
		|| (status == B_OK && user_memcpy(userInfo, &info,
				sizeof(struct port_info)) < B_OK))
		return B_BAD_ADDRESS;

	return status;
}


ssize_t
_user_port_buffer_size_etc(port_id port, uint32 flags, bigtime_t timeout)
{
	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t status = port_buffer_size_etc(port, flags | B_CAN_INTERRUPT,
		timeout);

	return syscall_restart_handle_timeout_post(status, timeout);
}


ssize_t
_user_port_count(port_id port)
{
	return port_count(port);
}


status_t
_user_set_port_owner(port_id port, team_id team)
{
	return set_port_owner(port, team);
}


ssize_t
_user_read_port_etc(port_id port, int32 *userCode, void *userBuffer,
	size_t bufferSize, uint32 flags, bigtime_t timeout)
{
	int32 messageCode;
	ssize_t bytesRead;

	syscall_restart_handle_timeout_pre(flags, timeout);

	if (userBuffer == NULL && bufferSize != 0)
		return B_BAD_VALUE;
	if ((userCode != NULL && !IS_USER_ADDRESS(userCode))
		|| (userBuffer != NULL && !IS_USER_ADDRESS(userBuffer)))
		return B_BAD_ADDRESS;

	bytesRead = read_port_etc(port, &messageCode, userBuffer, bufferSize,
		flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT, timeout);

	if (bytesRead >= 0 && userCode != NULL
		&& user_memcpy(userCode, &messageCode, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	return syscall_restart_handle_timeout_post(bytesRead, timeout);
}


status_t
_user_write_port_etc(port_id port, int32 messageCode, const void *userBuffer,
	size_t bufferSize, uint32 flags, bigtime_t timeout)
{
	iovec vec = { (void *)userBuffer, bufferSize };

	syscall_restart_handle_timeout_pre(flags, timeout);

	if (userBuffer == NULL && bufferSize != 0)
		return B_BAD_VALUE;
	if (userBuffer != NULL && !IS_USER_ADDRESS(userBuffer))
		return B_BAD_ADDRESS;

	status_t status = writev_port_etc(port, messageCode, &vec, 1, bufferSize,
		flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT, timeout);

	return syscall_restart_handle_timeout_post(status, timeout);
}


status_t
_user_writev_port_etc(port_id port, int32 messageCode, const iovec *userVecs,
	size_t vecCount, size_t bufferSize, uint32 flags, bigtime_t timeout)
{
	syscall_restart_handle_timeout_pre(flags, timeout);

	if (userVecs == NULL && bufferSize != 0)
		return B_BAD_VALUE;
	if (userVecs != NULL && !IS_USER_ADDRESS(userVecs))
		return B_BAD_ADDRESS;

	iovec *vecs = NULL;
	if (userVecs && vecCount != 0) {
		vecs = (iovec*)malloc(sizeof(iovec) * vecCount);
		if (vecs == NULL)
			return B_NO_MEMORY;

		if (user_memcpy(vecs, userVecs, sizeof(iovec) * vecCount) < B_OK) {
			free(vecs);
			return B_BAD_ADDRESS;
		}
	}

	status_t status = writev_port_etc(port, messageCode, vecs, vecCount,
		bufferSize, flags | PORT_FLAG_USE_USER_MEMCPY | B_CAN_INTERRUPT,
		timeout);

	free(vecs);
	return syscall_restart_handle_timeout_post(status, timeout);
}


status_t
_user_get_port_message_info_etc(port_id port, port_message_info *userInfo,
	size_t infoSize, uint32 flags, bigtime_t timeout)
{
	if (userInfo == NULL || infoSize != sizeof(port_message_info))
		return B_BAD_VALUE;

	syscall_restart_handle_timeout_pre(flags, timeout);

	port_message_info info;
	status_t error = _get_port_message_info_etc(port, &info, sizeof(info),
		flags | B_CAN_INTERRUPT, timeout);

	// copy info to userland
	if (error == B_OK && (!IS_USER_ADDRESS(userInfo)
			|| user_memcpy(userInfo, &info, sizeof(info)) != B_OK)) {
		error = B_BAD_ADDRESS;
	}

	return syscall_restart_handle_timeout_post(error, timeout);
}