/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Semaphore code */


#include <OS.h>

#include <sem.h>
#include <kernel.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <smp.h>
#include <int.h>
#include <arch/int.h>
#include <debug.h>
#include <listeners.h>
#include <scheduling_analysis.h>
#include <thread.h>
#include <team.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vfs.h>
#include <vm_page.h>
#include <boot/kernel_args.h>
#include <syscall_restart.h>
#include <wait_for_objects.h>

#include <string.h>
#include <stdlib.h>

#include "kernel_debug_config.h"


//#define TRACE_SEM
#ifdef TRACE_SEM
#	define TRACE(x) dprintf_no_syslog x
#else
#	define TRACE(x) ;
#endif

//#define KTRACE_SEM
#ifdef KTRACE_SEM
#	define KTRACE(x...) ktrace_printf(x)
#else
#	define KTRACE(x...) do {} while (false)
#endif


struct queued_thread : DoublyLinkedListLinkImpl<queued_thread> {
	queued_thread(struct thread *thread, int32 count)
		:
		thread(thread),
		count(count),
		queued(false)
	{
	}

	struct thread	*thread;
	int32			count;
	bool			queued;
};

typedef DoublyLinkedList<queued_thread> ThreadQueue;

struct sem_entry {
	sem_id		id;
	spinlock	lock;	// protects only the id field when unused
	union {
		// when slot in use
		struct {
			int32				count;
			int32				net_count;
									// count + acquisition count of all blocked
									// threads
			char				*name;
			team_id				owner;	// if set to -1, means owned by a port
			select_info			*select_infos;
			thread_id			last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
			int32				last_acquire_count;
			thread_id			last_releaser;
			int32				last_release_count;
#endif
		} used;

		// when slot unused
		struct {
			sem_id				next_id;
			struct sem_entry	*next;
		} unused;
	} u;

	ThreadQueue			queue;	// should be in u.used, but has a constructor
};

static const int32 kMaxSemaphores = 131072;
static int32 sMaxSems = 4096;
	// Final value is computed based on the amount of available memory
static int32 sUsedSems = 0;

static struct sem_entry *sSems = NULL;
static bool sSemsActive = false;
static struct sem_entry *sFreeSemsHead = NULL;
static struct sem_entry *sFreeSemsTail = NULL;

static spinlock sSemsSpinlock = B_SPINLOCK_INITIALIZER;
#define GRAB_SEM_LIST_LOCK()     acquire_spinlock(&sSemsSpinlock)
#define RELEASE_SEM_LIST_LOCK()  release_spinlock(&sSemsSpinlock)
#define GRAB_SEM_LOCK(s)         acquire_spinlock(&(s).lock)
#define RELEASE_SEM_LOCK(s)      release_spinlock(&(s).lock)
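
// Locking order, as acquired by the code below: the semaphore list lock
// (sSemsSpinlock) may be taken before a per-semaphore lock, and a
// per-semaphore lock before the thread lock -- never the other way around.
// Interrupts are disabled whenever one of these spinlocks is held.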


static int
dump_sem_list(int argc, char **argv)
{
	const char *name = NULL;
	team_id owner = -1;
	thread_id last = -1;
	int32 i;

	if (argc > 2) {
		if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
			owner = strtoul(argv[2], NULL, 0);
		else if (!strcmp(argv[1], "name"))
			name = argv[2];
		else if (!strcmp(argv[1], "last"))
			last = strtoul(argv[2], NULL, 0);
	} else if (argc > 1)
		owner = strtoul(argv[1], NULL, 0);

	kprintf("sem            id count   team   last  name\n");

	for (i = 0; i < sMaxSems; i++) {
		struct sem_entry *sem = &sSems[i];
		if (sem->id < 0
			|| (last != -1 && sem->u.used.last_acquirer != last)
			|| (name != NULL && strstr(sem->u.used.name, name) == NULL)
			|| (owner != -1 && sem->u.used.owner != owner))
			continue;

		kprintf("%p %6ld %5ld %6ld "
			"%6ld "
			" %s\n", sem, sem->id, sem->u.used.count,
			sem->u.used.owner,
			sem->u.used.last_acquirer > 0 ? sem->u.used.last_acquirer : 0,
			sem->u.used.name);
	}

	return 0;
}


static void
dump_sem(struct sem_entry *sem)
{
	kprintf("SEM: %p\n", sem);
	kprintf("id:      %ld (%#lx)\n", sem->id, sem->id);
	if (sem->id >= 0) {
		kprintf("name:    '%s'\n", sem->u.used.name);
		kprintf("owner:   %ld\n", sem->u.used.owner);
		kprintf("count:   %ld\n", sem->u.used.count);
		kprintf("queue:  ");
		if (!sem->queue.IsEmpty()) {
			ThreadQueue::Iterator it = sem->queue.GetIterator();
			while (queued_thread* entry = it.Next())
				kprintf(" %ld", entry->thread->id);
			kprintf("\n");
		} else
			kprintf(" -\n");

		set_debug_variable("_sem", (addr_t)sem);
		set_debug_variable("_semID", sem->id);
		set_debug_variable("_owner", sem->u.used.owner);

#if DEBUG_SEM_LAST_ACQUIRER
		kprintf("last acquired by: %ld, count: %ld\n",
			sem->u.used.last_acquirer, sem->u.used.last_acquire_count);
		kprintf("last released by: %ld, count: %ld\n",
			sem->u.used.last_releaser, sem->u.used.last_release_count);

		if (sem->u.used.last_releaser != 0)
			set_debug_variable("_releaser", sem->u.used.last_releaser);
		else
			unset_debug_variable("_releaser");
#else
		kprintf("last acquired by: %ld\n", sem->u.used.last_acquirer);
#endif

		if (sem->u.used.last_acquirer != 0)
			set_debug_variable("_acquirer", sem->u.used.last_acquirer);
		else
			unset_debug_variable("_acquirer");

	} else {
		kprintf("next:    %p\n", sem->u.unused.next);
		kprintf("next_id: %ld\n", sem->u.unused.next_id);
	}
}


static int
dump_sem_info(int argc, char **argv)
{
	bool found = false;
	addr_t num;
	int32 i;

	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	num = strtoul(argv[1], NULL, 0);

	if (IS_KERNEL_ADDRESS(num)) {
		dump_sem((struct sem_entry *)num);
		return 0;
	} else if (num >= 0) {
		uint32 slot = num % sMaxSems;
		if (sSems[slot].id != (int)num) {
			kprintf("sem %ld (%#lx) doesn't exist!\n", num, num);
			return 0;
		}

		dump_sem(&sSems[slot]);
		return 0;
	}

	// walk through the sem list, trying to match name
	for (i = 0; i < sMaxSems; i++) {
		if (sSems[i].u.used.name != NULL
			&& strcmp(argv[1], sSems[i].u.used.name) == 0) {
			dump_sem(&sSems[i]);
			found = true;
		}
	}

	if (!found)
		kprintf("sem \"%s\" doesn't exist!\n", argv[1]);
	return 0;
}
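
// The two functions above implement the KDL commands registered as "sems"
// and "sem" in haiku_sem_init() below; typical invocations would be e.g.
// "sems team 42", "sems name <part-of-name>", or "sem <id>".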


/*!	\brief Appends a semaphore slot to the free list.

	The semaphore list must be locked.
	The slot's id field is not changed. It should already be set to -1.

	\param slot The index of the semaphore slot.
	\param nextID The ID the slot will get when reused. If < 0, the \a slot
		index itself is used.
*/
static void
free_sem_slot(int slot, sem_id nextID)
{
	struct sem_entry *sem = sSems + slot;
	// set next_id to the next possible value; if nextID is invalid, fall
	// back to the slot index
	if (nextID < 0)
		sem->u.unused.next_id = slot;
	else
		sem->u.unused.next_id = nextID;
	// append the entry to the list
	if (sFreeSemsTail)
		sFreeSemsTail->u.unused.next = sem;
	else
		sFreeSemsHead = sem;
	sFreeSemsTail = sem;
	sem->u.unused.next = NULL;
}


static inline void
notify_sem_select_events(struct sem_entry* sem, uint16 events)
{
	if (sem->u.used.select_infos)
		notify_select_events_list(sem->u.used.select_infos, events);
}


/*!	Fills the sem_info structure with information from the specified
	semaphore.
	The semaphore lock must be held when called.
*/
static void
fill_sem_info(struct sem_entry *sem, sem_info *info, size_t size)
{
	info->sem = sem->id;
	info->team = sem->u.used.owner;
	strlcpy(info->name, sem->u.used.name, sizeof(info->name));
	info->count = sem->u.used.count;
	info->latest_holder = sem->u.used.last_acquirer;
}


static status_t
delete_sem_internal(sem_id id, bool checkPermission)
{
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;

	int32 slot = id % sMaxSems;

	cpu_status state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("delete_sem: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	if (checkPermission
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		dprintf("thread %ld tried to delete kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		return B_NOT_ALLOWED;
	}

	KTRACE("delete_sem(sem: %ld)", id);

	notify_sem_select_events(&sSems[slot], B_EVENT_INVALID);
	sSems[slot].u.used.select_infos = NULL;

	// free any threads waiting for this semaphore
	GRAB_THREAD_LOCK();
	while (queued_thread* entry = sSems[slot].queue.RemoveHead()) {
		entry->queued = false;
		thread_unblock_locked(entry->thread, B_BAD_SEM_ID);
	}
	RELEASE_THREAD_LOCK();

	sSems[slot].id = -1;
	char *name = sSems[slot].u.used.name;
	sSems[slot].u.used.name = NULL;

	RELEASE_SEM_LOCK(sSems[slot]);

	// append slot to the free list
	GRAB_SEM_LIST_LOCK();
	free_sem_slot(slot, id + sMaxSems);
	atomic_add(&sUsedSems, -1);
	RELEASE_SEM_LIST_LOCK();

	restore_interrupts(state);

	free(name);

	return B_OK;
}
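
// Note that deleting a semaphore wakes all of its waiters: every thread
// blocked in acquire_sem()/acquire_sem_etc() on it returns B_BAD_SEM_ID.
// This is occasionally used deliberately to abort a blocked thread.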


// #pragma mark - Private Kernel API


status_t
haiku_sem_init(kernel_args *args)
{
	area_id area;
	int32 i;

	TRACE(("sem_init: entry\n"));

	// compute maximal number of semaphores depending on the available memory
	// 128 MB -> 16384 semaphores, 448 kB fixed array size
	// 256 MB -> 32768, 896 kB
	// 512 MB -> 65536, 1.75 MB
	// 1024 MB and more -> 131072, 3.5 MB
	i = vm_page_num_pages() / 2;
	while (sMaxSems < i && sMaxSems < kMaxSemaphores)
		sMaxSems <<= 1;

	// create and initialize semaphore table
	area = create_area("sem_table", (void **)&sSems, B_ANY_KERNEL_ADDRESS,
		sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		panic("unable to allocate semaphore table!\n");

	memset(sSems, 0, sizeof(struct sem_entry) * sMaxSems);
	for (i = 0; i < sMaxSems; i++) {
		sSems[i].id = -1;
		free_sem_slot(i, i);
	}

	// add debugger commands
	add_debugger_command_etc("sems", &dump_sem_list,
		"Dump a list of all active semaphores (for team, with name, etc.)",
		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]"
			" | (\"last\" <last acquirer>)\n"
		"Prints a list of all active semaphores meeting the given\n"
		"requirement. If no argument is given, all sems are listed.\n"
		"  <team>           - The team owning the semaphores.\n"
		"  <name>           - Part of the name of the semaphores.\n"
		"  <last acquirer>  - The thread that last acquired the semaphore.\n"
		, 0);
	add_debugger_command_etc("sem", &dump_sem_info,
		"Dump info about a particular semaphore",
		"<sem>\n"
		"Prints info about the specified semaphore.\n"
		"  <sem>  - pointer to the semaphore structure, semaphore ID, or name\n"
		"           of the semaphore to print info for.\n", 0);

	TRACE(("sem_init: exit\n"));

	sSemsActive = true;

	return 0;
}


/*!	Creates a semaphore with the given parameters.
	Note that the team_id is not checked; it must be correct, or else
	that semaphore might not be deleted.
	This function is only available from within the kernel, and
	should not be made public - if possible, we should remove it
	completely (and have only create_sem() exported).
*/
sem_id
create_sem_etc(int32 count, const char *name, team_id owner)
{
	struct sem_entry *sem = NULL;
	cpu_status state;
	sem_id id = B_NO_MORE_SEMS;
	char *tempName;
	size_t nameLength;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

#if 0
	// TODO: the code below might cause unwanted deadlocks,
	// we need an asynchronously running low resource handler.
	if (sUsedSems == sMaxSems) {
		// The vnode cache may have collected lots of semaphores.
		// Freeing some unused vnodes should improve our situation.
		// TODO: maybe create a generic "low resources" handler, instead
		// of only the specialised low memory thing?
		vfs_free_unused_vnodes(B_LOW_MEMORY_WARNING);
	}
	if (sUsedSems == sMaxSems) {
		// try again with more enthusiasm
		vfs_free_unused_vnodes(B_LOW_MEMORY_CRITICAL);
	}
#endif
	if (sUsedSems == sMaxSems)
		return B_NO_MORE_SEMS;

	if (name == NULL)
		name = "unnamed semaphore";

	nameLength = strlen(name) + 1;
	nameLength = min_c(nameLength, B_OS_NAME_LENGTH);
	tempName = (char *)malloc(nameLength);
	if (tempName == NULL)
		return B_NO_MEMORY;
	strlcpy(tempName, name, nameLength);

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	// get the first slot from the free list
	sem = sFreeSemsHead;
	if (sem) {
		// remove it from the free list
		sFreeSemsHead = sem->u.unused.next;
		if (!sFreeSemsHead)
			sFreeSemsTail = NULL;

		// init the slot
		GRAB_SEM_LOCK(*sem);
		sem->id = sem->u.unused.next_id;
		sem->u.used.count = count;
		sem->u.used.net_count = count;
		new(&sem->queue) ThreadQueue;
		sem->u.used.name = tempName;
		sem->u.used.owner = owner;
		sem->u.used.select_infos = NULL;
		id = sem->id;
		RELEASE_SEM_LOCK(*sem);

		atomic_add(&sUsedSems, 1);

		KTRACE("create_sem_etc(count: %ld, name: %s, owner: %ld) -> %ld",
			count, name, owner, id);

		T_SCHEDULING_ANALYSIS(CreateSemaphore(id, name));
		NotifyWaitObjectListeners(&WaitObjectListener::SemaphoreCreated, id,
			name);
	}

	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	if (!sem)
		free(tempName);

	return id;
}
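
/*	Typical in-kernel usage (illustrative sketch only; the name is made up):

		sem_id sem = create_sem_etc(0, "my wait sem",
			team_get_kernel_team_id());
		if (sem < B_OK)
			return sem;
		...
		delete_sem(sem);

	A count of 0 creates a semaphore whose first acquirer blocks until
	someone calls release_sem(); a positive count allows that many
	acquisitions to succeed immediately.
*/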


status_t
select_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;
	status_t error = B_OK;

	if (id < 0)
		return B_BAD_SEM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		// bad sem ID
		error = B_BAD_SEM_ID;
	} else if (!kernel
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		// kernel semaphore, but call from userland
		error = B_NOT_ALLOWED;
	} else {
		info->selected_events &= B_EVENT_ACQUIRE_SEMAPHORE | B_EVENT_INVALID;

		if (info->selected_events != 0) {
			info->next = sSems[slot].u.used.select_infos;
			sSems[slot].u.used.select_infos = info;

			if (sSems[slot].u.used.count > 0)
				notify_select_events(info, B_EVENT_ACQUIRE_SEMAPHORE);
		}
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return error;
}


status_t
deselect_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;

	if (id < 0)
		return B_BAD_SEM_ID;

	if (info->selected_events == 0)
		return B_OK;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id == id) {
		select_info** infoLocation = &sSems[slot].u.used.select_infos;
		while (*infoLocation != NULL && *infoLocation != info)
			infoLocation = &(*infoLocation)->next;

		if (*infoLocation == info)
			*infoLocation = info->next;
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}
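
// select_sem()/deselect_sem() back the semaphore support of
// wait_for_objects(): a waiter registers its select_info here and is
// notified with B_EVENT_ACQUIRE_SEMAPHORE once the count goes positive,
// or with B_EVENT_INVALID when the semaphore is deleted.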


/*!	Forcibly removes a thread from a semaphore's wait queue. May have to wake
	up other threads in the process.
	Must be called with the semaphore lock held. The thread lock must not be
	held.
*/
static void
remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
{
	if (!entry->queued)
		return;

	sem->queue.Remove(entry);
	entry->queued = false;
	sem->u.used.count += entry->count;

	// We're done with this entry. We only have to check whether other
	// threads need unblocking, too.

	// Now see if more threads need to be woken up. We get the thread lock for
	// that time, so the blocking state of threads won't change. We need that
	// lock anyway when unblocking a thread.
	GRAB_THREAD_LOCK();

	while ((entry = sem->queue.Head()) != NULL) {
		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied, unblock
			// it. Otherwise we can't unblock any other thread.
			if (entry->count > sem->u.used.net_count)
				break;

			thread_unblock_locked(entry->thread, B_OK);
			sem->u.used.net_count -= entry->count;
		} else {
			// The thread is no longer waiting, but still queued, which means
			// its acquisition failed and we can just remove it.
			sem->u.used.count += entry->count;
		}

		sem->queue.Remove(entry);
		entry->queued = false;
	}

	RELEASE_THREAD_LOCK();

	// select notification, if the semaphore is now acquirable
	if (sem->u.used.count > 0)
		notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE);
}


/*!	This function cycles through the sem table, deleting all the sems
	that are owned by the specified team.
*/
int
sem_delete_owned_sems(team_id owner)
{
	int state;
	int i;
	int count = 0;

	// ToDo: that looks horribly inefficient - maybe it would be better
	// to have them in a list in the team

	if (owner < 0)
		return B_BAD_TEAM_ID;

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	for (i = 0; i < sMaxSems; i++) {
		if (sSems[i].id != -1 && sSems[i].u.used.owner == owner) {
			sem_id id = sSems[i].id;

			RELEASE_SEM_LIST_LOCK();
			restore_interrupts(state);

			delete_sem(id);
			count++;

			state = disable_interrupts();
			GRAB_SEM_LIST_LOCK();
		}
	}

	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	return count;
}


int32
sem_max_sems(void)
{
	return sMaxSems;
}


int32
sem_used_sems(void)
{
	return sUsedSems;
}


// #pragma mark - Public Kernel API


sem_id
create_sem(int32 count, const char *name)
{
	return create_sem_etc(count, name, team_get_kernel_team_id());
}


status_t
delete_sem(sem_id id)
{
	return delete_sem_internal(id, false);
}


status_t
acquire_sem(sem_id id)
{
	return switch_sem_etc(-1, id, 1, 0, 0);
}


status_t
acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	return switch_sem_etc(-1, id, count, flags, timeout);
}


status_t
switch_sem(sem_id toBeReleased, sem_id toBeAcquired)
{
	return switch_sem_etc(toBeReleased, toBeAcquired, 1, 0, 0);
}
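
/*	A minimal usage sketch (illustrative) for acquiring with a relative
	timeout, given a valid sem_id "sem":

		status_t status = acquire_sem_etc(sem, 1, B_RELATIVE_TIMEOUT,
			100000LL);	// wait at most 100 ms
		if (status == B_WOULD_BLOCK || status == B_TIMED_OUT)
			;	// the semaphore could not be acquired in time

	With B_RELATIVE_TIMEOUT and a timeout of 0 the call returns
	B_WOULD_BLOCK immediately instead of blocking (see switch_sem_etc()
	below).
*/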


status_t
switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
	uint32 flags, bigtime_t timeout)
{
	int slot = id % sMaxSems;
	int state;
	status_t status = B_OK;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	if (!are_interrupts_enabled()) {
		panic("switch_sem_etc: called with interrupts disabled for sem %ld\n",
			id);
	}

	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0
		|| (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
			== (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {
		return B_BAD_VALUE;
	}

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("switch_sem_etc: bad sem %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	// doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to acquire kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		status = B_NOT_ALLOWED;
		goto err;
	}

	if (sSems[slot].u.used.count - count < 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
			// immediate timeout
			status = B_WOULD_BLOCK;
			goto err;
		} else if ((flags & B_ABSOLUTE_TIMEOUT) != 0 && timeout < 0) {
			// absolute negative timeout
			status = B_TIMED_OUT;
			goto err;
		}
	}

	KTRACE("switch_sem_etc(semToBeReleased: %ld, sem: %ld, count: %ld, "
		"flags: 0x%lx, timeout: %lld)", semToBeReleased, id, count, flags,
		timeout);

	if ((sSems[slot].u.used.count -= count) < 0) {
		// we need to block
		struct thread *thread = thread_get_current_thread();

		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
			" name = %s\n", id, sSems[slot].u.used.name, thread,
			thread->name));

		// do a quick check to see if the thread has any pending signals
		// this should catch most of the cases where the thread had a signal
		if (thread_is_interrupted(thread, flags)) {
			sSems[slot].u.used.count += count;
			status = B_INTERRUPTED;
				// the other semaphore will be released later
			goto err;
		}

		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
			timeout = B_INFINITE_TIMEOUT;

		// enqueue in the semaphore queue and get ready to wait
		queued_thread queueEntry(thread, count);
		sSems[slot].queue.Add(&queueEntry);
		queueEntry.queued = true;

		thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE,
			(void*)(addr_t)id);

		RELEASE_SEM_LOCK(sSems[slot]);
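
		// Note that thread_prepare_to_block() was called before the
		// semaphore lock was dropped, so a concurrent release_sem_etc()
		// that unblocks us between here and thread_block_locked() cannot
		// be lost -- the subsequent block call simply returns immediately.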

		// release the other semaphore, if any
		if (semToBeReleased >= 0) {
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
			semToBeReleased = -1;
		}

		GRAB_THREAD_LOCK();

		status_t acquireStatus = timeout == B_INFINITE_TIMEOUT
			? thread_block_locked(thread)
			: thread_block_with_timeout_locked(flags, timeout);

		RELEASE_THREAD_LOCK();
		GRAB_SEM_LOCK(sSems[slot]);

		// If we're still queued, this means the acquisition failed, and we
		// need to remove our entry and (potentially) wake up other threads.
		if (queueEntry.queued)
			remove_thread_from_sem(&queueEntry, &sSems[slot]);

		if (acquireStatus >= B_OK) {
			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
			sSems[slot].u.used.last_acquire_count = count;
#endif
		}

		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);

		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
			thread->name));
		KTRACE("switch_sem_etc() done: 0x%lx", acquireStatus);
		return acquireStatus;
	} else {
		sSems[slot].u.used.net_count -= count;
		sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
		sSems[slot].u.used.last_acquire_count = count;
#endif
	}

err:
	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	if (status == B_INTERRUPTED && semToBeReleased >= B_OK) {
		// depending on when we were interrupted, we still need to release
		// the semaphore to always leave it in a consistent state
		release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
	}

#if 0
	if (status == B_NOT_ALLOWED)
		_user_debugger("Thread tried to acquire kernel semaphore.");
#endif

	KTRACE("switch_sem_etc() done: 0x%lx", status);

	return status;
}
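
/*	switch_sem()/switch_sem_etc() atomically release one semaphore and block
	on another, avoiding the wakeup-miss window of a separate release_sem()
	plus acquire_sem() pair. An illustrative sketch (names made up):

		// hand off a lock and wait for a signal in one step
		status_t status = switch_sem(dataLock, wakeupSem);

	The release is performed only after the current thread has been queued
	on wakeupSem, so a notifier that takes dataLock and then releases
	wakeupSem cannot slip in between.
*/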


status_t
release_sem(sem_id id)
{
	return release_sem_etc(id, 1, 0);
}


status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	int32 slot = id % sMaxSems;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;

	InterruptsLocker _;
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	// doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		return B_NOT_ALLOWED;
	}

	KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,
		flags);

	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	if (flags & B_RELEASE_ALL) {
		count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			return B_OK;

		// Don't release more than necessary -- there might be interrupted/
		// timed out threads in the queue.
		flags |= B_RELEASE_IF_WAITING_ONLY;
	}

	struct thread* currentThread = thread_get_current_thread();
	bool reschedule = false;

	SpinLocker threadLocker(gThreadSpinlock);

	while (count > 0) {
		queued_thread* entry = sSems[slot].queue.Head();
		if (entry == NULL) {
			if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
			}
			break;
		}

		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied,
			// unblock it. Otherwise we can't unblock any other thread.
			if (entry->count > sSems[slot].u.used.net_count + count) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
				break;
			}

			thread_unblock_locked(entry->thread, B_OK);

			int delta = min_c(count, entry->count);
			sSems[slot].u.used.count += delta;
			sSems[slot].u.used.net_count += delta - entry->count;
			count -= delta;
			reschedule |= entry->thread->priority > currentThread->priority;
		} else {
			// The thread is no longer waiting, but still queued, which
			// means its acquisition failed and we can just remove it.
			sSems[slot].u.used.count += entry->count;
		}

		sSems[slot].queue.Remove(entry);
		entry->queued = false;
	}

	threadLocker.Unlock();

	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	// If we've unblocked another thread, reschedule, unless we've been
	// explicitly told not to.
	if (reschedule && (flags & B_DO_NOT_RESCHEDULE) == 0) {
		semLocker.Unlock();
		threadLocker.Lock();
		scheduler_reschedule();
	}

	return B_OK;
}
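
/*	A note on B_RELEASE_ALL (illustrative): the passed-in count is ignored
	and recomputed as the total acquisition count of the currently blocked
	threads, so e.g.

		release_sem_etc(sem, 0, B_RELEASE_ALL);

	wakes waiters from the head of the queue until one's request cannot be
	satisfied, without raising the count for future acquirers
	(B_RELEASE_IF_WAITING_ONLY is set implicitly).
*/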


status_t
get_sem_count(sem_id id, int32 *_count)
{
	int slot;
	int state;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (_count == NULL)
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("sem_get_count: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	*_count = sSems[slot].u.used.count;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}


/*!	Called by the get_sem_info() macro. */
status_t
_get_sem_info(sem_id id, struct sem_info *info, size_t size)
{
	status_t status = B_OK;
	int state;
	int slot;

	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		status = B_BAD_SEM_ID;
		TRACE(("get_sem_info: invalid sem_id %ld\n", id));
	} else
		fill_sem_info(&sSems[slot], info, size);

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return status;
}


/*!	Called by the get_next_sem_info() macro. */
status_t
_get_next_sem_info(team_id team, int32 *_cookie, struct sem_info *info,
	size_t size)
{
	int state;
	int slot;
	bool found = false;

	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (_cookie == NULL || info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;

	if (team == B_CURRENT_TEAM)
		team = team_get_current_team_id();
	// a negative team ID would match unused slots: owner == -1 means the
	// semaphore is owned by a port
	if (team < 0 || !team_is_valid(team))
		return B_BAD_TEAM_ID;

	slot = *_cookie;
	if (slot >= sMaxSems)
		return B_BAD_VALUE;

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	while (slot < sMaxSems) {
		if (sSems[slot].id != -1 && sSems[slot].u.used.owner == team) {
			GRAB_SEM_LOCK(sSems[slot]);
			if (sSems[slot].id != -1 && sSems[slot].u.used.owner == team) {
				// found one!
				fill_sem_info(&sSems[slot], info, size);

				RELEASE_SEM_LOCK(sSems[slot]);
				slot++;
				found = true;
				break;
			}
			RELEASE_SEM_LOCK(sSems[slot]);
		}
		slot++;
	}
	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	if (!found)
		return B_BAD_VALUE;

	*_cookie = slot;
	return B_OK;
}
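
/*	The cookie is simply the next slot index to inspect, so callers iterate
	like this (illustrative sketch):

		int32 cookie = 0;
		sem_info info;
		while (_get_next_sem_info(team, &cookie, &info, sizeof(info)) == B_OK)
			dprintf("sem %ld: %s\n", info.sem, info.name);
*/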


status_t
set_sem_owner(sem_id id, team_id team)
{
	int state;
	int slot;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (team < 0 || !team_is_valid(team))
		return B_BAD_TEAM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("set_sem_owner: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// ToDo: this is a small race condition: the team ID could already
	// be invalid at this point - we would lose one semaphore slot in
	// this case!
	// The only safe way to do this is to prevent either team (the new
	// or the old owner) from dying until we leave the spinlock.
	sSems[slot].u.used.owner = team;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_NO_ERROR;
}


/*!	Returns the name of the semaphore. The name is not copied, so the caller
	must make sure that the semaphore remains alive as long as the name is
	used.
*/
const char*
sem_get_name_unsafe(sem_id id)
{
	int slot = id % sMaxSems;

	if (sSemsActive == false || id < 0 || sSems[slot].id != id)
		return NULL;

	return sSems[slot].u.used.name;
}


// #pragma mark - Syscalls


sem_id
_user_create_sem(int32 count, const char *userName)
{
	char name[B_OS_NAME_LENGTH];

	if (userName == NULL)
		return create_sem_etc(count, NULL, team_get_current_team_id());

	if (!IS_USER_ADDRESS(userName)
		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
		return B_BAD_ADDRESS;

	return create_sem_etc(count, name, team_get_current_team_id());
}


status_t
_user_delete_sem(sem_id id)
{
	return delete_sem_internal(id, true);
}


status_t
_user_acquire_sem(sem_id id)
{
	status_t error = switch_sem_etc(-1, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	return syscall_restart_handle_post(error);
}


status_t
_user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(-1, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	return syscall_restart_handle_timeout_post(error, timeout);
}


status_t
_user_switch_sem(sem_id releaseSem, sem_id id)
{
	status_t error = switch_sem_etc(releaseSem, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	if (releaseSem < 0)
		return syscall_restart_handle_post(error);

	return error;
}


status_t
_user_switch_sem_etc(sem_id releaseSem, sem_id id, int32 count, uint32 flags,
	bigtime_t timeout)
{
	if (releaseSem < 0)
		syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(releaseSem, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	if (releaseSem < 0)
		return syscall_restart_handle_timeout_post(error, timeout);

	return error;
}
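
// The syscall_restart_handle_*() helpers above are meant to make an
// interrupted acquire transparently restartable: if the call returns
// B_INTERRUPTED for a signal whose handler allows restarts, the syscall is
// re-executed, with a relative timeout adjusted for the time already
// waited. Note that restarting is skipped once a semaphore has actually
// been released (releaseSem >= 0), since releasing it a second time would
// corrupt its count.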


status_t
_user_release_sem(sem_id id)
{
	return release_sem_etc(id, 1, B_CHECK_PERMISSION);
}


status_t
_user_release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	return release_sem_etc(id, count, flags | B_CHECK_PERMISSION);
}


status_t
_user_get_sem_count(sem_id id, int32 *userCount)
{
	status_t status;
	int32 count;

	if (userCount == NULL || !IS_USER_ADDRESS(userCount))
		return B_BAD_ADDRESS;

	status = get_sem_count(id, &count);
	if (status == B_OK && user_memcpy(userCount, &count, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_sem_info(sem_id id, struct sem_info *userInfo, size_t size)
{
	struct sem_info info;
	status_t status;

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_sem_info(id, &info, size);
	if (status == B_OK && user_memcpy(userInfo, &info, size) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_next_sem_info(team_id team, int32 *userCookie,
	struct sem_info *userInfo, size_t size)
{
	struct sem_info info;
	int32 cookie;
	status_t status;

	if (userCookie == NULL || userInfo == NULL
		|| !IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_sem_info(team, &cookie, &info, size);

	if (status == B_OK) {
		if (user_memcpy(userInfo, &info, size) < B_OK
			|| user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK)
			return B_BAD_ADDRESS;
	}

	return status;
}


status_t
_user_set_sem_owner(sem_id id, team_id team)
{
	return set_sem_owner(id, team);
}