1 /* 2 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de. 3 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de. 4 * Distributed under the terms of the MIT License. 5 * 6 * Copyright 2001, Travis Geiselbrecht. All rights reserved. 7 * Distributed under the terms of the NewOS License. 8 */ 9 10 11 /*! Semaphore code */ 12 13 14 #include <sem.h> 15 16 #include <stdlib.h> 17 #include <string.h> 18 19 #include <OS.h> 20 21 #include <arch/int.h> 22 #include <boot/kernel_args.h> 23 #include <cpu.h> 24 #include <debug.h> 25 #include <int.h> 26 #include <kernel.h> 27 #include <ksignal.h> 28 #include <kscheduler.h> 29 #include <listeners.h> 30 #include <scheduling_analysis.h> 31 #include <smp.h> 32 #include <syscall_restart.h> 33 #include <team.h> 34 #include <thread.h> 35 #include <util/AutoLock.h> 36 #include <util/DoublyLinkedList.h> 37 #include <vfs.h> 38 #include <vm/vm_page.h> 39 #include <wait_for_objects.h> 40 41 #include "kernel_debug_config.h" 42 43 44 //#define TRACE_SEM 45 #ifdef TRACE_SEM 46 # define TRACE(x) dprintf_no_syslog x 47 #else 48 # define TRACE(x) ; 49 #endif 50 51 //#define KTRACE_SEM 52 #ifdef KTRACE_SEM 53 # define KTRACE(x...) ktrace_printf(x) 54 #else 55 # define KTRACE(x...) do {} while (false) 56 #endif 57 58 59 // Locking: 60 // * sSemsSpinlock: Protects the semaphore free list (sFreeSemsHead, 61 // sFreeSemsTail), Team::sem_list, and together with sem_entry::lock 62 // write access to sem_entry::owner/team_link. 63 // * sem_entry::lock: Protects all sem_entry members. owner, team_link 64 // additional need sSemsSpinlock for write access. 65 // lock itself doesn't need protection -- sem_entry objects are never deleted. 66 // 67 // The locking order is sSemsSpinlock -> sem_entry::lock -> scheduler lock. All 68 // semaphores are in the sSems array (sem_entry[]). 
Access by sem_id requires 69 // computing the object index (id % sMaxSems), locking the respective 70 // sem_entry::lock and verifying that sem_entry::id matches afterwards. 71 72 73 struct queued_thread : DoublyLinkedListLinkImpl<queued_thread> { 74 queued_thread(Thread *thread, int32 count) 75 : 76 thread(thread), 77 count(count), 78 queued(false) 79 { 80 } 81 82 Thread *thread; 83 int32 count; 84 bool queued; 85 }; 86 87 typedef DoublyLinkedList<queued_thread> ThreadQueue; 88 89 struct sem_entry { 90 union { 91 // when slot in use 92 struct { 93 struct list_link team_link; 94 int32 count; 95 int32 net_count; 96 // count + acquisition count of all blocked 97 // threads 98 char* name; 99 team_id owner; 100 select_info* select_infos; 101 thread_id last_acquirer; 102 #if DEBUG_SEM_LAST_ACQUIRER 103 int32 last_acquire_count; 104 thread_id last_releaser; 105 int32 last_release_count; 106 #endif 107 } used; 108 109 // when slot unused 110 struct { 111 sem_id next_id; 112 struct sem_entry* next; 113 } unused; 114 } u; 115 116 sem_id id; 117 spinlock lock; // protects only the id field when unused 118 ThreadQueue queue; // should be in u.used, but has a constructor 119 }; 120 121 static const int32 kMaxSemaphores = 65536; 122 static int32 sMaxSems = 4096; 123 // Final value is computed based on the amount of available memory 124 static int32 sUsedSems = 0; 125 126 static struct sem_entry *sSems = NULL; 127 static bool sSemsActive = false; 128 static struct sem_entry *sFreeSemsHead = NULL; 129 static struct sem_entry *sFreeSemsTail = NULL; 130 131 static spinlock sSemsSpinlock = B_SPINLOCK_INITIALIZER; 132 #define GRAB_SEM_LIST_LOCK() acquire_spinlock(&sSemsSpinlock) 133 #define RELEASE_SEM_LIST_LOCK() release_spinlock(&sSemsSpinlock) 134 #define GRAB_SEM_LOCK(s) acquire_spinlock(&(s).lock) 135 #define RELEASE_SEM_LOCK(s) release_spinlock(&(s).lock) 136 137 138 static int 139 dump_sem_list(int argc, char** argv) 140 { 141 const char* name = NULL; 142 team_id owner = -1; 
143 thread_id last = -1; 144 int32 i; 145 146 if (argc > 2) { 147 if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner")) 148 owner = strtoul(argv[2], NULL, 0); 149 else if (!strcmp(argv[1], "name")) 150 name = argv[2]; 151 else if (!strcmp(argv[1], "last")) 152 last = strtoul(argv[2], NULL, 0); 153 } else if (argc > 1) 154 owner = strtoul(argv[1], NULL, 0); 155 156 kprintf("sem id count team last name\n"); 157 158 for (i = 0; i < sMaxSems; i++) { 159 struct sem_entry* sem = &sSems[i]; 160 if (sem->id < 0 161 || (last != -1 && sem->u.used.last_acquirer != last) 162 || (name != NULL && strstr(sem->u.used.name, name) == NULL) 163 || (owner != -1 && sem->u.used.owner != owner)) 164 continue; 165 166 kprintf("%p %6ld %5ld %6ld " 167 "%6ld " 168 " %s\n", sem, sem->id, sem->u.used.count, 169 sem->u.used.owner, 170 sem->u.used.last_acquirer > 0 ? sem->u.used.last_acquirer : 0, 171 sem->u.used.name); 172 } 173 174 return 0; 175 } 176 177 178 static void 179 dump_sem(struct sem_entry* sem) 180 { 181 kprintf("SEM: %p\n", sem); 182 kprintf("id: %ld (%#lx)\n", sem->id, sem->id); 183 if (sem->id >= 0) { 184 kprintf("name: '%s'\n", sem->u.used.name); 185 kprintf("owner: %ld\n", sem->u.used.owner); 186 kprintf("count: %ld\n", sem->u.used.count); 187 kprintf("queue: "); 188 if (!sem->queue.IsEmpty()) { 189 ThreadQueue::Iterator it = sem->queue.GetIterator(); 190 while (queued_thread* entry = it.Next()) 191 kprintf(" %ld", entry->thread->id); 192 kprintf("\n"); 193 } else 194 kprintf(" -\n"); 195 196 set_debug_variable("_sem", (addr_t)sem); 197 set_debug_variable("_semID", sem->id); 198 set_debug_variable("_owner", sem->u.used.owner); 199 200 #if DEBUG_SEM_LAST_ACQUIRER 201 kprintf("last acquired by: %ld, count: %ld\n", 202 sem->u.used.last_acquirer, sem->u.used.last_acquire_count); 203 kprintf("last released by: %ld, count: %ld\n", 204 sem->u.used.last_releaser, sem->u.used.last_release_count); 205 206 if (sem->u.used.last_releaser != 0) 207 set_debug_variable("_releaser", 
sem->u.used.last_releaser); 208 else 209 unset_debug_variable("_releaser"); 210 #else 211 kprintf("last acquired by: %ld\n", sem->u.used.last_acquirer); 212 #endif 213 214 if (sem->u.used.last_acquirer != 0) 215 set_debug_variable("_acquirer", sem->u.used.last_acquirer); 216 else 217 unset_debug_variable("_acquirer"); 218 } else { 219 kprintf("next: %p\n", sem->u.unused.next); 220 kprintf("next_id: %ld\n", sem->u.unused.next_id); 221 } 222 } 223 224 225 static int 226 dump_sem_info(int argc, char **argv) 227 { 228 bool found = false; 229 addr_t num; 230 int32 i; 231 232 if (argc < 2) { 233 print_debugger_command_usage(argv[0]); 234 return 0; 235 } 236 237 num = strtoul(argv[1], NULL, 0); 238 239 if (IS_KERNEL_ADDRESS(num)) { 240 dump_sem((struct sem_entry *)num); 241 return 0; 242 } else if (num >= 0) { 243 uint32 slot = num % sMaxSems; 244 if (sSems[slot].id != (int)num) { 245 kprintf("sem %ld (%#lx) doesn't exist!\n", num, num); 246 return 0; 247 } 248 249 dump_sem(&sSems[slot]); 250 return 0; 251 } 252 253 // walk through the sem list, trying to match name 254 for (i = 0; i < sMaxSems; i++) { 255 if (sSems[i].u.used.name != NULL 256 && strcmp(argv[1], sSems[i].u.used.name) == 0) { 257 dump_sem(&sSems[i]); 258 found = true; 259 } 260 } 261 262 if (!found) 263 kprintf("sem \"%s\" doesn't exist!\n", argv[1]); 264 return 0; 265 } 266 267 268 /*! \brief Appends a semaphore slot to the free list. 269 270 The semaphore list must be locked. 271 The slot's id field is not changed. It should already be set to -1. 272 273 \param slot The index of the semaphore slot. 274 \param nextID The ID the slot will get when reused. If < 0 the \a slot 275 is used. 
*/
static void
free_sem_slot(int slot, sem_id nextID)
{
	struct sem_entry *sem = sSems + slot;
	// set next_id to the next possible value; for sanity check the current ID
	if (nextID < 0)
		sem->u.unused.next_id = slot;
	else
		sem->u.unused.next_id = nextID;
	// append the entry to the list
	if (sFreeSemsTail)
		sFreeSemsTail->u.unused.next = sem;
	else
		sFreeSemsHead = sem;
	sFreeSemsTail = sem;
	sem->u.unused.next = NULL;
}


/*! Notifies the select() clients registered on \a sem, if any, about
	\a events. The semaphore's lock must be held.
*/
static inline void
notify_sem_select_events(struct sem_entry* sem, uint16 events)
{
	if (sem->u.used.select_infos)
		notify_select_events_list(sem->u.used.select_infos, events);
}


/*! Fills the sem_info structure with information from the given semaphore.
	The semaphore's lock must be held when called.
	NOTE: \a size is currently unused -- callers validate it against
	sizeof(sem_info) before calling.
*/
static void
fill_sem_info(struct sem_entry* sem, sem_info* info, size_t size)
{
	info->sem = sem->id;
	info->team = sem->u.used.owner;
	strlcpy(info->name, sem->u.used.name, sizeof(info->name));
	info->count = sem->u.used.count;
	info->latest_holder = sem->u.used.last_acquirer;
}


/*! You must call this function with interrupts disabled, and the semaphore's
	spinlock held. Note that it will unlock the spinlock itself.
	Since it cannot free() the semaphore's name with interrupts turned off, it
	will return that one in \a name.
322 */ 323 static void 324 uninit_sem_locked(struct sem_entry& sem, char** _name) 325 { 326 KTRACE("delete_sem(sem: %ld)", sem.u.used.id); 327 328 notify_sem_select_events(&sem, B_EVENT_INVALID); 329 sem.u.used.select_infos = NULL; 330 331 // free any threads waiting for this semaphore 332 SpinLocker schedulerLocker(gSchedulerLock); 333 while (queued_thread* entry = sem.queue.RemoveHead()) { 334 entry->queued = false; 335 thread_unblock_locked(entry->thread, B_BAD_SEM_ID); 336 } 337 schedulerLocker.Unlock(); 338 339 int32 id = sem.id; 340 sem.id = -1; 341 *_name = sem.u.used.name; 342 sem.u.used.name = NULL; 343 344 RELEASE_SEM_LOCK(sem); 345 346 // append slot to the free list 347 GRAB_SEM_LIST_LOCK(); 348 free_sem_slot(id % sMaxSems, id + sMaxSems); 349 atomic_add(&sUsedSems, -1); 350 RELEASE_SEM_LIST_LOCK(); 351 } 352 353 354 static status_t 355 delete_sem_internal(sem_id id, bool checkPermission) 356 { 357 if (sSemsActive == false) 358 return B_NO_MORE_SEMS; 359 if (id < 0) 360 return B_BAD_SEM_ID; 361 362 int32 slot = id % sMaxSems; 363 364 cpu_status state = disable_interrupts(); 365 GRAB_SEM_LIST_LOCK(); 366 GRAB_SEM_LOCK(sSems[slot]); 367 368 if (sSems[slot].id != id) { 369 RELEASE_SEM_LOCK(sSems[slot]); 370 RELEASE_SEM_LIST_LOCK(); 371 restore_interrupts(state); 372 TRACE(("delete_sem: invalid sem_id %ld\n", id)); 373 return B_BAD_SEM_ID; 374 } 375 376 if (checkPermission 377 && sSems[slot].u.used.owner == team_get_kernel_team_id()) { 378 RELEASE_SEM_LOCK(sSems[slot]); 379 RELEASE_SEM_LIST_LOCK(); 380 restore_interrupts(state); 381 dprintf("thread %ld tried to delete kernel semaphore %ld.\n", 382 thread_get_current_thread_id(), id); 383 return B_NOT_ALLOWED; 384 } 385 386 if (sSems[slot].u.used.owner >= 0) { 387 list_remove_link(&sSems[slot].u.used.team_link); 388 sSems[slot].u.used.owner = -1; 389 } else 390 panic("sem %ld has no owner", id); 391 392 RELEASE_SEM_LIST_LOCK(); 393 394 char* name; 395 uninit_sem_locked(sSems[slot], &name); 396 397 SpinLocker 
schedulerLocker(gSchedulerLock); 398 scheduler_reschedule_if_necessary_locked(); 399 schedulerLocker.Unlock(); 400 401 restore_interrupts(state); 402 403 free(name); 404 return B_OK; 405 } 406 407 408 // #pragma mark - Private Kernel API 409 410 411 // TODO: Name clash with POSIX sem_init()... (we could just use C++) 412 status_t 413 haiku_sem_init(kernel_args *args) 414 { 415 area_id area; 416 int32 i; 417 418 TRACE(("sem_init: entry\n")); 419 420 // compute maximal number of semaphores depending on the available memory 421 // 128 MB -> 16384 semaphores, 448 kB fixed array size 422 // 256 MB -> 32768, 896 kB 423 // 512 MB and more-> 65536, 1.75 MB 424 i = vm_page_num_pages() / 2; 425 while (sMaxSems < i && sMaxSems < kMaxSemaphores) 426 sMaxSems <<= 1; 427 428 // create and initialize semaphore table 429 virtual_address_restrictions virtualRestrictions = {}; 430 virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS; 431 physical_address_restrictions physicalRestrictions = {}; 432 area = create_area_etc(B_SYSTEM_TEAM, "sem_table", 433 sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK, 434 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 435 &virtualRestrictions, &physicalRestrictions, (void**)&sSems); 436 if (area < 0) 437 panic("unable to allocate semaphore table!\n"); 438 439 memset(sSems, 0, sizeof(struct sem_entry) * sMaxSems); 440 for (i = 0; i < sMaxSems; i++) { 441 sSems[i].id = -1; 442 free_sem_slot(i, i); 443 } 444 445 // add debugger commands 446 add_debugger_command_etc("sems", &dump_sem_list, 447 "Dump a list of all active semaphores (for team, with name, etc.)", 448 "[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]" 449 " | (\"last\" <last acquirer>)\n" 450 "Prints a list of all active semaphores meeting the given\n" 451 "requirement. 
If no argument is given, all sems are listed.\n" 452 " <team> - The team owning the semaphores.\n" 453 " <name> - Part of the name of the semaphores.\n" 454 " <last acquirer> - The thread that last acquired the semaphore.\n" 455 , 0); 456 add_debugger_command_etc("sem", &dump_sem_info, 457 "Dump info about a particular semaphore", 458 "<sem>\n" 459 "Prints info about the specified semaphore.\n" 460 " <sem> - pointer to the semaphore structure, semaphore ID, or name\n" 461 " of the semaphore to print info for.\n", 0); 462 463 TRACE(("sem_init: exit\n")); 464 465 sSemsActive = true; 466 467 return 0; 468 } 469 470 471 /*! Creates a semaphore with the given parameters. 472 473 This function is only available from within the kernel, and 474 should not be made public - if possible, we should remove it 475 completely (and have only create_sem() exported). 476 */ 477 sem_id 478 create_sem_etc(int32 count, const char* name, team_id owner) 479 { 480 struct sem_entry* sem = NULL; 481 cpu_status state; 482 sem_id id = B_NO_MORE_SEMS; 483 char* tempName; 484 size_t nameLength; 485 486 if (sSemsActive == false || sUsedSems == sMaxSems) 487 return B_NO_MORE_SEMS; 488 489 if (name == NULL) 490 name = "unnamed semaphore"; 491 492 // get the owning team 493 Team* team = Team::Get(owner); 494 if (team == NULL) 495 return B_BAD_TEAM_ID; 496 BReference<Team> teamReference(team, true); 497 498 // clone the name 499 nameLength = strlen(name) + 1; 500 nameLength = min_c(nameLength, B_OS_NAME_LENGTH); 501 tempName = (char*)malloc(nameLength); 502 if (tempName == NULL) 503 return B_NO_MEMORY; 504 505 strlcpy(tempName, name, nameLength); 506 507 state = disable_interrupts(); 508 GRAB_SEM_LIST_LOCK(); 509 510 // get the first slot from the free list 511 sem = sFreeSemsHead; 512 if (sem) { 513 // remove it from the free list 514 sFreeSemsHead = sem->u.unused.next; 515 if (!sFreeSemsHead) 516 sFreeSemsTail = NULL; 517 518 // init the slot 519 GRAB_SEM_LOCK(*sem); 520 sem->id = 
sem->u.unused.next_id; 521 sem->u.used.count = count; 522 sem->u.used.net_count = count; 523 new(&sem->queue) ThreadQueue; 524 sem->u.used.name = tempName; 525 sem->u.used.owner = team->id; 526 sem->u.used.select_infos = NULL; 527 id = sem->id; 528 529 list_add_item(&team->sem_list, &sem->u.used.team_link); 530 531 RELEASE_SEM_LOCK(*sem); 532 533 atomic_add(&sUsedSems, 1); 534 535 KTRACE("create_sem_etc(count: %ld, name: %s, owner: %ld) -> %ld", 536 count, name, owner, id); 537 538 T_SCHEDULING_ANALYSIS(CreateSemaphore(id, name)); 539 NotifyWaitObjectListeners(&WaitObjectListener::SemaphoreCreated, id, 540 name); 541 } 542 543 RELEASE_SEM_LIST_LOCK(); 544 restore_interrupts(state); 545 546 if (sem == NULL) 547 free(tempName); 548 549 return id; 550 } 551 552 553 status_t 554 select_sem(int32 id, struct select_info* info, bool kernel) 555 { 556 cpu_status state; 557 int32 slot; 558 status_t error = B_OK; 559 560 if (id < 0) 561 return B_BAD_SEM_ID; 562 563 slot = id % sMaxSems; 564 565 state = disable_interrupts(); 566 GRAB_SEM_LOCK(sSems[slot]); 567 568 if (sSems[slot].id != id) { 569 // bad sem ID 570 error = B_BAD_SEM_ID; 571 } else if (!kernel 572 && sSems[slot].u.used.owner == team_get_kernel_team_id()) { 573 // kernel semaphore, but call from userland 574 error = B_NOT_ALLOWED; 575 } else { 576 info->selected_events &= B_EVENT_ACQUIRE_SEMAPHORE | B_EVENT_INVALID; 577 578 if (info->selected_events != 0) { 579 info->next = sSems[slot].u.used.select_infos; 580 sSems[slot].u.used.select_infos = info; 581 582 if (sSems[slot].u.used.count > 0) 583 notify_select_events(info, B_EVENT_ACQUIRE_SEMAPHORE); 584 } 585 } 586 587 RELEASE_SEM_LOCK(sSems[slot]); 588 restore_interrupts(state); 589 590 return error; 591 } 592 593 594 status_t 595 deselect_sem(int32 id, struct select_info* info, bool kernel) 596 { 597 cpu_status state; 598 int32 slot; 599 600 if (id < 0) 601 return B_BAD_SEM_ID; 602 603 if (info->selected_events == 0) 604 return B_OK; 605 606 slot = id % 
sMaxSems; 607 608 state = disable_interrupts(); 609 GRAB_SEM_LOCK(sSems[slot]); 610 611 if (sSems[slot].id == id) { 612 select_info** infoLocation = &sSems[slot].u.used.select_infos; 613 while (*infoLocation != NULL && *infoLocation != info) 614 infoLocation = &(*infoLocation)->next; 615 616 if (*infoLocation == info) 617 *infoLocation = info->next; 618 } 619 620 RELEASE_SEM_LOCK(sSems[slot]); 621 restore_interrupts(state); 622 623 return B_OK; 624 } 625 626 627 /*! Forcibly removes a thread from a semaphores wait queue. May have to wake up 628 other threads in the process. 629 Must be called with semaphore lock held. The thread lock must not be held. 630 */ 631 static void 632 remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem) 633 { 634 if (!entry->queued) 635 return; 636 637 sem->queue.Remove(entry); 638 entry->queued = false; 639 sem->u.used.count += entry->count; 640 641 // We're done with this entry. We only have to check, if other threads 642 // need unblocking, too. 643 644 // Now see if more threads need to be woken up. We get the scheduler lock 645 // for that time, so the blocking state of threads won't change (due to 646 // interruption or timeout). We need that lock anyway when unblocking a 647 // thread. 648 SpinLocker schedulerLocker(gSchedulerLock); 649 650 while ((entry = sem->queue.Head()) != NULL) { 651 if (thread_is_blocked(entry->thread)) { 652 // The thread is still waiting. If its count is satisfied, unblock 653 // it. Otherwise we can't unblock any other thread. 654 if (entry->count > sem->u.used.net_count) 655 break; 656 657 thread_unblock_locked(entry->thread, B_OK); 658 sem->u.used.net_count -= entry->count; 659 } else { 660 // The thread is no longer waiting, but still queued, which means 661 // acquiration failed and we can just remove it. 
662 sem->u.used.count += entry->count; 663 } 664 665 sem->queue.Remove(entry); 666 entry->queued = false; 667 } 668 669 schedulerLocker.Unlock(); 670 671 // select notification, if the semaphore is now acquirable 672 if (sem->u.used.count > 0) 673 notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE); 674 } 675 676 677 /*! This function deletes all semaphores belonging to a particular team. 678 */ 679 void 680 sem_delete_owned_sems(Team* team) 681 { 682 while (true) { 683 char* name; 684 685 { 686 // get the next semaphore from the team's sem list 687 InterruptsLocker locker; 688 SpinLocker semListLocker(sSemsSpinlock); 689 sem_entry* sem = (sem_entry*)list_remove_head_item(&team->sem_list); 690 if (sem == NULL) 691 break; 692 693 // delete the semaphore 694 GRAB_SEM_LOCK(*sem); 695 semListLocker.Unlock(); 696 uninit_sem_locked(*sem, &name); 697 } 698 699 free(name); 700 } 701 702 scheduler_reschedule_if_necessary(); 703 } 704 705 706 int32 707 sem_max_sems(void) 708 { 709 return sMaxSems; 710 } 711 712 713 int32 714 sem_used_sems(void) 715 { 716 return sUsedSems; 717 } 718 719 720 // #pragma mark - Public Kernel API 721 722 723 sem_id 724 create_sem(int32 count, const char* name) 725 { 726 return create_sem_etc(count, name, team_get_kernel_team_id()); 727 } 728 729 730 status_t 731 delete_sem(sem_id id) 732 { 733 return delete_sem_internal(id, false); 734 } 735 736 737 status_t 738 acquire_sem(sem_id id) 739 { 740 return switch_sem_etc(-1, id, 1, 0, 0); 741 } 742 743 744 status_t 745 acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout) 746 { 747 return switch_sem_etc(-1, id, count, flags, timeout); 748 } 749 750 751 status_t 752 switch_sem(sem_id toBeReleased, sem_id toBeAcquired) 753 { 754 return switch_sem_etc(toBeReleased, toBeAcquired, 1, 0, 0); 755 } 756 757 758 status_t 759 switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count, 760 uint32 flags, bigtime_t timeout) 761 { 762 int slot = id % sMaxSems; 763 int state; 764 status_t 
status = B_OK; 765 766 if (gKernelStartup) 767 return B_OK; 768 if (sSemsActive == false) 769 return B_NO_MORE_SEMS; 770 771 if (!are_interrupts_enabled()) { 772 panic("switch_sem_etc: called with interrupts disabled for sem %ld\n", 773 id); 774 } 775 776 if (id < 0) 777 return B_BAD_SEM_ID; 778 if (count <= 0 779 || (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) { 780 return B_BAD_VALUE; 781 } 782 783 state = disable_interrupts(); 784 GRAB_SEM_LOCK(sSems[slot]); 785 786 if (sSems[slot].id != id) { 787 TRACE(("switch_sem_etc: bad sem %ld\n", id)); 788 status = B_BAD_SEM_ID; 789 goto err; 790 } 791 792 // TODO: the B_CHECK_PERMISSION flag should be made private, as it 793 // doesn't have any use outside the kernel 794 if ((flags & B_CHECK_PERMISSION) != 0 795 && sSems[slot].u.used.owner == team_get_kernel_team_id()) { 796 dprintf("thread %ld tried to acquire kernel semaphore %ld.\n", 797 thread_get_current_thread_id(), id); 798 status = B_NOT_ALLOWED; 799 goto err; 800 } 801 802 if (sSems[slot].u.used.count - count < 0) { 803 if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) { 804 // immediate timeout 805 status = B_WOULD_BLOCK; 806 goto err; 807 } else if ((flags & B_ABSOLUTE_TIMEOUT) != 0 && timeout < 0) { 808 // absolute negative timeout 809 status = B_TIMED_OUT; 810 goto err; 811 } 812 } 813 814 KTRACE("switch_sem_etc(semToBeReleased: %ld, sem: %ld, count: %ld, " 815 "flags: 0x%lx, timeout: %lld)", semToBeReleased, id, count, flags, 816 timeout); 817 818 if ((sSems[slot].u.used.count -= count) < 0) { 819 // we need to block 820 Thread *thread = thread_get_current_thread(); 821 822 TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p," 823 " name = %s\n", id, sSems[slot].u.used.name, thread, thread->name)); 824 825 // do a quick check to see if the thread has any pending signals 826 // this should catch most of the cases where the thread had a signal 827 SpinLocker schedulerLocker(gSchedulerLock); 
828 if (thread_is_interrupted(thread, flags)) { 829 schedulerLocker.Unlock(); 830 sSems[slot].u.used.count += count; 831 status = B_INTERRUPTED; 832 // the other semaphore will be released later 833 goto err; 834 } 835 836 if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0) 837 timeout = B_INFINITE_TIMEOUT; 838 839 // enqueue in the semaphore queue and get ready to wait 840 queued_thread queueEntry(thread, count); 841 sSems[slot].queue.Add(&queueEntry); 842 queueEntry.queued = true; 843 844 thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE, 845 (void*)(addr_t)id); 846 847 RELEASE_SEM_LOCK(sSems[slot]); 848 849 // release the other semaphore, if any 850 if (semToBeReleased >= 0) { 851 release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE); 852 semToBeReleased = -1; 853 } 854 855 schedulerLocker.Lock(); 856 857 status_t acquireStatus = timeout == B_INFINITE_TIMEOUT 858 ? thread_block_locked(thread) 859 : thread_block_with_timeout_locked(flags, timeout); 860 861 schedulerLocker.Unlock(); 862 GRAB_SEM_LOCK(sSems[slot]); 863 864 // If we're still queued, this means the acquiration failed, and we 865 // need to remove our entry and (potentially) wake up other threads. 
866 if (queueEntry.queued) 867 remove_thread_from_sem(&queueEntry, &sSems[slot]); 868 869 if (acquireStatus >= B_OK) { 870 sSems[slot].u.used.last_acquirer = thread_get_current_thread_id(); 871 #if DEBUG_SEM_LAST_ACQUIRER 872 sSems[slot].u.used.last_acquire_count = count; 873 #endif 874 } 875 876 RELEASE_SEM_LOCK(sSems[slot]); 877 restore_interrupts(state); 878 879 TRACE(("switch_sem_etc(sem %ld): exit block name %s, " 880 "thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id, 881 thread->name)); 882 KTRACE("switch_sem_etc() done: 0x%lx", acquireStatus); 883 return acquireStatus; 884 } else { 885 sSems[slot].u.used.net_count -= count; 886 sSems[slot].u.used.last_acquirer = thread_get_current_thread_id(); 887 #if DEBUG_SEM_LAST_ACQUIRER 888 sSems[slot].u.used.last_acquire_count = count; 889 #endif 890 } 891 892 err: 893 RELEASE_SEM_LOCK(sSems[slot]); 894 restore_interrupts(state); 895 896 if (status == B_INTERRUPTED && semToBeReleased >= B_OK) { 897 // depending on when we were interrupted, we need to still 898 // release the semaphore to always leave in a consistent 899 // state 900 release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE); 901 } 902 903 #if 0 904 if (status == B_NOT_ALLOWED) 905 _user_debugger("Thread tried to acquire kernel semaphore."); 906 #endif 907 908 KTRACE("switch_sem_etc() done: 0x%lx", status); 909 910 return status; 911 } 912 913 914 status_t 915 release_sem(sem_id id) 916 { 917 return release_sem_etc(id, 1, 0); 918 } 919 920 921 status_t 922 release_sem_etc(sem_id id, int32 count, uint32 flags) 923 { 924 int32 slot = id % sMaxSems; 925 926 if (gKernelStartup) 927 return B_OK; 928 if (sSemsActive == false) 929 return B_NO_MORE_SEMS; 930 if (id < 0) 931 return B_BAD_SEM_ID; 932 if (count <= 0 && (flags & B_RELEASE_ALL) == 0) 933 return B_BAD_VALUE; 934 935 InterruptsLocker _; 936 SpinLocker semLocker(sSems[slot].lock); 937 938 if (sSems[slot].id != id) { 939 TRACE(("sem_release_etc: invalid sem_id %ld\n", id)); 940 return 
B_BAD_SEM_ID; 941 } 942 943 // ToDo: the B_CHECK_PERMISSION flag should be made private, as it 944 // doesn't have any use outside the kernel 945 if ((flags & B_CHECK_PERMISSION) != 0 946 && sSems[slot].u.used.owner == team_get_kernel_team_id()) { 947 dprintf("thread %ld tried to release kernel semaphore.\n", 948 thread_get_current_thread_id()); 949 return B_NOT_ALLOWED; 950 } 951 952 KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count, 953 flags); 954 955 sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer; 956 #if DEBUG_SEM_LAST_ACQUIRER 957 sSems[slot].u.used.last_releaser = thread_get_current_thread_id(); 958 sSems[slot].u.used.last_release_count = count; 959 #endif 960 961 if (flags & B_RELEASE_ALL) { 962 count = sSems[slot].u.used.net_count - sSems[slot].u.used.count; 963 964 // is there anything to do for us at all? 965 if (count == 0) 966 return B_OK; 967 968 // Don't release more than necessary -- there might be interrupted/ 969 // timed out threads in the queue. 970 flags |= B_RELEASE_IF_WAITING_ONLY; 971 } 972 973 // Grab the scheduler lock, so thread_is_blocked() is reliable (due to 974 // possible interruptions or timeouts, it wouldn't be otherwise). 975 SpinLocker schedulerLocker(gSchedulerLock); 976 977 while (count > 0) { 978 queued_thread* entry = sSems[slot].queue.Head(); 979 if (entry == NULL) { 980 if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) { 981 sSems[slot].u.used.count += count; 982 sSems[slot].u.used.net_count += count; 983 } 984 break; 985 } 986 987 if (thread_is_blocked(entry->thread)) { 988 // The thread is still waiting. If its count is satisfied, 989 // unblock it. Otherwise we can't unblock any other thread. 
990 if (entry->count > sSems[slot].u.used.net_count + count) { 991 sSems[slot].u.used.count += count; 992 sSems[slot].u.used.net_count += count; 993 break; 994 } 995 996 thread_unblock_locked(entry->thread, B_OK); 997 998 int delta = min_c(count, entry->count); 999 sSems[slot].u.used.count += delta; 1000 sSems[slot].u.used.net_count += delta - entry->count; 1001 count -= delta; 1002 } else { 1003 // The thread is no longer waiting, but still queued, which 1004 // means acquiration failed and we can just remove it. 1005 sSems[slot].u.used.count += entry->count; 1006 } 1007 1008 sSems[slot].queue.Remove(entry); 1009 entry->queued = false; 1010 } 1011 1012 schedulerLocker.Unlock(); 1013 1014 if (sSems[slot].u.used.count > 0) 1015 notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE); 1016 1017 // If we've unblocked another thread reschedule, if we've not explicitly 1018 // been told not to. 1019 if ((flags & B_DO_NOT_RESCHEDULE) == 0) { 1020 semLocker.Unlock(); 1021 schedulerLocker.Lock(); 1022 scheduler_reschedule_if_necessary_locked(); 1023 } 1024 1025 return B_OK; 1026 } 1027 1028 1029 status_t 1030 get_sem_count(sem_id id, int32 *_count) 1031 { 1032 int slot; 1033 int state; 1034 1035 if (sSemsActive == false) 1036 return B_NO_MORE_SEMS; 1037 if (id < 0) 1038 return B_BAD_SEM_ID; 1039 if (_count == NULL) 1040 return B_BAD_VALUE; 1041 1042 slot = id % sMaxSems; 1043 1044 state = disable_interrupts(); 1045 GRAB_SEM_LOCK(sSems[slot]); 1046 1047 if (sSems[slot].id != id) { 1048 RELEASE_SEM_LOCK(sSems[slot]); 1049 restore_interrupts(state); 1050 TRACE(("sem_get_count: invalid sem_id %ld\n", id)); 1051 return B_BAD_SEM_ID; 1052 } 1053 1054 *_count = sSems[slot].u.used.count; 1055 1056 RELEASE_SEM_LOCK(sSems[slot]); 1057 restore_interrupts(state); 1058 1059 return B_OK; 1060 } 1061 1062 1063 /*! Called by the get_sem_info() macro. 
*/
status_t
_get_sem_info(sem_id id, struct sem_info *info, size_t size)
{
	status_t status = B_OK;
	int state;
	int slot;

	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		status = B_BAD_SEM_ID;
		TRACE(("get_sem_info: invalid sem_id %ld\n", id));
	} else
		fill_sem_info(&sSems[slot], info, size);

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return status;
}


/*! Called by the get_next_sem_info() macro. Iterates the owning team's
	semaphore list using \a _cookie as a positional index into the list.
*/
status_t
_get_next_sem_info(team_id teamID, int32 *_cookie, struct sem_info *info,
	size_t size)
{
	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (_cookie == NULL || info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;
	if (teamID < 0)
		return B_BAD_TEAM_ID;

	Team* team = Team::Get(teamID);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	InterruptsSpinLocker semListLocker(sSemsSpinlock);

	// TODO: find a way to iterate the list that is more reliable
	sem_entry* sem = (sem_entry*)list_get_first_item(&team->sem_list);
	int32 newIndex = *_cookie;
	int32 index = 0;
	bool found = false;

	while (!found) {
		// find the next entry to be returned
		while (sem != NULL && index < newIndex) {
			sem = (sem_entry*)list_get_next_item(&team->sem_list, sem);
			index++;
		}

		// reaching the end of the list without a match ends the iteration
		if (sem == NULL)
			return B_BAD_VALUE;

		GRAB_SEM_LOCK(*sem);

		if (sem->id != -1 && sem->u.used.owner == team->id) {
			// found one!
			fill_sem_info(sem, info, size);
			newIndex = index + 1;
			found = true;
		} else
			newIndex++;

		RELEASE_SEM_LOCK(*sem);
	}

	// NOTE(review): unreachable -- the loop above only exits with
	// found == true or by returning B_BAD_VALUE
	if (!found)
		return B_BAD_VALUE;

	*_cookie = newIndex;
	return B_OK;
}


/*! Transfers ownership of semaphore \a id to team \a newTeamID, moving it
	between the teams' semaphore lists. Takes the sem list lock and the
	semaphore's lock (in that order).
*/
status_t
set_sem_owner(sem_id id, team_id newTeamID)
{
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (newTeamID < 0)
		return B_BAD_TEAM_ID;

	int32 slot = id % sMaxSems;

	// get the new team
	Team* newTeam = Team::Get(newTeamID);
	if (newTeam == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> newTeamReference(newTeam, true);

	InterruptsSpinLocker semListLocker(sSemsSpinlock);
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("set_sem_owner: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// move the semaphore to the new team's list
	list_remove_link(&sSems[slot].u.used.team_link);
	list_add_item(&newTeam->sem_list, &sSems[slot].u.used.team_link);

	sSems[slot].u.used.owner = newTeam->id;
	return B_OK;
}


/*! Returns the name of the semaphore. The name is not copied, so the caller
	must make sure that the semaphore remains alive as long as the name is used.
1188 */ 1189 const char* 1190 sem_get_name_unsafe(sem_id id) 1191 { 1192 int slot = id % sMaxSems; 1193 1194 if (sSemsActive == false || id < 0 || sSems[slot].id != id) 1195 return NULL; 1196 1197 return sSems[slot].u.used.name; 1198 } 1199 1200 1201 // #pragma mark - Syscalls 1202 1203 1204 sem_id 1205 _user_create_sem(int32 count, const char *userName) 1206 { 1207 char name[B_OS_NAME_LENGTH]; 1208 1209 if (userName == NULL) 1210 return create_sem_etc(count, NULL, team_get_current_team_id()); 1211 1212 if (!IS_USER_ADDRESS(userName) 1213 || user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK) 1214 return B_BAD_ADDRESS; 1215 1216 return create_sem_etc(count, name, team_get_current_team_id()); 1217 } 1218 1219 1220 status_t 1221 _user_delete_sem(sem_id id) 1222 { 1223 return delete_sem_internal(id, true); 1224 } 1225 1226 1227 status_t 1228 _user_acquire_sem(sem_id id) 1229 { 1230 status_t error = switch_sem_etc(-1, id, 1, 1231 B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0); 1232 1233 return syscall_restart_handle_post(error); 1234 } 1235 1236 1237 status_t 1238 _user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout) 1239 { 1240 syscall_restart_handle_timeout_pre(flags, timeout); 1241 1242 status_t error = switch_sem_etc(-1, id, count, 1243 flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout); 1244 1245 return syscall_restart_handle_timeout_post(error, timeout); 1246 } 1247 1248 1249 status_t 1250 _user_switch_sem(sem_id releaseSem, sem_id id) 1251 { 1252 status_t error = switch_sem_etc(releaseSem, id, 1, 1253 B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0); 1254 1255 if (releaseSem < 0) 1256 return syscall_restart_handle_post(error); 1257 1258 return error; 1259 } 1260 1261 1262 status_t 1263 _user_switch_sem_etc(sem_id releaseSem, sem_id id, int32 count, uint32 flags, 1264 bigtime_t timeout) 1265 { 1266 if (releaseSem < 0) 1267 syscall_restart_handle_timeout_pre(flags, timeout); 1268 1269 status_t error = switch_sem_etc(releaseSem, id, count, 1270 
flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout); 1271 1272 if (releaseSem < 0) 1273 return syscall_restart_handle_timeout_post(error, timeout); 1274 1275 return error; 1276 } 1277 1278 1279 status_t 1280 _user_release_sem(sem_id id) 1281 { 1282 return release_sem_etc(id, 1, B_CHECK_PERMISSION); 1283 } 1284 1285 1286 status_t 1287 _user_release_sem_etc(sem_id id, int32 count, uint32 flags) 1288 { 1289 return release_sem_etc(id, count, flags | B_CHECK_PERMISSION); 1290 } 1291 1292 1293 status_t 1294 _user_get_sem_count(sem_id id, int32 *userCount) 1295 { 1296 status_t status; 1297 int32 count; 1298 1299 if (userCount == NULL || !IS_USER_ADDRESS(userCount)) 1300 return B_BAD_ADDRESS; 1301 1302 status = get_sem_count(id, &count); 1303 if (status == B_OK && user_memcpy(userCount, &count, sizeof(int32)) < B_OK) 1304 return B_BAD_ADDRESS; 1305 1306 return status; 1307 } 1308 1309 1310 status_t 1311 _user_get_sem_info(sem_id id, struct sem_info *userInfo, size_t size) 1312 { 1313 struct sem_info info; 1314 status_t status; 1315 1316 if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)) 1317 return B_BAD_ADDRESS; 1318 1319 status = _get_sem_info(id, &info, size); 1320 if (status == B_OK && user_memcpy(userInfo, &info, size) < B_OK) 1321 return B_BAD_ADDRESS; 1322 1323 return status; 1324 } 1325 1326 1327 status_t 1328 _user_get_next_sem_info(team_id team, int32 *userCookie, struct sem_info *userInfo, 1329 size_t size) 1330 { 1331 struct sem_info info; 1332 int32 cookie; 1333 status_t status; 1334 1335 if (userCookie == NULL || userInfo == NULL 1336 || !IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo) 1337 || user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK) 1338 return B_BAD_ADDRESS; 1339 1340 status = _get_next_sem_info(team, &cookie, &info, size); 1341 1342 if (status == B_OK) { 1343 if (user_memcpy(userInfo, &info, size) < B_OK 1344 || user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK) 1345 return B_BAD_ADDRESS; 1346 } 1347 1348 return status; 
1349 } 1350 1351 1352 status_t 1353 _user_set_sem_owner(sem_id id, team_id team) 1354 { 1355 return set_sem_owner(id, team); 1356 } 1357