1 /* 2 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de. 3 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de. 4 * Distributed under the terms of the MIT License. 5 * 6 * Copyright 2001, Travis Geiselbrecht. All rights reserved. 7 * Distributed under the terms of the NewOS License. 8 */ 9 10 11 /*! Semaphore code */ 12 13 14 #include <sem.h> 15 16 #include <stdlib.h> 17 #include <string.h> 18 19 #include <OS.h> 20 21 #include <arch/int.h> 22 #include <boot/kernel_args.h> 23 #include <cpu.h> 24 #include <debug.h> 25 #include <int.h> 26 #include <kernel.h> 27 #include <ksignal.h> 28 #include <kscheduler.h> 29 #include <listeners.h> 30 #include <scheduling_analysis.h> 31 #include <smp.h> 32 #include <syscall_restart.h> 33 #include <team.h> 34 #include <thread.h> 35 #include <util/AutoLock.h> 36 #include <util/DoublyLinkedList.h> 37 #include <vfs.h> 38 #include <vm/vm_page.h> 39 #include <wait_for_objects.h> 40 41 #include "kernel_debug_config.h" 42 43 44 //#define TRACE_SEM 45 #ifdef TRACE_SEM 46 # define TRACE(x) dprintf_no_syslog x 47 #else 48 # define TRACE(x) ; 49 #endif 50 51 //#define KTRACE_SEM 52 #ifdef KTRACE_SEM 53 # define KTRACE(x...) ktrace_printf(x) 54 #else 55 # define KTRACE(x...) do {} while (false) 56 #endif 57 58 59 // Locking: 60 // * sSemsSpinlock: Protects the semaphore free list (sFreeSemsHead, 61 // sFreeSemsTail), Team::sem_list, and together with sem_entry::lock 62 // write access to sem_entry::owner/team_link. 63 // * sem_entry::lock: Protects all sem_entry members. owner, team_link 64 // additional need sSemsSpinlock for write access. 65 // lock itself doesn't need protection -- sem_entry objects are never deleted. 66 // 67 // The locking order is sSemsSpinlock -> sem_entry::lock -> scheduler lock. All 68 // semaphores are in the sSems array (sem_entry[]). 
Access by sem_id requires 69 // computing the object index (id % sMaxSems), locking the respective 70 // sem_entry::lock and verifying that sem_entry::id matches afterwards. 71 72 73 struct queued_thread : DoublyLinkedListLinkImpl<queued_thread> { 74 queued_thread(Thread *thread, int32 count) 75 : 76 thread(thread), 77 count(count), 78 queued(false) 79 { 80 } 81 82 Thread *thread; 83 int32 count; 84 bool queued; 85 }; 86 87 typedef DoublyLinkedList<queued_thread> ThreadQueue; 88 89 struct sem_entry { 90 union { 91 // when slot in use 92 struct { 93 struct list_link team_link; 94 int32 count; 95 int32 net_count; 96 // count + acquisition count of all blocked 97 // threads 98 char* name; 99 team_id owner; 100 select_info* select_infos; 101 thread_id last_acquirer; 102 #if DEBUG_SEM_LAST_ACQUIRER 103 int32 last_acquire_count; 104 thread_id last_releaser; 105 int32 last_release_count; 106 #endif 107 } used; 108 109 // when slot unused 110 struct { 111 sem_id next_id; 112 struct sem_entry* next; 113 } unused; 114 } u; 115 116 sem_id id; 117 spinlock lock; // protects only the id field when unused 118 ThreadQueue queue; // should be in u.used, but has a constructor 119 }; 120 121 static const int32 kMaxSemaphores = 65536; 122 static int32 sMaxSems = 4096; 123 // Final value is computed based on the amount of available memory 124 static int32 sUsedSems = 0; 125 126 static struct sem_entry *sSems = NULL; 127 static bool sSemsActive = false; 128 static struct sem_entry *sFreeSemsHead = NULL; 129 static struct sem_entry *sFreeSemsTail = NULL; 130 131 static spinlock sSemsSpinlock = B_SPINLOCK_INITIALIZER; 132 #define GRAB_SEM_LIST_LOCK() acquire_spinlock(&sSemsSpinlock) 133 #define RELEASE_SEM_LIST_LOCK() release_spinlock(&sSemsSpinlock) 134 #define GRAB_SEM_LOCK(s) acquire_spinlock(&(s).lock) 135 #define RELEASE_SEM_LOCK(s) release_spinlock(&(s).lock) 136 137 138 static int 139 dump_sem_list(int argc, char** argv) 140 { 141 const char* name = NULL; 142 team_id owner = -1; 
143 thread_id last = -1; 144 int32 i; 145 146 if (argc > 2) { 147 if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner")) 148 owner = strtoul(argv[2], NULL, 0); 149 else if (!strcmp(argv[1], "name")) 150 name = argv[2]; 151 else if (!strcmp(argv[1], "last")) 152 last = strtoul(argv[2], NULL, 0); 153 } else if (argc > 1) 154 owner = strtoul(argv[1], NULL, 0); 155 156 kprintf("%-*s id count team last name\n", B_PRINTF_POINTER_WIDTH, 157 "sem"); 158 159 for (i = 0; i < sMaxSems; i++) { 160 struct sem_entry* sem = &sSems[i]; 161 if (sem->id < 0 162 || (last != -1 && sem->u.used.last_acquirer != last) 163 || (name != NULL && strstr(sem->u.used.name, name) == NULL) 164 || (owner != -1 && sem->u.used.owner != owner)) 165 continue; 166 167 kprintf("%p %6" B_PRId32 " %5" B_PRId32 " %6" B_PRId32 " " 168 "%6" B_PRId32 " " 169 " %s\n", sem, sem->id, sem->u.used.count, 170 sem->u.used.owner, 171 sem->u.used.last_acquirer > 0 ? sem->u.used.last_acquirer : 0, 172 sem->u.used.name); 173 } 174 175 return 0; 176 } 177 178 179 static void 180 dump_sem(struct sem_entry* sem) 181 { 182 kprintf("SEM: %p\n", sem); 183 kprintf("id: %" B_PRId32 " (%#" B_PRIx32 ")\n", sem->id, sem->id); 184 if (sem->id >= 0) { 185 kprintf("name: '%s'\n", sem->u.used.name); 186 kprintf("owner: %" B_PRId32 "\n", sem->u.used.owner); 187 kprintf("count: %" B_PRId32 "\n", sem->u.used.count); 188 kprintf("queue: "); 189 if (!sem->queue.IsEmpty()) { 190 ThreadQueue::Iterator it = sem->queue.GetIterator(); 191 while (queued_thread* entry = it.Next()) 192 kprintf(" %" B_PRId32, entry->thread->id); 193 kprintf("\n"); 194 } else 195 kprintf(" -\n"); 196 197 set_debug_variable("_sem", (addr_t)sem); 198 set_debug_variable("_semID", sem->id); 199 set_debug_variable("_owner", sem->u.used.owner); 200 201 #if DEBUG_SEM_LAST_ACQUIRER 202 kprintf("last acquired by: %" B_PRId32 ", count: %" B_PRId32 "\n", 203 sem->u.used.last_acquirer, sem->u.used.last_acquire_count); 204 kprintf("last released by: %" B_PRId32 ", count: %" 
B_PRId32 "\n", 205 sem->u.used.last_releaser, sem->u.used.last_release_count); 206 207 if (sem->u.used.last_releaser != 0) 208 set_debug_variable("_releaser", sem->u.used.last_releaser); 209 else 210 unset_debug_variable("_releaser"); 211 #else 212 kprintf("last acquired by: %" B_PRId32 "\n", sem->u.used.last_acquirer); 213 #endif 214 215 if (sem->u.used.last_acquirer != 0) 216 set_debug_variable("_acquirer", sem->u.used.last_acquirer); 217 else 218 unset_debug_variable("_acquirer"); 219 } else { 220 kprintf("next: %p\n", sem->u.unused.next); 221 kprintf("next_id: %" B_PRId32 "\n", sem->u.unused.next_id); 222 } 223 } 224 225 226 static int 227 dump_sem_info(int argc, char **argv) 228 { 229 bool found = false; 230 addr_t num; 231 int32 i; 232 233 if (argc < 2) { 234 print_debugger_command_usage(argv[0]); 235 return 0; 236 } 237 238 num = strtoul(argv[1], NULL, 0); 239 240 if (IS_KERNEL_ADDRESS(num)) { 241 dump_sem((struct sem_entry *)num); 242 return 0; 243 } else if (num >= 0) { 244 uint32 slot = num % sMaxSems; 245 if (sSems[slot].id != (int)num) { 246 kprintf("sem %ld (%#lx) doesn't exist!\n", num, num); 247 return 0; 248 } 249 250 dump_sem(&sSems[slot]); 251 return 0; 252 } 253 254 // walk through the sem list, trying to match name 255 for (i = 0; i < sMaxSems; i++) { 256 if (sSems[i].u.used.name != NULL 257 && strcmp(argv[1], sSems[i].u.used.name) == 0) { 258 dump_sem(&sSems[i]); 259 found = true; 260 } 261 } 262 263 if (!found) 264 kprintf("sem \"%s\" doesn't exist!\n", argv[1]); 265 return 0; 266 } 267 268 269 /*! \brief Appends a semaphore slot to the free list. 270 271 The semaphore list must be locked. 272 The slot's id field is not changed. It should already be set to -1. 273 274 \param slot The index of the semaphore slot. 275 \param nextID The ID the slot will get when reused. If < 0 the \a slot 276 is used. 
277 */ 278 static void 279 free_sem_slot(int slot, sem_id nextID) 280 { 281 struct sem_entry *sem = sSems + slot; 282 // set next_id to the next possible value; for sanity check the current ID 283 if (nextID < 0) 284 sem->u.unused.next_id = slot; 285 else 286 sem->u.unused.next_id = nextID; 287 // append the entry to the list 288 if (sFreeSemsTail) 289 sFreeSemsTail->u.unused.next = sem; 290 else 291 sFreeSemsHead = sem; 292 sFreeSemsTail = sem; 293 sem->u.unused.next = NULL; 294 } 295 296 297 static inline void 298 notify_sem_select_events(struct sem_entry* sem, uint16 events) 299 { 300 if (sem->u.used.select_infos) 301 notify_select_events_list(sem->u.used.select_infos, events); 302 } 303 304 305 /*! Fills the sem_info structure with information from the given semaphore. 306 The semaphore's lock must be held when called. 307 */ 308 static void 309 fill_sem_info(struct sem_entry* sem, sem_info* info, size_t size) 310 { 311 info->sem = sem->id; 312 info->team = sem->u.used.owner; 313 strlcpy(info->name, sem->u.used.name, sizeof(info->name)); 314 info->count = sem->u.used.count; 315 info->latest_holder = sem->u.used.last_acquirer; 316 } 317 318 319 /*! You must call this function with interrupts disabled, and the semaphore's 320 spinlock held. Note that it will unlock the spinlock itself. 321 Since it cannot free() the semaphore's name with interrupts turned off, it 322 will return that one in \a name. 
323 */ 324 static void 325 uninit_sem_locked(struct sem_entry& sem, char** _name) 326 { 327 KTRACE("delete_sem(sem: %ld)", sem.u.used.id); 328 329 notify_sem_select_events(&sem, B_EVENT_INVALID); 330 sem.u.used.select_infos = NULL; 331 332 // free any threads waiting for this semaphore 333 while (queued_thread* entry = sem.queue.RemoveHead()) { 334 entry->queued = false; 335 thread_unblock(entry->thread, B_BAD_SEM_ID); 336 } 337 338 int32 id = sem.id; 339 sem.id = -1; 340 *_name = sem.u.used.name; 341 sem.u.used.name = NULL; 342 343 RELEASE_SEM_LOCK(sem); 344 345 // append slot to the free list 346 GRAB_SEM_LIST_LOCK(); 347 free_sem_slot(id % sMaxSems, id + sMaxSems); 348 atomic_add(&sUsedSems, -1); 349 RELEASE_SEM_LIST_LOCK(); 350 } 351 352 353 static status_t 354 delete_sem_internal(sem_id id, bool checkPermission) 355 { 356 if (sSemsActive == false) 357 return B_NO_MORE_SEMS; 358 if (id < 0) 359 return B_BAD_SEM_ID; 360 361 int32 slot = id % sMaxSems; 362 363 cpu_status state = disable_interrupts(); 364 GRAB_SEM_LIST_LOCK(); 365 GRAB_SEM_LOCK(sSems[slot]); 366 367 if (sSems[slot].id != id) { 368 RELEASE_SEM_LOCK(sSems[slot]); 369 RELEASE_SEM_LIST_LOCK(); 370 restore_interrupts(state); 371 TRACE(("delete_sem: invalid sem_id %ld\n", id)); 372 return B_BAD_SEM_ID; 373 } 374 375 if (checkPermission 376 && sSems[slot].u.used.owner == team_get_kernel_team_id()) { 377 RELEASE_SEM_LOCK(sSems[slot]); 378 RELEASE_SEM_LIST_LOCK(); 379 restore_interrupts(state); 380 dprintf("thread %" B_PRId32 " tried to delete kernel semaphore " 381 "%" B_PRId32 ".\n", thread_get_current_thread_id(), id); 382 return B_NOT_ALLOWED; 383 } 384 385 if (sSems[slot].u.used.owner >= 0) { 386 list_remove_link(&sSems[slot].u.used.team_link); 387 sSems[slot].u.used.owner = -1; 388 } else 389 panic("sem %" B_PRId32 " has no owner", id); 390 391 RELEASE_SEM_LIST_LOCK(); 392 393 char* name; 394 uninit_sem_locked(sSems[slot], &name); 395 396 SpinLocker 
schedulerLocker(thread_get_current_thread()->scheduler_lock); 397 scheduler_reschedule_if_necessary_locked(); 398 schedulerLocker.Unlock(); 399 400 restore_interrupts(state); 401 402 free(name); 403 return B_OK; 404 } 405 406 407 // #pragma mark - Private Kernel API 408 409 410 // TODO: Name clash with POSIX sem_init()... (we could just use C++) 411 status_t 412 haiku_sem_init(kernel_args *args) 413 { 414 area_id area; 415 int32 i; 416 417 TRACE(("sem_init: entry\n")); 418 419 // compute maximal number of semaphores depending on the available memory 420 // 128 MB -> 16384 semaphores, 448 kB fixed array size 421 // 256 MB -> 32768, 896 kB 422 // 512 MB and more-> 65536, 1.75 MB 423 i = vm_page_num_pages() / 2; 424 while (sMaxSems < i && sMaxSems < kMaxSemaphores) 425 sMaxSems <<= 1; 426 427 // create and initialize semaphore table 428 virtual_address_restrictions virtualRestrictions = {}; 429 virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS; 430 physical_address_restrictions physicalRestrictions = {}; 431 area = create_area_etc(B_SYSTEM_TEAM, "sem_table", 432 sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK, 433 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0, 434 &virtualRestrictions, &physicalRestrictions, (void**)&sSems); 435 if (area < 0) 436 panic("unable to allocate semaphore table!\n"); 437 438 memset(sSems, 0, sizeof(struct sem_entry) * sMaxSems); 439 for (i = 0; i < sMaxSems; i++) { 440 sSems[i].id = -1; 441 free_sem_slot(i, i); 442 } 443 444 // add debugger commands 445 add_debugger_command_etc("sems", &dump_sem_list, 446 "Dump a list of all active semaphores (for team, with name, etc.)", 447 "[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]" 448 " | (\"last\" <last acquirer>)\n" 449 "Prints a list of all active semaphores meeting the given\n" 450 "requirement. 
If no argument is given, all sems are listed.\n" 451 " <team> - The team owning the semaphores.\n" 452 " <name> - Part of the name of the semaphores.\n" 453 " <last acquirer> - The thread that last acquired the semaphore.\n" 454 , 0); 455 add_debugger_command_etc("sem", &dump_sem_info, 456 "Dump info about a particular semaphore", 457 "<sem>\n" 458 "Prints info about the specified semaphore.\n" 459 " <sem> - pointer to the semaphore structure, semaphore ID, or name\n" 460 " of the semaphore to print info for.\n", 0); 461 462 TRACE(("sem_init: exit\n")); 463 464 sSemsActive = true; 465 466 return 0; 467 } 468 469 470 /*! Creates a semaphore with the given parameters. 471 472 This function is only available from within the kernel, and 473 should not be made public - if possible, we should remove it 474 completely (and have only create_sem() exported). 475 */ 476 sem_id 477 create_sem_etc(int32 count, const char* name, team_id owner) 478 { 479 struct sem_entry* sem = NULL; 480 cpu_status state; 481 sem_id id = B_NO_MORE_SEMS; 482 char* tempName; 483 size_t nameLength; 484 485 if (sSemsActive == false || sUsedSems == sMaxSems) 486 return B_NO_MORE_SEMS; 487 488 if (name == NULL) 489 name = "unnamed semaphore"; 490 491 // get the owning team 492 Team* team = Team::Get(owner); 493 if (team == NULL) 494 return B_BAD_TEAM_ID; 495 BReference<Team> teamReference(team, true); 496 497 // clone the name 498 nameLength = strlen(name) + 1; 499 nameLength = min_c(nameLength, B_OS_NAME_LENGTH); 500 tempName = (char*)malloc(nameLength); 501 if (tempName == NULL) 502 return B_NO_MEMORY; 503 504 strlcpy(tempName, name, nameLength); 505 506 state = disable_interrupts(); 507 GRAB_SEM_LIST_LOCK(); 508 509 // get the first slot from the free list 510 sem = sFreeSemsHead; 511 if (sem) { 512 // remove it from the free list 513 sFreeSemsHead = sem->u.unused.next; 514 if (!sFreeSemsHead) 515 sFreeSemsTail = NULL; 516 517 // init the slot 518 GRAB_SEM_LOCK(*sem); 519 sem->id = 
sem->u.unused.next_id; 520 sem->u.used.count = count; 521 sem->u.used.net_count = count; 522 new(&sem->queue) ThreadQueue; 523 sem->u.used.name = tempName; 524 sem->u.used.owner = team->id; 525 sem->u.used.select_infos = NULL; 526 id = sem->id; 527 528 list_add_item(&team->sem_list, &sem->u.used.team_link); 529 530 RELEASE_SEM_LOCK(*sem); 531 532 atomic_add(&sUsedSems, 1); 533 534 KTRACE("create_sem_etc(count: %ld, name: %s, owner: %ld) -> %ld", 535 count, name, owner, id); 536 537 T_SCHEDULING_ANALYSIS(CreateSemaphore(id, name)); 538 NotifyWaitObjectListeners(&WaitObjectListener::SemaphoreCreated, id, 539 name); 540 } 541 542 RELEASE_SEM_LIST_LOCK(); 543 restore_interrupts(state); 544 545 if (sem == NULL) 546 free(tempName); 547 548 return id; 549 } 550 551 552 status_t 553 select_sem(int32 id, struct select_info* info, bool kernel) 554 { 555 cpu_status state; 556 int32 slot; 557 status_t error = B_OK; 558 559 if (id < 0) 560 return B_BAD_SEM_ID; 561 562 slot = id % sMaxSems; 563 564 state = disable_interrupts(); 565 GRAB_SEM_LOCK(sSems[slot]); 566 567 if (sSems[slot].id != id) { 568 // bad sem ID 569 error = B_BAD_SEM_ID; 570 } else if (!kernel 571 && sSems[slot].u.used.owner == team_get_kernel_team_id()) { 572 // kernel semaphore, but call from userland 573 error = B_NOT_ALLOWED; 574 } else { 575 info->selected_events &= B_EVENT_ACQUIRE_SEMAPHORE | B_EVENT_INVALID; 576 577 if (info->selected_events != 0) { 578 info->next = sSems[slot].u.used.select_infos; 579 sSems[slot].u.used.select_infos = info; 580 581 if (sSems[slot].u.used.count > 0) 582 notify_select_events(info, B_EVENT_ACQUIRE_SEMAPHORE); 583 } 584 } 585 586 RELEASE_SEM_LOCK(sSems[slot]); 587 restore_interrupts(state); 588 589 return error; 590 } 591 592 593 status_t 594 deselect_sem(int32 id, struct select_info* info, bool kernel) 595 { 596 cpu_status state; 597 int32 slot; 598 599 if (id < 0) 600 return B_BAD_SEM_ID; 601 602 if (info->selected_events == 0) 603 return B_OK; 604 605 slot = id % 
sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id == id) {
		// unlink info from the singly linked select_infos list
		select_info** infoLocation = &sSems[slot].u.used.select_infos;
		while (*infoLocation != NULL && *infoLocation != info)
			infoLocation = &(*infoLocation)->next;

		if (*infoLocation == info)
			*infoLocation = info->next;
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}


/*!	Forcibly removes a thread from a semaphores wait queue. May have to wake up
	other threads in the process.
	Must be called with semaphore lock held. The thread lock must not be held.
*/
static void
remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
{
	if (!entry->queued)
		return;

	sem->queue.Remove(entry);
	entry->queued = false;
	// give back the units this entry had reserved
	sem->u.used.count += entry->count;

	// We're done with this entry. We only have to check, if other threads
	// need unblocking, too.

	// Now see if more threads need to be woken up. We get the scheduler lock
	// for that time, so the blocking state of threads won't change (due to
	// interruption or timeout). We need that lock anyway when unblocking a
	// thread.
	while ((entry = sem->queue.Head()) != NULL) {
		SpinLocker schedulerLocker(entry->thread->scheduler_lock);
		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied, unblock
			// it. Otherwise we can't unblock any other thread.
			if (entry->count > sem->u.used.net_count)
				break;

			thread_unblock_locked(entry->thread, B_OK);
			sem->u.used.net_count -= entry->count;
		} else {
			// The thread is no longer waiting, but still queued, which means
			// acquiration failed and we can just remove it.
			sem->u.used.count += entry->count;
		}

		sem->queue.Remove(entry);
		entry->queued = false;
	}

	// select notification, if the semaphore is now acquirable
	if (sem->u.used.count > 0)
		notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE);
}


/*!	This function deletes all semaphores belonging to a particular team.
*/
void
sem_delete_owned_sems(Team* team)
{
	while (true) {
		char* name;

		{
			// get the next semaphore from the team's sem list
			InterruptsLocker locker;
			SpinLocker semListLocker(sSemsSpinlock);
			sem_entry* sem = (sem_entry*)list_remove_head_item(&team->sem_list);
			if (sem == NULL)
				break;

			// delete the semaphore
			GRAB_SEM_LOCK(*sem);
			semListLocker.Unlock();
			uninit_sem_locked(*sem, &name);
				// releases the semaphore lock
		}

		// free the name with interrupts enabled again
		free(name);
	}

	scheduler_reschedule_if_necessary();
}


// Returns the size of the semaphore table.
int32
sem_max_sems(void)
{
	return sMaxSems;
}


// Returns the number of currently allocated semaphores.
int32
sem_used_sems(void)
{
	return sUsedSems;
}


// #pragma mark - Public Kernel API


// Creates a semaphore owned by the kernel team.
sem_id
create_sem(int32 count, const char* name)
{
	return create_sem_etc(count, name, team_get_kernel_team_id());
}


// Deletes a semaphore without permission checking (kernel-only API).
status_t
delete_sem(sem_id id)
{
	return delete_sem_internal(id, false);
}


// Acquires one unit of the semaphore, blocking indefinitely.
status_t
acquire_sem(sem_id id)
{
	return switch_sem_etc(-1, id, 1, 0, 0);
}


// Acquires \a count units with the given flags/timeout.
status_t
acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	return switch_sem_etc(-1, id, count, flags, timeout);
}


// Atomically releases one semaphore and acquires another.
status_t
switch_sem(sem_id toBeReleased, sem_id toBeAcquired)
{
	return switch_sem_etc(toBeReleased, toBeAcquired, 1, 0, 0);
}


/*!	Core acquire path: optionally releases \a semToBeReleased once the caller
	is queued on \a id, then acquires \a count units of \a id, blocking
	according to \a flags/\a timeout. Returns the block result on the blocking
	path, otherwise B_OK or an error.
*/
status_t
switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
	uint32 flags, bigtime_t timeout)
{
	int slot = id % sMaxSems;
	int state;
	status_t status = B_OK;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	if (!are_interrupts_enabled()) {
		panic("switch_sem_etc: called with interrupts disabled for sem "
			"%" B_PRId32 "\n", id);
	}

	if (id < 0)
		return B_BAD_SEM_ID;
	// relative and absolute timeout flags are mutually exclusive
	if (count <= 0
		|| (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {
		return B_BAD_VALUE;
	}

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("switch_sem_etc: bad sem %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	// doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %" B_PRId32 " tried to acquire kernel semaphore "
			"%" B_PRId32 ".\n", thread_get_current_thread_id(), id);
		status = B_NOT_ALLOWED;
		goto err;
	}

	if (sSems[slot].u.used.count - count < 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
			// immediate timeout
			status = B_WOULD_BLOCK;
			goto err;
		} else if ((flags & B_ABSOLUTE_TIMEOUT) != 0 && timeout < 0) {
			// absolute negative timeout
			status = B_TIMED_OUT;
			goto err;
		}
	}

	KTRACE("switch_sem_etc(semToBeReleased: %ld, sem: %ld, count: %ld, "
		"flags: 0x%lx, timeout: %lld)", semToBeReleased, id, count, flags,
		timeout);

	if ((sSems[slot].u.used.count -= count) < 0) {
		// we need to block
		Thread *thread = thread_get_current_thread();

		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
			" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));

		// do a quick check to see if the thread has any pending signals
		// this should catch most of the cases where the thread had a signal
		SpinLocker schedulerLocker(thread->scheduler_lock);
		if (thread_is_interrupted(thread, flags)) {
			schedulerLocker.Unlock();
			// undo our tentative count decrement before bailing out
			sSems[slot].u.used.count += count;
			status = B_INTERRUPTED;
				// the other semaphore will be released later
			goto err;
		}

		schedulerLocker.Unlock();

		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
			timeout = B_INFINITE_TIMEOUT;

		// enqueue in the semaphore queue and get ready to wait
		queued_thread queueEntry(thread, count);
		sSems[slot].queue.Add(&queueEntry);
		queueEntry.queued = true;

		thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE,
			(void*)(addr_t)id);

		RELEASE_SEM_LOCK(sSems[slot]);

		// release the other semaphore, if any
		if (semToBeReleased >= 0) {
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
			semToBeReleased = -1;
		}

		status_t acquireStatus = timeout == B_INFINITE_TIMEOUT
			? thread_block() : thread_block_with_timeout(flags, timeout);

		GRAB_SEM_LOCK(sSems[slot]);

		// If we're still queued, this means the acquiration failed, and we
		// need to remove our entry and (potentially) wake up other threads.
		if (queueEntry.queued)
			remove_thread_from_sem(&queueEntry, &sSems[slot]);

		if (acquireStatus >= B_OK) {
			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
			sSems[slot].u.used.last_acquire_count = count;
#endif
		}

		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);

		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
			thread->name));
		KTRACE("switch_sem_etc() done: 0x%lx", acquireStatus);
		return acquireStatus;
	} else {
		// fast path: enough units available, no blocking needed
		sSems[slot].u.used.net_count -= count;
		sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
		sSems[slot].u.used.last_acquire_count = count;
#endif
	}

err:
	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	// B_OK is 0, so ">= B_OK" simply means a valid sem ID was passed
	if (status == B_INTERRUPTED && semToBeReleased >= B_OK) {
		// depending on when we were interrupted, we need to still
		// release the semaphore to always leave in a consistent
		// state
		release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
	}

#if 0
	if (status == B_NOT_ALLOWED)
	_user_debugger("Thread tried to acquire kernel semaphore.");
#endif

	KTRACE("switch_sem_etc() done: 0x%lx", status);

	return status;
}


// Releases one unit of the semaphore.
status_t
release_sem(sem_id id)
{
	return release_sem_etc(id, 1, 0);
}


/*!	Releases \a count units of semaphore \a id, waking up waiters whose
	requested counts are satisfied. With B_RELEASE_ALL, releases exactly as
	many units as there are blocked acquisition requests.
*/
status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	int32 slot = id % sMaxSems;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;
#if KDEBUG
	if ((flags & B_DO_NOT_RESCHEDULE) == 0 && !are_interrupts_enabled()) {
		panic("release_sem_etc(): called with interrupts disabled and "
			"rescheduling allowed for sem_id %" B_PRId32, id);
	}
#endif

	InterruptsLocker _;
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	// doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %" B_PRId32 " tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		return B_NOT_ALLOWED;
	}

	KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,
		flags);

	// negating marks "last operation was a release" while keeping the
	// last acquirer's ID recoverable (presumably for debugging -- see dump_sem)
	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	if (flags & B_RELEASE_ALL) {
		// net_count - count == total units requested by blocked threads
		count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			return B_OK;

		// Don't release more than necessary -- there might be interrupted/
		// timed out threads in the queue.
		flags |= B_RELEASE_IF_WAITING_ONLY;
	}

	// Grab the scheduler lock, so thread_is_blocked() is reliable (due to
	// possible interruptions or timeouts, it wouldn't be otherwise).
	while (count > 0) {
		queued_thread* entry = sSems[slot].queue.Head();
		if (entry == NULL) {
			if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
			}
			break;
		}

		SpinLocker schedulerLock(entry->thread->scheduler_lock);
		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied,
			// unblock it. Otherwise we can't unblock any other thread.
			if (entry->count > sSems[slot].u.used.net_count + count) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
				break;
			}

			thread_unblock_locked(entry->thread, B_OK);

			int delta = min_c(count, entry->count);
			sSems[slot].u.used.count += delta;
			sSems[slot].u.used.net_count += delta - entry->count;
			count -= delta;
		} else {
			// The thread is no longer waiting, but still queued, which
			// means acquiration failed and we can just remove it.
			sSems[slot].u.used.count += entry->count;
		}

		sSems[slot].queue.Remove(entry);
		entry->queued = false;
	}

	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	// If we've unblocked another thread reschedule, if we've not explicitly
	// been told not to.
	if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
		semLocker.Unlock();

		SpinLocker _(thread_get_current_thread()->scheduler_lock);
		scheduler_reschedule_if_necessary_locked();
	}

	return B_OK;
}


// Returns the semaphore's current count in \a _count.
status_t
get_sem_count(sem_id id, int32 *_count)
{
	int slot;
	int state;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (_count == NULL)
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("sem_get_count: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	*_count = sSems[slot].u.used.count;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}


/*!	Called by the get_sem_info() macro.
*/ 1062 status_t 1063 _get_sem_info(sem_id id, struct sem_info *info, size_t size) 1064 { 1065 status_t status = B_OK; 1066 int state; 1067 int slot; 1068 1069 if (!sSemsActive) 1070 return B_NO_MORE_SEMS; 1071 if (id < 0) 1072 return B_BAD_SEM_ID; 1073 if (info == NULL || size != sizeof(sem_info)) 1074 return B_BAD_VALUE; 1075 1076 slot = id % sMaxSems; 1077 1078 state = disable_interrupts(); 1079 GRAB_SEM_LOCK(sSems[slot]); 1080 1081 if (sSems[slot].id != id) { 1082 status = B_BAD_SEM_ID; 1083 TRACE(("get_sem_info: invalid sem_id %ld\n", id)); 1084 } else 1085 fill_sem_info(&sSems[slot], info, size); 1086 1087 RELEASE_SEM_LOCK(sSems[slot]); 1088 restore_interrupts(state); 1089 1090 return status; 1091 } 1092 1093 1094 /*! Called by the get_next_sem_info() macro. */ 1095 status_t 1096 _get_next_sem_info(team_id teamID, int32 *_cookie, struct sem_info *info, 1097 size_t size) 1098 { 1099 if (!sSemsActive) 1100 return B_NO_MORE_SEMS; 1101 if (_cookie == NULL || info == NULL || size != sizeof(sem_info)) 1102 return B_BAD_VALUE; 1103 if (teamID < 0) 1104 return B_BAD_TEAM_ID; 1105 1106 Team* team = Team::Get(teamID); 1107 if (team == NULL) 1108 return B_BAD_TEAM_ID; 1109 BReference<Team> teamReference(team, true); 1110 1111 InterruptsSpinLocker semListLocker(sSemsSpinlock); 1112 1113 // TODO: find a way to iterate the list that is more reliable 1114 sem_entry* sem = (sem_entry*)list_get_first_item(&team->sem_list); 1115 int32 newIndex = *_cookie; 1116 int32 index = 0; 1117 bool found = false; 1118 1119 while (!found) { 1120 // find the next entry to be returned 1121 while (sem != NULL && index < newIndex) { 1122 sem = (sem_entry*)list_get_next_item(&team->sem_list, sem); 1123 index++; 1124 } 1125 1126 if (sem == NULL) 1127 return B_BAD_VALUE; 1128 1129 GRAB_SEM_LOCK(*sem); 1130 1131 if (sem->id != -1 && sem->u.used.owner == team->id) { 1132 // found one! 
1133 fill_sem_info(sem, info, size); 1134 newIndex = index + 1; 1135 found = true; 1136 } else 1137 newIndex++; 1138 1139 RELEASE_SEM_LOCK(*sem); 1140 } 1141 1142 if (!found) 1143 return B_BAD_VALUE; 1144 1145 *_cookie = newIndex; 1146 return B_OK; 1147 } 1148 1149 1150 status_t 1151 set_sem_owner(sem_id id, team_id newTeamID) 1152 { 1153 if (sSemsActive == false) 1154 return B_NO_MORE_SEMS; 1155 if (id < 0) 1156 return B_BAD_SEM_ID; 1157 if (newTeamID < 0) 1158 return B_BAD_TEAM_ID; 1159 1160 int32 slot = id % sMaxSems; 1161 1162 // get the new team 1163 Team* newTeam = Team::Get(newTeamID); 1164 if (newTeam == NULL) 1165 return B_BAD_TEAM_ID; 1166 BReference<Team> newTeamReference(newTeam, true); 1167 1168 InterruptsSpinLocker semListLocker(sSemsSpinlock); 1169 SpinLocker semLocker(sSems[slot].lock); 1170 1171 if (sSems[slot].id != id) { 1172 TRACE(("set_sem_owner: invalid sem_id %ld\n", id)); 1173 return B_BAD_SEM_ID; 1174 } 1175 1176 list_remove_link(&sSems[slot].u.used.team_link); 1177 list_add_item(&newTeam->sem_list, &sSems[slot].u.used.team_link); 1178 1179 sSems[slot].u.used.owner = newTeam->id; 1180 return B_OK; 1181 } 1182 1183 1184 /*! Returns the name of the semaphore. The name is not copied, so the caller 1185 must make sure that the semaphore remains alive as long as the name is used. 
*/
const char*
sem_get_name_unsafe(sem_id id)
{
	int slot = id % sMaxSems;

	// unsynchronized by design ("unsafe"): relies on short-circuit
	// evaluation so that sSems[slot] is only read after id has been
	// range-checked; the caller must keep the semaphore alive
	if (sSemsActive == false || id < 0 || sSems[slot].id != id)
		return NULL;

	return sSems[slot].u.used.name;
}


// #pragma mark - Syscalls


/*!	Syscall backend of create_sem(): copies the name from userland (when
	given) and creates the semaphore owned by the calling team.
*/
sem_id
_user_create_sem(int32 count, const char *userName)
{
	char name[B_OS_NAME_LENGTH];

	// an unnamed semaphore is allowed
	if (userName == NULL)
		return create_sem_etc(count, NULL, team_get_current_team_id());

	// validate and copy the name before touching it
	if (!IS_USER_ADDRESS(userName)
		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
		return B_BAD_ADDRESS;

	return create_sem_etc(count, name, team_get_current_team_id());
}


status_t
_user_delete_sem(sem_id id)
{
	// 'true' marks this as a userland call; delete_sem_internal() is
	// expected to apply permission checking accordingly
	return delete_sem_internal(id, true);
}


/*!	Syscall backend of acquire_sem(): interruptible, permission-checked,
	and transparently restarted after signals via the restart machinery.
*/
status_t
_user_acquire_sem(sem_id id)
{
	status_t error = switch_sem_etc(-1, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	return syscall_restart_handle_post(error);
}


status_t
_user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	// may adjust the timeout when this invocation is a syscall restart
	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(-1, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	return syscall_restart_handle_timeout_post(error, timeout);
}


status_t
_user_switch_sem(sem_id releaseSem, sem_id id)
{
	status_t error = switch_sem_etc(releaseSem, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	// only restartable when no semaphore was to be released (releaseSem
	// < 0) -- presumably a restart must not release releaseSem twice
	if (releaseSem < 0)
		return syscall_restart_handle_post(error);

	return error;
}


status_t
_user_switch_sem_etc(sem_id releaseSem, sem_id id, int32 count, uint32 flags,
	bigtime_t timeout)
{
	// restart handling only applies when no semaphore is released (cf.
	// _user_switch_sem())
	if (releaseSem < 0)
		syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(releaseSem, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	if (releaseSem < 0)
		return syscall_restart_handle_timeout_post(error, timeout);

	return error;
}


status_t
_user_release_sem(sem_id id)
{
	return release_sem_etc(id, 1, B_CHECK_PERMISSION);
}


status_t
_user_release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	return release_sem_etc(id, count, flags | B_CHECK_PERMISSION);
}


status_t
_user_get_sem_count(sem_id id, int32 *userCount)
{
	status_t status;
	int32 count;

	if (userCount == NULL || !IS_USER_ADDRESS(userCount))
		return B_BAD_ADDRESS;

	// fetch into a kernel-side variable, then copy out
	status = get_sem_count(id, &count);
	if (status == B_OK && user_memcpy(userCount, &count, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_sem_info(sem_id id, struct sem_info *userInfo, size_t size)
{
	struct sem_info info;
	status_t status;

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	// _get_sem_info() rejects any size != sizeof(sem_info), so the copy
	// below never writes more than the kernel-side buffer holds
	status = _get_sem_info(id, &info, size);
	if (status == B_OK && user_memcpy(userInfo, &info, size) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_next_sem_info(team_id team, int32 *userCookie, struct sem_info *userInfo,
	size_t size)
{
	struct sem_info info;
	int32 cookie;
	status_t status;

	// copy the cookie in before use; validate both userland pointers
	if (userCookie == NULL || userInfo == NULL
		|| !IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_sem_info(team, &cookie, &info, size);

	// copy out both the entry and the advanced cookie on success
	if (status == B_OK) {
		if (user_memcpy(userInfo, &info, size) < B_OK
			|| user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK)
			return B_BAD_ADDRESS;
	}

	return status;
}


status_t
_user_set_sem_owner(sem_id id, team_id team)
{
	// NOTE(review): unlike the other _user_* calls there is no
	// B_CHECK_PERMISSION-style check here, and set_sem_owner() only
	// validates the IDs -- confirm this is intended
	return set_sem_owner(id, team);
}
