/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/* Functionality for symmetrical multi-processors */

#include <smp.h>

#include <stdlib.h>
#include <string.h>

#include <arch/cpu.h>
#include <arch/debug.h>
#include <arch/int.h>
#include <arch/smp.h>
#include <cpu.h>
#include <generic_syscall.h>
#include <int.h>
#include <spinlock_contention.h>
#include <thread.h>

#include "kernel_debug_config.h"


//#define TRACE_SMP

#ifdef TRACE_SMP
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#define MSG_POOL_SIZE (SMP_MAX_CPUS * 4)

// These macros define the number of unsuccessful iterations in
// acquire_spinlock() and acquire_spinlock_nocheck() after which the functions
// panic(), assuming a deadlock.
#define SPINLOCK_DEADLOCK_COUNT				100000000
#define SPINLOCK_DEADLOCK_COUNT_NO_CHECK	2000000000


struct smp_msg {
	struct smp_msg	*next;
	int32			message;
	uint32			data;
	uint32			data2;
	uint32			data3;
	void			*data_ptr;
	uint32			flags;
	int32			ref_count;
	volatile bool	done;
	uint32			proc_bitmap;
};

#define MAILBOX_LOCAL 1
#define MAILBOX_BCAST 2

static spinlock boot_cpu_spin[SMP_MAX_CPUS] = { };

static struct smp_msg *sFreeMessages = NULL;
static volatile int sFreeMessageCount = 0;
static spinlock sFreeMessageSpinlock = B_SPINLOCK_INITIALIZER;

static struct smp_msg *sCPUMessages[SMP_MAX_CPUS] = { NULL, };
static spinlock sCPUMessageSpinlock[SMP_MAX_CPUS];

static struct smp_msg *sBroadcastMessages = NULL;
static spinlock sBroadcastMessageSpinlock = B_SPINLOCK_INITIALIZER;

static bool sICIEnabled = false;
static int32 sNumCPUs = 1;

static int32 process_pending_ici(int32 currentCPU);


#if DEBUG_SPINLOCKS
#define NUM_LAST_CALLERS	32

static struct {
	void		*caller;
	spinlock	*lock;
} sLastCaller[NUM_LAST_CALLERS];

static vint32 sLastIndex = 0;
	// Is incremented atomically. Must be % NUM_LAST_CALLERS before being used
	// as index into sLastCaller. Note that it has to be cast to uint32 before
	// applying the modulo operation, since otherwise after overflowing that
	// would yield negative indices.
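	// Worked example of the cast (illustrative): with NUM_LAST_CALLERS == 32,
	// (uint32)INT32_MAX % 32 == 31 and, after the counter overflows,
	// (uint32)INT32_MIN == 2147483648, so the next slot is
	// 2147483648 % 32 == 0; the slot sequence simply wraps around. A signed
	// modulo of a negative counter value such as INT32_MIN + 1 would instead
	// be negative.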


static void
push_lock_caller(void *caller, spinlock *lock)
{
	int32 index = (uint32)atomic_add(&sLastIndex, 1) % NUM_LAST_CALLERS;

	sLastCaller[index].caller = caller;
	sLastCaller[index].lock = lock;
}


static void *
find_lock_caller(spinlock *lock)
{
	int32 lastIndex = (uint32)sLastIndex % NUM_LAST_CALLERS;

	for (int32 i = 0; i < NUM_LAST_CALLERS; i++) {
		int32 index = (NUM_LAST_CALLERS + lastIndex - 1 - i) % NUM_LAST_CALLERS;
		if (sLastCaller[index].lock == lock)
			return sLastCaller[index].caller;
	}

	return NULL;
}


int
dump_spinlock(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	uint64 address;
	if (!evaluate_debug_expression(argv[1], &address, false))
		return 0;

	spinlock* lock = (spinlock*)(addr_t)address;
	kprintf("spinlock %p:\n", lock);
	bool locked = B_SPINLOCK_IS_LOCKED(lock);
	if (locked) {
		kprintf(" locked from %p\n", find_lock_caller(lock));
	} else
		kprintf(" not locked\n");

	return 0;
}


#endif	// DEBUG_SPINLOCKS


int
dump_ici_messages(int argc, char** argv)
{
	// count broadcast messages
	int32 count = 0;
	int32 doneCount = 0;
	int32 unreferencedCount = 0;
	smp_msg* message = sBroadcastMessages;
	while (message != NULL) {
		count++;
		if (message->done)
			doneCount++;
		if (message->ref_count <= 0)
			unreferencedCount++;
		message = message->next;
	}

	kprintf("ICI broadcast messages: %ld, first: %p\n", count,
		sBroadcastMessages);
	kprintf(" done: %ld\n", doneCount);
	kprintf(" unreferenced: %ld\n", unreferencedCount);

	// count per-CPU messages
	for (int32 i = 0; i < sNumCPUs; i++) {
		count = 0;
		message = sCPUMessages[i];
		while (message != NULL) {
			count++;
			message = message->next;
		}

		kprintf("CPU %ld messages: %ld, first: %p\n", i, count,
			sCPUMessages[i]);
	}

	return 0;
}


int
dump_ici_message(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	uint64 address;
	if (!evaluate_debug_expression(argv[1], &address, false))
		return 0;

	smp_msg* message = (smp_msg*)(addr_t)address;
	kprintf("ICI message %p:\n", message);
	kprintf(" next: %p\n", message->next);
	kprintf(" message: %ld\n", message->message);
	kprintf(" data: %ld\n", message->data);
	kprintf(" data2: %ld\n", message->data2);
	kprintf(" data3: %ld\n", message->data3);
	kprintf(" data_ptr: %p\n", message->data_ptr);
	kprintf(" flags: %lx\n", message->flags);
	kprintf(" ref_count: %lx\n", message->ref_count);
	kprintf(" done: %s\n", message->done ? "true" : "false");
	kprintf(" proc_bitmap: %lx\n", message->proc_bitmap);

	return 0;
}


static inline void
process_all_pending_ici(int32 currentCPU)
{
	while (process_pending_ici(currentCPU) != B_ENTRY_NOT_FOUND)
		;
}


void
acquire_spinlock(spinlock *lock)
{
#if DEBUG_SPINLOCKS
	if (are_interrupts_enabled()) {
		panic("acquire_spinlock: attempt to acquire lock %p with interrupts "
			"enabled", lock);
	}
#endif

	if (sNumCPUs > 1) {
		int currentCPU = smp_get_current_cpu();
#if B_DEBUG_SPINLOCK_CONTENTION
		while (atomic_add(&lock->lock, 1) != 0)
			process_all_pending_ici(currentCPU);
#else
		while (1) {
			uint32 count = 0;
			while (*lock != 0) {
				if (++count == SPINLOCK_DEADLOCK_COUNT) {
					panic("acquire_spinlock(): Failed to acquire spinlock %p "
						"for a long time!", lock);
					count = 0;
				}

				process_all_pending_ici(currentCPU);
				PAUSE();
			}
			if (atomic_set((int32 *)lock, 1) == 0)
				break;
		}

#if DEBUG_SPINLOCKS
		push_lock_caller(arch_debug_get_caller(), lock);
#endif
#endif
	} else {
#if DEBUG_SPINLOCKS
		int32 oldValue;
		oldValue = atomic_set((int32 *)lock, 1);
		if (oldValue != 0) {
			panic("acquire_spinlock: attempt to acquire lock %p twice on "
				"non-SMP system (last caller: %p, value %ld)", lock,
				find_lock_caller(lock), oldValue);
		}

		push_lock_caller(arch_debug_get_caller(), lock);
#endif
	}
}


static void
acquire_spinlock_nocheck(spinlock *lock)
{
#if DEBUG_SPINLOCKS
	if (are_interrupts_enabled()) {
		panic("acquire_spinlock_nocheck: attempt to acquire lock %p with "
			"interrupts enabled", lock);
	}
#endif

	if (sNumCPUs > 1) {
#if B_DEBUG_SPINLOCK_CONTENTION
		while (atomic_add(&lock->lock, 1) != 0) {
		}
#else
		while (1) {
			uint32 count = 0;
			while (*lock != 0) {
				if (++count == SPINLOCK_DEADLOCK_COUNT_NO_CHECK) {
					panic("acquire_spinlock_nocheck(): Failed to acquire "
						"spinlock %p for a long time!", lock);
					count = 0;
				}

				PAUSE();
			}

			if (atomic_set((int32 *)lock, 1) == 0)
				break;
		}
#endif
	} else {
#if DEBUG_SPINLOCKS
		if (atomic_set((int32 *)lock, 1) != 0) {
			panic("acquire_spinlock_nocheck: attempt to acquire lock %p twice "
				"on non-SMP system\n", lock);
		}
#endif
	}
}


void
release_spinlock(spinlock *lock)
{
	if (sNumCPUs > 1) {
		if (are_interrupts_enabled())
			panic("release_spinlock: attempt to release lock %p with "
				"interrupts enabled\n", lock);
#if B_DEBUG_SPINLOCK_CONTENTION
		{
			int32 count = atomic_set(&lock->lock, 0) - 1;
			if (count < 0) {
				panic("release_spinlock: lock %p was already released\n",
					lock);
			} else {
				// add to the total count -- deal with carry manually
				if ((uint32)atomic_add(&lock->count_low, count) + count
						< (uint32)count) {
					atomic_add(&lock->count_high, 1);
				}
			}
		}
#else
		if (atomic_set((int32 *)lock, 0) != 1)
			panic("release_spinlock: lock %p was already released\n", lock);
#endif
	} else {
#if DEBUG_SPINLOCKS
		if (are_interrupts_enabled())
			panic("release_spinlock: attempt to release lock %p with "
				"interrupts enabled\n", lock);
		if (atomic_set((int32 *)lock, 0) != 1)
			panic("release_spinlock: lock %p was already released\n", lock);
#endif
	}
}


/** Finds a free message and gets it.
 *	NOTE: has the side effect of disabling interrupts;
 *	the return value is the former interrupt state.
 */
static cpu_status
find_free_message(struct smp_msg **msg)
{
	cpu_status state;

	TRACE(("find_free_message: entry\n"));

retry:
	while (sFreeMessageCount <= 0) {
		state = disable_interrupts();
		process_all_pending_ici(smp_get_current_cpu());
		restore_interrupts(state);
		PAUSE();
	}
	state = disable_interrupts();
	acquire_spinlock(&sFreeMessageSpinlock);

	if (sFreeMessageCount <= 0) {
		// someone grabbed one while we were getting the lock,
		// go back to waiting for it
		release_spinlock(&sFreeMessageSpinlock);
		restore_interrupts(state);
		goto retry;
	}

	*msg = sFreeMessages;
	sFreeMessages = (*msg)->next;
	sFreeMessageCount--;

	release_spinlock(&sFreeMessageSpinlock);

	TRACE(("find_free_message: returning msg %p\n", *msg));

	return state;
}


static void
return_free_message(struct smp_msg *msg)
{
	TRACE(("return_free_message: returning msg %p\n", msg));

	acquire_spinlock_nocheck(&sFreeMessageSpinlock);
	msg->next = sFreeMessages;
	sFreeMessages = msg;
	sFreeMessageCount++;
	release_spinlock(&sFreeMessageSpinlock);
}


static struct smp_msg *
check_for_message(int currentCPU, int *source_mailbox)
{
	struct smp_msg *msg;

	if (!sICIEnabled)
		return NULL;

	acquire_spinlock_nocheck(&sCPUMessageSpinlock[currentCPU]);
	msg = sCPUMessages[currentCPU];
	if (msg != NULL) {
		sCPUMessages[currentCPU] = msg->next;
		release_spinlock(&sCPUMessageSpinlock[currentCPU]);
		TRACE((" cpu %d: found msg %p in cpu mailbox\n", currentCPU, msg));
		*source_mailbox = MAILBOX_LOCAL;
	} else {
		// try getting one from the broadcast mailbox

		release_spinlock(&sCPUMessageSpinlock[currentCPU]);
		acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);

		msg = sBroadcastMessages;
		while (msg != NULL) {
			if (CHECK_BIT(msg->proc_bitmap, currentCPU) != 0) {
				// we have handled this one already
				msg = msg->next;
				continue;
			}

			// mark it so we won't try to process this one again
			msg->proc_bitmap = SET_BIT(msg->proc_bitmap, currentCPU);
			*source_mailbox = MAILBOX_BCAST;
			break;
		}
		release_spinlock(&sBroadcastMessageSpinlock);
		TRACE((" cpu %d: found msg %p in broadcast mailbox\n", currentCPU,
			msg));
	}
	return msg;
}

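
/* How a message is retired (see finish_message_processing() below): each CPU
 * that handles it decrements ref_count, and the CPU that brings it to zero
 * removes it from the broadcast list (per-CPU messages were already dequeued
 * in check_for_message()) and, if SMP_MSG_FLAG_FREE_ARG is set, free()s
 * data_ptr. For SMP_MSG_FLAG_SYNC messages it then only sets msg->done and
 * the waiting sender returns the message to the free pool; otherwise the
 * last handler returns it to the pool itself.
 */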
static void
finish_message_processing(int currentCPU, struct smp_msg *msg,
	int source_mailbox)
{
	int old_refcount;

	old_refcount = atomic_add(&msg->ref_count, -1);
	if (old_refcount == 1) {
		// we were the last one to decrement the ref_count
		// it's our job to remove it from the list & possibly clean it up
		struct smp_msg **mbox = NULL;
		spinlock *spinlock = NULL;

		// clean up the message from one of the mailboxes
		switch (source_mailbox) {
			case MAILBOX_BCAST:
				mbox = &sBroadcastMessages;
				spinlock = &sBroadcastMessageSpinlock;
				break;
			case MAILBOX_LOCAL:
				mbox = &sCPUMessages[currentCPU];
				spinlock = &sCPUMessageSpinlock[currentCPU];
				break;
		}

		acquire_spinlock_nocheck(spinlock);

		TRACE(("cleaning up message %p\n", msg));

		if (source_mailbox != MAILBOX_BCAST) {
			// local mailbox -- the message has already been removed in
			// check_for_message()
		} else if (msg == *mbox) {
			(*mbox) = msg->next;
		} else {
			// we need to walk to find the message in the list.
			// we can't use any data found when previously walking through
			// the list, since the list may have changed. But we are
			// guaranteed to at least have msg in it.
			struct smp_msg *last = NULL;
			struct smp_msg *msg1;

			msg1 = *mbox;
			while (msg1 != NULL && msg1 != msg) {
				last = msg1;
				msg1 = msg1->next;
			}

			// by definition, last must be something
			if (msg1 == msg && last != NULL)
				last->next = msg->next;
			else
				panic("last == NULL or msg != msg1");
		}

		release_spinlock(spinlock);

		if ((msg->flags & SMP_MSG_FLAG_FREE_ARG) != 0 && msg->data_ptr != NULL)
			free(msg->data_ptr);

		if (msg->flags & SMP_MSG_FLAG_SYNC) {
			msg->done = true;
			// the caller cpu should now free the message
		} else {
			// in the !SYNC case, we get to free the message
			return_free_message(msg);
		}
	}
}


static int32
process_pending_ici(int32 currentCPU)
{
	struct smp_msg *msg;
	bool haltCPU = false;
	int sourceMailbox = 0;
	int retval = B_HANDLED_INTERRUPT;

	msg = check_for_message(currentCPU, &sourceMailbox);
	if (msg == NULL)
		return B_ENTRY_NOT_FOUND;

	TRACE((" cpu %ld message = %ld\n", currentCPU, msg->message));

	switch (msg->message) {
		case SMP_MSG_INVALIDATE_PAGE_RANGE:
			arch_cpu_invalidate_TLB_range((addr_t)msg->data,
				(addr_t)msg->data2);
			break;
		case SMP_MSG_INVALIDATE_PAGE_LIST:
			arch_cpu_invalidate_TLB_list((addr_t *)msg->data, (int)msg->data2);
			break;
		case SMP_MSG_USER_INVALIDATE_PAGES:
			arch_cpu_user_TLB_invalidate();
			break;
		case SMP_MSG_GLOBAL_INVALIDATE_PAGES:
			arch_cpu_global_TLB_invalidate();
			break;
		case SMP_MSG_CPU_HALT:
			haltCPU = true;
			break;
		case SMP_MSG_CALL_FUNCTION:
		{
			smp_call_func func = (smp_call_func)msg->data_ptr;
			func(msg->data, currentCPU, msg->data2, msg->data3);
			break;
		}
		case SMP_MSG_RESCHEDULE_IF_IDLE:
		{
			struct thread* thread = thread_get_current_thread();
			if (thread->priority == B_IDLE_PRIORITY)
				thread->cpu->invoke_scheduler = true;
			break;
		}

		default:
			dprintf("smp_intercpu_int_handler: got unknown message %ld\n",
				msg->message);
	}

	// finish dealing with this message, possibly removing it from the list
	finish_message_processing(currentCPU, msg, sourceMailbox);

	// special case for the halt message
	if (haltCPU)
		debug_trap_cpu_in_kdl(false);

	return retval;
}


#if B_DEBUG_SPINLOCK_CONTENTION

static uint64
get_spinlock_counter(spinlock* lock)
{
	uint32 high;
	uint32 low;
	do {
		high = (uint32)atomic_get(&lock->count_high);
		low = (uint32)atomic_get(&lock->count_low);
	} while (high != atomic_get(&lock->count_high));

	return ((uint64)high << 32) | low;
}


static status_t
spinlock_contention_syscall(const char* subsystem, uint32 function,
	void* buffer, size_t bufferSize)
{
	spinlock_contention_info info;

	if (function != GET_SPINLOCK_CONTENTION_INFO)
		return B_BAD_VALUE;

	if (bufferSize < sizeof(spinlock_contention_info))
		return B_BAD_VALUE;

	info.thread_spinlock_counter = get_spinlock_counter(&gThreadSpinlock);
	info.team_spinlock_counter = get_spinlock_counter(&gTeamSpinlock);

	if (!IS_USER_ADDRESS(buffer)
		|| user_memcpy(buffer, &info, sizeof(info)) != B_OK) {
		return B_BAD_ADDRESS;
	}

	return B_OK;
}

#endif	// B_DEBUG_SPINLOCK_CONTENTION


// #pragma mark -


int
smp_intercpu_int_handler(void)
{
	int currentCPU = smp_get_current_cpu();

	TRACE(("smp_intercpu_int_handler: entry on cpu %d\n", currentCPU));

	process_all_pending_ici(currentCPU);

	TRACE(("smp_intercpu_int_handler: done\n"));

	return B_HANDLED_INTERRUPT;
}


void
smp_send_ici(int32 targetCPU, int32 message, uint32 data, uint32 data2,
	uint32 data3, void *data_ptr, uint32 flags)
{
	struct smp_msg *msg;

	TRACE(("smp_send_ici: target 0x%lx, mess 0x%lx, data 0x%lx, data2 0x%lx, "
		"data3 0x%lx, ptr %p, flags 0x%lx\n", targetCPU, message, data, data2,
		data3, data_ptr, flags));

	if (sICIEnabled) {
		int state;
		int currentCPU;

		// find_free_message leaves interrupts disabled
		state = find_free_message(&msg);

		currentCPU = smp_get_current_cpu();
		if (targetCPU == currentCPU) {
			return_free_message(msg);
			restore_interrupts(state);
			return; // nope, can't do that
		}

		// set up the message
		msg->message = message;
		msg->data = data;
		msg->data2 = data2;
		msg->data3 = data3;
		msg->data_ptr = data_ptr;
		msg->ref_count = 1;
		msg->flags = flags;
		msg->done = false;

		// stick it in the appropriate cpu's mailbox
		acquire_spinlock_nocheck(&sCPUMessageSpinlock[targetCPU]);
		msg->next = sCPUMessages[targetCPU];
		sCPUMessages[targetCPU] = msg;
		release_spinlock(&sCPUMessageSpinlock[targetCPU]);

		arch_smp_send_ici(targetCPU);

		if (flags & SMP_MSG_FLAG_SYNC) {
			// wait for the other cpu to finish processing it
			// the interrupt handler will ref count it to <0
			// if the message is sync after it has removed it from the mailbox
			while (msg->done == false) {
				process_all_pending_ici(currentCPU);
				PAUSE();
			}
			// for SYNC messages, it's our responsibility to put it
			// back into the free list
			return_free_message(msg);
		}

		restore_interrupts(state);
	}
}


void
smp_send_multicast_ici(cpu_mask_t cpuMask, int32 message, uint32 data,
	uint32 data2, uint32 data3, void *data_ptr, uint32 flags)
{
	if (!sICIEnabled)
		return;

	int currentCPU = smp_get_current_cpu();
	cpuMask &= ~((cpu_mask_t)1 << currentCPU)
		& (((cpu_mask_t)1 << sNumCPUs) - 1);
	if (cpuMask == 0) {
		panic("smp_send_multicast_ici(): 0 CPU mask");
		return;
	}

	// count target CPUs
	int32 targetCPUs = 0;
	for (int32 i = 0; i < sNumCPUs; i++) {
		if ((cpuMask & (cpu_mask_t)1 << i) != 0)
			targetCPUs++;
	}

	// find_free_message leaves interrupts disabled
	struct smp_msg *msg;
	int state = find_free_message(&msg);

	msg->message = message;
	msg->data = data;
	msg->data2 = data2;
	msg->data3 = data3;
	msg->data_ptr = data_ptr;
	msg->ref_count = targetCPUs;
	msg->flags = flags;
	msg->proc_bitmap = ~cpuMask;
	msg->done = false;

	// stick it in the broadcast mailbox
	acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);
	msg->next = sBroadcastMessages;
	sBroadcastMessages = msg;
	release_spinlock(&sBroadcastMessageSpinlock);

	arch_smp_send_broadcast_ici();
		// TODO: Introduce a call that only bothers the target CPUs!
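
	// While waiting below for an SMP_MSG_FLAG_SYNC message to complete, the
	// sender keeps draining its own pending ICIs via
	// process_all_pending_ici(); merely spinning on msg->done could deadlock
	// if two CPUs send each other synchronous messages at the same time.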

	if (flags & SMP_MSG_FLAG_SYNC) {
		// wait for the other cpus to finish processing it
		// the interrupt handler will ref count it to <0
		// if the message is sync after it has removed it from the mailbox
		while (msg->done == false) {
			process_all_pending_ici(currentCPU);
			PAUSE();
		}

		// for SYNC messages, it's our responsibility to put it
		// back into the free list
		return_free_message(msg);
	}

	restore_interrupts(state);
}


void
smp_send_broadcast_ici(int32 message, uint32 data, uint32 data2, uint32 data3,
	void *data_ptr, uint32 flags)
{
	struct smp_msg *msg;

	TRACE(("smp_send_broadcast_ici: cpu %ld mess 0x%lx, data 0x%lx, "
		"data2 0x%lx, data3 0x%lx, ptr %p, flags 0x%lx\n",
		smp_get_current_cpu(), message, data, data2, data3, data_ptr, flags));

	if (sICIEnabled) {
		int state;
		int currentCPU;

		// find_free_message leaves interrupts disabled
		state = find_free_message(&msg);

		currentCPU = smp_get_current_cpu();

		msg->message = message;
		msg->data = data;
		msg->data2 = data2;
		msg->data3 = data3;
		msg->data_ptr = data_ptr;
		msg->ref_count = sNumCPUs - 1;
		msg->flags = flags;
		msg->proc_bitmap = SET_BIT(0, currentCPU);
		msg->done = false;

		TRACE(("smp_send_broadcast_ici: cpu %d inserting msg %p into "
			"broadcast mbox\n", currentCPU, msg));

		// stick it in the broadcast mailbox
		acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);
		msg->next = sBroadcastMessages;
		sBroadcastMessages = msg;
		release_spinlock(&sBroadcastMessageSpinlock);

		arch_smp_send_broadcast_ici();

		TRACE(("smp_send_broadcast_ici: sent interrupt\n"));

		if (flags & SMP_MSG_FLAG_SYNC) {
			// wait for the other cpus to finish processing it
			// the interrupt handler will ref count it to <0
			// if the message is sync after it has removed it from the mailbox
			TRACE(("smp_send_broadcast_ici: waiting for ack\n"));

			while (msg->done == false) {
				process_all_pending_ici(currentCPU);
				PAUSE();
			}

			TRACE(("smp_send_broadcast_ici: returning message to free list\n"));

			// for SYNC messages, it's our responsibility to put it
			// back into the free list
			return_free_message(msg);
		}

		restore_interrupts(state);
	}

	TRACE(("smp_send_broadcast_ici: done\n"));
}


bool
smp_trap_non_boot_cpus(int32 cpu)
{
	if (cpu > 0) {
#if B_DEBUG_SPINLOCK_CONTENTION
		boot_cpu_spin[cpu].lock = 1;
#else
		boot_cpu_spin[cpu] = 1;
#endif
		acquire_spinlock_nocheck(&boot_cpu_spin[cpu]);
		return false;
	}

	return true;
}


void
smp_wake_up_non_boot_cpus()
{
	int i;

	// ICIs were previously being ignored
	if (sNumCPUs > 1)
		sICIEnabled = true;

	// resume non boot CPUs
	for (i = 1; i < sNumCPUs; i++) {
		release_spinlock(&boot_cpu_spin[i]);
	}
}


/* have all cpus spin until all have run */
void
smp_cpu_rendezvous(volatile uint32 *var, int current_cpu)
{
	atomic_or((vint32*)var, 1 << current_cpu);

	while (*var != (((uint32)1 << sNumCPUs) - 1))
		PAUSE();
}

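
/* Worked example for smp_cpu_rendezvous() above (illustrative): with
 * sNumCPUs == 4, each CPU ORs in its own bit and the loop exits once
 * *var == ((uint32)1 << 4) - 1 == 0xf, i.e. after all four CPUs have
 * checked in.
 */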

status_t
smp_init(kernel_args *args)
{
	struct smp_msg *msg;
	int i;

	TRACE(("smp_init: entry\n"));

#if DEBUG_SPINLOCKS
	add_debugger_command_etc("spinlock", &dump_spinlock,
		"Dump info on a spinlock",
		"\n"
		"Dumps info on a spinlock.\n", 0);
#endif
	add_debugger_command_etc("ici", &dump_ici_messages,
		"Dump info on pending ICI messages",
		"\n"
		"Dumps info on pending ICI messages.\n", 0);
	add_debugger_command_etc("ici_message", &dump_ici_message,
		"Dump info on an ICI message",
		"\n"
		"Dumps info on an ICI message.\n", 0);

	if (args->num_cpus > 1) {
		sFreeMessages = NULL;
		sFreeMessageCount = 0;
		for (i = 0; i < MSG_POOL_SIZE; i++) {
			msg = (struct smp_msg *)malloc(sizeof(struct smp_msg));
			if (msg == NULL) {
				panic("error creating smp mailboxes\n");
				return B_ERROR;
			}
			memset(msg, 0, sizeof(struct smp_msg));
			msg->next = sFreeMessages;
			sFreeMessages = msg;
			sFreeMessageCount++;
		}
		sNumCPUs = args->num_cpus;
	}
	TRACE(("smp_init: calling arch_smp_init\n"));

	return arch_smp_init(args);
}


status_t
smp_per_cpu_init(kernel_args *args, int32 cpu)
{
	return arch_smp_per_cpu_init(args, cpu);
}


status_t
smp_init_post_generic_syscalls(void)
{
#if B_DEBUG_SPINLOCK_CONTENTION
	return register_generic_syscall(SPINLOCK_CONTENTION,
		&spinlock_contention_syscall, 0, 0);
#else
	return B_OK;
#endif
}


void
smp_set_num_cpus(int32 numCPUs)
{
	sNumCPUs = numCPUs;
}


int32
smp_get_num_cpus()
{
	return sNumCPUs;
}


int32
smp_get_current_cpu(void)
{
	return thread_get_current_thread()->cpu->cpu_num;
}


// #pragma mark -
// public exported functions


void
call_all_cpus(void (*func)(void *, int), void *cookie)
{
	cpu_status state = disable_interrupts();

	if (smp_get_num_cpus() > 1) {
		smp_send_broadcast_ici(SMP_MSG_CALL_FUNCTION, (uint32)cookie,
			0, 0, (void *)func, SMP_MSG_FLAG_ASYNC);
	}

	// we need to call this function ourselves as well
	func(cookie, smp_get_current_cpu());

	restore_interrupts(state);
}


void
call_all_cpus_sync(void (*func)(void *, int), void *cookie)
{
	cpu_status state = disable_interrupts();

	if (smp_get_num_cpus() > 1) {
		smp_send_broadcast_ici(SMP_MSG_CALL_FUNCTION, (uint32)cookie,
			0, 0, (void *)func, SMP_MSG_FLAG_SYNC);
	}

	// we need to call this function ourselves as well
	func(cookie, smp_get_current_cpu());

	restore_interrupts(state);
}


void
memory_read_barrier(void)
{
	arch_cpu_memory_read_barrier();
}


void
memory_write_barrier(void)
{
	arch_cpu_memory_write_barrier();
}
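
/* Usage sketch for call_all_cpus()/call_all_cpus_sync() above (illustrative
 * only; flush_insn_cache is a hypothetical callback, not part of this file):
 *
 *	static void
 *	flush_insn_cache(void *cookie, int cpu)
 *	{
 *		// per-CPU work; 'cookie' is the second argument to call_all_cpus()
 *	}
 *
 *	call_all_cpus_sync(&flush_insn_cache, NULL);
 *
 * call_all_cpus_sync() returns only after every CPU, including the calling
 * one, has run the function; call_all_cpus() does not wait for the other
 * CPUs to finish.
 */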