/*
 * Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef KERNEL_SMP_H
#define KERNEL_SMP_H


#include <KernelExport.h>

struct kernel_args;


// intercpu messages
enum {
	SMP_MSG_INVALIDATE_PAGE_RANGE = 0,
	SMP_MSG_INVALIDATE_PAGE_LIST,
	SMP_MSG_USER_INVALIDATE_PAGES,
	SMP_MSG_GLOBAL_INVALIDATE_PAGES,
	SMP_MSG_CPU_HALT,
	SMP_MSG_CALL_FUNCTION,
	SMP_MSG_RESCHEDULE,
	SMP_MSG_RESCHEDULE_IF_IDLE
};

enum {
	SMP_MSG_FLAG_ASYNC = 0x0,
	SMP_MSG_FLAG_SYNC = 0x1,
	SMP_MSG_FLAG_FREE_ARG = 0x2,
};

typedef uint32 cpu_mask_t;

typedef void (*smp_call_func)(uint32 data1, int32 currentCPU, uint32 data2, uint32 data3);


#ifdef __cplusplus
extern "C" {
#endif

status_t smp_init(struct kernel_args *args);
status_t smp_per_cpu_init(struct kernel_args *args, int32 cpu);
status_t smp_init_post_generic_syscalls(void);
bool smp_trap_non_boot_cpus(int32 cpu);
void smp_wake_up_non_boot_cpus(void);
void smp_cpu_rendezvous(volatile uint32 *var, int current_cpu);
void smp_send_ici(int32 targetCPU, int32 message, uint32 data, uint32 data2, uint32 data3,
	void *data_ptr, uint32 flags);
void smp_send_multicast_ici(cpu_mask_t cpuMask, int32 message, uint32 data,
	uint32 data2, uint32 data3, void *data_ptr, uint32 flags);
void smp_send_broadcast_ici(int32 message, uint32 data, uint32 data2, uint32 data3,
	void *data_ptr, uint32 flags);
void smp_send_broadcast_ici_interrupts_disabled(int32 currentCPU, int32 message,
	uint32 data, uint32 data2, uint32 data3, void *data_ptr, uint32 flags);

int32 smp_get_num_cpus(void);
void smp_set_num_cpus(int32 numCPUs);
int32 smp_get_current_cpu(void);

int smp_intercpu_int_handler(int32 cpu);

void _acquire_spinlock(spinlock* lock);

#ifdef __cplusplus
}
#endif


// Unless spinlock debug features are enabled, try to inline
// {acquire,release}_spinlock().
#if !DEBUG_SPINLOCKS && !B_DEBUG_SPINLOCK_CONTENTION

static inline void
acquire_spinlock_inline(spinlock* lock)
{
	if (atomic_or((int32*)lock, 1) == 0)
		return;
	_acquire_spinlock(lock);
}


static inline void
release_spinlock_inline(spinlock* lock)
{
	atomic_and((int32*)lock, 0);
}


#define acquire_spinlock(lock)		acquire_spinlock_inline(lock)
#define release_spinlock(lock)		release_spinlock_inline(lock)

#endif	// !DEBUG_SPINLOCKS && !B_DEBUG_SPINLOCK_CONTENTION


#endif	/* KERNEL_SMP_H */
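
/*
 * Usage sketch (illustrative only; the names sListLock, sList and
 * list_insert() below are hypothetical, not part of this header): the
 * inline acquire_spinlock() fast path tries to set the lock word with
 * atomic_or() and only falls back to the out-of-line _acquire_spinlock()
 * on contention, while release_spinlock() clears the word with
 * atomic_and(). Spinlocks are held with interrupts disabled, so callers
 * typically bracket the critical section with disable_interrupts()/
 * restore_interrupts() from KernelExport.h:
 *
 *	static spinlock sListLock = 0;
 *
 *	void
 *	list_insert(struct node* node)
 *	{
 *		cpu_status state = disable_interrupts();
 *		acquire_spinlock(&sListLock);
 *
 *		// ... manipulate the shared list ...
 *
 *		release_spinlock(&sListLock);
 *		restore_interrupts(state);
 *	}
 */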