xref: /haiku/headers/private/kernel/smp.h (revision 579f1dbca962a2a03df54f69fdc6e9423f91f20e)
/*
 * Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef KERNEL_SMP_H
#define KERNEL_SMP_H


#include <KernelExport.h>

struct kernel_args;


// intercpu messages
enum {
	SMP_MSG_INVALIDATE_PAGE_RANGE = 0,
	SMP_MSG_INVALIDATE_PAGE_LIST,
	SMP_MSG_USER_INVALIDATE_PAGES,
	SMP_MSG_GLOBAL_INVALIDATE_PAGES,
	SMP_MSG_CPU_HALT,
	SMP_MSG_CALL_FUNCTION,
	SMP_MSG_RESCHEDULE,
	SMP_MSG_RESCHEDULE_IF_IDLE
};

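// Flags for the smp_send_*_ici() functions below. Presumably SMP_MSG_FLAG_SYNC
// makes the sender wait until all target CPUs have handled the message, and
// SMP_MSG_FLAG_FREE_ARG hands ownership of the message's data_ptr to the
// messaging code, which frees it once the message has been processed.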
enum {
	SMP_MSG_FLAG_ASYNC		= 0x0,
	SMP_MSG_FLAG_SYNC		= 0x1,
	SMP_MSG_FLAG_FREE_ARG	= 0x2,
};

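// Bit mask addressing a set of CPUs by index, e.g. the targets of a
// multicast ICI (see smp_send_multicast_ici() below).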
typedef uint32 cpu_mask_t;

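// Function invoked on the target CPU(s) for SMP_MSG_CALL_FUNCTION messages;
// judging by the parameter names, the sender's data, data2, and data3 values
// are passed through, together with the index of the CPU handling the call.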
typedef void (*smp_call_func)(addr_t data1, int32 currentCPU, addr_t data2, addr_t data3);


#ifdef __cplusplus
extern "C" {
#endif

bool try_acquire_spinlock(spinlock* lock);

status_t smp_init(struct kernel_args *args);
status_t smp_per_cpu_init(struct kernel_args *args, int32 cpu);
status_t smp_init_post_generic_syscalls(void);
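// Boot-time CPU synchronization: judging by the names, non-boot CPUs are held
// in smp_trap_non_boot_cpus() until smp_wake_up_non_boot_cpus() releases them,
// and smp_cpu_rendezvous() acts as a barrier on the shared counter *var.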
bool smp_trap_non_boot_cpus(int32 cpu, uint32* rendezVous);
void smp_wake_up_non_boot_cpus(void);
void smp_cpu_rendezvous(volatile uint32 *var, int current_cpu);
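// Send an inter-CPU interrupt (ICI) message to a single CPU, to a set of CPUs,
// or as a broadcast. The data* arguments are message specific; flags is a
// combination of the SMP_MSG_FLAG_* values above.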
void smp_send_ici(int32 targetCPU, int32 message, addr_t data, addr_t data2, addr_t data3,
		void *data_ptr, uint32 flags);
void smp_send_multicast_ici(cpu_mask_t cpuMask, int32 message, addr_t data,
		addr_t data2, addr_t data3, void *data_ptr, uint32 flags);
void smp_send_broadcast_ici(int32 message, addr_t data, addr_t data2, addr_t data3,
		void *data_ptr, uint32 flags);
void smp_send_broadcast_ici_interrupts_disabled(int32 currentCPU, int32 message,
		addr_t data, addr_t data2, addr_t data3, void *data_ptr, uint32 flags);

int32 smp_get_num_cpus(void);
void smp_set_num_cpus(int32 numCPUs);
int32 smp_get_current_cpu(void);

int smp_intercpu_int_handler(int32 cpu);

#ifdef __cplusplus
}
#endif


// Unless spinlock debug features are enabled, try to inline
// {acquire,release}_spinlock().
#if !DEBUG_SPINLOCKS && !B_DEBUG_SPINLOCK_CONTENTION


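// Atomically sets the lock word; the previous value was 0 only if the lock
// was free, in which case the caller now owns it.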
static inline bool
try_acquire_spinlock_inline(spinlock* lock)
{
	return atomic_or((int32*)lock, 1) == 0;
}


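// Fast path: take the lock directly if it is uncontended, and only fall back
// to the out-of-line acquire_spinlock(), which spins until the lock becomes
// available.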
static inline void
acquire_spinlock_inline(spinlock* lock)
{
	if (try_acquire_spinlock_inline(lock))
		return;
	acquire_spinlock(lock);
}


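// Releases the lock by atomically clearing the lock word.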
static inline void
release_spinlock_inline(spinlock* lock)
{
	atomic_and((int32*)lock, 0);
}


#define try_acquire_spinlock(lock)	try_acquire_spinlock_inline(lock)
#define acquire_spinlock(lock)		acquire_spinlock_inline(lock)
#define release_spinlock(lock)		release_spinlock_inline(lock)

#endif	// !DEBUG_SPINLOCKS && !B_DEBUG_SPINLOCK_CONTENTION
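// Typical usage sketch (illustrative only, not taken from this header; the
// lock name is hypothetical): interrupts must be disabled while a spinlock
// is held, e.g.:
//
//	cpu_status state = disable_interrupts();
//	acquire_spinlock(&sSomeLock);
//	// ... critical section ...
//	release_spinlock(&sSomeLock);
//	restore_interrupts(state);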


#endif	/* KERNEL_SMP_H */