xref: /haiku/headers/private/kernel/lock.h (revision 7120e97489acbf17d86d3f33e3b2e68974fd4b23)
1 /*
2 ** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
3 ** Distributed under the terms of the NewOS License.
4 */
5 #ifndef _KERNEL_LOCK_H
6 #define _KERNEL_LOCK_H
7 
8 #include <OS.h>
9 #include <kernel.h>
10 #include <debug.h>
11 
/* Recursive lock: may be re-acquired by the thread that already holds it;
 * only the outermost unlock releases the underlying semaphore. */
typedef struct recursive_lock {
	sem_id sem;			/* semaphore the lock blocks on */
	thread_id holder;	/* thread currently holding the lock */
	int recursion;		/* nesting depth of the current holder */
} recursive_lock;
17 
/* Recursive lock operations (implemented in the kernel's lock.c).
 * NOTE(review): return-value semantics are not visible from this header —
 * the bool returns presumably indicate whether the semaphore was actually
 * acquired/released (i.e. outermost lock/unlock); confirm in the .c file. */
int recursive_lock_create(recursive_lock *lock);
void recursive_lock_destroy(recursive_lock *lock);
bool recursive_lock_lock(recursive_lock *lock);
bool recursive_lock_unlock(recursive_lock *lock);
/* Returns the current nesting depth of the lock. */
int recursive_lock_get_recursion(recursive_lock *lock);
23 
24 #define ASSERT_LOCKED_RECURSIVE(r) { ASSERT(thread_get_current_thread_id() == (r)->holder); }
25 
/* Simple (non-recursive) mutex backed by a kernel semaphore. */
typedef struct mutex {
	sem_id sem;			/* semaphore the mutex blocks on */
	thread_id holder;	/* thread currently holding the mutex */
} mutex;
30 
/* Mutex operations (implemented in the kernel's lock.c).
 * NOTE(review): mutex_init() presumably returns a negative error code when
 * semaphore creation fails — exact codes not visible here, confirm in lock.c. */
int mutex_init(mutex *m, const char *name);
void mutex_destroy(mutex *m);
void mutex_lock(mutex *m);
void mutex_unlock(mutex *m);
35 
36 #define ASSERT_LOCKED_MUTEX(m) { ASSERT(thread_get_current_thread_id() == (m)->holder); }
37 
// for read/write locks: maximum number of concurrent readers; a writer
// claims all MAX_READERS units at once (see ACQUIRE_WRITE_LOCK below)
#define MAX_READERS 100000
40 
/* Benaphore: a semaphore front-ended by an atomic counter so that the
 * (comparatively expensive) kernel semaphore is only touched when the
 * lock is actually contended. */
struct benaphore {
	sem_id	sem;	/* acquired/released only under contention */
	int32	count;	/* available units; starts at 1 (see INIT_BENAPHORE) */
};

typedef struct benaphore benaphore;
47 
48 // it may make sense to add a status field to the rw_lock to
49 // be able to check if the semaphore could be locked
50 
51 // Note: using rw_lock in this way probably doesn't make too much sense
52 // for use in the kernel, we may change this in the near future.
53 // It basically uses 2 benaphores to create the rw_lock which is not
54 // necessary in the kernel -- axeld, 2002/07/18.
55 // Furthermore, those should probably be __inlines - I didn't know about
56 // them earlier... :-)
57 
/* Read/write lock built from a semaphore plus a benaphore that
 * serializes writers (see the macros below). */
struct rw_lock {
	sem_id		sem;		/* readers/writers block here when contended */
	int32		count;		/* starts at MAX_READERS; each reader takes 1,
							   a writer takes all MAX_READERS units */
	benaphore	writeLock;	/* serializes competing writers */
};

typedef struct rw_lock rw_lock;
65 
/* Initialize a benaphore: count starts at 1 (free), the semaphore starts
 * empty so the uncontended fast path never touches it.
 * NOTE: a create_sem() failure is not reported here — callers must test
 * CHECK_BENAPHORE() afterwards. */
#define INIT_BENAPHORE(lock, name) \
	do { \
		(lock).count = 1; \
		(lock).sem = create_sem(0, (name)); \
	} while (0)

/* Evaluates to the semaphore id; a negative value means INIT failed. */
#define CHECK_BENAPHORE(lock) \
	((lock).sem)

/* Tear down a benaphore. No embedded semicolon: the caller supplies it,
 * so "if (cond) UNINIT_BENAPHORE(b); else …" parses as expected. */
#define UNINIT_BENAPHORE(lock) \
	delete_sem((lock).sem)

/* Acquire: the atomic decrement is the fast path; only when the count
 * shows contention do we block on the semaphore. Evaluates to 0 on the
 * fast path, otherwise to the acquire_sem_etc() result (which may be
 * B_INTERRUPTED because of B_CAN_INTERRUPT). */
#define ACQUIRE_BENAPHORE(lock) \
	(atomic_add(&(lock).count, -1) <= 0 ? \
		acquire_sem_etc((lock).sem, 1, B_CAN_INTERRUPT, 0) \
		: 0)

/* Release: wakes a waiter only if someone is blocked on the semaphore.
 * do { } while (0) makes the multi-statement body safe inside if/else. */
#define RELEASE_BENAPHORE(lock) \
	do { \
		if (atomic_add(&(lock).count, 1) < 0) \
			release_sem_etc((lock).sem, 1, B_CAN_INTERRUPT); \
	} while (0)
88 
/* read/write lock */

/* Initialize: the semaphore starts empty, count at MAX_READERS.
 * NOTE: a create_sem() failure is only detectable via CHECK_RW_LOCK(). */
#define INIT_RW_LOCK(lock, name) \
	do { \
		(lock).sem = create_sem(0, (name)); \
		(lock).count = MAX_READERS; \
		INIT_BENAPHORE((lock).writeLock, "r/w write lock"); \
	} while (0)

/* Evaluates to the semaphore id; a negative value means INIT failed. */
#define CHECK_RW_LOCK(lock) \
	((lock).sem)

/* Tear down. do { } while (0) is essential here: the original expansion was
 * two unbraced statements, so "if (cond) UNINIT_RW_LOCK(l);" deleted the
 * semaphore unconditionally. */
#define UNINIT_RW_LOCK(lock) \
	do { \
		delete_sem((lock).sem); \
		UNINIT_BENAPHORE((lock).writeLock); \
	} while (0)

/* Reader acquire: take one unit; block on the semaphore if a writer has
 * claimed (or is claiming) the lock. May return early if interrupted
 * (B_CAN_INTERRUPT). */
#define ACQUIRE_READ_LOCK(lock) \
	do { \
		if (atomic_add(&(lock).count, -1) <= 0) \
			acquire_sem_etc((lock).sem, 1, B_CAN_INTERRUPT, 0); \
	} while (0)

/* Reader release: wake a blocked writer only when one is waiting. */
#define RELEASE_READ_LOCK(lock) \
	do { \
		if (atomic_add(&(lock).count, 1) < 0) \
			release_sem_etc((lock).sem, 1, B_CAN_INTERRUPT); \
	} while (0)

/* Writer acquire: serialize the claim step via the write benaphore, then
 * take all MAX_READERS units at once and wait for active readers to drain.
 * Local is named _readers to avoid shadowing a caller's variable. */
#define ACQUIRE_WRITE_LOCK(lock) \
	do { \
		int32 _readers; \
		ACQUIRE_BENAPHORE((lock).writeLock); \
		_readers = atomic_add(&(lock).count, -MAX_READERS); \
		if (_readers < MAX_READERS) \
			acquire_sem_etc((lock).sem, _readers <= 0 ? 1 : MAX_READERS - _readers, \
				B_CAN_INTERRUPT, 0); \
		RELEASE_BENAPHORE((lock).writeLock); \
	} while (0)

/* Writer release: return all MAX_READERS units and wake whoever is
 * waiting — one pending writer, or every blocked reader. */
#define RELEASE_WRITE_LOCK(lock) \
	do { \
		int32 _readers = atomic_add(&(lock).count, MAX_READERS); \
		if (_readers < 0) \
			release_sem_etc((lock).sem, _readers <= -MAX_READERS ? 1 : -_readers, \
				B_CAN_INTERRUPT); \
	} while (0)
133 
134 
135 #endif	/* _KERNEL_LOCK_H */
136