/*
 * Copyright 2010, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2018-2023, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT license.
 */


// FreeBSD-compatibility implementation of the callout(9) timer API,
// built on Haiku kernel primitives: a single worker thread scans a list
// of pending callouts and fires the ones that are due.
//
// A callout's c_due field encodes its state (as used throughout below):
//   > 0  scheduled; value is the absolute system_time() at which it fires
//   == 0 dequeued by the callout thread and about to run (or running)
//   < 0  cancelled, or finished running


#include <lock.h>
#include <thread.h>

extern "C" {
#	include "device.h"
#	include <sys/callout.h>
#	include <sys/mutex.h>
}

#include <util/AutoLock.h>


//#define TRACE_CALLOUT
#ifdef TRACE_CALLOUT
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


static struct list sTimers;
	// list of scheduled callouts, linked through callout::c_link
static mutex sLock;
	// protects sTimers, sTimeout, sCurrentCallout, and c_due transitions
static sem_id sWaitSem;
	// released to wake the callout thread early; deleted to make it quit
static callout* sCurrentCallout;
	// the callout currently being executed (NULL when none); the basis
	// of callout_active()
static thread_id sThread;
static bigtime_t sTimeout;
	// absolute time at which the callout thread will wake up next


/*!	Invokes the callout's function, honoring its associated mutex (if any).

	When the callout has a mutex, it is acquired first and the function is
	only run if c_due is still 0 — i.e. the callout was not cancelled or
	re-armed between being dequeued by the callout thread and the mutex
	being acquired here. The mutex is released afterwards unless the
	callout was initialized with CALLOUT_RETURNUNLOCKED.
*/
static void
invoke_callout(callout *c, struct mtx *c_mtx)
{
	if (c_mtx != NULL) {
		mtx_lock(c_mtx);

		// c_due != 0 means someone cancelled or rescheduled the callout
		// while we were waiting for c_mtx; do not run it
		if (c->c_due < 0 || c->c_due > 0) {
			mtx_unlock(c_mtx);
			return;
		}
		c->c_due = -1;
	}

	c->c_func(c->c_arg);

	if (c_mtx != NULL && (c->c_flags & CALLOUT_RETURNUNLOCKED) == 0)
		mtx_unlock(c_mtx);
}


/*!	Worker thread: repeatedly scans sTimers, executes due callouts, and
	sleeps (on sWaitSem) until the earliest remaining deadline or until
	woken by callout_reset(). Exits when sWaitSem is deleted.
*/
static status_t
callout_thread(void* /*data*/)
{
	status_t status = B_NO_INIT;

	do {
		bigtime_t timeout = B_INFINITE_TIMEOUT;

		if (status == B_TIMED_OUT || status == B_OK) {
			// scan timers for new timeout and/or execute a timer
			if ((status = mutex_lock(&sLock)) != B_OK)
				continue;

			struct callout* c = NULL;
			while (true) {
				c = (callout*)list_get_next_item(&sTimers, c);
				if (c == NULL)
					break;

				if (c->c_due > system_time()) {
					// calculate new timeout
					if (timeout > c->c_due)
						timeout = c->c_due;
					continue;
				}

				// execute timer
				list_remove_item(&sTimers, c);
				struct mtx *c_mtx = c->c_mtx;
				c->c_due = 0;
					// marks the callout as "dequeued, about to run";
					// _callout_stop() keys off this to cancel it late
				sCurrentCallout = c;

				mutex_unlock(&sLock);
					// dropped so the callout may reschedule itself;
					// invoke_callout() re-checks c_due under c_mtx

				invoke_callout(c, c_mtx);

				if ((status = mutex_lock(&sLock)) != B_OK)
					break;

				sCurrentCallout = NULL;
				c = NULL;
					// restart scanning as we unlocked the list

			}

			sTimeout = timeout;
			mutex_unlock(&sLock);
		}

		status = acquire_sem_etc(sWaitSem, 1, B_ABSOLUTE_TIMEOUT, timeout);
			// the wait sem normally can't be acquired, so we
			// have to look at the status value the call returns:
			//
			// B_OK - a new timer has been added or canceled
			// B_TIMED_OUT - look for timers to be executed
			// B_BAD_SEM_ID - we are asked to quit
	} while (status != B_BAD_SEM_ID);

	return B_OK;
}


// #pragma mark - private API


/*!	Initializes the callout subsystem: the timer list, its lock, the wakeup
	semaphore, and the worker thread. On failure, everything already
	created is torn down again and the error is returned.
*/
status_t
init_callout(void)
{
	list_init_etc(&sTimers, offsetof(struct callout, c_link));
	sTimeout = B_INFINITE_TIMEOUT;

	status_t status = B_OK;
	mutex_init(&sLock, "fbsd callout");

	sWaitSem = create_sem(0, "fbsd callout wait");
	if (sWaitSem < 0) {
		status = sWaitSem;
		goto err1;
	}

	sThread = spawn_kernel_thread(callout_thread, "fbsd callout",
		B_DISPLAY_PRIORITY, NULL);
	if (sThread < 0) {
		status = sThread;
		goto err2;
	}

	return resume_thread(sThread);

err2:
	delete_sem(sWaitSem);
err1:
	mutex_destroy(&sLock);
	return status;
}


/*!	Shuts the callout subsystem down. Deleting sWaitSem makes the worker
	thread's acquire_sem_etc() return B_BAD_SEM_ID, which terminates it.
*/
void
uninit_callout(void)
{
	delete_sem(sWaitSem);

	wait_for_thread(sThread, NULL);

	mutex_lock(&sLock);
	mutex_destroy(&sLock);
}


// #pragma mark - public API


/*!	Initializes a callout. A non-mpsafe callout is serialized through
	&Giant, as on FreeBSD; an mpsafe one has no associated mutex.
*/
void
callout_init(struct callout *callout, int mpsafe)
{
	if (mpsafe)
		callout_init_mtx(callout, NULL, 0);
	else
		callout_init_mtx(callout, &Giant, 0);
}


/*!	Initializes a callout that is protected by \a mtx; the mutex is
	acquired around the callout function when it fires. \a flags may
	contain CALLOUT_RETURNUNLOCKED (see invoke_callout()).
*/
void
callout_init_mtx(struct callout *c, struct mtx *mtx, int flags)
{
	c->c_due = 0;

	c->c_arg = NULL;
	c->c_func = NULL;
	c->c_mtx = mtx;
	c->c_flags = flags;
}


/*!	Common implementation of callout_stop()/callout_drain().

	\param drain If true, additionally busy-waits until a currently
		running invocation of the callout has finished.
	\param locked If true, the caller already holds sLock (used by
		callout_reset()); otherwise it is acquired here.
	\return 1 if the callout was cancelled, 0 if it was active but could
		not (or need not) be cancelled, -1 if it was neither pending nor
		active.
*/
static int
_callout_stop(struct callout *c, bool drain, bool locked = false)
{
	TRACE("_callout_stop %p, func %p, arg %p\n", c, c->c_func, c->c_arg);

	MutexLocker locker;
	if (!locked)
		locker.SetTo(sLock, false);

	bool lockHeld = false;
	if (!drain && c->c_mtx != NULL) {
		if (c->c_mtx != &Giant) {
			// The documentation for callout_stop() confirms any associated locks
			// must be held when invoking it. We depend on this behavior for
			// synchronization with the callout thread, which can modify c_due
			// with only the callout's lock held.
			mtx_assert(c->c_mtx, MA_OWNED);
			lockHeld = true;
		} else {
			// FreeBSD is lenient and does not assert if the callout mutex is &Giant.
			lockHeld = mtx_owned(&Giant);
		}
	}

	int ret = -1;
	if (callout_active(c)) {
		ret = 0;
		if (!drain && lockHeld && c->c_due == 0) {
			// The callout is active, but c_due == 0 and we hold the locks: this
			// means the callout thread has dequeued it and is waiting for c_mtx.
			// Clear c_due to signal the callout thread.
			c->c_due = -1;
			ret = 1;
		}
		if (drain) {
			// wait (unlocked, so the callout thread can make progress)
			// until the running invocation has completed
			locker.Unlock();
			while (callout_active(c))
				snooze(100);
			locker.Lock();
		}
	}

	if (c->c_due <= 0)
		return ret;

	// this timer is scheduled, cancel it
	list_remove_item(&sTimers, c);
	c->c_due = -1;
	return (ret == -1) ? 1 : ret;
}


/*!	(Re)schedules the callout to fire \a _ticks ticks from now with the
	given function and argument. A negative \a _ticks cancels instead.
	\return 1 if a pending callout was rescheduled (or, for negative
		\a _ticks, stopped), 0 otherwise.
*/
int
callout_reset(struct callout *c, int _ticks, void (*func)(void *), void *arg)
{
	MutexLocker locker(sLock);

	TRACE("callout_reset %p, func %p, arg %p\n", c, c->c_func, c->c_arg);

	c->c_func = func;
	c->c_arg = arg;

	if (_ticks < 0) {
		// negative ticks: cancel the callout instead of scheduling it
		int stopped = -1;
		if (c->c_due > 0)
			stopped = _callout_stop(c, 0, true);
		return (stopped == -1) ? 0 : 1;
	}

	int rescheduled = 0;
	if (_ticks >= 0) {
		// reschedule or add this timer
		if (c->c_due <= 0) {
			list_add_item(&sTimers, c);
		} else {
			rescheduled = 1;
		}

		c->c_due = system_time() + TICKS_2_USEC(_ticks);

		// notify timer about the change if necessary
		if (sTimeout > c->c_due)
			release_sem(sWaitSem);
	}

	return rescheduled;
}


/*!	FreeBSD's callout_stop()/callout_drain() entry point (\a safe != 0
	means drain). Returns -1 for a NULL callout; otherwise see
	_callout_stop() for the return semantics.
*/
int
_callout_stop_safe(struct callout *c, int safe)
{
	if (c == NULL)
		return -1;

	return _callout_stop(c, safe);
}


/*!	Schedules the callout with its previously set function and argument.
*/
int
callout_schedule(struct callout *callout, int _ticks)
{
	return callout_reset(callout, _ticks, callout->c_func, callout->c_arg);
}


/*!	Returns whether the callout is scheduled to fire (c_due > 0).
*/
int
callout_pending(struct callout *c)
{
	return c->c_due > 0;
}


/*!	Returns whether the callout is the one currently being executed by
	the callout thread.
*/
int
callout_active(struct callout *c)
{
	return c == sCurrentCallout;
}