/*
 * Copyright 2002-2011, Haiku. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Policy info for timers */


#include <timer.h>

#include <OS.h>

#include <arch/timer.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <debug.h>
#include <elf.h>
#include <real_time_clock.h>
#include <smp.h>
#include <thread.h>
#include <util/AutoLock.h>


struct per_cpu_timer_data {
	spinlock	lock;
	timer*		events;
	timer*		current_event;
	int32		current_event_in_progress;
	bigtime_t	real_time_offset;
};

static per_cpu_timer_data sPerCPU[SMP_MAX_CPUS];


//#define TRACE_TIMER
#ifdef TRACE_TIMER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


/*!	Sets the hardware timer to the given absolute time.

	\param scheduleTime The absolute system time for the timer expiration.
	\param now The current system time.
*/
static void
set_hardware_timer(bigtime_t scheduleTime, bigtime_t now)
{
	arch_timer_set_hardware_timer(scheduleTime > now ? scheduleTime - now : 0);
}


/*!	Sets the hardware timer to the given absolute time.

	\param scheduleTime The absolute system time for the timer expiration.
*/
static inline void
set_hardware_timer(bigtime_t scheduleTime)
{
	set_hardware_timer(scheduleTime, system_time());
}


/*!	NOTE: expects the list to be locked. */
static void
add_event_to_list(timer* event, timer** list)
{
	timer* next;
	timer* previous = NULL;

	// stick it in the event list
	for (next = *list; next != NULL;
			previous = next, next = (timer*)next->next) {
		if ((bigtime_t)next->schedule_time >= (bigtime_t)event->schedule_time)
			break;
	}

	if (previous != NULL) {
		event->next = previous->next;
		previous->next = event;
	} else {
		event->next = next;
		*list = event;
	}
}


static void
per_cpu_real_time_clock_changed(void*, int cpu)
{
	per_cpu_timer_data& cpuData = sPerCPU[cpu];
	SpinLocker cpuDataLocker(cpuData.lock);

	bigtime_t realTimeOffset = rtc_boot_time();
	if (realTimeOffset == cpuData.real_time_offset)
		return;

	// The real time offset has changed. We need to update all affected
	// timers. First find and dequeue them.
	bigtime_t timeDiff = cpuData.real_time_offset - realTimeOffset;
	cpuData.real_time_offset = realTimeOffset;

	timer* affectedTimers = NULL;
	timer** it = &cpuData.events;
	timer* firstEvent = *it;
	while (timer* event = *it) {
		// check whether it's an absolute real-time timer
		uint32 flags = event->flags;
		if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER
			|| (flags & B_TIMER_REAL_TIME_BASE) == 0) {
			it = &event->next;
			continue;
		}

		// Yep, remove the timer from the queue and add it to the
		// affectedTimers list.
		*it = event->next;
		event->next = affectedTimers;
		affectedTimers = event;
	}

	// update and requeue the affected timers
	bool firstEventChanged = cpuData.events != firstEvent;
	firstEvent = cpuData.events;

	while (affectedTimers != NULL) {
		timer* event = affectedTimers;
		affectedTimers = event->next;

		bigtime_t oldTime = event->schedule_time;
		event->schedule_time += timeDiff;

		// handle over-/underflows
		if (timeDiff >= 0) {
			if (event->schedule_time < oldTime)
				event->schedule_time = B_INFINITE_TIMEOUT;
		} else {
			if (event->schedule_time < 0)
				event->schedule_time = 0;
		}

		add_event_to_list(event, &cpuData.events);
	}

	firstEventChanged |= cpuData.events != firstEvent;

	// If the first event has changed, reset the hardware timer.
	if (firstEventChanged)
		set_hardware_timer(cpuData.events->schedule_time);
}
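

// A worked example of the adjustment above (the numbers are illustrative):
// if the wall clock is set 5 seconds ahead, rtc_boot_time() grows by 5 s and
// timeDiff is -5000000. Every absolute real-time timer thus moves 5 s earlier
// in system time, so it still fires at the same wall-clock moment as before.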


// #pragma mark - debugging


static int
dump_timers(int argc, char** argv)
{
	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++) {
		kprintf("CPU %" B_PRId32 ":\n", i);

		if (sPerCPU[i].events == NULL) {
			kprintf("  no timers scheduled\n");
			continue;
		}

		for (timer* event = sPerCPU[i].events; event != NULL;
				event = event->next) {
			kprintf("  [%9lld] %p: ", (long long)event->schedule_time, event);
			if ((event->flags & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER)
				kprintf("periodic %9lld, ", (long long)event->period);
			else
				kprintf("one shot, ");

			kprintf("flags: %#x, user data: %p, callback: %p  ",
				event->flags, event->user_data, event->hook);

			// look up and print the hook function symbol
			const char* symbol;
			const char* imageName;
			bool exactMatch;

			status_t error = elf_debug_lookup_symbol_address(
				(addr_t)event->hook, NULL, &symbol, &imageName, &exactMatch);
			if (error == B_OK && exactMatch) {
				// strip the path, keeping only the image's base name
				if (const char* slash = strrchr(imageName, '/'))
					imageName = slash + 1;

				kprintf("  %s:%s", imageName, symbol);
			}

			kprintf("\n");
		}
	}

	kprintf("current time: %lld\n", (long long)system_time());

	return 0;
}


// #pragma mark - kernel-private


status_t
timer_init(kernel_args* args)
{
	TRACE(("timer_init: entry\n"));

	if (arch_init_timer(args) != B_OK)
		panic("arch_init_timer() failed");

	add_debugger_command_etc("timers", &dump_timers, "List all timers",
		"\n"
		"Prints a list of all scheduled timers.\n", 0);

	return B_OK;
}


void
timer_init_post_rtc(void)
{
	bigtime_t realTimeOffset = rtc_boot_time();

	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++)
		sPerCPU[i].real_time_offset = realTimeOffset;
}


void
timer_real_time_clock_changed()
{
	call_all_cpus(&per_cpu_real_time_clock_changed, NULL);
}


int32
timer_interrupt()
{
	per_cpu_timer_data& cpuData = sPerCPU[smp_get_current_cpu()];
	int32 rc = B_HANDLED_INTERRUPT;

	TRACE(("timer_interrupt: time %" B_PRIdBIGTIME ", cpu %" B_PRId32 "\n",
		system_time(), smp_get_current_cpu()));

	spinlock* spinlock = &cpuData.lock;
	acquire_spinlock(spinlock);

	timer* event = cpuData.events;
	while (event != NULL
			&& ((bigtime_t)event->schedule_time < system_time())) {
		// this event needs to happen
		int mode = event->flags;

		cpuData.events = (timer*)event->next;
		cpuData.current_event = event;
		atomic_set(&cpuData.current_event_in_progress, 1);

		release_spinlock(spinlock);

		TRACE(("timer_interrupt: calling hook %p for event %p\n", event->hook,
			event));

		// call the callback
		// note: if the event is not periodic, it is OK
		// to delete the event structure inside the callback
		if (event->hook)
			rc = event->hook(event);

		atomic_set(&cpuData.current_event_in_progress, 0);

		acquire_spinlock(spinlock);

		if ((mode & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER
			&& cpuData.current_event != NULL) {
			// we need to adjust it and add it back to the list
			event->schedule_time += event->period;

			// If the new schedule time is a full interval or more in the
			// past, skip ticks.
			bigtime_t now = system_time();
			if (now >= event->schedule_time + event->period) {
				// pick the closest tick in the past
				event->schedule_time = now
					- (now - event->schedule_time) % event->period;
			}

			add_event_to_list(event, &cpuData.events);
		}

		cpuData.current_event = NULL;
		event = cpuData.events;
	}

	// set up the next hardware timer
	if (cpuData.events != NULL)
		set_hardware_timer(cpuData.events->schedule_time);

	release_spinlock(spinlock);

	return rc;
}
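

// Synchronization note: timer_interrupt() publishes the event being fired via
// current_event and current_event_in_progress before dropping the lock to
// call the hook. cancel_timer() clears current_event to keep a periodic timer
// from being requeued, and spins on current_event_in_progress to wait out a
// hook that is already running on another CPU.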


// #pragma mark - public API


/*!	Schedules a timer event on the current CPU.

	\param event The timer structure to use; it must stay valid until the
		timer has fired or has been cancelled.
	\param hook The function to be called (in interrupt context) when the
		timer expires.
	\param period Relative timeout, absolute expiration time, or period in
		microseconds, depending on the timer type in \a flags.
	\param flags The timer type (e.g. B_ONE_SHOT_RELATIVE_TIMER,
		B_ONE_SHOT_ABSOLUTE_TIMER, or B_PERIODIC_TIMER), optionally combined
		with flags such as B_TIMER_REAL_TIME_BASE or
		B_TIMER_USE_TIMER_STRUCT_TIMES.
	\return \c B_OK on success, \c B_BAD_VALUE for invalid arguments.
*/
status_t
add_timer(timer* event, timer_hook hook, bigtime_t period, int32 flags)
{
	const bigtime_t currentTime = system_time();

	if (event == NULL || hook == NULL || period < 0)
		return B_BAD_VALUE;

	TRACE(("add_timer: event %p\n", event));

	// compute the schedule time
	if ((flags & B_TIMER_USE_TIMER_STRUCT_TIMES) == 0) {
		bigtime_t scheduleTime = period;
		if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER)
			scheduleTime += currentTime;
		event->schedule_time = (int64)scheduleTime;
		event->period = period;
	}

	event->hook = hook;
	event->flags = flags;

	InterruptsLocker interruptsLocker;
	const int currentCPU = smp_get_current_cpu();
	per_cpu_timer_data& cpuData = sPerCPU[currentCPU];
	SpinLocker locker(&cpuData.lock);

	// If the timer is an absolute real-time base timer, convert the schedule
	// time to system time.
	if ((flags & ~B_TIMER_FLAGS) == B_ONE_SHOT_ABSOLUTE_TIMER
		&& (flags & B_TIMER_REAL_TIME_BASE) != 0) {
		if (event->schedule_time > cpuData.real_time_offset)
			event->schedule_time -= cpuData.real_time_offset;
		else
			event->schedule_time = 0;
	}

	add_event_to_list(event, &cpuData.events);
	event->cpu = currentCPU;

	// If the new event ended up at the head of the list, reprogram the
	// hardware timer.
	if (event == cpuData.events)
		set_hardware_timer(event->schedule_time, currentTime);

	return B_OK;
}
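

/*!	Cancels a previously scheduled timer event.

	\param event The timer to cancel.
	\return \c true if the timer hook has already run or is currently
		running, \c false if the event was dequeued before it could fire.
*/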
bool
cancel_timer(timer* event)
{
	TRACE(("cancel_timer: event %p\n", event));

	InterruptsLocker _;

	// lock the right CPU spinlock
	int cpu = event->cpu;
	SpinLocker spinLocker;
	while (true) {
		if (cpu >= SMP_MAX_CPUS)
			return false;

		spinLocker.SetTo(sPerCPU[cpu].lock, false);
		if (cpu == event->cpu)
			break;

		// The cpu field changed while we were trying to lock; retry with
		// the new value.
		spinLocker.Unlock();
		cpu = event->cpu;
	}

	per_cpu_timer_data& cpuData = sPerCPU[cpu];

	if (event != cpuData.current_event) {
		// The timer hook is not yet being executed.
		timer* current = cpuData.events;
		timer* previous = NULL;

		while (current != NULL) {
			if (current == event) {
				// found it: unlink it from the list
				if (previous == NULL)
					cpuData.events = current->next;
				else
					previous->next = current->next;
				current->next = NULL;
				break;
			}
			previous = current;
			current = current->next;
		}

		// If not found, we assume this was a one-shot timer that has
		// already fired.
		if (current == NULL)
			return true;

		// invalidate CPU field
		event->cpu = 0xffff;

		// If on the current CPU, also reset the hardware timer.
		// FIXME: Theoretically we should be able to skip this if
		// (previous != NULL), since then the head of the list is unchanged.
		// But it seems adding that causes problems on some systems, possibly
		// due to some other bug. For now, just reset the hardware timer on
		// every cancellation.
		if (cpu == smp_get_current_cpu()) {
			if (cpuData.events == NULL)
				arch_timer_clear_hardware_timer();
			else
				set_hardware_timer(cpuData.events->schedule_time);
		}

		return false;
	}

	// The timer hook is currently being executed. We clear the current
	// event so that timer_interrupt() will not reschedule periodic timers.
	cpuData.current_event = NULL;

	// We have to wait for the hook to complete. When called from the timer
	// hook itself we don't wait, of course.
	if (cpu != smp_get_current_cpu()) {
		spinLocker.Unlock();

		while (atomic_get(&cpuData.current_event_in_progress) == 1)
			cpu_wait(&cpuData.current_event_in_progress, 0);
	}

	return true;
}


/*!	Busy-waits until at least \a microseconds have elapsed.

	Unlike a timer, this neither blocks nor reschedules; it simply spins,
	calling cpu_pause() between time checks.
*/
void
spin(bigtime_t microseconds)
{
	bigtime_t time = system_time();

	while ((system_time() - time) < microseconds)
		cpu_pause();
}
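

// A minimal usage sketch of the public API above, from the perspective of a
// hypothetical driver. All sample_*/sSample* names are invented for
// illustration; only add_timer(), cancel_timer(), and the timer struct come
// from this file. Kept inside "#if 0" so it is never built.
#if 0
static timer sSampleTimer;
static int32 sSampleTickCount;

static int32
sample_timer_hook(timer* event)
{
	// Runs in interrupt context: do only minimal, non-blocking work here.
	atomic_add((int32*)event->user_data, 1);
	return B_HANDLED_INTERRUPT;
}

static void
sample_start(void)
{
	sSampleTimer.user_data = &sSampleTickCount;
	// Fire every 100 ms (the period is in microseconds) until cancelled.
	add_timer(&sSampleTimer, &sample_timer_hook, 100000, B_PERIODIC_TIMER);
}

static void
sample_stop(void)
{
	// Returns false if the timer was dequeued before its hook ran; after
	// this returns, the hook is guaranteed not to be running anymore.
	cancel_timer(&sSampleTimer);
}
#endif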