/*
 * Copyright 2002-2011, Haiku. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*!	Kernel timer support: per-CPU timer queues, dispatch, and the public
	timer API. */


#include <timer.h>

#include <string.h>

#include <OS.h>

#include <arch/timer.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <debug.h>
#include <elf.h>
#include <real_time_clock.h>
#include <smp.h>
#include <thread.h>
#include <util/AutoLock.h>


struct per_cpu_timer_data {
	spinlock		lock;
	timer* volatile	events;
	timer* volatile	current_event;
	vint32			current_event_in_progress;
	bigtime_t		real_time_offset;
};

static per_cpu_timer_data sPerCPU[B_MAX_CPU_COUNT];


//#define TRACE_TIMER
#ifdef TRACE_TIMER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


/*!	Sets the hardware timer to the given absolute time.

	\param scheduleTime The absolute system time for the timer expiration.
	\param now The current system time.
*/
static void
set_hardware_timer(bigtime_t scheduleTime, bigtime_t now)
{
	arch_timer_set_hardware_timer(scheduleTime > now ? scheduleTime - now : 0);
}


/*!	Sets the hardware timer to the given absolute time, determining the
	current system time itself.

	\param scheduleTime The absolute system time for the timer expiration.
*/
static inline void
set_hardware_timer(bigtime_t scheduleTime)
{
	set_hardware_timer(scheduleTime, system_time());
}


/*!	NOTE: expects interrupts to be off */
static void
add_event_to_list(timer* event, timer* volatile* list)
{
	timer* next;
	timer* last = NULL;

	// stick it in the event list
	for (next = *list; next; last = next, next = (timer*)next->next) {
		if ((bigtime_t)next->schedule_time >= (bigtime_t)event->schedule_time)
			break;
	}

	if (last != NULL) {
		event->next = last->next;
		last->next = event;
	} else {
		event->next = next;
		*list = event;
	}
}
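

/*!	Illustrative sketch (not compiled into the kernel): the queue managed by
	add_event_to_list() is a singly linked list kept sorted by schedule_time,
	so the head is always the next event to fire. Because the comparison uses
	">=", a new event is inserted before existing events with an equal
	schedule time. The hypothetical helper below shows the resulting order.

\code
static void
example_insertion_order()
{
	timer* volatile list = NULL;
	timer a, b, c;
	a.schedule_time = 300;
	b.schedule_time = 100;
	c.schedule_time = 200;

	add_event_to_list(&a, &list);	// list: a(300)
	add_event_to_list(&b, &list);	// list: b(100) -> a(300)
	add_event_to_list(&c, &list);	// list: b(100) -> c(200) -> a(300)
}
\endcode
*/

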
static void
per_cpu_real_time_clock_changed(void*, int cpu)
{
	per_cpu_timer_data& cpuData = sPerCPU[cpu];
	SpinLocker cpuDataLocker(cpuData.lock);

	bigtime_t realTimeOffset = rtc_boot_time();
	if (realTimeOffset == cpuData.real_time_offset)
		return;

	// The real time offset has changed. We need to update all affected
	// timers. First find and dequeue them.
	bigtime_t timeDiff = cpuData.real_time_offset - realTimeOffset;
	cpuData.real_time_offset = realTimeOffset;

	timer* affectedTimers = NULL;
	timer* volatile* it = &cpuData.events;
	timer* firstEvent = *it;
	while (timer* event = *it) {
		// check whether it's an absolute real-time timer
		uint32 flags = event->flags;
		if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER
			|| (flags & B_TIMER_REAL_TIME_BASE) == 0) {
			it = &event->next;
			continue;
		}

		// Yep, remove the timer from the queue and add it to the
		// affectedTimers list.
		*it = event->next;
		event->next = affectedTimers;
		affectedTimers = event;
	}

	// update and requeue the affected timers
	bool firstEventChanged = cpuData.events != firstEvent;
	firstEvent = cpuData.events;

	while (affectedTimers != NULL) {
		timer* event = affectedTimers;
		affectedTimers = event->next;

		bigtime_t oldTime = event->schedule_time;
		event->schedule_time += timeDiff;

		// handle over-/underflows
		if (timeDiff >= 0) {
			if (event->schedule_time < oldTime)
				event->schedule_time = B_INFINITE_TIMEOUT;
		} else {
			if (event->schedule_time < 0)
				event->schedule_time = 0;
		}

		add_event_to_list(event, &cpuData.events);
	}

	firstEventChanged |= cpuData.events != firstEvent;

	// If the first event has changed, reset the hardware timer.
	if (firstEventChanged)
		set_hardware_timer(cpuData.events->schedule_time);
}


// #pragma mark - debugging


static int
dump_timers(int argc, char** argv)
{
	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++) {
		kprintf("CPU %" B_PRId32 ":\n", i);

		if (sPerCPU[i].events == NULL) {
			kprintf("  no timers scheduled\n");
			continue;
		}

		for (timer* event = sPerCPU[i].events; event != NULL;
				event = event->next) {
			kprintf("  [%9lld] %p: ", (long long)event->schedule_time, event);
			if ((event->flags & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER)
				kprintf("periodic %9lld, ", (long long)event->period);
			else
				kprintf("one shot,          ");

			kprintf("flags: %#x, user data: %p, callback: %p  ",
				event->flags, event->user_data, event->hook);

			// look up and print the hook function symbol
			const char* symbol;
			const char* imageName;
			bool exactMatch;

			status_t error = elf_debug_lookup_symbol_address(
				(addr_t)event->hook, NULL, &symbol, &imageName, &exactMatch);
			if (error == B_OK && exactMatch) {
				if (const char* slash = strchr(imageName, '/'))
					imageName = slash + 1;

				kprintf("  %s:%s", imageName, symbol);
			}

			kprintf("\n");
		}
	}

	kprintf("current time: %lld\n", (long long)system_time());

	return 0;
}


// #pragma mark - kernel-private


status_t
timer_init(kernel_args* args)
{
	TRACE(("timer_init: entry\n"));

	if (arch_init_timer(args) != B_OK)
		panic("arch_init_timer() failed");

	add_debugger_command_etc("timers", &dump_timers, "List all timers",
		"\n"
		"Prints a list of all scheduled timers.\n", 0);

	return B_OK;
}


void
timer_init_post_rtc(void)
{
	bigtime_t realTimeOffset = rtc_boot_time();

	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++)
		sPerCPU[i].real_time_offset = realTimeOffset;
}


void
timer_real_time_clock_changed()
{
	call_all_cpus(&per_cpu_real_time_clock_changed, NULL);
}


int32
timer_interrupt()
{
	timer* event;
	spinlock* spinlock;
	per_cpu_timer_data& cpuData = sPerCPU[smp_get_current_cpu()];
	int32 rc = B_HANDLED_INTERRUPT;

	TRACE(("timer_interrupt: time %lld, cpu %ld\n", system_time(),
		smp_get_current_cpu()));

	spinlock = &cpuData.lock;

	acquire_spinlock(spinlock);

	event = cpuData.events;
	while (event != NULL && ((bigtime_t)event->schedule_time < system_time())) {
		// this event needs to happen
		int mode = event->flags;

		cpuData.events = (timer*)event->next;
		cpuData.current_event = event;
		cpuData.current_event_in_progress = 1;

		release_spinlock(spinlock);

		TRACE(("timer_interrupt: calling hook %p for event %p\n", event->hook,
			event));

		// call the callback
		// note: if the event is not periodic, it is ok
		// to delete the event structure inside the callback
		if (event->hook) {
			bool callHook = true;

			// we may need to acquire the scheduler lock
			if ((mode & B_TIMER_ACQUIRE_SCHEDULER_LOCK) != 0) {
				acquire_spinlock(&gSchedulerLock);

				// If the event has been cancelled in the meantime, we don't
				// call the hook anymore.
				if (cpuData.current_event == NULL)
					callHook = false;
			}

			if (callHook)
				rc = event->hook(event);

			if ((mode & B_TIMER_ACQUIRE_SCHEDULER_LOCK) != 0)
				release_spinlock(&gSchedulerLock);
		}

		cpuData.current_event_in_progress = 0;

		acquire_spinlock(spinlock);

		if ((mode & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER
			&& cpuData.current_event != NULL) {
			// we need to adjust it and add it back to the list
			event->schedule_time += event->period;

			// If the new schedule time is a full interval or more in the past,
			// skip ticks.
			bigtime_t now = system_time();
			if (now >= event->schedule_time + event->period) {
				// pick the closest tick in the past
				event->schedule_time = now
					- (now - event->schedule_time) % event->period;
			}

			add_event_to_list(event, &cpuData.events);
		}

		cpuData.current_event = NULL;

		event = cpuData.events;
	}

	// set up the next hardware timer
	if (cpuData.events != NULL)
		set_hardware_timer(cpuData.events->schedule_time);

	release_spinlock(spinlock);

	return rc;
}
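

/*!	Worked example (illustrative only) for the tick-skipping logic in
	timer_interrupt() above: take a periodic timer with period 100 (values in
	microseconds) that was scheduled for t = 1000, whose hook returns when
	system_time() is 1360. Rescheduling first yields 1000 + 100 = 1100; since
	1360 >= 1100 + 100, the overdue ticks are skipped and the schedule time
	becomes 1360 - (1360 - 1100) % 100 = 1300, the closest tick in the past.
	The timer therefore fires once more immediately and then settles on the
	1400, 1500, ... grid, instead of firing three times in a burst to catch
	up on the missed ticks at 1100, 1200, and 1300.
*/

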
// #pragma mark - public API


status_t
add_timer(timer* event, timer_hook hook, bigtime_t period, int32 flags)
{
	bigtime_t currentTime = system_time();
	cpu_status state;

	if (event == NULL || hook == NULL || period < 0)
		return B_BAD_VALUE;

	TRACE(("add_timer: event %p\n", event));

	// compute the schedule time, unless the timer struct's own times are used
	if ((flags & B_TIMER_USE_TIMER_STRUCT_TIMES) == 0) {
		bigtime_t scheduleTime = period;
		if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER)
			scheduleTime += currentTime;
		event->schedule_time = (int64)scheduleTime;
		event->period = period;
	}

	event->hook = hook;
	event->flags = flags;

	state = disable_interrupts();
	int currentCPU = smp_get_current_cpu();
	per_cpu_timer_data& cpuData = sPerCPU[currentCPU];
	acquire_spinlock(&cpuData.lock);

	// If the timer is an absolute real-time based timer, convert the
	// schedule time to system time.
	if ((flags & ~B_TIMER_FLAGS) == B_ONE_SHOT_ABSOLUTE_TIMER
		&& (flags & B_TIMER_REAL_TIME_BASE) != 0) {
		if (event->schedule_time > cpuData.real_time_offset)
			event->schedule_time -= cpuData.real_time_offset;
		else
			event->schedule_time = 0;
	}

	add_event_to_list(event, &cpuData.events);
	event->cpu = currentCPU;

	// if the new timer ended up at the head of the list, update the
	// hardware timer
	if (event == cpuData.events)
		set_hardware_timer(event->schedule_time, currentTime);

	release_spinlock(&cpuData.lock);
	restore_interrupts(state);

	return B_OK;
}


bool
cancel_timer(timer* event)
{
	TRACE(("cancel_timer: event %p\n", event));

	InterruptsLocker _;

	// lock the right CPU spinlock
	int cpu = event->cpu;
	SpinLocker spinLocker;
	while (true) {
		if (cpu >= B_MAX_CPU_COUNT)
			return false;

		spinLocker.SetTo(sPerCPU[cpu].lock, false);
		if (cpu == event->cpu)
			break;

		// the CPU field changed while we were trying to lock
		spinLocker.Unlock();
		cpu = event->cpu;
	}

	per_cpu_timer_data& cpuData = sPerCPU[cpu];

	if (event != cpuData.current_event) {
		// The timer hook is not yet being executed.
		timer* current = cpuData.events;
		timer* last = NULL;

		while (current != NULL) {
			if (current == event) {
				// we found it
				if (last == NULL)
					cpuData.events = current->next;
				else
					last->next = current->next;
				current->next = NULL;
				// break out of the whole thing
				break;
			}
			last = current;
			current = current->next;
		}

		// If not found, we assume this was a one-shot timer that has already
		// fired.
		if (current == NULL)
			return true;

		// invalidate the CPU field
		event->cpu = 0xffff;

		// If on the current CPU, also reset the hardware timer.
		if (cpu == smp_get_current_cpu()) {
			if (cpuData.events == NULL)
				arch_timer_clear_hardware_timer();
			else
				set_hardware_timer(cpuData.events->schedule_time);
		}

		return false;
	}

	// The timer hook is currently being executed. We clear the current
	// event so that timer_interrupt() will not reschedule periodic timers.
	cpuData.current_event = NULL;

	// Unless this is a kernel-private timer that requires the scheduler
	// lock to be held while calling the event hook, we have to wait for the
	// hook to complete. When called from the timer hook itself we don't
	// wait either, of course.
	if ((event->flags & B_TIMER_ACQUIRE_SCHEDULER_LOCK) == 0
		&& cpu != smp_get_current_cpu()) {
		spinLocker.Unlock();

		while (cpuData.current_event_in_progress == 1) {
			PAUSE();
		}
	}

	return true;
}
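

/*!	Usage sketch (hypothetical driver code, not part of this file): arming a
	periodic timer that fires every 10 ms and cancelling it on teardown. The
	hook is called in interrupt context, so it must not block; returning
	B_INVOKE_SCHEDULER instead of B_HANDLED_INTERRUPT would request a
	reschedule when the interrupt returns. All names below are made up for
	the example.

\code
static timer sWatchdogTimer;
	// must remain valid while the timer is scheduled

static int32
watchdog_tick(timer* event)
{
	// hypothetical handler: runs in interrupt context, keep it short
	return B_HANDLED_INTERRUPT;
}

static void
start_watchdog()
{
	// first expiry one period (10000 us) from now, then every period
	add_timer(&sWatchdogTimer, &watchdog_tick, 10000, B_PERIODIC_TIMER);
}

static void
stop_watchdog()
{
	// dequeues the timer; if its hook is currently running on another
	// CPU, cancel_timer() waits for it to finish
	cancel_timer(&sWatchdogTimer);
}
\endcode
*/

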
void
spin(bigtime_t microseconds)
{
	bigtime_t time = system_time();

	while ((system_time() - time) < microseconds) {
		PAUSE();
	}
}
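

/*!	Usage note (illustrative): spin() busy-waits and burns CPU time, so it is
	only appropriate for very short delays in contexts where blocking is not
	allowed, for instance while interrupts are disabled and a device needs a
	few microseconds to settle.

\code
	// hypothetical example: give the hardware a short settle time in a
	// context where snooze() is not an option
	spin(5);	// busy-waits for 5 microseconds
\endcode
*/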