xref: /haiku/src/system/kernel/timer.cpp (revision 2141d2fe3a5df2f55f3590f67660573b50d1d1d3)
/*
 * Copyright 2002-2011, Haiku. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Kernel timer support: maintains a per-CPU queue of timer events and
	programs the hardware timer for the earliest one. */


#include <timer.h>

#include <OS.h>

#include <arch/timer.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <debug.h>
#include <elf.h>
#include <real_time_clock.h>
#include <smp.h>
#include <thread.h>
#include <util/AutoLock.h>


struct per_cpu_timer_data {
	spinlock		lock;
	timer*			events;						// list sorted by schedule_time
	timer*			current_event;				// event whose hook is being called
	int32			current_event_in_progress;	// set while the hook is running
	bigtime_t		real_time_offset;			// cached rtc_boot_time()
};

static per_cpu_timer_data sPerCPU[SMP_MAX_CPUS];


//#define TRACE_TIMER
#ifdef TRACE_TIMER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


/*!	Sets the hardware timer to the given absolute time.

	\param scheduleTime The absolute system time for the timer expiration.
	\param now The current system time.
*/
static void
set_hardware_timer(bigtime_t scheduleTime, bigtime_t now)
{
	arch_timer_set_hardware_timer(scheduleTime > now ? scheduleTime - now : 0);
}


/*!	Sets the hardware timer to the given absolute time.

	\param scheduleTime The absolute system time for the timer expiration.
*/
static inline void
set_hardware_timer(bigtime_t scheduleTime)
{
	set_hardware_timer(scheduleTime, system_time());
}


/*!	Inserts \a event into \a list, which is sorted ascendingly by schedule
	time. NOTE: expects the list to be locked.
*/
static void
add_event_to_list(timer* event, timer** list)
{
	timer* next;
	timer* previous = NULL;

	for (next = *list; next != NULL; previous = next, next = previous->next) {
		if ((bigtime_t)next->schedule_time >= (bigtime_t)event->schedule_time)
			break;
	}

	event->next = next;
	if (previous != NULL)
		previous->next = event;
	else
		*list = event;
}


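/*!	\c call_all_cpus() callback invoked after the real time clock offset has
	changed: adjusts the schedule times of this CPU's absolute real-time
	timers to the new offset.
*/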
static void
per_cpu_real_time_clock_changed(void*, int cpu)
{
	per_cpu_timer_data& cpuData = sPerCPU[cpu];
	SpinLocker cpuDataLocker(cpuData.lock);

	bigtime_t realTimeOffset = rtc_boot_time();
	if (realTimeOffset == cpuData.real_time_offset)
		return;

	// The real time offset has changed. We need to update all affected
	// timers. First find and dequeue them.
	bigtime_t timeDiff = cpuData.real_time_offset - realTimeOffset;
	cpuData.real_time_offset = realTimeOffset;

	timer* affectedTimers = NULL;
	timer** it = &cpuData.events;
	timer* firstEvent = *it;
	while (timer* event = *it) {
		// check whether it's an absolute real-time timer
		uint32 flags = event->flags;
		if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER
			|| (flags & B_TIMER_REAL_TIME_BASE) == 0) {
			it = &event->next;
			continue;
		}

		// Yep, remove the timer from the queue and add it to the
		// affectedTimers list.
		*it = event->next;
		event->next = affectedTimers;
		affectedTimers = event;
	}

	// update and requeue the affected timers
	bool firstEventChanged = cpuData.events != firstEvent;
	firstEvent = cpuData.events;

	while (affectedTimers != NULL) {
		timer* event = affectedTimers;
		affectedTimers = event->next;

		bigtime_t oldTime = event->schedule_time;
		event->schedule_time += timeDiff;

		// handle over-/underflows
		if (timeDiff >= 0) {
			if (event->schedule_time < oldTime)
				event->schedule_time = B_INFINITE_TIMEOUT;
		} else {
			if (event->schedule_time < 0)
				event->schedule_time = 0;
		}

		add_event_to_list(event, &cpuData.events);
	}

	firstEventChanged |= cpuData.events != firstEvent;

	// If the first event has changed, reset the hardware timer.
	if (firstEventChanged)
		set_hardware_timer(cpuData.events->schedule_time);
}


// #pragma mark - debugging


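/*!	Debugger command ("timers"): prints the scheduled timer events of all
	CPUs.
*/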
static int
dump_timers(int argc, char** argv)
{
	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++) {
		kprintf("CPU %" B_PRId32 ":\n", i);

		if (sPerCPU[i].events == NULL) {
			kprintf("  no timers scheduled\n");
			continue;
		}

		for (timer* event = sPerCPU[i].events; event != NULL;
				event = event->next) {
			kprintf("  [%9lld] %p: ", (long long)event->schedule_time, event);
			if ((event->flags & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER)
				kprintf("periodic %9lld, ", (long long)event->period);
			else
				kprintf("one shot,           ");

			kprintf("flags: %#" B_PRIx32 ", user data: %p, callback: %p  ",
				event->flags, event->user_data, event->hook);

			// look up and print the hook function symbol
			const char* symbol;
			const char* imageName;
			bool exactMatch;

			status_t error = elf_debug_lookup_symbol_address(
				(addr_t)event->hook, NULL, &symbol, &imageName, &exactMatch);
			if (error == B_OK && exactMatch) {
				if (const char* slash = strrchr(imageName, '/'))
					imageName = slash + 1;

				kprintf("   %s:%s", imageName, symbol);
			}

			kprintf("\n");
		}
	}

	kprintf("current time: %lld\n", (long long)system_time());

	return 0;
}


// #pragma mark - kernel-private


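/*!	Initializes the hardware timer via the architecture specific code and
	registers the "timers" debugger command.
*/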
status_t
timer_init(kernel_args* args)
{
	TRACE(("timer_init: entry\n"));

	if (arch_init_timer(args) != B_OK)
		panic("arch_init_timer() failed");

	add_debugger_command_etc("timers", &dump_timers, "List all timers",
		"\n"
		"Prints a list of all scheduled timers.\n", 0);

	return B_OK;
}


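/*!	Initializes the per-CPU real time offsets. Must be called once the real
	time clock has been initialized.
*/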
void
timer_init_post_rtc(void)
{
	bigtime_t realTimeOffset = rtc_boot_time();

	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++)
		sPerCPU[i].real_time_offset = realTimeOffset;
}


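/*!	Called when the real time clock offset has changed. Lets each CPU adjust
	its absolute real-time timers accordingly.
*/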
void
timer_real_time_clock_changed()
{
	call_all_cpus(&per_cpu_real_time_clock_changed, NULL);
}


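/*!	Timer interrupt handler: fires all expired events of the current CPU,
	requeues periodic ones, and programs the hardware timer for the next
	pending event.

	\return \c B_HANDLED_INTERRUPT, or the return value of the last timer
		hook called (e.g. \c B_INVOKE_SCHEDULER).
*/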
int32
timer_interrupt()
{
	per_cpu_timer_data& cpuData = sPerCPU[smp_get_current_cpu()];
	int32 rc = B_HANDLED_INTERRUPT;

	TRACE(("timer_interrupt: time %" B_PRIdBIGTIME ", cpu %" B_PRId32 "\n",
		system_time(), smp_get_current_cpu()));

	spinlock* lock = &cpuData.lock;
	acquire_spinlock(lock);

	timer* event = cpuData.events;
	while (event != NULL && ((bigtime_t)event->schedule_time < system_time())) {
		// this event needs to happen
		int mode = event->flags;

		cpuData.events = event->next;
		cpuData.current_event = event;
		atomic_set(&cpuData.current_event_in_progress, 1);

		release_spinlock(lock);

		TRACE(("timer_interrupt: calling hook %p for event %p\n", event->hook,
			event));

		// call the callback
		// note: if the event is not periodic, it is ok
		// to delete the event structure inside the callback
		if (event->hook)
			rc = event->hook(event);

		atomic_set(&cpuData.current_event_in_progress, 0);

		acquire_spinlock(lock);

		if ((mode & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER
				&& cpuData.current_event != NULL) {
			// we need to adjust it and add it back to the list
			event->schedule_time += event->period;

			// If the new schedule time is a full interval or more in the past,
			// skip ticks.
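			// (E.g. with period 100 and a schedule_time of 150 after the
			// increment above, now == 375 yields 375 - (375 - 150) % 100
			// == 350, the latest tick not after now.)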
			bigtime_t now = system_time();
			if (now >= event->schedule_time + event->period) {
				// pick the closest tick in the past
				event->schedule_time = now
					- (now - event->schedule_time) % event->period;
			}

			add_event_to_list(event, &cpuData.events);
		}

		cpuData.current_event = NULL;
		event = cpuData.events;
	}

	// set up the next hardware timer
	if (cpuData.events != NULL)
		set_hardware_timer(cpuData.events->schedule_time);

	release_spinlock(lock);

	return rc;
}


// #pragma mark - public API


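/*!	Schedules the timer \a event to call \a hook.

	Unless \c B_TIMER_USE_TIMER_STRUCT_TIMES is set, \a period is interpreted
	according to the timer type in \a flags: as a relative timeout, as an
	absolute system time (with \c B_TIMER_REAL_TIME_BASE an absolute real
	time), or as the interval of a periodic timer. The event is queued on the
	current CPU.
*/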
status_t
add_timer(timer* event, timer_hook hook, bigtime_t period, int32 flags)
{
	const bigtime_t currentTime = system_time();

	if (event == NULL || hook == NULL || period < 0)
		return B_BAD_VALUE;

	TRACE(("add_timer: event %p\n", event));

	// compute the schedule time
	if ((flags & B_TIMER_USE_TIMER_STRUCT_TIMES) == 0) {
		bigtime_t scheduleTime = period;
		if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER)
			scheduleTime += currentTime;
		event->schedule_time = (int64)scheduleTime;
		event->period = period;
	}

	event->hook = hook;
	event->flags = flags;

	InterruptsLocker interruptsLocker;
	const int currentCPU = smp_get_current_cpu();
	per_cpu_timer_data& cpuData = sPerCPU[currentCPU];
	SpinLocker locker(&cpuData.lock);

	// If the timer is an absolute real-time base timer, convert the schedule
	// time to system time.
	if ((flags & ~B_TIMER_FLAGS) == B_ONE_SHOT_ABSOLUTE_TIMER
			&& (flags & B_TIMER_REAL_TIME_BASE) != 0) {
		if (event->schedule_time > cpuData.real_time_offset)
			event->schedule_time -= cpuData.real_time_offset;
		else
			event->schedule_time = 0;
	}

	add_event_to_list(event, &cpuData.events);
	event->cpu = currentCPU;

	// if the event was inserted at the head of the list, update the hardware
	// timer
	if (event == cpuData.events)
		set_hardware_timer(event->schedule_time, currentTime);

	return B_OK;
}


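/*!	Cancels the timer \a event.

	\return \c true, if the timer has already fired or its hook is currently
		being executed, \c false otherwise.
*/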
bool
cancel_timer(timer* event)
{
	TRACE(("cancel_timer: event %p\n", event));

	InterruptsLocker _;

	// lock the right CPU spinlock
	int cpu = event->cpu;
	SpinLocker spinLocker;
	while (true) {
		if (cpu >= SMP_MAX_CPUS)
			return false;

		spinLocker.SetTo(sPerCPU[cpu].lock, false);
		if (cpu == event->cpu)
			break;

		// cpu field changed while we were trying to lock
		spinLocker.Unlock();
		cpu = event->cpu;
	}

	per_cpu_timer_data& cpuData = sPerCPU[cpu];

	if (event != cpuData.current_event) {
		// The timer hook is not yet being executed.
		timer* current = cpuData.events;
		timer* previous = NULL;

		while (current != NULL) {
			if (current == event) {
				// found it: unlink the event from the queue
				if (previous == NULL)
					cpuData.events = current->next;
				else
					previous->next = current->next;
				current->next = NULL;
				break;
			}
			previous = current;
			current = current->next;
		}

		// If not found, we assume this was a one-shot timer and has already
		// fired.
		if (current == NULL)
			return true;

		// invalidate CPU field
		event->cpu = 0xffff;

		// If on the current CPU, also reset the hardware timer.
		// FIXME: Theoretically the reset is only needed when the removed event
		// was at the head of the queue (previous == NULL). But adding that
		// check seems to cause problems on some systems, possibly due to some
		// other bug. For now, just reset the hardware timer on every
		// cancellation.
		if (cpu == smp_get_current_cpu()) {
			if (cpuData.events == NULL)
				arch_timer_clear_hardware_timer();
			else
				set_hardware_timer(cpuData.events->schedule_time);
		}

		return false;
	}

	// The timer hook is currently being executed. We clear the current
	// event so that timer_interrupt() will not reschedule periodic timers.
	cpuData.current_event = NULL;

	// If the hook is currently being executed on another CPU, we have to
	// wait for it to complete. When called from the timer hook itself we
	// don't wait, of course.
	if (cpu != smp_get_current_cpu()) {
		spinLocker.Unlock();

		while (atomic_get(&cpuData.current_event_in_progress) == 1)
			cpu_wait(&cpuData.current_event_in_progress, 0);
	}

	return true;
}


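/*!	Busy-waits until at least \a microseconds have passed. */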
void
spin(bigtime_t microseconds)
{
	bigtime_t time = system_time();

	while ((system_time() - time) < microseconds)
		cpu_pause();
}
454