xref: /haiku/src/system/kernel/timer.cpp (revision cc6e7cb3477cdb34c23be8ce246203d2b7f002de)
/*
 * Copyright 2002-2008, Haiku. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*! Kernel timer support: per-CPU timer event queues and dispatch. */

#include <timer.h>

#include <OS.h>

#include <arch/timer.h>
#include <boot/kernel_args.h>
#include <smp.h>
#include <thread.h>
#include <util/AutoLock.h>


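/*!	Per-CPU timer state: \c lock protects the sorted \c events queue,
	\c current_event is the event whose hook is currently being dispatched,
	and \c current_event_in_progress is non-zero while that hook runs, so
	that cancel_timer() can wait for it to return.
*/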
struct per_cpu_timer_data {
	spinlock		lock;
	timer* volatile	events;
	timer* volatile	current_event;
	vint32			current_event_in_progress;
};

static per_cpu_timer_data sPerCPU[B_MAX_CPU_COUNT];


//#define TRACE_TIMER
#ifdef TRACE_TIMER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

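// PAUSE() eases the busy-wait loops below (see cancel_timer() and spin());
// on x86 the "pause" instruction reduces power use and bus contention while
// spinning.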
#if __INTEL__
#	define PAUSE() asm volatile ("pause;")
#else
#	define PAUSE()
#endif


status_t
timer_init(kernel_args *args)
{
	TRACE(("timer_init: entry\n"));

	return arch_init_timer(args);
}


/*! NOTE: expects interrupts to be off */
static void
add_event_to_list(timer *event, timer * volatile *list)
{
	timer *next;
	timer *last = NULL;

	// stick it in the event list
	for (next = *list; next; last = next, next = (timer *)next->next) {
		if ((bigtime_t)next->schedule_time >= (bigtime_t)event->schedule_time)
			break;
	}

	if (last != NULL) {
		event->next = last->next;
		last->next = event;
	} else {
		event->next = next;
		*list = event;
	}
}
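
/*	Illustration of the resulting order, using a hypothetical local list
	(not actual kernel state): inserting events with schedule times 30, 10,
	and 20 keeps the list sorted, so its head is always the next event due.

		timer a, b, c;
		a.schedule_time = 30;
		b.schedule_time = 10;
		c.schedule_time = 20;

		timer* volatile list = NULL;
		add_event_to_list(&a, &list);	// list: a(30)
		add_event_to_list(&b, &list);	// list: b(10) -> a(30)
		add_event_to_list(&c, &list);	// list: b(10) -> c(20) -> a(30)
*/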


int32
timer_interrupt()
{
	timer *event;
	spinlock *spinlock;
	per_cpu_timer_data& cpuData = sPerCPU[smp_get_current_cpu()];
	int32 rc = B_HANDLED_INTERRUPT;

	TRACE(("timer_interrupt: time %lld, cpu %ld\n", system_time(),
		smp_get_current_cpu()));

	spinlock = &cpuData.lock;

	acquire_spinlock(spinlock);

	event = cpuData.events;
	while (event != NULL && ((bigtime_t)event->schedule_time < system_time())) {
		// this event needs to happen
		int mode = event->flags;

		cpuData.events = (timer *)event->next;
		cpuData.current_event = event;
		cpuData.current_event_in_progress = 1;
		event->schedule_time = 0;

		release_spinlock(spinlock);

		TRACE(("timer_interrupt: calling hook %p for event %p\n", event->hook,
			event));

		// call the callback
		// note: if the event is not periodic, it is ok
		// to delete the event structure inside the callback
		if (event->hook) {
			bool callHook = true;

			// we may need to acquire the thread spinlock
			if ((mode & B_TIMER_ACQUIRE_THREAD_LOCK) != 0) {
				GRAB_THREAD_LOCK();

				// If the event has been cancelled in the meantime, we don't
				// call the hook anymore.
				if (cpuData.current_event == NULL)
					callHook = false;
			}

			if (callHook)
				rc = event->hook(event);

			if ((mode & B_TIMER_ACQUIRE_THREAD_LOCK) != 0)
				RELEASE_THREAD_LOCK();
		}

		cpuData.current_event_in_progress = 0;

		acquire_spinlock(spinlock);

		if ((mode & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER
			&& cpuData.current_event != NULL) {
			// we need to adjust it and add it back to the list
			bigtime_t scheduleTime = system_time() + event->period;
			if (scheduleTime == 0) {
				// if we wrapped around and happen to hit zero, set
				// it to one, since zero represents not scheduled
				scheduleTime = 1;
			}
			event->schedule_time = (int64)scheduleTime;
			add_event_to_list(event, &cpuData.events);
		}

		cpuData.current_event = NULL;

		event = cpuData.events;
	}

	// set up the next hardware timer
	if (cpuData.events != NULL) {
		arch_timer_set_hardware_timer(
			(bigtime_t)cpuData.events->schedule_time - system_time());
	}

	release_spinlock(spinlock);

	return rc;
}
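
/*	A minimal sketch of a timer hook as dispatched above (my_timer_hook is
	illustrative, not part of this file). Hooks run in interrupt context,
	without the per-CPU timer lock held; returning B_INVOKE_SCHEDULER
	instead of B_HANDLED_INTERRUPT requests a reschedule once interrupt
	handling is done.

		static int32
		my_timer_hook(timer* event)
		{
			// keep this short: we are in interrupt context
			return B_HANDLED_INTERRUPT;
		}
*/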


status_t
add_timer(timer *event, timer_hook hook, bigtime_t period, int32 flags)
{
	bigtime_t scheduleTime;
	bigtime_t currentTime = system_time();
	cpu_status state;

	if (event == NULL || hook == NULL || period < 0)
		return B_BAD_VALUE;

	TRACE(("add_timer: event %p\n", event));

	scheduleTime = period;
	if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER)
		scheduleTime += currentTime;
	if (scheduleTime == 0)
		scheduleTime = 1;

	event->schedule_time = (int64)scheduleTime;
	event->period = period;
	event->hook = hook;
	event->flags = flags;

	state = disable_interrupts();
	int currentCPU = smp_get_current_cpu();
	per_cpu_timer_data& cpuData = sPerCPU[currentCPU];
	acquire_spinlock(&cpuData.lock);

	add_event_to_list(event, &cpuData.events);
	event->cpu = currentCPU;

	// if the event ended up at the head of the list, reset the hardware timer
	if (event == cpuData.events)
		arch_timer_set_hardware_timer(scheduleTime - currentTime);

	release_spinlock(&cpuData.lock);
	restore_interrupts(state);

	return B_OK;
}
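
/*	A minimal usage sketch, assuming the hypothetical my_timer_hook above
	and an illustrative sMyTimer: arm a periodic timer that fires every
	100 ms on the current CPU. With B_ONE_SHOT_ABSOLUTE_TIMER the period
	argument is instead interpreted as an absolute system_time() deadline.

		static timer sMyTimer;

		status_t status = add_timer(&sMyTimer, &my_timer_hook, 100000LL,
			B_PERIODIC_TIMER);
		if (status != B_OK)
			return status;
*/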


bool
cancel_timer(timer *event)
{
	TRACE(("cancel_timer: event %p\n", event));

	InterruptsLocker _;

	// lock the right CPU spinlock
	int cpu = event->cpu;
	SpinLocker spinLocker;
	while (true) {
		if (cpu >= B_MAX_CPU_COUNT)
			return false;

		spinLocker.SetTo(sPerCPU[cpu].lock, false);
		if (cpu == event->cpu)
			break;

		// cpu field changed while we were trying to lock
		spinLocker.Unlock();
		cpu = event->cpu;
	}

	per_cpu_timer_data& cpuData = sPerCPU[cpu];

	timer *current = cpuData.events;

	if (event != cpuData.current_event) {
		// The timer hook is not yet being executed.
		timer *last = NULL;

		while (current != NULL) {
			if (current == event) {
				// we found it
				if (current == cpuData.events)
					cpuData.events = current->next;
				else
					last->next = current->next;
				current->next = NULL;
				// break out of the whole thing
				break;
			}
			last = current;
			current = current->next;
		}

		// If not found, we assume this was a one-shot timer and has already
		// fired.
		if (current == NULL)
			return true;

		// invalidate CPU field
		event->cpu = 0xffff;

		// If on the current CPU, also reset the hardware timer.
		if (cpu == smp_get_current_cpu()) {
			if (cpuData.events == NULL)
				arch_timer_clear_hardware_timer();
			else {
				arch_timer_set_hardware_timer(
					(bigtime_t)cpuData.events->schedule_time - system_time());
			}
		}

		return false;
	} else {
		// The timer hook is currently being executed. We clear the current
		// event so that timer_interrupt() will not reschedule periodic timers.
		cpuData.current_event = NULL;
		current = event;

		// Unless this is a kernel-private timer that also requires the thread
		// lock to be held while calling the event hook, we'll have to wait
		// for the hook to complete. When called from the timer hook we don't
		// wait either, of course.
		if ((event->flags & B_TIMER_ACQUIRE_THREAD_LOCK) == 0
			&& cpu != smp_get_current_cpu()) {
			spinLocker.Unlock();

			while (cpuData.current_event_in_progress == 1) {
				PAUSE();
			}
		}

		return true;
	}
}
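
/*	A teardown sketch, continuing the hypothetical sMyTimer example above:
	cancel_timer() returns true if the event was not found pending (it has
	already fired, or its hook is currently running and will not be
	rescheduled) and false if it was still pending and has been removed.
	Either way the event is no longer scheduled when the call returns.

		bool alreadyFired = cancel_timer(&sMyTimer);
		TRACE(("sMyTimer cancelled, already fired: %d\n", alreadyFired));
*/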


void
spin(bigtime_t microseconds)
{
	bigtime_t time = system_time();

	while ((system_time() - time) < microseconds) {
		PAUSE();
	}
}
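
/*	Usage sketch: busy-wait for a short, bounded delay, e.g. to let hardware
	settle after a register write. This burns CPU time, so it is only
	suitable for delays on the order of microseconds.

		spin(10);	// busy-wait for roughly 10 microseconds
*/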