xref: /haiku/src/system/kernel/timer.cpp (revision 97901ec593ec4dd50ac115c1c35a6d72f6e489a5)
/*
 * Copyright 2002-2009, Haiku. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*! Policy info for timers */

#include <timer.h>

#include <OS.h>

#include <arch/timer.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <smp.h>
#include <thread.h>
#include <util/AutoLock.h>

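// Timer events are kept in per-CPU lists, each sorted by ascending
// schedule_time; every CPU programs its own hardware timer for the event at
// the head of its list.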
struct per_cpu_timer_data {
	spinlock		lock;			// protects the fields below
	timer* volatile	events;			// pending events, sorted by schedule_time
	timer* volatile	current_event;	// event whose hook is currently running
	vint32			current_event_in_progress;
};

static per_cpu_timer_data sPerCPU[B_MAX_CPU_COUNT];


//#define TRACE_TIMER
#ifdef TRACE_TIMER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


status_t
timer_init(kernel_args *args)
{
	TRACE(("timer_init: entry\n"));

	return arch_init_timer(args);
}


/*!	Inserts \a event into \a list, which is kept sorted by ascending
	schedule_time.
	NOTE: expects interrupts to be off
*/
static void
add_event_to_list(timer *event, timer * volatile *list)
{
	timer *next;
	timer *last = NULL;

	// stick it in the event list, before the first event that is due later
	for (next = *list; next; last = next, next = (timer *)next->next) {
		if ((bigtime_t)next->schedule_time >= (bigtime_t)event->schedule_time)
			break;
	}

	if (last != NULL) {
		event->next = last->next;
		last->next = event;
	} else {
		event->next = next;
		*list = event;
	}
}


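/*!	Dispatches all events on the current CPU whose schedule_time has passed:
	each hook is called with the per-CPU lock released, periodic timers are
	rescheduled, and at the end the hardware timer is reprogrammed for the
	earliest remaining event. The return value of the last hook that ran
	(e.g. B_HANDLED_INTERRUPT or B_INVOKE_SCHEDULER) is handed back to the
	architecture's interrupt code.
*/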
int32
timer_interrupt()
{
	timer *event;
	spinlock *spinlock;
	per_cpu_timer_data& cpuData = sPerCPU[smp_get_current_cpu()];
	int32 rc = B_HANDLED_INTERRUPT;

	TRACE(("timer_interrupt: time %lld, cpu %ld\n", system_time(),
		smp_get_current_cpu()));

	spinlock = &cpuData.lock;

	acquire_spinlock(spinlock);

	event = cpuData.events;
	while (event != NULL && ((bigtime_t)event->schedule_time < system_time())) {
		// this event needs to happen
		int mode = event->flags;

		cpuData.events = (timer *)event->next;
		cpuData.current_event = event;
		cpuData.current_event_in_progress = 1;
		event->schedule_time = 0;

		release_spinlock(spinlock);

		TRACE(("timer_interrupt: calling hook %p for event %p\n", event->hook,
			event));

		// call the callback
		// note: if the event is not periodic, it is ok
		// to delete the event structure inside the callback
		if (event->hook) {
			bool callHook = true;

			// we may need to acquire the thread spinlock
			if ((mode & B_TIMER_ACQUIRE_THREAD_LOCK) != 0) {
				GRAB_THREAD_LOCK();

				// If the event has been cancelled in the meantime, we don't
				// call the hook anymore.
				if (cpuData.current_event == NULL)
					callHook = false;
			}

			if (callHook)
				rc = event->hook(event);

			if ((mode & B_TIMER_ACQUIRE_THREAD_LOCK) != 0)
				RELEASE_THREAD_LOCK();
		}

		cpuData.current_event_in_progress = 0;

		acquire_spinlock(spinlock);

		if ((mode & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER
			&& cpuData.current_event != NULL) {
			// we need to adjust it and add it back to the list
			bigtime_t scheduleTime = system_time() + event->period;
			if (scheduleTime == 0) {
				// if we wrapped around and happen to hit zero, set
				// it to one, since zero represents not scheduled
				scheduleTime = 1;
			}
			event->schedule_time = (int64)scheduleTime;
			add_event_to_list(event, &cpuData.events);
		}

		cpuData.current_event = NULL;

		event = cpuData.events;
	}

	// set up the next hardware timer
	if (cpuData.events != NULL) {
		bigtime_t timeout = (bigtime_t)cpuData.events->schedule_time
			- system_time();
		if (timeout <= 0)
			timeout = 1;
		arch_timer_set_hardware_timer(timeout);
	}

	release_spinlock(spinlock);

	return rc;
}


status_t
add_timer(timer *event, timer_hook hook, bigtime_t period, int32 flags)
{
	bigtime_t scheduleTime;
	bigtime_t currentTime = system_time();
	cpu_status state;

	if (event == NULL || hook == NULL || period < 0)
		return B_BAD_VALUE;

	TRACE(("add_timer: event %p\n", event));

	scheduleTime = period;
	if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER)
		scheduleTime += currentTime;
	if (scheduleTime == 0)
		scheduleTime = 1;

	event->schedule_time = (int64)scheduleTime;
	event->period = period;
	event->hook = hook;
	event->flags = flags;

	state = disable_interrupts();
	int currentCPU = smp_get_current_cpu();
	per_cpu_timer_data& cpuData = sPerCPU[currentCPU];
	acquire_spinlock(&cpuData.lock);

	add_event_to_list(event, &cpuData.events);
	event->cpu = currentCPU;

	// if the event is at the head of the list, we have to set the hardware
	// timer
	if (event == cpuData.events)
		arch_timer_set_hardware_timer(scheduleTime - currentTime);

	release_spinlock(&cpuData.lock);
	restore_interrupts(state);

	return B_OK;
}
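
/* A minimal usage sketch (hypothetical caller): arm a periodic timer that
   fires every 100 ms. The hook runs in interrupt context on the CPU that
   armed the timer, so it must not block.

	static timer sSampleTimer;

	static int32
	sample_hook(timer *event)
	{
		// keep it short; return B_INVOKE_SCHEDULER instead if the
		// scheduler should run afterwards
		return B_HANDLED_INTERRUPT;
	}

	add_timer(&sSampleTimer, &sample_hook, 100000LL, B_PERIODIC_TIMER);

   For one-shot timers, period is a relative delay with
   B_ONE_SHOT_RELATIVE_TIMER, or an absolute system_time() deadline with
   B_ONE_SHOT_ABSOLUTE_TIMER.
*/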


bool
cancel_timer(timer *event)
{
	TRACE(("cancel_timer: event %p\n", event));

	InterruptsLocker _;

	// lock the right CPU spinlock
	int cpu = event->cpu;
	SpinLocker spinLocker;
	while (true) {
		if (cpu >= B_MAX_CPU_COUNT)
			return false;

		spinLocker.SetTo(sPerCPU[cpu].lock, false);
		if (cpu == event->cpu)
			break;

		// cpu field changed while we were trying to lock
		spinLocker.Unlock();
		cpu = event->cpu;
	}

	per_cpu_timer_data& cpuData = sPerCPU[cpu];

	timer *current = cpuData.events;

	if (event != cpuData.current_event) {
		// The timer hook is not yet being executed.
		timer *last = NULL;

		while (current != NULL) {
			if (current == event) {
				// we found it
				if (current == cpuData.events)
					cpuData.events = (timer *)current->next;
				else
					last->next = current->next;
				current->next = NULL;
				// break out of the whole thing
				break;
			}
			last = current;
			current = (timer *)current->next;
		}

		// If the event wasn't in the list, we assume it was a one-shot timer
		// that has already fired.
		if (current == NULL)
			return true;

		// invalidate CPU field
		event->cpu = 0xffff;

		// If on the current CPU, also reset the hardware timer.
		if (cpu == smp_get_current_cpu()) {
			if (cpuData.events == NULL)
				arch_timer_clear_hardware_timer();
			else {
				arch_timer_set_hardware_timer(
					(bigtime_t)cpuData.events->schedule_time - system_time());
			}
		}

		return false;
	} else {
		// The timer hook is currently being executed. We clear the current
		// event so that timer_interrupt() will not reschedule periodic timers.
		cpuData.current_event = NULL;
		current = event;

		// Unless this is a kernel-private timer that also requires the thread
		// lock to be held while calling the event hook, we'll have to wait
		// for the hook to complete. When called from the timer hook we don't
		// wait either, of course.
		if ((event->flags & B_TIMER_ACQUIRE_THREAD_LOCK) == 0
			&& cpu != smp_get_current_cpu()) {
			spinLocker.Unlock();

			while (cpuData.current_event_in_progress == 1) {
				PAUSE();
			}
		}

		return true;
	}
}
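
/* A note on the return value (a summary of the logic above): true means the
   event was no longer scheduled, i.e. its hook has already fired or is
   firing right now; false means it was removed from the queue before
   firing. A hypothetical caller:

	if (cancel_timer(&sSampleTimer)) {
		// the hook ran (or is running); coordinate any cleanup with it
	} else {
		// the timer was dequeued before it fired
	}
*/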


void
spin(bigtime_t microseconds)
{
	bigtime_t time = system_time();

	while ((system_time() - time) < microseconds) {
		PAUSE();
	}
}
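
/* Illustrative note: spin() busy-waits, so it is only suitable for very
   short delays, e.g. spin(10) to wait roughly ten microseconds for a device
   register to settle; for longer waits use a timer or snooze() instead.
*/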
303