/*
 * Copyright 2002-2010, Haiku. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Kernel timer support: maintains per-CPU queues of timer events and
	dispatches their hooks when they expire. */


#include <timer.h>

#include <OS.h>

#include <arch/timer.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <smp.h>
#include <thread.h>
#include <util/AutoLock.h>


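// Each CPU maintains its own list of pending timer events, sorted by
// schedule_time and protected by a per-CPU spinlock. current_event and
// current_event_in_progress track the event whose hook is currently being
// called, which lets cancel_timer() synchronize with a running hook.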
struct per_cpu_timer_data {
	spinlock		lock;
	timer* volatile	events;
	timer* volatile	current_event;
	vint32			current_event_in_progress;
};

static per_cpu_timer_data sPerCPU[B_MAX_CPU_COUNT];


//#define TRACE_TIMER
#ifdef TRACE_TIMER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


status_t
timer_init(kernel_args* args)
{
	TRACE(("timer_init: entry\n"));

	return arch_init_timer(args);
}


/*! NOTE: expects interrupts to be off */
static void
add_event_to_list(timer* event, timer* volatile* list)
{
	timer* next;
	timer* last = NULL;

	// find the insertion point; the list is kept sorted by schedule_time
	for (next = *list; next; last = next, next = (timer*)next->next) {
		if ((bigtime_t)next->schedule_time >= (bigtime_t)event->schedule_time)
			break;
	}

	if (last != NULL) {
		event->next = last->next;
		last->next = event;
	} else {
		event->next = next;
		*list = event;
	}
}


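/*! Dispatches all expired timer events for the current CPU, reschedules
	periodic timers, and programs the hardware timer for the next pending
	event. Called from the architecture's timer interrupt handler.
*/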
int32
timer_interrupt()
{
	timer* event;
	spinlock* lock;
	per_cpu_timer_data& cpuData = sPerCPU[smp_get_current_cpu()];
	int32 rc = B_HANDLED_INTERRUPT;

	TRACE(("timer_interrupt: time %lld, cpu %ld\n", system_time(),
		smp_get_current_cpu()));

	lock = &cpuData.lock;

	acquire_spinlock(lock);

	event = cpuData.events;
	while (event != NULL && ((bigtime_t)event->schedule_time < system_time())) {
		// this event needs to happen
		int mode = event->flags;

		cpuData.events = (timer*)event->next;
		cpuData.current_event = event;
		cpuData.current_event_in_progress = 1;
		event->schedule_time = 0;

		release_spinlock(lock);

		TRACE(("timer_interrupt: calling hook %p for event %p\n", event->hook,
			event));

		// call the callback
		// note: if the event is not periodic, it is ok
		// to delete the event structure inside the callback
		if (event->hook) {
			bool callHook = true;

			// we may need to acquire the thread spinlock
			if ((mode & B_TIMER_ACQUIRE_THREAD_LOCK) != 0) {
				GRAB_THREAD_LOCK();

				// If the event has been cancelled in the meantime, we don't
				// call the hook anymore.
				if (cpuData.current_event == NULL)
					callHook = false;
			}

			if (callHook)
				rc = event->hook(event);

			if ((mode & B_TIMER_ACQUIRE_THREAD_LOCK) != 0)
				RELEASE_THREAD_LOCK();
		}

		cpuData.current_event_in_progress = 0;

		acquire_spinlock(lock);

		if ((mode & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER
			&& cpuData.current_event != NULL) {
			// we need to adjust it and add it back to the list
			bigtime_t scheduleTime = system_time() + event->period;
			if (scheduleTime == 0) {
				// if we wrapped around and happen to hit zero, set
				// it to one, since zero represents not scheduled
				scheduleTime = 1;
			}
			event->schedule_time = (int64)scheduleTime;
			add_event_to_list(event, &cpuData.events);
		}

		cpuData.current_event = NULL;

		event = cpuData.events;
	}

	// set up the next hardware timer
	if (cpuData.events != NULL) {
		bigtime_t timeout = (bigtime_t)cpuData.events->schedule_time
			- system_time();
		if (timeout <= 0)
			timeout = 1;
		arch_timer_set_hardware_timer(timeout);
	}

	release_spinlock(lock);

	return rc;
}


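/*! Schedules \a event to call \a hook after \a period microseconds, or at
	absolute time \a period for B_ONE_SHOT_ABSOLUTE_TIMER events. The event
	is queued on the CPU the function is invoked on.
*/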
status_t
add_timer(timer* event, timer_hook hook, bigtime_t period, int32 flags)
{
	bigtime_t scheduleTime;
	bigtime_t currentTime = system_time();
	cpu_status state;

	if (event == NULL || hook == NULL || period < 0)
		return B_BAD_VALUE;

	TRACE(("add_timer: event %p\n", event));

	scheduleTime = period;
	if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER)
		scheduleTime += currentTime;
	if (scheduleTime == 0)
		scheduleTime = 1;

	event->schedule_time = (int64)scheduleTime;
	event->period = period;
	event->hook = hook;
	event->flags = flags;

	state = disable_interrupts();
	int currentCPU = smp_get_current_cpu();
	per_cpu_timer_data& cpuData = sPerCPU[currentCPU];
	acquire_spinlock(&cpuData.lock);

	add_event_to_list(event, &cpuData.events);
	event->cpu = currentCPU;

	// if the new event ended up at the head of the list, reprogram the
	// hardware timer
	if (event == cpuData.events)
		arch_timer_set_hardware_timer(scheduleTime - currentTime);

	release_spinlock(&cpuData.lock);
	restore_interrupts(state);

	return B_OK;
}
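

/* Usage sketch (illustrative only, not part of this file): a driver could
   register a periodic timer as follows; the names sMyTimer, my_timer_hook and
   start_polling are hypothetical.

	static timer sMyTimer;

	static int32
	my_timer_hook(timer* event)
	{
		// called from interrupt context; keep it short and non-blocking
		return B_HANDLED_INTERRUPT;
	}

	static void
	start_polling()
	{
		// invoke my_timer_hook every 100 ms (the period is in microseconds)
		add_timer(&sMyTimer, &my_timer_hook, 100000, B_PERIODIC_TIMER);
	}
*/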


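/*! Cancels \a event if it is still queued or currently executing. Returns
	\c false if the event was cancelled before its hook fired, \c true if
	the hook has already been (or is currently being) called.
*/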
bool
cancel_timer(timer* event)
{
	TRACE(("cancel_timer: event %p\n", event));

	InterruptsLocker _;

	// lock the right CPU spinlock
	int cpu = event->cpu;
	SpinLocker spinLocker;
	while (true) {
		if (cpu >= B_MAX_CPU_COUNT)
			return false;

		spinLocker.SetTo(sPerCPU[cpu].lock, false);
		if (cpu == event->cpu)
			break;

		// cpu field changed while we were trying to lock
		spinLocker.Unlock();
		cpu = event->cpu;
	}

	per_cpu_timer_data& cpuData = sPerCPU[cpu];

	if (event != cpuData.current_event) {
		// The timer hook is not yet being executed.
		timer* current = cpuData.events;
		timer* last = NULL;

		while (current != NULL) {
			if (current == event) {
				// we found it
				if (last == NULL)
					cpuData.events = current->next;
				else
					last->next = current->next;
				current->next = NULL;
				// break out of the whole thing
				break;
			}
			last = current;
			current = current->next;
		}

		// If not found, we assume this was a one-shot timer and has already
		// fired.
		if (current == NULL)
			return true;

		// invalidate CPU field
		event->cpu = 0xffff;

		// If on the current CPU, also reset the hardware timer.
		if (cpu == smp_get_current_cpu()) {
			if (cpuData.events == NULL)
				arch_timer_clear_hardware_timer();
			else {
				arch_timer_set_hardware_timer(
					(bigtime_t)cpuData.events->schedule_time - system_time());
			}
		}

		return false;
	}

	// The timer hook is currently being executed. We clear the current
	// event so that timer_interrupt() will not reschedule periodic timers.
	cpuData.current_event = NULL;

	// Unless this is a kernel-private timer that also requires the thread
	// lock to be held while calling the event hook, we'll have to wait
	// for the hook to complete. When called from the timer hook we don't
	// wait either, of course.
	if ((event->flags & B_TIMER_ACQUIRE_THREAD_LOCK) == 0
		&& cpu != smp_get_current_cpu()) {
		spinLocker.Unlock();

		while (cpuData.current_event_in_progress == 1) {
			PAUSE();
		}
	}

	return true;
}
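

/* Usage sketch (continuing the hypothetical example above): cancel the timer
   before the object containing it goes away. Note the return convention:
   cancel_timer() returns false if the event was removed before it fired, and
   true if the hook has already run (or is running).

	static void
	stop_polling()
	{
		cancel_timer(&sMyTimer);
		// For timers without B_TIMER_ACQUIRE_THREAD_LOCK that are not
		// cancelled from inside their own hook, cancel_timer() has waited
		// for a running hook to finish, so sMyTimer can be reused here.
	}
*/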


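/*! Busy-waits until at least \a microseconds have passed, without blocking
	the calling thread.
*/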
void
spin(bigtime_t microseconds)
{
	bigtime_t time = system_time();

	while ((system_time() - time) < microseconds) {
		PAUSE();
	}
}
303