xref: /haiku/src/system/kernel/timer.cpp (revision b671e9bbdbd10268a042b4f4cc4317ccd03d105e)
/*
 * Copyright 2002-2008, Haiku. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*!	Kernel timer support: maintains a per-CPU queue of timer events and
	dispatches their hooks from the timer interrupt.
*/

#include <timer.h>

#include <OS.h>

#include <arch/timer.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <smp.h>
#include <thread.h>
#include <util/AutoLock.h>


struct per_cpu_timer_data {
	spinlock		lock;
	timer* volatile	events;			// pending events, sorted by schedule_time
	timer* volatile	current_event;	// event whose hook is currently running
	vint32			current_event_in_progress;
};

static per_cpu_timer_data sPerCPU[B_MAX_CPU_COUNT];


//#define TRACE_TIMER
#ifdef TRACE_TIMER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


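/*!	Initializes the timer subsystem at boot; simply hands off to the
	architecture-specific timer setup.
*/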
status_t
timer_init(kernel_args *args)
{
	TRACE(("timer_init: entry\n"));

	return arch_init_timer(args);
}


/*!	Inserts the event into the list, which is kept sorted by ascending
	schedule_time.
	NOTE: expects interrupts to be off.
*/
static void
add_event_to_list(timer *event, timer * volatile *list)
{
	timer *next;
	timer *last = NULL;

	// stick it in the event list, keeping the sort order
	for (next = *list; next; last = next, next = (timer *)next->next) {
		if ((bigtime_t)next->schedule_time >= (bigtime_t)event->schedule_time)
			break;
	}

	if (last != NULL) {
		event->next = last->next;
		last->next = event;
	} else {
		event->next = next;
		*list = event;
	}
}


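/*!	Dispatches all due events on the current CPU's queue and then programs
	the hardware timer for the next pending event. Called from the
	architecture's timer interrupt handler with interrupts disabled; the
	per-CPU lock is dropped while each hook runs.
*/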
int32
timer_interrupt()
{
	timer *event;
	spinlock *spinlock;
	per_cpu_timer_data& cpuData = sPerCPU[smp_get_current_cpu()];
	int32 rc = B_HANDLED_INTERRUPT;

	TRACE(("timer_interrupt: time %lld, cpu %ld\n", system_time(),
		smp_get_current_cpu()));

	spinlock = &cpuData.lock;

	acquire_spinlock(spinlock);

	event = cpuData.events;
	while (event != NULL && ((bigtime_t)event->schedule_time < system_time())) {
		// this event needs to happen
		int mode = event->flags;

		cpuData.events = (timer *)event->next;
		cpuData.current_event = event;
		cpuData.current_event_in_progress = 1;
		event->schedule_time = 0;

		release_spinlock(spinlock);

		TRACE(("timer_interrupt: calling hook %p for event %p\n", event->hook,
			event));

		// call the callback
		// note: if the event is not periodic, it is ok
		// to delete the event structure inside the callback
		if (event->hook) {
			bool callHook = true;

			// we may need to acquire the thread spinlock
			if ((mode & B_TIMER_ACQUIRE_THREAD_LOCK) != 0) {
				GRAB_THREAD_LOCK();

				// If the event has been cancelled in the meantime, we don't
				// call the hook anymore.
				if (cpuData.current_event == NULL)
					callHook = false;
			}

			if (callHook)
				rc = event->hook(event);

			if ((mode & B_TIMER_ACQUIRE_THREAD_LOCK) != 0)
				RELEASE_THREAD_LOCK();
		}

		cpuData.current_event_in_progress = 0;

		acquire_spinlock(spinlock);

		if ((mode & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER
			&& cpuData.current_event != NULL) {
			// we need to adjust it and add it back to the list
			bigtime_t scheduleTime = system_time() + event->period;
			if (scheduleTime == 0) {
				// if we wrapped around and happen to hit zero, set
				// it to one, since zero represents not scheduled
				scheduleTime = 1;
			}
			event->schedule_time = (int64)scheduleTime;
			add_event_to_list(event, &cpuData.events);
		}

		cpuData.current_event = NULL;

		event = cpuData.events;
	}

	// setup the next hardware timer
	if (cpuData.events != NULL) {
		arch_timer_set_hardware_timer(
			(bigtime_t)cpuData.events->schedule_time - system_time());
	}

	release_spinlock(spinlock);

	return rc;
}


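/*!	Schedules the event on the CPU that add_timer() is called on. \a period
	is in microseconds: for B_ONE_SHOT_ABSOLUTE_TIMER it is an absolute
	system_time() at which to fire, otherwise a relative delay (and the
	repeat interval for B_PERIODIC_TIMER).
*/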
status_t
add_timer(timer *event, timer_hook hook, bigtime_t period, int32 flags)
{
	bigtime_t scheduleTime;
	bigtime_t currentTime = system_time();
	cpu_status state;

	if (event == NULL || hook == NULL || period < 0)
		return B_BAD_VALUE;

	TRACE(("add_timer: event %p\n", event));

	scheduleTime = period;
	if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER)
		scheduleTime += currentTime;
	if (scheduleTime == 0)
		scheduleTime = 1;

	event->schedule_time = (int64)scheduleTime;
	event->period = period;
	event->hook = hook;
	event->flags = flags;

	state = disable_interrupts();
	int currentCPU = smp_get_current_cpu();
	per_cpu_timer_data& cpuData = sPerCPU[currentCPU];
	acquire_spinlock(&cpuData.lock);

	add_event_to_list(event, &cpuData.events);
	event->cpu = currentCPU;

	// if the event ended up at the head of the list, we have to reprogram
	// the hardware timer
	if (event == cpuData.events)
		arch_timer_set_hardware_timer(scheduleTime - currentTime);

	release_spinlock(&cpuData.lock);
	restore_interrupts(state);

	return B_OK;
}
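

// A minimal usage sketch (illustrative only; "sMyTimer" and "my_timer_hook"
// are hypothetical names, not part of this file): a periodic timer firing
// once per second. Hooks run in interrupt context and must stay short.
//
//	static timer sMyTimer;
//
//	static int32
//	my_timer_hook(timer* event)
//	{
//		// runs with interrupts disabled; keep it short
//		return B_HANDLED_INTERRUPT;
//	}
//
//	add_timer(&sMyTimer, &my_timer_hook, 1000000LL, B_PERIODIC_TIMER);
//	...
//	cancel_timer(&sMyTimer);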


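/*!	Removes the event from its CPU's queue, or, if the hook is already
	running, prevents a periodic event from being rescheduled. Returns
	\c true if the timer has already fired or its hook is currently being
	executed, \c false otherwise.
*/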
bool
cancel_timer(timer *event)
{
	TRACE(("cancel_timer: event %p\n", event));

	InterruptsLocker _;

	// lock the right CPU spinlock
	int cpu = event->cpu;
	SpinLocker spinLocker;
	while (true) {
		if (cpu >= B_MAX_CPU_COUNT)
			return false;

		spinLocker.SetTo(sPerCPU[cpu].lock, false);
		if (cpu == event->cpu)
			break;

		// the cpu field changed while we were trying to lock
		spinLocker.Unlock();
		cpu = event->cpu;
	}

	per_cpu_timer_data& cpuData = sPerCPU[cpu];

	timer *current = cpuData.events;

	if (event != cpuData.current_event) {
		// The timer hook is not yet being executed.
		timer *last = NULL;

		while (current != NULL) {
			if (current == event) {
				// we found it
				if (current == cpuData.events)
					cpuData.events = current->next;
				else
					last->next = current->next;
				current->next = NULL;
				// break out of the whole thing
				break;
			}
			last = current;
			current = current->next;
		}

		// If not found, we assume this was a one-shot timer that has already
		// fired.
		if (current == NULL)
			return true;

		// invalidate the CPU field
		event->cpu = 0xffff;

		// If on the current CPU, also reset the hardware timer.
		if (cpu == smp_get_current_cpu()) {
			if (cpuData.events == NULL)
				arch_timer_clear_hardware_timer();
			else {
				arch_timer_set_hardware_timer(
					(bigtime_t)cpuData.events->schedule_time - system_time());
			}
		}

		return false;
	} else {
		// The timer hook is currently being executed. We clear the current
		// event so that timer_interrupt() will not reschedule periodic timers.
		cpuData.current_event = NULL;
		current = event;

		// Unless this is a kernel-private timer that also requires the thread
		// lock to be held while calling the event hook, we'll have to wait
		// for the hook to complete. When called from the timer hook itself we
		// must not wait either, of course.
		if ((event->flags & B_TIMER_ACQUIRE_THREAD_LOCK) == 0
			&& cpu != smp_get_current_cpu()) {
			spinLocker.Unlock();

			while (cpuData.current_event_in_progress == 1) {
				PAUSE();
			}
		}

		return true;
	}
}


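/*!	Busy-waits for at least \a microseconds. Does not block or reschedule;
	intended for very short delays only.
*/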
void
spin(bigtime_t microseconds)
{
	bigtime_t time = system_time();

	while ((system_time() - time) < microseconds) {
		PAUSE();
	}
}