xref: /haiku/src/system/kernel/arch/ppc/arch_cpu.cpp (revision 962b0b887d830b7f7bfd47de10112609302bbd82)
1 /*
2  * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 
10 #include <KernelExport.h>
11 
12 #include <arch_platform.h>
13 #include <arch_thread.h>
14 #include <arch/cpu.h>
15 #include <boot/kernel_args.h>
16 
// whether the CPU supports the "tlbia" (invalidate entire TLB) instruction;
// set in arch_cpu_init()
static bool sHasTlbia;
18 
19 status_t
20 arch_cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
21 {
22 	// enable FPU
23 	set_msr(get_msr() | MSR_FP_AVAILABLE);
24 
25 	// The current thread must be NULL for all CPUs till we have threads.
26 	// Some boot code relies on this.
27 	arch_thread_set_current_thread(NULL);
28 
29 	return B_OK;
30 }
31 
32 
33 status_t
34 arch_cpu_init(kernel_args *args)
35 {
36 	// TODO: Let the boot loader put that info into the kernel args
37 	// (property "tlbia" in the CPU node).
38 	sHasTlbia = false;
39 
40 	return B_OK;
41 }
42 
43 
44 status_t
45 arch_cpu_init_post_vm(kernel_args *args)
46 {
47 	return B_OK;
48 }
49 
50 status_t
51 arch_cpu_init_post_modules(kernel_args *args)
52 {
53 	return B_OK;
54 }
55 
56 #define CACHELINE 32
57 
58 void
59 arch_cpu_sync_icache(void *address, size_t len)
60 {
61 	int l, off;
62 	char *p;
63 
64 	off = (unsigned int)address & (CACHELINE - 1);
65 	len += off;
66 
67 	l = len;
68 	p = (char *)address - off;
69 	do {
70 		asm volatile ("dcbst 0,%0" :: "r"(p));
71 		p += CACHELINE;
72 	} while ((l -= CACHELINE) > 0);
73 	asm volatile ("sync");
74 
75 	p = (char *)address - off;
76 	do {
77 		asm volatile ("icbi 0,%0" :: "r"(p));
78 		p += CACHELINE;
79 	} while ((len -= CACHELINE) > 0);
80 	asm volatile ("sync");
81 	isync();
82 }
83 
84 
85 void
86 arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
87 {
88 	asm volatile("sync");
89 	while (start < end) {
90 		asm volatile("tlbie %0" :: "r" (start));
91 		asm volatile("eieio");
92 		asm volatile("sync");
93 		start += B_PAGE_SIZE;
94 	}
95 	asm volatile("tlbsync");
96 	asm volatile("sync");
97 }
98 
99 
100 void
101 arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
102 {
103 	int i;
104 
105 	asm volatile("sync");
106 	for (i = 0; i < num_pages; i++) {
107 		asm volatile("tlbie %0" :: "r" (pages[i]));
108 		asm volatile("eieio");
109 		asm volatile("sync");
110 	}
111 	asm volatile("tlbsync");
112 	asm volatile("sync");
113 }
114 
115 
116 void
117 arch_cpu_global_TLB_invalidate(void)
118 {
119 	if (sHasTlbia) {
120 		ppc_sync();
121 		tlbia();
122 		ppc_sync();
123 	} else {
124 		addr_t address = 0;
125 		unsigned long i;
126 
127 		ppc_sync();
128 		for (i = 0; i < 0x100000; i++) {
129 			tlbie(address);
130 			eieio();
131 			ppc_sync();
132 
133 			address += B_PAGE_SIZE;
134 		}
135 		tlbsync();
136 		ppc_sync();
137 	}
138 }
139 
140 
141 void
142 arch_cpu_user_TLB_invalidate(void)
143 {
144 	arch_cpu_global_TLB_invalidate();
145 }
146 
147 
148 status_t
149 arch_cpu_user_memcpy(void *to, const void *from, size_t size,
150 	addr_t *faultHandler)
151 {
152 	char *tmp = (char *)to;
153 	char *s = (char *)from;
154 	addr_t oldFaultHandler = *faultHandler;
155 
156 	if (ppc_set_fault_handler(faultHandler, (addr_t)&&error))
157 		goto error;
158 
159 	while (size--)
160 		*tmp++ = *s++;
161 
162 	*faultHandler = oldFaultHandler;
163 	return 0;
164 
165 error:
166 	*faultHandler = oldFaultHandler;
167 	return B_BAD_ADDRESS;
168 }
169 
170 
/**	\brief Copies at most (\a size - 1) characters from the string in \a from
 *	to the string in \a to, NUL-terminating the result as long as \a size
 *	is greater than 0.
 *
 *	\param to Pointer to the destination C-string.
 *	\param from Pointer to the source C-string.
 *	\param size Size in bytes of the string buffer pointed to by \a to.
 *
 *	\return strlen(\a from), or \c B_BAD_ADDRESS if a page fault occurred
 *		while accessing one of the buffers.
 */
180 
181 ssize_t
182 arch_cpu_user_strlcpy(char *to, const char *from, size_t size, addr_t *faultHandler)
183 {
184 	int from_length = 0;
185 	addr_t oldFaultHandler = *faultHandler;
186 
187 	if (ppc_set_fault_handler(faultHandler, (addr_t)&&error))
188 		goto error;
189 
190 	if (size > 0) {
191 		to[--size] = '\0';
192 		// copy
193 		for ( ; size; size--, from_length++, to++, from++) {
194 			if ((*to = *from) == '\0')
195 				break;
196 		}
197 	}
198 	// count any leftover from chars
199 	while (*from++ != '\0')
200 		from_length++;
201 
202 	*faultHandler = oldFaultHandler;
203 	return from_length;
204 
205 error:
206 	*faultHandler = oldFaultHandler;
207 	return B_BAD_ADDRESS;
208 }
209 
210 
211 status_t
212 arch_cpu_user_memset(void *s, char c, size_t count, addr_t *faultHandler)
213 {
214 	char *xs = (char *)s;
215 	addr_t oldFaultHandler = *faultHandler;
216 
217 	if (ppc_set_fault_handler(faultHandler, (addr_t)&&error))
218 		goto error;
219 
220 	while (count--)
221 		*xs++ = c;
222 
223 	*faultHandler = oldFaultHandler;
224 	return 0;
225 
226 error:
227 	*faultHandler = oldFaultHandler;
228 	return B_BAD_ADDRESS;
229 }
230 
231 
/*!	Shuts down or reboots the machine by delegating to the platform object.
	If ShutDown() returns at all, the request failed, hence B_ERROR.
*/
status_t
arch_cpu_shutdown(bool reboot)
{
	PPCPlatform::Default()->ShutDown(reboot);
	return B_ERROR;
}
238 
239 
/*!	Per-CPU idle hook. Intentionally empty -- no power-saving wait is
	implemented for PPC yet, so the idle loop simply spins.
*/
void
arch_cpu_idle(void)
{
}
244 
245 
246 // The purpose of this function is to trick the compiler. When setting the
247 // page_handler to a label that is obviously (to the compiler) never used,
248 // it may reorganize the control flow, so that the labeled part is optimized
249 // away.
250 // By invoking the function like this
251 //
252 //	if (ppc_set_fault_handler(faultHandler, (addr_t)&&error))
253 //		goto error;
254 //
255 // the compiler has to keep the labeled code, since it can't guess the return
256 // value of this (non-inlinable) function. At least in my tests it worked that
257 // way, and I hope it will continue to work like this in the future.
258 //
259 bool
260 ppc_set_fault_handler(addr_t *handlerLocation, addr_t handler)
261 {
262 	*handlerLocation = handler;
263 	return false;
264 }
265