xref: /haiku/src/system/kernel/arch/arm/arch_cpu.cpp (revision ed24eb5ff12640d052171c6a7feba37fab8a75d1)
1 /*
2  * Copyright 2007, François Revol, revol@free.fr.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
6  * Distributed under the terms of the MIT License.
7  *
8  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
9  * Distributed under the terms of the NewOS License.
10  */
11 
12 
13 #include <KernelExport.h>
14 
15 #include <arch/cpu.h>
16 #include <boot/kernel_args.h>
17 #include <commpage.h>
18 #include <elf.h>
19 
20 
21 status_t
22 arch_cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
23 {
24 	// The current thread must be NULL for all CPUs till we have threads.
25 	// Some boot code relies on this.
26 	arch_thread_set_current_thread(NULL);
27 
28 	return B_OK;
29 }
30 
31 
32 status_t
33 arch_cpu_init_percpu(kernel_args *args, int curr_cpu)
34 {
35 	if (curr_cpu != 0)
36 		panic("No SMP support on ARM yet!\n");
37 
38 	return 0;
39 }
40 
41 
42 status_t
43 arch_cpu_init(kernel_args *args)
44 {
45 	return B_OK;
46 }
47 
48 
49 status_t
50 arch_cpu_init_post_vm(kernel_args *args)
51 {
52 	return B_OK;
53 }
54 
55 
56 status_t
57 arch_cpu_init_post_modules(kernel_args *args)
58 {
59 	// add the functions to the commpage image
60 	image_id image = get_commpage_image();
61 
62 	return B_OK;
63 }
64 
65 
66 status_t
67 arch_cpu_shutdown(bool reboot)
68 {
69 	while(1)
70 		arch_cpu_idle();
71 
72 	// never reached
73 	return B_ERROR;
74 }
75 
76 
77 void
78 arch_cpu_sync_icache(void *address, size_t len)
79 {
80 	uint32 Rd = 0;
81 	asm volatile ("mcr p15, 0, %[c7format], c7, c5, 0"
82 		: : [c7format] "r" (Rd) );
83 }
84 
85 
void
arch_cpu_memory_read_barrier(void)
{
	// Compiler-level barrier only: prevents the compiler from reordering
	// memory accesses across this point, but emits no CPU instruction.
	// TODO: a hardware barrier (dmb) may be required here -- see the
	// inline variants in headers/private/kernel/arch/arm/arch_atomic.h.
	__asm__ __volatile__("" ::: "memory");
}
94 
95 
void
arch_cpu_memory_write_barrier(void)
{
	// Compiler-level barrier only: stops compiler reordering of stores
	// across this point without emitting any CPU instruction.
	// TODO: a hardware barrier (dmb/dsb) may be required here -- see the
	// inline variants in headers/private/kernel/arch/arm/arch_atomic.h.
	__asm__ __volatile__("" ::: "memory");
}
104 
105 
void
arch_cpu_invalidate_TLB_page(addr_t page)
{
	// Invalidate the TLB entry for a single virtual address, using the
	// Inner Shareable unified-TLB operation (CP15 c8, c3, 1).

	// ensure visibility of the update to translation table walks
	dsb();

	// TLBIMVAIS(page)
	asm volatile ("mcr p15, 0, %0, c8, c3, 1"
		: : "r" (page));

	// ensure completion of TLB invalidation
	dsb();
	isb();
}
120 
void
arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
{
	// Invalidate the TLB entries for the pages covering [start, end].
	// NOTE(review): the loop below executes (end/PS - start/PS) + 1 times
	// (post-decrement with ">= 0"), which is only correct if 'end' is an
	// inclusive bound -- confirm against the callers' convention.

	// ensure visibility of the update to translation table walks
	dsb();

	int32 num_pages = end / B_PAGE_SIZE - start / B_PAGE_SIZE;
	while (num_pages-- >= 0) {
		// NOTE(review): CP15 c8, c6, 1 is the *data* TLB invalidate-by-MVA
		// (DTLBIMVA); arch_cpu_invalidate_TLB_page() uses the unified
		// inner-shareable op (c8, c3, 1) instead -- verify the asymmetry
		// is intentional.
		asm volatile ("mcr p15, 0, %[c8format], c8, c6, 1"
			: : [c8format] "r" (start) );
		start += B_PAGE_SIZE;
	}

	// ensure completion of TLB invalidation
	dsb();
	isb();
}
138 
139 
void
arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
{
	// Invalidate the TLB entries for an explicit list of page addresses.

	// ensure visibility of the update to translation table walks
	dsb();

	for (int i = 0; i < num_pages; i++) {
		// NOTE(review): CP15 c8, c6, 1 is the *data* TLB invalidate-by-MVA
		// (DTLBIMVA), unlike the unified op used by
		// arch_cpu_invalidate_TLB_page() -- verify this is intentional.
		asm volatile ("mcr p15, 0, %[c8format], c8, c6, 1":
			: [c8format] "r" (pages[i]) );
	}

	// ensure completion of TLB invalidation
	dsb();
	isb();
}
155 
156 
void
arch_cpu_global_TLB_invalidate(void)
{
	// Invalidate the entire unified TLB (CP15 c8, c7, 0 -- TLBIALL).
	// The register operand's value is ignored by the operation.

	// ensure visibility of the update to translation table walks
	dsb();

	uint32 Rd = 0;
	asm volatile ("mcr p15, 0, %[c8format], c8, c7, 0"
		: : [c8format] "r" (Rd) );

	// ensure completion of TLB invalidation
	dsb();
	isb();
}
171 
172 
void
arch_cpu_user_TLB_invalidate(void)
{/*
	cpu_ops.flush_insn_pipeline();
	cpu_ops.flush_atc_user();
	cpu_ops.flush_insn_pipeline();
*/
// Unimplemented stub: should invalidate only user-space TLB entries.
// The commented-out sequence is leftover m68k-style pseudocode kept as a
// reminder of the intended steps; the #warning flags this at build time.
#warning WRITEME
}
182