xref: /haiku/src/system/boot/platform/bios_ia32/long.cpp (revision 294711f98c107cf2d9d05b7fc34cd863e87bd358)
/*
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Distributed under the terms of the MIT License.
 */


#include "long.h"

#include <KernelExport.h>

#include <arch/x86_64/descriptors.h>
#include <arch_system_info.h>
#include <boot/platform.h>
#include <boot/heap.h>
#include <boot/stage2.h>
#include <boot/stdio.h>
#include <kernel.h>

#include "debug.h"
#include "mmu.h"


/*!	Convert a 32-bit address to a 64-bit address. */
static inline uint64
fix_address(uint64 address)
{
	return address - KERNEL_BASE + KERNEL_BASE_64BIT;
}
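// For example, an object the loader placed at KERNEL_BASE + 0x1000 in the
// 32-bit address space is reached by the 64-bit kernel at
// KERNEL_BASE_64BIT + 0x1000, since long_mmu_init() below duplicates the
// loader's kernel mappings at the same page offsets from the new base.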


template<typename Type>
inline void
fix_address(FixedWidthPointer<Type>& p)
{
	if (p != NULL)
		p.SetTo(fix_address(p.Get()));
}


static void
long_gdt_init()
{
	// Allocate memory for the GDT.
	segment_descriptor* gdt = (segment_descriptor*)
		mmu_allocate_page(&gKernelArgs.arch_args.phys_gdt);
	gKernelArgs.arch_args.vir_gdt = fix_address((addr_t)gdt);

	dprintf("GDT at phys 0x%lx, virt 0x%llx\n", gKernelArgs.arch_args.phys_gdt,
		gKernelArgs.arch_args.vir_gdt);

	clear_segment_descriptor(&gdt[0]);

	// Set up code/data segments (TSS segments set up later in the kernel).
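	// Each selector constant (e.g. KERNEL_CODE_SEG) is a byte offset into the
	// GDT; dividing by the 8-byte descriptor size gives the table index.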
	set_segment_descriptor(&gdt[KERNEL_CODE_SEG / 8], DT_CODE_EXECUTE_ONLY,
		DPL_KERNEL);
	set_segment_descriptor(&gdt[KERNEL_DATA_SEG / 8], DT_DATA_WRITEABLE,
		DPL_KERNEL);
	set_segment_descriptor(&gdt[USER_CODE_SEG / 8], DT_CODE_EXECUTE_ONLY,
		DPL_USER);
	set_segment_descriptor(&gdt[USER_DATA_SEG / 8], DT_DATA_WRITEABLE,
		DPL_USER);
}


static void
long_idt_init()
{
	interrupt_descriptor* idt = (interrupt_descriptor*)
		mmu_allocate_page(&gKernelArgs.arch_args.phys_idt);
	gKernelArgs.arch_args.vir_idt = fix_address((addr_t)idt);

	dprintf("IDT at phys %#lx, virt %#llx\n", gKernelArgs.arch_args.phys_idt,
		gKernelArgs.arch_args.vir_idt);

	// The 32-bit kernel is handed an IDT that points at the loader's exception
	// handlers until it installs its own. We can't do the same here, since the
	// loader's handlers won't work once we have switched to long mode, so just
	// clear the IDT and leave the kernel to set it up.
	memset(idt, 0, B_PAGE_SIZE);
}


static void
long_mmu_init()
{
	addr_t physicalAddress;

	// Allocate the top level PML4.
	uint64* pml4 = (uint64*)mmu_allocate_page(&gKernelArgs.arch_args.phys_pgdir);
	memset(pml4, 0, B_PAGE_SIZE);
	gKernelArgs.arch_args.vir_pgdir = (uint64)(addr_t)pml4;

	// Identity map the first 1GB of physical memory using 2MB large pages.
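	// In the page table entries below, 0x3 sets the Present and Writable bits,
	// while 0x83 additionally sets the Page Size bit, turning a page directory
	// entry into a 2MB large-page mapping.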

	uint64* pdpt = (uint64*)mmu_allocate_page(&physicalAddress);
	memset(pdpt, 0, B_PAGE_SIZE);
	pml4[0] = physicalAddress | 0x3;

	uint64* pageDir = (uint64*)mmu_allocate_page(&physicalAddress);
	memset(pageDir, 0, B_PAGE_SIZE);
	pdpt[0] = physicalAddress | 0x3;

	for (uint32 i = 0; i < 512; i++) {
		pageDir[i] = (i * 0x200000) | 0x83;
	}

	// Allocate tables for the kernel mappings.

	pdpt = (uint64*)mmu_allocate_page(&physicalAddress);
	memset(pdpt, 0, B_PAGE_SIZE);
	pml4[511] = physicalAddress | 0x3;

	pageDir = (uint64*)mmu_allocate_page(&physicalAddress);
	memset(pageDir, 0, B_PAGE_SIZE);
	pdpt[510] = physicalAddress | 0x3;
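	// PML4 entry 511 and PDPT entry 510 correspond to the 1GB region starting
	// at 0xffffffff80000000, i.e. KERNEL_BASE_64BIT, where the 64-bit kernel
	// will live.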

	// Store the virtual memory usage information.
	gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE_64BIT;
	gKernelArgs.virtual_allocated_range[0].size = mmu_get_virtual_usage();
	gKernelArgs.num_virtual_allocated_ranges = 1;

	// We can now allocate page tables and duplicate the loader's 32-bit kernel
	// mappings into them.
	uint64* pageTable = NULL;
	for (uint32 i = 0; i < gKernelArgs.virtual_allocated_range[0].size
			/ B_PAGE_SIZE; i++) {
		if ((i % 512) == 0) {
			pageTable = (uint64*)mmu_allocate_page(&physicalAddress);
			memset(pageTable, 0, B_PAGE_SIZE);
			pageDir[i / 512] = physicalAddress | 0x3;

			// We just performed another virtual allocation; account for it.
			gKernelArgs.virtual_allocated_range[0].size += B_PAGE_SIZE;
		}

		// Get the physical address to map.
		if (!mmu_get_virtual_mapping(KERNEL_BASE + (i * B_PAGE_SIZE),
				&physicalAddress))
			continue;

		pageTable[i % 512] = physicalAddress | 0x3;
	}

	gKernelArgs.arch_args.virtual_end = ROUNDUP(KERNEL_BASE_64BIT
		+ gKernelArgs.virtual_allocated_range[0].size, 0x200000);
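	// (0x200000 is 2MB, so virtual_end is rounded up to a large-page boundary.)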

	// Sort the address ranges.
	sort_address_ranges(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
	sort_address_ranges(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges);
	sort_address_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

	dprintf("phys memory ranges:\n");
	for (uint32 i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
		dprintf("    base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
			gKernelArgs.physical_memory_range[i].start,
			gKernelArgs.physical_memory_range[i].size);
	}

	dprintf("allocated phys memory ranges:\n");
	for (uint32 i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
		dprintf("    base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
			gKernelArgs.physical_allocated_range[i].start,
			gKernelArgs.physical_allocated_range[i].size);
	}

	dprintf("allocated virt memory ranges:\n");
	for (uint32 i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
		dprintf("    base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
			gKernelArgs.virtual_allocated_range[i].start,
			gKernelArgs.virtual_allocated_range[i].size);
	}
}


static void
convert_preloaded_image(preloaded_elf64_image* image)
{
	fix_address(image->next);
	fix_address(image->name);
	fix_address(image->debug_string_table);
	fix_address(image->syms);
	fix_address(image->rel);
	fix_address(image->rela);
	fix_address(image->pltrel);
	fix_address(image->debug_symbols);
}


/*!	Convert all addresses in kernel_args to 64-bit addresses. */
static void
convert_kernel_args()
{
	fix_address(gKernelArgs.boot_volume);
	fix_address(gKernelArgs.vesa_modes);
	fix_address(gKernelArgs.edid_info);
	fix_address(gKernelArgs.debug_output);
	fix_address(gKernelArgs.boot_splash);
	fix_address(gKernelArgs.arch_args.apic);
	fix_address(gKernelArgs.arch_args.hpet);

	convert_preloaded_image(static_cast<preloaded_elf64_image*>(
		gKernelArgs.kernel_image.Pointer()));
	fix_address(gKernelArgs.kernel_image);

	// Iterate over the preloaded images. Must save the next address before
	// converting, as the next pointer will be converted.
	preloaded_image* image = gKernelArgs.preloaded_images;
	fix_address(gKernelArgs.preloaded_images);
	while (image != NULL) {
		preloaded_image* next = image->next;
		convert_preloaded_image(static_cast<preloaded_elf64_image*>(image));
		image = next;
	}

	// Set correct kernel args range addresses.
	dprintf("kernel args ranges:\n");
	for (uint32 i = 0; i < gKernelArgs.num_kernel_args_ranges; i++) {
		gKernelArgs.kernel_args_range[i].start = fix_address(
			gKernelArgs.kernel_args_range[i].start);
		dprintf("    base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
			gKernelArgs.kernel_args_range[i].start,
			gKernelArgs.kernel_args_range[i].size);
	}

	// Set correct kernel stack addresses.
	for (uint32 i = 0; i < gKernelArgs.num_cpus; i++) {
		gKernelArgs.cpu_kstack[i].start = fix_address(
			gKernelArgs.cpu_kstack[i].start);
	}

	// Fix driver settings files.
	driver_settings_file* file = gKernelArgs.driver_settings;
	fix_address(gKernelArgs.driver_settings);
	while (file != NULL) {
		driver_settings_file* next = file->next;
		fix_address(file->next);
		fix_address(file->buffer);
		file = next;
	}
}


void
long_start_kernel()
{
	// Check whether long mode is supported.
	cpuid_info info;
	get_current_cpuid(&info, 0x80000001);
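	// EDX bit 29 from CPUID function 0x80000001 is the Long Mode (LM) flag.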
	if ((info.regs.edx & (1 << 29)) == 0)
		panic("64-bit kernel requires a 64-bit CPU");

	preloaded_elf64_image *image = static_cast<preloaded_elf64_image *>(
		gKernelArgs.kernel_image.Pointer());

	// TODO: x86_64 SMP, disable for now.
	gKernelArgs.num_cpus = 1;

	long_gdt_init();
	long_idt_init();
	long_mmu_init();
	convert_kernel_args();

	debug_cleanup();

	// Calculate the arguments for long_enter_kernel().
	uint64 entry = image->elf_header.e_entry;
	uint64 stackTop = gKernelArgs.cpu_kstack[0].start
		+ gKernelArgs.cpu_kstack[0].size;
	uint64 kernelArgs = (addr_t)&gKernelArgs;

	dprintf("kernel entry at %#llx, stack %#llx, args %#llx\n", entry,
		stackTop, kernelArgs);

	// We're about to enter the kernel -- disable console output.
	stdout = NULL;

	// Load the new GDT. The physical address is used because long_enter_kernel
	// disables 32-bit paging.
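	// Note that the GDTR limit field holds the table size in bytes minus one,
	// as the LGDT instruction expects.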
	gdt_idt_descr gdtr = { GDT_LIMIT - 1, gKernelArgs.arch_args.phys_gdt };
	asm volatile("lgdt %0" :: "m"(gdtr));

	// Enter the kernel!
	long_enter_kernel(gKernelArgs.arch_args.phys_pgdir, entry, stackTop,
		kernelArgs, 0);
	panic("Shouldn't get here");
}