xref: /haiku/src/system/kernel/arch/x86/64/descriptors.cpp (revision 820dca4df6c7bf955c46e8f6521b9408f50b2900)
/*
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Distributed under the terms of the MIT License.
 */


#include <arch/x86/descriptors.h>

#include <boot/kernel_args.h>
#include <cpu.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>

#include <arch/int.h>
#include <arch/user_debugger.h>


typedef void interrupt_handler_function(iframe* frame);


static segment_descriptor* sGDT;
static interrupt_descriptor* sIDT;

static const uint32 kInterruptHandlerTableSize = 256;
interrupt_handler_function* gInterruptHandlerTable[kInterruptHandlerTableSize];

extern uint8 isr_array[kInterruptHandlerTableSize][16];


// Load the given CPU's TSS selector into the task register.
static void
load_tss(int cpu)
{
	uint16 seg = (TSS_SEGMENT(cpu) << 3) | DPL_KERNEL;
	asm volatile("ltr %%ax" :: "a" (seg));
}
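

/*
 * Illustrative sketch, not part of the original file: how the selector value
 * loaded by "ltr" above is composed. An x86 segment selector is
 * (descriptor index << 3) | table indicator | RPL; the table indicator bit is
 * 0 for GDT entries, and DPL_KERNEL (ring 0) doubles as the requested
 * privilege level. TSS_SEGMENT() is assumed to map a CPU index to its per-CPU
 * TSS slot in the GDT, as declared in arch/x86/descriptors.h.
 */
static inline uint16
tss_selector_for_cpu(int cpu)
{
	// GDT index in bits 3-15, TI bit (bit 2) clear for the GDT, RPL in
	// bits 0-1
	return (uint16)((TSS_SEGMENT(cpu) << 3) | DPL_KERNEL);
}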


//	#pragma mark - Exception handlers


static void
x86_64_general_protection_fault(iframe* frame)
{
	if (debug_debugger_running()) {
		// While the kernel debugger is running, recover from GPFs (e.g. from
		// non-canonical address accesses) if a debugger fault handler is
		// installed; see the sketch after this function for how such a
		// handler is armed.
		cpu_ent* cpu = &gCPU[smp_get_current_cpu()];
		if (cpu->fault_handler != 0) {
			debug_set_page_fault_info(0, frame->ip, DEBUG_PAGE_FAULT_NO_INFO);
			frame->ip = cpu->fault_handler;
			frame->bp = cpu->fault_handler_stack_pointer;
			return;
		}
	}

	x86_unexpected_exception(frame);
}
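

/*
 * Illustrative sketch, not part of the original file: the recovery pattern
 * x86_64_general_protection_fault() above assumes. Debugger code that is
 * about to touch a possibly invalid (e.g. non-canonical) address stores a
 * resume address and frame pointer in the per-CPU structure; if a #GP
 * arrives, the handler rewrites the iframe so execution continues at that
 * address. The label-address form below is a GCC extension and only
 * demonstrates the mechanism; Haiku's kernel debugger has its own helpers
 * for this.
 */
static inline bool
debugger_read_checked_sketch(const void* address, size_t* _value)
{
	cpu_ent* cpu = &gCPU[smp_get_current_cpu()];
	cpu->fault_handler = (addr_t)&&error;
	cpu->fault_handler_stack_pointer = (addr_t)__builtin_frame_address(0);

	*_value = *(volatile const size_t*)address;
		// may raise #GP; the handler above then resumes execution at "error"

	cpu->fault_handler = 0;
	return true;

error:
	cpu->fault_handler = 0;
	return false;
}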


// #pragma mark -


void
x86_descriptors_init(kernel_args* args)
{
	// The boot loader sets up a GDT and allocates an empty IDT for us.
	sGDT = (segment_descriptor*)args->arch_args.vir_gdt;
	sIDT = (interrupt_descriptor*)args->arch_args.vir_idt;

	// Fill out the IDT, pointing each entry to the corresponding entry in the
	// ISR array created in arch_interrupts.S (see that file for how this
	// works; an illustrative sketch of the 64-bit gate layout follows this
	// function).
	for (uint32 i = 0; i < kInterruptHandlerTableSize; i++) {
		// x86_64 removes task gates, so we cannot use a separate TSS for the
		// double fault exception. Instead, it adds a new stack switching
		// mechanism, the Interrupt Stack Table (IST). The IST is a table of
		// stack addresses in the TSS. If the IST field of an interrupt
		// descriptor is non-zero, the CPU will switch to the stack specified
		// by that IST entry when handling that interrupt. So, we use IST
		// entry 1 to store the double fault stack address (set up in
		// x86_descriptors_init_post_vm()).
		uint32 ist = (i == 8) ? 1 : 0;

		// The breakpoint exception can be raised from userland, so it gets
		// DPL_USER.
		uint32 dpl = (i == 3) ? DPL_USER : DPL_KERNEL;

		set_interrupt_descriptor(&sIDT[i], (addr_t)&isr_array[i],
			GATE_INTERRUPT, KERNEL_CODE_SEG, dpl, ist);
	}

	// Initialize the interrupt handler table.
	interrupt_handler_function** table = gInterruptHandlerTable;
	for (uint32 i = 0; i < ARCH_INTERRUPT_BASE; i++)
		table[i] = x86_invalid_exception;
	for (uint32 i = ARCH_INTERRUPT_BASE; i < kInterruptHandlerTableSize; i++)
		table[i] = x86_hardware_interrupt;

	table[0]  = x86_unexpected_exception;	// Divide Error Exception (#DE)
	table[1]  = x86_handle_debug_exception; // Debug Exception (#DB)
	table[2]  = x86_fatal_exception;		// NMI Interrupt
	table[3]  = x86_handle_breakpoint_exception; // Breakpoint Exception (#BP)
	table[4]  = x86_unexpected_exception;	// Overflow Exception (#OF)
	table[5]  = x86_unexpected_exception;	// BOUND Range Exceeded Exception (#BR)
	table[6]  = x86_unexpected_exception;	// Invalid Opcode Exception (#UD)
	table[7]  = x86_fatal_exception;		// Device Not Available Exception (#NM)
	table[8]  = x86_fatal_exception;		// Double Fault Exception (#DF)
	table[9]  = x86_fatal_exception;		// Coprocessor Segment Overrun
	table[10] = x86_fatal_exception;		// Invalid TSS Exception (#TS)
	table[11] = x86_fatal_exception;		// Segment Not Present (#NP)
	table[12] = x86_fatal_exception;		// Stack Fault Exception (#SS)
	table[13] = x86_64_general_protection_fault; // General Protection Exception (#GP)
	table[14] = x86_page_fault_exception;	// Page-Fault Exception (#PF)
	table[16] = x86_unexpected_exception;	// x87 FPU Floating-Point Error (#MF)
	table[17] = x86_unexpected_exception;	// Alignment Check Exception (#AC)
	table[18] = x86_fatal_exception;		// Machine-Check Exception (#MC)
	table[19] = x86_unexpected_exception;	// SIMD Floating-Point Exception (#XF)
}
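

/*
 * Illustrative sketch, not part of the original file: the hardware layout of
 * a 64-bit interrupt gate that set_interrupt_descriptor() above has to
 * produce. Haiku's actual interrupt_descriptor type lives in
 * arch/x86/descriptors.h; the names below are made up for the example. The
 * handler address is split across three fields, the IST index occupies the
 * low three bits of its byte, and type/DPL/present share the following byte.
 */
struct interrupt_gate_sketch {
	uint16	offset_low;			// handler address bits 0-15
	uint16	selector;			// code segment selector to load
	uint8	ist;				// bits 0-2: IST index, others reserved
	uint8	type_attributes;	// bits 0-3: type (0xe = 64-bit interrupt
								// gate), bits 5-6: DPL, bit 7: present
	uint16	offset_mid;			// handler address bits 16-31
	uint32	offset_high;		// handler address bits 32-63
	uint32	reserved;
};
	// This member order has no padding, so the struct is the 16 bytes the
	// hardware expects; real code would still mark it packed.


static inline void
fill_interrupt_gate_sketch(interrupt_gate_sketch* gate, addr_t handler,
	uint16 selector, uint8 type, uint8 dpl, uint8 ist)
{
	gate->offset_low = (uint16)(handler & 0xffff);
	gate->selector = selector;
	gate->ist = (uint8)(ist & 0x7);
	gate->type_attributes = (uint8)(0x80 | (dpl << 5) | type);
		// present | DPL | gate type
	gate->offset_mid = (uint16)((handler >> 16) & 0xffff);
	gate->offset_high = (uint32)(handler >> 32);
	gate->reserved = 0;
}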


void
x86_descriptors_init_percpu(kernel_args* args, int cpu)
{
	// Load the IDT.
	gdt_idt_descr idtr = {
		kInterruptHandlerTableSize * sizeof(interrupt_descriptor) - 1,
		(addr_t)sIDT
	};
	asm volatile("lidt %0" :: "m" (idtr));

	// Load the TSS for non-boot CPUs (the boot CPU's TSS is loaded in
	// x86_descriptors_init_post_vm(), once the TSS descriptors exist).
	if (cpu != 0) {
		load_tss(cpu);
	}
}
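

/*
 * Illustrative sketch, not part of the original file: what the "lidt"
 * instruction above consumes. In long mode the pseudo-descriptor is 10 bytes:
 * a 16-bit limit (size of the table minus one) followed by a 64-bit linear
 * base address, with no padding in between. Haiku's gdt_idt_descr type plays
 * this role; the struct below just restates the hardware format with made-up
 * names.
 */
struct idtr_sketch {
	uint16	limit;	// size of the IDT in bytes, minus one
	uint64	base;	// linear address of the first descriptor
} __attribute__((packed));


static inline void
load_idt_sketch(const interrupt_descriptor* idt, uint32 descriptorCount)
{
	idtr_sketch descriptor = {
		(uint16)(descriptorCount * sizeof(interrupt_descriptor) - 1),
		(uint64)(addr_t)idt
	};
	asm volatile("lidt %0" : : "m" (descriptor));
}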


status_t
x86_descriptors_init_post_vm(kernel_args* args)
{
	area_id area;

	// Create an area for the GDT.
	area = create_area("gdt", (void**)&sGDT, B_EXACT_ADDRESS, B_PAGE_SIZE,
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK)
		return area;

	// Same for the IDT.
	area = create_area("idt", (void**)&sIDT, B_EXACT_ADDRESS, B_PAGE_SIZE,
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK)
		return area;

	for (uint32 i = 0; i < args->num_cpus; i++) {
		// Set up the task state segment.
		memset(&gCPU[i].arch.tss, 0, sizeof(struct tss));
		gCPU[i].arch.tss.io_map_base = sizeof(struct tss);

		// Set up the descriptor for this TSS.
		set_tss_descriptor(&sGDT[TSS_SEGMENT(i)], (addr_t)&gCPU[i].arch.tss,
			sizeof(struct tss));

		// Set up the double fault IST entry (see x86_descriptors_init()). It
		// must point at the top of the stack; see the sketch after this
		// function.
		struct tss* tss = &gCPU[i].arch.tss;
		size_t stackSize;
		tss->ist1 = (addr_t)x86_get_double_fault_stack(i, &stackSize);
		tss->ist1 += stackSize;
	}

	// Load the TSS for the boot CPU.
	load_tss(0);

	return B_OK;
}
173