xref: /haiku/src/system/kernel/arch/x86/64/descriptors.cpp (revision a30a4a41f948ebb03b95dab065a27a584ac0c97a)
/*
 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Distributed under the terms of the MIT License.
 */


#include <arch/x86/descriptors.h>

#include <new>
#include <string.h>
	// for placement new and memset(); these may also be pulled in
	// indirectly by the headers below

#include <boot/kernel_args.h>
#include <cpu.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>

#include <arch/int.h>
#include <arch/user_debugger.h>


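// GenerateTable builds an array of N entries at compile time: the recursive
// template expands the indices 0..N-1 into a parameter pack (a hand-rolled
// index sequence, predating std::make_index_sequence), and the N == 0
// specialization initializes element i with Function(i).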
template<typename T, T (*Function)(unsigned), unsigned N, unsigned ...Index>
struct GenerateTable : GenerateTable<T, Function, N - 1,  N - 1, Index...> {
};

template<typename T, T (*Function)(unsigned), unsigned ...Index>
struct GenerateTable<T, Function, 0, Index...> {
	GenerateTable()
		:
		fTable { Function(Index)... }
	{
	}

	T	fTable[sizeof...(Index)];
};

enum class DescriptorType : unsigned {
	DataWritable		= 0x2,
	CodeExecuteOnly		= 0x8,
	TSS					= 0x9,
};

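// A single 8-byte GDT entry. The anonymous bit-field struct mirrors the
// hardware layout of a segment descriptor (limit, base, type, DPL, present,
// granularity, ...); fDescriptor[2] gives raw access to the same 64 bits.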
class Descriptor {
public:
	constexpr				Descriptor();
	inline					Descriptor(uint32_t first, uint32_t second);
	constexpr				Descriptor(DescriptorType type, bool kernelOnly);

protected:
	union {
		struct [[gnu::packed]] {
			uint16_t		fLimit0;
			unsigned		fBase0			:24;
			unsigned		fType			:4;
			unsigned		fSystem			:1;
			unsigned		fDPL			:2;
			unsigned		fPresent		:1;
			unsigned		fLimit1			:4;
			unsigned		fUnused			:1;
			unsigned		fLong			:1;
			unsigned		fDB				:1;
			unsigned		fGranularity	:1;
			uint8_t			fBase1;
		};

		uint32_t			fDescriptor[2];
	};
};

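// In long mode a TSS descriptor is 16 bytes and therefore occupies two
// consecutive GDT slots; GetLower()/GetUpper() return the two halves.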
class TSSDescriptor : public Descriptor {
public:
	inline						TSSDescriptor(uintptr_t base, size_t limit);

			const Descriptor&	GetLower() const	{ return *this; }
			const Descriptor&	GetUpper() const	{ return fSecond; }

	static	void				LoadTSS(unsigned index);

private:
			Descriptor			fSecond;
};

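// The GDT: five fixed descriptors (see the constructor below) followed by
// two slots per CPU for that CPU's TSS descriptor.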
class GlobalDescriptorTable {
public:
	constexpr						GlobalDescriptorTable();

	inline	void					Load() const;

			unsigned				SetTSS(unsigned cpu,
										const TSSDescriptor& tss);

private:
	static constexpr	unsigned	kFirstTSS = 5;
	static constexpr	unsigned	kDescriptorCount
										= kFirstTSS + SMP_MAX_CPUS * 2;

	alignas(uint64_t)	Descriptor	fTable[kDescriptorCount];
};

enum class InterruptDescriptorType : unsigned {
	Interrupt		= 14,
	Trap,
};

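// A single 16-byte long mode IDT gate: the ISR address split across
// fBase0/1/2, the code segment selector, the IST index, the gate type and
// the privilege level required to invoke it from software.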
class [[gnu::packed]] InterruptDescriptor {
public:
	constexpr						InterruptDescriptor(uintptr_t isr,
										unsigned ist, bool kernelOnly);
	constexpr						InterruptDescriptor(uintptr_t isr);

	static constexpr	InterruptDescriptor	Generate(unsigned index);

private:
						uint16_t	fBase0;
						uint16_t	fSelector;
						unsigned	fIST		:3;
						unsigned	fReserved0	:5;
						unsigned	fType		:4;
						unsigned	fReserved1	:1;
						unsigned	fDPL		:2;
						unsigned	fPresent	:1;
						uint16_t	fBase1;
						uint32_t	fBase2;
						uint32_t	fReserved2;
};

class InterruptDescriptorTable {
public:
	inline				void		Load() const;

	static constexpr	unsigned	kDescriptorCount = 256;

private:
	typedef GenerateTable<InterruptDescriptor, InterruptDescriptor::Generate,
			kDescriptorCount> TableType;
	alignas(uint64_t)	TableType	fTable;
};

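// isr_array is defined in assembly; each stub is declared here as 16
// aligned bytes, so the entry point for vector i is simply isr_array + i.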
class InterruptServiceRoutine {
	alignas(16)	uint8_t	fDummy[16];
};

extern const InterruptServiceRoutine
	isr_array[InterruptDescriptorTable::kDescriptorCount];

static GlobalDescriptorTable	sGDT;
static InterruptDescriptorTable	sIDT;

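// High-level interrupt handlers, indexed by vector number; the table is
// filled in by x86_descriptors_init() below.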
typedef void interrupt_handler_function(iframe* frame);
interrupt_handler_function*
	gInterruptHandlerTable[InterruptDescriptorTable::kDescriptorCount];


constexpr bool
is_code_segment(DescriptorType type)
{
	return type == DescriptorType::CodeExecuteOnly;
}


constexpr
Descriptor::Descriptor()
	:
	fDescriptor { 0, 0 }
{
	static_assert(sizeof(Descriptor) == sizeof(uint64_t),
		"Invalid Descriptor size.");
}


Descriptor::Descriptor(uint32_t first, uint32_t second)
	:
	fDescriptor { first, second }
{
}


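// Builds a flat code or data segment descriptor with base 0 and a 4 GiB
// limit: the long (L) bit is set for code segments, the default-size (D/B)
// bit for data segments, and DPL is 0 or 3 depending on kernelOnly. Base and
// limit are ignored by the CPU in 64-bit mode but are filled in anyway.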
constexpr
Descriptor::Descriptor(DescriptorType type, bool kernelOnly)
	:
	fLimit0(-1),
	fBase0(0),
	fType(static_cast<unsigned>(type)),
	fSystem(1),
	fDPL(kernelOnly ? 0 : 3),
	fPresent(1),
	fLimit1(0xf),
	fUnused(0),
	fLong(is_code_segment(type) ? 1 : 0),
	fDB(is_code_segment(type) ? 0 : 1),
	fGranularity(1),
	fBase1(0)
{
}


TSSDescriptor::TSSDescriptor(uintptr_t base, size_t limit)
	:
	fSecond(base >> 32, 0)
{
	fLimit0 = static_cast<uint16_t>(limit);
	fBase0 = base & 0xffffff;
	fType = static_cast<unsigned>(DescriptorType::TSS);
	fPresent = 1;
	fLimit1 = (limit >> 16) & 0xf;
	fBase1 = static_cast<uint8_t>(base >> 24);
}


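// Load the task register with the selector of the given GDT slot
// (selector = index * 8).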
void
TSSDescriptor::LoadTSS(unsigned index)
{
	asm volatile("ltr %w0" : : "r" (index << 3));
}


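// The statically initialized part of the GDT: the null descriptor, kernel
// code and data, then user data and user code, in that order. The user
// segment order presumably matches what SYSRET expects (SS selector = base
// + 8, CS selector = base + 16). Per-CPU TSS descriptors are installed
// later, starting at kFirstTSS.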
constexpr
GlobalDescriptorTable::GlobalDescriptorTable()
	:
	fTable {
		Descriptor(),
		Descriptor(DescriptorType::CodeExecuteOnly, true),
		Descriptor(DescriptorType::DataWritable, true),
		Descriptor(DescriptorType::DataWritable, false),
		Descriptor(DescriptorType::CodeExecuteOnly, false),
	}
{
	static_assert(kDescriptorCount <= 8192,
		"GDT cannot contain more than 8192 descriptors");
}


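// Point GDTR at the table. lgdt takes a pseudo-descriptor consisting of the
// table limit (size in bytes - 1) and its base address.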
void
GlobalDescriptorTable::Load() const
{
	struct [[gnu::packed]] {
		uint16_t	fLimit;
		const void*	fAddress;
	} gdtDescriptor = {
		sizeof(fTable) - 1,
		static_cast<const void*>(fTable),
	};

	asm volatile("lgdt	%0" : : "m" (gdtDescriptor));
}


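// Install the two halves of a 16-byte TSS descriptor into the GDT slots
// reserved for the given CPU and return the index of the first slot.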
unsigned
GlobalDescriptorTable::SetTSS(unsigned cpu, const TSSDescriptor& tss)
{
	auto index = kFirstTSS + cpu * 2;
	ASSERT(index + 1 < kDescriptorCount);
	fTable[index] = tss.GetLower();
	fTable[index + 1] = tss.GetUpper();
	return index;
}


constexpr
InterruptDescriptor::InterruptDescriptor(uintptr_t isr, unsigned ist,
	bool kernelOnly)
	:
	fBase0(isr),
	fSelector(KERNEL_CODE_SELECTOR),
	fIST(ist),
	fReserved0(0),
	fType(static_cast<unsigned>(InterruptDescriptorType::Interrupt)),
	fReserved1(0),
	fDPL(kernelOnly ? 0 : 3),
	fPresent(1),
	fBase1(isr >> 16),
	fBase2(isr >> 32),
	fReserved2(0)
{
	static_assert(sizeof(InterruptDescriptor) == sizeof(uint64_t) * 2,
		"Invalid InterruptDescriptor size.");
}


constexpr
InterruptDescriptor::InterruptDescriptor(uintptr_t isr)
	:
	InterruptDescriptor(isr, 0, true)
{
}


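// Point IDTR at the IDT; lidt takes the same limit/base pseudo-descriptor
// format as lgdt.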
void
InterruptDescriptorTable::Load() const
{
	struct [[gnu::packed]] {
		uint16_t	fLimit;
		const void*	fAddress;
	} idtDescriptor = {
		sizeof(fTable) - 1,
		static_cast<const void*>(fTable.fTable),
	};

	asm volatile("lidt	%0" : : "m" (idtDescriptor));
}


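// Generates the gate for the given vector: the breakpoint exception
// (vector 3) is callable from user mode (DPL 3), the double fault handler
// (vector 8) runs on its own stack via IST slot 1, and everything else is a
// plain kernel-only interrupt gate.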
constexpr InterruptDescriptor
InterruptDescriptor::Generate(unsigned index)
{
	return index == 3
		? InterruptDescriptor(uintptr_t(isr_array + index), 0, false)
		: (index == 8
			? InterruptDescriptor(uintptr_t(isr_array + index), 1, true)
			: InterruptDescriptor(uintptr_t(isr_array + index)));
}


//	#pragma mark - Exception handlers


static void
x86_64_general_protection_fault(iframe* frame)
{
	if (debug_debugger_running()) {
		// If the kernel debugger has installed a fault handler, redirect
		// there instead of treating this as an unexpected exception. This
		// lets the debugger recover from accesses to non-canonical
		// addresses, which raise #GP rather than a page fault.
		cpu_ent* cpu = &gCPU[smp_get_current_cpu()];
		if (cpu->fault_handler != 0) {
			debug_set_page_fault_info(0, frame->ip, DEBUG_PAGE_FAULT_NO_INFO);
			frame->ip = cpu->fault_handler;
			frame->bp = cpu->fault_handler_stack_pointer;
			return;
		}
	}

	x86_unexpected_exception(frame);
}


// #pragma mark -


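// Called early during boot on each CPU: constructs and loads the GDT and
// IDT, and sets up this CPU's TSS, including the dedicated stack that the
// double fault gate's IST slot 1 refers to.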
void
x86_descriptors_preboot_init_percpu(kernel_args* args, int cpu)
{
	new(&sGDT) GlobalDescriptorTable;
	sGDT.Load();

	memset(&gCPU[cpu].arch.tss, 0, sizeof(struct tss));
	gCPU[cpu].arch.tss.io_map_base = sizeof(struct tss);

	// Set up the double fault IST entry (the double fault gate generated in
	// InterruptDescriptor::Generate() refers to IST slot 1).
	struct tss* tss = &gCPU[cpu].arch.tss;
	size_t stackSize;
	tss->ist1 = (addr_t)x86_get_double_fault_stack(cpu, &stackSize);
	tss->ist1 += stackSize;

	// Set up the descriptor for this TSS.
	auto tssIndex = sGDT.SetTSS(cpu,
		TSSDescriptor(uintptr_t(&gCPU[cpu].arch.tss), sizeof(struct tss)));
	TSSDescriptor::LoadTSS(tssIndex);

	new(&sIDT) InterruptDescriptorTable;
	sIDT.Load();
}


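// Fills in the high-level handler table: vectors below ARCH_INTERRUPT_BASE
// are CPU exceptions, everything from there up is dispatched as a hardware
// interrupt.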
void
x86_descriptors_init(kernel_args* args)
{
	// Initialize the interrupt handler table.
	interrupt_handler_function** table = gInterruptHandlerTable;
	for (uint32 i = 0; i < ARCH_INTERRUPT_BASE; i++)
		table[i] = x86_invalid_exception;
	for (uint32 i = ARCH_INTERRUPT_BASE;
		i < InterruptDescriptorTable::kDescriptorCount; i++) {
		table[i] = x86_hardware_interrupt;
	}

	table[0]  = x86_unexpected_exception;	// Divide Error Exception (#DE)
	table[1]  = x86_handle_debug_exception; // Debug Exception (#DB)
	table[2]  = x86_fatal_exception;		// NMI Interrupt
	table[3]  = x86_handle_breakpoint_exception; // Breakpoint Exception (#BP)
	table[4]  = x86_unexpected_exception;	// Overflow Exception (#OF)
	table[5]  = x86_unexpected_exception;	// BOUND Range Exceeded Exception (#BR)
	table[6]  = x86_unexpected_exception;	// Invalid Opcode Exception (#UD)
	table[7]  = x86_fatal_exception;		// Device Not Available Exception (#NM)
	table[8]  = x86_fatal_exception;		// Double Fault Exception (#DF)
	table[9]  = x86_fatal_exception;		// Coprocessor Segment Overrun
	table[10] = x86_fatal_exception;		// Invalid TSS Exception (#TS)
	table[11] = x86_fatal_exception;		// Segment Not Present (#NP)
	table[12] = x86_fatal_exception;		// Stack Fault Exception (#SS)
	table[13] = x86_64_general_protection_fault; // General Protection Exception (#GP)
	table[14] = x86_page_fault_exception;	// Page-Fault Exception (#PF)
	table[16] = x86_unexpected_exception;	// x87 FPU Floating-Point Error (#MF)
	table[17] = x86_unexpected_exception;	// Alignment Check Exception (#AC)
	table[18] = x86_fatal_exception;		// Machine-Check Exception (#MC)
	table[19] = x86_unexpected_exception;	// SIMD Floating-Point Exception (#XF)
}