xref: /haiku/src/system/kernel/arch/x86/arch_cpu.cpp (revision ed24eb5ff12640d052171c6a7feba37fab8a75d1)
1 /*
2  * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
3  * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
4  * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
5  * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
6  * Distributed under the terms of the MIT License.
7  *
8  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
9  * Distributed under the terms of the NewOS License.
10  */
11 
12 
13 #include <cpu.h>
14 
15 #include <string.h>
16 #include <stdlib.h>
17 #include <stdio.h>
18 
19 #include <algorithm>
20 
21 #include <ACPI.h>
22 
23 #include <boot_device.h>
24 #include <commpage.h>
25 #include <debug.h>
26 #include <elf.h>
27 #include <safemode.h>
28 #include <smp.h>
29 #include <util/BitUtils.h>
30 #include <vm/vm.h>
31 #include <vm/vm_types.h>
32 #include <vm/VMAddressSpace.h>
33 
34 #include <arch_system_info.h>
35 #include <arch/x86/apic.h>
36 #include <boot/kernel_args.h>
37 
38 #include "paging/X86PagingStructures.h"
39 #include "paging/X86VMTranslationMap.h"
40 
41 
42 #define DUMP_FEATURE_STRING	1
43 #define DUMP_CPU_TOPOLOGY	1
44 #define DUMP_CPU_PATCHLEVEL_TYPE	1
45 
46 
47 /* cpu vendor info */
48 struct cpu_vendor_info {
49 	const char *vendor;
50 	const char *ident_string[2];
51 };
52 
53 static const struct cpu_vendor_info vendor_info[VENDOR_NUM] = {
54 	{ "Intel", { "GenuineIntel" } },
55 	{ "AMD", { "AuthenticAMD" } },
56 	{ "Cyrix", { "CyrixInstead" } },
57 	{ "UMC", { "UMC UMC UMC" } },
58 	{ "NexGen", { "NexGenDriven" } },
59 	{ "Centaur", { "CentaurHauls" } },
60 	{ "Rise", { "RiseRiseRise" } },
61 	{ "Transmeta", { "GenuineTMx86", "TransmetaCPU" } },
62 	{ "NSC", { "Geode by NSC" } },
63 	{ "Hygon", { "HygonGenuine" } },
64 };
65 
66 #define K8_SMIONCMPHALT			(1ULL << 27)
67 #define K8_C1EONCMPHALT			(1ULL << 28)
68 
69 #define K8_CMPHALT				(K8_SMIONCMPHALT | K8_C1EONCMPHALT)
70 
71 struct set_mtrr_parameter {
72 	int32	index;
73 	uint64	base;
74 	uint64	length;
75 	uint8	type;
76 };
77 
78 struct set_mtrrs_parameter {
79 	const x86_mtrr_info*	infos;
80 	uint32					count;
81 	uint8					defaultType;
82 };
83 
84 
85 #ifdef __x86_64__
86 extern addr_t _stac;
87 extern addr_t _clac;
88 extern addr_t _xsave;
89 extern addr_t _xsavec;
90 extern addr_t _xrstor;
91 uint64 gXsaveMask;
92 uint64 gFPUSaveLength = 512;
93 bool gHasXsave = false;
94 bool gHasXsavec = false;
95 #endif
96 
97 extern "C" void x86_reboot(void);
98 	// from arch.S
99 
100 void (*gCpuIdleFunc)(void);
101 #ifndef __x86_64__
102 void (*gX86SwapFPUFunc)(void* oldState, const void* newState) = x86_noop_swap;
103 bool gHasSSE = false;
104 #endif
105 
106 static uint32 sCpuRendezvous;
107 static uint32 sCpuRendezvous2;
108 static uint32 sCpuRendezvous3;
109 static vint32 sTSCSyncRendezvous;
110 
111 /* Some specials for the double fault handler */
112 static uint8* sDoubleFaultStacks;
113 static const size_t kDoubleFaultStackSize = 4096;	// size per CPU
114 
115 static x86_cpu_module_info* sCpuModule;
116 
117 
118 /* CPU topology information */
119 static uint32 (*sGetCPUTopologyID)(int currentCPU);
120 static uint32 sHierarchyMask[CPU_TOPOLOGY_LEVELS];
121 static uint32 sHierarchyShift[CPU_TOPOLOGY_LEVELS];
122 
123 /* Cache topology information */
124 static uint32 sCacheSharingMask[CPU_MAX_CACHE_LEVEL];
125 
126 static void* sUcodeData = NULL;
127 static size_t sUcodeDataSize = 0;
128 static void* sLoadedUcodeUpdate;
129 static spinlock sUcodeUpdateLock = B_SPINLOCK_INITIALIZER;
130 
131 
132 static status_t
133 acpi_shutdown(bool rebootSystem)
134 {
135 	if (debug_debugger_running() || !are_interrupts_enabled())
136 		return B_ERROR;
137 
138 	acpi_module_info* acpi;
139 	if (get_module(B_ACPI_MODULE_NAME, (module_info**)&acpi) != B_OK)
140 		return B_NOT_SUPPORTED;
141 
142 	status_t status;
143 	if (rebootSystem) {
144 		status = acpi->reboot();
145 	} else {
146 		status = acpi->prepare_sleep_state(ACPI_POWER_STATE_OFF, NULL, 0);
147 		if (status == B_OK) {
148 			//cpu_status state = disable_interrupts();
149 			status = acpi->enter_sleep_state(ACPI_POWER_STATE_OFF);
150 			//restore_interrupts(state);
151 		}
152 	}
153 
154 	put_module(B_ACPI_MODULE_NAME);
155 	return status;
156 }
157 
158 
159 /*!	Disable CPU caches, and invalidate them. */
160 static void
161 disable_caches()
162 {
163 	x86_write_cr0((x86_read_cr0() | CR0_CACHE_DISABLE)
164 		& ~CR0_NOT_WRITE_THROUGH);
165 	wbinvd();
166 	arch_cpu_global_TLB_invalidate();
167 }
168 
169 
170 /*!	Invalidate CPU caches, and enable them. */
171 static void
172 enable_caches()
173 {
174 	wbinvd();
175 	arch_cpu_global_TLB_invalidate();
176 	x86_write_cr0(x86_read_cr0()
177 		& ~(CR0_CACHE_DISABLE | CR0_NOT_WRITE_THROUGH));
178 }
179 
180 
181 static void
182 set_mtrr(void* _parameter, int cpu)
183 {
184 	struct set_mtrr_parameter* parameter
185 		= (struct set_mtrr_parameter*)_parameter;
186 
187 	// wait until all CPUs have arrived here
188 	smp_cpu_rendezvous(&sCpuRendezvous);
189 
190 	// One CPU has to reset sCpuRendezvous3 -- it is needed to prevent the CPU
191 	// that initiated the call_all_cpus() from doing that again and clearing
192 	// sCpuRendezvous2 before the last CPU has actually left the loop in
193 	// smp_cpu_rendezvous();
194 	if (cpu == 0)
195 		atomic_set((int32*)&sCpuRendezvous3, 0);
196 
197 	disable_caches();
198 
199 	sCpuModule->set_mtrr(parameter->index, parameter->base, parameter->length,
200 		parameter->type);
201 
202 	enable_caches();
203 
204 	// wait until all CPUs have arrived here
205 	smp_cpu_rendezvous(&sCpuRendezvous2);
206 	smp_cpu_rendezvous(&sCpuRendezvous3);
207 }
208 
209 
210 static void
211 set_mtrrs(void* _parameter, int cpu)
212 {
213 	set_mtrrs_parameter* parameter = (set_mtrrs_parameter*)_parameter;
214 
215 	// wait until all CPUs have arrived here
216 	smp_cpu_rendezvous(&sCpuRendezvous);
217 
218 	// One CPU has to reset sCpuRendezvous3 -- it is needed to prevent the CPU
219 	// that initiated the call_all_cpus() from doing that again and clearing
220 	// sCpuRendezvous2 before the last CPU has actually left the loop in
221 	// smp_cpu_rendezvous();
222 	if (cpu == 0)
223 		atomic_set((int32*)&sCpuRendezvous3, 0);
224 
225 	disable_caches();
226 
227 	sCpuModule->set_mtrrs(parameter->defaultType, parameter->infos,
228 		parameter->count);
229 
230 	enable_caches();
231 
232 	// wait until all CPUs have arrived here
233 	smp_cpu_rendezvous(&sCpuRendezvous2);
234 	smp_cpu_rendezvous(&sCpuRendezvous3);
235 }
236 
237 
238 static void
239 init_mtrrs(void* _unused, int cpu)
240 {
241 	// wait until all CPUs have arrived here
242 	smp_cpu_rendezvous(&sCpuRendezvous);
243 
244 	// One CPU has to reset sCpuRendezvous3 -- it is needed to prevent the CPU
245 	// that initiated the call_all_cpus() from doing that again and clearing
246 	// sCpuRendezvous2 before the last CPU has actually left the loop in
247 	// smp_cpu_rendezvous();
248 	if (cpu == 0)
249 		atomic_set((int32*)&sCpuRendezvous3, 0);
250 
251 	disable_caches();
252 
253 	sCpuModule->init_mtrrs();
254 
255 	enable_caches();
256 
257 	// wait until all CPUs have arrived here
258 	smp_cpu_rendezvous(&sCpuRendezvous2);
259 	smp_cpu_rendezvous(&sCpuRendezvous3);
260 }
261 
262 
263 uint32
264 x86_count_mtrrs(void)
265 {
266 	if (sCpuModule == NULL)
267 		return 0;
268 
269 	return sCpuModule->count_mtrrs();
270 }
271 
272 
273 void
274 x86_set_mtrr(uint32 index, uint64 base, uint64 length, uint8 type)
275 {
276 	struct set_mtrr_parameter parameter;
277 	parameter.index = index;
278 	parameter.base = base;
279 	parameter.length = length;
280 	parameter.type = type;
281 
282 	sCpuRendezvous = sCpuRendezvous2 = 0;
283 	call_all_cpus(&set_mtrr, &parameter);
284 }
285 
286 
287 status_t
288 x86_get_mtrr(uint32 index, uint64* _base, uint64* _length, uint8* _type)
289 {
290 	// the MTRRs are identical on all CPUs, so it doesn't matter
291 	// on which CPU this runs
292 	return sCpuModule->get_mtrr(index, _base, _length, _type);
293 }
294 
295 
296 void
297 x86_set_mtrrs(uint8 defaultType, const x86_mtrr_info* infos, uint32 count)
298 {
299 	if (sCpuModule == NULL)
300 		return;
301 
302 	struct set_mtrrs_parameter parameter;
303 	parameter.defaultType = defaultType;
304 	parameter.infos = infos;
305 	parameter.count = count;
306 
307 	sCpuRendezvous = sCpuRendezvous2 = 0;
308 	call_all_cpus(&set_mtrrs, &parameter);
309 }
310 
311 
312 void
313 x86_init_fpu(void)
314 {
315 	// All x86_64 CPUs support SSE, so there's no need to check for it.
316 #ifndef __x86_64__
317 	if (!x86_check_feature(IA32_FEATURE_FPU, FEATURE_COMMON)) {
318 		// No FPU... time to install one in your 386?
319 		dprintf("%s: Warning: CPU has no reported FPU.\n", __func__);
320 		gX86SwapFPUFunc = x86_noop_swap;
321 		return;
322 	}
323 
324 	if (!x86_check_feature(IA32_FEATURE_SSE, FEATURE_COMMON)
325 		|| !x86_check_feature(IA32_FEATURE_FXSR, FEATURE_COMMON)) {
326 		dprintf("%s: CPU has no SSE... just enabling FPU.\n", __func__);
327 		// we don't have proper SSE support, just enable FPU
328 		x86_write_cr0(x86_read_cr0() & ~(CR0_FPU_EMULATION | CR0_MONITOR_FPU));
329 		gX86SwapFPUFunc = x86_fnsave_swap;
330 		return;
331 	}
332 #endif
333 
334 	dprintf("%s: CPU has SSE... enabling FXSR and XMM.\n", __func__);
335 #ifndef __x86_64__
336 	// enable OS support for SSE
337 	x86_write_cr4(x86_read_cr4() | CR4_OS_FXSR | CR4_OS_XMM_EXCEPTION);
338 	x86_write_cr0(x86_read_cr0() & ~(CR0_FPU_EMULATION | CR0_MONITOR_FPU));
339 
340 	gX86SwapFPUFunc = x86_fxsave_swap;
341 	gHasSSE = true;
342 #endif
343 }
344 
345 
346 #if DUMP_FEATURE_STRING
347 static void
348 dump_feature_string(int currentCPU, cpu_ent* cpu)
349 {
350 	char features[768];
351 	features[0] = 0;
352 
353 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_FPU)
354 		strlcat(features, "fpu ", sizeof(features));
355 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_VME)
356 		strlcat(features, "vme ", sizeof(features));
357 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_DE)
358 		strlcat(features, "de ", sizeof(features));
359 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_PSE)
360 		strlcat(features, "pse ", sizeof(features));
361 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_TSC)
362 		strlcat(features, "tsc ", sizeof(features));
363 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_MSR)
364 		strlcat(features, "msr ", sizeof(features));
365 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_PAE)
366 		strlcat(features, "pae ", sizeof(features));
367 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_MCE)
368 		strlcat(features, "mce ", sizeof(features));
369 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_CX8)
370 		strlcat(features, "cx8 ", sizeof(features));
371 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_APIC)
372 		strlcat(features, "apic ", sizeof(features));
373 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_SEP)
374 		strlcat(features, "sep ", sizeof(features));
375 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_MTRR)
376 		strlcat(features, "mtrr ", sizeof(features));
377 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_PGE)
378 		strlcat(features, "pge ", sizeof(features));
379 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_MCA)
380 		strlcat(features, "mca ", sizeof(features));
381 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_CMOV)
382 		strlcat(features, "cmov ", sizeof(features));
383 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_PAT)
384 		strlcat(features, "pat ", sizeof(features));
385 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_PSE36)
386 		strlcat(features, "pse36 ", sizeof(features));
387 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_PSN)
388 		strlcat(features, "psn ", sizeof(features));
389 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_CLFSH)
390 		strlcat(features, "clfsh ", sizeof(features));
391 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_DS)
392 		strlcat(features, "ds ", sizeof(features));
393 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_ACPI)
394 		strlcat(features, "acpi ", sizeof(features));
395 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_MMX)
396 		strlcat(features, "mmx ", sizeof(features));
397 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_FXSR)
398 		strlcat(features, "fxsr ", sizeof(features));
399 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_SSE)
400 		strlcat(features, "sse ", sizeof(features));
401 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_SSE2)
402 		strlcat(features, "sse2 ", sizeof(features));
403 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_SS)
404 		strlcat(features, "ss ", sizeof(features));
405 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_HTT)
406 		strlcat(features, "htt ", sizeof(features));
407 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_TM)
408 		strlcat(features, "tm ", sizeof(features));
409 	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_PBE)
410 		strlcat(features, "pbe ", sizeof(features));
411 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_SSE3)
412 		strlcat(features, "sse3 ", sizeof(features));
413 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_PCLMULQDQ)
414 		strlcat(features, "pclmulqdq ", sizeof(features));
415 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_DTES64)
416 		strlcat(features, "dtes64 ", sizeof(features));
417 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_MONITOR)
418 		strlcat(features, "monitor ", sizeof(features));
419 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_DSCPL)
420 		strlcat(features, "dscpl ", sizeof(features));
421 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_VMX)
422 		strlcat(features, "vmx ", sizeof(features));
423 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_SMX)
424 		strlcat(features, "smx ", sizeof(features));
425 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_EST)
426 		strlcat(features, "est ", sizeof(features));
427 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_TM2)
428 		strlcat(features, "tm2 ", sizeof(features));
429 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_SSSE3)
430 		strlcat(features, "ssse3 ", sizeof(features));
431 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_CNXTID)
432 		strlcat(features, "cnxtid ", sizeof(features));
433 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_FMA)
434 		strlcat(features, "fma ", sizeof(features));
435 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_CX16)
436 		strlcat(features, "cx16 ", sizeof(features));
437 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_XTPR)
438 		strlcat(features, "xtpr ", sizeof(features));
439 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_PDCM)
440 		strlcat(features, "pdcm ", sizeof(features));
441 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_PCID)
442 		strlcat(features, "pcid ", sizeof(features));
443 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_DCA)
444 		strlcat(features, "dca ", sizeof(features));
445 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_SSE4_1)
446 		strlcat(features, "sse4_1 ", sizeof(features));
447 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_SSE4_2)
448 		strlcat(features, "sse4_2 ", sizeof(features));
449 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_X2APIC)
450 		strlcat(features, "x2apic ", sizeof(features));
451 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_MOVBE)
452 		strlcat(features, "movbe ", sizeof(features));
453 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_POPCNT)
454 		strlcat(features, "popcnt ", sizeof(features));
455 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_TSCDEADLINE)
456 		strlcat(features, "tscdeadline ", sizeof(features));
457 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_AES)
458 		strlcat(features, "aes ", sizeof(features));
459 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_XSAVE)
460 		strlcat(features, "xsave ", sizeof(features));
461 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_OSXSAVE)
462 		strlcat(features, "osxsave ", sizeof(features));
463 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_AVX)
464 		strlcat(features, "avx ", sizeof(features));
465 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_F16C)
466 		strlcat(features, "f16c ", sizeof(features));
467 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_RDRND)
468 		strlcat(features, "rdrnd ", sizeof(features));
469 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_HYPERVISOR)
470 		strlcat(features, "hypervisor ", sizeof(features));
471 	if (cpu->arch.feature[FEATURE_EXT_AMD] & IA32_FEATURE_AMD_EXT_SYSCALL)
472 		strlcat(features, "syscall ", sizeof(features));
473 	if (cpu->arch.feature[FEATURE_EXT_AMD] & IA32_FEATURE_AMD_EXT_NX)
474 		strlcat(features, "nx ", sizeof(features));
475 	if (cpu->arch.feature[FEATURE_EXT_AMD] & IA32_FEATURE_AMD_EXT_MMXEXT)
476 		strlcat(features, "mmxext ", sizeof(features));
477 	if (cpu->arch.feature[FEATURE_EXT_AMD] & IA32_FEATURE_AMD_EXT_FFXSR)
478 		strlcat(features, "ffxsr ", sizeof(features));
479 	if (cpu->arch.feature[FEATURE_EXT_AMD] & IA32_FEATURE_AMD_EXT_PDPE1GB)
480 		strlcat(features, "pdpe1gb ", sizeof(features));
481 	if (cpu->arch.feature[FEATURE_EXT_AMD] & IA32_FEATURE_AMD_EXT_LONG)
482 		strlcat(features, "long ", sizeof(features));
483 	if (cpu->arch.feature[FEATURE_EXT_AMD] & IA32_FEATURE_AMD_EXT_3DNOWEXT)
484 		strlcat(features, "3dnowext ", sizeof(features));
485 	if (cpu->arch.feature[FEATURE_EXT_AMD] & IA32_FEATURE_AMD_EXT_3DNOW)
486 		strlcat(features, "3dnow ", sizeof(features));
487 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_DTS)
488 		strlcat(features, "dts ", sizeof(features));
489 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_ITB)
490 		strlcat(features, "itb ", sizeof(features));
491 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_ARAT)
492 		strlcat(features, "arat ", sizeof(features));
493 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_PLN)
494 		strlcat(features, "pln ", sizeof(features));
495 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_ECMD)
496 		strlcat(features, "ecmd ", sizeof(features));
497 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_PTM)
498 		strlcat(features, "ptm ", sizeof(features));
499 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_HWP)
500 		strlcat(features, "hwp ", sizeof(features));
501 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_HWP_NOTIFY)
502 		strlcat(features, "hwp_notify ", sizeof(features));
503 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_HWP_ACTWIN)
504 		strlcat(features, "hwp_actwin ", sizeof(features));
505 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_HWP_EPP)
506 		strlcat(features, "hwp_epp ", sizeof(features));
507 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_HWP_PLR)
508 		strlcat(features, "hwp_plr ", sizeof(features));
509 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_HDC)
510 		strlcat(features, "hdc ", sizeof(features));
511 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_TBMT3)
512 		strlcat(features, "tbmt3 ", sizeof(features));
513 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_HWP_CAP)
514 		strlcat(features, "hwp_cap ", sizeof(features));
515 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_HWP_PECI)
516 		strlcat(features, "hwp_peci ", sizeof(features));
517 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_HWP_FLEX)
518 		strlcat(features, "hwp_flex ", sizeof(features));
519 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_HWP_FAST)
520 		strlcat(features, "hwp_fast ", sizeof(features));
521 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_HW_FEEDBACK)
522 		strlcat(features, "hw_feedback ", sizeof(features));
523 	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_HWP_IGNIDL)
524 		strlcat(features, "hwp_ignidl ", sizeof(features));
525 	if (cpu->arch.feature[FEATURE_6_ECX] & IA32_FEATURE_APERFMPERF)
526 		strlcat(features, "aperfmperf ", sizeof(features));
527 	if (cpu->arch.feature[FEATURE_6_ECX] & IA32_FEATURE_EPB)
528 		strlcat(features, "epb ", sizeof(features));
529 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_TSC_ADJUST)
530 		strlcat(features, "tsc_adjust ", sizeof(features));
531 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_SGX)
532 		strlcat(features, "sgx ", sizeof(features));
533 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_BMI1)
534 		strlcat(features, "bmi1 ", sizeof(features));
535 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_HLE)
536 		strlcat(features, "hle ", sizeof(features));
537 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_AVX2)
538 		strlcat(features, "avx2 ", sizeof(features));
539 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_SMEP)
540 		strlcat(features, "smep ", sizeof(features));
541 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_BMI2)
542 		strlcat(features, "bmi2 ", sizeof(features));
543 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_ERMS)
544 		strlcat(features, "erms ", sizeof(features));
545 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_INVPCID)
546 		strlcat(features, "invpcid ", sizeof(features));
547 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_RTM)
548 		strlcat(features, "rtm ", sizeof(features));
549 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_CQM)
550 		strlcat(features, "cqm ", sizeof(features));
551 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_MPX)
552 		strlcat(features, "mpx ", sizeof(features));
553 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_RDT_A)
554 		strlcat(features, "rdt_a ", sizeof(features));
555 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_AVX512F)
556 		strlcat(features, "avx512f ", sizeof(features));
557 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_AVX512DQ)
558 		strlcat(features, "avx512dq ", sizeof(features));
559 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_RDSEED)
560 		strlcat(features, "rdseed ", sizeof(features));
561 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_ADX)
562 		strlcat(features, "adx ", sizeof(features));
563 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_SMAP)
564 		strlcat(features, "smap ", sizeof(features));
565 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_AVX512IFMA)
566 		strlcat(features, "avx512ifma ", sizeof(features));
567 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_PCOMMIT)
568 		strlcat(features, "pcommit ", sizeof(features));
569 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_CLFLUSHOPT)
570 		strlcat(features, "clflushopt ", sizeof(features));
571 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_CLWB)
572 		strlcat(features, "clwb ", sizeof(features));
573 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_INTEL_PT)
574 		strlcat(features, "intel_pt ", sizeof(features));
575 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_AVX512PF)
576 		strlcat(features, "avx512pf ", sizeof(features));
577 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_AVX512ER)
578 		strlcat(features, "avx512er ", sizeof(features));
579 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_AVX512CD)
580 		strlcat(features, "avx512cd ", sizeof(features));
581 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_SHA_NI)
582 		strlcat(features, "sha_ni ", sizeof(features));
583 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_AVX512BW)
584 		strlcat(features, "avx512bw ", sizeof(features));
585 	if (cpu->arch.feature[FEATURE_7_EBX] & IA32_FEATURE_AVX512VI)
586 		strlcat(features, "avx512vi ", sizeof(features));
587 	if (cpu->arch.feature[FEATURE_7_ECX] & IA32_FEATURE_AVX512VMBI)
588 		strlcat(features, "avx512vmbi ", sizeof(features));
589 	if (cpu->arch.feature[FEATURE_7_ECX] & IA32_FEATURE_UMIP)
590 		strlcat(features, "umip ", sizeof(features));
591 	if (cpu->arch.feature[FEATURE_7_ECX] & IA32_FEATURE_PKU)
592 		strlcat(features, "pku ", sizeof(features));
593 	if (cpu->arch.feature[FEATURE_7_ECX] & IA32_FEATURE_OSPKE)
594 		strlcat(features, "ospke ", sizeof(features));
595 	if (cpu->arch.feature[FEATURE_7_ECX] & IA32_FEATURE_AVX512VMBI2)
596 		strlcat(features, "avx512vmbi2 ", sizeof(features));
597 	if (cpu->arch.feature[FEATURE_7_ECX] & IA32_FEATURE_GFNI)
598 		strlcat(features, "gfni ", sizeof(features));
599 	if (cpu->arch.feature[FEATURE_7_ECX] & IA32_FEATURE_VAES)
600 		strlcat(features, "vaes ", sizeof(features));
601 	if (cpu->arch.feature[FEATURE_7_ECX] & IA32_FEATURE_VPCLMULQDQ)
602 		strlcat(features, "vpclmulqdq ", sizeof(features));
603 	if (cpu->arch.feature[FEATURE_7_ECX] & IA32_FEATURE_AVX512_VNNI)
604 		strlcat(features, "avx512vnni ", sizeof(features));
605 	if (cpu->arch.feature[FEATURE_7_ECX] & IA32_FEATURE_AVX512_BITALG)
606 		strlcat(features, "avx512bitalg ", sizeof(features));
607 	if (cpu->arch.feature[FEATURE_7_ECX] & IA32_FEATURE_AVX512_VPOPCNTDQ)
608 		strlcat(features, "avx512vpopcntdq ", sizeof(features));
609 	if (cpu->arch.feature[FEATURE_7_ECX] & IA32_FEATURE_LA57)
610 		strlcat(features, "la57 ", sizeof(features));
611 	if (cpu->arch.feature[FEATURE_7_ECX] & IA32_FEATURE_RDPID)
612 		strlcat(features, "rdpid ", sizeof(features));
613 	if (cpu->arch.feature[FEATURE_7_ECX] & IA32_FEATURE_SGX_LC)
614 		strlcat(features, "sgx_lc ", sizeof(features));
615 	if (cpu->arch.feature[FEATURE_7_EDX] & IA32_FEATURE_HYBRID_CPU)
616 		strlcat(features, "hybrid ", sizeof(features));
617 	if (cpu->arch.feature[FEATURE_7_EDX] & IA32_FEATURE_IBRS)
618 		strlcat(features, "ibrs ", sizeof(features));
619 	if (cpu->arch.feature[FEATURE_7_EDX] & IA32_FEATURE_STIBP)
620 		strlcat(features, "stibp ", sizeof(features));
621 	if (cpu->arch.feature[FEATURE_7_EDX] & IA32_FEATURE_L1D_FLUSH)
622 		strlcat(features, "l1d_flush ", sizeof(features));
623 	if (cpu->arch.feature[FEATURE_7_EDX] & IA32_FEATURE_ARCH_CAPABILITIES)
624 		strlcat(features, "msr_arch ", sizeof(features));
625 	if (cpu->arch.feature[FEATURE_7_EDX] & IA32_FEATURE_SSBD)
626 		strlcat(features, "ssbd ", sizeof(features));
627 	if (cpu->arch.feature[FEATURE_D_1_EAX] & IA32_FEATURE_XSAVEOPT)
628 		strlcat(features, "xsaveopt ", sizeof(features));
629 	if (cpu->arch.feature[FEATURE_D_1_EAX] & IA32_FEATURE_XSAVEC)
630 		strlcat(features, "xsavec ", sizeof(features));
631 	if (cpu->arch.feature[FEATURE_D_1_EAX] & IA32_FEATURE_XGETBV1)
632 		strlcat(features, "xgetbv1 ", sizeof(features));
633 	if (cpu->arch.feature[FEATURE_D_1_EAX] & IA32_FEATURE_XSAVES)
634 		strlcat(features, "xsaves ", sizeof(features));
635 	if (cpu->arch.feature[FEATURE_EXT_8_EBX] & IA32_FEATURE_CLZERO)
636 		strlcat(features, "clzero ", sizeof(features));
637 	if (cpu->arch.feature[FEATURE_EXT_8_EBX] & IA32_FEATURE_IBPB)
638 		strlcat(features, "ibpb ", sizeof(features));
639 	if (cpu->arch.feature[FEATURE_EXT_8_EBX] & IA32_FEATURE_AMD_SSBD)
640 		strlcat(features, "amd_ssbd ", sizeof(features));
641 	if (cpu->arch.feature[FEATURE_EXT_8_EBX] & IA32_FEATURE_VIRT_SSBD)
642 		strlcat(features, "virt_ssbd ", sizeof(features));
643 	if (cpu->arch.feature[FEATURE_EXT_8_EBX] & IA32_FEATURE_AMD_SSB_NO)
644 		strlcat(features, "amd_ssb_no ", sizeof(features));
645 	if (cpu->arch.feature[FEATURE_EXT_8_EBX] & IA32_FEATURE_CPPC)
646 		strlcat(features, "cppc ", sizeof(features));
647 	dprintf("CPU %d: features: %s\n", currentCPU, features);
648 }
649 #endif	// DUMP_FEATURE_STRING
650 
651 
652 static void
653 compute_cpu_hierarchy_masks(int maxLogicalID, int maxCoreID)
654 {
655 	ASSERT(maxLogicalID >= maxCoreID);
656 	const int kMaxSMTID = maxLogicalID / maxCoreID;
657 
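	// The initial APIC ID is a set of contiguous bit fields,
	// [package|core|SMT]. maxLogicalID and maxCoreID are powers of two here,
	// so kMaxSMTID is too: "value - 1" therefore yields each field's bit
	// mask, and count_set_bits() of the lower fields yields its shift.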
658 	sHierarchyMask[CPU_TOPOLOGY_SMT] = kMaxSMTID - 1;
659 	sHierarchyShift[CPU_TOPOLOGY_SMT] = 0;
660 
661 	sHierarchyMask[CPU_TOPOLOGY_CORE] = (maxCoreID - 1) * kMaxSMTID;
662 	sHierarchyShift[CPU_TOPOLOGY_CORE]
663 		= count_set_bits(sHierarchyMask[CPU_TOPOLOGY_SMT]);
664 
665 	const uint32 kSinglePackageMask = sHierarchyMask[CPU_TOPOLOGY_SMT]
666 		| sHierarchyMask[CPU_TOPOLOGY_CORE];
667 	sHierarchyMask[CPU_TOPOLOGY_PACKAGE] = ~kSinglePackageMask;
668 	sHierarchyShift[CPU_TOPOLOGY_PACKAGE] = count_set_bits(kSinglePackageMask);
669 }
670 
671 
672 static uint32
673 get_cpu_legacy_initial_apic_id(int /* currentCPU */)
674 {
675 	cpuid_info cpuid;
676 	get_current_cpuid(&cpuid, 1, 0);
677 	return cpuid.regs.ebx >> 24;
678 }
679 
680 
681 static inline status_t
682 detect_amd_cpu_topology(uint32 maxBasicLeaf, uint32 maxExtendedLeaf)
683 {
684 	sGetCPUTopologyID = get_cpu_legacy_initial_apic_id;
685 
686 	cpuid_info cpuid;
687 	get_current_cpuid(&cpuid, 1, 0);
688 	int maxLogicalID = next_power_of_2((cpuid.regs.ebx >> 16) & 0xff);
689 
690 	int maxCoreID = 1;
691 	if (maxExtendedLeaf >= 0x80000008) {
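		// CPUID 0x80000008: ECX[15:12] (ApicIdCoreIdSize) is the number of
		// APIC ID bits reserved for cores; if it is zero, fall back to the
		// legacy core count in ECX[7:0] (NC = core count - 1), per the
		// AMD APM.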
692 		get_current_cpuid(&cpuid, 0x80000008, 0);
693 		maxCoreID = (cpuid.regs.ecx >> 12) & 0xf;
694 		if (maxCoreID != 0)
695 			maxCoreID = 1 << maxCoreID;
696 		else
697 			maxCoreID = next_power_of_2((cpuid.regs.ecx & 0xff) + 1);
698 	}
699 
700 	if (maxExtendedLeaf >= 0x80000001) {
701 		get_current_cpuid(&cpuid, 0x80000001, 0);
702 		if (x86_check_feature(IA32_FEATURE_AMD_EXT_CMPLEGACY,
703 				FEATURE_EXT_AMD_ECX))
704 			maxCoreID = maxLogicalID;
705 	}
706 
707 	compute_cpu_hierarchy_masks(maxLogicalID, maxCoreID);
708 
709 	return B_OK;
710 }
711 
712 
713 static void
714 detect_amd_cache_topology(uint32 maxExtendedLeaf)
715 {
716 	if (!x86_check_feature(IA32_FEATURE_AMD_EXT_TOPOLOGY, FEATURE_EXT_AMD_ECX))
717 		return;
718 
719 	if (maxExtendedLeaf < 0x8000001d)
720 		return;
721 
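	// Leaf 0x8000001d mirrors Intel's deterministic cache parameters leaf 4:
	// EAX[4:0] is the cache type (0 terminates the list), EAX[7:5] the cache
	// level, and the field starting at EAX[14] the number of logical
	// processors sharing the cache, minus one.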
722 	uint8 hierarchyLevels[CPU_MAX_CACHE_LEVEL];
723 	int maxCacheLevel = 0;
724 
725 	int currentLevel = 0;
726 	int cacheType;
727 	do {
728 		cpuid_info cpuid;
729 		get_current_cpuid(&cpuid, 0x8000001d, currentLevel);
730 
731 		cacheType = cpuid.regs.eax & 0x1f;
732 		if (cacheType == 0)
733 			break;
734 
735 		int cacheLevel = (cpuid.regs.eax >> 5) & 0x7;
736 		int coresCount = next_power_of_2(((cpuid.regs.eax >> 14) & 0x3f) + 1);
737 		hierarchyLevels[cacheLevel - 1]
738 			= coresCount * (sHierarchyMask[CPU_TOPOLOGY_SMT] + 1);
739 		maxCacheLevel = std::max(maxCacheLevel, cacheLevel);
740 
741 		currentLevel++;
742 	} while (true);
743 
744 	for (int i = 0; i < maxCacheLevel; i++)
745 		sCacheSharingMask[i] = ~uint32(hierarchyLevels[i] - 1);
746 	gCPUCacheLevelCount = maxCacheLevel;
747 }
748 
749 
750 static uint32
751 get_intel_cpu_initial_x2apic_id(int /* currentCPU */)
752 {
753 	cpuid_info cpuid;
754 	get_current_cpuid(&cpuid, 11, 0);
755 	return cpuid.regs.edx;
756 }
757 
758 
759 static inline status_t
760 detect_intel_cpu_topology_x2apic(uint32 maxBasicLeaf)
761 {
762 
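	// Prefer leaf 0x1f (V2 extended topology) if it enumerates anything;
	// otherwise fall back to the original leaf 0xb enumeration.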
763 	uint32 leaf = 0;
764 	cpuid_info cpuid;
765 	if (maxBasicLeaf >= 0x1f) {
766 		get_current_cpuid(&cpuid, 0x1f, 0);
767 		if (cpuid.regs.ebx != 0)
768 			leaf = 0x1f;
769 	}
770 	if (maxBasicLeaf >= 0xb && leaf == 0) {
771 		get_current_cpuid(&cpuid, 0xb, 0);
772 		if (cpuid.regs.ebx != 0)
773 			leaf = 0xb;
774 	}
775 	if (leaf == 0)
776 		return B_UNSUPPORTED;
777 
778 	uint8 hierarchyLevels[CPU_TOPOLOGY_LEVELS] = { 0 };
779 
780 	int currentLevel = 0;
781 	unsigned int levelsSet = 0;
782 	do {
783 		cpuid_info cpuid;
784 		get_current_cpuid(&cpuid, leaf, currentLevel++);
785 		int levelType = (cpuid.regs.ecx >> 8) & 0xff;
786 		int levelValue = cpuid.regs.eax & 0x1f;
787 
788 		if (levelType == 0)
789 			break;
790 
791 		switch (levelType) {
792 			case 1:	// SMT
793 				hierarchyLevels[CPU_TOPOLOGY_SMT] = levelValue;
794 				levelsSet |= 1;
795 				break;
796 			case 2:	// core
797 				hierarchyLevels[CPU_TOPOLOGY_CORE] = levelValue;
798 				levelsSet |= 2;
799 				break;
800 		}
801 
802 	} while (levelsSet != 3);
803 
804 	sGetCPUTopologyID = get_intel_cpu_initial_x2apic_id;
805 
806 	for (int i = 1; i < CPU_TOPOLOGY_LEVELS; i++) {
807 		if ((levelsSet & (1u << i)) != 0)
808 			continue;
809 		hierarchyLevels[i] = hierarchyLevels[i - 1];
810 	}
811 
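	// hierarchyLevels[] holds, per level, the cumulative number of low
	// x2APIC ID bits spanned up to and including that level (EAX[4:0] of the
	// leaf), from which each level's mask and shift follow directly.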
812 	for (int i = 0; i < CPU_TOPOLOGY_LEVELS; i++) {
813 		uint32 mask = ~uint32(0);
814 		if (i < CPU_TOPOLOGY_LEVELS - 1)
815 			mask = (1u << hierarchyLevels[i]) - 1;
816 		if (i > 0)
817 			mask &= ~sHierarchyMask[i - 1];
818 		sHierarchyMask[i] = mask;
819 		sHierarchyShift[i] = i > 0 ? hierarchyLevels[i - 1] : 0;
820 	}
821 
822 	return B_OK;
823 }
824 
825 
826 static inline status_t
827 detect_intel_cpu_topology_legacy(uint32 maxBasicLeaf)
828 {
829 	sGetCPUTopologyID = get_cpu_legacy_initial_apic_id;
830 
831 	cpuid_info cpuid;
832 
833 	get_current_cpuid(&cpuid, 1, 0);
834 	int maxLogicalID = next_power_of_2((cpuid.regs.ebx >> 16) & 0xff);
835 
836 	int maxCoreID = 1;
837 	if (maxBasicLeaf >= 4) {
838 		get_current_cpuid(&cpuid, 4, 0);
839 		maxCoreID = next_power_of_2((cpuid.regs.eax >> 26) + 1);
840 	}
841 
842 	compute_cpu_hierarchy_masks(maxLogicalID, maxCoreID);
843 
844 	return B_OK;
845 }
846 
847 
848 static void
849 detect_intel_cache_topology(uint32 maxBasicLeaf)
850 {
851 	if (maxBasicLeaf < 4)
852 		return;
853 
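	// Walk the deterministic cache parameters (leaf 4) until the cache type
	// field reads 0, recording for each cache level how many logical
	// processors share it (the field at EAX[14], plus one, rounded up to a
	// power of two).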
854 	uint8 hierarchyLevels[CPU_MAX_CACHE_LEVEL];
855 	int maxCacheLevel = 0;
856 
857 	int currentLevel = 0;
858 	int cacheType;
859 	do {
860 		cpuid_info cpuid;
861 		get_current_cpuid(&cpuid, 4, currentLevel);
862 
863 		cacheType = cpuid.regs.eax & 0x1f;
864 		if (cacheType == 0)
865 			break;
866 
867 		int cacheLevel = (cpuid.regs.eax >> 5) & 0x7;
868 		hierarchyLevels[cacheLevel - 1]
869 			= next_power_of_2(((cpuid.regs.eax >> 14) & 0x3f) + 1);
870 		maxCacheLevel = std::max(maxCacheLevel, cacheLevel);
871 
872 		currentLevel++;
873 	} while (true);
874 
875 	for (int i = 0; i < maxCacheLevel; i++)
876 		sCacheSharingMask[i] = ~uint32(hierarchyLevels[i] - 1);
877 
878 	gCPUCacheLevelCount = maxCacheLevel;
879 }
880 
881 
882 static uint32
883 get_simple_cpu_topology_id(int currentCPU)
884 {
885 	return currentCPU;
886 }
887 
888 
889 static inline int
890 get_topology_level_id(uint32 id, cpu_topology_level level)
891 {
892 	ASSERT(level < CPU_TOPOLOGY_LEVELS);
893 	return (id & sHierarchyMask[level]) >> sHierarchyShift[level];
894 }
895 
896 
897 static void
898 detect_cpu_topology(int currentCPU, cpu_ent* cpu, uint32 maxBasicLeaf,
899 	uint32 maxExtendedLeaf)
900 {
901 	if (currentCPU == 0) {
902 		memset(sCacheSharingMask, 0xff, sizeof(sCacheSharingMask));
903 
904 		status_t result = B_UNSUPPORTED;
905 		if (x86_check_feature(IA32_FEATURE_HTT, FEATURE_COMMON)) {
906 			if (cpu->arch.vendor == VENDOR_AMD
907 				|| cpu->arch.vendor == VENDOR_HYGON) {
908 				result = detect_amd_cpu_topology(maxBasicLeaf, maxExtendedLeaf);
909 
910 				if (result == B_OK)
911 					detect_amd_cache_topology(maxExtendedLeaf);
912 			}
913 
914 			if (cpu->arch.vendor == VENDOR_INTEL) {
915 				result = detect_intel_cpu_topology_x2apic(maxBasicLeaf);
916 				if (result != B_OK)
917 					result = detect_intel_cpu_topology_legacy(maxBasicLeaf);
918 
919 				if (result == B_OK)
920 					detect_intel_cache_topology(maxBasicLeaf);
921 			}
922 		}
923 
924 		if (result != B_OK) {
925 			dprintf("No CPU topology information available.\n");
926 
927 			sGetCPUTopologyID = get_simple_cpu_topology_id;
928 
929 			sHierarchyMask[CPU_TOPOLOGY_PACKAGE] = ~uint32(0);
930 		}
931 	}
932 
933 	ASSERT(sGetCPUTopologyID != NULL);
934 	int topologyID = sGetCPUTopologyID(currentCPU);
935 	cpu->topology_id[CPU_TOPOLOGY_SMT]
936 		= get_topology_level_id(topologyID, CPU_TOPOLOGY_SMT);
937 	cpu->topology_id[CPU_TOPOLOGY_CORE]
938 		= get_topology_level_id(topologyID, CPU_TOPOLOGY_CORE);
939 	cpu->topology_id[CPU_TOPOLOGY_PACKAGE]
940 		= get_topology_level_id(topologyID, CPU_TOPOLOGY_PACKAGE);
941 
942 	unsigned int i;
943 	for (i = 0; i < gCPUCacheLevelCount; i++)
944 		cpu->cache_id[i] = topologyID & sCacheSharingMask[i];
945 	for (; i < CPU_MAX_CACHE_LEVEL; i++)
946 		cpu->cache_id[i] = -1;
947 
948 #if DUMP_CPU_TOPOLOGY
949 	dprintf("CPU %d: apic id %d, package %d, core %d, smt %d\n", currentCPU,
950 		topologyID, cpu->topology_id[CPU_TOPOLOGY_PACKAGE],
951 		cpu->topology_id[CPU_TOPOLOGY_CORE],
952 		cpu->topology_id[CPU_TOPOLOGY_SMT]);
953 
954 	if (gCPUCacheLevelCount > 0) {
955 		char cacheLevels[256];
956 		unsigned int offset = 0;
957 		for (i = 0; i < gCPUCacheLevelCount; i++) {
958 			offset += snprintf(cacheLevels + offset,
959 					sizeof(cacheLevels) - offset,
960 					" L%d id %d%s", i + 1, cpu->cache_id[i],
961 					i < gCPUCacheLevelCount - 1 ? "," : "");
962 
963 			if (offset >= sizeof(cacheLevels))
964 				break;
965 		}
966 
967 		dprintf("CPU %d: cache sharing:%s\n", currentCPU, cacheLevels);
968 	}
969 #endif
970 }
971 
972 
973 static void
974 detect_intel_patch_level(cpu_ent* cpu)
975 {
976 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_HYPERVISOR) {
977 		cpu->arch.patch_level = 0;
978 		return;
979 	}
980 
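	// Intel SDM procedure: clear IA32_BIOS_SIGN_ID, execute CPUID leaf 1 to
	// latch the microcode signature, then read the update revision from the
	// high 32 bits of the MSR.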
981 	x86_write_msr(IA32_MSR_UCODE_REV, 0);
982 	cpuid_info cpuid;
983 	get_current_cpuid(&cpuid, 1, 0);
984 
985 	uint64 value = x86_read_msr(IA32_MSR_UCODE_REV);
986 	cpu->arch.patch_level = value >> 32;
987 }
988 
989 
990 static void
991 detect_amd_patch_level(cpu_ent* cpu)
992 {
993 	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_HYPERVISOR) {
994 		cpu->arch.patch_level = 0;
995 		return;
996 	}
997 
998 	uint64 value = x86_read_msr(IA32_MSR_UCODE_REV);
999 	cpu->arch.patch_level = (uint32)value;
1000 }
1001 
1002 
1003 static struct intel_microcode_header*
1004 find_microcode_intel(addr_t data, size_t size, uint32 patchLevel)
1005 {
1006 	// Intel SDM 9.11.3 Processor Identification
1007 	cpuid_info cpuid;
1008 	get_current_cpuid(&cpuid, 1, 0);
1009 	uint32 signature = cpuid.regs.eax;
1010 	// Intel SDM 9.11.4 Platform Identification
1011 	uint64 platformBits = (x86_read_msr(IA32_MSR_PLATFORM_ID) >> 50) & 0x7;
1012 	uint64 mask = 1 << platformBits;
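	// An update's processor_flags field is a bit mask of supported
	// platforms, so turn our 3-bit platform ID into a single-bit mask for
	// the tests below.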
1013 
1014 	while (size > 0) {
1015 		if (size < sizeof(struct intel_microcode_header)) {
1016 			dprintf("find_microcode_intel update is too small for header\n");
1017 			break;
1018 		}
1019 		struct intel_microcode_header* header =
1020 			(struct intel_microcode_header*)data;
1021 
1022 		uint32 totalSize = header->total_size;
1023 		uint32 dataSize = header->data_size;
1024 		if (dataSize == 0) {
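			// Per the Intel SDM, a data_size of 0 denotes the default layout
			// of 2000 bytes of data following the 48-byte header.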
1025 			dataSize = 2000;
1026 			totalSize = sizeof(struct intel_microcode_header)
1027 				+ dataSize;
1028 		}
1029 		if (totalSize > size) {
1030 			dprintf("find_microcode_intel update is too small for data\n");
1031 			break;
1032 		}
1033 
1034 		uint32* dwords = (uint32*)data;
1035 		// prepare the next update
1036 		size -= totalSize;
1037 		data += totalSize;
1038 
1039 		if (header->loader_revision != 1) {
1040 			dprintf("find_microcode_intel incorrect loader version\n");
1041 			continue;
1042 		}
1043 		// Intel SDM 9.11.6: the microcode update data requires a 16-byte
1044 		// boundary alignment.
1045 		if (((addr_t)header % 16) != 0) {
1046 			dprintf("find_microcode_intel incorrect alignment\n");
1047 			continue;
1048 		}
1049 		uint32 sum = 0;
1050 		for (uint32 i = 0; i < totalSize / 4; i++) {
1051 			sum += dwords[i];
1052 		}
1053 		if (sum != 0) {
1054 			dprintf("find_microcode_intel incorrect checksum\n");
1055 			continue;
1056 		}
1057 		if (patchLevel >= header->update_revision) {
1058 			dprintf("find_microcode_intel update_revision is not newer\n");
1059 			continue;
1060 		}
1061 		if (signature == header->processor_signature
1062 			&& (mask & header->processor_flags) != 0) {
1063 			return header;
1064 		}
1065 		if (totalSize <= (sizeof(struct intel_microcode_header) + dataSize
1066 			+ sizeof(struct intel_microcode_extended_signature_header))) {
1067 			continue;
1068 		}
1069 		struct intel_microcode_extended_signature_header* extSigHeader =
1070 			(struct intel_microcode_extended_signature_header*)((addr_t)header
1071 				+ sizeof(struct intel_microcode_header) + dataSize);
1072 		struct intel_microcode_extended_signature* extended_signature =
1073 			(struct intel_microcode_extended_signature*)((addr_t)extSigHeader
1074 				+ sizeof(struct intel_microcode_extended_signature_header));
1075 		for (uint32 i = 0; i < extSigHeader->extended_signature_count; i++) {
1076 			if (signature == extended_signature[i].processor_signature
1077 				&& (mask & extended_signature[i].processor_flags) != 0)
1078 				return header;
1079 		}
1080 	}
1081 	return NULL;
1082 }
1083 
1084 
1085 static void
1086 load_microcode_intel(int currentCPU, cpu_ent* cpu)
1087 {
1088 	// serialize for HT cores
1089 	if (currentCPU != 0)
1090 		acquire_spinlock(&sUcodeUpdateLock);
1091 	detect_intel_patch_level(cpu);
1092 	uint32 revision = cpu->arch.patch_level;
1093 	struct intel_microcode_header* update = (struct intel_microcode_header*)sLoadedUcodeUpdate;
1094 	if (update == NULL) {
1095 		update = find_microcode_intel((addr_t)sUcodeData, sUcodeDataSize,
1096 			revision);
1097 	}
1098 	if (update != NULL) {
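		// Apply the update by writing the linear address of its data block
		// (which follows the header) to IA32_BIOS_UPDT_TRIG
		// (IA32_MSR_UCODE_WRITE).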
1099 		addr_t data = (addr_t)update + sizeof(struct intel_microcode_header);
1100 		wbinvd();
1101 		x86_write_msr(IA32_MSR_UCODE_WRITE, data);
1102 		detect_intel_patch_level(cpu);
1103 		if (revision == cpu->arch.patch_level) {
1104 			dprintf("CPU %d: update failed\n", currentCPU);
1105 		} else {
1106 			if (sLoadedUcodeUpdate == NULL)
1107 				sLoadedUcodeUpdate = update;
1108 			dprintf("CPU %d: updated from revision %" B_PRIu32 " to %" B_PRIu32
1109 				"\n", currentCPU, revision, cpu->arch.patch_level);
1110 		}
1111 	} else {
1112 		dprintf("CPU %d: no update found\n", currentCPU);
1113 	}
1114 	if (currentCPU != 0)
1115 		release_spinlock(&sUcodeUpdateLock);
1116 }
1117 
1118 
1119 static struct amd_microcode_header*
1120 find_microcode_amd(addr_t data, size_t size, uint32 patchLevel)
1121 {
1122 	// the processor signature, from CPUID leaf 1 (EAX), as on Intel
1123 	cpuid_info cpuid;
1124 	get_current_cpuid(&cpuid, 1, 0);
1125 	uint32 signature = cpuid.regs.eax;
1126 
1127 	if (size < sizeof(struct amd_container_header)) {
1128 		dprintf("find_microcode_amd update is too small for header\n");
1129 		return NULL;
1130 	}
1131 	struct amd_container_header* container = (struct amd_container_header*)data;
1132 	if (container->magic != 0x414d44) {
1133 		dprintf("find_microcode_amd update invalid magic\n");
1134 		return NULL;
1135 	}
1136 
1137 	size -= sizeof(*container);
1138 	data += sizeof(*container);
1139 
1140 	struct amd_section_header* section =
1141 		(struct amd_section_header*)data;
1142 	if (section->type != 0 || section->size == 0) {
1143 		dprintf("find_microcode_amd update first section invalid\n");
1144 		return NULL;
1145 	}
1146 
1147 	size -= sizeof(*section);
1148 	data += sizeof(*section);
1149 
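	// The first section is an equivalence table mapping CPUID signatures
	// (installed_cpu) to the abstract processor_rev_id (equiv_cpu) that the
	// microcode patch headers are tagged with.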
1150 	amd_equiv_cpu_entry* table = (amd_equiv_cpu_entry*)data;
1151 	size -= section->size;
1152 	data += section->size;
1153 
1154 	uint16 equiv_id = 0;
1155 	for (uint32 i = 0; table[i].installed_cpu != 0; i++) {
1156 		if (signature == table[i].installed_cpu) {
1157 			equiv_id = table[i].equiv_cpu;
1158 			dprintf("find_microcode_amd found equiv cpu: %x\n", equiv_id);
1159 			break;
1160 		}
1161 	}
1162 	if (equiv_id == 0) {
1163 		dprintf("find_microcode_amd update cpu not found in equiv table\n");
1164 		return NULL;
1165 	}
1166 
1167 	while (size > sizeof(amd_section_header)) {
1168 		struct amd_section_header* section = (struct amd_section_header*)data;
1169 		size -= sizeof(*section);
1170 		data += sizeof(*section);
1171 
1172 		if (section->type != 1 || section->size > size
1173 			|| section->size < sizeof(amd_microcode_header)) {
1174 			dprintf("find_microcode_amd update firmware section invalid\n");
1175 			return NULL;
1176 		}
1177 		struct amd_microcode_header* header = (struct amd_microcode_header*)data;
1178 		size -= section->size;
1179 		data += section->size;
1180 
1181 		if (header->processor_rev_id != equiv_id) {
1182 			dprintf("find_microcode_amd update found rev_id %x\n", header->processor_rev_id);
1183 			continue;
1184 		}
1185 		if (patchLevel >= header->patch_id) {
1186 			dprintf("find_microcode_amd patch_id is not newer\n");
1187 			continue;
1188 		}
1189 		if (header->nb_dev_id != 0 || header->sb_dev_id != 0) {
1190 			dprintf("find_microcode_amd update chipset specific firmware\n");
1191 			continue;
1192 		}
1193 		if (((addr_t)header % 16) != 0) {
1194 			dprintf("find_microcode_amd incorrect alignment\n");
1195 			continue;
1196 		}
1197 
1198 		return header;
1199 	}
1200 	dprintf("find_microcode_amd no fw update found for this cpu\n");
1201 	return NULL;
1202 }
1203 
1204 
1205 static void
1206 load_microcode_amd(int currentCPU, cpu_ent* cpu)
1207 {
1208 	// serialize for HT cores
1209 	if (currentCPU != 0)
1210 		acquire_spinlock(&sUcodeUpdateLock);
1211 	detect_amd_patch_level(cpu);
1212 	uint32 revision = cpu->arch.patch_level;
1213 	struct amd_microcode_header* update = (struct amd_microcode_header*)sLoadedUcodeUpdate;
1214 	if (update == NULL) {
1215 		update = find_microcode_amd((addr_t)sUcodeData, sUcodeDataSize,
1216 			revision);
1217 	}
1218 	if (update != NULL) {
1219 		addr_t data = (addr_t)update;
1220 		wbinvd();
1221 
1222 		x86_write_msr(MSR_K8_UCODE_UPDATE, data);
1223 
1224 		detect_amd_patch_level(cpu);
1225 		if (revision == cpu->arch.patch_level) {
1226 			dprintf("CPU %d: update failed\n", currentCPU);
1227 		} else {
1228 			if (sLoadedUcodeUpdate == NULL)
1229 				sLoadedUcodeUpdate = update;
1230 			dprintf("CPU %d: updated from revision 0x%" B_PRIx32 " to 0x%" B_PRIx32
1231 				"\n", currentCPU, revision, cpu->arch.patch_level);
1232 		}
1233 
1234 	} else {
1235 		dprintf("CPU %d: no update found\n", currentCPU);
1236 	}
1237 
1238 	if (currentCPU != 0)
1239 		release_spinlock(&sUcodeUpdateLock);
1240 }
1241 
1242 
1243 static void
1244 load_microcode(int currentCPU)
1245 {
1246 	if (sUcodeData == NULL)
1247 		return;
1248 	cpu_ent* cpu = get_cpu_struct();
1249 	if ((cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_HYPERVISOR) != 0)
1250 		return;
1251 	if (cpu->arch.vendor == VENDOR_INTEL)
1252 		load_microcode_intel(currentCPU, cpu);
1253 	else if (cpu->arch.vendor == VENDOR_AMD)
1254 		load_microcode_amd(currentCPU, cpu);
1255 }
1256 
1257 
1258 static uint8
1259 get_hybrid_cpu_type()
1260 {
1261 	cpu_ent* cpu = get_cpu_struct();
1262 	if ((cpu->arch.feature[FEATURE_7_EDX] & IA32_FEATURE_HYBRID_CPU) == 0)
1263 		return 0;
1264 
1265 #define X86_HYBRID_CPU_TYPE_ID_SHIFT       24
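	// CPUID leaf 0x1a, EAX[31:24]: native model ID of the core this thread
	// runs on (0x20 = Atom/E-core, 0x40 = Core/P-core).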
1266 	cpuid_info cpuid;
1267 	get_current_cpuid(&cpuid, 0x1a, 0);
1268 	return cpuid.regs.eax >> X86_HYBRID_CPU_TYPE_ID_SHIFT;
1269 }
1270 
1271 
1272 static const char*
1273 get_hybrid_cpu_type_string(uint8 type)
1274 {
1275 	switch (type) {
1276 		case 0x20:
1277 			return "Atom";
1278 		case 0x40:
1279 			return "Core";
1280 		default:
1281 			return "";
1282 	}
1283 }
1284 
1285 
1286 static void
1287 detect_cpu(int currentCPU, bool full = true)
1288 {
1289 	cpu_ent* cpu = get_cpu_struct();
1290 	char vendorString[17];
1291 	cpuid_info cpuid;
1292 
1293 	// clear out the cpu info data
1294 	cpu->arch.vendor = VENDOR_UNKNOWN;
1295 	cpu->arch.vendor_name = "UNKNOWN VENDOR";
1296 	cpu->arch.feature[FEATURE_COMMON] = 0;
1297 	cpu->arch.feature[FEATURE_EXT] = 0;
1298 	cpu->arch.feature[FEATURE_EXT_AMD] = 0;
1299 	cpu->arch.feature[FEATURE_7_EBX] = 0;
1300 	cpu->arch.feature[FEATURE_7_ECX] = 0;
1301 	cpu->arch.feature[FEATURE_7_EDX] = 0;
1302 	cpu->arch.feature[FEATURE_D_1_EAX] = 0;
1303 	cpu->arch.model_name[0] = 0;
1304 
1305 	// print some fun data
1306 	get_current_cpuid(&cpuid, 0, 0);
1307 	uint32 maxBasicLeaf = cpuid.eax_0.max_eax;
1308 
1309 	// build the vendor string
1310 	memset(vendorString, 0, sizeof(vendorString));
1311 	memcpy(vendorString, cpuid.eax_0.vendor_id, sizeof(cpuid.eax_0.vendor_id));
1312 
1313 	// get the family, model, stepping
1314 	get_current_cpuid(&cpuid, 1, 0);
1315 	cpu->arch.type = cpuid.eax_1.type;
1316 	cpu->arch.family = cpuid.eax_1.family;
1317 	cpu->arch.extended_family = cpuid.eax_1.extended_family;
1318 	cpu->arch.model = cpuid.eax_1.model;
1319 	cpu->arch.extended_model = cpuid.eax_1.extended_model;
1320 	cpu->arch.stepping = cpuid.eax_1.stepping;
1321 	if (full) {
1322 		dprintf("CPU %d: type %d family %d extended_family %d model %d "
1323 			"extended_model %d stepping %d, string '%s'\n",
1324 			currentCPU, cpu->arch.type, cpu->arch.family,
1325 			cpu->arch.extended_family, cpu->arch.model,
1326 			cpu->arch.extended_model, cpu->arch.stepping, vendorString);
1327 	}
1328 
1329 	// figure out what vendor we have here
1330 
1331 	for (int32 i = 0; i < VENDOR_NUM; i++) {
1332 		if (vendor_info[i].ident_string[0]
1333 			&& !strcmp(vendorString, vendor_info[i].ident_string[0])) {
1334 			cpu->arch.vendor = (x86_vendors)i;
1335 			cpu->arch.vendor_name = vendor_info[i].vendor;
1336 			break;
1337 		}
1338 		if (vendor_info[i].ident_string[1]
1339 			&& !strcmp(vendorString, vendor_info[i].ident_string[1])) {
1340 			cpu->arch.vendor = (x86_vendors)i;
1341 			cpu->arch.vendor_name = vendor_info[i].vendor;
1342 			break;
1343 		}
1344 	}
1345 
1346 	// see if we can get the model name
1347 	get_current_cpuid(&cpuid, 0x80000000, 0);
1348 	uint32 maxExtendedLeaf = cpuid.eax_0.max_eax;
1349 	if (maxExtendedLeaf >= 0x80000004) {
1350 		// build the model string (need to swap ecx/edx data before copying)
1351 		unsigned int temp;
1352 		memset(cpu->arch.model_name, 0, sizeof(cpu->arch.model_name));
1353 
1354 		get_current_cpuid(&cpuid, 0x80000002, 0);
1355 		temp = cpuid.regs.edx;
1356 		cpuid.regs.edx = cpuid.regs.ecx;
1357 		cpuid.regs.ecx = temp;
1358 		memcpy(cpu->arch.model_name, cpuid.as_chars, sizeof(cpuid.as_chars));
1359 
1360 		get_current_cpuid(&cpuid, 0x80000003, 0);
1361 		temp = cpuid.regs.edx;
1362 		cpuid.regs.edx = cpuid.regs.ecx;
1363 		cpuid.regs.ecx = temp;
1364 		memcpy(cpu->arch.model_name + 16, cpuid.as_chars,
1365 			sizeof(cpuid.as_chars));
1366 
1367 		get_current_cpuid(&cpuid, 0x80000004, 0);
1368 		temp = cpuid.regs.edx;
1369 		cpuid.regs.edx = cpuid.regs.ecx;
1370 		cpuid.regs.ecx = temp;
1371 		memcpy(cpu->arch.model_name + 32, cpuid.as_chars,
1372 			sizeof(cpuid.as_chars));
1373 
1374 		// some cpus return a right-justified string
1375 		int32 i = 0;
1376 		while (cpu->arch.model_name[i] == ' ')
1377 			i++;
1378 		if (i > 0) {
1379 			memmove(cpu->arch.model_name, &cpu->arch.model_name[i],
1380 				strlen(&cpu->arch.model_name[i]) + 1);
1381 		}
1382 
1383 		if (full) {
1384 			dprintf("CPU %d: vendor '%s' model name '%s'\n",
1385 				currentCPU, cpu->arch.vendor_name, cpu->arch.model_name);
1386 		}
1387 	} else {
1388 		strlcpy(cpu->arch.model_name, "unknown", sizeof(cpu->arch.model_name));
1389 	}
1390 
1391 	// load feature bits
1392 	get_current_cpuid(&cpuid, 1, 0);
1393 	cpu->arch.feature[FEATURE_COMMON] = cpuid.eax_1.features; // edx
1394 	cpu->arch.feature[FEATURE_EXT] = cpuid.eax_1.extended_features; // ecx
1395 
1396 	if (!full)
1397 		return;
1398 
1399 	if (maxExtendedLeaf >= 0x80000001) {
1400 		get_current_cpuid(&cpuid, 0x80000001, 0);
1401 		if (cpu->arch.vendor == VENDOR_AMD)
1402 			cpu->arch.feature[FEATURE_EXT_AMD_ECX] = cpuid.regs.ecx; // ecx
1403 		cpu->arch.feature[FEATURE_EXT_AMD] = cpuid.regs.edx; // edx
1404 		if (cpu->arch.vendor != VENDOR_AMD)
1405 			cpu->arch.feature[FEATURE_EXT_AMD] &= IA32_FEATURES_INTEL_EXT;
1406 	}
1407 
1408 	if (maxBasicLeaf >= 5) {
1409 		get_current_cpuid(&cpuid, 5, 0);
1410 		cpu->arch.feature[FEATURE_5_ECX] = cpuid.regs.ecx;
1411 	}
1412 
1413 	if (maxBasicLeaf >= 6) {
1414 		get_current_cpuid(&cpuid, 6, 0);
1415 		cpu->arch.feature[FEATURE_6_EAX] = cpuid.regs.eax;
1416 		cpu->arch.feature[FEATURE_6_ECX] = cpuid.regs.ecx;
1417 	}
1418 
1419 	if (maxBasicLeaf >= 7) {
1420 		get_current_cpuid(&cpuid, 7, 0);
1421 		cpu->arch.feature[FEATURE_7_EBX] = cpuid.regs.ebx;
1422 		cpu->arch.feature[FEATURE_7_ECX] = cpuid.regs.ecx;
1423 		cpu->arch.feature[FEATURE_7_EDX] = cpuid.regs.edx;
1424 	}
1425 
1426 	if (maxBasicLeaf >= 0xd) {
1427 		get_current_cpuid(&cpuid, 0xd, 1);
1428 		cpu->arch.feature[FEATURE_D_1_EAX] = cpuid.regs.eax;
1429 	}
1430 
1431 	if (maxExtendedLeaf >= 0x80000007) {
1432 		get_current_cpuid(&cpuid, 0x80000007, 0);
1433 		cpu->arch.feature[FEATURE_EXT_7_EDX] = cpuid.regs.edx;
1434 	}
1435 
1436 	if (maxExtendedLeaf >= 0x80000008) {
1437 		get_current_cpuid(&cpuid, 0x80000008, 0);
1438 		cpu->arch.feature[FEATURE_EXT_8_EBX] = cpuid.regs.ebx;
1439 	}
1440 
1441 	detect_cpu_topology(currentCPU, cpu, maxBasicLeaf, maxExtendedLeaf);
1442 
1443 	if (cpu->arch.vendor == VENDOR_INTEL)
1444 		detect_intel_patch_level(cpu);
1445 	else if (cpu->arch.vendor == VENDOR_AMD)
1446 		detect_amd_patch_level(cpu);
1447 
1448 	cpu->arch.hybrid_type = get_hybrid_cpu_type();
1449 
1450 #if DUMP_FEATURE_STRING
1451 	dump_feature_string(currentCPU, cpu);
1452 #endif
1453 #if DUMP_CPU_PATCHLEVEL_TYPE
1454 	dprintf("CPU %d: patch_level %" B_PRIx32 "%s%s\n", currentCPU,
1455 		cpu->arch.patch_level,
1456 		cpu->arch.hybrid_type != 0 ? ", hybrid type ": "",
1457 		get_hybrid_cpu_type_string(cpu->arch.hybrid_type));
1458 #endif
1459 }
1460 
1461 
1462 bool
1463 x86_check_feature(uint32 feature, enum x86_feature_type type)
1464 {
1465 	cpu_ent* cpu = get_cpu_struct();
1466 
1467 #if 0
1468 	int i;
1469 	dprintf("x86_check_feature: feature 0x%x, type %d\n", feature, type);
1470 	for (i = 0; i < FEATURE_NUM; i++) {
1471 		dprintf("features %d: 0x%x\n", i, cpu->arch.feature[i]);
1472 	}
1473 #endif
1474 
1475 	return (cpu->arch.feature[type] & feature) != 0;
1476 }
1477 
1478 
1479 void*
1480 x86_get_double_fault_stack(int32 cpu, size_t* _size)
1481 {
1482 	*_size = kDoubleFaultStackSize;
1483 	return sDoubleFaultStacks + kDoubleFaultStackSize * cpu;
1484 }
1485 
1486 
1487 /*!	Returns the index of the current CPU. Can only be called from the double
1488 	fault handler.
1489 */
1490 int32
1491 x86_double_fault_get_cpu(void)
1492 {
1493 	addr_t stack = x86_get_stack_frame();
1494 	return (stack - (addr_t)sDoubleFaultStacks) / kDoubleFaultStackSize;
1495 }
1496 
1497 
1498 //	#pragma mark -
1499 
1500 
1501 status_t
1502 arch_cpu_preboot_init_percpu(kernel_args* args, int cpu)
1503 {
1504 	// On SMP system we want to synchronize the CPUs' TSCs, so system_time()
1505 	// will return consistent values.
1506 	if (smp_get_num_cpus() > 1) {
1507 		// let the first CPU prepare the rendezvous point
1508 		if (cpu == 0)
1509 			sTSCSyncRendezvous = smp_get_num_cpus() - 1;
1510 
1511 		// One CPU after the other will drop out of this loop and be caught by
1512 		// the loop below, until the last CPU (0) gets there. Save for +/- a few
1513 		// cycles the CPUs should pass the second loop at the same time.
1514 		while (sTSCSyncRendezvous != cpu) {
1515 		}
1516 
1517 		sTSCSyncRendezvous = cpu - 1;
1518 
1519 		while (sTSCSyncRendezvous != -1) {
1520 		}
1521 
1522 		// reset TSC to 0
1523 		x86_write_msr(IA32_MSR_TSC, 0);
1524 	}
1525 
1526 	x86_descriptors_preboot_init_percpu(args, cpu);
1527 
1528 	return B_OK;
1529 }
1530 
1531 
1532 static void
1533 halt_idle(void)
1534 {
1535 	asm("hlt");
1536 }
1537 
1538 
1539 static void
1540 amdc1e_noarat_idle(void)
1541 {
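	// Clear the C1E/SMI-on-compare-halt bits in the K8 interrupt pending
	// message register, so that the HLT below enters plain C1; in C1E the
	// local APIC timer stops on these pre-ARAT CPUs.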
1542 	uint64 msr = x86_read_msr(K8_MSR_IPM);
1543 	if (msr & K8_CMPHALT)
1544 		x86_write_msr(K8_MSR_IPM, msr & ~K8_CMPHALT);
1545 	halt_idle();
1546 }
1547 
1548 
1549 static bool
1550 detect_amdc1e_noarat()
1551 {
1552 	cpu_ent* cpu = get_cpu_struct();
1553 
1554 	if (cpu->arch.vendor != VENDOR_AMD)
1555 		return false;
1556 
1557 	// Family 0x12 and higher processors support ARAT.
1558 	// Families lower than 0xf don't support C1E.
1559 	// Family 0xf processors with model <= 0x40 don't support C1E.
1560 	uint32 family = cpu->arch.family + cpu->arch.extended_family;
1561 	uint32 model = (cpu->arch.extended_model << 4) | cpu->arch.model;
1562 	return (family < 0x12 && family > 0xf) || (family == 0xf && model > 0x40);
1563 }
1564 
1565 
1566 static void
1567 init_tsc_with_cpuid(kernel_args* args, uint32* conversionFactor)
1568 {
1569 	cpu_ent* cpu = get_cpu_struct();
1570 	if (cpu->arch.vendor != VENDOR_INTEL)
1571 		return;
1572 	uint32 model = (cpu->arch.extended_model << 4) | cpu->arch.model;
1573 	cpuid_info cpuid;
1574 	get_current_cpuid(&cpuid, 0, 0);
1575 	uint32 maxBasicLeaf = cpuid.eax_0.max_eax;
1576 	if (maxBasicLeaf < 0x15)
1577 		return;
1578 
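	// CPUID 0x15: EAX/EBX are the denominator/numerator of the TSC to
	// crystal clock ratio, ECX the crystal clock in Hz (0 if not
	// enumerated); TSC frequency = crystal * numerator / denominator.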
1579 	get_current_cpuid(&cpuid, 0x15, 0);
1580 	if (cpuid.regs.eax == 0 || cpuid.regs.ebx == 0)
1581 		return;
1582 	uint32 khz = cpuid.regs.ecx / 1000;
1583 	uint32 denominator = cpuid.regs.eax;
1584 	uint32 numerator = cpuid.regs.ebx;
1585 	if (khz == 0 && model == 0x5f) {
1586 		// Denverton (model 0x5f) doesn't enumerate its crystal clock; it
1587 		// is known to run at 25 MHz, so hardcode that.
1587 		khz = 25000;
1588 	}

	if (khz == 0 && maxBasicLeaf >= 0x16) {
		// for these CPUs the base frequency is also the TSC frequency
		get_current_cpuid(&cpuid, 0x16, 0);
		khz = cpuid.regs.eax * 1000 * denominator / numerator;
	}
	if (khz == 0)
		return;
	dprintf("CPU: using TSC frequency from CPUID\n");
	// compute for microseconds as follows (1000000 << 32) / (TSC freq in Hz),
	// or (1000 << 32) / (TSC freq in kHz)
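	// e.g. for a 2 GHz TSC: (1000 << 32) / 2000000 = 2147483, and
	// system_time() later computes (tsc * 2147483) >> 32, i.e. roughly
	// tsc / 2000 microseconds.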
	*conversionFactor = (1000ULL << 32) / (khz * numerator / denominator);
	// overwrite the bootloader value
	args->arch_args.system_time_cv_factor = *conversionFactor;
}


static void
init_tsc_with_msr(kernel_args* args, uint32* conversionFactor)
{
	cpu_ent* cpuEnt = get_cpu_struct();
	if (cpuEnt->arch.vendor != VENDOR_AMD)
		return;
	uint32 family = cpuEnt->arch.family + cpuEnt->arch.extended_family;
	if (family < 0x10)
		return;
	uint64 value = x86_read_msr(MSR_F10H_HWCR);
	if ((value & HWCR_TSCFREQSEL) == 0)
		return;

	value = x86_read_msr(MSR_F10H_PSTATEDEF(0));
	if ((value & PSTATEDEF_EN) == 0)
		return;
	if (family != 0x17 && family != 0x19)
		return;

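	// TSCFREQSEL is set, so the TSC ticks at the P0 frequency, which on
	// families 0x17 and 0x19 is CpuFid[7:0] / CpuDfsId[13:8] * 200 MHz.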
	uint64 khz = 200 * 1000;
	uint32 denominator = (value >> 8) & 0x3f;
	if (denominator < 0x8 || denominator > 0x2c)
		return;
	if (denominator > 0x1a && (denominator % 2) == 1)
		return;
	uint32 numerator = value & 0xff;
	if (numerator < 0x10)
		return;

	dprintf("CPU: using TSC frequency from MSR: %" B_PRIu64 " kHz\n",
		khz * numerator / denominator);
	// compute for microseconds as follows (1000000 << 32) / (TSC freq in Hz),
	// or (1000 << 32) / (TSC freq in kHz)
	*conversionFactor = (1000ULL << 32) / (khz * numerator / denominator);
	// overwrite the bootloader value
	args->arch_args.system_time_cv_factor = *conversionFactor;
}


static void
init_tsc(kernel_args* args)
{
	// init the TSC -> system_time() conversion factors

	// try to find the TSC frequency with CPUID (Intel) or MSRs (AMD),
	// falling back to the value determined by the bootloader
	uint32 conversionFactor = args->arch_args.system_time_cv_factor;
	init_tsc_with_cpuid(args, &conversionFactor);
	init_tsc_with_msr(args, &conversionFactor);
	uint64 conversionFactorNsecs = (uint64)conversionFactor * 1000;

#ifdef __x86_64__
	// The x86_64 system_time() implementation uses 64-bit multiplication and
	// therefore shifting is not necessary for low frequencies (it's also not
	// too likely that there'll be any x86_64 CPUs clocked under 1 GHz).
	__x86_setup_system_time((uint64)conversionFactor << 32,
		conversionFactorNsecs);
#else
	if (conversionFactorNsecs >> 32 != 0) {
		// the TSC frequency is < 1 GHz, which forces us to shift the factor
		__x86_setup_system_time(conversionFactor, conversionFactorNsecs >> 16,
			true);
	} else {
		// the TSC frequency is >= 1 GHz
		__x86_setup_system_time(conversionFactor, conversionFactorNsecs, false);
	}
#endif
}


status_t
arch_cpu_init_percpu(kernel_args* args, int cpu)
{
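	// Detect the CPU (first a lighter pass), load the matching microcode
	// update, then detect again: the update may change what the CPU reports
	// (at least the microcode revision).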
	detect_cpu(cpu, false);
	load_microcode(cpu);
	detect_cpu(cpu);

	if (cpu == 0)
		init_tsc(args);

	if (!gCpuIdleFunc) {
		if (detect_amdc1e_noarat())
			gCpuIdleFunc = amdc1e_noarat_idle;
		else
			gCpuIdleFunc = halt_idle;
	}

	if (x86_check_feature(IA32_FEATURE_MCE, FEATURE_COMMON))
		x86_write_cr4(x86_read_cr4() | IA32_CR4_MCE);

#ifdef __x86_64__
	// if RDTSCP is available, write the CPU number into TSC_AUX
	if (x86_check_feature(IA32_FEATURE_AMD_EXT_RDTSCP, FEATURE_EXT_AMD))
		x86_write_msr(IA32_MSR_TSC_AUX, cpu);

	// make LFENCE a dispatch-serializing instruction on 64-bit AMD CPUs
	cpu_ent* cpuEnt = get_cpu_struct();
	if (cpuEnt->arch.vendor == VENDOR_AMD) {
		uint32 family = cpuEnt->arch.family + cpuEnt->arch.extended_family;
		if (family >= 0x10 && family != 0x11) {
			uint64 value = x86_read_msr(MSR_F10H_DE_CFG);
			if ((value & DE_CFG_SERIALIZE_LFENCE) == 0)
				x86_write_msr(MSR_F10H_DE_CFG, value | DE_CFG_SERIALIZE_LFENCE);
		}
	}
#endif

	if (x86_check_feature(IA32_FEATURE_APERFMPERF, FEATURE_6_ECX)) {
		gCPU[cpu].arch.mperf_prev = x86_read_msr(IA32_MSR_MPERF);
		gCPU[cpu].arch.aperf_prev = x86_read_msr(IA32_MSR_APERF);
		gCPU[cpu].arch.frequency = 0;
		gCPU[cpu].arch.perf_timestamp = 0;
	}

	return __x86_patch_errata_percpu(cpu);
}


status_t
arch_cpu_init(kernel_args* args)
{
	if (args->ucode_data != NULL && args->ucode_data_size > 0) {
		sUcodeData = args->ucode_data;
		sUcodeDataSize = args->ucode_data_size;
	} else {
		dprintf("CPU: no microcode provided\n");
	}

	// Initialize descriptor tables.
	x86_descriptors_init(args);

	return B_OK;
}


#ifdef __x86_64__
static void
enable_smap(void* dummy, int cpu)
{
	x86_write_cr4(x86_read_cr4() | IA32_CR4_SMAP);
}


static void
enable_smep(void* dummy, int cpu)
{
	x86_write_cr4(x86_read_cr4() | IA32_CR4_SMEP);
}


static void
enable_osxsave(void* dummy, int cpu)
{
	x86_write_cr4(x86_read_cr4() | IA32_CR4_OSXSAVE);
}


static void
enable_xsavemask(void* dummy, int cpu)
{
	xsetbv(0, gXsaveMask);
}
#endif


status_t
arch_cpu_init_post_vm(kernel_args* args)
{
	// allocate an area for the double fault stacks
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	create_area_etc(B_SYSTEM_TEAM, "double fault stacks",
		kDoubleFaultStackSize * smp_get_num_cpus(), B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions,
		(void**)&sDoubleFaultStacks);

	X86PagingStructures* kernelPagingStructures
		= static_cast<X86VMTranslationMap*>(
			VMAddressSpace::Kernel()->TranslationMap())->PagingStructures();

	// Set the active translation map on each CPU.
	for (uint32 i = 0; i < args->num_cpus; i++) {
		gCPU[i].arch.active_paging_structures = kernelPagingStructures;
		kernelPagingStructures->AddReference();
	}

	if (!apic_available())
		x86_init_fpu();
	// else the FPU gets set up in the SMP code

#ifdef __x86_64__
	// if available, enable SMEP (Supervisor Mode Execution Prevention)
	if (x86_check_feature(IA32_FEATURE_SMEP, FEATURE_7_EBX)) {
		if (!get_safemode_boolean(B_SAFEMODE_DISABLE_SMEP_SMAP, false)) {
			dprintf("enable SMEP\n");
			call_all_cpus_sync(&enable_smep, NULL);
		} else
			dprintf("SMEP disabled per safemode setting\n");
	}

	// if available, enable SMAP (Supervisor Mode Access Prevention)
	if (x86_check_feature(IA32_FEATURE_SMAP, FEATURE_7_EBX)) {
		if (!get_safemode_boolean(B_SAFEMODE_DISABLE_SMEP_SMAP, false)) {
			dprintf("enable SMAP\n");
			call_all_cpus_sync(&enable_smap, NULL);

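			// patch the kernel's placeholder instructions with the real
			// 3-byte STAC/CLAC opcodes now that SMAP is enabled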
			arch_altcodepatch_replace(ALTCODEPATCH_TAG_STAC, &_stac, 3);
			arch_altcodepatch_replace(ALTCODEPATCH_TAG_CLAC, &_clac, 3);
		} else
			dprintf("SMAP disabled per safemode setting\n");
	}

	// if available, enable XSAVE (extended processor state save/restore)
	gHasXsave = x86_check_feature(IA32_FEATURE_EXT_XSAVE, FEATURE_EXT);
	if (gHasXsave) {
		gHasXsavec = x86_check_feature(IA32_FEATURE_XSAVEC,
			FEATURE_D_1_EAX);

		call_all_cpus_sync(&enable_osxsave, NULL);
		gXsaveMask = IA32_XCR0_X87 | IA32_XCR0_SSE;
		cpuid_info cpuid;
		get_current_cpuid(&cpuid, 0xd, 0);
		gXsaveMask |= (cpuid.regs.eax & IA32_XCR0_AVX);
		call_all_cpus_sync(&enable_xsavemask, NULL);
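		// with XCR0 now set, CPUID leaf 0xd reports in EBX the save area
		// size required for the enabled feature set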
		get_current_cpuid(&cpuid, 0xd, 0);
		gFPUSaveLength = cpuid.regs.ebx;
		if (gFPUSaveLength > sizeof(((struct arch_thread *)0)->fpu_state))
			gFPUSaveLength = 832;

		arch_altcodepatch_replace(ALTCODEPATCH_TAG_XSAVE,
			gHasXsavec ? &_xsavec : &_xsave, 4);
		arch_altcodepatch_replace(ALTCODEPATCH_TAG_XRSTOR,
			&_xrstor, 4);

		dprintf("enable %s 0x%" B_PRIx64 " %" B_PRId64 "\n",
			gHasXsavec ? "XSAVEC" : "XSAVE", gXsaveMask, gFPUSaveLength);
	}

#endif

	return B_OK;
}


status_t
arch_cpu_init_post_modules(kernel_args* args)
{
	// initialize CPU module

	void* cookie = open_module_list("cpu");

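	// iterate until either the module list is exhausted or a module loads
	// successfully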
	while (true) {
		char name[B_FILE_NAME_LENGTH];
		size_t nameLength = sizeof(name);

		if (read_next_module_name(cookie, name, &nameLength) != B_OK
			|| get_module(name, (module_info**)&sCpuModule) == B_OK)
			break;
	}

	close_module_list(cookie);

	// initialize MTRRs if available
	if (x86_count_mtrrs() > 0) {
		sCpuRendezvous = sCpuRendezvous2 = 0;
		call_all_cpus(&init_mtrrs, NULL);
	}

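	// copy the userspace thread-exit stub into the commpage, so userland
	// code can jump to it when a thread's entry function returns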
	size_t threadExitLen = (addr_t)x86_end_userspace_thread_exit
		- (addr_t)x86_userspace_thread_exit;
	addr_t threadExitPosition = fill_commpage_entry(
		COMMPAGE_ENTRY_X86_THREAD_EXIT, (const void*)x86_userspace_thread_exit,
		threadExitLen);

	// add the functions to the commpage image
	image_id image = get_commpage_image();

	elf_add_memory_image_symbol(image, "commpage_thread_exit",
		threadExitPosition, threadExitLen, B_SYMBOL_TYPE_TEXT);

	return B_OK;
}


void
arch_cpu_user_TLB_invalidate(void)
{
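	// reloading CR3 flushes all non-global TLB entries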
	x86_write_cr3(x86_read_cr3());
}


void
arch_cpu_global_TLB_invalidate(void)
{
	uint32 flags = x86_read_cr4();

	if (flags & IA32_CR4_GLOBAL_PAGES) {
		// disable and re-enable global pages to flush all TLB entries,
		// including those with the global bit set
		x86_write_cr4(flags & ~IA32_CR4_GLOBAL_PAGES);
		x86_write_cr4(flags | IA32_CR4_GLOBAL_PAGES);
	} else {
		cpu_status state = disable_interrupts();
		arch_cpu_user_TLB_invalidate();
		restore_interrupts(state);
	}
}


void
arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
{
	int32 num_pages = end / B_PAGE_SIZE - start / B_PAGE_SIZE;
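	// `end` is inclusive, hence the >= 0 below: num_pages + 1 pages get
	// invalidated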
	while (num_pages-- >= 0) {
		invalidate_TLB(start);
		start += B_PAGE_SIZE;
	}
}


void
arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
{
	for (int i = 0; i < num_pages; i++)
		invalidate_TLB(pages[i]);
}


status_t
arch_cpu_shutdown(bool rebootSystem)
{
	if (acpi_shutdown(rebootSystem) == B_OK)
		return B_OK;

	if (!rebootSystem) {
#ifndef __x86_64__
		return apm_shutdown();
#else
		return B_NOT_SUPPORTED;
#endif
	}

	cpu_status state = disable_interrupts();

	// try to reset the system using the keyboard controller
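	// (command 0xfe asks the controller to pulse the CPU reset line)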
	out8(0xfe, 0x64);

	// give the controller some time to do its job (0.5 s)
	snooze(500000);

	// if that didn't help, try a hard reset
	x86_reboot();

	restore_interrupts(state);
	return B_ERROR;
}


void
arch_cpu_sync_icache(void* address, size_t length)
{
	// the instruction cache is always coherent on x86
}
