xref: /haiku/src/system/kernel/arch/x86/arch_system_info.cpp (revision c237c4ce593ee823d9867fd997e51e4c447f5623)
/*
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 */


#include <arch/system_info.h>

#include <string.h>

#include <KernelExport.h>
#include <OS.h>

#include <boot/kernel_args.h>
#include <cpu.h>
#include <kernel.h>
#include <smp.h>


enum cpu_vendor sCPUVendor;
uint32 sCPUModel;
int64 sCPUClockSpeed;


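// SMP call-function worker: runs CPUID on the calling CPU, but only when
// that CPU is the one the caller asked for. When broadcast as an ICI, every
// receiving CPU invokes this, and all but the target simply return false.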
static bool
get_cpuid_for(cpuid_info *info, uint32 currentCPU, uint32 eaxRegister,
	uint32 forCPU)
{
	if (currentCPU != forCPU)
		return false;

	get_current_cpuid(info, eaxRegister, 0);
	return true;
}


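// Executes CPUID with the given EAX value on the CPU with index forCPU and
// stores the result in *info. Interrupts are disabled so we stay on one CPU;
// if that CPU is not the requested one, the request is forwarded as a
// synchronous inter-CPU interrupt.
//
// Usage sketch (hypothetical caller; leaf 0 returns the vendor string):
//
//	cpuid_info info;
//	if (get_cpuid(&info, 0, 0) == B_OK) {
//		// info.eax_0.max_eax is the highest supported basic leaf,
//		// info.eax_0.vendor_id holds the 12-character vendor ID.
//	}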
status_t
get_cpuid(cpuid_info *info, uint32 eaxRegister, uint32 forCPU)
{
	uint32 numCPUs = (uint32)smp_get_num_cpus();
	cpu_status state;

	if (forCPU >= numCPUs)
		return B_BAD_VALUE;

	// Disable interrupts to prevent us from being rescheduled onto
	// another CPU in the meantime.
	state = disable_interrupts();

	// TODO: As long as we only run on Pentium-class systems, we can assume
	// that the CPU supports CPUID.

	if (!get_cpuid_for(info, smp_get_current_cpu(), eaxRegister, forCPU)) {
		smp_send_broadcast_ici(SMP_MSG_CALL_FUNCTION, (addr_t)info,
			eaxRegister, forCPU, (void *)get_cpuid_for, SMP_MSG_FLAG_SYNC);
	}

	restore_interrupts(state);
	return B_OK;
}


status_t
arch_system_info_init(struct kernel_args *args)
{
	// So far we don't have to care about heterogeneous x86 platforms.
	cpu_ent* cpu = get_cpu_struct();

	switch (cpu->arch.vendor) {
		case VENDOR_AMD:
			sCPUVendor = B_CPU_VENDOR_AMD;
			break;
		case VENDOR_CENTAUR:
			sCPUVendor = B_CPU_VENDOR_VIA;
			break;
		case VENDOR_CYRIX:
			sCPUVendor = B_CPU_VENDOR_CYRIX;
			break;
		case VENDOR_INTEL:
			sCPUVendor = B_CPU_VENDOR_INTEL;
			break;
		case VENDOR_NSC:
			sCPUVendor = B_CPU_VENDOR_NATIONAL_SEMICONDUCTOR;
			break;
		case VENDOR_RISE:
			sCPUVendor = B_CPU_VENDOR_RISE;
			break;
		case VENDOR_TRANSMETA:
			sCPUVendor = B_CPU_VENDOR_TRANSMETA;
			break;
		case VENDOR_HYGON:
			sCPUVendor = B_CPU_VENDOR_HYGON;
			break;
		default:
			sCPUVendor = B_CPU_VENDOR_UNKNOWN;
			break;
	}

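	// Pack the CPUID signature into one value. Layout (an encoding private
	// to this file): stepping in bits 0-3, model in 4-7, family in 8-11,
	// type in 12-15, extended model in 16-19, extended family from bit 20 up.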
	sCPUModel = (cpu->arch.extended_family << 20)
		| (cpu->arch.extended_model << 16) | (cpu->arch.type << 12)
		| (cpu->arch.family << 8) | (cpu->arch.model << 4) | cpu->arch.stepping;

	sCPUClockSpeed = args->arch_args.cpu_clock_speed;
	if (cpu->arch.vendor == VENDOR_INTEL) {
		cpuid_info cpuid;
		get_current_cpuid(&cpuid, 0, 0);
		uint32 maxBasicLeaf = cpuid.eax_0.max_eax;
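		// CPUID leaf 0x16 ("Processor Frequency Information") reports the
		// base frequency in MHz in EAX; 0 means the field is not populated,
		// so only override the boot-time value when it is non-zero.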
		if (maxBasicLeaf >= 0x16) {
			get_current_cpuid(&cpuid, 0x16, 0);
			if (cpuid.regs.eax != 0) {
				sCPUClockSpeed = cpuid.regs.eax * 1000000LL;
				dprintf("found clock speed with CPUID.16h\n");
			}
		}
	}
	return B_OK;
}


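// Fills in the architecture-specific fields of a CPU topology tree node,
// depending on which level of the tree the node describes.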
void
arch_fill_topology_node(cpu_topology_node_info* node, int32 cpu)
{
	switch (node->type) {
		case B_TOPOLOGY_ROOT:
#if __i386__
			node->data.root.platform = B_CPU_x86;
#elif __x86_64__
			node->data.root.platform = B_CPU_x86_64;
#else
			node->data.root.platform = B_CPU_UNKNOWN;
#endif
			break;

		case B_TOPOLOGY_PACKAGE:
			node->data.package.vendor = sCPUVendor;
			node->data.package.cache_line_size = CACHE_LINE_SIZE;
			break;

		case B_TOPOLOGY_CORE:
			node->data.core.model = sCPUModel;
			node->data.core.default_frequency = sCPUClockSpeed;
			break;

		default:
			break;
	}
}


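// Computes the effective frequency of the given CPU from the APERF/MPERF
// MSR pair: MPERF ticks at a constant reference rate while APERF ticks at
// the actual clock, so
//
//	frequency = (delta APERF / delta MPERF) * base clock speed
//
// Must run on the CPU being measured; arch_get_frequency() below arranges
// that via call_single_cpu_sync().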
static void
get_frequency_for(void *_frequency, int cpu)
{
	uint64 *frequency = (uint64*)_frequency;

	bigtime_t timestamp = gCPU[cpu].arch.perf_timestamp;
	bigtime_t timestamp2 = system_time();
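	// system_time() is in microseconds; reuse the cached value if the last
	// measurement is less than 100 microseconds old.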
	if (timestamp2 - timestamp < 100) {
		*frequency = gCPU[cpu].arch.frequency;
		return;
	}

	uint64 mperf = gCPU[cpu].arch.mperf_prev;
	uint64 aperf = gCPU[cpu].arch.aperf_prev;
	uint64 mperf2 = x86_read_msr(IA32_MSR_MPERF);
	uint64 aperf2 = x86_read_msr(IA32_MSR_APERF);

	if (mperf2 == mperf)
		*frequency = 0;
	else {
		*frequency = (aperf2 - aperf) * sCPUClockSpeed / (mperf2 - mperf);
		gCPU[cpu].arch.mperf_prev = mperf2;
		gCPU[cpu].arch.aperf_prev = aperf2;
		gCPU[cpu].arch.perf_timestamp = timestamp2;
		gCPU[cpu].arch.frequency = *frequency;
	}
}


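// Returns the current frequency of the given CPU. If the hardware exposes
// the APERF/MPERF MSRs (advertised via the CPUID leaf 6 feature bit),
// measure the effective frequency on that CPU; otherwise fall back to the
// boot-time clock speed.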
status_t
arch_get_frequency(uint64 *frequency, int32 cpu)
{
	if (x86_check_feature(IA32_FEATURE_APERFMPERF, FEATURE_6_ECX))
		call_single_cpu_sync(cpu, get_frequency_for, frequency);
	else
		*frequency = sCPUClockSpeed;

	return B_OK;
}


//	#pragma mark -


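// Syscall backend for the userland get_cpuid(): the output buffer is
// verified to be a userland address and filled via user_memcpy(), so a
// faulting buffer yields B_BAD_ADDRESS instead of a kernel crash.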
status_t
_user_get_cpuid(cpuid_info *userInfo, uint32 eaxRegister, uint32 cpuNum)
{
	cpuid_info info;
	status_t status;

	if (!IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = get_cpuid(&info, eaxRegister, cpuNum);

	if (status == B_OK
		&& user_memcpy(userInfo, &info, sizeof(cpuid_info)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}
211