/*
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 */


#include <arch/system_info.h>

#include <string.h>

#include <KernelExport.h>
#include <OS.h>

#include <boot/kernel_args.h>
#include <cpu.h>
#include <kernel.h>
#include <smp.h>


// Cached properties of the boot CPU, filled in once by
// arch_system_info_init() and only read afterwards.
enum cpu_vendor sCPUVendor;
	// public vendor id, mapped from the arch-internal VENDOR_* constant
uint32 sCPUModel;
	// packed CPUID signature: extended family [bits 20+],
	// extended model [19:16], type [15:12], family [11:8],
	// model [7:4], stepping [3:0]
int64 sCPUClockSpeed;
	// nominal clock speed as determined by the boot loader
	// (presumably in Hz -- TODO confirm against arch_args producer)


/*!	Executes CPUID with \a eaxRegister on the current CPU, but only if the
	current CPU is the one the caller asked for (\a forCPU). Returns whether
	the query was actually performed.
	The parameter order matches what the SMP call-function ICI delivers:
	get_cpuid() passes this function to smp_send_broadcast_ici(), so it runs
	on every CPU and only the matching one fills in \a info.
*/
static bool
get_cpuid_for(cpuid_info *info, uint32 currentCPU, uint32 eaxRegister,
	uint32 forCPU)
{
	if (currentCPU != forCPU)
		return false;

	get_current_cpuid(info, eaxRegister, 0);
	return true;
}


/*!	Retrieves CPUID leaf \a eaxRegister of CPU \a forCPU into \a info, no
	matter which CPU the caller happens to run on.
	Returns \c B_BAD_VALUE if \a forCPU is out of range, \c B_OK otherwise.
*/
status_t
get_cpuid(cpuid_info *info, uint32 eaxRegister, uint32 forCPU)
{
	uint32 numCPUs = (uint32)smp_get_num_cpus();
	cpu_status state;

	if (forCPU >= numCPUs)
		return B_BAD_VALUE;

	// prevent us from being rescheduled
	state = disable_interrupts();

	// ToDo: as long as we only run on pentium-class systems, we can assume
	// that the CPU supports cpuid.

	if (!get_cpuid_for(info, smp_get_current_cpu(), eaxRegister, forCPU)) {
		// We are not running on the requested CPU -- broadcast the query
		// and let the matching CPU execute get_cpuid_for() itself. The
		// broadcast is synchronous, so \a info is valid when we return.
		smp_send_broadcast_ici(SMP_MSG_CALL_FUNCTION, (addr_t)info,
			eaxRegister, forCPU, (void *)get_cpuid_for, SMP_MSG_FLAG_SYNC);
	}

	restore_interrupts(state);
	return B_OK;
}


/*!	Initializes the cached CPU information (vendor, packed model id, clock
	speed) from the boot CPU's data and the boot loader's kernel \a args.
	Always returns \c B_OK.
*/
status_t
arch_system_info_init(struct kernel_args *args)
{
	// So far we don't have to care about heterogeneous x86 platforms.
	cpu_ent* cpu = get_cpu_struct();

	// Map the arch-internal vendor constant to the public value.
	switch (cpu->arch.vendor) {
		case VENDOR_AMD:
			sCPUVendor = B_CPU_VENDOR_AMD;
			break;
		case VENDOR_CENTAUR:
			sCPUVendor = B_CPU_VENDOR_VIA;
			break;
		case VENDOR_CYRIX:
			sCPUVendor = B_CPU_VENDOR_CYRIX;
			break;
		case VENDOR_INTEL:
			sCPUVendor = B_CPU_VENDOR_INTEL;
			break;
		case VENDOR_NSC:
			sCPUVendor = B_CPU_VENDOR_NATIONAL_SEMICONDUCTOR;
			break;
		case VENDOR_RISE:
			sCPUVendor = B_CPU_VENDOR_RISE;
			break;
		case VENDOR_TRANSMETA:
			sCPUVendor = B_CPU_VENDOR_TRANSMETA;
			break;
		case VENDOR_HYGON:
			sCPUVendor = B_CPU_VENDOR_HYGON;
			break;
		default:
			sCPUVendor = B_CPU_VENDOR_UNKNOWN;
			break;
	}

	// Pack the CPUID signature fields into one value, one nibble per field
	// (the extended family occupies everything from bit 20 upwards).
	sCPUModel = (cpu->arch.extended_family << 20)
		| (cpu->arch.extended_model << 16) | (cpu->arch.type << 12)
		| (cpu->arch.family << 8) | (cpu->arch.model << 4) | cpu->arch.stepping;

	sCPUClockSpeed = args->arch_args.cpu_clock_speed;
	return B_OK;
}


/*!	Fills in the architecture specific fields of the topology \a node;
	which fields apply depends on the node's level in the topology tree.
	The \a cpu index is not needed here, as all CPUs share the cached
	vendor/model/clock values.
*/
void
arch_fill_topology_node(cpu_topology_node_info* node, int32 cpu)
{
	switch (node->type) {
		case B_TOPOLOGY_ROOT:
#if __i386__
			node->data.root.platform = B_CPU_x86;
#elif __x86_64__
			node->data.root.platform = B_CPU_x86_64;
#else
			node->data.root.platform = B_CPU_UNKNOWN;
#endif
			break;

		case B_TOPOLOGY_PACKAGE:
			node->data.package.vendor = sCPUVendor;
			node->data.package.cache_line_size = CACHE_LINE_SIZE;
			break;

		case B_TOPOLOGY_CORE:
			node->data.core.model = sCPUModel;
			node->data.core.default_frequency = sCPUClockSpeed;
			break;

		default:
			break;
	}
}


/*!	Computes the effective frequency of \a cpu from the APERF/MPERF MSR
	deltas since the previous sample and stores it in \a _frequency (which
	is really a uint64*). Runs on the target CPU itself, dispatched via
	call_single_cpu_sync() from arch_get_frequency().
	If the previous sample is less than 100 units of system_time() old, the
	cached value is returned instead of measuring over a too short interval.
	If MPERF did not advance at all, 0 is reported and the cached sample
	state is deliberately left untouched.
*/
static void
get_frequency_for(void *_frequency, int cpu)
{
	uint64 *frequency = (uint64*)_frequency;

	bigtime_t timestamp = gCPU[cpu].arch.perf_timestamp;
	bigtime_t timestamp2 = system_time();
	if (timestamp2 - timestamp < 100) {
		*frequency = gCPU[cpu].arch.frequency;
		return;
	}

	uint64 mperf = gCPU[cpu].arch.mperf_prev;
	uint64 aperf = gCPU[cpu].arch.aperf_prev;
	uint64 mperf2 = x86_read_msr(IA32_MSR_MPERF);
	uint64 aperf2 = x86_read_msr(IA32_MSR_APERF);

	if (mperf2 == mperf)
		*frequency = 0;
	else {
		// effective frequency = nominal clock scaled by the ratio of
		// actual (APERF) to reference (MPERF) cycles over the interval
		*frequency = (aperf2 - aperf) * sCPUClockSpeed / (mperf2 - mperf);
		gCPU[cpu].arch.mperf_prev = mperf2;
		gCPU[cpu].arch.aperf_prev = aperf2;
		gCPU[cpu].arch.perf_timestamp = timestamp2;
		gCPU[cpu].arch.frequency = *frequency;
	}
}


/*!	Returns the current frequency of CPU \a cpu in \a frequency.
	Uses the APERF/MPERF MSRs when the hardware advertises them (measured
	on the target CPU itself), and falls back to the static boot-time clock
	speed otherwise. Always returns \c B_OK.
*/
status_t
arch_get_frequency(uint64 *frequency, int32 cpu)
{
	if (x86_check_feature(IA32_FEATURE_APERFMPERF, FEATURE_6_ECX))
		call_single_cpu_sync(cpu, get_frequency_for, frequency);
	else
		*frequency = sCPUClockSpeed;

	return B_OK;
}


// #pragma mark -


/*!	Syscall backend for get_cpuid(): validates that \a userInfo is a
	userland address, performs the query into a kernel-side buffer, and
	copies the result back out on success.
*/
status_t
_user_get_cpuid(cpuid_info *userInfo, uint32 eaxRegister, uint32 cpuNum)
{
	cpuid_info info;
	status_t status;

	if (!IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = get_cpuid(&info, eaxRegister, cpuNum);

	if (status == B_OK
		&& user_memcpy(userInfo, &info, sizeof(cpuid_info)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}