/*
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 */


#include <arch/system_info.h>

#include <string.h>

#include <KernelExport.h>
#include <OS.h>

#include <boot/kernel_args.h>
#include <cpu.h>
#include <kernel.h>
#include <smp.h>


enum cpu_vendor sCPUVendor;
uint32 sCPUModel;
int64 sCPUClockSpeed;


static bool
get_cpuid_for(cpuid_info *info, uint32 currentCPU, uint32 eaxRegister,
	uint32 forCPU)
{
	if (currentCPU != forCPU)
		return false;

	get_current_cpuid(info, eaxRegister, 0);
	return true;
}


status_t
get_cpuid(cpuid_info *info, uint32 eaxRegister, uint32 forCPU)
{
	uint32 numCPUs = (uint32)smp_get_num_cpus();
	cpu_status state;

	if (forCPU >= numCPUs)
		return B_BAD_VALUE;

	// prevent us from being rescheduled
	state = disable_interrupts();

	// ToDo: as long as we only run on pentium-class systems, we can assume
	//	that the CPU supports cpuid.

	if (!get_cpuid_for(info, smp_get_current_cpu(), eaxRegister, forCPU)) {
		smp_send_broadcast_ici(SMP_MSG_CALL_FUNCTION, (addr_t)info,
			eaxRegister, forCPU, (void *)get_cpuid_for, SMP_MSG_FLAG_SYNC);
	}

	restore_interrupts(state);
	return B_OK;
}


status_t
arch_system_info_init(struct kernel_args *args)
{
	// So far we don't have to care about heterogeneous x86 platforms.
	cpu_ent* cpu = get_cpu_struct();

	switch (cpu->arch.vendor) {
		case VENDOR_AMD:
			sCPUVendor = B_CPU_VENDOR_AMD;
			break;
		case VENDOR_CENTAUR:
			sCPUVendor = B_CPU_VENDOR_VIA;
			break;
		case VENDOR_CYRIX:
			sCPUVendor = B_CPU_VENDOR_CYRIX;
			break;
		case VENDOR_INTEL:
			sCPUVendor = B_CPU_VENDOR_INTEL;
			break;
		case VENDOR_NSC:
			sCPUVendor = B_CPU_VENDOR_NATIONAL_SEMICONDUCTOR;
			break;
		case VENDOR_RISE:
			sCPUVendor = B_CPU_VENDOR_RISE;
			break;
		case VENDOR_TRANSMETA:
			sCPUVendor = B_CPU_VENDOR_TRANSMETA;
			break;
		default:
			sCPUVendor = B_CPU_VENDOR_UNKNOWN;
			break;
	}

	sCPUModel = (cpu->arch.extended_family << 20)
		| (cpu->arch.extended_model << 16) | (cpu->arch.type << 12)
		| (cpu->arch.family << 8) | (cpu->arch.model << 4) | cpu->arch.stepping;

	sCPUClockSpeed = args->arch_args.cpu_clock_speed;
	return B_OK;
}


void
arch_fill_topology_node(cpu_topology_node_info* node, int32 cpu)
{
	switch (node->type) {
		case B_TOPOLOGY_ROOT:
#if __INTEL__
			node->data.root.platform = B_CPU_x86;
#elif __x86_64__
			node->data.root.platform = B_CPU_x86_64;
#else
			node->data.root.platform = B_CPU_UNKNOWN;
#endif
			break;

		case B_TOPOLOGY_PACKAGE:
			node->data.package.vendor = sCPUVendor;
			node->data.package.cache_line_size = CACHE_LINE_SIZE;
			break;

		case B_TOPOLOGY_CORE:
			node->data.core.model = sCPUModel;
			node->data.core.default_frequency = sCPUClockSpeed;
			break;

		default:
			break;
	}
}


// #pragma mark -


status_t
_user_get_cpuid(cpuid_info *userInfo, uint32 eaxRegister, uint32 cpuNum)
{
	cpuid_info info;
	status_t status;

	if (!IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = get_cpuid(&info, eaxRegister, cpuNum);

	if (status == B_OK
		&& user_memcpy(userInfo, &info, sizeof(cpuid_info)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}
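

/*
 * Sketch only: one way a userland program could end up in _user_get_cpuid()
 * above is through the public get_cpuid() wrapper declared in <OS.h>. The
 * field names used here (eax_0.max_eax, eax_0.vendor_id) follow the
 * cpuid_info union from OS.h; the program is illustrative and kept out of
 * the kernel build by the #if 0 guard.
 */
#if 0
#include <stdio.h>
#include <string.h>

#include <OS.h>

int
main(void)
{
	cpuid_info info;
	char vendor[13];

	// Leaf 0 on CPU 0: eax_0.max_eax holds the highest standard leaf,
	// eax_0.vendor_id the 12-byte vendor string (not NUL-terminated).
	if (get_cpuid(&info, 0, 0) != B_OK)
		return 1;

	memcpy(vendor, info.eax_0.vendor_id, sizeof(info.eax_0.vendor_id));
	vendor[12] = '\0';

	printf("CPU 0 vendor: %s, max standard leaf: %lu\n", vendor,
		(unsigned long)info.eax_0.max_eax);
	return 0;
}
#endif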