/*
 * Copyright 2005-2009, Haiku, Inc.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Axel Dörfler, axeld@pinc-software.de
 *		Ingo Weinhold, ingo_weinhold@gmx.de
 */


#include "generic_x86.h"
#include "intel.h"
#include "amd.h"
#include "via.h"

#include <KernelExport.h>
#include <arch_system_info.h>
#include <arch/x86/arch_cpu.h>
#include <smp.h>


//#define TRACE_MTRR
#ifdef TRACE_MTRR
#	define TRACE(x...) dprintf("mtrr: " x)
#else
#	define TRACE(x...) /* nothing */
#endif


#define IA32_MTRR_ENABLE		(1UL << 11)
#define IA32_MTRR_ENABLE_FIXED	(1UL << 10)
#define IA32_MTRR_VALID_RANGE	(1UL << 11)


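// Overlay for the IA32_MTRRCAP MSR: bits 0-7 hold the number of variable
// range MTRRs (VCNT), bit 8 indicates fixed range support, and bit 10
// indicates write-combining support.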
struct mtrr_capabilities {
	mtrr_capabilities(uint64 value) { *(uint64 *)this = value; }

	uint64	variable_ranges : 8;
	uint64	supports_fixed : 1;
	uint64	_reserved0 : 1;
	uint64	supports_write_combined : 1;
	uint64	_reserved1 : 53;
};


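// Mask of the physical address bits the CPU supports (excluding the page
// offset bits); initialized by generic_mtrr_compute_physical_mask().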
uint64 gPhysicalMask = 0;


static const char *
mtrr_type_to_string(uint8 type)
{
	switch (type) {
		case IA32_MTR_UNCACHED:
			return "uncacheable";
		case IA32_MTR_WRITE_COMBINING:
			return "write combining";
		case IA32_MTR_WRITE_THROUGH:
			return "write-through";
		case IA32_MTR_WRITE_PROTECTED:
			return "write-protected";
		case IA32_MTR_WRITE_BACK:
			return "write-back";
		default:
			return "reserved";
	}
}


static void
set_mtrr(uint32 index, uint64 base, uint64 length, uint8 type)
{
	uint64 mask = length - 1;
	mask = ~mask & gPhysicalMask;
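		// A variable MTRR matches a physical address when
		// (address & mask) == (base & mask). For a power-of-two sized,
		// size-aligned range this means the mask is simply ~(length - 1),
		// limited to the supported physical address bits.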

	TRACE("MTRR %" B_PRIu32 ": new mask %" B_PRIx64 "\n", index, mask);
	TRACE(" mask test base: %" B_PRIx64 "\n", mask & base);
	TRACE(" mask test middle: %" B_PRIx64 "\n", mask & (base + length / 2));
	TRACE(" mask test end: %" B_PRIx64 "\n", mask & (base + length));

	index *= 2;
		// there are two registers per slot

	// First, disable MTRR

	x86_write_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index, 0);

	if (base != 0 || mask != 0 || type != 0) {
		// then fill in the new values, and enable it again

		x86_write_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index,
			(base & ~(B_PAGE_SIZE - 1)) | type);
		x86_write_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index,
			mask | IA32_MTRR_VALID_RANGE);
	} else {
		// reset base as well
		x86_write_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index, 0);
	}
}


// #pragma mark -


uint32
generic_count_mtrrs(void)
{
	if (!x86_check_feature(IA32_FEATURE_MTRR, FEATURE_COMMON)
		|| !x86_check_feature(IA32_FEATURE_MSR, FEATURE_COMMON))
		return 0;

	mtrr_capabilities capabilities(x86_read_msr(IA32_MSR_MTRR_CAPABILITIES));
	TRACE("CPU %" B_PRId32 " has %u variable range MTRRs.\n",
		smp_get_current_cpu(), (uint8)capabilities.variable_ranges);

	return capabilities.variable_ranges;
}


void
generic_init_mtrrs(uint32 count)
{
	if (count == 0)
		return;

	// If MTRRs are enabled, we leave everything as is (save for, possibly, the
	// default, which we set below), so that we can benefit from the BIOS's
	// setup until we've installed our own. If MTRRs are disabled, we clear
	// all registers and enable MTRRs.
	// (we leave the fixed MTRRs as is)
	// TODO: check if the fixed MTRRs are set on all CPUs identically?
	TRACE("generic_init_mtrrs(count = %" B_PRIu32 ")\n", count);

	uint64 defaultType = x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE);
	if ((defaultType & IA32_MTRR_ENABLE) == 0) {
		for (uint32 i = 0; i < count; i++)
			set_mtrr(i, 0, 0, 0);
	}

	// Turn on variable MTRR functionality.
	// We need to ensure that the default type is uncacheable, otherwise
	// clearing the MTRRs could result in ranges that aren't supposed to be
	// cacheable becoming cacheable due to the default type.
	x86_write_msr(IA32_MSR_MTRR_DEFAULT_TYPE,
		(defaultType & ~0xff) | IA32_MTRR_ENABLE);
}


void
generic_set_mtrr(uint32 index, uint64 base, uint64 length, uint8 type)
{
	set_mtrr(index, base, length, type);
	TRACE("[cpu %" B_PRId32 "] mtrrs now:\n", smp_get_current_cpu());
#ifdef TRACE_MTRR
	generic_dump_mtrrs(generic_count_mtrrs());
#endif
}


status_t
generic_get_mtrr(uint32 index, uint64 *_base, uint64 *_length, uint8 *_type)
{
	uint64 mask = x86_read_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index * 2);
	if ((mask & IA32_MTRR_VALID_RANGE) == 0)
		return B_ERROR;

	uint64 base = x86_read_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index * 2);

	*_base = base & ~(B_PAGE_SIZE - 1);
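	// The mask stores ~(length - 1) with the page-offset bits cleared, so
	// inverting it yields length - B_PAGE_SIZE; add one page to recover the
	// full range length.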
	*_length = (~mask & gPhysicalMask) + B_PAGE_SIZE;
	*_type = base & 0xff;

	return B_OK;
}


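// Replaces the complete set of variable MTRRs in one go, using the usual
// disable/program/re-enable sequence. A hypothetical caller that wanted to
// map a frame buffer write-combined might do something like the following
// (framebufferBase/framebufferSize and the write-back default type are
// illustrative only, not part of this file):
//
//	x86_mtrr_info info;
//	info.base = framebufferBase;
//	info.size = framebufferSize;
//	info.type = IA32_MTR_WRITE_COMBINING;
//	generic_set_mtrrs(IA32_MTR_WRITE_BACK, &info, 1, generic_count_mtrrs());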
void
generic_set_mtrrs(uint8 newDefaultType, const x86_mtrr_info* infos,
	uint32 count, uint32 maxCount)
{
	// check count
	if (maxCount == 0)
		return;

	if (count > maxCount)
		count = maxCount;

	// disable MTRRs
	uint64 defaultType = x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE)
		& ~IA32_MTRR_ENABLE;
	x86_write_msr(IA32_MSR_MTRR_DEFAULT_TYPE, defaultType);

	// set the given MTRRs
	for (uint32 i = 0; i < count; i++)
		set_mtrr(i, infos[i].base, infos[i].size, infos[i].type);

	// clear the other MTRRs
	for (uint32 i = count; i < maxCount; i++)
		set_mtrr(i, 0, 0, 0);

	// re-enable MTRRs and set the new default type
	defaultType = (defaultType & ~(uint64)0xff) | newDefaultType;
	x86_write_msr(IA32_MSR_MTRR_DEFAULT_TYPE, defaultType | IA32_MTRR_ENABLE);
}


status_t
generic_mtrr_compute_physical_mask(void)
{
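	// Default to 36 physical address bits; if CPUID leaf 0x80000008 is
	// available, it reports the CPU's actual physical address width.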
	uint32 bits = 36;

	cpuid_info cpuInfo;
	if (get_current_cpuid(&cpuInfo, 0x80000000, 0) == B_OK
		&& (cpuInfo.eax_0.max_eax & 0xff) >= 8) {
		get_current_cpuid(&cpuInfo, 0x80000008, 0);
		bits = cpuInfo.regs.eax & 0xff;

		// Some CPUs report a bogus (too small) value here; never assume
		// fewer than the baseline 36 bits.
		if (bits < 36)
			bits = 36;
	}

	gPhysicalMask = ((1ULL << bits) - 1) & ~(B_PAGE_SIZE - 1);

	TRACE("CPU %" B_PRId32 " has %" B_PRIu32
		" physical address bits, physical mask is %016" B_PRIx64 "\n",
		smp_get_current_cpu(), bits, gPhysicalMask);

	return B_OK;
}


void
generic_dump_mtrrs(uint32 count)
{
	if (count == 0)
		return;

	int cpu = smp_get_current_cpu();
	uint64 defaultType = x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE);
	dprintf("mtrr: [cpu %d] MTRRs are %sabled\n", cpu,
		(defaultType & IA32_MTRR_ENABLE) != 0 ? "en" : "dis");
	dprintf("mtrr: [cpu %d] default type is %u %s\n", cpu,
		(uint8)defaultType, mtrr_type_to_string(defaultType));
	dprintf("mtrr: [cpu %d] fixed range MTRRs are %sabled\n", cpu,
		(defaultType & IA32_MTRR_ENABLE_FIXED) != 0 ? "en" : "dis");

	for (uint32 i = 0; i < count; i++) {
		uint64 base;
		uint64 length;
		uint8 type;
		if (generic_get_mtrr(i, &base, &length, &type) == B_OK) {
			dprintf("mtrr: [cpu %d] %" B_PRIu32 ": base: 0x%" B_PRIx64
				"; length: 0x%" B_PRIx64 "; type: %u %s\n", cpu, i, base,
				length, type, mtrr_type_to_string(type));
		}
	}
}


module_info *modules[] = {
	(module_info *)&gIntelModule,
	(module_info *)&gAMDModule,
	(module_info *)&gVIAModule,
	NULL
};