/*
 * Copyright 2013, Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Paweł Dziepak, <pdziepak@quarnos.org>
 */


#include <new>

#include <cpufreq.h>
#include <KernelExport.h>

#include <arch_cpu.h>
#include <cpu.h>
#include <smp.h>
#include <util/AutoLock.h>


#define INTEL_PSTATES_MODULE_NAME	CPUFREQ_MODULES_PREFIX "/intel_pstates/v1"


// Minimum time between two P-state updates on a CPU, in microseconds.
const int kMinimalInterval = 50000;

// P-state limits, read from IA32_MSR_PLATFORM_INFO and
// IA32_MSR_TURBO_RATIO_LIMIT in init_pstates().
static uint16 sMinPState;
static uint16 sMaxPState;
static uint16 sBoostPState;

// Whether turbo P-states should be avoided; set by
// pstates_set_scheduler_mode().
static bool sAvoidBoost;


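// Per-CPU state, aligned to a cache line to avoid false sharing between
// logical CPUs.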
struct CPUEntry {
				CPUEntry();

	uint16		fCurrentPState;

	bigtime_t	fLastUpdate;
} CACHE_LINE_ALIGN;
static CPUEntry* sCPUEntries;


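// Start with an out-of-range P-state so that the first set_pstate() call
// always writes IA32_MSR_PERF_CTL. Note that sCPUEntries is only allocated
// after sMinPState has been initialized in init_pstates().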
CPUEntry::CPUEntry()
	:
	fCurrentPState(sMinPState - 1),
	fLastUpdate(0)
{
}


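// In power saving mode the turbo range is avoided unless a large performance
// increase is requested (see pstates_increase_performance()).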
static void
pstates_set_scheduler_mode(scheduler_mode mode)
{
	sAvoidBoost = mode == SCHEDULER_MODE_POWER_SAVING;
}


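// Estimate the average P-state since the last measurement from the
// APERF/MPERF counter pair: MPERF ticks at a fixed reference frequency (the
// maximum non-turbo frequency), APERF at the actual frequency, so their
// ratio scaled by sMaxPState yields the effective ratio. Both counters are
// reset for the next measurement.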
static int
measure_pstate(CPUEntry* entry)
{
	InterruptsLocker locker;

	uint64 mperf = x86_read_msr(IA32_MSR_MPERF);
	uint64 aperf = x86_read_msr(IA32_MSR_APERF);

	x86_write_msr(IA32_MSR_MPERF, 0);
	x86_write_msr(IA32_MSR_APERF, 0);

	locker.Unlock();

	if (mperf == 0)
		return sMinPState;

	int oldPState = sMaxPState * aperf / mperf;
	oldPState = min_c(max_c(oldPState, sMinPState), sBoostPState);

	return oldPState;
}


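// Request a P-state for the current CPU. The target ratio is placed in
// bits 15:8 of IA32_MSR_PERF_CTL; the write is skipped when the clamped
// value matches the last requested one.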
static inline void
set_pstate(uint16 pstate)
{
	CPUEntry* entry = &sCPUEntries[smp_get_current_cpu()];
	pstate = min_c(max_c(sMinPState, pstate), sBoostPState);

	if (entry->fCurrentPState != pstate) {
		entry->fLastUpdate = system_time();
		entry->fCurrentPState = pstate;

		x86_write_msr(IA32_MSR_PERF_CTL, pstate << 8);
	}
}


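// Raise performance: move from the measured P-state toward sBoostPState,
// proportionally to delta (relative to kCPUPerformanceScaleMax). When boost
// should be avoided, stay at or below sMaxPState unless the target is
// already more than halfway into the turbo range.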
static status_t
pstates_increase_performance(int delta)
{
	CPUEntry* entry = &sCPUEntries[smp_get_current_cpu()];

	if (system_time() - entry->fLastUpdate < kMinimalInterval)
		return B_OK;

	int pState = measure_pstate(entry);
	pState += (sBoostPState - pState) * delta / kCPUPerformanceScaleMax;

	if (sAvoidBoost && pState < (sMaxPState + sBoostPState) / 2)
		pState = min_c(pState, sMaxPState);

	set_pstate(pState);
	return B_OK;
}


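// Lower performance: move from the measured P-state toward sMinPState,
// proportionally to delta.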
static status_t
pstates_decrease_performance(int delta)
{
	CPUEntry* entry = &sCPUEntries[smp_get_current_cpu()];

	if (system_time() - entry->fLastUpdate < kMinimalInterval)
		return B_OK;

	int pState = measure_pstate(entry);
	pState -= (pState - sMinPState) * delta / kCPUPerformanceScaleMax;

	set_pstate(pState);
	return B_OK;
}


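// Check for an Intel family 6 CPU whose display model (model plus extended
// model shifted left by four) is known to expose the MSR layout this module
// relies on.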
static bool
is_cpu_model_supported(cpu_ent* cpu)
{
	uint8 model = cpu->arch.model + (cpu->arch.extended_model << 4);

	if (cpu->arch.vendor != VENDOR_INTEL)
		return false;

	if (cpu->arch.family != 6)
		return false;

	const uint8 kSupportedFamily6Models[] = {
		0x2a, 0x2d, 0x2e, 0x3a, 0x3c, 0x3e, 0x3f, 0x45, 0x46,
	};
	const int kSupportedFamily6ModelsCount
		= sizeof(kSupportedFamily6Models) / sizeof(uint8);

	for (int i = 0; i < kSupportedFamily6ModelsCount; i++) {
		if (model == kSupportedFamily6Models[i])
			return true;
	}

	return false;
}


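// Reset the APERF/MPERF counters and return the CPU to its maximum
// non-turbo P-state; run on every CPU via call_all_cpus_sync() during
// initialization and teardown.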
static void
set_normal_pstate(void* /* dummy */, int cpu)
{
	measure_pstate(&sCPUEntries[cpu]);
	set_pstate(sMaxPState);
}


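// The P-state limits come from IA32_MSR_PLATFORM_INFO: bits 47:40 hold the
// maximum efficiency ratio (minimum P-state) and bits 15:8 the maximum
// non-turbo ratio. Bits 7:0 of IA32_MSR_TURBO_RATIO_LIMIT hold the maximum
// single-core turbo ratio.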
static status_t
init_pstates()
{
	if (!x86_check_feature(IA32_FEATURE_MSR, FEATURE_COMMON))
		return B_ERROR;

	if (!x86_check_feature(IA32_FEATURE_APERFMPERF, FEATURE_6_ECX))
		return B_ERROR;

	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++) {
		if (!is_cpu_model_supported(&gCPU[i]))
			return B_ERROR;
	}

	uint64 platformInfo = x86_read_msr(IA32_MSR_PLATFORM_INFO);
	sMinPState = (platformInfo >> 40) & 0xff;
	sMaxPState = (platformInfo >> 8) & 0xff;
	sBoostPState
		= max_c(x86_read_msr(IA32_MSR_TURBO_RATIO_LIMIT) & 0xff, sMaxPState);

	dprintf("using Intel P-States: min %" B_PRIu16 ", max %" B_PRIu16
		", boost %" B_PRIu16 "\n", sMinPState, sMaxPState, sBoostPState);

	if (sMaxPState <= sMinPState || sMaxPState == 0) {
		dprintf("unexpected or invalid Intel P-States limits, aborting\n");
		return B_ERROR;
	}

	sCPUEntries = new(std::nothrow) CPUEntry[cpuCount];
	if (sCPUEntries == NULL)
		return B_NO_MEMORY;

	pstates_set_scheduler_mode(SCHEDULER_MODE_LOW_LATENCY);

	call_all_cpus_sync(set_normal_pstate, NULL);
	return B_OK;
}


static status_t
uninit_pstates()
{
	call_all_cpus_sync(set_normal_pstate, NULL);
	delete[] sCPUEntries;

	return B_OK;
}


static status_t
std_ops(int32 op, ...)
{
	switch (op) {
		case B_MODULE_INIT:
			return init_pstates();

		case B_MODULE_UNINIT:
			uninit_pstates();
			return B_OK;
	}

	return B_ERROR;
}


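// Module descriptor; the 1.0f is the module's rank, used to choose among
// the available cpufreq modules.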
static cpufreq_module_info sIntelPStates = {
	{
		INTEL_PSTATES_MODULE_NAME,
		0,
		std_ops,
	},

	1.0f,

	pstates_set_scheduler_mode,

	pstates_increase_performance,
	pstates_decrease_performance,
};


module_info* modules[] = {
	(module_info*)&sIntelPStates,
	NULL
};