/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */


#include <util/AutoLock.h>

#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"
#include "scheduler_thread.h"


using namespace Scheduler;


// A sleeping thread's cache footprint is considered gone once its core has
// accumulated this much active time (in microseconds, i.e. 100ms) since the
// thread went to sleep.
const bigtime_t kCacheExpire = 100000;


static void
switch_to_mode()
{
	// Nothing to do: low latency mode needs no mode-specific setup.
}


static void
set_cpu_enabled(int32 /* cpu */, bool /* enabled */)
{
	// Nothing to do: this mode keeps no per-CPU state.
}


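// The cache affinity heuristic: a sleeping thread's cache contents are
// assumed evicted once its core has accumulated more than kCacheExpire of
// active time since the thread went to sleep. A zero WentSleepActive()
// value (no sleep recorded yet) counts as not expired.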
static bool
has_cache_expired(const ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();
	if (threadData->WentSleepActive() == 0)
		return false;
	CoreEntry* core = threadData->Core();
	bigtime_t activeTime = core->GetActiveTime();
	return activeTime - threadData->WentSleepActive() > kCacheExpire;
}


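// Choose a core for a thread. Preference order: an idle core in a fully
// idle package, an idle core in the most idle package, then the least
// loaded core from the load heaps, always skipping cores that do not match
// the thread's CPU affinity mask.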
static CoreEntry*
choose_core(const ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	// Prefer a package that is entirely idle.
	PackageEntry* package = gIdlePackageList.Last();
	if (package == NULL) {
		// None fully idle; try to wake an idle core in the most idle package.
		package = PackageEntry::GetMostIdlePackage();
	}

	int32 index = 0;
	CPUSet mask = threadData->GetCPUMask();
	if (mask.IsEmpty()) {
		// An empty affinity mask means the thread may run anywhere.
		mask.SetAll();
	}
	CoreEntry* core = NULL;
	if (package != NULL) {
		do {
			core = package->GetIdleCore(index++);
		} while (core != NULL && !core->CPUMask().Matches(mask));
	}
	if (core == NULL) {
		ReadSpinLocker coreLocker(gCoreHeapsLock);
		index = 0;
		// No matching idle core; fall back to the least loaded core.
		do {
			core = gCoreLoadHeap.PeekMinimum(index++);
		} while (core != NULL && !core->CPUMask().Matches(mask));
		if (core == NULL) {
			index = 0;
			do {
				core = gCoreHighLoadHeap.PeekMinimum(index++);
			} while (core != NULL && !core->CPUMask().Matches(mask));
		}
	}

	ASSERT(core != NULL);
	return core;
}


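// Decide whether threadData should stay on its current core or migrate to
// the least loaded core matching its affinity mask. Migration happens only
// when the load gap exceeds kLoadDifference and moving the thread's share
// of the load would bring both cores closer to the average.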
static CoreEntry*
rebalance(const ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	CoreEntry* core = threadData->Core();
	ASSERT(core != NULL);

	// Get the least loaded core that matches the thread's affinity mask.
	ReadSpinLocker coreLocker(gCoreHeapsLock);
	CPUSet mask = threadData->GetCPUMask();
	if (mask.IsEmpty()) {
		// An empty affinity mask means the thread may run anywhere.
		mask.SetAll();
	}
	int32 index = 0;
	CoreEntry* other;
	do {
		other = gCoreLoadHeap.PeekMinimum(index++);
		if (other != NULL && other->CPUMask().IsEmpty())
			panic("other->CPUMask().IsEmpty()\n");
	} while (other != NULL && !other->CPUMask().Matches(mask));

	if (other == NULL) {
		index = 0;
		do {
			other = gCoreHighLoadHeap.PeekMinimum(index++);
		} while (other != NULL && !other->CPUMask().Matches(mask));
	}
	coreLocker.Unlock();
	ASSERT(other != NULL);

	// Check if the least loaded core is significantly less loaded than
	// the current one.
	int32 coreLoad = core->GetLoad();
	int32 otherLoad = other->GetLoad();
	if (other == core || otherLoad + kLoadDifference >= coreLoad)
		return core;

	// Migrate only if moving the thread would bring both core loads closer
	// to the average instead of merely shifting the imbalance.
	int32 difference = coreLoad - otherLoad - kLoadDifference;
	ASSERT(difference > 0);

	int32 threadLoad = threadData->GetLoad() / core->CPUCount();
	return difference >= threadLoad ? other : core;
}


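// Shift interrupt load away from this CPU: pick the heaviest IRQ currently
// assigned to it and, if this core carries noticeably more load than the
// least loaded core, reassign that IRQ to the least loaded CPU of that
// core. Nothing is done while idle or while the CPU's total IRQ load stays
// below kLowLoad.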
static void
rebalance_irqs(bool idle)
{
	SCHEDULER_ENTER_FUNCTION();

	if (idle)
		return;

	cpu_ent* cpu = get_cpu_struct();
	SpinLocker locker(cpu->irqs_lock);

	// Find the heaviest IRQ assigned to this CPU and sum up the total load.
	irq_assignment* chosen = NULL;
	irq_assignment* irq = (irq_assignment*)list_get_first_item(&cpu->irqs);

	int32 totalLoad = 0;
	while (irq != NULL) {
		if (chosen == NULL || chosen->load < irq->load)
			chosen = irq;
		totalLoad += irq->load;
		irq = (irq_assignment*)list_get_next_item(&cpu->irqs, irq);
	}

	locker.Unlock();

	if (chosen == NULL || totalLoad < kLowLoad)
		return;

	ReadSpinLocker coreLocker(gCoreHeapsLock);
	CoreEntry* other = gCoreLoadHeap.PeekMinimum();
	if (other == NULL)
		other = gCoreHighLoadHeap.PeekMinimum();
	coreLocker.Unlock();

	ASSERT(other != NULL);

	CoreEntry* core = CoreEntry::GetCore(cpu->cpu_num);
	if (other == core)
		return;
	if (other->GetLoad() + kLoadDifference >= core->GetLoad())
		return;

	// Only pick the target CPU once we know the move is worthwhile.
	int32 newCPU = other->CPUHeap()->PeekRoot()->ID();
	assign_io_interrupt_to_cpu(chosen->irq, newCPU);
}


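// The operations table exported to the scheduler core; the field comments
// below follow the scheduler_mode_operations declaration in
// scheduler_modes.h.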
scheduler_mode_operations gSchedulerLowLatencyMode = {
	"low latency",

	1000,		// base quantum, in microseconds
	100,		// minimal quantum, in microseconds
	{ 2, 5 },	// quantum multipliers

	5000,		// maximum latency, in microseconds

	switch_to_mode,
	set_cpu_enabled,
	has_cache_expired,
	choose_core,
	rebalance,
	rebalance_irqs,
};