xref: /haiku/src/system/boot/loader/kernel_args.cpp (revision 125183f9e5c136781f71c879faaeab43fdc3ea7b)
1 /*
2  * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include <OS.h>
8 
9 #include <string.h>
10 
11 #include <algorithm>
12 
13 #include <kernel.h>
14 #include <boot/stage2.h>
15 #include <boot/platform.h>
16 
17 
// Granularity of the kernel_args bump allocator below: memory is
// requested from the platform in chunks of this many pages.
static const size_t kChunkSize = 8 * B_PAGE_SIZE;

// The structure handed over to the kernel on boot.
kernel_args gKernelArgs;

// Bump-allocator state for kernel_args_malloc()/kernel_args_free():
static void* sFirstFree;			// next free byte in the current chunk
static void* sLast;					// start of the most recent allocation
static size_t sFree = kChunkSize;	// bytes left in the current chunk
25 
26 
27 static void
28 remove_range_index(addr_range* ranges, uint32& numRanges, uint32 index)
29 {
30 	if (index + 1 == numRanges) {
31 		// remove last range
32 		numRanges--;
33 		return;
34 	}
35 
36 	memmove(&ranges[index], &ranges[index + 1],
37 		sizeof(addr_range) * (numRanges - 1 - index));
38 	numRanges--;
39 }
40 
41 
42 static status_t
43 add_kernel_args_range(void* start, uint32 size)
44 {
45 	return insert_address_range(gKernelArgs.kernel_args_range,
46 		&gKernelArgs.num_kernel_args_ranges, MAX_KERNEL_ARGS_RANGE,
47 		(addr_t)start, size);
48 }
49 
50 
51 //	#pragma mark - addr_range utility
52 
53 
54 /*!	Inserts the specified (start, size) pair (aka range) in the
55 	addr_range array.
56 	It will extend existing ranges in order to have as little
57 	ranges in the array as possible.
58 	Returns B_OK on success, or B_ENTRY_NOT_FOUND if there was
59 	no free array entry available anymore.
60 */
61 extern "C" status_t
62 insert_address_range(addr_range* ranges, uint32* _numRanges, uint32 maxRanges,
63 	addr_t start, uint32 size)
64 {
65 	uint32 numRanges = *_numRanges;
66 
67 	start = ROUNDDOWN(start, B_PAGE_SIZE);
68 	size = ROUNDUP(size, B_PAGE_SIZE);
69 	addr_t end = start + size;
70 
71 	for (uint32 i = 0; i < numRanges; i++) {
72 		addr_t rangeStart = ranges[i].start;
73 		addr_t rangeEnd = rangeStart + ranges[i].size;
74 
75 		if (end < rangeStart || start > rangeEnd) {
76 			// ranges don't intersect or touch each other
77 			continue;
78 		}
79 		if (start >= rangeStart && end <= rangeEnd) {
80 			// range is already completely covered
81 			return B_OK;
82 		}
83 
84 		if (start < rangeStart) {
85 			// prepend to the existing range
86 			ranges[i].start = start;
87 			ranges[i].size += rangeStart - start;
88 		}
89 		if (end > ranges[i].start + ranges[i].size) {
90 			// append to the existing range
91 			ranges[i].size = end - ranges[i].start;
92 		}
93 
94 		// join ranges if possible
95 
96 		for (uint32 j = 0; j < numRanges; j++) {
97 			if (i == j)
98 				continue;
99 
100 			rangeStart = ranges[i].start;
101 			rangeEnd = rangeStart + ranges[i].size;
102 			addr_t joinStart = ranges[j].start;
103 			addr_t joinEnd = joinStart + ranges[j].size;
104 
105 			if (rangeStart <= joinEnd && joinEnd <= rangeEnd) {
106 				// join range that used to be before the current one, or
107 				// the one that's now entirely included by the current one
108 				if (joinStart < rangeStart) {
109 					ranges[i].size += rangeStart - joinStart;
110 					ranges[i].start = joinStart;
111 				}
112 
113 				remove_range_index(ranges, numRanges, j--);
114 			} else if (joinStart <= rangeEnd && joinEnd > rangeEnd) {
115 				// join range that used to be after the current one
116 				ranges[i].size += joinEnd - rangeEnd;
117 
118 				remove_range_index(ranges, numRanges, j--);
119 			}
120 		}
121 
122 		*_numRanges = numRanges;
123 		return B_OK;
124 	}
125 
126 	// no range matched, we need to create a new one
127 
128 	if (numRanges >= maxRanges)
129 		return B_ENTRY_NOT_FOUND;
130 
131 	ranges[numRanges].start = (addr_t)start;
132 	ranges[numRanges].size = size;
133 	(*_numRanges)++;
134 
135 	return B_OK;
136 }
137 
138 
139 extern "C" status_t
140 remove_address_range(addr_range* ranges, uint32* _numRanges, uint32 maxRanges,
141 	addr_t start, uint32 size)
142 {
143 	uint32 numRanges = *_numRanges;
144 
145 	addr_t end = ROUNDUP(start + size, B_PAGE_SIZE);
146 	start = ROUNDDOWN(start, B_PAGE_SIZE);
147 
148 	for (uint32 i = 0; i < numRanges; i++) {
149 		addr_t rangeStart = ranges[i].start;
150 		addr_t rangeEnd = rangeStart + ranges[i].size;
151 
152 		if (start <= rangeStart) {
153 			if (end <= rangeStart) {
154 				// no intersection
155 			} else if (end >= rangeEnd) {
156 				// remove the complete range
157 				remove_range_index(ranges, numRanges, i);
158 				i--;
159 			} else {
160 				// remove the head of the range
161 				ranges[i].start = end;
162 				ranges[i].size = rangeEnd - end;
163 			}
164 		} else if (end >= rangeEnd) {
165 			if (start < rangeEnd) {
166 				// remove the tail
167 				ranges[i].size = start - rangeStart;
168 			}	// else: no intersection
169 		} else {
170 			// rangeStart < start < end < rangeEnd
171 			// The ugly case: We have to remove something from the middle of
172 			// the range. We keep the head of the range and insert its tail
173 			// as a new range.
174 			ranges[i].size = start - rangeStart;
175 			return insert_address_range(ranges, _numRanges, maxRanges,
176 				end, rangeEnd - end);
177 		}
178 	}
179 
180 	*_numRanges = numRanges;
181 	return B_OK;
182 }
183 
184 
185 bool
186 get_free_address_range(addr_range *ranges, uint32 numRanges, addr_t base,
187 	size_t size, addr_t *_rangeBase)
188 {
189 	addr_t end = base + size - 1;
190 	if (end < base)
191 		return false;
192 
193 	// Note: We don't assume that the ranges are sorted, so we can't do this
194 	// in a simple loop. Instead we restart the loop whenever our range
195 	// intersects with an existing one.
196 
197 	for (uint32 i = 0; i < numRanges;) {
198 		addr_t rangeStart = ranges[i].start;
199 		addr_t rangeEnd = ranges[i].start + ranges[i].size - 1;
200 
201 		if (base <= rangeEnd && rangeStart <= end) {
202 			base = rangeEnd + 1;
203 			end = rangeEnd + size;
204 			if (base == 0 || end < base)
205 				return false;
206 
207 			i = 0;
208 			continue;
209 		}
210 
211 		i++;
212 	}
213 
214 	*_rangeBase = base;
215 	return true;
216 }
217 
218 
219 bool
220 is_address_range_covered(addr_range* ranges, uint32 numRanges, addr_t base,
221 	size_t size)
222 {
223 	// Note: We don't assume that the ranges are sorted, so we can't do this
224 	// in a simple loop. Instead we restart the loop whenever the start of the
225 	// given range intersects with an existing one.
226 
227 	for (uint32 i = 0; i < numRanges;) {
228 		addr_t rangeStart = ranges[i].start;
229 		addr_t rangeSize = ranges[i].size;
230 
231 		if (rangeStart <= base && rangeSize > base - rangeStart) {
232 			size_t intersect = std::min(rangeStart + rangeSize - base, size);
233 			base += intersect;
234 			size -= intersect;
235 			if (size == 0)
236 				return true;
237 
238 			i = 0;
239 			continue;
240 		}
241 
242 		i++;
243 	}
244 
245 	return false;
246 }
247 
248 
249 status_t
250 insert_physical_memory_range(addr_t start, uint32 size)
251 {
252 	return insert_address_range(gKernelArgs.physical_memory_range,
253 		&gKernelArgs.num_physical_memory_ranges, MAX_PHYSICAL_MEMORY_RANGE,
254 		start, size);
255 }
256 
257 
258 status_t
259 insert_physical_allocated_range(addr_t start, uint32 size)
260 {
261 	return insert_address_range(gKernelArgs.physical_allocated_range,
262 		&gKernelArgs.num_physical_allocated_ranges,
263 		MAX_PHYSICAL_ALLOCATED_RANGE, start, size);
264 }
265 
266 
267 status_t
268 insert_virtual_allocated_range(addr_t start, uint32 size)
269 {
270 	return insert_address_range(gKernelArgs.virtual_allocated_range,
271 		&gKernelArgs.num_virtual_allocated_ranges, MAX_VIRTUAL_ALLOCATED_RANGE,
272 		start, size);
273 }
274 
275 
276 //	#pragma mark - kernel_args allocations
277 
278 
279 /*!	This function can be used to allocate memory that is going
280 	to be passed over to the kernel. For example, the preloaded_image
281 	structures are allocated this way.
282 	The boot loader heap doesn't make it into the kernel!
283 */
284 extern "C" void*
285 kernel_args_malloc(size_t size)
286 {
287 	//dprintf("kernel_args_malloc(): %ld bytes (%ld bytes left)\n", size, sFree);
288 
289 	if (sFirstFree != NULL && size <= sFree) {
290 		// there is enough space in the current buffer
291 		void* address = sFirstFree;
292 		sFirstFree = (void*)((addr_t)sFirstFree + size);
293 		sLast = address;
294 		sFree -= size;
295 
296 		return address;
297 	}
298 
299 	if (size > kChunkSize / 2 && sFree < size) {
300 		// the block is so large, we'll allocate a new block for it
301 		void* block = NULL;
302 		if (platform_allocate_region(&block, size, B_READ_AREA | B_WRITE_AREA,
303 				false) != B_OK) {
304 			return NULL;
305 		}
306 
307 		if (add_kernel_args_range(block, size) != B_OK)
308 			panic("kernel_args max range to low!\n");
309 		return block;
310 	}
311 
312 	// just allocate a new block and "close" the old one
313 	void* block = NULL;
314 	if (platform_allocate_region(&block, kChunkSize, B_READ_AREA | B_WRITE_AREA,
315 			false) != B_OK) {
316 		return NULL;
317 	}
318 
319 	sFirstFree = (void*)((addr_t)block + size);
320 	sLast = block;
321 	sFree = kChunkSize - size;
322 	if (add_kernel_args_range(block, kChunkSize) != B_OK)
323 		panic("kernel_args max range to low!\n");
324 
325 	return block;
326 }
327 
328 
329 /*!	Convenience function that copies strdup() functions for the
330 	kernel args heap.
331 */
332 extern "C" char *
333 kernel_args_strdup(const char *string)
334 {
335 	if (string == NULL || string[0] == '\0')
336 		return NULL;
337 
338 	size_t length = strlen(string) + 1;
339 
340 	char *target = (char *)kernel_args_malloc(length);
341 	if (target == NULL)
342 		return NULL;
343 
344 	memcpy(target, string, length);
345 	return target;
346 }
347 
348 
349 /*!	This function frees a block allocated via kernel_args_malloc().
350 	It's very simple; it can only free the last allocation. It's
351 	enough for its current usage in the boot loader, though.
352 */
353 extern "C" void
354 kernel_args_free(void *block)
355 {
356 	if (sLast != block) {
357 		// sorry, we're dumb
358 		return;
359 	}
360 
361 	sFree = (addr_t)sFirstFree - (addr_t)sLast;
362 	sFirstFree = block;
363 	sLast = NULL;
364 }
365 
366