xref: /haiku/src/system/boot/loader/kernel_args.cpp (revision fc7456e9b1ec38c941134ed6d01c438cf289381e)
/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include <OS.h>

#include <string.h>

#include <algorithm>

#include <kernel.h>
#include <boot/stage2.h>
#include <boot/platform.h>


static const size_t kChunkSize = 16 * B_PAGE_SIZE;

kernel_args gKernelArgs;
KMessage gBootVolume;

static void* sFirstFree;
static void* sLast;
static size_t sFree = kChunkSize;


static status_t
add_kernel_args_range(void* start, size_t size)
{
	return insert_address_range(gKernelArgs.kernel_args_range,
		&gKernelArgs.num_kernel_args_ranges, MAX_KERNEL_ARGS_RANGE,
		(addr_t)start, size);
}


// #pragma mark - addr_range utility functions


static void
remove_range_index(addr_range* ranges, uint32& numRanges, uint32 index)
{
	if (index + 1 == numRanges) {
		// remove last range
		numRanges--;
		return;
	}

	memmove(&ranges[index], &ranges[index + 1],
		sizeof(addr_range) * (numRanges - 1 - index));
	numRanges--;
}


/*!	Inserts the specified (start, size) pair, i.e. range, into the
	addr_range array.
	Existing ranges are extended and merged where possible, keeping the
	number of ranges in the array as small as possible.
	Returns B_OK on success, or B_ENTRY_NOT_FOUND if a new array entry
	would have been needed, but none was available.
*/
extern "C" status_t
insert_address_range(addr_range* ranges, uint32* _numRanges, uint32 maxRanges,
	uint64 start, uint64 size)
{
	uint32 numRanges = *_numRanges;

	start = ROUNDDOWN(start, B_PAGE_SIZE);
	size = ROUNDUP(size, B_PAGE_SIZE);
	uint64 end = start + size;

	for (uint32 i = 0; i < numRanges; i++) {
		uint64 rangeStart = ranges[i].start;
		uint64 rangeEnd = rangeStart + ranges[i].size;

		if (end < rangeStart || start > rangeEnd) {
			// ranges don't intersect or touch each other
			continue;
		}
		if (start >= rangeStart && end <= rangeEnd) {
			// range is already completely covered
			return B_OK;
		}

		if (start < rangeStart) {
			// prepend to the existing range
			ranges[i].start = start;
			ranges[i].size += rangeStart - start;
		}
		if (end > ranges[i].start + ranges[i].size) {
			// append to the existing range
			ranges[i].size = end - ranges[i].start;
		}

		// join ranges if possible

		for (uint32 j = 0; j < numRanges; j++) {
			if (i == j)
				continue;

			rangeStart = ranges[i].start;
			rangeEnd = rangeStart + ranges[i].size;
			uint64 joinStart = ranges[j].start;
			uint64 joinEnd = joinStart + ranges[j].size;

			if (rangeStart <= joinEnd && joinEnd <= rangeEnd) {
				// join range that used to be before the current one, or
				// the one that's now entirely included by the current one
				if (joinStart < rangeStart) {
					ranges[i].size += rangeStart - joinStart;
					ranges[i].start = joinStart;
				}

				remove_range_index(ranges, numRanges, j--);
			} else if (joinStart <= rangeEnd && joinEnd > rangeEnd) {
				// join range that used to be after the current one
				ranges[i].size += joinEnd - rangeEnd;

				remove_range_index(ranges, numRanges, j--);
			}
		}

		*_numRanges = numRanges;
		return B_OK;
	}

	// no range matched, we need to create a new one

	if (numRanges >= maxRanges)
		return B_ENTRY_NOT_FOUND;

	ranges[numRanges].start = start;
	ranges[numRanges].size = size;
	(*_numRanges)++;

	return B_OK;
}
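

// A sketch (hypothetical, not part of the boot loader) of how the function
// above coalesces ranges; the constants assume B_PAGE_SIZE == 0x1000 and a
// made-up capacity of 4 entries.
#if 0
static void
example_insert_address_range()
{
	addr_range ranges[4] = {};
	uint32 numRanges = 0;

	// two disjoint ranges stay separate
	insert_address_range(ranges, &numRanges, 4, 0x1000, 0x1000);
	insert_address_range(ranges, &numRanges, 4, 0x3000, 0x1000);
		// numRanges == 2

	// inserting the page between them merges everything into one range
	insert_address_range(ranges, &numRanges, 4, 0x2000, 0x1000);
		// numRanges == 1, ranges[0] covers [0x1000, 0x4000)
}
#endif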


extern "C" status_t
remove_address_range(addr_range* ranges, uint32* _numRanges, uint32 maxRanges,
	uint64 start, uint64 size)
{
	uint32 numRanges = *_numRanges;

	uint64 end = ROUNDUP(start + size, B_PAGE_SIZE);
	start = ROUNDDOWN(start, B_PAGE_SIZE);

	for (uint32 i = 0; i < numRanges; i++) {
		uint64 rangeStart = ranges[i].start;
		uint64 rangeEnd = rangeStart + ranges[i].size;

		if (start <= rangeStart) {
			if (end <= rangeStart) {
				// no intersection
			} else if (end >= rangeEnd) {
				// remove the complete range
				remove_range_index(ranges, numRanges, i);
				i--;
			} else {
				// remove the head of the range
				ranges[i].start = end;
				ranges[i].size = rangeEnd - end;
			}
		} else if (end >= rangeEnd) {
			if (start < rangeEnd) {
				// remove the tail
				ranges[i].size = start - rangeStart;
			}	// else: no intersection
		} else {
			// rangeStart < start < end < rangeEnd
			// The ugly case: We have to remove something from the middle of
			// the range. We keep the head of the range and insert its tail
			// as a new range.
			ranges[i].size = start - rangeStart;
			return insert_address_range(ranges, _numRanges, maxRanges, end,
				rangeEnd - end);
		}
	}

	*_numRanges = numRanges;
	return B_OK;
}
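

// A hypothetical sketch of the "ugly case" above: punching a hole into the
// middle of a range splits it into a head and a tail (B_PAGE_SIZE == 0x1000
// assumed).
#if 0
static void
example_remove_address_range()
{
	addr_range ranges[4] = {};
	uint32 numRanges = 0;
	insert_address_range(ranges, &numRanges, 4, 0x1000, 0x3000);
		// one range covering [0x1000, 0x4000)

	remove_address_range(ranges, &numRanges, 4, 0x2000, 0x1000);
		// numRanges == 2: head [0x1000, 0x2000), tail [0x3000, 0x4000)
}
#endif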


bool
get_free_address_range(addr_range* ranges, uint32 numRanges, uint64 base,
	uint64 size, uint64* _rangeBase)
{
	uint64 end = base + size - 1;
	if (end < base)
		return false;

	// Note: We don't assume that the ranges are sorted, so we can't do this
	// in a simple loop. Instead we restart the loop whenever our range
	// intersects with an existing one.

	for (uint32 i = 0; i < numRanges;) {
		uint64 rangeStart = ranges[i].start;
		uint64 rangeEnd = ranges[i].start + ranges[i].size - 1;

		if (base <= rangeEnd && rangeStart <= end) {
			base = rangeEnd + 1;
			end = rangeEnd + size;
			if (base == 0 || end < base)
				return false;

			i = 0;
			continue;
		}

		i++;
	}

	*_rangeBase = base;
	return true;
}
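

// A hypothetical sketch of the gap search above: with [0x1000, 0x3000)
// occupied, a one-page request starting at 0x1000 gets bumped past the
// occupied range (B_PAGE_SIZE == 0x1000 assumed).
#if 0
static void
example_get_free_address_range()
{
	addr_range ranges[4] = {};
	uint32 numRanges = 0;
	insert_address_range(ranges, &numRanges, 4, 0x1000, 0x2000);

	uint64 base;
	if (get_free_address_range(ranges, numRanges, 0x1000, 0x1000, &base)) {
		// base == 0x3000, the first page past the occupied range
	}
}
#endif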


bool
is_address_range_covered(addr_range* ranges, uint32 numRanges, uint64 base,
	uint64 size)
{
	// Note: We don't assume that the ranges are sorted, so we can't do this
	// in a simple loop. Instead we restart the loop whenever the start of the
	// given range intersects with an existing one.

	for (uint32 i = 0; i < numRanges;) {
		uint64 rangeStart = ranges[i].start;
		uint64 rangeSize = ranges[i].size;

		if (rangeStart <= base && rangeSize > base - rangeStart) {
			uint64 intersect = std::min(rangeStart + rangeSize - base, size);
			base += intersect;
			size -= intersect;
			if (size == 0)
				return true;

			i = 0;
			continue;
		}

		i++;
	}

	return false;
}
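

// A hypothetical sketch: coverage may be stitched together from several
// ranges. Here [0x1800, 0x2800) is covered jointly by two adjacent ranges.
#if 0
static void
example_is_address_range_covered()
{
	addr_range ranges[2] = {
		{ 0x1000, 0x1000 },	// [0x1000, 0x2000)
		{ 0x2000, 0x1000 }	// [0x2000, 0x3000)
	};

	bool covered = is_address_range_covered(ranges, 2, 0x1800, 0x1000);
		// true: 0x800 bytes come from the first range, 0x800 from the second
}
#endif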


extern "C" uint64
total_address_ranges_size(addr_range* ranges, uint32 numRanges)
{
	uint64 total = 0;
	for (uint32 i = 0; i < numRanges; i++)
		total += ranges[i].size;
	return total;
}


void
sort_address_ranges(addr_range* ranges, uint32 numRanges)
{
	// TODO: This is a pretty sucky bubble sort implementation!
	bool done;

	do {
		done = true;
		for (uint32 i = 1; i < numRanges; i++) {
			if (ranges[i].start < ranges[i - 1].start) {
				done = false;
				std::swap(ranges[i], ranges[i - 1]);
			}
		}
	} while (!done);
}


// #pragma mark - kernel args range functions


status_t
insert_physical_memory_range(uint64 start, uint64 size)
{
	return insert_address_range(gKernelArgs.physical_memory_range,
		&gKernelArgs.num_physical_memory_ranges, MAX_PHYSICAL_MEMORY_RANGE,
		start, size);
}


status_t
remove_physical_memory_range(uint64 start, uint64 size)
{
	return remove_address_range(gKernelArgs.physical_memory_range,
		&gKernelArgs.num_physical_memory_ranges, MAX_PHYSICAL_MEMORY_RANGE,
		start, size);
}


uint64
total_physical_memory()
{
	return total_address_ranges_size(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
}


status_t
insert_physical_allocated_range(uint64 start, uint64 size)
{
	return insert_address_range(gKernelArgs.physical_allocated_range,
		&gKernelArgs.num_physical_allocated_ranges,
		MAX_PHYSICAL_ALLOCATED_RANGE, start, size);
}


status_t
remove_physical_allocated_range(uint64 start, uint64 size)
{
	return remove_address_range(gKernelArgs.physical_allocated_range,
		&gKernelArgs.num_physical_allocated_ranges,
		MAX_PHYSICAL_ALLOCATED_RANGE, start, size);
}


status_t
insert_virtual_allocated_range(uint64 start, uint64 size)
{
	return insert_address_range(gKernelArgs.virtual_allocated_range,
		&gKernelArgs.num_virtual_allocated_ranges, MAX_VIRTUAL_ALLOCATED_RANGE,
		start, size);
}


status_t
remove_virtual_allocated_range(uint64 start, uint64 size)
{
	return remove_address_range(gKernelArgs.virtual_allocated_range,
		&gKernelArgs.num_virtual_allocated_ranges, MAX_VIRTUAL_ALLOCATED_RANGE,
		start, size);
}


#if B_HAIKU_PHYSICAL_BITS > 32

void
ignore_physical_memory_ranges_beyond_4gb()
{
	// sort
	sort_address_ranges(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);

	static const uint64 kLimit = (uint64)1 << 32;

	// remove everything past 4 GB
	for (uint32 i = gKernelArgs.num_physical_memory_ranges; i > 0; i--) {
		addr_range& range = gKernelArgs.physical_memory_range[i - 1];
		if (range.start >= kLimit) {
			// the complete range is beyond the limit
			dprintf("ignore_physical_memory_ranges_beyond_4gb(): ignoring "
				"range: %#" B_PRIx64 " - %#" B_PRIx64 "\n", range.start,
				range.start + range.size);
			gKernelArgs.ignored_physical_memory += range.size;
			gKernelArgs.num_physical_memory_ranges = i - 1;
			continue;
		}

		if (kLimit - range.start < range.size) {
			// the range is partially beyond the limit; trim it
			dprintf("ignore_physical_memory_ranges_beyond_4gb(): ignoring "
				"range: %#" B_PRIx64 " - %#" B_PRIx64 "\n", kLimit,
				range.start + range.size);
			gKernelArgs.ignored_physical_memory
				+= range.size - (kLimit - range.start);
			range.size = kLimit - range.start;
		}

		break;
	}
}

#endif	// B_HAIKU_PHYSICAL_BITS > 32
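

// A hypothetical illustration of the trimming above (only meaningful when
// B_HAIKU_PHYSICAL_BITS > 32): a 2 GB range starting at 3 GB straddles the
// limit, so it is cut at 4 GB and the excess 1 GB is booked as ignored.
#if 0
static void
example_ignore_beyond_4gb()
{
	insert_physical_memory_range(0xc0000000, 0x80000000);
	ignore_physical_memory_ranges_beyond_4gb();
		// the range is now [0xc0000000, 0x100000000) and
		// gKernelArgs.ignored_physical_memory grew by 0x40000000 (1 GB)
}
#endif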


// #pragma mark - kernel_args allocations


/*!	Convenience function that works like strdup(), but allocates the
	string copy on the kernel args heap.
*/
char*
kernel_args_strdup(const char* string)
{
	if (string == NULL || string[0] == '\0')
		return NULL;

	size_t length = strlen(string) + 1;

	char* target = (char*)kernel_args_malloc(length);
	if (target == NULL)
		return NULL;

	memcpy(target, string, length);
	return target;
}


/*!	This function can be used to allocate (optionally aligned) memory that
	is going to be passed over to the kernel. For example, the preloaded_image
	structures are allocated this way.
	The boot loader heap doesn't make it into the kernel!
*/
void*
kernel_args_malloc(size_t size, uint8 alignment)
{
	//dprintf("kernel_args_malloc(): %ld bytes (%ld bytes left)\n", size, sFree);

	#define ALIGN(addr, align) (((addr) + align - 1) & ~(align - 1))
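	// (Illustrative note: ALIGN rounds up to a power-of-two boundary, e.g.
	// ALIGN(0x1003, 8) == (0x1003 + 7) & ~7 == 0x1008. Reserving
	// alignment - 1 extra bytes below guarantees an aligned address fits.)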
	size_t alignedSize = size + alignment - 1;

	if (sFirstFree != NULL && alignedSize <= sFree) {
		// there is enough space in the current buffer
		void* address = (void*)ALIGN((addr_t)sFirstFree, alignment);
		sFirstFree = (void*)((addr_t)sFirstFree + alignedSize);
		sLast = address;
		sFree -= alignedSize;

		return address;
	}

	if (alignedSize > kChunkSize / 2 && sFree < alignedSize) {
		// the block is so large, we'll allocate a new block for it
		void* block = NULL;
		if (platform_allocate_region(&block, alignedSize,
				B_READ_AREA | B_WRITE_AREA, false) != B_OK) {
			return NULL;
		}

		// Translate the address if needed on the current platform
		addr_t translatedBlock;
		platform_bootloader_address_to_kernel_address(block, &translatedBlock);
		if (add_kernel_args_range((void*)translatedBlock, size) != B_OK)
			panic("kernel_args max range too low!\n");

		return (void*)ALIGN((addr_t)block, alignment);
	}

	// just allocate a new block and "close" the old one
	void* block = NULL;
	if (platform_allocate_region(&block, kChunkSize, B_READ_AREA | B_WRITE_AREA,
			false) != B_OK) {
		return NULL;
	}

	sFirstFree = (void*)((addr_t)block + alignedSize);
	sLast = block;
	sFree = kChunkSize - alignedSize;

	// Translate the address if needed on the current platform
	addr_t translatedBlock;
	platform_bootloader_address_to_kernel_address(block, &translatedBlock);
	if (add_kernel_args_range((void*)translatedBlock, kChunkSize) != B_OK)
		panic("kernel_args max range too low!\n");

	return (void*)ALIGN((addr_t)block, alignment);
}
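

// A hypothetical usage sketch: structures handed over to the kernel are
// allocated from this heap, and strings are duplicated into it the same
// way. example_entry and the alignment value are made up for illustration.
#if 0
static void
example_kernel_args_alloc()
{
	struct example_entry { char* name; };

	example_entry* entry = (example_entry*)kernel_args_malloc(
		sizeof(example_entry), sizeof(void*));
	if (entry != NULL)
		entry->name = kernel_args_strdup("boot device");
}
#endif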


/*!	This function frees a block allocated via kernel_args_malloc().
	It's very simple; it can only free the most recent allocation. That's
	enough for its current usage in the boot loader, though.
*/
void
kernel_args_free(void* block)
{
	if (sLast != block) {
		// sorry, we're dumb
		return;
	}

	sFree = (addr_t)sFirstFree - (addr_t)sLast;
	sFirstFree = block;
	sLast = NULL;
}
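

// A hypothetical sketch of the LIFO restriction described above: only the
// most recent allocation is reclaimed; freeing anything else is a no-op.
#if 0
static void
example_kernel_args_free()
{
	void* a = kernel_args_malloc(128, sizeof(void*));
	void* b = kernel_args_malloc(128, sizeof(void*));

	kernel_args_free(a);	// not the last allocation: silently ignored
	kernel_args_free(b);	// last allocation: its space can be reused
}
#endif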