/*
 * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008, Jérôme Duval.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <new>

#include <KernelExport.h>

#include <smp.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>

#include <arch/vm.h>
#include <arch/int.h>
#include <arch/cpu.h>

#include <arch/x86/bios.h>

#include "x86_paging.h"


//#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

// 0: disabled, 1: some, 2: more
#define TRACE_MTRR_ARCH_VM 1

#if TRACE_MTRR_ARCH_VM >= 1
#	define TRACE_MTRR(x...) dprintf(x)
#else
#	define TRACE_MTRR(x...)
#endif

#if TRACE_MTRR_ARCH_VM >= 2
#	define TRACE_MTRR2(x...) dprintf(x)
#else
#	define TRACE_MTRR2(x...)
#endif


void *gDmaAddress;


struct memory_type_range : DoublyLinkedListLinkImpl<memory_type_range> {
	uint64				base;
	uint64				size;
	uint32				type;
	area_id				area;
};


struct memory_type_range_point
		: DoublyLinkedListLinkImpl<memory_type_range_point> {
	uint64				address;
	memory_type_range*	range;

	bool IsStart() const	{ return range->base == address; }

	bool operator<(const memory_type_range_point& other) const
	{
		return address < other.address;
	}
};


typedef DoublyLinkedList<memory_type_range> MemoryTypeRangeList;

static mutex sMemoryTypeLock = MUTEX_INITIALIZER("memory type ranges");
static MemoryTypeRangeList sMemoryTypeRanges;
static int32 sMemoryTypeRangeCount = 0;

static const uint32 kMaxMemoryTypeRegisters = 32;
static x86_mtrr_info sMemoryTypeRegisters[kMaxMemoryTypeRegisters];
static uint32 sMemoryTypeRegisterCount;
static uint32 sMemoryTypeRegistersUsed;

static memory_type_range* sTemporaryRanges = NULL;
static memory_type_range_point* sTemporaryRangePoints = NULL;
static int32 sTemporaryRangeCount = 0;
static int32 sTemporaryRangePointCount = 0;


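/*!	Hands the MTRR settings accumulated in sMemoryTypeRegisters to
	x86_set_mtrrs(), with write-back as the default type for all memory not
	covered by one of the entries.
*/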
static void
set_mtrrs()
{
	x86_set_mtrrs(IA32_MTR_WRITE_BACK, sMemoryTypeRegisters,
		sMemoryTypeRegistersUsed);

#if TRACE_MTRR_ARCH_VM
	TRACE_MTRR("set MTRRs to:\n");
	for (uint32 i = 0; i < sMemoryTypeRegistersUsed; i++) {
		const x86_mtrr_info& info = sMemoryTypeRegisters[i];
		TRACE_MTRR("  mtrr: %2" B_PRIu32 ": base: %#10" B_PRIx64 ", size: %#10"
			B_PRIx64 ", type: %u\n", i, info.base, info.size,
			info.type);
	}
#endif
}


static bool
add_used_mtrr(uint64 base, uint64 size, uint32 type)
{
	if (sMemoryTypeRegistersUsed == sMemoryTypeRegisterCount) {
		if (sMemoryTypeRegisterCount > 0) {
			dprintf("add_used_mtrr(%#" B_PRIx64 ", %#" B_PRIx64 ", %" B_PRIu32
				"): out of MTRRs!\n", base, size, type);
		}
		return false;
	}

	x86_mtrr_info& mtrr = sMemoryTypeRegisters[sMemoryTypeRegistersUsed++];
	mtrr.base = base;
	mtrr.size = size;
	mtrr.type = type;

	return true;
}


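/*!	Covers the given range with MTRRs by splitting it into power-of-two sized
	and aligned chunks: whenever \a base or \a size has the currently examined
	bit set, a chunk of that size is carved off the front of the range (if the
	base is misaligned at that bit) or off its back. As an illustrative
	example (not taken from actual usage): base 0xa0000 with size 0x60000
	would be covered by two MTRRs, 0xa0000/0x20000 and 0xc0000/0x40000.
	Returns \c false, if we run out of MTRRs.
*/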
static bool
add_mtrrs_for_range(uint64 base, uint64 size, uint32 type)
{
	for (uint64 interval = B_PAGE_SIZE; size > 0; interval <<= 1) {
		if (((base | size) & interval) != 0) {
			if ((base & interval) != 0) {
				if (!add_used_mtrr(base, interval, type))
					return false;
				base += interval;
			} else {
				if (!add_used_mtrr(base + size - interval, interval, type))
					return false;
			}

			size -= interval;
		}
	}

	return true;
}


static memory_type_range*
find_range(area_id areaID)
{
	for (MemoryTypeRangeList::Iterator it = sMemoryTypeRanges.GetIterator();
			memory_type_range* range = it.Next();) {
		if (range->area == areaID)
			return range;
	}

	return NULL;
}


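/*!	Extends and joins the ranges of the given \a type in \a ranges to make
	them cheaper to cover with MTRRs: each range's base is aligned down and
	its end aligned up to the greatest power of two that does not make the
	range intersect a differently typed one. Subsequent ranges that end up
	completely inside the extended range are removed, one that is only
	partially covered is cut accordingly. If \a removeRanges is \c true, the
	processed ranges are removed from the list as well.
*/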
static void
optimize_memory_ranges(MemoryTypeRangeList& ranges, uint32 type,
	bool removeRanges)
{
	uint64 previousEnd = 0;
	uint64 nextStart = 0;
	MemoryTypeRangeList::Iterator it = ranges.GetIterator();
	memory_type_range* range = it.Next();
	while (range != NULL) {
		if (range->type != type) {
			previousEnd = range->base + range->size;
			nextStart = 0;
			range = it.Next();
			continue;
		}

		// find the start of the next range we cannot join this one with
		if (nextStart == 0) {
			MemoryTypeRangeList::Iterator nextIt = it;
			while (memory_type_range* nextRange = nextIt.Next()) {
				if (nextRange->type != range->type) {
					nextStart = nextRange->base;
					break;
				}
			}

			if (nextStart == 0) {
				// no upper limit -- set an artificial one, so we don't need to
				// special case below
				nextStart = (uint64)1 << 32;
			}
		}

		// Align the range's base and end to the greatest power of two
		// possible. As long as we can align both without intersecting any
		// differently typed range, we can extend the range without making it
		// more complicated. Once one side hits a limit, we need to be careful.
		// We can still continue aligning the other side, if the range crosses
		// the power of two boundary.
		uint64 rangeBase = range->base;
		uint64 rangeEnd = rangeBase + range->size;
		uint64 interval = B_PAGE_SIZE * 2;
		while (true) {
			uint64 alignedBase = rangeBase & ~(interval - 1);
			uint64 alignedEnd = (rangeEnd + interval - 1) & ~(interval - 1);

			if (alignedBase < previousEnd)
				alignedBase += interval;

			if (alignedEnd > nextStart)
				alignedEnd -= interval;

			if (alignedBase >= alignedEnd)
				break;

			rangeBase = std::min(rangeBase, alignedBase);
			rangeEnd = std::max(rangeEnd, alignedEnd);

			interval <<= 1;
		}

		range->base = rangeBase;
		range->size = rangeEnd - rangeBase;

		if (removeRanges)
			it.Remove();

		previousEnd = rangeEnd;

		// Skip the subsequent ranges we have swallowed and possibly cut one
		// we now partially intersect with.
		while ((range = it.Next()) != NULL) {
			if (range->base >= rangeEnd)
				break;

			if (range->base + range->size > rangeEnd) {
				// we partially intersect -- cut the range
				range->size = range->base + range->size - rangeEnd;
				range->base = rangeEnd;
				break;
			}

			// we have swallowed this range completely
			range->size = 0;
			it.Remove();
		}
	}
}


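/*!	Makes sure the temporary ranges and range points arrays can hold at least
	\a count elements each, reallocating them (sized to the next power of two,
	at least 8) if necessary. Returns \c false, if an allocation fails.
*/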
static bool
ensure_temporary_ranges_space(int32 count)
{
	if (sTemporaryRangeCount >= count && sTemporaryRangePointCount >= count)
		return true;

	// round count to power of 2
	int32 unalignedCount = count;
	count = 8;
	while (count < unalignedCount)
		count <<= 1;

	// resize ranges array
	if (sTemporaryRangeCount < count) {
		memory_type_range* ranges = new(std::nothrow) memory_type_range[count];
		if (ranges == NULL)
			return false;

		delete[] sTemporaryRanges;

		sTemporaryRanges = ranges;
		sTemporaryRangeCount = count;
	}

	// resize points array
	if (sTemporaryRangePointCount < count) {
		memory_type_range_point* points
			= new(std::nothrow) memory_type_range_point[count];
		if (points == NULL)
			return false;

		delete[] sTemporaryRangePoints;

		sTemporaryRangePoints = points;
		sTemporaryRangePointCount = count;
	}

	return true;
}


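/*!	Recomputes the MTRR setup from sMemoryTypeRanges and applies it: the
	registered ranges are first converted into a list of non-overlapping
	effective ranges (overlaps are resolved towards the stricter type), those
	are then extended/joined to become more MTRR-friendly, and finally the
	resulting MTRRs are computed and set. The caller must hold
	sMemoryTypeLock.
*/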
status_t
update_mtrrs()
{
	// resize the temporary points/ranges arrays, if necessary
	if (!ensure_temporary_ranges_space(sMemoryTypeRangeCount * 2))
		return B_NO_MEMORY;

	// get the range points and sort them
	memory_type_range_point* rangePoints = sTemporaryRangePoints;
	int32 pointCount = 0;
	for (MemoryTypeRangeList::Iterator it = sMemoryTypeRanges.GetIterator();
			memory_type_range* range = it.Next();) {
		rangePoints[pointCount].address = range->base;
		rangePoints[pointCount++].range = range;
		rangePoints[pointCount].address = range->base + range->size;
		rangePoints[pointCount++].range = range;
	}

	std::sort(rangePoints, rangePoints + pointCount);

#if TRACE_MTRR_ARCH_VM >= 2
	TRACE_MTRR2("memory type range points:\n");
	for (int32 i = 0; i < pointCount; i++) {
		TRACE_MTRR2("%12" B_PRIx64 " (%p)\n", rangePoints[i].address,
			rangePoints[i].range);
	}
#endif

	// Compute the effective ranges. When ranges overlap, we go with the
	// stricter requirement. The types are not necessarily totally ordered, so
	// the order we use below is not always correct. To keep it simple we
	// consider it the responsibility of the callers not to define overlapping
	// memory ranges with uncomparable types.

	memory_type_range* ranges = sTemporaryRanges;
	typedef DoublyLinkedList<memory_type_range_point> PointList;
	PointList pendingPoints;
	memory_type_range* activeRange = NULL;
	int32 rangeCount = 0;

	for (int32 i = 0; i < pointCount; i++) {
		memory_type_range_point* point = &rangePoints[i];
		bool terminateRange = false;
		if (point->IsStart()) {
			// a range start point
			pendingPoints.Add(point);
			if (activeRange != NULL && activeRange->type > point->range->type)
				terminateRange = true;
		} else {
			// a range end point -- remove the pending start point
			for (PointList::Iterator it = pendingPoints.GetIterator();
					memory_type_range_point* pendingPoint = it.Next();) {
				if (pendingPoint->range == point->range) {
					it.Remove();
					break;
				}
			}

			if (point->range == activeRange)
				terminateRange = true;
		}

		if (terminateRange) {
			ranges[rangeCount].size = point->address - ranges[rangeCount].base;
			rangeCount++;
			activeRange = NULL;
		}

		if (activeRange != NULL || pendingPoints.IsEmpty())
			continue;

		// we need to start a new range -- find the strictest pending range
		for (PointList::Iterator it = pendingPoints.GetIterator();
				memory_type_range_point* pendingPoint = it.Next();) {
			memory_type_range* pendingRange = pendingPoint->range;
			if (activeRange == NULL || activeRange->type > pendingRange->type)
				activeRange = pendingRange;
		}

		memory_type_range* previousRange = rangeCount > 0
			? &ranges[rangeCount - 1] : NULL;
		if (previousRange == NULL || previousRange->type != activeRange->type
				|| previousRange->base + previousRange->size
					< activeRange->base) {
			// we can't join with the previous range -- add a new one
			ranges[rangeCount].base = point->address;
			ranges[rangeCount].type = activeRange->type;
		} else
			rangeCount--;
	}

#if TRACE_MTRR_ARCH_VM >= 2
	TRACE_MTRR2("effective memory type ranges:\n");
	for (int32 i = 0; i < rangeCount; i++) {
		TRACE_MTRR2("%12" B_PRIx64 " - %12" B_PRIx64 ": %" B_PRIu32 "\n",
			ranges[i].base, ranges[i].base + ranges[i].size, ranges[i].type);
	}
#endif

	// Extend ranges to be more MTRR-friendly. A range is MTRR-friendly when
	// it has a power of two size and a base address aligned to the size. For
	// ranges without this property we need more than one MTRR. We improve
	// MTRR-friendliness by aligning a range's base and end address to the
	// greatest power of two (base rounded down, end up) such that the extended
	// range does not intersect with any other differently typed range. We join
	// equally typed ranges, if possible. There are two exceptions to the
	// intersection requirement: Uncached ranges may intersect with any other
	// range; the resulting type will still be uncached. Hence we can ignore
	// uncached ranges when extending the other ranges. Write-through ranges
	// may intersect with write-back ranges; the resulting type will be
	// write-through. Hence we can ignore write-through ranges when extending
	// write-back ranges.

	MemoryTypeRangeList rangeList;
	for (int32 i = 0; i < rangeCount; i++)
		rangeList.Add(&ranges[i]);

	static const uint32 kMemoryTypes[] = {
		IA32_MTR_UNCACHED,
		IA32_MTR_WRITE_COMBINING,
		IA32_MTR_WRITE_PROTECTED,
		IA32_MTR_WRITE_THROUGH,
		IA32_MTR_WRITE_BACK
	};
	static const int32 kMemoryTypeCount = sizeof(kMemoryTypes)
		/ sizeof(*kMemoryTypes);

	for (int32 i = 0; i < kMemoryTypeCount; i++) {
		uint32 type = kMemoryTypes[i];

		// Remove uncached and write-through ranges after processing them.
		// This lets us leverage their intersection property with any other
		// range respectively with write-back ranges.
		bool removeRanges = type == IA32_MTR_UNCACHED
			|| type == IA32_MTR_WRITE_THROUGH;

		optimize_memory_ranges(rangeList, type, removeRanges);
	}

#if TRACE_MTRR_ARCH_VM >= 2
	TRACE_MTRR2("optimized memory type ranges:\n");
	for (int32 i = 0; i < rangeCount; i++) {
		if (ranges[i].size > 0) {
			TRACE_MTRR2("%12" B_PRIx64 " - %12" B_PRIx64 ": %" B_PRIu32 "\n",
				ranges[i].base, ranges[i].base + ranges[i].size,
				ranges[i].type);
		}
	}
#endif

	// compute the mtrrs from the ranges
	sMemoryTypeRegistersUsed = 0;
	for (int32 i = 0; i < kMemoryTypeCount; i++) {
		uint32 type = kMemoryTypes[i];

		// skip write-back ranges -- that'll be the default type anyway
		if (type == IA32_MTR_WRITE_BACK)
			continue;

		for (int32 i = 0; i < rangeCount; i++) {
			if (ranges[i].size == 0 || ranges[i].type != type)
				continue;

			add_mtrrs_for_range(ranges[i].base, ranges[i].size, type);
		}
	}

	set_mtrrs();

	return B_OK;
}


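/*!	Registers a memory type range for the given area -- a negative \a areaID
	denotes a range that is not associated with an area (as used for the
	physical memory ranges) -- translates the B_MTR_* constant to the
	corresponding IA32_MTR_* type, and updates the MTRRs. If updating the
	MTRRs fails, the addition respectively the type change is reverted.
*/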
static status_t
add_memory_type_range(area_id areaID, uint64 base, uint64 size, uint32 type)
{
	// translate the type
	if (type == 0)
		return B_OK;

	switch (type) {
		case B_MTR_UC:
			type = IA32_MTR_UNCACHED;
			break;
		case B_MTR_WC:
			type = IA32_MTR_WRITE_COMBINING;
			break;
		case B_MTR_WT:
			type = IA32_MTR_WRITE_THROUGH;
			break;
		case B_MTR_WP:
			type = IA32_MTR_WRITE_PROTECTED;
			break;
		case B_MTR_WB:
			type = IA32_MTR_WRITE_BACK;
			break;
		default:
			return B_BAD_VALUE;
	}

	TRACE_MTRR("add_memory_type_range(%" B_PRId32 ", %#" B_PRIx64 ", %#"
		B_PRIx64 ", %" B_PRIu32 ")\n", areaID, base, size, type);

	MutexLocker locker(sMemoryTypeLock);

	memory_type_range* range = areaID >= 0 ? find_range(areaID) : NULL;
	int32 oldRangeType = -1;
	if (range != NULL) {
		if (range->base != base || range->size != size)
			return B_BAD_VALUE;
		if (range->type == type)
			return B_OK;

		oldRangeType = range->type;
		range->type = type;
	} else {
		range = new(std::nothrow) memory_type_range;
		if (range == NULL)
			return B_NO_MEMORY;

		range->area = areaID;
		range->base = base;
		range->size = size;
		range->type = type;
		sMemoryTypeRanges.Add(range);
		sMemoryTypeRangeCount++;
	}

	status_t error = update_mtrrs();
	if (error != B_OK) {
		// revert the addition of the range/change of its type
		if (oldRangeType < 0) {
			sMemoryTypeRanges.Remove(range);
			sMemoryTypeRangeCount--;
			delete range;
		} else
			range->type = oldRangeType;

		update_mtrrs();
		return error;
	}

	return B_OK;
}


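/*!	Removes the memory type range registered for the given area, if any, and
	updates the MTRRs accordingly.
*/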
static void
remove_memory_type_range(area_id areaID)
{
	MutexLocker locker(sMemoryTypeLock);

	memory_type_range* range = find_range(areaID);
	if (range != NULL) {
		TRACE_MTRR("remove_memory_type_range(%" B_PRId32 ", %#" B_PRIx64 ", %#"
			B_PRIx64 ", %" B_PRIu32 ")\n", range->area, range->base,
			range->size, range->type);

		sMemoryTypeRanges.Remove(range);
		sMemoryTypeRangeCount--;
		delete range;

		update_mtrrs();
	} else {
		dprintf("remove_memory_type_range(): no range known for area %" B_PRId32
			"\n", areaID);
	}
}


//	#pragma mark -


status_t
arch_vm_init(kernel_args *args)
{
	TRACE(("arch_vm_init: entry\n"));
	return 0;
}


/*!	Marks DMA region as in-use, and maps it into the kernel space */
status_t
arch_vm_init_post_area(kernel_args *args)
{
	area_id id;

	TRACE(("arch_vm_init_post_area: entry\n"));

	// account for DMA area and mark the pages unusable
	vm_mark_page_range_inuse(0x0, 0xa0000 / B_PAGE_SIZE);

	// map 0 - 0xa0000 directly
	id = map_physical_memory("dma_region", (void *)0x0, 0xa0000,
		B_ANY_KERNEL_ADDRESS | B_MTR_WB,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, &gDmaAddress);
	if (id < 0) {
		panic("arch_vm_init_post_area: unable to map dma region\n");
		return B_NO_MEMORY;
	}

	return bios_init();
}


/*!	Gets rid of all yet unmapped (and therefore now unused) page tables */
status_t
arch_vm_init_end(kernel_args *args)
{
	TRACE(("arch_vm_init_end: entry\n"));

	// throw away anything in the kernel_args.pgtable[] that's not yet mapped
	vm_free_unused_boot_loader_range(KERNEL_BASE,
		args->arch_args.virtual_end - KERNEL_BASE);

	return B_OK;
}


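/*!	Sets up MTRR usage once the x86 CPU modules are available: queries the
	number of supported MTRRs and registers the physical memory ranges as
	write-back.
*/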
status_t
arch_vm_init_post_modules(kernel_args *args)
{
	// the x86 CPU modules are now accessible

	sMemoryTypeRegisterCount = x86_count_mtrrs();
	if (sMemoryTypeRegisterCount == 0)
		return B_OK;

	// not very likely, but play safe here
	if (sMemoryTypeRegisterCount > kMaxMemoryTypeRegisters)
		sMemoryTypeRegisterCount = kMaxMemoryTypeRegisters;

	// set the physical memory ranges to write-back mode
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		add_memory_type_range(-1, args->physical_memory_range[i].start,
			args->physical_memory_range[i].size, B_MTR_WB);
	}

	return B_OK;
}


void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
	// This function is only invoked when a userland thread is in the process
	// of dying. It switches to the kernel team and does whatever cleanup is
	// necessary (in case it is the team's main thread, it will delete the
	// team).
	// It is however not necessary to change the page directory. Userland
	// teams' page directories include all kernel mappings as well. Furthermore
	// our arch specific translation map data objects are ref-counted, so they
	// won't go away as long as they are still used on any CPU.
}


bool
arch_vm_supports_protection(uint32 protection)
{
	// x86 always has the same read/write properties for userland and the
	// kernel.
	// That's why we do not support user-read/kernel-write access. While the
	// other way around is not supported either, we don't care in this case
	// and give the kernel full access.
	if ((protection & (B_READ_AREA | B_WRITE_AREA)) == B_READ_AREA
		&& protection & B_KERNEL_WRITE_AREA)
		return false;

	return true;
}


void
arch_vm_unset_memory_type(struct VMArea *area)
{
	if (area->memory_type == 0)
		return;

	remove_memory_type_range(area->id);
}


status_t
arch_vm_set_memory_type(struct VMArea *area, addr_t physicalBase,
	uint32 type)
{
	area->memory_type = type >> MEMORY_TYPE_SHIFT;
	return add_memory_type_range(area->id, physicalBase, area->Size(), type);
}