1393fceb5SAxel Dörfler /* 2dac21d8bSIngo Weinhold * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de. 3b20d05b4SJérôme Duval * Copyright 2008, Jérôme Duval. 4bb163c02SIngo Weinhold * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. 5393fceb5SAxel Dörfler * Distributed under the terms of the MIT License. 6393fceb5SAxel Dörfler * 7393fceb5SAxel Dörfler * Copyright 2001, Travis Geiselbrecht. All rights reserved. 8393fceb5SAxel Dörfler * Distributed under the terms of the NewOS License. 9393fceb5SAxel Dörfler */ 10393fceb5SAxel Dörfler 11393fceb5SAxel Dörfler 1274785e79SIngo Weinhold #include <stdlib.h> 1374785e79SIngo Weinhold #include <string.h> 1474785e79SIngo Weinhold 15dac21d8bSIngo Weinhold #include <algorithm> 16dac21d8bSIngo Weinhold #include <new> 17dac21d8bSIngo Weinhold 18393fceb5SAxel Dörfler #include <KernelExport.h> 1974785e79SIngo Weinhold 2045bd7bb3SIngo Weinhold #include <boot/kernel_args.h> 21393fceb5SAxel Dörfler #include <smp.h> 22393fceb5SAxel Dörfler #include <util/AutoLock.h> 23e50cf876SIngo Weinhold #include <vm/vm.h> 24e50cf876SIngo Weinhold #include <vm/vm_page.h> 25e50cf876SIngo Weinhold #include <vm/vm_priv.h> 26e50cf876SIngo Weinhold #include <vm/VMAddressSpace.h> 27f34a1dd5SIngo Weinhold #include <vm/VMArea.h> 28393fceb5SAxel Dörfler 29393fceb5SAxel Dörfler #include <arch/vm.h> 30393fceb5SAxel Dörfler #include <arch/int.h> 31393fceb5SAxel Dörfler #include <arch/cpu.h> 32393fceb5SAxel Dörfler 33393fceb5SAxel Dörfler #include <arch/x86/bios.h> 34393fceb5SAxel Dörfler 35393fceb5SAxel Dörfler 36393fceb5SAxel Dörfler //#define TRACE_ARCH_VM 37393fceb5SAxel Dörfler #ifdef TRACE_ARCH_VM 38393fceb5SAxel Dörfler # define TRACE(x) dprintf x 39393fceb5SAxel Dörfler #else 40393fceb5SAxel Dörfler # define TRACE(x) ; 41393fceb5SAxel Dörfler #endif 42393fceb5SAxel Dörfler 43dac21d8bSIngo Weinhold // 0: disabled, 1: some, 2: more 44dac21d8bSIngo Weinhold #define TRACE_MTRR_ARCH_VM 1 45dac21d8bSIngo Weinhold 46dac21d8bSIngo Weinhold #if 
TRACE_MTRR_ARCH_VM >= 1
#	define TRACE_MTRR(x...) dprintf(x)
#else
#	define TRACE_MTRR(x...)
#endif

#if TRACE_MTRR_ARCH_VM >= 2
#	define TRACE_MTRR2(x...) dprintf(x)
#else
#	define TRACE_MTRR2(x...)
#endif


// Kernel virtual address of the identity-mapped DMA region (0 - 0xa0000),
// set up in arch_vm_init_post_area().
void *gDmaAddress;


namespace {

// A memory type requirement for a physical address range, as registered by
// an area (or with area == -1 for ranges registered at boot).
struct memory_type_range : DoublyLinkedListLinkImpl<memory_type_range> {
	uint64		base;	// physical base address
	uint64		size;	// size in bytes
	uint32		type;	// IA32_MTR_* memory type
	area_id		area;	// owning area, or -1 for kernel-registered ranges
};


// Start or end point of a memory_type_range, used for sweep-line processing
// of overlapping ranges in update_mtrrs().
struct memory_type_range_point
	: DoublyLinkedListLinkImpl<memory_type_range_point> {
	uint64				address;
	memory_type_range*	range;

	// True when this point marks the start of its range (its address equals
	// the range's base), false when it marks the end.
	bool IsStart() const	{ return range->base == address; }

	bool operator<(const memory_type_range_point& other) const
	{
		return address < other.address;
	}
};


// Parameters/results for one update_mtrrs(update_mtrr_info&) attempt: ranges
// up to ignoreUncacheableSize are skipped, and the smallest uncacheable range
// actually encountered is reported back in shortestUncacheableSize.
struct update_mtrr_info {
	uint64	ignoreUncacheableSize;
	uint64	shortestUncacheableSize;
};


typedef DoublyLinkedList<memory_type_range> MemoryTypeRangeList;

} // namespace


// Protects sMemoryTypeRanges/sMemoryTypeRangeCount and the temporary arrays.
static mutex sMemoryTypeLock = MUTEX_INITIALIZER("memory type ranges");
static MemoryTypeRangeList sMemoryTypeRanges;
static int32 sMemoryTypeRangeCount = 0;

static const uint32 kMaxMemoryTypeRegisters	= 32;
static x86_mtrr_info sMemoryTypeRegisters[kMaxMemoryTypeRegisters];
static uint32 sMemoryTypeRegisterCount;
static uint32 sMemoryTypeRegistersUsed;

// Scratch arrays reused by update_mtrrs(); grown on demand by
// ensure_temporary_ranges_space().
static memory_type_range* sTemporaryRanges = NULL;
static memory_type_range_point* sTemporaryRangePoints = NULL;
static int32 sTemporaryRangeCount = 0;
static int32 sTemporaryRangePointCount = 0;


/*!	Programs the hardware MTRRs from sMemoryTypeRegisters, with write-back
	as the default memory type for everything not covered by a register.
*/
static void
set_mtrrs()
{
	x86_set_mtrrs(IA32_MTR_WRITE_BACK, sMemoryTypeRegisters,
		sMemoryTypeRegistersUsed);

#if TRACE_MTRR_ARCH_VM
	TRACE_MTRR("set MTRRs to:\n");
	for (uint32 i = 0; i < sMemoryTypeRegistersUsed; i++) {
		const x86_mtrr_info& info = sMemoryTypeRegisters[i];
		TRACE_MTRR("  mtrr: %2" B_PRIu32 ": base: %#10" B_PRIx64 ", size: %#10"
			B_PRIx64 ", type: %u\n", i, info.base, info.size,
			info.type);
	}
#endif
}


/*!	Appends one entry to sMemoryTypeRegisters.
	Returns false when all available MTRRs are already in use.
*/
static bool
add_used_mtrr(uint64 base, uint64 size, uint32 type)
{
	if (sMemoryTypeRegistersUsed == sMemoryTypeRegisterCount)
		return false;

	x86_mtrr_info& mtrr = sMemoryTypeRegisters[sMemoryTypeRegistersUsed++];
	mtrr.base = base;
	mtrr.size = size;
	mtrr.type = type;

	return true;
}


/*!	Covers the given range with MTRR entries. Since each MTRR must have a
	power-of-two size and a base aligned to its size, the range is decomposed
	greedily: from the smallest interval upwards, an entry is cut off the
	front whenever the base is misaligned with the next larger interval, and
	off the back whenever the remaining size has the interval bit set.
	Returns false when the available MTRRs do not suffice.
*/
static bool
add_mtrrs_for_range(uint64 base, uint64 size, uint32 type)
{
	for (uint64 interval = B_PAGE_SIZE; size > 0; interval <<= 1) {
		if ((base & interval) != 0) {
			// base is not aligned to the next larger interval -- cut an
			// interval-sized piece off the front
			if (!add_used_mtrr(base, interval, type))
				return false;
			base += interval;
			size -= interval;
		}

		if ((size & interval) != 0) {
			// cut an interval-sized piece off the back
			if (!add_used_mtrr(base + size - interval, interval, type))
				return false;
			size -= interval;
		}
	}

	return true;
}


/*!	Returns the registered range belonging to the given area, or NULL if the
	area has no registered memory type range.
	Caller must hold sMemoryTypeLock.
*/
static memory_type_range*
find_range(area_id areaID)
{
	for (MemoryTypeRangeList::Iterator it = sMemoryTypeRanges.GetIterator();
		memory_type_range* range = it.Next();) {
		if (range->area == areaID)
			return range;
	}

	return NULL;
}


/*!	Makes the ranges of the given type in \a ranges more MTRR-friendly by
	extending/merging them (see the explanatory comment in
	update_mtrrs(update_mtrr_info&)). When \a removeRanges is true, processed
	ranges are removed from the list so later passes for weaker types may
	ignore them.
	\a ranges must be sorted by base address.
*/
static void
optimize_memory_ranges(MemoryTypeRangeList& ranges, uint32 type,
	bool removeRanges)
{
	uint64 previousEnd = 0;
	uint64 nextStart = 0;
	MemoryTypeRangeList::Iterator it = ranges.GetIterator();
	memory_type_range* range = it.Next();
	while (range != NULL) {
		if (range->type != type) {
			// not our type -- it only limits how far we may extend
			previousEnd = range->base + range->size;
			nextStart = 0;
			range = it.Next();
			continue;
		}

		// find the start of the next range we cannot join this one with
		if (nextStart == 0) {
			MemoryTypeRangeList::Iterator nextIt = it;
			while (memory_type_range* nextRange = nextIt.Next()) {
				if (nextRange->type != range->type) {
					nextStart = nextRange->base;
					break;
				}
			}

			if (nextStart == 0) {
				// no upper limit -- set an artificial one, so we don't need to
				// special case below
				nextStart = (uint64)1 << 32;
			}
		}

		// Align the range's base and end to the greatest power of two
		// possible. As long as we can align both without intersecting any
		// differently typed range, we can extend the range without making it
		// more complicated. Once one side hit a limit we need to be careful.
		// We can still continue aligning the other side, if the range crosses
		// the power of two boundary.
		uint64 rangeBase = range->base;
		uint64 rangeEnd = rangeBase + range->size;
		uint64 interval = B_PAGE_SIZE * 2;
		while (true) {
			uint64 alignedBase = rangeBase & ~(interval - 1);
			uint64 alignedEnd = (rangeEnd + interval - 1) & ~(interval - 1);

			// undo an alignment step that would intersect a neighbor
			if (alignedBase < previousEnd)
				alignedBase += interval;

			if (alignedEnd > nextStart)
				alignedEnd -= interval;

			if (alignedBase >= alignedEnd)
				break;

			rangeBase = std::min(rangeBase, alignedBase);
			rangeEnd = std::max(rangeEnd, alignedEnd);

			interval <<= 1;
		}

		range->base = rangeBase;
		range->size = rangeEnd - rangeBase;

		if (removeRanges)
			it.Remove();

		previousEnd = rangeEnd;

		// Skip the subsequent ranges we have swallowed and possibly cut one
		// we now partially intersect with.
		while ((range = it.Next()) != NULL) {
			if (range->base >= rangeEnd)
				break;

			if (range->base + range->size > rangeEnd) {
				// we partially intersect -- cut the range
				range->size = range->base + range->size - rangeEnd;
				range->base = rangeEnd;
				break;
			}

			// we have swallowed this range completely
			range->size = 0;
			it.Remove();
		}
	}
}


/*!	Ensures that the temporary range/point scratch arrays can hold at least
	\a count elements each, reallocating (to the next power of two, minimum 8)
	if necessary. Returns false on allocation failure; existing contents are
	not preserved.
*/
static bool
ensure_temporary_ranges_space(int32 count)
{
	if (sTemporaryRangeCount >= count && sTemporaryRangePointCount >= count)
		return true;

	// round count to power of 2
	int32 unalignedCount = count;
	count = 8;
	while (count < unalignedCount)
		count <<= 1;

	// resize ranges array
	if (sTemporaryRangeCount < count) {
		memory_type_range* ranges = new(std::nothrow) memory_type_range[count];
		if (ranges == NULL)
			return false;

		delete[] sTemporaryRanges;

		sTemporaryRanges = ranges;
		sTemporaryRangeCount = count;
	}

	// resize points array
	if (sTemporaryRangePointCount < count) {
		memory_type_range_point* points
			= new(std::nothrow) memory_type_range_point[count];
		if (points == NULL)
			return false;

		delete[] sTemporaryRangePoints;

		sTemporaryRangePoints = points;
		sTemporaryRangePointCount = count;
	}

	return true;
}


/*!	Recomputes and programs the MTRRs from sMemoryTypeRanges.
	Uncacheable ranges not larger than updateInfo.ignoreUncacheableSize are
	skipped; the size of the smallest uncacheable range actually considered is
	reported back via updateInfo.shortestUncacheableSize, so the caller can
	retry with a larger ignore threshold.
	Returns B_BUSY when the available MTRRs do not suffice.
	Caller must hold sMemoryTypeLock.
*/
status_t
update_mtrrs(update_mtrr_info& updateInfo)
{
	// resize the temporary points/ranges arrays, if necessary
	if (!ensure_temporary_ranges_space(sMemoryTypeRangeCount * 2))
		return B_NO_MEMORY;

	// get the range points and sort them
	memory_type_range_point* rangePoints = sTemporaryRangePoints;
	int32 pointCount = 0;
	for (MemoryTypeRangeList::Iterator it = sMemoryTypeRanges.GetIterator();
		memory_type_range* range = it.Next();) {
		if (range->type == IA32_MTR_UNCACHED) {
			// Ignore uncacheable ranges below a certain size, if requested.
			// Since we always enforce uncacheability via the PTE attributes,
			// this is no problem (though not recommended for performance
			// reasons).
			if (range->size <= updateInfo.ignoreUncacheableSize)
				continue;
			if (range->size < updateInfo.shortestUncacheableSize)
				updateInfo.shortestUncacheableSize = range->size;
		}

		// two points per range: start and end
		rangePoints[pointCount].address = range->base;
		rangePoints[pointCount++].range = range;
		rangePoints[pointCount].address = range->base + range->size;
		rangePoints[pointCount++].range = range;
	}

	std::sort(rangePoints, rangePoints + pointCount);

#if TRACE_MTRR_ARCH_VM >= 2
	TRACE_MTRR2("memory type range points:\n");
	for (int32 i = 0; i < pointCount; i++) {
		TRACE_MTRR2("%12" B_PRIx64 " (%p)\n", rangePoints[i].address,
			rangePoints[i].range);
	}
#endif

	// Compute the effective ranges. When ranges overlap, we go with the
	// stricter requirement. The types are not necessarily totally ordered, so
	// the order we use below is not always correct. To keep it simple we
	// consider it the responsibility of the callers not to define overlapping
	// memory ranges with uncomparable types.

	// Sweep over the sorted points, tracking the set of ranges currently open
	// ("pending") and the strictest one ("active"); emit an effective range
	// into the output array whenever the active range changes.
	memory_type_range* ranges = sTemporaryRanges;
	typedef DoublyLinkedList<memory_type_range_point> PointList;
	PointList pendingPoints;
	memory_type_range* activeRange = NULL;
	int32 rangeCount = 0;

	for (int32 i = 0; i < pointCount; i++) {
		memory_type_range_point* point = &rangePoints[i];
		bool terminateRange = false;
		if (point->IsStart()) {
			// a range start point
			pendingPoints.Add(point);
			if (activeRange != NULL && activeRange->type > point->range->type)
				terminateRange = true;
		} else {
			// a range end point -- remove the pending start point
			for (PointList::Iterator it = pendingPoints.GetIterator();
				memory_type_range_point* pendingPoint = it.Next();) {
				if (pendingPoint->range == point->range) {
					it.Remove();
					break;
				}
			}

			if (point->range == activeRange)
				terminateRange = true;
		}

		if (terminateRange) {
			// close the currently emitted effective range at this point
			ranges[rangeCount].size = point->address - ranges[rangeCount].base;
			rangeCount++;
			activeRange = NULL;
		}

		if (activeRange != NULL || pendingPoints.IsEmpty())
			continue;

		// we need to start a new range -- find the strictest pending range
		for (PointList::Iterator it = pendingPoints.GetIterator();
			memory_type_range_point* pendingPoint = it.Next();) {
			memory_type_range* pendingRange = pendingPoint->range;
			if (activeRange == NULL || activeRange->type > pendingRange->type)
				activeRange = pendingRange;
		}

		memory_type_range* previousRange = rangeCount > 0
			? &ranges[rangeCount - 1] : NULL;
		if (previousRange == NULL || previousRange->type != activeRange->type
			|| previousRange->base + previousRange->size
				< activeRange->base) {
			// we can't join with the previous range -- add a new one
			ranges[rangeCount].base = point->address;
			ranges[rangeCount].type = activeRange->type;
		} else
			rangeCount--;
	}

#if TRACE_MTRR_ARCH_VM >= 2
	TRACE_MTRR2("effective memory type ranges:\n");
	for (int32 i = 0; i < rangeCount; i++) {
		TRACE_MTRR2("%12" B_PRIx64 " - %12" B_PRIx64 ": %" B_PRIu32 "\n",
			ranges[i].base, ranges[i].base + ranges[i].size, ranges[i].type);
	}
#endif

	// Extend ranges to be more MTRR-friendly. A range is MTRR friendly, when
	// it has a power of two size and a base address aligned to the size. For
	// ranges without this property we need more than one MTRR. We improve
	// MTRR-friendliness by aligning a range's base and end address to the
	// greatest power of two (base rounded down, end up) such that the extended
	// range does not intersect with any other differently typed range. We join
	// equally typed ranges, if possible. There are two exceptions to the
	// intersection requirement: Uncached ranges may intersect with any other
	// range; the resulting type will still be uncached. Hence we can ignore
	// uncached ranges when extending the other ranges. Write-through ranges
	// may intersect with write-back ranges; the resulting type will be
	// write-through. Hence we can ignore write-through ranges when extending
	// write-back ranges.

	MemoryTypeRangeList rangeList;
	for (int32 i = 0; i < rangeCount; i++)
		rangeList.Add(&ranges[i]);

	// ordered from strictest to weakest type
	static const uint32 kMemoryTypes[] = {
		IA32_MTR_UNCACHED,
		IA32_MTR_WRITE_COMBINING,
		IA32_MTR_WRITE_PROTECTED,
		IA32_MTR_WRITE_THROUGH,
		IA32_MTR_WRITE_BACK
	};
	static const int32 kMemoryTypeCount = sizeof(kMemoryTypes)
		/ sizeof(*kMemoryTypes);

	for (int32 i = 0; i < kMemoryTypeCount; i++) {
		uint32 type = kMemoryTypes[i];

		// Remove uncached and write-through ranges after processing them.
		// This lets us leverage their intersection property with any other
		// respectively write-back ranges.
		bool removeRanges = type == IA32_MTR_UNCACHED
			|| type == IA32_MTR_WRITE_THROUGH;

		optimize_memory_ranges(rangeList, type, removeRanges);
	}

#if TRACE_MTRR_ARCH_VM >= 2
	TRACE_MTRR2("optimized memory type ranges:\n");
	for (int32 i = 0; i < rangeCount; i++) {
		if (ranges[i].size > 0) {
			TRACE_MTRR2("%12" B_PRIx64 " - %12" B_PRIx64 ": %" B_PRIu32 "\n",
				ranges[i].base, ranges[i].base + ranges[i].size,
				ranges[i].type);
		}
	}
#endif

	// compute the mtrrs from the ranges
	sMemoryTypeRegistersUsed = 0;
	for (int32 i = 0; i < kMemoryTypeCount; i++) {
		uint32 type = kMemoryTypes[i];

		// skip write-back ranges -- that'll be the default type anyway
		if (type == IA32_MTR_WRITE_BACK)
			continue;

		// NOTE(review): the inner loop variable shadows the outer `i`; it
		// works because the outer index is not used in the inner body, but a
		// distinct name would be clearer.
		for (int32 i = 0; i < rangeCount; i++) {
			if (ranges[i].size == 0 || ranges[i].type != type)
				continue;

			if (!add_mtrrs_for_range(ranges[i].base, ranges[i].size, type))
				return B_BUSY;
		}
	}

	set_mtrrs();

	return B_OK;
}


/*!	Recomputes and programs the MTRRs from sMemoryTypeRanges. If the MTRRs do
	not suffice, progressively larger uncacheable ranges are ignored (their
	uncacheability is still enforced via the page table attributes, cf. the
	comment in update_mtrrs(update_mtrr_info&)) and the computation is
	retried. Panics when even ignoring all uncacheable ranges does not help.
	Caller must hold sMemoryTypeLock.
*/
status_t
update_mtrrs()
{
	// Until we know how many MTRRs we have, pretend everything is OK.
	if (sMemoryTypeRegisterCount == 0)
		return B_OK;

	update_mtrr_info updateInfo;
	updateInfo.ignoreUncacheableSize = 0;

	while (true) {
		TRACE_MTRR2("update_mtrrs(): Trying with ignoreUncacheableSize %#"
			B_PRIx64 ".\n", updateInfo.ignoreUncacheableSize);

		updateInfo.shortestUncacheableSize = ~(uint64)0;
		status_t error = update_mtrrs(updateInfo);
		if (error != B_BUSY) {
			if (error == B_OK && updateInfo.ignoreUncacheableSize > 0) {
				TRACE_MTRR("update_mtrrs(): Succeeded setting MTRRs after "
					"ignoring uncacheable ranges up to size %#" B_PRIx64 ".\n",
					updateInfo.ignoreUncacheableSize);
			}
			return error;
		}

		// Not enough MTRRs. Retry with less uncacheable ranges.
		if (updateInfo.shortestUncacheableSize == ~(uint64)0) {
			// Ugh, even without any uncacheable ranges the available MTRRs do
			// not suffice.
			panic("update_mtrrs(): Out of MTRRs!");
			return B_BUSY;
		}

		ASSERT(updateInfo.ignoreUncacheableSize
			< updateInfo.shortestUncacheableSize);

		// next round: additionally ignore the smallest range we just saw
		updateInfo.ignoreUncacheableSize = updateInfo.shortestUncacheableSize;
	}
}


/*!	Registers a memory type requirement for the given physical range on
	behalf of \a areaID (-1 for ranges not belonging to an area) and updates
	the MTRRs accordingly. A \a type of 0 means no requirement and is a no-op.
	On failure the registration (or type change) is rolled back.
*/
static status_t
add_memory_type_range(area_id areaID, uint64 base, uint64 size, uint32 type)
{
	// translate the type from the public B_MTR_* constants to IA32_MTR_*
	if (type == 0)
		return B_OK;

	switch (type) {
		case B_MTR_UC:
			type = IA32_MTR_UNCACHED;
			break;
		case B_MTR_WC:
			type = IA32_MTR_WRITE_COMBINING;
			break;
		case B_MTR_WT:
			type = IA32_MTR_WRITE_THROUGH;
			break;
		case B_MTR_WP:
			type = IA32_MTR_WRITE_PROTECTED;
			break;
		case B_MTR_WB:
			type = IA32_MTR_WRITE_BACK;
			break;
		default:
			return B_BAD_VALUE;
	}

	TRACE_MTRR("add_memory_type_range(%" B_PRId32 ", %#" B_PRIx64 ", %#"
		B_PRIx64 ", %" B_PRIu32 ")\n", areaID, base, size, type);

	MutexLocker locker(sMemoryTypeLock);

	memory_type_range* range
		= areaID >= 0 ? find_range(areaID) : NULL;
	int32 oldRangeType = -1;
	if (range != NULL) {
		// the area already has a range registered -- only a type change of
		// the identical physical range is allowed
		if (range->base != base || range->size != size)
			return B_BAD_VALUE;
		if (range->type == type)
			return B_OK;

		oldRangeType = range->type;
		range->type = type;
	} else {
		range = new(std::nothrow) memory_type_range;
		if (range == NULL)
			return B_NO_MEMORY;

		range->area = areaID;
		range->base = base;
		range->size = size;
		range->type = type;
		sMemoryTypeRanges.Add(range);
		sMemoryTypeRangeCount++;
	}

	status_t error = update_mtrrs();
	if (error != B_OK) {
		// revert the addition of the range/change of its type
		if (oldRangeType < 0) {
			sMemoryTypeRanges.Remove(range);
			sMemoryTypeRangeCount--;
			delete range;
		} else
			range->type = oldRangeType;

		// restore the previous MTRR setup
		update_mtrrs();
		return error;
	}

	return B_OK;
}


/*!	Removes the memory type range registered for the given area, if any, and
	updates the MTRRs accordingly.
*/
static void
remove_memory_type_range(area_id areaID)
{
	MutexLocker locker(sMemoryTypeLock);

	memory_type_range* range = find_range(areaID);
	if (range != NULL) {
		TRACE_MTRR("remove_memory_type_range(%" B_PRId32 ", %#" B_PRIx64 ", %#"
			B_PRIx64 ", %" B_PRIu32 ")\n", range->area, range->base,
			range->size, range->type);

		sMemoryTypeRanges.Remove(range);
		sMemoryTypeRangeCount--;
		delete range;

		update_mtrrs();
	} else {
		// not fatal -- just log it
		dprintf("remove_memory_type_range(): no range known for area %" B_PRId32
			"\n", areaID);
	}
}


// #pragma mark -


/*!	Early architecture-specific VM initialization; nothing to do on x86. */
status_t
arch_vm_init(kernel_args *args)
{
	TRACE(("arch_vm_init: entry\n"));
	return 0;
}


/*!
	Marks DMA region as in-use, and maps it into the kernel space */
status_t
arch_vm_init_post_area(kernel_args *args)
{
	area_id id;

	TRACE(("arch_vm_init_post_area: entry\n"));

	// account for DMA area and mark the pages unusable
	vm_mark_page_range_inuse(0x0, 0xa0000 / B_PAGE_SIZE);

	// map 0 - 0xa0000 directly
	id = map_physical_memory("dma_region", 0x0, 0xa0000,
		B_ANY_KERNEL_ADDRESS | B_MTR_WB,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, &gDmaAddress);
	if (id < 0) {
		panic("arch_vm_init_post_area: unable to map dma region\n");
		return B_NO_MEMORY;
	}

	// bios_init() is only available/needed on 32 bit x86 (cf. the guard)
#ifndef __x86_64__
	return bios_init();
#else
	return B_OK;
#endif
}


/*!	Gets rid of all yet unmapped (and therefore now unused) page tables */
status_t
arch_vm_init_end(kernel_args *args)
{
	// NOTE(review): the trace message reads "arch_vm_init_endvm" -- historical
	// typo, left unchanged since it is runtime output.
	TRACE(("arch_vm_init_endvm: entry\n"));

	// throw away anything in the kernel_args.pgtable[] that's not yet mapped
	vm_free_unused_boot_loader_range(KERNEL_LOAD_BASE,
		args->arch_args.virtual_end - KERNEL_LOAD_BASE);

	return B_OK;
}


status_t
arch_vm_init_post_modules(kernel_args *args)
{
	// the x86 CPU modules are now accessible

	sMemoryTypeRegisterCount = x86_count_mtrrs();
	if (sMemoryTypeRegisterCount == 0)
		return B_OK;

	// not very likely, but play safe here
	if (sMemoryTypeRegisterCount > kMaxMemoryTypeRegisters)
		sMemoryTypeRegisterCount = kMaxMemoryTypeRegisters;

	// set the physical memory ranges to write-back mode
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		add_memory_type_range(-1, args->physical_memory_range[i].start,
			args->physical_memory_range[i].size, B_MTR_WB);
	}

	return B_OK;
}


void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
	// This function is only invoked when a userland thread is in the process
	// of dying. It switches to the kernel team and does whatever cleanup is
	// necessary (in case it is the team's main thread, it will delete the
	// team).
	// It is however not necessary to change the page directory. Userland team's
	// page directories include all kernel mappings as well. Furthermore our
	// arch specific translation map data objects are ref-counted, so they won't
	// go away as long as they are still used on any CPU.
}


bool
arch_vm_supports_protection(uint32 protection)
{
	// x86 always has the same read/write properties for userland and the
	// kernel.
	// That's why we do not support user-read/kernel-write access. While the
	// other way around is not supported either, we don't care in this case
	// and give the kernel full access.
	if ((protection & (B_READ_AREA | B_WRITE_AREA)) == B_READ_AREA
		&& (protection & B_KERNEL_WRITE_AREA) != 0) {
		return false;
	}

	// Userland and the kernel have the same setting of the NX-bit.
	// That's why we do not allow any area that the user can access, but not
	// execute, while the kernel can execute it.
	if ((protection & (B_READ_AREA | B_WRITE_AREA)) != 0
		&& (protection & B_EXECUTE_AREA) == 0
		&& (protection & B_KERNEL_EXECUTE_AREA) != 0) {
		return false;
	}

	return true;
}


void
arch_vm_unset_memory_type(struct VMArea *area)
{
	// nothing to do, if no memory type was set for the area
	if (area->MemoryType() == 0)
		return;

	remove_memory_type_range(area->id);
}


/*!	Sets the memory type for the given area by registering a memory type
	range for its physical address range.
	\param area The area in question.
	\param physicalBase The physical base address of the area.
	\param type The memory type to set (e.g. \c B_MTR_WB).
	\return \c B_OK on success, another error code otherwise.
*/
status_t
arch_vm_set_memory_type(struct VMArea *area, phys_addr_t physicalBase,
	uint32 type)
{
	return add_memory_type_range(area->id, physicalBase, area->Size(), type);
}