/*
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/64bit/X86VMTranslationMap64Bit.h"

#include <int.h>
#include <slab/Slab.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/64bit/X86PagingMethod64Bit.h"
#include "paging/64bit/X86PagingStructures64Bit.h"
#include "paging/x86_physical_page_mapper.h"


//#define TRACE_X86_VM_TRANSLATION_MAP_64BIT
#ifdef TRACE_X86_VM_TRANSLATION_MAP_64BIT
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


// #pragma mark - X86VMTranslationMap64Bit


X86VMTranslationMap64Bit::X86VMTranslationMap64Bit(bool la57)
	:
	fPagingStructures(NULL),
	fLA57(la57)
{
}


X86VMTranslationMap64Bit::~X86VMTranslationMap64Bit()
{
	TRACE("X86VMTranslationMap64Bit::~X86VMTranslationMap64Bit()\n");

	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL) {
		phys_addr_t address;
		vm_page* page;

		// Free all structures in the bottom half of the PMLTop (user memory).
		uint64* virtualPML4 = fPagingStructures->VirtualPMLTop();
		for (uint32 i = 0; i < 256; i++) {
			if ((virtualPML4[i] & X86_64_PML4E_PRESENT) == 0)
				continue;

			uint64* virtualPDPT = (uint64*)fPageMapper->GetPageTableAt(
				virtualPML4[i] & X86_64_PML4E_ADDRESS_MASK);
			for (uint32 j = 0; j < 512; j++) {
				if ((virtualPDPT[j] & X86_64_PDPTE_PRESENT) == 0)
					continue;

				uint64* virtualPageDir = (uint64*)fPageMapper->GetPageTableAt(
					virtualPDPT[j] & X86_64_PDPTE_ADDRESS_MASK);
				for (uint32 k = 0; k < 512; k++) {
					if ((virtualPageDir[k] & X86_64_PDE_PRESENT) == 0)
						continue;

					address = virtualPageDir[k] & X86_64_PDE_ADDRESS_MASK;
					page = vm_lookup_page(address / B_PAGE_SIZE);
					if (page == NULL) {
						panic("page table %u %u %u on invalid page %#"
							B_PRIxPHYSADDR "\n", i, j, k, address);
					}

					DEBUG_PAGE_ACCESS_START(page);
					vm_page_set_state(page, PAGE_STATE_FREE);
				}

				address = virtualPDPT[j] & X86_64_PDPTE_ADDRESS_MASK;
				page = vm_lookup_page(address / B_PAGE_SIZE);
				if (page == NULL) {
					panic("page directory %u %u on invalid page %#"
						B_PRIxPHYSADDR "\n", i, j, address);
				}

				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}

			address = virtualPML4[i] & X86_64_PML4E_ADDRESS_MASK;
			page = vm_lookup_page(address / B_PAGE_SIZE);
			if (page == NULL) {
				panic("PDPT %u on invalid page %#" B_PRIxPHYSADDR "\n", i,
					address);
			}

			DEBUG_PAGE_ACCESS_START(page);
			vm_page_set_state(page, PAGE_STATE_FREE);
		}

		fPageMapper->Delete();
	}

	fPagingStructures->RemoveReference();
}


status_t
X86VMTranslationMap64Bit::Init(bool kernel)
{
	TRACE("X86VMTranslationMap64Bit::Init()\n");

	X86VMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) X86PagingStructures64Bit;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	X86PagingMethod64Bit* method = X86PagingMethod64Bit::Method();

	if (kernel) {
		// Get the page mapper.
		fPageMapper = method->KernelPhysicalPageMapper();

		// Kernel PMLTop is already mapped.
		fPagingStructures->Init(method->KernelVirtualPMLTop(),
			method->KernelPhysicalPMLTop());
	} else {
		// Allocate a physical page mapper.
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// Assuming that only the top 2 PMLTop entries are occupied for the
		// kernel.
		STATIC_ASSERT(KERNEL_PMAP_BASE == 0xffffff0000000000);
		STATIC_ASSERT(KERNEL_BASE == 0xffffff0000000000);

		// Allocate and clear the PMLTop.
		uint64* virtualPMLTop = (uint64*)memalign(B_PAGE_SIZE, B_PAGE_SIZE);
		if (virtualPMLTop == NULL)
			return B_NO_MEMORY;
		memset(virtualPMLTop, 0, B_PAGE_SIZE);

		// Copy the top 2 PMLTop entries.
		virtualPMLTop[510] = method->KernelVirtualPMLTop()[510];
		virtualPMLTop[511] = method->KernelVirtualPMLTop()[511];

		// Look up the PMLTop physical address.
		phys_addr_t physicalPMLTop;
		vm_get_page_mapping(VMAddressSpace::KernelID(), (addr_t)virtualPMLTop,
			&physicalPMLTop);

		// Initialize the paging structures.
		fPagingStructures->Init(virtualPMLTop, physicalPMLTop);
	}

	return B_OK;
}


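/*!	Returns a conservative upper bound on the number of page-table pages
	(PML4s, PDPTs, page directories and page tables) that mapping the range
	[start, end] may require, so the caller can reserve enough pages up front.

	For example, without LA57, mapping a single page at a known address that
	does not cross any table boundary may still need up to
	1 PDPT + 1 page directory + 1 page table = 3 new table pages, since none
	of them have to exist yet. When start == 0 the final base address is not
	known, so the worst case is assumed: a range starting on the last page
	covered by one PDPT (or PML4, with LA57), which spans two entries at every
	level.
*/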
size_t
X86VMTranslationMap64Bit::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// If start == 0, the actual base address is not yet known to the caller
	// and we shall assume the worst case, which is where the start address is
	// the last page covered by a PDPT (or by a PML4, when LA57 is enabled).
	if (start == 0) {
		start = (fLA57 ? k64BitPML4TRange : k64BitPDPTRange) - B_PAGE_SIZE;
		end += start;
	}

	size_t requiredPML4s = 0;
	if (fLA57) {
		requiredPML4s = end / k64BitPML4TRange + 1
			- start / k64BitPML4TRange;
	}
	size_t requiredPDPTs = end / k64BitPDPTRange + 1
		- start / k64BitPDPTRange;
	size_t requiredPageDirs = end / k64BitPageDirectoryRange + 1
		- start / k64BitPageDirectoryRange;
	size_t requiredPageTables = end / k64BitPageTableRange + 1
		- start / k64BitPageTableRange;

	return requiredPML4s + requiredPDPTs + requiredPageDirs
		+ requiredPageTables;
}


status_t
X86VMTranslationMap64Bit::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
	uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("X86VMTranslationMap64Bit::Map(%#" B_PRIxADDR ", %#" B_PRIxPHYSADDR
		")\n", virtualAddress, physicalAddress);

	ThreadCPUPinner pinner(thread_get_current_thread());

	// Look up the page table for the virtual address, allocating new tables
	// if required. Shouldn't fail.
	uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
		fPagingStructures->VirtualPMLTop(), virtualAddress, fIsKernelMap,
		true, reservation, fPageMapper, fMapCount);
	ASSERT(entry != NULL);

	// The entry should not already exist.
	ASSERT_PRINT((*entry & X86_64_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64,
		virtualAddress, *entry);

	// Fill in the table entry.
	X86PagingMethod64Bit::PutPageTableEntryInTable(entry, physicalAddress,
		attributes, memoryType, fIsKernelMap);

	// Note: We don't need to invalidate the TLB for this address, as the
	// entry was not present before and the TLB doesn't cache non-present
	// entries.

	fMapCount++;

	return B_OK;
}


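/*!	Clears the page table entries for all present pages in the given range.
	Only the entries themselves are cleared; the page table pages are not
	freed and the underlying physical pages are left untouched. TLB entries
	are invalidated only for pages whose accessed flag was set.
*/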
status_t
X86VMTranslationMap64Bit::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMap64Bit::Unmap(%#" B_PRIxADDR ", %#" B_PRIxADDR
		")\n", start, end);

	ThreadCPUPinner pinner(thread_get_current_thread());

	do {
		uint64* pageTable = X86PagingMethod64Bit::PageTableForAddress(
			fPagingStructures->VirtualPMLTop(), start, fIsKernelMap, false,
			NULL, fPageMapper, fMapCount);
		if (pageTable == NULL) {
			// Move on to the next page table.
			start = ROUNDUP(start + 1, k64BitPageTableRange);
			continue;
		}

		for (uint32 index = start / B_PAGE_SIZE % k64BitTableEntryCount;
				index < k64BitTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			if ((pageTable[index] & X86_64_PTE_PRESENT) == 0)
				continue;

			TRACE("X86VMTranslationMap64Bit::Unmap(): removing page %#"
				B_PRIxADDR " (%#" B_PRIxPHYSADDR ")\n", start,
				pageTable[index] & X86_64_PTE_ADDRESS_MASK);

			uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(
				&pageTable[index], X86_64_PTE_PRESENT);
			fMapCount--;

			if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


status_t
X86VMTranslationMap64Bit::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMap64Bit::DebugMarkRangePresent(%#" B_PRIxADDR
		", %#" B_PRIxADDR ")\n", start, end);

	ThreadCPUPinner pinner(thread_get_current_thread());

	do {
		uint64* pageTable = X86PagingMethod64Bit::PageTableForAddress(
			fPagingStructures->VirtualPMLTop(), start, fIsKernelMap, false,
			NULL, fPageMapper, fMapCount);
		if (pageTable == NULL) {
			// Move on to the next page table.
			start = ROUNDUP(start + 1, k64BitPageTableRange);
			continue;
		}

		for (uint32 index = start / B_PAGE_SIZE % k64BitTableEntryCount;
				index < k64BitTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			if ((pageTable[index] & X86_64_PTE_PRESENT) == 0) {
				if (!markPresent)
					continue;

				X86PagingMethod64Bit::SetTableEntryFlags(&pageTable[index],
					X86_64_PTE_PRESENT);
			} else {
				if (markPresent)
					continue;

				uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(
					&pageTable[index], X86_64_PTE_PRESENT);

				if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
					// Note that we only need to invalidate the address if the
					// accessed flag was set, since only then the entry could
					// have been in any TLB.
					InvalidatePage(start);
				}
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


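/*!	Unmaps a single page and updates the corresponding vm_page bookkeeping.
	The accessed and dirty flags of the cleared entry are handed over to
	PageUnmapped(), which also releases fLock (hence the detached locker
	below).
*/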
status_t
X86VMTranslationMap64Bit::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	TRACE("X86VMTranslationMap64Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);

	ThreadCPUPinner pinner(thread_get_current_thread());

	// Look up the page table for the virtual address.
	uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
		fPagingStructures->VirtualPMLTop(), address, fIsKernelMap,
		false, NULL, fPageMapper, fMapCount);
	if (entry == NULL)
		return B_ENTRY_NOT_FOUND;

	RecursiveLocker locker(fLock);

	uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntry(entry);

	pinner.Unlock();

	if ((oldEntry & X86_64_PTE_PRESENT) == 0)
		return B_ENTRY_NOT_FOUND;

	fMapCount--;

	if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any TLB.
		InvalidatePage(address);

		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of the
		// same team) could still access the page in question via their cached
		// entry. We can obviously lose a modified flag in this case, with the
		// effect that the page looks unmodified (and might thus be recycled),
		// but is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// this is actually a problem.
		// Interestingly FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & X86_64_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		(oldEntry & X86_64_PTE_ACCESSED) != 0,
		(oldEntry & X86_64_PTE_DIRTY) != 0, updatePageQueue);

	return B_OK;
}


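/*!	Bulk variant of UnmapPage() for a whole range of an area. Accessed and
	dirty flags are transferred to the vm_page structures; for B_NO_LOCK areas
	the mapping objects are collected in a local queue and only freed after
	the map lock has been released.
*/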
void
X86VMTranslationMap64Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("X86VMTranslationMap64Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	VMAreaMappings queue;

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	do {
		uint64* pageTable = X86PagingMethod64Bit::PageTableForAddress(
			fPagingStructures->VirtualPMLTop(), start, fIsKernelMap, false,
			NULL, fPageMapper, fMapCount);
		if (pageTable == NULL) {
			// Move on to the next page table.
			start = ROUNDUP(start + 1, k64BitPageTableRange);
			continue;
		}

		for (uint32 index = start / B_PAGE_SIZE % k64BitTableEntryCount;
				index < k64BitTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntry(
				&pageTable[index]);
			if ((oldEntry & X86_64_PTE_PRESENT) == 0)
				continue;

			fMapCount--;

			if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & X86_64_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if ((oldEntry & X86_64_PTE_ACCESSED) != 0)
					page->accessed = true;
				if ((oldEntry & X86_64_PTE_DIRTY) != 0)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical, as in all cases where this method is used the unmapped
	// area range is unmapped for good (resized/cut) and the pages will likely
	// be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


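/*!	Unmaps everything belonging to the given area. Device and wired areas are
	handed to UnmapPages(), since they have no mapping objects to iterate;
	for everything else the area's mapping list is walked instead of the page
	tables. If the address space is being deleted and the top cache's page
	flags may be ignored, clearing the individual entries is skipped where
	possible.
*/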
void
X86VMTranslationMap64Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	TRACE("X86VMTranslationMap64Bit::UnmapArea(%p)\n", area);

	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		X86VMTranslationMap64Bit::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
				fPagingStructures->VirtualPMLTop(), address, fIsKernelMap,
				false, NULL, fPageMapper, fMapCount);
			if (entry == NULL) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table", page, area, address);
				continue;
			}

			uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntry(entry);

			if ((oldEntry & X86_64_PTE_PRESENT) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if ((oldEntry & X86_64_PTE_DIRTY) != 0)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


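/*!	Looks up the physical address and the protection/state flags mapped at
	\a virtualAddress. Since this may be called on the physical map area,
	2 MB large pages have to be handled in addition to regular 4 KB mappings.
	Returns B_OK even if nothing is mapped there; in that case the returned
	flags simply lack PAGE_PRESENT.
*/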
status_t
X86VMTranslationMap64Bit::Query(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	*_flags = 0;
	*_physicalAddress = 0;

	ThreadCPUPinner pinner(thread_get_current_thread());

	// This function may be called on the physical map area, so we must handle
	// large pages here. Look up the page directory entry for the virtual
	// address.
	uint64* pde = X86PagingMethod64Bit::PageDirectoryEntryForAddress(
		fPagingStructures->VirtualPMLTop(), virtualAddress, fIsKernelMap,
		false, NULL, fPageMapper, fMapCount);
	if (pde == NULL || (*pde & X86_64_PDE_PRESENT) == 0)
		return B_OK;

	uint64 entry;
	if ((*pde & X86_64_PDE_LARGE_PAGE) != 0) {
		entry = *pde;
		*_physicalAddress = (entry & X86_64_PDE_ADDRESS_MASK)
			+ (virtualAddress % 0x200000);
	} else {
		uint64* virtualPageTable = (uint64*)fPageMapper->GetPageTableAt(
			*pde & X86_64_PDE_ADDRESS_MASK);
		entry = virtualPageTable[VADDR_TO_PTE(virtualAddress)];
		*_physicalAddress = entry & X86_64_PTE_ADDRESS_MASK;
	}

	// Translate the page state flags.
	if ((entry & X86_64_PTE_USER) != 0) {
		*_flags |= ((entry & X86_64_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA
			| ((entry & X86_64_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0);
	}

	*_flags |= ((entry & X86_64_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_64_PTE_NOT_EXECUTABLE) == 0 ? B_KERNEL_EXECUTE_AREA : 0)
		| ((entry & X86_64_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_64_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_64_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

	TRACE("X86VMTranslationMap64Bit::Query(%#" B_PRIxADDR ") -> %#"
		B_PRIxPHYSADDR " %#" B_PRIx32 " (entry: %#" B_PRIx64 ")\n",
		virtualAddress, *_physicalAddress, *_flags, entry);

	return B_OK;
}


status_t
X86VMTranslationMap64Bit::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	// With our page mapper, there is no difference in getting a page table
	// when interrupts are enabled or disabled, so just call Query().
	return Query(virtualAddress, _physicalAddress, _flags);
}


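/*!	Changes the protection and memory type of all present pages in the given
	range. Each entry is updated with a compare-and-swap loop so that the
	hardware-maintained accessed and dirty flags are never lost; entries whose
	accessed flag was set additionally get their TLB entry invalidated.
*/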
status_t
X86VMTranslationMap64Bit::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMap64Bit::Protect(%#" B_PRIxADDR ", %#" B_PRIxADDR
		", %#" B_PRIx32 ")\n", start, end, attributes);

	// compute protection flags
	uint64 newProtectionFlags = 0;
	if ((attributes & B_USER_PROTECTION) != 0) {
		newProtectionFlags = X86_64_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newProtectionFlags |= X86_64_PTE_WRITABLE;
		if ((attributes & B_EXECUTE_AREA) == 0
			&& x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) {
			newProtectionFlags |= X86_64_PTE_NOT_EXECUTABLE;
		}
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newProtectionFlags = X86_64_PTE_WRITABLE;

	ThreadCPUPinner pinner(thread_get_current_thread());

	do {
		uint64* pageTable = X86PagingMethod64Bit::PageTableForAddress(
			fPagingStructures->VirtualPMLTop(), start, fIsKernelMap, false,
			NULL, fPageMapper, fMapCount);
		if (pageTable == NULL) {
			// Move on to the next page table.
			start = ROUNDUP(start + 1, k64BitPageTableRange);
			continue;
		}

		for (uint32 index = start / B_PAGE_SIZE % k64BitTableEntryCount;
				index < k64BitTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			uint64 entry = pageTable[index];
			if ((entry & X86_64_PTE_PRESENT) == 0)
				continue;

			TRACE("X86VMTranslationMap64Bit::Protect(): protect page %#"
				B_PRIxADDR "\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			uint64 oldEntry;
			while (true) {
				oldEntry = X86PagingMethod64Bit::TestAndSetTableEntry(
					&pageTable[index],
					(entry & ~(X86_64_PTE_PROTECTION_MASK
							| X86_64_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags
						| X86PagingMethod64Bit::MemoryTypeToPageTableEntryFlags(
							memoryType),
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


status_t
X86VMTranslationMap64Bit::ClearFlags(addr_t address, uint32 flags)
{
	TRACE("X86VMTranslationMap64Bit::ClearFlags(%#" B_PRIxADDR ", %#" B_PRIx32
		")\n", address, flags);

	ThreadCPUPinner pinner(thread_get_current_thread());

	uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
		fPagingStructures->VirtualPMLTop(), address, fIsKernelMap,
		false, NULL, fPageMapper, fMapCount);
	if (entry == NULL)
		return B_OK;

	uint64 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_64_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? X86_64_PTE_ACCESSED : 0);

	uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(entry,
		flagsToClear);

	if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(address);

	return B_OK;
}


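/*!	Clears the accessed and dirty flags of the entry mapping \a address and
	reports via \a _modified whether the page was dirty. If
	\a unmapIfUnaccessed is \c true and the accessed flag was not set, the
	page is unmapped entirely instead. Returns \c true only if the page had
	been accessed, i.e. the mapping is still in place and was in use.
*/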
bool
X86VMTranslationMap64Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	TRACE("X86VMTranslationMap64Bit::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
		fPagingStructures->VirtualPMLTop(), address, fIsKernelMap,
		false, NULL, fPageMapper, fMapCount);
	if (entry == NULL)
		return false;

	uint64 oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = *entry;
			if ((oldEntry & X86_64_PTE_PRESENT) == 0) {
				// page mapping not valid
				return false;
			}

			if (oldEntry & X86_64_PTE_ACCESSED) {
				// page was accessed -- just clear the flags
				oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(entry,
					X86_64_PTE_ACCESSED | X86_64_PTE_DIRTY);
				break;
			}

			// page hasn't been accessed -- unmap it
			if (X86PagingMethod64Bit::TestAndSetTableEntry(entry, 0, oldEntry)
					== oldEntry) {
				break;
			}

			// something changed -- check again
		}
	} else {
		oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(entry,
			X86_64_PTE_ACCESSED | X86_64_PTE_DIRTY);
	}

	pinner.Unlock();

	_modified = (oldEntry & X86_64_PTE_DIRTY) != 0;

	if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any TLB.
		InvalidatePage(address);
		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & X86_64_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

	return false;
}


X86PagingStructures*
X86VMTranslationMap64Bit::PagingStructures() const
{
	return fPagingStructures;
}