1 /* 2 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de. 3 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved. 4 * Distributed under the terms of the MIT License. 5 * 6 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved. 7 * Distributed under the terms of the NewOS License. 8 */ 9 10 11 #include "paging/pae/X86PagingMethodPAE.h" 12 13 #include <stdlib.h> 14 #include <string.h> 15 16 #include <AutoDeleter.h> 17 18 #include <boot/kernel_args.h> 19 #include <util/AutoLock.h> 20 #include <vm/vm.h> 21 #include <vm/vm_page.h> 22 #include <vm/VMAddressSpace.h> 23 24 #include "paging/32bit/paging.h" 25 #include "paging/32bit/X86PagingMethod32Bit.h" 26 #include "paging/pae/X86PagingStructuresPAE.h" 27 #include "paging/pae/X86VMTranslationMapPAE.h" 28 #include "paging/x86_physical_page_mapper.h" 29 #include "paging/x86_physical_page_mapper_large_memory.h" 30 31 32 //#define TRACE_X86_PAGING_METHOD_PAE 33 #ifdef TRACE_X86_PAGING_METHOD_PAE 34 # define TRACE(x...) dprintf(x) 35 #else 36 # define TRACE(x...) ; 37 #endif 38 39 40 #if B_HAIKU_PHYSICAL_BITS == 64 41 42 43 using X86LargePhysicalPageMapper::PhysicalPageSlot; 44 45 46 // number of 32 bit pages that will be cached 47 static const page_num_t kMaxFree32BitPagesCount = 32; 48 49 50 // #pragma mark - ToPAESwitcher 51 52 53 struct X86PagingMethodPAE::ToPAESwitcher { 54 ToPAESwitcher(kernel_args* args) 55 : 56 fKernelArgs(args) 57 { 58 // page hole set up in the boot loader 59 fPageHole = (page_table_entry*) 60 (addr_t)fKernelArgs->arch_args.page_hole; 61 62 // calculate where the page dir would be 63 fPageHolePageDir = (page_directory_entry*) 64 (((addr_t)fKernelArgs->arch_args.page_hole) 65 + (B_PAGE_SIZE * 1024 - B_PAGE_SIZE)); 66 67 fPhysicalPageDir = fKernelArgs->arch_args.phys_pgdir; 68 69 TRACE("page hole: %p\n", fPageHole); 70 TRACE("page dir: %p (physical: %#" B_PRIxPHYSADDR ")\n", 71 fPageHolePageDir, fPhysicalPageDir); 72 } 73 74 void Switch(pae_page_directory_pointer_table_entry*& _virtualPDPT, 75 phys_addr_t& _physicalPDPT, void*& _pageStructures, 76 size_t& _pageStructuresSize, pae_page_directory_entry** pageDirs, 77 phys_addr_t* physicalPageDirs, addr_t& _freeVirtualSlot, 78 pae_page_table_entry*& _freeVirtualSlotPTE) 79 { 80 // count the page tables we have to translate 81 uint32 pageTableCount = 0; 82 for (uint32 i = FIRST_KERNEL_PGDIR_ENT; 83 i < FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS; i++) { 84 page_directory_entry entry = fPageHolePageDir[i]; 85 if ((entry & X86_PDE_PRESENT) != 0) 86 pageTableCount++; 87 } 88 89 TRACE("page tables to translate: %" B_PRIu32 "\n", pageTableCount); 90 91 // The pages we need to allocate to do our job: 92 // + 1 page dir pointer table 93 // + 4 page dirs 94 // + 2 * page tables (each has 512 instead of 1024 entries) 95 // + 1 page for the free virtual slot (no physical page needed) 96 uint32 pagesNeeded = 1 + 4 + pageTableCount * 2 + 1; 97 98 // We need additional PAE page tables for the new pages we're going to 99 // allocate: Two tables for every 1024 pages to map, i.e. 2 additional 100 // pages for every 1022 pages we want to allocate. We also need 32 bit 101 // page tables, but we don't need additional virtual space for them, 102 // since we can access then via the page hole. 103 pagesNeeded += ((pagesNeeded + 1021) / 1022) * 2; 104 105 TRACE("pages needed: %" B_PRIu32 "\n", pagesNeeded); 106 107 // allocate the pages we need 108 _AllocateNeededPages(pagesNeeded); 109 110 // prepare the page directory pointer table 111 phys_addr_t physicalPDPT = 0; 112 pae_page_directory_pointer_table_entry* pdpt 113 = (pae_page_directory_pointer_table_entry*)_NextPage(true, 114 physicalPDPT); 115 116 for (int32 i = 0; i < 4; i++) { 117 fPageDirs[i] = (pae_page_directory_entry*)_NextPage(true, 118 fPhysicalPageDirs[i]); 119 120 pdpt[i] = X86_PAE_PDPTE_PRESENT 121 | (fPhysicalPageDirs[i] & X86_PAE_PDPTE_ADDRESS_MASK); 122 } 123 124 // Since we have to enable PAE in two steps -- setting cr3 to the PDPT 125 // and setting the cr4 PAE bit -- we copy the kernel page dir entries to 126 // the PDPT page, so after setting cr3, we continue to have working 127 // kernel mappings. This requires that the PDPTE registers and the 128 // page dir entries don't interect, obviously. 129 ASSERT(4 * sizeof(pae_page_directory_pointer_table_entry) 130 <= FIRST_KERNEL_PGDIR_ENT * sizeof(page_directory_entry)); 131 132 // translate the page tables 133 for (uint32 i = FIRST_KERNEL_PGDIR_ENT; 134 i < FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS; i++) { 135 if ((fPageHolePageDir[i] & X86_PDE_PRESENT) != 0) { 136 // two PAE page tables per 32 bit page table 137 _TranslatePageTable((addr_t)i * 1024 * B_PAGE_SIZE); 138 _TranslatePageTable(((addr_t)i * 1024 + 512) * B_PAGE_SIZE); 139 140 // copy the page directory entry to the PDPT page 141 ((page_directory_entry*)pdpt)[i] = fPageHolePageDir[i]; 142 } 143 } 144 145 TRACE("free virtual slot: %#" B_PRIxADDR ", PTE: %p\n", 146 fFreeVirtualSlot, fFreeVirtualSlotPTE); 147 148 // enable PAE on all CPUs 149 call_all_cpus_sync(&_EnablePAE, (void*)(addr_t)physicalPDPT); 150 151 // if availalbe enable NX-bit (No eXecute) 152 if (x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) 153 call_all_cpus_sync(&_EnableExecutionDisable, NULL); 154 155 // set return values 156 _virtualPDPT = pdpt; 157 _physicalPDPT = physicalPDPT; 158 _pageStructures = fAllocatedPages; 159 _pageStructuresSize = (size_t)fUsedPagesCount * B_PAGE_SIZE; 160 memcpy(pageDirs, fPageDirs, sizeof(fPageDirs)); 161 memcpy(physicalPageDirs, fPhysicalPageDirs, sizeof(fPhysicalPageDirs)); 162 163 _freeVirtualSlot = fFreeVirtualSlot; 164 _freeVirtualSlotPTE = fFreeVirtualSlotPTE; 165 } 166 167 private: 168 static void _EnablePAE(void* physicalPDPT, int cpu) 169 { 170 x86_write_cr3((addr_t)physicalPDPT); 171 x86_write_cr4(x86_read_cr4() | IA32_CR4_PAE | IA32_CR4_GLOBAL_PAGES); 172 } 173 174 static void _EnableExecutionDisable(void* dummy, int cpu) 175 { 176 x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER) 177 | IA32_MSR_EFER_NX); 178 } 179 180 void _TranslatePageTable(addr_t virtualBase) 181 { 182 page_table_entry* entry = &fPageHole[virtualBase / B_PAGE_SIZE]; 183 184 // allocate a PAE page table 185 phys_addr_t physicalTable = 0; 186 pae_page_table_entry* paeTable = (pae_page_table_entry*)_NextPage(false, 187 physicalTable); 188 189 // enter it into the page dir 190 pae_page_directory_entry* pageDirEntry = PageDirEntryForAddress( 191 fPageDirs, virtualBase); 192 PutPageTableInPageDir(pageDirEntry, physicalTable, 193 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); 194 195 pae_page_table_entry* paeEntry = paeTable; 196 for (uint32 i = 0; i < kPAEPageTableEntryCount; 197 i++, entry++, paeEntry++) { 198 if ((*entry & X86_PTE_PRESENT) != 0 199 && _IsVirtualAddressAllocated(virtualBase + i * B_PAGE_SIZE)) { 200 // Note, we use the fact that the PAE flags are defined to the 201 // same values. 202 *paeEntry = *entry & (X86_PTE_PRESENT 203 | X86_PTE_WRITABLE 204 | X86_PTE_USER 205 | X86_PTE_WRITE_THROUGH 206 | X86_PTE_CACHING_DISABLED 207 | X86_PTE_GLOBAL 208 | X86_PTE_ADDRESS_MASK); 209 } else 210 *paeEntry = 0; 211 } 212 213 if (fFreeVirtualSlot / kPAEPageTableRange 214 == virtualBase / kPAEPageTableRange) { 215 fFreeVirtualSlotPTE = paeTable 216 + fFreeVirtualSlot / B_PAGE_SIZE % kPAEPageTableEntryCount; 217 } 218 } 219 220 void _AllocateNeededPages(uint32 pagesNeeded) 221 { 222 size_t virtualSize = ROUNDUP(pagesNeeded, 1024) * B_PAGE_SIZE; 223 addr_t virtualBase = vm_allocate_early(fKernelArgs, virtualSize, 0, 0, 224 kPageTableAlignment); 225 if (virtualBase == 0) { 226 panic("Failed to reserve virtual address space for the switch to " 227 "PAE!"); 228 } 229 230 TRACE("virtual space: %#" B_PRIxADDR ", size: %#" B_PRIxSIZE "\n", 231 virtualBase, virtualSize); 232 233 // allocate pages for the 32 bit page tables and prepare the tables 234 uint32 oldPageTableCount = virtualSize / B_PAGE_SIZE / 1024; 235 for (uint32 i = 0; i < oldPageTableCount; i++) { 236 // allocate a page 237 phys_addr_t physicalTable =_AllocatePage32Bit(); 238 239 // put the page into the page dir 240 page_directory_entry* entry = &fPageHolePageDir[ 241 virtualBase / B_PAGE_SIZE / 1024 + i]; 242 X86PagingMethod32Bit::PutPageTableInPageDir(entry, physicalTable, 243 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); 244 245 // clear the table 246 memset((void*)((addr_t)fPageHole 247 + (virtualBase / B_PAGE_SIZE / 1024 + i) * B_PAGE_SIZE), 248 0, B_PAGE_SIZE); 249 } 250 251 // We don't need a physical page for the free virtual slot. 252 pagesNeeded--; 253 254 // allocate and map the pages we need 255 for (uint32 i = 0; i < pagesNeeded; i++) { 256 // allocate a page 257 phys_addr_t physicalAddress =_AllocatePage32Bit(); 258 259 // put the page into the page table 260 page_table_entry* entry = fPageHole + virtualBase / B_PAGE_SIZE + i; 261 X86PagingMethod32Bit::PutPageTableEntryInTable(entry, 262 physicalAddress, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, 263 true); 264 265 // Write the page's physical address into the page itself, so we 266 // don't need to look it up later. 267 *(phys_addr_t*)(virtualBase + i * B_PAGE_SIZE) = physicalAddress; 268 } 269 270 fAllocatedPages = (uint8*)virtualBase; 271 fAllocatedPagesCount = pagesNeeded; 272 fUsedPagesCount = 0; 273 fFreeVirtualSlot 274 = (addr_t)(fAllocatedPages + pagesNeeded * B_PAGE_SIZE); 275 } 276 277 phys_addr_t _AllocatePage() 278 { 279 phys_addr_t physicalAddress 280 = (phys_addr_t)vm_allocate_early_physical_page(fKernelArgs) 281 * B_PAGE_SIZE; 282 if (physicalAddress == 0) 283 panic("Failed to allocate page for the switch to PAE!"); 284 return physicalAddress; 285 } 286 287 phys_addr_t _AllocatePage32Bit() 288 { 289 phys_addr_t physicalAddress = _AllocatePage(); 290 if (physicalAddress > 0xffffffff) { 291 panic("Failed to allocate 32 bit addressable page for the switch " 292 "to PAE!"); 293 return 0; 294 } 295 return physicalAddress; 296 } 297 298 void* _NextPage(bool clearPage, phys_addr_t& _physicalAddress) 299 { 300 if (fUsedPagesCount >= fAllocatedPagesCount) { 301 panic("X86PagingMethodPAE::ToPAESwitcher::_NextPage(): no more " 302 "allocated pages!"); 303 return NULL; 304 } 305 306 void* page = fAllocatedPages + (fUsedPagesCount++) * B_PAGE_SIZE; 307 _physicalAddress = *((phys_addr_t*)page); 308 309 if (clearPage) 310 memset(page, 0, B_PAGE_SIZE); 311 312 return page; 313 } 314 315 bool _IsVirtualAddressAllocated(addr_t address) const 316 { 317 for (uint32 i = 0; i < fKernelArgs->num_virtual_allocated_ranges; i++) { 318 addr_t start = fKernelArgs->virtual_allocated_range[i].start; 319 addr_t end = start + fKernelArgs->virtual_allocated_range[i].size; 320 if (address < start) 321 return false; 322 if (address <= end - 1) 323 return true; 324 } 325 326 return false; 327 } 328 329 private: 330 kernel_args* fKernelArgs; 331 page_table_entry* fPageHole; 332 page_directory_entry* fPageHolePageDir; 333 phys_addr_t fPhysicalPageDir; 334 uint8* fAllocatedPages; 335 uint32 fAllocatedPagesCount; 336 uint32 fUsedPagesCount; 337 addr_t fFreeVirtualSlot; 338 pae_page_table_entry* fFreeVirtualSlotPTE; 339 pae_page_directory_entry* fPageDirs[4]; 340 phys_addr_t fPhysicalPageDirs[4]; 341 }; 342 343 344 // #pragma mark - PhysicalPageSlotPool 345 346 347 struct X86PagingMethodPAE::PhysicalPageSlotPool 348 : X86LargePhysicalPageMapper::PhysicalPageSlotPool { 349 public: 350 virtual ~PhysicalPageSlotPool(); 351 352 status_t InitInitial(X86PagingMethodPAE* method, 353 kernel_args* args); 354 status_t InitInitialPostArea(kernel_args* args); 355 356 void Init(area_id dataArea, 357 pae_page_table_entry* pageTable, 358 area_id virtualArea, addr_t virtualBase); 359 360 virtual status_t AllocatePool( 361 X86LargePhysicalPageMapper 362 ::PhysicalPageSlotPool*& _pool); 363 virtual void Map(phys_addr_t physicalAddress, 364 addr_t virtualAddress); 365 366 public: 367 static PhysicalPageSlotPool sInitialPhysicalPagePool; 368 369 private: 370 area_id fDataArea; 371 area_id fVirtualArea; 372 addr_t fVirtualBase; 373 pae_page_table_entry* fPageTable; 374 }; 375 376 377 X86PagingMethodPAE::PhysicalPageSlotPool 378 X86PagingMethodPAE::PhysicalPageSlotPool::sInitialPhysicalPagePool; 379 380 381 X86PagingMethodPAE::PhysicalPageSlotPool::~PhysicalPageSlotPool() 382 { 383 } 384 385 386 status_t 387 X86PagingMethodPAE::PhysicalPageSlotPool::InitInitial( 388 X86PagingMethodPAE* method, kernel_args* args) 389 { 390 // allocate a virtual address range for the pages to be mapped into 391 addr_t virtualBase = vm_allocate_early(args, kPAEPageTableRange, 0, 0, 392 kPAEPageTableRange); 393 if (virtualBase == 0) { 394 panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve " 395 "physical page pool space in virtual address space!"); 396 return B_ERROR; 397 } 398 399 // allocate memory for the page table and data 400 size_t areaSize = B_PAGE_SIZE 401 + sizeof(PhysicalPageSlot[kPAEPageTableEntryCount]); 402 pae_page_table_entry* pageTable = (pae_page_table_entry*)vm_allocate_early( 403 args, areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0); 404 405 // clear the page table and put it in the page dir 406 memset(pageTable, 0, B_PAGE_SIZE); 407 408 phys_addr_t physicalTable = 0; 409 method->_EarlyQuery((addr_t)pageTable, &physicalTable); 410 411 pae_page_directory_entry* entry = PageDirEntryForAddress( 412 method->KernelVirtualPageDirs(), virtualBase); 413 PutPageTableInPageDir(entry, physicalTable, 414 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); 415 416 // init the pool structure and add the initial pool 417 Init(-1, pageTable, -1, (addr_t)virtualBase); 418 419 return B_OK; 420 } 421 422 423 status_t 424 X86PagingMethodPAE::PhysicalPageSlotPool::InitInitialPostArea( 425 kernel_args* args) 426 { 427 // create an area for the (already allocated) data 428 size_t areaSize = B_PAGE_SIZE 429 + sizeof(PhysicalPageSlot[kPAEPageTableEntryCount]); 430 void* temp = fPageTable; 431 area_id area = create_area("physical page pool", &temp, 432 B_EXACT_ADDRESS, areaSize, B_ALREADY_WIRED, 433 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); 434 if (area < B_OK) { 435 panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to " 436 "create area for physical page pool."); 437 return area; 438 } 439 fDataArea = area; 440 441 // create an area for the virtual address space 442 temp = (void*)fVirtualBase; 443 area = vm_create_null_area(VMAddressSpace::KernelID(), 444 "physical page pool space", &temp, B_EXACT_ADDRESS, 445 kPAEPageTableRange, 0); 446 if (area < B_OK) { 447 panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to " 448 "create area for physical page pool space."); 449 return area; 450 } 451 fVirtualArea = area; 452 453 return B_OK; 454 } 455 456 457 void 458 X86PagingMethodPAE::PhysicalPageSlotPool::Init(area_id dataArea, 459 pae_page_table_entry* pageTable, area_id virtualArea, addr_t virtualBase) 460 { 461 fDataArea = dataArea; 462 fVirtualArea = virtualArea; 463 fVirtualBase = virtualBase; 464 fPageTable = pageTable; 465 466 // init slot list 467 fSlots = (PhysicalPageSlot*)(fPageTable + kPAEPageTableEntryCount); 468 addr_t slotAddress = virtualBase; 469 for (uint32 i = 0; i < kPAEPageTableEntryCount; 470 i++, slotAddress += B_PAGE_SIZE) { 471 PhysicalPageSlot* slot = &fSlots[i]; 472 slot->next = slot + 1; 473 slot->pool = this; 474 slot->address = slotAddress; 475 } 476 477 fSlots[kPAEPageTableEntryCount - 1].next = NULL; 478 // terminate list 479 } 480 481 482 void 483 X86PagingMethodPAE::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress, 484 addr_t virtualAddress) 485 { 486 pae_page_table_entry& pte = fPageTable[ 487 (virtualAddress - fVirtualBase) / B_PAGE_SIZE]; 488 pte = (physicalAddress & X86_PAE_PTE_ADDRESS_MASK) 489 | X86_PAE_PTE_WRITABLE | X86_PAE_PTE_GLOBAL | X86_PAE_PTE_PRESENT; 490 491 invalidate_TLB(virtualAddress); 492 } 493 494 495 status_t 496 X86PagingMethodPAE::PhysicalPageSlotPool::AllocatePool( 497 X86LargePhysicalPageMapper::PhysicalPageSlotPool*& _pool) 498 { 499 // create the pool structure 500 PhysicalPageSlotPool* pool = new(std::nothrow) PhysicalPageSlotPool; 501 if (pool == NULL) 502 return B_NO_MEMORY; 503 ObjectDeleter<PhysicalPageSlotPool> poolDeleter(pool); 504 505 // create an area that can contain the page table and the slot 506 // structures 507 size_t areaSize = B_PAGE_SIZE 508 + sizeof(PhysicalPageSlot[kPAEPageTableEntryCount]); 509 void* data; 510 virtual_address_restrictions virtualRestrictions = {}; 511 virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS; 512 physical_address_restrictions physicalRestrictions = {}; 513 area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool", 514 PAGE_ALIGN(areaSize), B_FULL_LOCK, 515 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0, 516 &virtualRestrictions, &physicalRestrictions, &data); 517 if (dataArea < 0) 518 return dataArea; 519 520 // create the null area for the virtual address space 521 void* virtualBase; 522 area_id virtualArea = vm_create_null_area( 523 VMAddressSpace::KernelID(), "physical page pool space", 524 &virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, kPAEPageTableRange, 525 CREATE_AREA_PRIORITY_VIP); 526 if (virtualArea < 0) { 527 delete_area(dataArea); 528 return virtualArea; 529 } 530 531 // prepare the page table 532 memset(data, 0, B_PAGE_SIZE); 533 534 // get the page table's physical address 535 phys_addr_t physicalTable; 536 X86VMTranslationMapPAE* map = static_cast<X86VMTranslationMapPAE*>( 537 VMAddressSpace::Kernel()->TranslationMap()); 538 uint32 dummyFlags; 539 cpu_status state = disable_interrupts(); 540 map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags); 541 restore_interrupts(state); 542 543 // put the page table into the page directory 544 pae_page_directory_entry* pageDirEntry 545 = X86PagingMethodPAE::PageDirEntryForAddress( 546 map->PagingStructuresPAE()->VirtualPageDirs(), (addr_t)virtualBase); 547 PutPageTableInPageDir(pageDirEntry, physicalTable, 548 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); 549 550 // init the pool structure 551 pool->Init(dataArea, (pae_page_table_entry*)data, virtualArea, 552 (addr_t)virtualBase); 553 poolDeleter.Detach(); 554 _pool = pool; 555 return B_OK; 556 } 557 558 559 // #pragma mark - X86PagingMethodPAE 560 561 562 X86PagingMethodPAE::X86PagingMethodPAE() 563 : 564 fPhysicalPageMapper(NULL), 565 fKernelPhysicalPageMapper(NULL), 566 fFreePages(NULL), 567 fFreePagesCount(0) 568 { 569 mutex_init(&fFreePagesLock, "x86 PAE free pages"); 570 } 571 572 573 X86PagingMethodPAE::~X86PagingMethodPAE() 574 { 575 } 576 577 578 status_t 579 X86PagingMethodPAE::Init(kernel_args* args, 580 VMPhysicalPageMapper** _physicalPageMapper) 581 { 582 // switch to PAE 583 ToPAESwitcher(args).Switch(fKernelVirtualPageDirPointerTable, 584 fKernelPhysicalPageDirPointerTable, fEarlyPageStructures, 585 fEarlyPageStructuresSize, fKernelVirtualPageDirs, 586 fKernelPhysicalPageDirs, fFreeVirtualSlot, fFreeVirtualSlotPTE); 587 588 // create the initial pool for the physical page mapper 589 PhysicalPageSlotPool* pool 590 = new(&PhysicalPageSlotPool::sInitialPhysicalPagePool) 591 PhysicalPageSlotPool; 592 status_t error = pool->InitInitial(this, args); 593 if (error != B_OK) { 594 panic("X86PagingMethodPAE::Init(): Failed to create initial pool " 595 "for physical page mapper!"); 596 return error; 597 } 598 599 // create physical page mapper 600 large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper, 601 fKernelPhysicalPageMapper); 602 603 *_physicalPageMapper = fPhysicalPageMapper; 604 return B_OK; 605 } 606 607 608 status_t 609 X86PagingMethodPAE::InitPostArea(kernel_args* args) 610 { 611 // wrap the kernel paging structures in an area 612 area_id area = create_area("kernel paging structs", &fEarlyPageStructures, 613 B_EXACT_ADDRESS, fEarlyPageStructuresSize, B_ALREADY_WIRED, 614 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); 615 if (area < B_OK) 616 return area; 617 618 // let the initial page pool create areas for its structures 619 status_t error = PhysicalPageSlotPool::sInitialPhysicalPagePool 620 .InitInitialPostArea(args); 621 if (error != B_OK) 622 return error; 623 624 // The early physical page mapping mechanism is no longer needed. Unmap the 625 // slot. 626 *fFreeVirtualSlotPTE = 0; 627 invalidate_TLB(fFreeVirtualSlot); 628 629 fFreeVirtualSlotPTE = NULL; 630 fFreeVirtualSlot = 0; 631 632 return B_OK; 633 } 634 635 636 status_t 637 X86PagingMethodPAE::CreateTranslationMap(bool kernel, VMTranslationMap** _map) 638 { 639 X86VMTranslationMapPAE* map = new(std::nothrow) X86VMTranslationMapPAE; 640 if (map == NULL) 641 return B_NO_MEMORY; 642 643 status_t error = map->Init(kernel); 644 if (error != B_OK) { 645 delete map; 646 return error; 647 } 648 649 *_map = map; 650 return B_OK; 651 } 652 653 654 status_t 655 X86PagingMethodPAE::MapEarly(kernel_args* args, addr_t virtualAddress, 656 phys_addr_t physicalAddress, uint8 attributes, 657 page_num_t (*get_free_page)(kernel_args*)) 658 { 659 // check to see if a page table exists for this range 660 pae_page_directory_entry* pageDirEntry = PageDirEntryForAddress( 661 fKernelVirtualPageDirs, virtualAddress); 662 pae_page_table_entry* pageTable; 663 if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) { 664 // we need to allocate a page table 665 phys_addr_t physicalPageTable = get_free_page(args) * B_PAGE_SIZE; 666 667 TRACE("X86PagingMethodPAE::MapEarly(): asked for free page for " 668 "page table: %#" B_PRIxPHYSADDR "\n", physicalPageTable); 669 670 // put it in the page dir 671 PutPageTableInPageDir(pageDirEntry, physicalPageTable, attributes); 672 673 // zero it out 674 pageTable = _EarlyGetPageTable(physicalPageTable); 675 memset(pageTable, 0, B_PAGE_SIZE); 676 } else { 677 // table already exists -- map it 678 pageTable = _EarlyGetPageTable( 679 *pageDirEntry & X86_PAE_PDE_ADDRESS_MASK); 680 } 681 682 pae_page_table_entry* entry = pageTable 683 + virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount; 684 685 ASSERT_PRINT( 686 (*entry & X86_PAE_PTE_PRESENT) == 0, 687 "virtual address: %#" B_PRIxADDR ", pde: %#" B_PRIx64 688 ", existing pte: %#" B_PRIx64, virtualAddress, *pageDirEntry, *entry); 689 690 // now, fill in the pentry 691 PutPageTableEntryInTable(entry, physicalAddress, attributes, 0, 692 IS_KERNEL_ADDRESS(virtualAddress)); 693 694 return B_OK; 695 } 696 697 698 bool 699 X86PagingMethodPAE::IsKernelPageAccessible(addr_t virtualAddress, 700 uint32 protection) 701 { 702 // we can't check much without the physical page mapper 703 if (fPhysicalPageMapper == NULL) 704 return false; 705 706 // We only trust the kernel team's page directories. So switch to the 707 // kernel PDPT first. Always set it to make sure the TLBs don't contain 708 // obsolete data. 709 uint32 physicalPDPT = x86_read_cr3(); 710 x86_write_cr3(fKernelPhysicalPageDirPointerTable); 711 712 // get the PDPT entry for the address 713 pae_page_directory_pointer_table_entry pdptEntry = 0; 714 if (physicalPDPT == fKernelPhysicalPageDirPointerTable) { 715 pdptEntry = fKernelVirtualPageDirPointerTable[ 716 virtualAddress / kPAEPageDirRange]; 717 } else { 718 // map the original PDPT and get the entry 719 void* handle; 720 addr_t virtualPDPT; 721 status_t error = fPhysicalPageMapper->GetPageDebug(physicalPDPT, 722 &virtualPDPT, &handle); 723 if (error == B_OK) { 724 pdptEntry = ((pae_page_directory_pointer_table_entry*) 725 virtualPDPT)[virtualAddress / kPAEPageDirRange]; 726 fPhysicalPageMapper->PutPageDebug(virtualPDPT, handle); 727 } 728 } 729 730 // map the page dir and get the entry 731 pae_page_directory_entry pageDirEntry = 0; 732 if ((pdptEntry & X86_PAE_PDPTE_PRESENT) != 0) { 733 void* handle; 734 addr_t virtualPageDir; 735 status_t error = fPhysicalPageMapper->GetPageDebug( 736 pdptEntry & X86_PAE_PDPTE_ADDRESS_MASK, &virtualPageDir, &handle); 737 if (error == B_OK) { 738 pageDirEntry = ((pae_page_directory_entry*)virtualPageDir)[ 739 virtualAddress / kPAEPageTableRange % kPAEPageDirEntryCount]; 740 fPhysicalPageMapper->PutPageDebug(virtualPageDir, handle); 741 } 742 } 743 744 // map the page table and get the entry 745 pae_page_table_entry pageTableEntry = 0; 746 if ((pageDirEntry & X86_PAE_PDE_PRESENT) != 0) { 747 void* handle; 748 addr_t virtualPageTable; 749 status_t error = fPhysicalPageMapper->GetPageDebug( 750 pageDirEntry & X86_PAE_PDE_ADDRESS_MASK, &virtualPageTable, 751 &handle); 752 if (error == B_OK) { 753 pageTableEntry = ((pae_page_table_entry*)virtualPageTable)[ 754 virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount]; 755 fPhysicalPageMapper->PutPageDebug(virtualPageTable, handle); 756 } 757 } 758 759 // switch back to the original page directory 760 if (physicalPDPT != fKernelPhysicalPageDirPointerTable) 761 x86_write_cr3(physicalPDPT); 762 763 if ((pageTableEntry & X86_PAE_PTE_PRESENT) == 0) 764 return false; 765 766 // present means kernel-readable, so check for writable 767 return (protection & B_KERNEL_WRITE_AREA) == 0 768 || (pageTableEntry & X86_PAE_PTE_WRITABLE) != 0; 769 } 770 771 772 /*static*/ void 773 X86PagingMethodPAE::PutPageTableInPageDir(pae_page_directory_entry* entry, 774 phys_addr_t physicalTable, uint32 attributes) 775 { 776 *entry = (physicalTable & X86_PAE_PDE_ADDRESS_MASK) 777 | X86_PAE_PDE_PRESENT 778 | X86_PAE_PDE_WRITABLE 779 | X86_PAE_PDE_USER; 780 // TODO: We ignore the attributes of the page table -- for compatibility 781 // with BeOS we allow having user accessible areas in the kernel address 782 // space. This is currently being used by some drivers, mainly for the 783 // frame buffer. Our current real time data implementation makes use of 784 // this fact, too. 785 // We might want to get rid of this possibility one day, especially if 786 // we intend to port it to a platform that does not support this. 787 } 788 789 790 /*static*/ void 791 X86PagingMethodPAE::PutPageTableEntryInTable(pae_page_table_entry* entry, 792 phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType, 793 bool globalPage) 794 { 795 pae_page_table_entry page = (physicalAddress & X86_PAE_PTE_ADDRESS_MASK) 796 | X86_PAE_PTE_PRESENT | (globalPage ? X86_PAE_PTE_GLOBAL : 0) 797 | MemoryTypeToPageTableEntryFlags(memoryType); 798 799 // if the page is user accessible, it's automatically 800 // accessible in kernel space, too (but with the same 801 // protection) 802 if ((attributes & B_USER_PROTECTION) != 0) { 803 page |= X86_PAE_PTE_USER; 804 if ((attributes & B_WRITE_AREA) != 0) 805 page |= X86_PAE_PTE_WRITABLE; 806 if ((attributes & B_EXECUTE_AREA) == 0 807 && x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) { 808 page |= X86_PAE_PTE_NOT_EXECUTABLE; 809 } 810 } else if ((attributes & B_KERNEL_WRITE_AREA) != 0) 811 page |= X86_PAE_PTE_WRITABLE; 812 813 // put it in the page table 814 *(volatile pae_page_table_entry*)entry = page; 815 } 816 817 818 void* 819 X86PagingMethodPAE::Allocate32BitPage(phys_addr_t& _physicalAddress, 820 void*& _handle) 821 { 822 // get a free page 823 MutexLocker locker(fFreePagesLock); 824 vm_page* page; 825 if (fFreePages != NULL) { 826 page = fFreePages; 827 fFreePages = page->cache_next; 828 fFreePagesCount--; 829 locker.Unlock(); 830 } else { 831 // no pages -- allocate one 832 locker.Unlock(); 833 834 physical_address_restrictions restrictions = {}; 835 restrictions.high_address = 0x100000000LL; 836 page = vm_page_allocate_page_run(PAGE_STATE_UNUSED, 1, &restrictions, 837 VM_PRIORITY_SYSTEM); 838 if (page == NULL) 839 return NULL; 840 841 DEBUG_PAGE_ACCESS_END(page); 842 } 843 844 // map the page 845 phys_addr_t physicalAddress 846 = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE; 847 addr_t virtualAddress; 848 if (fPhysicalPageMapper->GetPage(physicalAddress, &virtualAddress, &_handle) 849 != B_OK) { 850 // mapping failed -- free page 851 locker.Lock(); 852 page->cache_next = fFreePages; 853 fFreePages = page; 854 fFreePagesCount++; 855 return NULL; 856 } 857 858 _physicalAddress = physicalAddress; 859 return (void*)virtualAddress; 860 } 861 862 863 void 864 X86PagingMethodPAE::Free32BitPage(void* address, phys_addr_t physicalAddress, 865 void* handle) 866 { 867 // unmap the page 868 fPhysicalPageMapper->PutPage((addr_t)address, handle); 869 870 // free it 871 vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE); 872 MutexLocker locker(fFreePagesLock); 873 if (fFreePagesCount < kMaxFree32BitPagesCount) { 874 // cache not full yet -- cache it 875 page->cache_next = fFreePages; 876 fFreePages = page; 877 fFreePagesCount++; 878 } else { 879 // cache full -- free it 880 locker.Unlock(); 881 DEBUG_PAGE_ACCESS_START(page); 882 vm_page_free(NULL, page); 883 } 884 } 885 886 887 bool 888 X86PagingMethodPAE::_EarlyQuery(addr_t virtualAddress, 889 phys_addr_t* _physicalAddress) 890 { 891 pae_page_directory_entry* pageDirEntry = PageDirEntryForAddress( 892 fKernelVirtualPageDirs, virtualAddress); 893 if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) { 894 // no pagetable here 895 return false; 896 } 897 898 pae_page_table_entry* entry = _EarlyGetPageTable( 899 *pageDirEntry & X86_PAE_PDE_ADDRESS_MASK) 900 + virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount; 901 if ((*entry & X86_PAE_PTE_PRESENT) == 0) { 902 // page mapping not valid 903 return false; 904 } 905 906 *_physicalAddress = *entry & X86_PAE_PTE_ADDRESS_MASK; 907 return true; 908 } 909 910 911 pae_page_table_entry* 912 X86PagingMethodPAE::_EarlyGetPageTable(phys_addr_t address) 913 { 914 *fFreeVirtualSlotPTE = (address & X86_PAE_PTE_ADDRESS_MASK) 915 | X86_PAE_PTE_PRESENT | X86_PAE_PTE_WRITABLE | X86_PAE_PTE_GLOBAL; 916 917 invalidate_TLB(fFreeVirtualSlot); 918 919 return (pae_page_table_entry*)fFreeVirtualSlot; 920 } 921 922 923 #endif // B_HAIKU_PHYSICAL_BITS == 64 924