/*
 * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/32bit/ARMVMTranslationMap32Bit.h"

#include <stdlib.h>
#include <string.h>

#include <int.h>
#include <thread.h>
#include <slab/Slab.h>
#include <smp.h>
#include <util/AutoLock.h>
#include <util/queue.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/32bit/ARMPagingMethod32Bit.h"
#include "paging/32bit/ARMPagingStructures32Bit.h"
#include "paging/arm_physical_page_mapper.h"


//#define TRACE_ARM_VM_TRANSLATION_MAP_32_BIT
#ifdef TRACE_ARM_VM_TRANSLATION_MAP_32_BIT
#   define TRACE(x...) dprintf(x)
#else
#   define TRACE(x...) ;
#endif


ARMVMTranslationMap32Bit::ARMVMTranslationMap32Bit()
    :
    fPagingStructures(NULL)
{
}


ARMVMTranslationMap32Bit::~ARMVMTranslationMap32Bit()
{
    if (fPagingStructures == NULL)
        return;

    if (fPageMapper != NULL)
        fPageMapper->Delete();

    if (fPagingStructures->pgdir_virt != NULL) {
        // cycle through and free all of the user space pgtables
        for (uint32 i = VADDR_TO_PDENT(USER_BASE);
                i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
            if ((fPagingStructures->pgdir_virt[i] & ARM_PDE_TYPE_MASK) != 0) {
                addr_t address = fPagingStructures->pgdir_virt[i]
                    & ARM_PDE_ADDRESS_MASK;
                vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
                if (!page)
                    panic("destroy_tmap: didn't find pgtable page\n");
                DEBUG_PAGE_ACCESS_START(page);
                vm_page_set_state(page, PAGE_STATE_FREE);
            }
        }
    }

    fPagingStructures->RemoveReference();
}


status_t
ARMVMTranslationMap32Bit::Init(bool kernel)
{
    TRACE("ARMVMTranslationMap32Bit::Init()\n");

    ARMVMTranslationMap::Init(kernel);

    fPagingStructures = new(std::nothrow) ARMPagingStructures32Bit;
    if (fPagingStructures == NULL)
        return B_NO_MEMORY;

    ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();

    if (!kernel) {
        // user
        // allocate a physical page mapper
        status_t error = method->PhysicalPageMapper()
            ->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
        if (error != B_OK)
            return error;

        // allocate the page directory
        page_directory_entry* virtualPageDir = (page_directory_entry*)memalign(
            B_PAGE_SIZE, B_PAGE_SIZE);
        if (virtualPageDir == NULL)
            return B_NO_MEMORY;

        // look up the page directory's physical address
        phys_addr_t physicalPageDir;
        vm_get_page_mapping(VMAddressSpace::KernelID(),
            (addr_t)virtualPageDir, &physicalPageDir);

        fPagingStructures->Init(virtualPageDir, physicalPageDir,
            method->KernelVirtualPageDirectory());
    } else {
        // kernel
        // get the physical page mapper
        fPageMapper = method->KernelPhysicalPageMapper();

        // we already know the kernel pgdir mapping
        fPagingStructures->Init(method->KernelVirtualPageDirectory(),
            method->KernelPhysicalPageDirectory(), NULL);
    }

    return B_OK;
}


size_t
ARMVMTranslationMap32Bit::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
    // If start == 0, the actual base address is not yet known to the
    // caller and we shall assume the worst case.
    if (start == 0) {
        // offset the range so it has the worst possible alignment
        start = 1023 * B_PAGE_SIZE;
        end += 1023 * B_PAGE_SIZE;
    }

    return VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start);
}


status_t
ARMVMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
    uint32 memoryType, vm_page_reservation* reservation)
{
    TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);

/*
    dprintf("pgdir at 0x%x\n", pgdir);
    dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
    dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
    dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
    dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
    dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
*/
    page_directory_entry* pd = fPagingStructures->pgdir_virt;

    // check to see if a page table exists for this range
    uint32 index = VADDR_TO_PDENT(va);
    if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
        phys_addr_t pgtable;
        vm_page *page;

        // we need to allocate a pgtable
        page = vm_page_allocate_page(reservation,
            PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

        DEBUG_PAGE_ACCESS_END(page);

        pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

        TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);

        // put it in the pgdir
        ARMPagingMethod32Bit::PutPageTableInPageDir(&pd[index], pgtable,
            attributes
                | ((attributes & B_USER_PROTECTION) != 0
                    ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

        // update any other page directories, if it maps kernel space
        if (index >= FIRST_KERNEL_PGDIR_ENT
            && index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
            ARMPagingStructures32Bit::UpdateAllPageDirs(index, pd[index]);
        }

        fMapCount++;
    }

    // now, fill in the page table entry
    Thread* thread = thread_get_current_thread();
    ThreadCPUPinner pinner(thread);

    page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
        pd[index] & ARM_PDE_ADDRESS_MASK);
    index = VADDR_TO_PTENT(va);

    ASSERT_PRINT((pt[index] & ARM_PTE_TYPE_MASK) == 0,
        "virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
        pt[index]);

    ARMPagingMethod32Bit::PutPageTableEntryInTable(&pt[index], pa, attributes,
        memoryType, fIsKernelMap);

    pinner.Unlock();

    // Note: We don't need to invalidate the TLB for this address, since the
    // entry was not present before and the TLB doesn't cache such entries.

    fMapCount++;

    return 0;
}


status_t
ARMVMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
{
    start = ROUNDDOWN(start, B_PAGE_SIZE);
    if (start >= end)
        return B_OK;

    TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);

    page_directory_entry *pd = fPagingStructures->pgdir_virt;

    do {
        int index = VADDR_TO_PDENT(start);
        if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
            // no page table here, move the start up to access the next page
            // table
            start = ROUNDUP(start + 1, kPageTableAlignment);
            continue;
        }

        Thread* thread = thread_get_current_thread();
        ThreadCPUPinner pinner(thread);

        page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
            pd[index] & ARM_PDE_ADDRESS_MASK);

        for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
                index++, start += B_PAGE_SIZE) {
            if ((pt[index] & ARM_PTE_TYPE_MASK) == 0) {
                // page mapping not valid
                continue;
            }

            TRACE("unmap_tmap: removing page 0x%lx\n", start);

            page_table_entry oldEntry
                = ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
                    ARM_PTE_TYPE_MASK);
            fMapCount--;

            if (true /* (oldEntry & ARM_PTE_ACCESSED) != 0 */) {
                // Note that we only need to invalidate the address if the
                // accessed flag was set, since only then the entry could have
                // been in any TLB.
                InvalidatePage(start);
            }
        }
    } while (start != 0 && start < end);

    return B_OK;
}


/*! Caller must have locked the cache of the page to be unmapped.
    This object shouldn't be locked.
*/
status_t
ARMVMTranslationMap32Bit::UnmapPage(VMArea* area, addr_t address,
    bool updatePageQueue)
{
    ASSERT(address % B_PAGE_SIZE == 0);

    page_directory_entry* pd = fPagingStructures->pgdir_virt;

    TRACE("ARMVMTranslationMap32Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);

    RecursiveLocker locker(fLock);

    int index = VADDR_TO_PDENT(address);
    if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
        return B_ENTRY_NOT_FOUND;

    ThreadCPUPinner pinner(thread_get_current_thread());

    page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
        pd[index] & ARM_PDE_ADDRESS_MASK);

    index = VADDR_TO_PTENT(address);
    page_table_entry oldEntry = ARMPagingMethod32Bit::ClearPageTableEntry(
        &pt[index]);

    pinner.Unlock();

    if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
        // page mapping not valid
        return B_ENTRY_NOT_FOUND;
    }

    fMapCount--;

    if (true /* (oldEntry & ARM_PTE_ACCESSED) != 0 */) { // XXX IRA
        // Note that we only need to invalidate the address if the accessed
        // flag was set, since only then the entry could have been in any TLB.
        InvalidatePage(address);
        Flush();

        // NOTE: Between clearing the page table entry and Flush() other
        // processors (actually even this processor with another thread of the
        // same team) could still access the page in question via their cached
        // entry. We can obviously lose a modified flag in this case, with the
        // effect that the page looks unmodified (and might thus be recycled),
        // but is actually modified.
        // In most cases this is harmless, but for vm_remove_all_page_mappings()
        // this is actually a problem.
        // Interestingly FreeBSD seems to ignore this problem as well
        // (cf. pmap_remove_all()), unless I've missed something.
    }

    locker.Detach();
        // PageUnmapped() will unlock for us

    PageUnmapped(area, (oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
        true /* (oldEntry & ARM_PTE_ACCESSED) != 0 */,
        true /* (oldEntry & ARM_PTE_DIRTY) != 0 */, updatePageQueue);

    return B_OK;
}


void
ARMVMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
    bool updatePageQueue)
{
    if (size == 0)
        return;

    addr_t start = base;
    addr_t end = base + size - 1;

    TRACE("ARMVMTranslationMap32Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
        B_PRIxADDR ")\n", area, start, end);

    page_directory_entry* pd = fPagingStructures->pgdir_virt;

    VMAreaMappings queue;

    RecursiveLocker locker(fLock);

    do {
        int index = VADDR_TO_PDENT(start);
        if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
            // no page table here, move the start up to access the next page
            // table
            start = ROUNDUP(start + 1, kPageTableAlignment);
            continue;
        }

        Thread* thread = thread_get_current_thread();
        ThreadCPUPinner pinner(thread);

        page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
            pd[index] & ARM_PDE_ADDRESS_MASK);

        for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
                index++, start += B_PAGE_SIZE) {
            page_table_entry oldEntry
                = ARMPagingMethod32Bit::ClearPageTableEntry(&pt[index]);
            if ((oldEntry & ARM_PTE_TYPE_MASK) == 0)
                continue;

            fMapCount--;

            if (true /* (oldEntry & ARM_PTE_ACCESSED) != 0 */) { // XXX IRA
                // Note that we only need to invalidate the address if the
                // accessed flag was set, since only then the entry could have
                // been in any TLB.
                InvalidatePage(start);
            }

            if (area->cache_type != CACHE_TYPE_DEVICE) {
                // get the page
                vm_page* page = vm_lookup_page(
                    (oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
                ASSERT(page != NULL);

                DEBUG_PAGE_ACCESS_START(page);

                // transfer the accessed/dirty flags to the page
                if (true /* (oldEntry & ARM_PTE_ACCESSED) != 0 */) // XXX IRA
                    page->accessed = true;
                if (true /* (oldEntry & ARM_PTE_DIRTY) != 0 */)
                    page->modified = true;

                // remove the mapping object/decrement the wired_count of the
                // page
                if (area->wiring == B_NO_LOCK) {
                    vm_page_mapping* mapping = NULL;
                    vm_page_mappings::Iterator iterator
                        = page->mappings.GetIterator();
                    while ((mapping = iterator.Next()) != NULL) {
                        if (mapping->area == area)
                            break;
                    }

                    ASSERT(mapping != NULL);

                    area->mappings.Remove(mapping);
                    page->mappings.Remove(mapping);
                    queue.Add(mapping);
                } else
                    page->DecrementWiredCount();

                if (!page->IsMapped()) {
                    atomic_add(&gMappedPagesCount, -1);

                    if (updatePageQueue) {
                        if (page->Cache()->temporary)
                            vm_page_set_state(page, PAGE_STATE_INACTIVE);
                        else if (page->modified)
                            vm_page_set_state(page, PAGE_STATE_MODIFIED);
                        else
                            vm_page_set_state(page, PAGE_STATE_CACHED);
                    }
                }

                DEBUG_PAGE_ACCESS_END(page);
            }
        }

        Flush();
            // flush explicitly, since we directly use the lock
    } while (start != 0 && start < end);

    // TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
    // really critical, since in all cases this method is used for, the
    // unmapped area range is unmapped for good (resized/cut) and the pages
    // will likely be freed.

    locker.Unlock();

    // free removed mappings
    bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
    uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
        | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
    while (vm_page_mapping* mapping = queue.RemoveHead())
        object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


void
ARMVMTranslationMap32Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
    bool ignoreTopCachePageFlags)
{
    if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
        ARMVMTranslationMap32Bit::UnmapPages(area, area->Base(), area->Size(),
            true);
        return;
    }

    bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

    page_directory_entry* pd = fPagingStructures->pgdir_virt;

    RecursiveLocker locker(fLock);

    VMAreaMappings mappings;
    mappings.MoveFrom(&area->mappings);

    for (VMAreaMappings::Iterator it = mappings.GetIterator();
            vm_page_mapping* mapping = it.Next();) {
        vm_page* page = mapping->page;
        page->mappings.Remove(mapping);

        VMCache* cache = page->Cache();

        bool pageFullyUnmapped = false;
        if (!page->IsMapped()) {
            atomic_add(&gMappedPagesCount, -1);
            pageFullyUnmapped = true;
        }

        if (unmapPages || cache != area->cache) {
            addr_t address = area->Base()
                + ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

            int index = VADDR_TO_PDENT(address);
            if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
                panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
                    "has no page dir entry", page, area, address);
                continue;
            }

            ThreadCPUPinner pinner(thread_get_current_thread());

            page_table_entry* pt
                = (page_table_entry*)fPageMapper->GetPageTableAt(
                    pd[index] & ARM_PDE_ADDRESS_MASK);
            page_table_entry oldEntry
                = ARMPagingMethod32Bit::ClearPageTableEntry(
                    &pt[VADDR_TO_PTENT(address)]);

            pinner.Unlock();

            if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
                panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
                    "has no page table entry", page, area, address);
                continue;
            }

            // transfer the accessed/dirty flags to the page and invalidate
            // the mapping, if necessary
            if (true /* (oldEntry & ARM_PTE_ACCESSED) != 0 */) { // XXX IRA
                page->accessed = true;

                if (!deletingAddressSpace)
                    InvalidatePage(address);
            }

            if (true /* (oldEntry & ARM_PTE_DIRTY) != 0 */)
                page->modified = true;

            if (pageFullyUnmapped) {
                DEBUG_PAGE_ACCESS_START(page);

                if (cache->temporary)
                    vm_page_set_state(page, PAGE_STATE_INACTIVE);
                else if (page->modified)
                    vm_page_set_state(page, PAGE_STATE_MODIFIED);
                else
                    vm_page_set_state(page, PAGE_STATE_CACHED);

                DEBUG_PAGE_ACCESS_END(page);
            }
        }

        fMapCount--;
    }

    Flush();
        // flush explicitly, since we directly use the lock

    locker.Unlock();

    bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
    uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
        | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
    while (vm_page_mapping* mapping = mappings.RemoveHead())
        object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


status_t
ARMVMTranslationMap32Bit::Query(addr_t va, phys_addr_t *_physical,
    uint32 *_flags)
{
    // default the flags to not present
    *_flags = 0;
    *_physical = 0;

    int index = VADDR_TO_PDENT(va);
    page_directory_entry *pd = fPagingStructures->pgdir_virt;
    if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
        // no pagetable here
        return B_OK;
    }

    Thread* thread = thread_get_current_thread();
    ThreadCPUPinner pinner(thread);

    page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
        pd[index] & ARM_PDE_ADDRESS_MASK);
    page_table_entry entry = pt[VADDR_TO_PTENT(va)];

    if ((entry & ARM_PTE_TYPE_MASK) != 0)
        *_physical = (entry & ARM_PTE_ADDRESS_MASK) | VADDR_TO_PGOFF(va);

#if 0 // IRA
    // read in the page state flags
    if ((entry & ARM_PTE_USER) != 0) {
        *_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
            | B_READ_AREA;
    }

    *_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
        | B_KERNEL_READ_AREA
        | ((entry & ARM_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
        | ((entry & ARM_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
        | ((entry & ARM_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
#else
    *_flags = B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA;
    if (*_physical != 0)
        *_flags |= PAGE_PRESENT;
#endif
    pinner.Unlock();

    TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);

    return B_OK;
}


status_t
ARMVMTranslationMap32Bit::QueryInterrupt(addr_t va, phys_addr_t *_physical,
    uint32 *_flags)
{
    *_flags = 0;
    *_physical = 0;

    int index = VADDR_TO_PDENT(va);
    page_directory_entry* pd = fPagingStructures->pgdir_virt;
    if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
        // no pagetable here
        return B_OK;
    }

    // map page table entry
    page_table_entry* pt = (page_table_entry*)ARMPagingMethod32Bit::Method()
        ->PhysicalPageMapper()->InterruptGetPageTableAt(
            pd[index] & ARM_PDE_ADDRESS_MASK);
    page_table_entry entry = pt[VADDR_TO_PTENT(va)];

    if ((entry & ARM_PTE_TYPE_MASK) != 0)
        *_physical = (entry & ARM_PTE_ADDRESS_MASK) | VADDR_TO_PGOFF(va);

#if 0
    // read in the page state flags
    if ((entry & ARM_PTE_USER) != 0) {
        *_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
            | B_READ_AREA;
    }

    *_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
        | B_KERNEL_READ_AREA
        | ((entry & ARM_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
        | ((entry & ARM_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
        | ((entry & ARM_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
#else
    *_flags = B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA;
    if (*_physical != 0)
        *_flags |= PAGE_PRESENT;
#endif
    return B_OK;
}


status_t
ARMVMTranslationMap32Bit::Protect(addr_t start, addr_t end, uint32 attributes,
    uint32 memoryType)
{
    start = ROUNDDOWN(start, B_PAGE_SIZE);
    if (start >= end)
        return B_OK;

    TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
        attributes);
#if 0 // IRA
    // compute protection flags
    uint32 newProtectionFlags = 0;
    if ((attributes & B_USER_PROTECTION) != 0) {
        newProtectionFlags = ARM_PTE_USER;
        if ((attributes & B_WRITE_AREA) != 0)
            newProtectionFlags |= ARM_PTE_WRITABLE;
    } else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
        newProtectionFlags = ARM_PTE_WRITABLE;

    page_directory_entry *pd = fPagingStructures->pgdir_virt;

    do {
        int index = VADDR_TO_PDENT(start);
        if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
            // no page table here, move the start up to access the next page
            // table
            start = ROUNDUP(start + 1, kPageTableAlignment);
            continue;
        }

        Thread* thread = thread_get_current_thread();
        ThreadCPUPinner pinner(thread);

        page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
            pd[index] & ARM_PDE_ADDRESS_MASK);

        for (index = VADDR_TO_PTENT(start); index < 256 && start < end;
                index++, start += B_PAGE_SIZE) {
            page_table_entry entry = pt[index];
            if ((entry & ARM_PTE_PRESENT) == 0) {
                // page mapping not valid
                continue;
            }

            TRACE("protect_tmap: protect page 0x%lx\n", start);

            // set the new protection flags -- we want to do that atomically,
            // without changing the accessed or dirty flag
            page_table_entry oldEntry;
            while (true) {
                oldEntry = ARMPagingMethod32Bit::TestAndSetPageTableEntry(
                    &pt[index],
                    (entry & ~(ARM_PTE_PROTECTION_MASK
                            | ARM_PTE_MEMORY_TYPE_MASK))
                        | newProtectionFlags
                        | ARMPagingMethod32Bit::MemoryTypeToPageTableEntryFlags(
                            memoryType),
                    entry);
                if (oldEntry == entry)
                    break;
                entry = oldEntry;
            }

            if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
                // Note that we only need to invalidate the address if the
                // accessed flag was set, since only then the entry could have
                // been in any TLB.
                InvalidatePage(start);
            }
        }
    } while (start != 0 && start < end);
#endif
    return B_OK;
}


status_t
ARMVMTranslationMap32Bit::ClearFlags(addr_t va, uint32 flags)
{
    int index = VADDR_TO_PDENT(va);
    page_directory_entry* pd = fPagingStructures->pgdir_virt;
    if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
        // no pagetable here
        return B_OK;
    }
#if 0 // IRA
    uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? ARM_PTE_DIRTY : 0)
        | ((flags & PAGE_ACCESSED) ? ARM_PTE_ACCESSED : 0);
#else
    uint32 flagsToClear = 0;
#endif
    Thread* thread = thread_get_current_thread();
    ThreadCPUPinner pinner(thread);

    page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
        pd[index] & ARM_PDE_ADDRESS_MASK);
    index = VADDR_TO_PTENT(va);

    // clear out the flags we've been requested to clear
    page_table_entry oldEntry
        = ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
            flagsToClear);

    pinner.Unlock();

    // XXX IRA: if ((oldEntry & flagsToClear) != 0)
    InvalidatePage(va);

    return B_OK;
}


bool
ARMVMTranslationMap32Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
    bool unmapIfUnaccessed, bool& _modified)
{
    ASSERT(address % B_PAGE_SIZE == 0);

    page_directory_entry* pd = fPagingStructures->pgdir_virt;

    TRACE("ARMVMTranslationMap32Bit::ClearAccessedAndModified(%#" B_PRIxADDR
        ")\n", address);

    RecursiveLocker locker(fLock);

    int index = VADDR_TO_PDENT(address);
    if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
        return false;

    ThreadCPUPinner pinner(thread_get_current_thread());

    page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
        pd[index] & ARM_PDE_ADDRESS_MASK);

    index = VADDR_TO_PTENT(address);

    // perform the deed
    page_table_entry oldEntry;

    if (unmapIfUnaccessed) {
        while (true) {
            oldEntry = pt[index];
            if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
                // page mapping not valid
                return false;
            }
#if 0 // IRA
            if (oldEntry & ARM_PTE_ACCESSED) {
                // page was accessed -- just clear the flags
                oldEntry = ARMPagingMethod32Bit::ClearPageTableEntryFlags(
                    &pt[index], ARM_PTE_ACCESSED | ARM_PTE_DIRTY);
                break;
            }
#endif
            // page hasn't been accessed -- unmap it
            if (ARMPagingMethod32Bit::TestAndSetPageTableEntry(&pt[index], 0,
                    oldEntry) == oldEntry) {
                break;
            }

            // something changed -- check again
        }
    } else {
#if 0 // IRA
        oldEntry = ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
            ARM_PTE_ACCESSED | ARM_PTE_DIRTY);
#else
        oldEntry = pt[index];
#endif
    }

    pinner.Unlock();

    _modified = true /* (oldEntry & ARM_PTE_DIRTY) != 0 */; // XXX IRA

    if (true /* (oldEntry & ARM_PTE_ACCESSED) != 0 */) {
        // Note that we only need to invalidate the address if the accessed
        // flag was set, since only then the entry could have been in any TLB.
        InvalidatePage(address);

        Flush();

        return true;
    }

    if (!unmapIfUnaccessed)
        return false;

    // We have unmapped the address. Do the "high level" stuff.

    fMapCount--;

    locker.Detach();
        // UnaccessedPageUnmapped() will unlock for us

    UnaccessedPageUnmapped(area,
        (oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

    return false;
}


ARMPagingStructures*
ARMVMTranslationMap32Bit::PagingStructures() const
{
    return fPagingStructures;
}