1c917cd62SIthamar R. Adema /* 23091264bSFrançois Revol * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl 34535495dSIngo Weinhold * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de. 4c917cd62SIthamar R. Adema * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved. 5c917cd62SIthamar R. Adema * Distributed under the terms of the MIT License. 6c917cd62SIthamar R. Adema * 7c917cd62SIthamar R. Adema * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved. 8c917cd62SIthamar R. Adema * Distributed under the terms of the NewOS License. 9c917cd62SIthamar R. Adema */ 10c917cd62SIthamar R. Adema 11c917cd62SIthamar R. Adema 12c917cd62SIthamar R. Adema #include "paging/32bit/ARMVMTranslationMap32Bit.h" 13c917cd62SIthamar R. Adema 14c917cd62SIthamar R. Adema #include <stdlib.h> 15c917cd62SIthamar R. Adema #include <string.h> 16c917cd62SIthamar R. Adema 17c917cd62SIthamar R. Adema #include <int.h> 18c917cd62SIthamar R. Adema #include <thread.h> 19c917cd62SIthamar R. Adema #include <slab/Slab.h> 20c917cd62SIthamar R. Adema #include <smp.h> 21c917cd62SIthamar R. Adema #include <util/AutoLock.h> 22c917cd62SIthamar R. Adema #include <util/queue.h> 23c917cd62SIthamar R. Adema #include <vm/vm_page.h> 24c917cd62SIthamar R. Adema #include <vm/vm_priv.h> 25c917cd62SIthamar R. Adema #include <vm/VMAddressSpace.h> 26c917cd62SIthamar R. Adema #include <vm/VMCache.h> 27c917cd62SIthamar R. Adema 28c917cd62SIthamar R. Adema #include "paging/32bit/ARMPagingMethod32Bit.h" 29c917cd62SIthamar R. Adema #include "paging/32bit/ARMPagingStructures32Bit.h" 30c917cd62SIthamar R. Adema #include "paging/arm_physical_page_mapper.h" 31c917cd62SIthamar R. Adema 32c917cd62SIthamar R. Adema 335707f251SIthamar R. Adema //#define TRACE_ARM_VM_TRANSLATION_MAP_32_BIT 34c917cd62SIthamar R. Adema #ifdef TRACE_ARM_VM_TRANSLATION_MAP_32_BIT 35c917cd62SIthamar R. Adema # define TRACE(x...) dprintf(x) 36c917cd62SIthamar R. Adema #else 37c917cd62SIthamar R. 
#	define TRACE(x...) ;
#endif


ARMVMTranslationMap32Bit::ARMVMTranslationMap32Bit()
	:
	fPagingStructures(NULL)
{
}


/*!	Tears the map down: frees all user-space page tables referenced by the
	page directory and drops the reference to the paging structures.
	The kernel map shares its structures, so only the reference is released
	in that case (pgdir_virt is only scanned when it was set by Init()).
*/
ARMVMTranslationMap32Bit::~ARMVMTranslationMap32Bit()
{
	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL)
		fPageMapper->Delete();

	if (fPagingStructures->pgdir_virt != NULL) {
		// cycle through and free all of the user space pgtables
		for (uint32 i = VADDR_TO_PDENT(USER_BASE);
			i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
			if ((fPagingStructures->pgdir_virt[i] & ARM_PDE_TYPE_MASK) != 0) {
				addr_t address = fPagingStructures->pgdir_virt[i]
					& ARM_PDE_ADDRESS_MASK;
				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
				if (!page)
					panic("destroy_tmap: didn't find pgtable page\n");
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
		}
	}

	fPagingStructures->RemoveReference();
}


/*!	Initializes the translation map.
	\param kernel \c true for the kernel's translation map. The kernel map
		reuses the method's global page directory and physical page mapper;
		a user map allocates its own page directory (page-aligned via
		memalign()) and its own per-map physical page mapper.
	\return \c B_OK on success, \c B_NO_MEMORY or a page mapper creation
		error otherwise.
*/
status_t
ARMVMTranslationMap32Bit::Init(bool kernel)
{
	TRACE("ARMVMTranslationMap32Bit::Init()\n");

	ARMVMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) ARMPagingStructures32Bit;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();

	if (!kernel) {
		// user
		// allocate a physical page mapper
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// allocate the page directory
		// NOTE(review): B_PAGE_SIZE alignment/size is assumed sufficient for
		// the ARM first-level table here -- a full ARM L1 table is 16KB;
		// verify against how VADDR_TO_PDENT/pgdir are set up in this port.
		page_directory_entry* virtualPageDir = (page_directory_entry*)memalign(
			B_PAGE_SIZE, B_PAGE_SIZE);
		if (virtualPageDir == NULL)
			return B_NO_MEMORY;

		// look up the page directory's physical address
		phys_addr_t physicalPageDir;
		vm_get_page_mapping(VMAddressSpace::KernelID(),
			(addr_t)virtualPageDir, &physicalPageDir);

		fPagingStructures->Init(virtualPageDir, physicalPageDir,
			method->KernelVirtualPageDirectory());
	} else {
		// kernel
		// get the physical page mapper
		fPageMapper = method->KernelPhysicalPageMapper();

		// we already know the kernel pgdir mapping
		fPagingStructures->Init(method->KernelVirtualPageDirectory(),
			method->KernelPhysicalPageDirectory(), NULL);
	}

	return B_OK;
}


/*!	Returns the maximum number of page table pages that could be needed to
	map the given address range. With \a start == 0 the caller does not yet
	know the base address, so the worst-case alignment is assumed.
*/
size_t
ARMVMTranslationMap32Bit::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// If start == 0, the actual base address is not yet known to the caller and
	// we shall assume the worst case.
	if (start == 0) {
		// offset the range so it has the worst possible alignment
		start = 1023 * B_PAGE_SIZE;
		end += 1023 * B_PAGE_SIZE;
	}

	return VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start);
}


/*!	Maps the physical page \a pa at virtual address \a va, allocating a page
	table from \a reservation first if the covering page directory entry is
	still empty. Newly created kernel-space page tables are propagated to all
	other page directories. The caller must hold the map's lock.
*/
status_t
ARMVMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
	uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);

/*
	dprintf("pgdir at 0x%x\n", pgdir);
	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
*/
	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	// check to see if a page table exists for this range
	uint32 index = VADDR_TO_PDENT(va);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		phys_addr_t pgtable;
		vm_page *page;

		// we need to allocate a pgtable
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);

		// put it in the pgdir
		ARMPagingMethod32Bit::PutPageTableInPageDir(&pd[index], pgtable,
			attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

		// update any other page directories, if it maps kernel space
		if (index >= FIRST_KERNEL_PGDIR_ENT
			&& index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
			ARMPagingStructures32Bit::UpdateAllPageDirs(index, pd[index]);
		}

		// the new page table itself counts as a mapping, too
		fMapCount++;
	}

	// now, fill in the pentry
	// pin the thread to this CPU while touching the temporarily mapped table
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	ASSERT_PRINT((pt[index] & ARM_PTE_TYPE_MASK) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
		pt[index]);

	ARMPagingMethod32Bit::PutPageTableEntryInTable(&pt[index], pa, attributes,
		memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return 0;
}


/*!	Unmaps the page range [start, end) by clearing the type bits of each
	valid page table entry; skips over page-directory slots that have no
	page table. Invalidation is queued via InvalidatePage() and flushed by
	the caller/locking protocol.
*/
status_t
ARMVMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		// ARM coarse second-level tables hold 256 entries
		for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if ((pt[index] & ARM_PTE_TYPE_MASK) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("unmap_tmap: removing page 0x%lx\n", start);

			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
					ARM_PTE_TYPE_MASK);
			fMapCount--;

			// XXX IRA: accessed tracking not implemented yet, so always
			// invalidate instead of checking the (commented-out) flag
			if (true /* (oldEntry & ARM_PTE_ACCESSED) != 0*/) {
				// Note, that we only need to invalidate the address, if the
				// accessed flags was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);
		// "start != 0" guards against address wrap-around at the top of the
		// address space

	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/
status_t
ARMVMTranslationMap32Bit::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	TRACE("ARMVMTranslationMap32Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);

	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
		return B_ENTRY_NOT_FOUND;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);
	page_table_entry oldEntry = ARMPagingMethod32Bit::ClearPageTableEntry(
		&pt[index]);

	pinner.Unlock();

	if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
		// page mapping not valid
		return B_ENTRY_NOT_FOUND;
	}

	fMapCount--;


	if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) {	// XXX IRA
		// Note, that we only need to invalidate the address, if the
		// accessed flags was set, since only then the entry could have been
		// in any TLB.
		InvalidatePage(address);
		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of the
		// same team) could still access the page in question via their cached
		// entry. We can obviously lose a modified flag in this case, with the
		// effect that the page looks unmodified (and might thus be recycled),
		// but is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// this is actually a problem.
		// Interestingly FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	// XXX IRA: accessed/dirty are hard-wired to true until hardware flag
	// tracking is implemented for this port
	PageUnmapped(area, (oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/, true /*(oldEntry & ARM_PTE_DIRTY) != 0*/,
		updatePageQueue);

	return B_OK;
}


/*!	Unmaps all pages of \a area within [base, base + size), transferring the
	(currently hard-wired) accessed/dirty state to the vm_pages, collecting
	the mapping objects of B_NO_LOCK areas and freeing them after the lock
	is released.
*/
void
ARMVMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("ARMVMTranslationMap32Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	VMAreaMappings queue;

	RecursiveLocker locker(fLock);

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		// ARM coarse second-level tables hold 256 entries
		for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
				index++, start += B_PAGE_SIZE) {
			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntry(&pt[index]);
			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0)
				continue;

			fMapCount--;

			if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) {	// XXX IRA
				// Note, that we only need to invalidate the address, if the
				// accessed flags was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				// (hard-wired true until HW flag tracking exists)
				if (/*(oldEntry & ARM_PTE_ACCESSED) != 0*/ true)	// XXX IRA
					page->accessed = true;
				if (/*(oldEntry & ARM_PTE_DIRTY) != 0 */ true)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical here, as in all cases this method is used, the unmapped
	// area range is unmapped for good (resized/cut) and the pages will likely
	// be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


/*!	Unmaps the whole \a area. Device/wired areas are delegated to
	UnmapPages(); otherwise the area's mapping list is walked directly,
	which is cheaper than iterating over the address range. Page table
	entries are only cleared when \a unmapPages requires it; TLB entries
	are not invalidated when the whole address space is going away.
*/
void
ARMVMTranslationMap32Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		ARMVMTranslationMap32Bit::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	RecursiveLocker locker(fLock);

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			int index = VADDR_TO_PDENT(address);
			if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page dir entry", page, area, address);
				continue;
			}

			ThreadCPUPinner pinner(thread_get_current_thread());

			page_table_entry* pt
				= (page_table_entry*)fPageMapper->GetPageTableAt(
					pd[index] & ARM_PDE_ADDRESS_MASK);
			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntry(
					&pt[VADDR_TO_PTENT(address)]);

			pinner.Unlock();

			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			// (hard-wired true until HW flag tracking exists)
			if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) {	// XXX IRA
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if (true /*(oldEntry & ARM_PTE_DIRTY) != 0*/)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitely, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


/*!	Looks up the physical address and protection/state flags for \a va.
	Returns B_OK with *_flags == 0 when no page table covers the address.
	Page state flags are stubbed out for now (see the #if 0 block); the
	method reports kernel R/W and PAGE_PRESENT whenever a non-zero physical
	address was found.
*/
status_t
ARMVMTranslationMap32Bit::Query(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry *pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	// NOTE(review): ARM_PDE_ADDRESS_MASK is applied to a page *table* entry
	// here -- presumably this should be ARM_PTE_ADDRESS_MASK; verify against
	// the descriptor layout before changing.
	*_physical = (entry & ARM_PDE_ADDRESS_MASK)
		| VADDR_TO_PGOFF(va);

#if 0 //IRA
	// read in the page state flags
	if ((entry & ARM_PTE_USER) != 0) {
		*_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & ARM_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & ARM_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & ARM_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
#else
	*_flags = B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA;
	if (*_physical != 0)
		*_flags |= PAGE_PRESENT;
#endif
	pinner.Unlock();

	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);

	return B_OK;
}


/*!	Interrupt-safe variant of Query(): uses the physical page mapper's
	InterruptGetPageTableAt() instead of pinning the thread, so it may be
	called with interrupts disabled. Flag reporting is stubbed the same way
	as in Query(), except PAGE_PRESENT is reported unconditionally here.
*/
status_t
ARMVMTranslationMap32Bit::QueryInterrupt(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}

	// map page table entry
	page_table_entry* pt = (page_table_entry*)ARMPagingMethod32Bit::Method()
		->PhysicalPageMapper()->InterruptGetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	// NOTE(review): same ARM_PDE_ADDRESS_MASK-on-a-PTE concern as in Query()
	*_physical = (entry & ARM_PDE_ADDRESS_MASK)
		| VADDR_TO_PGOFF(va);
#if 0
	// read in the page state flags
	if ((entry & ARM_PTE_USER) != 0) {
		*_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & ARM_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & ARM_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & ARM_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
#else
	*_flags = B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA | PAGE_PRESENT;
#endif
	return B_OK;
}

Adema status_t 647c917cd62SIthamar R. Adema ARMVMTranslationMap32Bit::Protect(addr_t start, addr_t end, uint32 attributes, 648c917cd62SIthamar R. Adema uint32 memoryType) 649c917cd62SIthamar R. Adema { 650c917cd62SIthamar R. Adema start = ROUNDDOWN(start, B_PAGE_SIZE); 651c917cd62SIthamar R. Adema if (start >= end) 652c917cd62SIthamar R. Adema return B_OK; 653c917cd62SIthamar R. Adema 654c917cd62SIthamar R. Adema TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end, 655c917cd62SIthamar R. Adema attributes); 656c917cd62SIthamar R. Adema #if 0 //IRA 657c917cd62SIthamar R. Adema // compute protection flags 658c917cd62SIthamar R. Adema uint32 newProtectionFlags = 0; 659c917cd62SIthamar R. Adema if ((attributes & B_USER_PROTECTION) != 0) { 660c917cd62SIthamar R. Adema newProtectionFlags = ARM_PTE_USER; 661c917cd62SIthamar R. Adema if ((attributes & B_WRITE_AREA) != 0) 662c917cd62SIthamar R. Adema newProtectionFlags |= ARM_PTE_WRITABLE; 663c917cd62SIthamar R. Adema } else if ((attributes & B_KERNEL_WRITE_AREA) != 0) 664c917cd62SIthamar R. Adema newProtectionFlags = ARM_PTE_WRITABLE; 665c917cd62SIthamar R. Adema 666c917cd62SIthamar R. Adema page_directory_entry *pd = fPagingStructures->pgdir_virt; 667c917cd62SIthamar R. Adema 668c917cd62SIthamar R. Adema do { 669c917cd62SIthamar R. Adema int index = VADDR_TO_PDENT(start); 670c917cd62SIthamar R. Adema if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) { 671c917cd62SIthamar R. Adema // no page table here, move the start up to access the next page 672c917cd62SIthamar R. Adema // table 673c917cd62SIthamar R. Adema start = ROUNDUP(start + 1, kPageTableAlignment); 674c917cd62SIthamar R. Adema continue; 675c917cd62SIthamar R. Adema } 676c917cd62SIthamar R. Adema 6774535495dSIngo Weinhold Thread* thread = thread_get_current_thread(); 678c917cd62SIthamar R. Adema ThreadCPUPinner pinner(thread); 679c917cd62SIthamar R. Adema 680c917cd62SIthamar R. 
Adema page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt( 681c917cd62SIthamar R. Adema pd[index] & ARM_PDE_ADDRESS_MASK); 682c917cd62SIthamar R. Adema 683*fed8bb7dSMichael Lotz for (index = VADDR_TO_PTENT(start); index < 256 && start < end; 684c917cd62SIthamar R. Adema index++, start += B_PAGE_SIZE) { 685c917cd62SIthamar R. Adema page_table_entry entry = pt[index]; 686c917cd62SIthamar R. Adema if ((entry & ARM_PTE_PRESENT) == 0) { 687c917cd62SIthamar R. Adema // page mapping not valid 688c917cd62SIthamar R. Adema continue; 689c917cd62SIthamar R. Adema } 690c917cd62SIthamar R. Adema 691c917cd62SIthamar R. Adema TRACE("protect_tmap: protect page 0x%lx\n", start); 692c917cd62SIthamar R. Adema 693c917cd62SIthamar R. Adema // set the new protection flags -- we want to do that atomically, 694c917cd62SIthamar R. Adema // without changing the accessed or dirty flag 695c917cd62SIthamar R. Adema page_table_entry oldEntry; 696c917cd62SIthamar R. Adema while (true) { 697c917cd62SIthamar R. Adema oldEntry = ARMPagingMethod32Bit::TestAndSetPageTableEntry( 698c917cd62SIthamar R. Adema &pt[index], 699c917cd62SIthamar R. Adema (entry & ~(ARM_PTE_PROTECTION_MASK 700c917cd62SIthamar R. Adema | ARM_PTE_MEMORY_TYPE_MASK)) 701c917cd62SIthamar R. Adema | newProtectionFlags 702c917cd62SIthamar R. Adema | ARMPagingMethod32Bit::MemoryTypeToPageTableEntryFlags( 703c917cd62SIthamar R. Adema memoryType), 704c917cd62SIthamar R. Adema entry); 705c917cd62SIthamar R. Adema if (oldEntry == entry) 706c917cd62SIthamar R. Adema break; 707c917cd62SIthamar R. Adema entry = oldEntry; 708c917cd62SIthamar R. Adema } 709c917cd62SIthamar R. Adema 710c917cd62SIthamar R. Adema if ((oldEntry & ARM_PTE_ACCESSED) != 0) { 711c917cd62SIthamar R. Adema // Note, that we only need to invalidate the address, if the 712c917cd62SIthamar R. Adema // accessed flag was set, since only then the entry could have 713c917cd62SIthamar R. Adema // been in any TLB. 714c917cd62SIthamar R. 
Adema InvalidatePage(start); 715c917cd62SIthamar R. Adema } 716c917cd62SIthamar R. Adema } 717c917cd62SIthamar R. Adema } while (start != 0 && start < end); 718c917cd62SIthamar R. Adema #endif 719c917cd62SIthamar R. Adema return B_OK; 720c917cd62SIthamar R. Adema } 721c917cd62SIthamar R. Adema 722c917cd62SIthamar R. Adema 723c917cd62SIthamar R. Adema status_t 724c917cd62SIthamar R. Adema ARMVMTranslationMap32Bit::ClearFlags(addr_t va, uint32 flags) 725c917cd62SIthamar R. Adema { 726c917cd62SIthamar R. Adema int index = VADDR_TO_PDENT(va); 727c917cd62SIthamar R. Adema page_directory_entry* pd = fPagingStructures->pgdir_virt; 728c917cd62SIthamar R. Adema if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) { 729c917cd62SIthamar R. Adema // no pagetable here 730c917cd62SIthamar R. Adema return B_OK; 731c917cd62SIthamar R. Adema } 732c917cd62SIthamar R. Adema #if 0 //IRA 733c917cd62SIthamar R. Adema uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? ARM_PTE_DIRTY : 0) 734c917cd62SIthamar R. Adema | ((flags & PAGE_ACCESSED) ? ARM_PTE_ACCESSED : 0); 735c917cd62SIthamar R. Adema #else 736c917cd62SIthamar R. Adema uint32 flagsToClear = 0; 737c917cd62SIthamar R. Adema #endif 7384535495dSIngo Weinhold Thread* thread = thread_get_current_thread(); 739c917cd62SIthamar R. Adema ThreadCPUPinner pinner(thread); 740c917cd62SIthamar R. Adema 741c917cd62SIthamar R. Adema page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt( 742c917cd62SIthamar R. Adema pd[index] & ARM_PDE_ADDRESS_MASK); 743c917cd62SIthamar R. Adema index = VADDR_TO_PTENT(va); 744c917cd62SIthamar R. Adema 745c917cd62SIthamar R. Adema // clear out the flags we've been requested to clear 746c917cd62SIthamar R. Adema page_table_entry oldEntry 747c917cd62SIthamar R. Adema = ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index], 748c917cd62SIthamar R. Adema flagsToClear); 749c917cd62SIthamar R. Adema 750c917cd62SIthamar R. Adema pinner.Unlock(); 751c917cd62SIthamar R. Adema 752f86b5828SIthamar R. 
Adema //XXX IRA if ((oldEntry & flagsToClear) != 0) 753c917cd62SIthamar R. Adema InvalidatePage(va); 754c917cd62SIthamar R. Adema 755c917cd62SIthamar R. Adema return B_OK; 756c917cd62SIthamar R. Adema } 757c917cd62SIthamar R. Adema 758c917cd62SIthamar R. Adema 759c917cd62SIthamar R. Adema bool 760c917cd62SIthamar R. Adema ARMVMTranslationMap32Bit::ClearAccessedAndModified(VMArea* area, addr_t address, 761c917cd62SIthamar R. Adema bool unmapIfUnaccessed, bool& _modified) 762c917cd62SIthamar R. Adema { 763c917cd62SIthamar R. Adema ASSERT(address % B_PAGE_SIZE == 0); 764c917cd62SIthamar R. Adema 765c917cd62SIthamar R. Adema page_directory_entry* pd = fPagingStructures->pgdir_virt; 766c917cd62SIthamar R. Adema 767c917cd62SIthamar R. Adema TRACE("ARMVMTranslationMap32Bit::ClearAccessedAndModified(%#" B_PRIxADDR 768c917cd62SIthamar R. Adema ")\n", address); 769c917cd62SIthamar R. Adema 770c917cd62SIthamar R. Adema RecursiveLocker locker(fLock); 771c917cd62SIthamar R. Adema 772c917cd62SIthamar R. Adema int index = VADDR_TO_PDENT(address); 773c917cd62SIthamar R. Adema if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) 774c917cd62SIthamar R. Adema return false; 775c917cd62SIthamar R. Adema 776c917cd62SIthamar R. Adema ThreadCPUPinner pinner(thread_get_current_thread()); 777c917cd62SIthamar R. Adema 778c917cd62SIthamar R. Adema page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt( 779c917cd62SIthamar R. Adema pd[index] & ARM_PDE_ADDRESS_MASK); 780c917cd62SIthamar R. Adema 781c917cd62SIthamar R. Adema index = VADDR_TO_PTENT(address); 782c917cd62SIthamar R. Adema 783c917cd62SIthamar R. Adema // perform the deed 784c917cd62SIthamar R. Adema page_table_entry oldEntry; 785c917cd62SIthamar R. Adema 786c917cd62SIthamar R. Adema if (unmapIfUnaccessed) { 787c917cd62SIthamar R. Adema while (true) { 788c917cd62SIthamar R. Adema oldEntry = pt[index]; 789c917cd62SIthamar R. Adema if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) { 790c917cd62SIthamar R. 
Adema // page mapping not valid 791c917cd62SIthamar R. Adema return false; 792c917cd62SIthamar R. Adema } 793c917cd62SIthamar R. Adema #if 0 //IRA 794c917cd62SIthamar R. Adema if (oldEntry & ARM_PTE_ACCESSED) { 795c917cd62SIthamar R. Adema // page was accessed -- just clear the flags 796c917cd62SIthamar R. Adema oldEntry = ARMPagingMethod32Bit::ClearPageTableEntryFlags( 797c917cd62SIthamar R. Adema &pt[index], ARM_PTE_ACCESSED | ARM_PTE_DIRTY); 798c917cd62SIthamar R. Adema break; 799c917cd62SIthamar R. Adema } 800c917cd62SIthamar R. Adema #endif 801c917cd62SIthamar R. Adema // page hasn't been accessed -- unmap it 802c917cd62SIthamar R. Adema if (ARMPagingMethod32Bit::TestAndSetPageTableEntry(&pt[index], 0, 803c917cd62SIthamar R. Adema oldEntry) == oldEntry) { 804c917cd62SIthamar R. Adema break; 805c917cd62SIthamar R. Adema } 806c917cd62SIthamar R. Adema 807c917cd62SIthamar R. Adema // something changed -- check again 808c917cd62SIthamar R. Adema } 809c917cd62SIthamar R. Adema } else { 810c917cd62SIthamar R. Adema #if 0 //IRA 811c917cd62SIthamar R. Adema oldEntry = ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index], 812c917cd62SIthamar R. Adema ARM_PTE_ACCESSED | ARM_PTE_DIRTY); 813c917cd62SIthamar R. Adema #else 814c917cd62SIthamar R. Adema oldEntry = pt[index]; 815c917cd62SIthamar R. Adema #endif 816c917cd62SIthamar R. Adema } 817c917cd62SIthamar R. Adema 818c917cd62SIthamar R. Adema pinner.Unlock(); 819c917cd62SIthamar R. Adema 820f86b5828SIthamar R. Adema _modified = true /* (oldEntry & ARM_PTE_DIRTY) != 0 */; // XXX IRA 821c917cd62SIthamar R. Adema 822f86b5828SIthamar R. Adema if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { 823c917cd62SIthamar R. Adema // Note, that we only need to invalidate the address, if the 824c917cd62SIthamar R. Adema // accessed flags was set, since only then the entry could have been 825c917cd62SIthamar R. Adema // in any TLB. 826c917cd62SIthamar R. Adema InvalidatePage(address); 827c917cd62SIthamar R. 
Adema 828c917cd62SIthamar R. Adema Flush(); 829c917cd62SIthamar R. Adema 830c917cd62SIthamar R. Adema return true; 831c917cd62SIthamar R. Adema } 832c917cd62SIthamar R. Adema 833c917cd62SIthamar R. Adema if (!unmapIfUnaccessed) 834c917cd62SIthamar R. Adema return false; 835c917cd62SIthamar R. Adema 836c917cd62SIthamar R. Adema // We have unmapped the address. Do the "high level" stuff. 837c917cd62SIthamar R. Adema 838c917cd62SIthamar R. Adema fMapCount--; 839c917cd62SIthamar R. Adema 840c917cd62SIthamar R. Adema locker.Detach(); 841c917cd62SIthamar R. Adema // UnaccessedPageUnmapped() will unlock for us 842c917cd62SIthamar R. Adema 843c917cd62SIthamar R. Adema UnaccessedPageUnmapped(area, 844c917cd62SIthamar R. Adema (oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE); 845c917cd62SIthamar R. Adema 846c917cd62SIthamar R. Adema return false; 847c917cd62SIthamar R. Adema } 848c917cd62SIthamar R. Adema 849c917cd62SIthamar R. Adema 850c917cd62SIthamar R. Adema ARMPagingStructures* 851c917cd62SIthamar R. Adema ARMVMTranslationMap32Bit::PagingStructures() const 852c917cd62SIthamar R. Adema { 853c917cd62SIthamar R. Adema return fPagingStructures; 854c917cd62SIthamar R. Adema } 855