/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/M68KVMTranslationMap.h"

#include <thread.h>
#include <smp.h>

#include "paging/M68KPagingStructures.h"


//#define TRACE_M68K_VM_TRANSLATION_MAP
#ifdef TRACE_M68K_VM_TRANSLATION_MAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


M68KVMTranslationMap::M68KVMTranslationMap()
	:
	fPageMapper(NULL),
	fInvalidPagesCount(0)
{
}


M68KVMTranslationMap::~M68KVMTranslationMap()
{
}


status_t
M68KVMTranslationMap::Init(bool kernel)
{
	fIsKernelMap = kernel;
	return B_OK;
}


/*!	Acquires the map's recursive lock and, if this is the first locking
	recursion, resets the invalidated pages counter.
*/
bool
M68KVMTranslationMap::Lock()
{
	TRACE("%p->M68KVMTranslationMap::Lock()\n", this);

	recursive_lock_lock(&fLock);
	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we were the first one to grab the lock
		TRACE("clearing invalidated page count\n");
		fInvalidPagesCount = 0;
	}

	return true;
}


/*!	Unlocks the map and, if we are actually losing the recursive lock,
	flushes all pending changes of this map (i.e. flushes TLB caches as
	needed).
*/
void
M68KVMTranslationMap::Unlock()
{
	TRACE("%p->M68KVMTranslationMap::Unlock()\n", this);

	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we're about to release it for the last time
		Flush();
	}

	recursive_lock_unlock(&fLock);
}


addr_t
M68KVMTranslationMap::MappedSize() const
{
	return fMapCount;
}


void
M68KVMTranslationMap::Flush()
{
	if (fInvalidPagesCount <= 0)
		return;

	// Pin the thread to this CPU, so that smp_get_current_cpu() stays valid
	// while the invalidation ICIs are sent below.
	Thread* thread = thread_get_current_thread();
	thread_pin_to_current_cpu(thread);

	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernelMap) {
			arch_cpu_global_TLB_invalidate();
			smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
		} else {
			cpu_status state = disable_interrupts();
			arch_cpu_user_TLB_invalidate();
			restore_interrupts(state);

			int cpu = smp_get_current_cpu();
			uint32 cpuMask = PagingStructures()->active_on_cpus
				& ~((uint32)1 << cpu);
			if (cpuMask != 0) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
					0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
			}
		}
	} else {
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);

		if (fIsKernelMap) {
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
				SMP_MSG_FLAG_SYNC);
		} else {
			int cpu = smp_get_current_cpu();
			uint32 cpuMask = PagingStructures()->active_on_cpus
				& ~((uint32)1 << cpu);
			if (cpuMask != 0) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
					(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
					SMP_MSG_FLAG_SYNC);
			}
		}
	}
	fInvalidPagesCount = 0;

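	// Local and remote invalidations are complete (the ICIs above are sent
	// with SMP_MSG_FLAG_SYNC), so the thread may migrate again.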
	thread_unpin_from_current_cpu(thread);
}
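
// A minimal usage sketch (excluded from the build): `unmap_one_page' and its
// parameters are hypothetical; Unmap() is the range-based method declared by
// the generic VMTranslationMap interface. It illustrates how changes are
// batched under the recursive lock, and how the final Unlock() calls Flush(),
// which performs the TLB shootdown implemented above.
#if 0
static void
unmap_one_page(M68KVMTranslationMap* map, addr_t virtualAddress)
{
	map->Lock();
	// The unmap would typically record the affected page in fInvalidPages
	// (bumping fInvalidPagesCount) rather than flushing the TLB immediately.
	map->Unmap(virtualAddress, virtualAddress + B_PAGE_SIZE - 1);
	map->Unlock();
		// last unlock -> Flush() -> local TLB invalidation + sync ICIs
}
#endif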