/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/ARMVMTranslationMap.h"

#include <thread.h>
#include <smp.h>

#include "paging/ARMPagingStructures.h"


//#define TRACE_ARM_VM_TRANSLATION_MAP
#ifdef TRACE_ARM_VM_TRANSLATION_MAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


ARMVMTranslationMap::ARMVMTranslationMap()
	:
	fPageMapper(NULL),
	fInvalidPagesCount(0)
{
}


ARMVMTranslationMap::~ARMVMTranslationMap()
{
}


status_t
ARMVMTranslationMap::Init(bool kernel)
{
	fIsKernelMap = kernel;
	return B_OK;
}


/*!	Acquires the map's recursive lock and, if this is the first (outermost)
	locking recursion, resets the invalidated pages counter.
*/
bool
ARMVMTranslationMap::Lock()
{
	TRACE("%p->ARMVMTranslationMap::Lock()\n", this);

	recursive_lock_lock(&fLock);
	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we were the first one to grab the lock
		TRACE("clearing invalidated page count\n");
		fInvalidPagesCount = 0;
	}

	return true;
}


/*!	Unlocks the map and, if the recursive lock is actually being released,
	flushes all pending changes of this map (i.e. invalidates TLB entries as
	needed).
*/
void
ARMVMTranslationMap::Unlock()
{
	TRACE("%p->ARMVMTranslationMap::Unlock()\n", this);

	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we're about to release it for the last time
		Flush();
	}

	recursive_lock_unlock(&fLock);
}
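
// A typical call sequence (a hedged sketch; the real callers are the generic
// VM layer's mapping/protection paths, which are not part of this file):
//
//   map->Lock();      // first recursion: fInvalidPagesCount is reset
//   map->Lock();      // nested locking is fine, counter stays untouched
//   // ... modify mappings, queueing pages into fInvalidPages ...
//   map->Unlock();    // still held by the outer Lock(), nothing flushed
//   map->Unlock();    // last unlock: Flush() runs, then the lock is released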


addr_t
ARMVMTranslationMap::MappedSize() const
{
	return fMapCount;
}


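/*!	Flushes the pending TLB invalidations of this map: up to
	PAGE_INVALIDATE_CACHE_SIZE pages are invalidated individually from the
	queued list; beyond that threshold the entire (user or global) TLB is
	invalidated instead. Other CPUs the map is active on are notified via ICI.
*/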
void
ARMVMTranslationMap::Flush()
{
	if (fInvalidPagesCount <= 0)
		return;

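	// Pin to the current CPU, so that the local TLB invalidation and the
	// remote CPU mask computed below consistently refer to the same CPU.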
	Thread* thread = thread_get_current_thread();
	thread_pin_to_current_cpu(thread);

	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernelMap) {
			arch_cpu_global_TLB_invalidate();
			smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
		} else {
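			// User map: invalidate only this CPU's user TLB entries, with
			// interrupts disabled (presumably so the flush isn't interrupted
			// midway).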
			cpu_status state = disable_interrupts();
			arch_cpu_user_TLB_invalidate();
			restore_interrupts(state);

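			// Tell every other CPU this address space is currently active on
			// to invalidate its user TLB entries as well.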
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
					0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
			}
		}
	} else {
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);

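		// Forward the list to the other CPUs: all of them for the kernel
		// map, otherwise only those the map is currently active on.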
		if (fIsKernelMap) {
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
				SMP_MSG_FLAG_SYNC);
		} else {
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
					(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
					SMP_MSG_FLAG_SYNC);
			}
		}
	}
	fInvalidPagesCount = 0;

	thread_unpin_from_current_cpu(thread);
}