/*
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "generic_vm_physical_page_mapper.h"

#include <vm_address_space.h>
#include <vm_page.h>
#include <vm_priv.h>
#include <thread.h>
#include <util/queue.h>

#include <string.h>
#include <stdlib.h>

//#define TRACE_VM_PHYSICAL_PAGE_MAPPER
#ifdef TRACE_VM_PHYSICAL_PAGE_MAPPER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#define DEBUG_IO_SPACE

// data and structures used to represent physical pages mapped into iospace
typedef struct paddr_chunk_descriptor {
	struct paddr_chunk_descriptor *next_q;
		// must remain first in structure, queue code uses it
	int32	ref_count;
	addr_t	va;
#ifdef DEBUG_IO_SPACE
	thread_id last_ref;
#endif
} paddr_chunk_desc;

static paddr_chunk_desc *paddr_desc;         // will be one per physical chunk
static paddr_chunk_desc **virtual_pmappings; // will be one ptr per virtual chunk in iospace
static int first_free_vmapping;
static int num_virtual_chunks;
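// LRU queue of chunks that are mapped but currently unreferenced; these are
// the candidates recycled when the I/O space runs out of free chunks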
static queue mapped_paddr_lru;
static mutex sMutex;
static sem_id sChunkAvailableSem;
static int32 sChunkAvailableWaitingCounter;

static generic_map_iospace_chunk_func sMapIOSpaceChunk;
static addr_t sIOSpaceBase;
static size_t sIOSpaceSize;
static size_t sIOSpaceChunkSize;
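
// How physical addresses are located in I/O space: physical memory is
// divided into chunks of sIOSpaceChunkSize bytes, and each mapped chunk
// occupies one chunk-sized window of the I/O space. For a physical address,
// the descriptor index and the offset within the chunk are computed as in
// generic_get_physical_page():
//
//	index  = pa / sIOSpaceChunkSize
//	offset = pa % sIOSpaceChunkSize
//	va     = paddr_desc[index].va + offset
//
// For example (hypothetical values): with a chunk size of 256 KB (0x40000),
// pa 0x1234678 yields index 0x48 and offset 0x34678.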


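/*!	Maps the chunk of physical memory containing \a pa into the kernel's
	I/O space and stores the corresponding virtual address in \a va.
	If no virtual chunk is free, the least recently used unreferenced
	mapping is recycled; if all mappings are still referenced, the function
	either returns B_NO_MEMORY (\a flags == PHYSICAL_PAGE_NO_WAIT) or blocks
	until a chunk is released.
	A minimal usage sketch (hypothetical caller; PHYSICAL_PAGE_CAN_WAIT is
	assumed to be the blocking flag from <vm_priv.h>):

	addr_t virtualAddress;
	if (generic_get_physical_page(physicalAddress, &virtualAddress,
			PHYSICAL_PAGE_CAN_WAIT) == B_OK) {
		// ... access the page through virtualAddress ...
		generic_put_physical_page(virtualAddress);
	}
*/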
status_t
generic_get_physical_page(addr_t pa, addr_t *va, uint32 flags)
{
	int index;
	paddr_chunk_desc *replaced_pchunk;

restart:
	mutex_lock(&sMutex);

	// see if the page is already mapped
	index = pa / sIOSpaceChunkSize;
	if (paddr_desc[index].va != 0) {
		if (paddr_desc[index].ref_count++ == 0) {
			// pull this descriptor out of the lru list
			queue_remove_item(&mapped_paddr_lru, &paddr_desc[index]);
		}
		*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
		mutex_unlock(&sMutex);
		return B_OK;
	}

	// map it
	if (first_free_vmapping < num_virtual_chunks) {
		// there's a free hole
		paddr_desc[index].va = first_free_vmapping * sIOSpaceChunkSize
			+ sIOSpaceBase;
		*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
		virtual_pmappings[first_free_vmapping] = &paddr_desc[index];
		paddr_desc[index].ref_count++;

		// advance first_free_vmapping to the next unused virtual chunk
		for (; first_free_vmapping < num_virtual_chunks;
			 first_free_vmapping++) {
			if (virtual_pmappings[first_free_vmapping] == NULL)
				break;
		}

		sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize);
		mutex_unlock(&sMutex);

		return B_OK;
	}

	// replace an earlier mapping
	if (queue_peek(&mapped_paddr_lru) == NULL) {
		// no free slots available
		if (flags == PHYSICAL_PAGE_NO_WAIT) {
			// bail out and let the caller handle the failure
			mutex_unlock(&sMutex);
			return B_NO_MEMORY;
		} else {
			sChunkAvailableWaitingCounter++;

			mutex_unlock(&sMutex);
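			// No wakeup can be lost between unlocking and waiting: the
			// releasing thread decrements the waiting counter and releases
			// the semaphore while holding sMutex, and the semaphore keeps
			// that count until we acquire it here.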
			acquire_sem(sChunkAvailableSem);
			goto restart;
		}
	}

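	// Recycle the least recently used unreferenced chunk: take over its
	// virtual range and remap it to the chunk containing the requested
	// physical address.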
	replaced_pchunk = (paddr_chunk_desc*)queue_dequeue(&mapped_paddr_lru);
	paddr_desc[index].va = replaced_pchunk->va;
	replaced_pchunk->va = 0;
	*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
	paddr_desc[index].ref_count++;
#ifdef DEBUG_IO_SPACE
	paddr_desc[index].last_ref = thread_get_current_thread_id();
#endif
	virtual_pmappings[(*va - sIOSpaceBase) / sIOSpaceChunkSize]
		= paddr_desc + index;

	sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize);

	mutex_unlock(&sMutex);
	return B_OK;
}


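/*!	Releases a reference to the mapping that covers the virtual address
	\a va, which must have been obtained via generic_get_physical_page().
	When the last reference is dropped the chunk is put on the LRU queue,
	and a thread waiting for a free chunk, if any, is woken up.
*/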
status_t
generic_put_physical_page(addr_t va)
{
	paddr_chunk_desc *desc;

	if (va < sIOSpaceBase || va >= sIOSpaceBase + sIOSpaceSize)
		panic("someone called put_physical_page on an invalid va 0x%lx\n", va);
	va -= sIOSpaceBase;

	mutex_lock(&sMutex);

	desc = virtual_pmappings[va / sIOSpaceChunkSize];
	if (desc == NULL) {
		mutex_unlock(&sMutex);
		panic("put_physical_page called on page at va 0x%lx which is not checked out\n", va);
		return B_ERROR;
	}

	if (--desc->ref_count == 0) {
		// put it on the mapped lru list
		queue_enqueue(&mapped_paddr_lru, desc);

		if (sChunkAvailableWaitingCounter > 0) {
			sChunkAvailableWaitingCounter--;
			release_sem_etc(sChunkAvailableSem, 1, B_DO_NOT_RESCHEDULE);
		}
	}

	if (desc->ref_count < 0) {
		panic("generic_put_physical_page(): ref count < 0: %ld\n",
			desc->ref_count);
	}

	mutex_unlock(&sMutex);

	return B_OK;
}


#ifdef DEBUG_IO_SPACE
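/*!	KDL command dumping the I/O space bookkeeping. The first argument
	selects what to print: 'p' for the physical descriptors, 'v' for the
	virtual chunk mappings, 'q' for the LRU queue; the letters can be
	combined, and for 'p' an optional second argument limits the number of
	descriptors printed, e.g. "iospace p 16".
*/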
static int
dump_iospace(int argc, char** argv)
{
	if (argc < 2) {
		kprintf("usage: iospace <physical|virtual|queue>\n");
		return 0;
	}

	int32 i;

	if (strchr(argv[1], 'p')) {
		// physical address descriptors
		kprintf("I/O space physical descriptors (%p)\n", paddr_desc);

		int32 max = 1024;
		if (argc == 3)
			max = strtol(argv[2], NULL, 0);

		for (i = 0; i < max; i++) {
			kprintf("[%03lx %p %3ld %3ld] ", i, (void *)paddr_desc[i].va,
				paddr_desc[i].ref_count, paddr_desc[i].last_ref);
			if (i % 4 == 3)
				kprintf("\n");
		}
		if (i % 4)
			kprintf("\n");
	}

	if (strchr(argv[1], 'v')) {
		// virtual mappings
		kprintf("I/O space virtual chunk mappings (%p, first free: %d)\n",
			virtual_pmappings, first_free_vmapping);

		for (i = 0; i < num_virtual_chunks; i++) {
			// the pointer difference already yields a descriptor index
			kprintf("[%2ld. %03lx] ", i, virtual_pmappings[i] - paddr_desc);
			if (i % 8 == 7)
				kprintf("\n");
		}
		if (i % 8)
			kprintf("\n");
	}

	if (strchr(argv[1], 'q')) {
		// unused queue
		kprintf("I/O space mapped queue:\n");

		paddr_chunk_descriptor* descriptor
			= (paddr_chunk_descriptor *)queue_peek(&mapped_paddr_lru);
		i = 0;

		while (descriptor != NULL) {
			kprintf("[%03lx %p] ", descriptor - paddr_desc, descriptor);
			if (i++ % 8 == 7)
				kprintf("\n");

			descriptor = descriptor->next_q;
		}
		if (i % 8)
			kprintf("\n");
	}

	return 0;
}
#endif


//	#pragma mark -


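/*!	Called in the early boot process, while vm_allocate_early() is still
	available, to reserve the virtual range for the I/O space and to
	allocate the bookkeeping tables. \a mapIOSpaceChunk is the
	architecture-specific callback that actually maps a chunk of physical
	memory into the I/O space.
*/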
status_t
generic_vm_physical_page_mapper_init(kernel_args *args,
	generic_map_iospace_chunk_func mapIOSpaceChunk, addr_t *ioSpaceBase,
	size_t ioSpaceSize, size_t ioSpaceChunkSize)
{
	TRACE(("generic_vm_physical_page_mapper_init: entry\n"));

	sMapIOSpaceChunk = mapIOSpaceChunk;
	sIOSpaceSize = ioSpaceSize;
	sIOSpaceChunkSize = ioSpaceChunkSize;

	// reserve virtual space for the IO space
	// We reserve (ioSpaceChunkSize - B_PAGE_SIZE) bytes more than needed, so
	// that we can guarantee that the base address can be aligned to
	// ioSpaceChunkSize below.
	sIOSpaceBase = vm_allocate_early(args,
		sIOSpaceSize + ioSpaceChunkSize - B_PAGE_SIZE, 0, 0);
	if (sIOSpaceBase == 0) {
		panic("generic_vm_physical_page_mapper_init(): Failed to reserve IO "
			"space in virtual address space!");
		return B_ERROR;
	}

	// align the base address to the chunk size; (x + a - 1) / a * a rounds
	// x up to the next multiple of a
	sIOSpaceBase = (sIOSpaceBase + ioSpaceChunkSize - 1) / ioSpaceChunkSize
		* ioSpaceChunkSize;
	*ioSpaceBase = sIOSpaceBase;

	// allocate some space to hold physical page mapping info
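	// Note: the descriptor table is hard-coded to 1024 entries, so only the
	// first 1024 * sIOSpaceChunkSize bytes of physical memory are covered;
	// generic_get_physical_page() does not bounds-check the chunk index.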
	paddr_desc = (paddr_chunk_desc *)vm_allocate_early(args,
		sizeof(paddr_chunk_desc) * 1024, ~0L,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	num_virtual_chunks = sIOSpaceSize / sIOSpaceChunkSize;
	virtual_pmappings = (paddr_chunk_desc **)vm_allocate_early(args,
		sizeof(paddr_chunk_desc *) * num_virtual_chunks, ~0L,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

	TRACE(("paddr_desc %p, virtual_pmappings %p"/*", iospace_pgtables %p"*/"\n",
		paddr_desc, virtual_pmappings/*, iospace_pgtables*/));

	// initialize our data structures
	memset(paddr_desc, 0, sizeof(paddr_chunk_desc) * 1024);
	memset(virtual_pmappings, 0, sizeof(paddr_chunk_desc *) * num_virtual_chunks);
	first_free_vmapping = 0;
	queue_init(&mapped_paddr_lru);
	mutex_init(&sMutex, "iospace_mutex");
	sChunkAvailableSem = -1;

	TRACE(("generic_vm_physical_page_mapper_init: done\n"));

	return B_OK;
}


status_t
generic_vm_physical_page_mapper_init_post_area(kernel_args *args)
{
	void *temp;

	TRACE(("generic_vm_physical_page_mapper_init_post_area: entry\n"));

	temp = (void *)paddr_desc;
	create_area("physical_page_mapping_descriptors", &temp, B_EXACT_ADDRESS,
		ROUNDUP(sizeof(paddr_chunk_desc) * 1024, B_PAGE_SIZE),
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

	temp = (void *)virtual_pmappings;
	create_area("iospace_virtual_chunk_descriptors", &temp, B_EXACT_ADDRESS,
		ROUNDUP(sizeof(paddr_chunk_desc *) * num_virtual_chunks, B_PAGE_SIZE),
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

	TRACE(("generic_vm_physical_page_mapper_init_post_area: creating iospace\n"));
	temp = (void *)sIOSpaceBase;
	area_id ioSpaceArea = vm_create_null_area(vm_kernel_address_space_id(),
		"iospace", &temp, B_EXACT_ADDRESS, sIOSpaceSize);
	if (ioSpaceArea < 0) {
		panic("generic_vm_physical_page_mapper_init_post_area(): Failed to "
			"create null area for IO space!\n");
		return ioSpaceArea;
	}

	TRACE(("generic_vm_physical_page_mapper_init_post_area: done\n"));

#ifdef DEBUG_IO_SPACE
	add_debugger_command("iospace", &dump_iospace, "Shows info about the I/O space area.");
#endif

	return B_OK;
}


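/*!	Creates the semaphore that generic_get_physical_page() blocks on when
	every virtual chunk is mapped and referenced. Until this is called,
	sChunkAvailableSem is -1 and waiting is not possible.
*/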
status_t
generic_vm_physical_page_mapper_init_post_sem(kernel_args *args)
{
	sChunkAvailableSem = create_sem(1, "iospace chunk available");

	return sChunkAvailableSem >= B_OK ? B_OK : sChunkAvailableSem;
}