/*
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "generic_vm_physical_page_mapper.h"

#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <thread.h>
#include <util/queue.h>

#include <string.h>
#include <stdlib.h>

//#define TRACE_VM_PHYSICAL_PAGE_MAPPER
#ifdef TRACE_VM_PHYSICAL_PAGE_MAPPER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#define DEBUG_IO_SPACE

// data and structures used to represent physical pages mapped into iospace
typedef struct paddr_chunk_descriptor {
	struct paddr_chunk_descriptor *next_q;
		// must remain first in structure, queue code uses it
	int32 ref_count;
	addr_t va;
#ifdef DEBUG_IO_SPACE
	thread_id last_ref;
#endif
} paddr_chunk_desc;

static paddr_chunk_desc *paddr_desc;
	// will be one per physical chunk
static paddr_chunk_desc **virtual_pmappings;
	// will be one ptr per virtual chunk in iospace
static int first_free_vmapping;
static int num_virtual_chunks;
static queue mapped_paddr_lru;
static mutex sMutex = MUTEX_INITIALIZER("iospace_mutex");
static sem_id sChunkAvailableSem;
static int32 sChunkAvailableWaitingCounter;

static generic_map_iospace_chunk_func sMapIOSpaceChunk;
static addr_t sIOSpaceBase;
static size_t sIOSpaceSize;
static size_t sIOSpaceChunkSize;


status_t
generic_get_physical_page(phys_addr_t pa, addr_t *va, uint32 flags)
{
	int index;
	paddr_chunk_desc *replaced_pchunk;

restart:
	mutex_lock(&sMutex);

	// see if the page is already mapped
	index = pa / sIOSpaceChunkSize;
	if (paddr_desc[index].va != 0) {
		if (paddr_desc[index].ref_count++ == 0) {
			// pull this descriptor out of the lru list
			queue_remove_item(&mapped_paddr_lru, &paddr_desc[index]);
		}
		*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
		mutex_unlock(&sMutex);
		return B_OK;
	}

	// map it
	if (first_free_vmapping < num_virtual_chunks) {
		// there's a free hole
		paddr_desc[index].va = first_free_vmapping * sIOSpaceChunkSize
			+ sIOSpaceBase;
		*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
		virtual_pmappings[first_free_vmapping] = &paddr_desc[index];
		paddr_desc[index].ref_count++;

		// push up the first_free_vmapping pointer
		for (; first_free_vmapping < num_virtual_chunks;
				first_free_vmapping++) {
			if (virtual_pmappings[first_free_vmapping] == NULL)
				break;
		}

		sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize,
			flags);
		mutex_unlock(&sMutex);

		return B_OK;
	}

	// replace an earlier mapping
	if (queue_peek(&mapped_paddr_lru) == NULL) {
		// no free slots available
		if ((flags & PHYSICAL_PAGE_DONT_WAIT) != 0) {
			// put back to the caller and let them handle this
			mutex_unlock(&sMutex);
			return B_NO_MEMORY;
		} else {
			sChunkAvailableWaitingCounter++;

			mutex_unlock(&sMutex);
			acquire_sem(sChunkAvailableSem);
			goto restart;
		}
	}

	replaced_pchunk = (paddr_chunk_desc*)queue_dequeue(&mapped_paddr_lru);
	paddr_desc[index].va = replaced_pchunk->va;
	replaced_pchunk->va = 0;
	*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
	paddr_desc[index].ref_count++;
#ifdef DEBUG_IO_SPACE
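	// remember which thread checked out this chunk, shown by the
	// "iospace" debugger command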
	paddr_desc[index].last_ref = thread_get_current_thread_id();
#endif
	virtual_pmappings[(*va - sIOSpaceBase) / sIOSpaceChunkSize]
		= paddr_desc + index;

	sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize, flags);

	mutex_unlock(&sMutex);
	return B_OK;
}


status_t
generic_put_physical_page(addr_t va)
{
	paddr_chunk_desc *desc;

	if (va < sIOSpaceBase || va >= sIOSpaceBase + sIOSpaceSize)
		panic("someone called put_physical_page on an invalid va 0x%lx\n", va);
	va -= sIOSpaceBase;

	mutex_lock(&sMutex);

	desc = virtual_pmappings[va / sIOSpaceChunkSize];
	if (desc == NULL) {
		mutex_unlock(&sMutex);
		panic("put_physical_page called on page at va 0x%lx which is not "
			"checked out\n", va);
		return B_ERROR;
	}

	if (--desc->ref_count == 0) {
		// put it on the mapped lru list
		queue_enqueue(&mapped_paddr_lru, desc);

		if (sChunkAvailableWaitingCounter > 0) {
			sChunkAvailableWaitingCounter--;
			release_sem_etc(sChunkAvailableSem, 1, B_DO_NOT_RESCHEDULE);
		}
	}

	mutex_unlock(&sMutex);

	return B_OK;
}


#ifdef DEBUG_IO_SPACE
static int
dump_iospace(int argc, char** argv)
{
	if (argc < 2) {
		kprintf("usage: iospace <physical|virtual|queue>\n");
		return 0;
	}

	int32 i;

	if (strchr(argv[1], 'p')) {
		// physical address descriptors
		kprintf("I/O space physical descriptors (%p)\n", paddr_desc);

		int32 max = vm_page_num_pages() / (sIOSpaceChunkSize / B_PAGE_SIZE);
		if (argc == 3)
			max = strtol(argv[2], NULL, 0);

		for (i = 0; i < max; i++) {
			kprintf("[%03lx %p %3ld %3ld] ", i, (void *)paddr_desc[i].va,
				paddr_desc[i].ref_count, paddr_desc[i].last_ref);
			if (i % 4 == 3)
				kprintf("\n");
		}
		if (i % 4)
			kprintf("\n");
	}

	if (strchr(argv[1], 'v')) {
		// virtual mappings
		kprintf("I/O space virtual chunk mappings (%p, first free: %d)\n",
			virtual_pmappings, first_free_vmapping);

		for (i = 0; i < num_virtual_chunks; i++) {
			kprintf("[%2ld. %03lx] ", i, virtual_pmappings[i] - paddr_desc);
%03lx] ", i, virtual_pmappings[i] - paddr_desc); 204 if (i % 8 == 7) 205 kprintf("\n"); 206 } 207 if (i % 8) 208 kprintf("\n"); 209 } 210 211 if (strchr(argv[1], 'q')) { 212 // unused queue 213 kprintf("I/O space mapped queue:\n"); 214 215 paddr_chunk_descriptor* descriptor 216 = (paddr_chunk_descriptor *)queue_peek(&mapped_paddr_lru); 217 i = 0; 218 219 while (descriptor != NULL) { 220 kprintf("[%03lx %p] ", 221 (descriptor - paddr_desc) / sizeof(paddr_desc[0]), descriptor); 222 if (i++ % 8 == 7) 223 kprintf("\n"); 224 225 descriptor = descriptor->next_q; 226 } 227 if (i % 8) 228 kprintf("\n"); 229 } 230 231 return 0; 232 } 233 #endif 234 235 236 // #pragma mark - 237 238 239 status_t 240 generic_vm_physical_page_mapper_init(kernel_args *args, 241 generic_map_iospace_chunk_func mapIOSpaceChunk, addr_t *ioSpaceBase, 242 size_t ioSpaceSize, size_t ioSpaceChunkSize) 243 { 244 TRACE(("generic_vm_physical_page_mapper_init: entry\n")); 245 246 sMapIOSpaceChunk = mapIOSpaceChunk; 247 sIOSpaceSize = ioSpaceSize; 248 sIOSpaceChunkSize = ioSpaceChunkSize; 249 250 // reserve virtual space for the IO space 251 sIOSpaceBase = vm_allocate_early(args, sIOSpaceSize, 0, 0, 252 ioSpaceChunkSize); 253 if (sIOSpaceBase == 0) { 254 panic("generic_vm_physical_page_mapper_init(): Failed to reserve IO " 255 "space in virtual address space!"); 256 return B_ERROR; 257 } 258 259 *ioSpaceBase = sIOSpaceBase; 260 261 // allocate some space to hold physical page mapping info 262 paddr_desc = (paddr_chunk_desc *)vm_allocate_early(args, 263 sizeof(paddr_chunk_desc) * 1024, ~0L, 264 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0); 265 num_virtual_chunks = sIOSpaceSize / sIOSpaceChunkSize; 266 virtual_pmappings = (paddr_chunk_desc **)vm_allocate_early(args, 267 sizeof(paddr_chunk_desc *) * num_virtual_chunks, ~0L, 268 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0); 269 270 TRACE(("paddr_desc %p, virtual_pmappings %p"/*", iospace_pgtables %p"*/"\n", 271 paddr_desc, virtual_pmappings/*, iospace_pgtables*/)); 272 273 // initialize our data structures 274 memset(paddr_desc, 0, sizeof(paddr_chunk_desc) * 1024); 275 memset(virtual_pmappings, 0, sizeof(paddr_chunk_desc *) * num_virtual_chunks); 276 first_free_vmapping = 0; 277 queue_init(&mapped_paddr_lru); 278 sChunkAvailableSem = -1; 279 280 TRACE(("generic_vm_physical_page_mapper_init: done\n")); 281 282 return B_OK; 283 } 284 285 286 status_t 287 generic_vm_physical_page_mapper_init_post_area(kernel_args *args) 288 { 289 void *temp; 290 291 TRACE(("generic_vm_physical_page_mapper_init_post_area: entry\n")); 292 293 temp = (void *)paddr_desc; 294 create_area("physical_page_mapping_descriptors", &temp, B_EXACT_ADDRESS, 295 ROUNDUP(sizeof(paddr_chunk_desc) * 1024, B_PAGE_SIZE), 296 B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); 297 298 temp = (void *)virtual_pmappings; 299 create_area("iospace_virtual_chunk_descriptors", &temp, B_EXACT_ADDRESS, 300 ROUNDUP(sizeof(paddr_chunk_desc *) * num_virtual_chunks, B_PAGE_SIZE), 301 B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); 302 303 TRACE(("generic_vm_physical_page_mapper_init_post_area: creating iospace\n")); 304 temp = (void *)sIOSpaceBase; 305 area_id ioSpaceArea = vm_create_null_area(VMAddressSpace::KernelID(), 306 "iospace", &temp, B_EXACT_ADDRESS, sIOSpaceSize, 307 CREATE_AREA_PRIORITY_VIP); 308 if (ioSpaceArea < 0) { 309 panic("generic_vm_physical_page_mapper_init_post_area(): Failed to " 310 "create null area for IO space!\n"); 311 return ioSpaceArea; 312 } 313 314 
TRACE(("generic_vm_physical_page_mapper_init_post_area: done\n")); 315 316 #ifdef DEBUG_IO_SPACE 317 add_debugger_command("iospace", &dump_iospace, "Shows info about the I/O space area."); 318 #endif 319 320 return B_OK; 321 } 322 323 324 status_t 325 generic_vm_physical_page_mapper_init_post_sem(kernel_args *args) 326 { 327 sChunkAvailableSem = create_sem(1, "iospace chunk available"); 328 329 return sChunkAvailableSem >= B_OK ? B_OK : sChunkAvailableSem; 330 } 331