xref: /haiku/src/system/kernel/arch/arm/paging/32bit/ARMPagingMethod32Bit.cpp (revision 220d04022750f40f8bac8f01fa551211e28d04f2)
1 /*
2  * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl
3  * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
4  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
5  * Distributed under the terms of the MIT License.
6  *
7  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8  * Distributed under the terms of the NewOS License.
9  */
10 
11 
12 #include "paging/32bit/ARMPagingMethod32Bit.h"
13 
14 #include <stdlib.h>
15 #include <string.h>
16 
17 #include <AutoDeleter.h>
18 
19 #include <arch_system_info.h>
20 #include <boot/kernel_args.h>
21 #include <int.h>
22 #include <thread.h>
23 #include <vm/vm.h>
24 #include <vm/VMAddressSpace.h>
25 
26 #include "paging/32bit/ARMPagingStructures32Bit.h"
27 #include "paging/32bit/ARMVMTranslationMap32Bit.h"
28 #include "paging/arm_physical_page_mapper.h"
29 #include "paging/arm_physical_page_mapper_large_memory.h"
30 
31 
32 //#define TRACE_ARM_PAGING_METHOD_32_BIT
33 #ifdef TRACE_ARM_PAGING_METHOD_32_BIT
34 #	define TRACE(x...) dprintf(x)
35 #else
36 #	define TRACE(x...) ;
37 #endif
38 
39 
40 using ARMLargePhysicalPageMapper::PhysicalPageSlot;
41 
42 
43 // #pragma mark - ARMPagingMethod32Bit::PhysicalPageSlotPool
44 
45 
// A pool of 1024 consecutive virtual page slots (4 MB of kernel address
// space) backed by a single ARM L2 page table, used by the large-memory
// physical page mapper to temporarily map physical pages.
struct ARMPagingMethod32Bit::PhysicalPageSlotPool
	: ARMLargePhysicalPageMapper::PhysicalPageSlotPool {
public:
	virtual						~PhysicalPageSlotPool();

			// Early-boot initialization via vm_allocate_early(); no areas
			// exist yet at that point.
			status_t			InitInitial(kernel_args* args);
			// Registers areas for the already-allocated resources once the
			// VM is up.
			status_t			InitInitialPostArea(kernel_args* args);

			// Common initialization: records the areas and builds the free
			// slot list behind the page table.
			void				Init(area_id dataArea, void* data,
									area_id virtualArea, addr_t virtualBase);

	virtual	status_t			AllocatePool(
									ARMLargePhysicalPageMapper
										::PhysicalPageSlotPool*& _pool);
	// Maps physicalAddress at the slot belonging to virtualAddress and
	// invalidates the TLB for that page.
	virtual	void				Map(phys_addr_t physicalAddress,
									addr_t virtualAddress);

public:
	static	PhysicalPageSlotPool sInitialPhysicalPagePool;

private:
	area_id					fDataArea;
		// area covering fPageTable and the slot array (-1 before
		// InitInitialPostArea() for the initial pool)
	area_id					fVirtualArea;
		// null area reserving the pool's virtual range
	addr_t					fVirtualBase;
		// start of the pool's 4 MB virtual range
	page_table_entry*		fPageTable;
		// L2 page table mapping that range; slot array follows it in memory
};


ARMPagingMethod32Bit::PhysicalPageSlotPool
	ARMPagingMethod32Bit::PhysicalPageSlotPool::sInitialPhysicalPagePool;
76 
77 
ARMPagingMethod32Bit::PhysicalPageSlotPool::~PhysicalPageSlotPool()
{
	// Nothing to do: the areas and the backing memory live for the kernel's
	// lifetime.
}
81 
82 
83 status_t
84 ARMPagingMethod32Bit::PhysicalPageSlotPool::InitInitial(kernel_args* args)
85 {
86 	// allocate a virtual address range for the pages to be mapped into
87 	addr_t virtualBase = vm_allocate_early(args, 1024 * B_PAGE_SIZE, 0, 0,
88 		kPageTableAlignment);
89 	if (virtualBase == 0) {
90 		panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve "
91 			"physical page pool space in virtual address space!");
92 		return B_ERROR;
93 	}
94 
95 	// allocate memory for the page table and data
96 	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
97 	page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
98 		areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
99 
100 	// prepare the page table
101 	_EarlyPreparePageTables(pageTable, virtualBase, 1024 * B_PAGE_SIZE);
102 
103 	// init the pool structure and add the initial pool
104 	Init(-1, pageTable, -1, (addr_t)virtualBase);
105 
106 	return B_OK;
107 }
108 
109 
110 status_t
111 ARMPagingMethod32Bit::PhysicalPageSlotPool::InitInitialPostArea(
112 	kernel_args* args)
113 {
114 	// create an area for the (already allocated) data
115 	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
116 	void* temp = fPageTable;
117 	area_id area = create_area("physical page pool", &temp,
118 		B_EXACT_ADDRESS, areaSize, B_ALREADY_WIRED,
119 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
120 	if (area < 0) {
121 		panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
122 			"create area for physical page pool.");
123 		return area;
124 	}
125 	fDataArea = area;
126 
127 	// create an area for the virtual address space
128 	temp = (void*)fVirtualBase;
129 	area = vm_create_null_area(VMAddressSpace::KernelID(),
130 		"physical page pool space", &temp, B_EXACT_ADDRESS,
131 		1024 * B_PAGE_SIZE, 0);
132 	if (area < B_OK) {
133 		panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
134 			"create area for physical page pool space.");
135 		return area;
136 	}
137 	fVirtualArea = area;
138 
139 	return B_OK;
140 }
141 
142 
143 void
144 ARMPagingMethod32Bit::PhysicalPageSlotPool::Init(area_id dataArea, void* data,
145 	area_id virtualArea, addr_t virtualBase)
146 {
147 	fDataArea = dataArea;
148 	fVirtualArea = virtualArea;
149 	fVirtualBase = virtualBase;
150 	fPageTable = (page_table_entry*)data;
151 
152 	// init slot list
153 	fSlots = (PhysicalPageSlot*)(fPageTable + 1024);
154 	addr_t slotAddress = virtualBase;
155 	for (int32 i = 0; i < 1024; i++, slotAddress += B_PAGE_SIZE) {
156 		PhysicalPageSlot* slot = &fSlots[i];
157 		slot->next = slot + 1;
158 		slot->pool = this;
159 		slot->address = slotAddress;
160 	}
161 
162 	fSlots[1023].next = NULL;
163 		// terminate list
164 }
165 
166 
void
ARMPagingMethod32Bit::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress,
	addr_t virtualAddress)
{
	// The slot's table entry index is the page offset of virtualAddress
	// within the pool's virtual range.
	page_table_entry& pte = fPageTable[
		(virtualAddress - fVirtualBase) / B_PAGE_SIZE];
	pte = (physicalAddress & ARM_PTE_ADDRESS_MASK)
		| ARM_MMU_L2_TYPE_SMALLEXT;

	// The slot may have mapped a different page before, so flush any stale
	// TLB entry for it.
	arch_cpu_invalidate_TLB_range(virtualAddress, virtualAddress + B_PAGE_SIZE);
//	invalidate_TLB(virtualAddress);
}
179 
180 
/*!	Creates a new pool for the physical page mapper.
	Allocates the pool object, an area holding its page table and slot
	structures, and a null area reserving 4 MB of kernel address space, then
	hooks the page table into all page directories.
	\param _pool On success set to the new pool (ownership transferred).
	\return \c B_OK on success, an error code otherwise.
*/
status_t
ARMPagingMethod32Bit::PhysicalPageSlotPool::AllocatePool(
	ARMLargePhysicalPageMapper::PhysicalPageSlotPool*& _pool)
{
	// create the pool structure
	PhysicalPageSlotPool* pool = new(std::nothrow) PhysicalPageSlotPool;
	if (pool == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<PhysicalPageSlotPool> poolDeleter(pool);
		// deletes the pool on any error path below

	// create an area that can contain the page table and the slot
	// structures
	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
	void* data;
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
		PAGE_ALIGN(areaSize), B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions, &data);
	if (dataArea < 0)
		return dataArea;

	// create the null area for the virtual address space
	void* virtualBase;
	area_id virtualArea = vm_create_null_area(
		VMAddressSpace::KernelID(), "physical page pool space",
		&virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, 1024 * B_PAGE_SIZE,
		CREATE_AREA_PRIORITY_VIP);
	if (virtualArea < 0) {
		delete_area(dataArea);
		return virtualArea;
	}

	// prepare the page table
	memset(data, 0, B_PAGE_SIZE);

	// get the page table's physical address
	phys_addr_t physicalTable;
	ARMVMTranslationMap32Bit* map = static_cast<ARMVMTranslationMap32Bit*>(
		VMAddressSpace::Kernel()->TranslationMap());
	uint32 dummyFlags;
	cpu_status state = disable_interrupts();
		// QueryInterrupt() must be called with interrupts disabled
	map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags);
	restore_interrupts(state);

	// put the page table into the page directory
	int32 index = VADDR_TO_PDENT((addr_t)virtualBase);
	page_directory_entry* entry
		= &map->PagingStructures32Bit()->pgdir_virt[index];
	PutPageTableInPageDir(entry, physicalTable,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	ARMPagingStructures32Bit::UpdateAllPageDirs(index, *entry);
		// propagate the new kernel mapping to every address space

	// init the pool structure
	pool->Init(dataArea, data, virtualArea, (addr_t)virtualBase);
	poolDeleter.Detach();
		// success — the caller now owns the pool
	_pool = pool;
	return B_OK;
}
242 
243 
244 // #pragma mark - ARMPagingMethod32Bit
245 
246 
// Leaves all paging state unset; Init() fills it in from the kernel args.
ARMPagingMethod32Bit::ARMPagingMethod32Bit()
	:
	fKernelPhysicalPageDirectory(0),
	fKernelVirtualPageDirectory(NULL),
	fPhysicalPageMapper(NULL),
	fKernelPhysicalPageMapper(NULL)
{
}
255 
256 
ARMPagingMethod32Bit::~ARMPagingMethod32Bit()
{
	// The paging method lives for the kernel's lifetime; nothing to free.
}
260 
261 
/*!	Initializes the paging method.
	Adopts the kernel page directory set up by the boot loader (from
	\a args->arch_args), creates the initial physical page slot pool and the
	large-memory physical page mapper.
	\param args The kernel args passed from the boot loader.
	\param _physicalPageMapper Set to the created physical page mapper.
	\return \c B_OK on success; panics (and returns an error) otherwise.
*/
status_t
ARMPagingMethod32Bit::Init(kernel_args* args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("vm_translation_map_init: entry\n");

	fKernelPhysicalPageDirectory = args->arch_args.phys_pgdir;
	fKernelVirtualPageDirectory = (page_directory_entry*)
		args->arch_args.vir_pgdir;

	TRACE("page dir: %p (physical: %#" B_PRIx32 ")\n",
		fKernelVirtualPageDirectory, fKernelPhysicalPageDirectory);

	ARMPagingStructures32Bit::StaticInit();

	// create the initial pool for the physical page mapper
	// (placement-new into static storage — no heap exists yet)
	PhysicalPageSlotPool* pool
		= new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
			PhysicalPageSlotPool;
	status_t error = pool->InitInitial(args);
	if (error != B_OK) {
		panic("ARMPagingMethod32Bit::Init(): Failed to create initial pool "
			"for physical page mapper!");
		return error;
	}

	// create physical page mapper
	large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
		fKernelPhysicalPageMapper);
		// TODO: Select the best page mapper!

	// enable global page feature if available
	// (x86 leftover, disabled; would need the ARMv6+ equivalent)
#if 0 //IRA: check for ARMv6!!
	if (x86_check_feature(IA32_FEATURE_PGE, FEATURE_COMMON)) {
		// this prevents kernel pages from being flushed from TLB on
		// context-switch
		x86_write_cr4(x86_read_cr4() | IA32_CR4_GLOBAL_PAGES);
	}
#endif
	TRACE("ARMPagingMethod32Bit::Init(): done\n");

	*_physicalPageMapper = fPhysicalPageMapper;
	return B_OK;
}
306 
307 
308 status_t
309 ARMPagingMethod32Bit::InitPostArea(kernel_args* args)
310 {
311 	void *temp;
312 	status_t error;
313 	area_id area;
314 
315 	temp = (void*)fKernelVirtualPageDirectory;
316 	area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS,
317 		ARM_MMU_L1_TABLE_SIZE, B_ALREADY_WIRED, B_KERNEL_READ_AREA
318 			| B_KERNEL_WRITE_AREA);
319 	if (area < B_OK)
320 		return area;
321 
322 	error = PhysicalPageSlotPool::sInitialPhysicalPagePool
323 		.InitInitialPostArea(args);
324 	if (error != B_OK)
325 		return error;
326 
327 	return B_OK;
328 }
329 
330 
331 status_t
332 ARMPagingMethod32Bit::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
333 {
334 	ARMVMTranslationMap32Bit* map = new(std::nothrow) ARMVMTranslationMap32Bit;
335 	if (map == NULL)
336 		return B_NO_MEMORY;
337 
338 	status_t error = map->Init(kernel);
339 	if (error != B_OK) {
340 		delete map;
341 		return error;
342 	}
343 
344 	*_map = map;
345 	return B_OK;
346 }
347 
348 
/*!	Maps a page during early boot, before the VM is fully initialized.
	Allocates and installs a page table on demand via \a get_free_page.
	Physical addresses are accessed directly, which assumes they are still
	identity-mapped at this stage.
	\param args The kernel args, passed through to \a get_free_page.
	\param virtualAddress The virtual address to map.
	\param physicalAddress The physical page to map there.
	\param attributes Protection attributes for the mapping.
	\param get_free_page Callback returning a free physical page (in pages,
		not bytes).
	\return \c B_OK.
*/
status_t
ARMPagingMethod32Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
	phys_addr_t physicalAddress, uint8 attributes,
	phys_addr_t (*get_free_page)(kernel_args*))
{
	// check to see if a page table exists for this range
	int index = VADDR_TO_PDENT(virtualAddress);
	if ((fKernelVirtualPageDirectory[index] & ARM_PDE_TYPE_MASK) == 0) {
		phys_addr_t pgtable;
		page_directory_entry *e;
		// we need to allocate a pgtable
		pgtable = get_free_page(args);
		// pgtable is in pages, convert to physical address
		pgtable *= B_PAGE_SIZE;

		TRACE("ARMPagingMethod32Bit::MapEarly(): asked for free page for "
			"pgtable. %#" B_PRIxPHYSADDR "\n", pgtable);

		// put it in the pgdir
		e = &fKernelVirtualPageDirectory[index];
		PutPageTableInPageDir(e, pgtable, attributes);

		// zero it out in it's new mapping
		// (direct physical access — relies on identity mapping at boot)
		memset((void*)pgtable, 0, B_PAGE_SIZE);
	}

	// the page table is accessed through its physical address here, too
	page_table_entry* ptEntry = (page_table_entry*)
		(fKernelVirtualPageDirectory[index] & ARM_PDE_ADDRESS_MASK);
	ptEntry += VADDR_TO_PTENT(virtualAddress);

	// the slot must not already contain a valid mapping
	ASSERT_PRINT(
		(*ptEntry & ARM_PTE_TYPE_MASK) == 0,
		"virtual address: %#" B_PRIxADDR ", pde: %#" B_PRIx32
		", existing pte: %#" B_PRIx32, virtualAddress, fKernelVirtualPageDirectory[index],
		*ptEntry);

	// now, fill in the pentry
	PutPageTableEntryInTable(ptEntry,
		physicalAddress, attributes, 0, IS_KERNEL_ADDRESS(virtualAddress));

	return B_OK;
}
391 
392 
/*!	Checks whether the given kernel page is accessible with the given
	protection (used by the kernel debugger).
	NOTE: The real check below is disabled — it is the x86 implementation
	(cr3, x86 PTE flags) and has not been ported to ARM yet, so this
	currently always returns \c true.
*/
bool
ARMPagingMethod32Bit::IsKernelPageAccessible(addr_t virtualAddress,
	uint32 protection)
{
#if 0
	// We only trust the kernel team's page directory. So switch to it first.
	// Always set it to make sure the TLBs don't contain obsolete data.
	uint32 physicalPageDirectory;
	read_cr3(physicalPageDirectory);
	write_cr3(fKernelPhysicalPageDirectory);

	// get the page directory entry for the address
	page_directory_entry pageDirectoryEntry;
	uint32 index = VADDR_TO_PDENT(virtualAddress);

	if (physicalPageDirectory == fKernelPhysicalPageDirectory) {
		pageDirectoryEntry = fKernelVirtualPageDirectory[index];
	} else if (fPhysicalPageMapper != NULL) {
		// map the original page directory and get the entry
		void* handle;
		addr_t virtualPageDirectory;
		status_t error = fPhysicalPageMapper->GetPageDebug(
			physicalPageDirectory, &virtualPageDirectory, &handle);
		if (error == B_OK) {
			pageDirectoryEntry
				= ((page_directory_entry*)virtualPageDirectory)[index];
			fPhysicalPageMapper->PutPageDebug(virtualPageDirectory, handle);
		} else
			pageDirectoryEntry = 0;
	} else
		pageDirectoryEntry = 0;

	// map the page table and get the entry
	page_table_entry pageTableEntry;
	index = VADDR_TO_PTENT(virtualAddress);

	if ((pageDirectoryEntry & ARM_PDE_PRESENT) != 0
			&& fPhysicalPageMapper != NULL) {
		void* handle;
		addr_t virtualPageTable;
		status_t error = fPhysicalPageMapper->GetPageDebug(
			pageDirectoryEntry & ARM_PDE_ADDRESS_MASK, &virtualPageTable,
			&handle);
		if (error == B_OK) {
			pageTableEntry = ((page_table_entry*)virtualPageTable)[index];
			fPhysicalPageMapper->PutPageDebug(virtualPageTable, handle);
		} else
			pageTableEntry = 0;
	} else
		pageTableEntry = 0;

	// switch back to the original page directory
	if (physicalPageDirectory != fKernelPhysicalPageDirectory)
		write_cr3(physicalPageDirectory);

	if ((pageTableEntry & ARM_PTE_PRESENT) == 0)
		return false;

	// present means kernel-readable, so check for writable
	return (protection & B_KERNEL_WRITE_AREA) == 0
		|| (pageTableEntry & ARM_PTE_WRITABLE) != 0;
#endif
	//IRA: fix the above!
	return true;
}
458 
459 
/*!	Installs an L2 page table in a page directory entry.
	Writes a coarse-table descriptor pointing at \a pgtablePhysical into
	\a entry. The \a attributes parameter is currently ignored (see below).
*/
/*static*/ void
ARMPagingMethod32Bit::PutPageTableInPageDir(page_directory_entry* entry,
	phys_addr_t pgtablePhysical, uint32 attributes)
{
	*entry = (pgtablePhysical & ARM_PDE_ADDRESS_MASK) | ARM_MMU_L1_TYPE_COARSE;
		// TODO: we ignore the attributes of the page table - for compatibility
		// with BeOS we allow having user accessible areas in the kernel address
		// space. This is currently being used by some drivers, mainly for the
		// frame buffer. Our current real time data implementation makes use of
		// this fact, too.
		// We might want to get rid of this possibility one day, especially if
		// we intend to port it to a platform that does not support this.
}
473 
474 
/*!	Writes a small-page entry for \a physicalAddress into \a entry.
	NOTE: The protection \a attributes, \a memoryType and \a globalPage
	parameters are currently ignored — the translation of those to ARM PTE
	bits (the x86-style code below) is disabled, so every page is mapped
	with the default small-page attributes.
*/
/*static*/ void
ARMPagingMethod32Bit::PutPageTableEntryInTable(page_table_entry* entry,
	phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
	bool globalPage)
{
	page_table_entry page = (physicalAddress & ARM_PTE_ADDRESS_MASK)
		| ARM_MMU_L2_TYPE_SMALLEXT;
#if 0 //IRA
		| ARM_PTE_PRESENT | (globalPage ? ARM_PTE_GLOBAL : 0)
		| MemoryTypeToPageTableEntryFlags(memoryType);

	// if the page is user accessible, it's automatically
	// accessible in kernel space, too (but with the same
	// protection)
	if ((attributes & B_USER_PROTECTION) != 0) {
		page |= ARM_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			page |= ARM_PTE_WRITABLE;
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		page |= ARM_PTE_WRITABLE;
#endif
	// put it in the page table
	// (volatile store — the MMU hardware reads this memory)
	*(volatile page_table_entry*)entry = page;
}
499 
500 
501 /*static*/ void
502 ARMPagingMethod32Bit::_EarlyPreparePageTables(page_table_entry* pageTables,
503 	addr_t address, size_t size)
504 {
505 	ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();
506 	memset(pageTables, 0, 256 * (size / (B_PAGE_SIZE * 256)));
507 
508 	// put the array of pgtables directly into the kernel pagedir
509 	// these will be wired and kept mapped into virtual space to be easy to get
510 	// to
511 	{
512 		addr_t virtualTable = (addr_t)pageTables;
513 
514 		for (size_t i = 0; i < (size / (B_PAGE_SIZE * 256));
515 				i++, virtualTable += 256*sizeof(page_directory_entry)) {
516 			phys_addr_t physicalTable = 0;
517 			_EarlyQuery(virtualTable, &physicalTable);
518 			page_directory_entry* entry = method->KernelVirtualPageDirectory()
519 				+ VADDR_TO_PDENT(address) + i;
520 			PutPageTableInPageDir(entry, physicalTable,
521 				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
522 		}
523 	}
524 }
525 
526 
527 //! TODO: currently assumes this translation map is active
528 /*static*/ status_t
529 ARMPagingMethod32Bit::_EarlyQuery(addr_t virtualAddress,
530 	phys_addr_t *_physicalAddress)
531 {
532 	ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();
533 	int index = VADDR_TO_PDENT(virtualAddress);
534 	if ((method->KernelVirtualPageDirectory()[index] & ARM_PDE_TYPE_MASK) == 0) {
535 		// no pagetable here
536 		return B_ERROR;
537 	}
538 
539 	page_table_entry* entry = (page_table_entry*)
540 		(method->KernelVirtualPageDirectory()[index] & ARM_PDE_ADDRESS_MASK);
541 	entry += VADDR_TO_PTENT(virtualAddress);
542 
543 	if ((*entry & ARM_PTE_TYPE_MASK) == 0) {
544 		// page mapping not valid
545 		return B_ERROR;
546 	}
547 
548 	*_physicalAddress = (*entry & ARM_PTE_ADDRESS_MASK)
549 		| VADDR_TO_PGOFF(virtualAddress);
550 
551 	return B_OK;
552 }
553