xref: /haiku/src/system/kernel/arch/ppc/arch_vm_translation_map.cpp (revision 125183f9e5c136781f71c879faaeab43fdc3ea7b)
/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*	(bonefish) Some explanatory words on how address translation is implemented
	for the 32 bit PPC architecture.

	I use the address type nomenclature as used in the PPC architecture
	specs, i.e.
	- effective address: An address as used by program instructions, i.e.
	  that's what elsewhere (e.g. in the VM implementation) is called
	  virtual address.
	- virtual address: An intermediate address computed from the effective
	  address via the segment registers.
	- physical address: An address referring to physical storage.

	The hardware translates an effective address to a physical address using
	either of two mechanisms: 1) Block Address Translation (BAT) or
	2) segment + page translation. The first mechanism does this directly
	using two sets (for data/instructions) of special purpose registers.
	The latter mechanism is of more relevance here, though:

	effective address (32 bit):	     [ 0 ESID  3 | 4  PIX 19 | 20 Byte 31 ]
	                                       |           |            |
	                             (segment registers)   |            |
	                                       |           |            |
	virtual address (52 bit):   [ 0      VSID 23 | 24 PIX 39 | 40 Byte 51 ]
	                            [ 0             VPN       39 | 40 Byte 51 ]
	                                             |                  |
	                                       (page table)             |
	                                             |                  |
	physical address (32 bit):       [ 0        PPN       19 | 20 Byte 31 ]


	ESID: Effective Segment ID
	VSID: Virtual Segment ID
	PIX:  Page Index
	VPN:  Virtual Page Number
	PPN:  Physical Page Number

	Unlike on x86 we can't switch the context to another team by simply
	setting a register to another page directory, since we only have one
	page table containing both kernel and user address mappings. Instead we
	map the effective address spaces of the kernel and *all* teams
	non-intersectingly into the virtual address space (which fortunately is
	20 bits wider), and use the segment registers to select the section of
	the virtual address space for the current team. Half of the 16 segment
	registers (8 - 15) map the kernel addresses, so they remain unchanged.

	The range of the virtual address space a team's effective address space
	is mapped to is defined by its PPCVMTranslationMap::fVSIDBase,
	which is the first of the 8 successive VSID values used for the team.

	Which fVSIDBase values are already taken is defined by the set bits in
	the bitmap sVSIDBaseBitmap.


	TODO:
	* If we want to continue to use the OF services, we would need to add
	  their address mappings to the kernel space. Unfortunately some of those
	  mappings (especially RAM) lie in an address range outside the kernel
	  address space. We would probably need to map them into each team's
	  address space as kernel read/write areas.
	* The current locking scheme is insufficient. The page table is a resource
	  shared by all teams. We need to synchronize access to it, probably via a
	  spinlock.
 */

#include <arch/vm_translation_map.h>

#include <stdlib.h>

#include <KernelExport.h>

#include <arch/cpu.h>
#include <arch_mmu.h>
#include <boot/kernel_args.h>
#include <int.h>
#include <kernel.h>
#include <slab/Slab.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include <util/AutoLock.h>

#include "generic_vm_physical_page_mapper.h"
#include "generic_vm_physical_page_ops.h"
#include "GenericVMPhysicalPageMapper.h"


static struct page_table_entry_group *sPageTable;
static size_t sPageTableSize;
static uint32 sPageTableHashMask;
static area_id sPageTableArea;

// 64 MB of iospace
#define IOSPACE_SIZE (64*1024*1024)
// We only have small (4 KB) pages. The only reason for choosing a greater
// chunk size is to limit the memory overhead, since the generic page mapper
// allocates its bookkeeping structures per physical/virtual chunk.
// TODO: Implement a page mapper more suitable for small pages!
#define IOSPACE_CHUNK_SIZE (16 * B_PAGE_SIZE)

static addr_t sIOSpaceBase;

static GenericVMPhysicalPageMapper sPhysicalPageMapper;

// The VSID is a 24 bit number. The lower three bits are defined by the
// (effective) segment number, which leaves us with a 21 bit space of
// VSID bases (= 2 * 1024 * 1024).
#define MAX_VSID_BASES (PAGE_SIZE * 8)
static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)];
static spinlock sVSIDBaseBitmapLock;

#define VSID_BASE_SHIFT 3
#define VADDR_TO_VSID(vsidBase, vaddr) (vsidBase + ((vaddr) >> 28))
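
// Illustrative example: fVSIDBase values are multiples of 8
// (VSID_BASE_SHIFT == 3), and a team with fVSIDBase == 16 owns the 8
// successive VSIDs 16..23. For the effective address 0x30001000 the segment
// number is 0x30001000 >> 28 == 3, so VADDR_TO_VSID(16, 0x30001000) == 19,
// and ppc_translation_map_change_asid() loads segment register 3 with that
// VSID when the team is scheduled.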


struct PPCVMTranslationMap : VMTranslationMap {
								PPCVMTranslationMap();
	virtual						~PPCVMTranslationMap();

			status_t			Init(bool kernel);

	inline	int					VSIDBase() const	{ return fVSIDBase; }

			page_table_entry*	LookupPageTableEntry(addr_t virtualAddress);
			bool				RemovePageTableEntry(addr_t virtualAddress);

	virtual	status_t			InitPostSem();

	virtual	bool				Lock();
	virtual	void				Unlock();

	virtual	addr_t				MappedSize() const;
	virtual	size_t				MaxPagesNeededToMap(addr_t start,
									addr_t end) const;

	virtual	status_t			Map(addr_t virtualAddress,
									addr_t physicalAddress, uint32 attributes,
									uint32 memoryType,
									vm_page_reservation* reservation);
	virtual	status_t			Unmap(addr_t start, addr_t end);

	virtual	status_t			UnmapPage(VMArea* area, addr_t address,
									bool updatePageQueue);

	virtual	status_t			Query(addr_t virtualAddress,
									addr_t* _physicalAddress,
									uint32* _flags);
	virtual	status_t			QueryInterrupt(addr_t virtualAddress,
									addr_t* _physicalAddress,
									uint32* _flags);

	virtual	status_t			Protect(addr_t base, addr_t top,
									uint32 attributes, uint32 memoryType);
	virtual	status_t			ClearFlags(addr_t virtualAddress,
									uint32 flags);

	virtual	bool				ClearAccessedAndModified(
									VMArea* area, addr_t address,
									bool unmapIfUnaccessed,
									bool& _modified);

	virtual	void				Flush();

protected:
			int					fVSIDBase;
};


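// Makes the given translation map the active one by loading its VSIDs into
// the user half of the segment registers (segments 0-7). The kernel half
// (segments 8-15) is set up once at boot and never changes, which is why
// only KERNEL_BASE == 0x80000000 is supported below.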
void
ppc_translation_map_change_asid(VMTranslationMap *map)
{
// this code depends on the kernel being at 0x80000000, fix if we change that
#if KERNEL_BASE != 0x80000000
#error fix me
#endif
	int vsidBase = static_cast<PPCVMTranslationMap*>(map)->VSIDBase();

	isync();	// synchronize context
	asm("mtsr	0,%0" : : "g"(vsidBase));
	asm("mtsr	1,%0" : : "g"(vsidBase + 1));
	asm("mtsr	2,%0" : : "g"(vsidBase + 2));
	asm("mtsr	3,%0" : : "g"(vsidBase + 3));
	asm("mtsr	4,%0" : : "g"(vsidBase + 4));
	asm("mtsr	5,%0" : : "g"(vsidBase + 5));
	asm("mtsr	6,%0" : : "g"(vsidBase + 6));
	asm("mtsr	7,%0" : : "g"(vsidBase + 7));
	isync();	// synchronize context
}


static void
fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
	addr_t virtualAddress, addr_t physicalAddress, uint8 protection,
	bool secondaryHash)
{
	// lower 32 bit - set at once
	entry->physical_page_number = physicalAddress / B_PAGE_SIZE;
	entry->_reserved0 = 0;
	entry->referenced = false;
	entry->changed = false;
	entry->write_through = false;
	entry->caching_inhibited = false;
	entry->memory_coherent = false;
	entry->guarded = false;
	entry->_reserved1 = 0;
	entry->page_protection = protection & 0x3;
	eieio();
		// we need to make sure that the lower 32 bit were
		// already written when the entry becomes valid

	// upper 32 bit
	entry->virtual_segment_id = virtualSegmentID;
	entry->secondary_hash = secondaryHash;
	entry->abbr_page_index = (virtualAddress >> 22) & 0x3f;
	entry->valid = true;

	ppc_sync();
}


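// Looks up the page table entry mapping the given effective address, or
// returns NULL if there is none. The hash values come from
// page_table_entry::PrimaryHash()/SecondaryHash(); as defined by the PowerPC
// OEA, the primary hash is formed by XORing the low-order VSID bits with the
// page index, and the secondary hash is the one's complement of the primary
// one, so each mapping can live in one of two groups of eight entries.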
page_table_entry *
PPCVMTranslationMap::LookupPageTableEntry(addr_t virtualAddress)
{
	// lookup the vsid based off the va
	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);

//	dprintf("vm_translation_map.lookup_page_table_entry: vsid %ld, va 0x%lx\n", virtualSegmentID, virtualAddress);

	// Search for the page table entry using the primary hash value

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == false
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			return entry;
	}

	// didn't find it, try the secondary hash value

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == true
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			return entry;
	}

	return NULL;
}


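// Invalidates the page table entry mapping the given effective address, if
// any. The sequence below follows the usual PPC recipe for removing a PTE:
// clear the valid bit, make the change visible with a sync, invalidate the
// corresponding TLB entry with tlbie, and wait for the invalidation to
// complete on all processors with tlbsync/sync.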
bool
PPCVMTranslationMap::RemovePageTableEntry(addr_t virtualAddress)
{
	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
	if (entry == NULL)
		return false;

	entry->valid = 0;
	ppc_sync();
	tlbie(virtualAddress);
	eieio();
	tlbsync();
	ppc_sync();

	return true;
}


static status_t
map_iospace_chunk(addr_t va, addr_t pa, uint32 flags)
{
	pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
	va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
	if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
		panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);

	// map the pages
	return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
}


// #pragma mark -


PPCVMTranslationMap::PPCVMTranslationMap()
{
}


PPCVMTranslationMap::~PPCVMTranslationMap()
{
	if (fMapCount > 0) {
		panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n",
			this, fMapCount);
	}

	// mark the vsid base not in use
	int baseBit = fVSIDBase >> VSID_BASE_SHIFT;
	atomic_and((vint32 *)&sVSIDBaseBitmap[baseBit / 32],
			~(1 << (baseBit % 32)));
}


status_t
PPCVMTranslationMap::Init(bool kernel)
{
	cpu_status state = disable_interrupts();
	acquire_spinlock(&sVSIDBaseBitmapLock);

	// allocate a VSID base for this one
	if (kernel) {
		// The boot loader has set up the segment registers for identical
		// mapping. Two VSID bases are reserved for the kernel: 0 and 8. The
		// latter one for mapping the kernel address space (0x80000000...), the
		// former one for the lower addresses required by the Open Firmware
		// services.
		fVSIDBase = 0;
		sVSIDBaseBitmap[0] |= 0x3;
	} else {
		int i = 0;

		while (i < MAX_VSID_BASES) {
			if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
				i += 32;
				continue;
			}
			if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
				// we found it
				sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
				break;
			}
			i++;
		}
		if (i >= MAX_VSID_BASES)
			panic("vm_translation_map_create: out of VSID bases\n");
		fVSIDBase = i << VSID_BASE_SHIFT;
	}

	release_spinlock(&sVSIDBaseBitmapLock);
	restore_interrupts(state);

	return B_OK;
}


status_t
PPCVMTranslationMap::InitPostSem()
{
	return B_OK;
}


bool
PPCVMTranslationMap::Lock()
{
	recursive_lock_lock(&fLock);
	return true;
}


void
PPCVMTranslationMap::Unlock()
{
	recursive_lock_unlock(&fLock);
}


size_t
PPCVMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	return 0;
}


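// Inserts a mapping for virtualAddress into the hashed page table, trying
// the eight slots of the primary PTE group first and falling back to the
// secondary group. See ppc_map_address_range() below for a typical caller
// that maps a physically contiguous range page by page.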
status_t
PPCVMTranslationMap::Map(addr_t virtualAddress, addr_t physicalAddress,
	uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
{
// TODO: Support memory types!
	// lookup the vsid based off the va
	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);
	uint32 protection = 0;

	// ToDo: check this
	// all kernel mappings are R/W to supervisor code
	if (attributes & (B_READ_AREA | B_WRITE_AREA))
		protection = (attributes & B_WRITE_AREA) ? PTE_READ_WRITE : PTE_READ_ONLY;

	//dprintf("vm_translation_map.map_tmap: vsid %d, pa 0x%lx, va 0x%lx\n", vsid, pa, va);

	// Search for a free page table slot using the primary hash value

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->valid)
			continue;

		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
			protection, false);
		fMapCount++;
		return B_OK;
	}

	// Didn't find one, try the secondary hash value

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->valid)
			continue;

		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
			protection, true);
		fMapCount++;
		return B_OK;
	}

	panic("vm_translation_map.map_tmap: hash table full\n");
	return B_ERROR;
}


status_t
PPCVMTranslationMap::Unmap(addr_t start, addr_t end)
{
	page_table_entry *entry;

	start = ROUNDDOWN(start, B_PAGE_SIZE);
	end = ROUNDUP(end, B_PAGE_SIZE);

//	dprintf("vm_translation_map.unmap_tmap: start 0x%lx, end 0x%lx\n", start, end);

	while (start < end) {
		if (RemovePageTableEntry(start))
			fMapCount--;

		start += B_PAGE_SIZE;
	}

	return B_OK;
}


status_t
PPCVMTranslationMap::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	RecursiveLocker locker(fLock);

	if (area->cache_type == CACHE_TYPE_DEVICE) {
		if (!RemovePageTableEntry(address))
			return B_ENTRY_NOT_FOUND;

		fMapCount--;
		return B_OK;
	}

	page_table_entry* entry = LookupPageTableEntry(address);
	if (entry == NULL)
		return B_ENTRY_NOT_FOUND;

	page_num_t pageNumber = entry->physical_page_number;
	bool accessed = entry->referenced;
	bool modified = entry->changed;

	RemovePageTableEntry(address);

	fMapCount--;

	// get the page
	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT(page != NULL);

	// transfer the accessed/dirty flags to the page
	page->accessed |= accessed;
	page->modified |= modified;

	// remove the mapping object/decrement the wired_count of the page
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT(mapping != NULL);
	} else
		page->wired_count--;

	locker.Unlock();

	if (page->wired_count == 0 && page->mappings.IsEmpty()) {
		atomic_add(&gMappedPagesCount, -1);

		if (updatePageQueue) {
			if (page->Cache()->temporary)
				vm_page_set_state(page, PAGE_STATE_INACTIVE);
			else if (page->modified)
				vm_page_set_state(page, PAGE_STATE_MODIFIED);
			else
				vm_page_set_state(page, PAGE_STATE_CACHED);
		}
	}

	if (mapping != NULL) {
		bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
		object_cache_free(gPageMappingsObjectCache, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY
				| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0));
	}

	return B_OK;
}


status_t
PPCVMTranslationMap::Query(addr_t va, addr_t *_outPhysical, uint32 *_outFlags)
{
	page_table_entry *entry;

	// default the flags to not present
	*_outFlags = 0;
	*_outPhysical = 0;

	entry = LookupPageTableEntry(va);
	if (entry == NULL)
		return B_NO_ERROR;

	// ToDo: check this!
	if (IS_KERNEL_ADDRESS(va))
		*_outFlags |= B_KERNEL_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_KERNEL_WRITE_AREA);
	else
		*_outFlags |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_WRITE_AREA);

	*_outFlags |= entry->changed ? PAGE_MODIFIED : 0;
	*_outFlags |= entry->referenced ? PAGE_ACCESSED : 0;
	*_outFlags |= entry->valid ? PAGE_PRESENT : 0;

	*_outPhysical = entry->physical_page_number * B_PAGE_SIZE;

	return B_OK;
}


status_t
PPCVMTranslationMap::QueryInterrupt(addr_t virtualAddress,
	addr_t* _physicalAddress, uint32* _flags)
{
	return PPCVMTranslationMap::Query(virtualAddress, _physicalAddress, _flags);
}


addr_t
PPCVMTranslationMap::MappedSize() const
{
	return fMapCount;
}


status_t
PPCVMTranslationMap::Protect(addr_t base, addr_t top, uint32 attributes,
	uint32 memoryType)
{
	// XXX finish
	return B_ERROR;
}


status_t
PPCVMTranslationMap::ClearFlags(addr_t virtualAddress, uint32 flags)
{
	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
	if (entry == NULL)
		return B_NO_ERROR;

	bool modified = false;

	// clear the bits
	if (flags & PAGE_MODIFIED && entry->changed) {
		entry->changed = false;
		modified = true;
	}
	if (flags & PAGE_ACCESSED && entry->referenced) {
		entry->referenced = false;
		modified = true;
	}

	// synchronize
	if (modified) {
		tlbie(virtualAddress);
		eieio();
		tlbsync();
		ppc_sync();
	}

	return B_OK;
}


bool
PPCVMTranslationMap::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	// TODO: Implement for real! ATM this is just an approximation using
	// Query(), ClearFlags(), and UnmapPage(). See below!

	RecursiveLocker locker(fLock);

	uint32 flags;
	addr_t physicalAddress;
	if (Query(address, &physicalAddress, &flags) != B_OK
		|| (flags & PAGE_PRESENT) == 0) {
		return false;
	}

	_modified = (flags & PAGE_MODIFIED) != 0;

	if ((flags & (PAGE_ACCESSED | PAGE_MODIFIED)) != 0)
		ClearFlags(address, flags & (PAGE_ACCESSED | PAGE_MODIFIED));

	if ((flags & PAGE_ACCESSED) != 0)
		return true;

	if (!unmapIfUnaccessed)
		return false;

	locker.Unlock();

	UnmapPage(area, address, false);
		// TODO: Obvious race condition: Between querying and unmapping, the
		// page could have been accessed. We try to compensate by considering
		// vm_page::{accessed,modified} (which would have been updated by
		// UnmapPage()) below, but that doesn't quite match the required
		// semantics of the method.

	vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
	if (page == NULL)
		return false;

	_modified |= page->modified;

	return page->accessed;
}


void
PPCVMTranslationMap::Flush()
{
// TODO: arch_cpu_global_TLB_invalidate() is extremely expensive and doesn't
// even cut it here. We are supposed to invalidate all TLB entries for this
// map on all CPUs. We should loop over the virtual pages and invoke tlbie
// instead (which marks the entry invalid on all CPUs).
	arch_cpu_global_TLB_invalidate();
}


static status_t
get_physical_page_tmap(addr_t physicalAddress, addr_t *_virtualAddress,
	void **handle)
{
	return generic_get_physical_page(physicalAddress, _virtualAddress, 0);
}


static status_t
put_physical_page_tmap(addr_t virtualAddress, void *handle)
{
	return generic_put_physical_page(virtualAddress);
}


//  #pragma mark -
//  VM API


status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	PPCVMTranslationMap* map = new(std::nothrow) PPCVMTranslationMap;
	if (map == NULL)
		return B_NO_MEMORY;

	status_t error = map->Init(kernel);
	if (error != B_OK) {
		delete map;
		return error;
	}

	*_map = map;
	return B_OK;
}


status_t
arch_vm_translation_map_init(kernel_args *args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	sPageTable = (page_table_entry_group *)args->arch_args.page_table.start;
	sPageTableSize = args->arch_args.page_table.size;
	sPageTableHashMask = sPageTableSize / sizeof(page_table_entry_group) - 1;

	// init physical page mapper
	status_t error = generic_vm_physical_page_mapper_init(args,
		map_iospace_chunk, &sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
	if (error != B_OK)
		return error;

	new(&sPhysicalPageMapper) GenericVMPhysicalPageMapper;

	*_physicalPageMapper = &sPhysicalPageMapper;
	return B_OK;
}


status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
	// If the page table doesn't lie within the kernel address space, we
	// remap it.
	if (!IS_KERNEL_ADDRESS(sPageTable)) {
		addr_t newAddress = (addr_t)sPageTable;
		status_t error = ppc_remap_address_range(&newAddress, sPageTableSize,
			false);
		if (error != B_OK) {
			panic("arch_vm_translation_map_init_post_area(): Failed to remap "
				"the page table!");
			return error;
		}

		// set the new page table address
		addr_t oldVirtualBase = (addr_t)(sPageTable);
		sPageTable = (page_table_entry_group*)newAddress;

		// unmap the old pages
		ppc_unmap_address_range(oldVirtualBase, sPageTableSize);

// TODO: We should probably map the page table via BAT. It is relatively large,
// and due to being a hash table the access patterns might look sporadic, which
// certainly isn't to the liking of the TLB.
	}

	// create an area to cover the page table
	sPageTableArea = create_area("page_table", (void **)&sPageTable, B_EXACT_ADDRESS,
		sPageTableSize, B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

	// init physical page mapper
	status_t error = generic_vm_physical_page_mapper_init_post_area(args);
	if (error != B_OK)
		return error;

	return B_OK;
}


status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
	// init physical page mapper
	return generic_vm_physical_page_mapper_init_post_sem(args);
}


/**	Directly maps a page without having knowledge of any kernel structures.
 *	Used only during VM setup.
 *	It currently ignores the "attributes" parameter and sets all pages
 *	read/write.
 */

status_t
arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress, addr_t physicalAddress,
	uint8 attributes, addr_t (*get_free_page)(kernel_args *))
{
	uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, (uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		// 8 entries in a group
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, PTE_READ_WRITE, false);
		return B_OK;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, PTE_READ_WRITE, true);
		return B_OK;
	}

	return B_ERROR;
}


// XXX currently assumes this translation map is active

status_t
arch_vm_translation_map_early_query(addr_t va, addr_t *out_physical)
{
	//PANIC_UNIMPLEMENTED();
	panic("vm_translation_map_quick_query(): not yet implemented\n");
	return B_OK;
}


// #pragma mark -


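// Maps the physically contiguous range [physicalAddress, physicalAddress
// + size) to [virtualAddress, virtualAddress + size) in the kernel address
// space, one page at a time. Used by map_iospace_chunk() above and by
// ppc_remap_address_range() below.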
status_t
ppc_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
	size_t size)
{
	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
	virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);
	physicalAddress = ROUNDDOWN(physicalAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();
	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());

	vm_page_reservation reservation;
	vm_page_reserve_pages(&reservation, 0, VM_PRIORITY_USER);
		// We don't need any pages for mapping.

	// map the pages
	for (; virtualAddress < virtualEnd;
		 virtualAddress += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
		status_t error = map->Map(virtualAddress, physicalAddress,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, &reservation);
		if (error != B_OK)
			return error;
	}

	return B_OK;
}


void
ppc_unmap_address_range(addr_t virtualAddress, size_t size)
{
	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
	virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();

	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());
	for (; virtualAddress < virtualEnd; virtualAddress += B_PAGE_SIZE)
		map->RemovePageTableEntry(virtualAddress);
}


status_t
ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
{
	addr_t virtualAddress = ROUNDDOWN(*_virtualAddress, B_PAGE_SIZE);
	size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();

	// reserve space in the address space
	void *newAddress = NULL;
	status_t error = vm_reserve_address_range(addressSpace->ID(), &newAddress,
		B_ANY_KERNEL_ADDRESS, size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (error != B_OK)
		return error;

	// get the area's first physical page
	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());
	page_table_entry *entry = map->LookupPageTableEntry(virtualAddress);
	if (!entry)
		return B_ERROR;
	addr_t physicalBase = entry->physical_page_number << 12;

	// map the pages
	error = ppc_map_address_range((addr_t)newAddress, physicalBase, size);
	if (error != B_OK)
		return error;

	*_virtualAddress = (addr_t)newAddress;

	// unmap the old pages
	if (unmap)
		ppc_unmap_address_range(virtualAddress, size);

	return B_OK;
}


bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	uint32 protection)
{
	// TODO: Implement!
	return false;
}