Lines Matching refs:area

100 inline AreaCacheLocker(VMArea* area) in AreaCacheLocker() argument
103 SetTo(area); in AreaCacheLocker()
111 inline void SetTo(VMArea* area) in SetTo() argument
114 area != NULL ? vm_area_get_locked_cache(area) : NULL, true, true); in SetTo()
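
The matches at 100-114 give this locker nearly in full. A sketch of the reconstruction, assuming the AutoLocker<VMCache, AreaCacheLocking> base Haiku's VM code uses for cache lockers (the meaning of the two trailing booleans, alreadyLocked and lockingEnabled, is inferred):

struct AreaCacheLocker : AutoLocker<VMCache, AreaCacheLocking> {
	inline AreaCacheLocker(VMArea* area)
		:
		AutoLocker<VMCache, AreaCacheLocking>()
	{
		SetTo(area);
	}

	inline void SetTo(VMArea* area)
	{
		// vm_area_get_locked_cache() hands back the cache already locked
		// (and referenced), hence the "alreadyLocked" flag passed below.
		AutoLocker<VMCache, AreaCacheLocking>::SetTo(
			area != NULL ? vm_area_get_locked_cache(area) : NULL, true, true);
	}
};
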
261 static void delete_area(VMAddressSpace* addressSpace, VMArea* area,
320 PageFaultError(area_id area, status_t error) in PageFaultError() argument
322 fArea(area), in PageFaultError()
369 PageFaultDone(area_id area, VMCache* topCache, VMCache* cache, in PageFaultDone() argument
372 fArea(area), in PageFaultDone()
482 virtual_page_address(VMArea* area, vm_page* page) in virtual_page_address() argument
484 return area->Base() in virtual_page_address()
485 + ((page->cache_offset << PAGE_SHIFT) - area->cache_offset); in virtual_page_address()
490 is_page_in_area(VMArea* area, vm_page* page) in is_page_in_area() argument
493 return pageCacheOffsetBytes >= area->cache_offset in is_page_in_area()
494 && pageCacheOffsetBytes < area->cache_offset + (off_t)area->Size(); in is_page_in_area()
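
Lines 482-494 reconstruct these two helpers almost verbatim; only the local variable in is_page_in_area() is filled in here, as an assumption consistent with line 493:

static addr_t
virtual_page_address(VMArea* area, vm_page* page)
{
	// Translate the page's cache offset into a virtual address in the area.
	return area->Base()
		+ ((page->cache_offset << PAGE_SHIFT) - area->cache_offset);
}

static bool
is_page_in_area(VMArea* area, vm_page* page)
{
	// The page belongs to the area iff its cache offset lies inside the
	// window [cache_offset, cache_offset + Size()) that the area maps.
	off_t pageCacheOffsetBytes = (off_t)page->cache_offset << PAGE_SHIFT;
	return pageCacheOffsetBytes >= area->cache_offset
		&& pageCacheOffsetBytes < area->cache_offset + (off_t)area->Size();
}
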
504 VMArea* area = VMAreas::LookupLocked(id); in lookup_area() local
505 if (area != NULL && area->address_space != addressSpace) in lookup_area()
506 area = NULL; in lookup_area()
510 return area; in lookup_area()
524 allocate_area_page_protections(VMArea* area) in allocate_area_page_protections() argument
526 size_t bytes = area_page_protections_size(area->Size()); in allocate_area_page_protections()
527 area->page_protections = (uint8*)malloc_etc(bytes, in allocate_area_page_protections()
528 area->address_space == VMAddressSpace::Kernel() in allocate_area_page_protections()
530 if (area->page_protections == NULL) in allocate_area_page_protections()
534 uint32 areaProtection = area->protection in allocate_area_page_protections()
536 memset(area->page_protections, areaProtection | (areaProtection << 4), bytes); in allocate_area_page_protections()
539 area->protection &= ~(B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA in allocate_area_page_protections()
555 set_area_page_protection(VMArea* area, addr_t pageAddress, uint32 protection) in set_area_page_protection() argument
558 addr_t pageIndex = (pageAddress - area->Base()) / B_PAGE_SIZE; in set_area_page_protection()
559 uint8& entry = area->page_protections[pageIndex / 2]; in set_area_page_protection()
568 get_area_page_protection(VMArea* area, addr_t pageAddress) in get_area_page_protection() argument
570 if (area->page_protections == NULL) in get_area_page_protection()
571 return area->protection; in get_area_page_protection()
573 uint32 pageIndex = (pageAddress - area->Base()) / B_PAGE_SIZE; in get_area_page_protection()
574 uint32 protection = area->page_protections[pageIndex / 2]; in get_area_page_protection()
587 if (area->address_space == VMAddressSpace::Kernel()) in get_area_page_protection()
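
Lines 524-587 show that page_protections packs two 4-bit protection nibbles per byte (even page index: low nibble, odd: high). A sketch of the accessors implied by lines 555-574; the exact masks are assumptions, and the kernel-bit mirroring behind line 587 is elided:

static void
set_area_page_protection(VMArea* area, addr_t pageAddress, uint32 protection)
{
	protection &= B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA;
	addr_t pageIndex = (pageAddress - area->Base()) / B_PAGE_SIZE;
	uint8& entry = area->page_protections[pageIndex / 2];
	if (pageIndex % 2 == 0)
		entry = (entry & 0xf0) | protection;		// even page: low nibble
	else
		entry = (entry & 0x0f) | (protection << 4);	// odd page: high nibble
}

static uint32
get_area_page_protection(VMArea* area, addr_t pageAddress)
{
	if (area->page_protections == NULL)
		return area->protection;

	uint32 pageIndex = (pageAddress - area->Base()) / B_PAGE_SIZE;
	uint32 protection = area->page_protections[pageIndex / 2];
	if (pageIndex % 2 == 0)
		protection &= 0x0f;
	else
		protection >>= 4;
	// Line 587: for kernel-space areas the user bits are translated into
	// the corresponding B_KERNEL_*_AREA bits (omitted in this sketch).
	return protection;
}
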
598 compute_area_page_commitment(VMArea* area) in compute_area_page_commitment() argument
600 const size_t bytes = area_page_protections_size(area->Size()); in compute_area_page_commitment()
601 const bool oddPageCount = ((area->Size() / B_PAGE_SIZE) % 2) != 0; in compute_area_page_commitment()
604 const uint8 protection = area->page_protections[i]; in compute_area_page_commitment()
606 if (area->cache->LookupPage(pageOffset) != NULL) in compute_area_page_commitment()
614 if (area->cache->LookupPage(pageOffset + B_PAGE_SIZE) != NULL) in compute_area_page_commitment()
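
Lines 598-614 outline the commitment computation: walk the packed protections one byte (two pages) at a time and count each page that needs backing. The counting condition below, writable or already present in the cache, is an assumption; the LookupPage() probes and the odd-page-count cutoff are taken from the matches:

static size_t
compute_area_page_commitment(VMArea* area)
{
	const size_t bytes = area_page_protections_size(area->Size());
	const bool oddPageCount = ((area->Size() / B_PAGE_SIZE) % 2) != 0;

	size_t pages = 0;
	for (size_t i = 0; i < bytes; i++) {
		const uint8 protection = area->page_protections[i];
		const off_t pageOffset = area->cache_offset + (off_t)i * 2 * B_PAGE_SIZE;

		// Low nibble: first page of the pair.
		if ((protection & B_WRITE_AREA) != 0
				|| area->cache->LookupPage(pageOffset) != NULL)
			pages++;

		if (i == (bytes - 1) && oddPageCount)
			break;	// the final byte covers only one page

		// High nibble: second page of the pair.
		if (((protection >> 4) & B_WRITE_AREA) != 0
				|| area->cache->LookupPage(pageOffset + B_PAGE_SIZE) != NULL)
			pages++;
	}
	return pages;
}
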
628 map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection, in map_page() argument
631 VMTranslationMap* map = area->address_space->TranslationMap(); in map_page()
635 if (area->wiring == B_NO_LOCK) { in map_page()
638 bool isKernelSpace = area->address_space == VMAddressSpace::Kernel(); in map_page()
646 mapping->area = area; in map_page()
651 area->MemoryType(), reservation); in map_page()
658 area->mappings.Add(mapping); in map_page()
666 area->MemoryType(), reservation); in map_page()
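
Lines 628-666 split map_page() into two branches on area->wiring. A sketch of that structure; allocate_page_mapping() is a hypothetical stand-in for the object-cache allocation the real code performs (lines 638-646 show it is parameterized on whether the area lives in kernel space):

static status_t
map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection,
	vm_page_reservation* reservation)
{
	VMTranslationMap* map = area->address_space->TranslationMap();

	if (area->wiring == B_NO_LOCK) {
		// Unwired area: track the mapping with a vm_page_mapping object so
		// the page can later be unmapped from every area referencing it.
		bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
		vm_page_mapping* mapping = allocate_page_mapping(isKernelSpace);
			// hypothetical helper
		if (mapping == NULL)
			return B_NO_MEMORY;
		mapping->page = page;
		mapping->area = area;

		map->Lock();
		map->Map(address, page->physical_page_number * B_PAGE_SIZE,
			protection, area->MemoryType(), reservation);
		page->mappings.Add(mapping);
		area->mappings.Add(mapping);
		map->Unlock();
	} else {
		// Wired area: no per-page bookkeeping needed, just map it.
		map->Lock();
		map->Map(address, page->physical_page_number * B_PAGE_SIZE,
			protection, area->MemoryType(), reservation);
		map->Unlock();
	}
	return B_OK;
}
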
692 unmap_page(VMArea* area, addr_t virtualAddress) in unmap_page() argument
694 return area->address_space->TranslationMap()->UnmapPage(area, in unmap_page()
703 unmap_pages(VMArea* area, addr_t base, size_t size) in unmap_pages() argument
705 area->address_space->TranslationMap()->UnmapPages(area, base, size, true); in unmap_pages()
710 intersect_area(VMArea* area, addr_t& address, addr_t& size, addr_t& offset) in intersect_area() argument
712 if (address < area->Base()) { in intersect_area()
713 offset = area->Base() - address; in intersect_area()
717 address = area->Base(); in intersect_area()
720 if (size > area->Size()) in intersect_area()
721 size = area->Size(); in intersect_area()
726 offset = address - area->Base(); in intersect_area()
727 if (offset >= area->Size()) in intersect_area()
730 if (size >= area->Size() - offset) in intersect_area()
731 size = area->Size() - offset; in intersect_area()
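
Lines 710-731 give both branches of the clamp. Reconstructed below; the size -= offset and offset = 0 steps in the first branch are assumptions needed to make the two branches agree on the meaning of the out-parameters:

static bool
intersect_area(VMArea* area, addr_t& address, addr_t& size, addr_t& offset)
{
	if (address < area->Base()) {
		// Range starts before the area: clip the front.
		offset = area->Base() - address;
		if (offset >= size)
			return false;	// range ends before the area begins

		address = area->Base();
		size -= offset;
		offset = 0;
		if (size > area->Size())
			size = area->Size();
	} else {
		// Range starts inside (or past) the area: clip the tail.
		offset = address - area->Base();
		if (offset >= area->Size())
			return false;	// range begins past the area's end

		if (size >= area->Size() - offset)
			size = area->Size() - offset;
	}

	return true;
}
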
746 cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address, in cut_area() argument
750 if (!intersect_area(area, address, size, offset)) in cut_area()
754 if (address == area->Base() && size == area->Size()) { in cut_area()
755 delete_area(addressSpace, area, false); in cut_area()
770 VMCache* cache = vm_area_get_locked_cache(area); in cut_area()
776 bool onlyCacheUser = cache->areas == area && area->cache_next == NULL in cut_area()
777 && cache->consumers.IsEmpty() && area->cache_type == CACHE_TYPE_RAM; in cut_area()
779 const addr_t oldSize = area->Size(); in cut_area()
782 if (offset > 0 && size == area->Size() - offset) { in cut_area()
783 status_t error = addressSpace->ShrinkAreaTail(area, offset, in cut_area()
788 if (area->page_protections != NULL) { in cut_area()
790 area->page_protections, area->Size(), allocationFlags); in cut_area()
793 addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags); in cut_area()
797 area->page_protections = newProtections; in cut_area()
801 unmap_pages(area, address, size); in cut_area()
810 if (area->page_protections != NULL) { in cut_area()
812 const size_t newCommitmentPages = compute_area_page_commitment(area); in cut_area()
822 if (area->Base() == address) { in cut_area()
824 if (area->page_protections != NULL) { in cut_area()
826 newProtections = realloc_area_page_protections(NULL, area->Size(), in cut_area()
834 status_t error = addressSpace->ShrinkAreaHead(area, area->Size() - size, in cut_area()
841 if (area->page_protections != NULL) { in cut_area()
843 ssize_t pagesShifted = (oldSize - area->Size()) / B_PAGE_SIZE; in cut_area()
844 bitmap_shift<uint8>(area->page_protections, oldBytes * 8, -(pagesShifted * 4)); in cut_area()
846 size_t bytes = area_page_protections_size(area->Size()); in cut_area()
847 memcpy(newProtections, area->page_protections, bytes); in cut_area()
848 free_etc(area->page_protections, allocationFlags); in cut_area()
849 area->page_protections = newProtections; in cut_area()
853 unmap_pages(area, address, size); in cut_area()
862 if (area->page_protections != NULL) { in cut_area()
864 const size_t newCommitmentPages = compute_area_page_commitment(area); in cut_area()
871 area->cache_offset += size; in cut_area()
880 addr_t secondSize = area->Size() - offset - size; in cut_area()
883 unmap_pages(area, address, area->Size() - firstNewSize); in cut_area()
886 status_t error = addressSpace->ShrinkAreaTail(area, firstNewSize, in cut_area()
896 if (area->page_protections != NULL) { in cut_area()
897 areaNewProtections = realloc_area_page_protections(NULL, area->Size(), in cut_area()
903 addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags); in cut_area()
920 area->protection & B_OVERCOMMITTING_AREA, 0, 0, in cut_area()
923 addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags); in cut_area()
932 secondCache->virtual_base = area->cache_offset; in cut_area()
933 secondCache->virtual_end = area->cache_offset + secondSize; in cut_area()
936 off_t adoptOffset = area->cache_offset + secondBase - area->Base(); in cut_area()
938 area->cache_offset); in cut_area()
951 area->cache_offset, area->name, secondSize, area->wiring, in cut_area()
952 area->protection, area->protection_max, REGION_NO_PRIVATE_MAP, 0, in cut_area()
962 area->cache_offset, secondSize, adoptOffset); in cut_area()
975 addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags); in cut_area()
984 error = map_backing_store(addressSpace, cache, area->cache_offset in cut_area()
985 + (secondBase - area->Base()), in cut_area()
986 area->name, secondSize, area->wiring, area->protection, in cut_area()
987 area->protection_max, REGION_NO_PRIVATE_MAP, 0, in cut_area()
990 addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags); in cut_area()
1000 if (area->page_protections != NULL) { in cut_area()
1002 const size_t areaBytes = area_page_protections_size(area->Size()); in cut_area()
1003 memcpy(areaNewProtections, area->page_protections, areaBytes); in cut_area()
1004 uint8* areaOldProtections = area->page_protections; in cut_area()
1005 area->page_protections = areaNewProtections; in cut_area()
1010 addr_t secondAreaOffset = secondBase - area->Base(); in cut_area()
1023 const size_t areaCommitPages = compute_area_page_commitment(area); in cut_area()
1024 area->cache->Commit(areaCommitPages * B_PAGE_SIZE, VM_PRIORITY_USER); in cut_area()
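
Read in sequence, the cut_area() matches (746-1024) trace four cases. A structural sketch with the bookkeeping (page_protections reallocation and shifting, commitment recomputation, cache split) reduced to comments; the literal 0 stands in for the allocationFlags the real code derives from the address space:

static status_t
cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
	addr_t size, VMArea** _secondArea, bool kernel)
{
	addr_t offset;
	if (!intersect_area(area, address, size, offset))
		return B_OK;	// nothing of the range lies inside this area

	if (address == area->Base() && size == area->Size()) {
		// Case 1: the cut covers the whole area -- delete it outright.
		delete_area(addressSpace, area, false);
		return B_OK;
	}

	if (offset > 0 && size == area->Size() - offset) {
		// Case 2: cut at the end -- shrink the tail in place (the new
		// size equals `offset`), then unmap the cut range and trim the
		// cache/commitment (lines 782-812).
		status_t error = addressSpace->ShrinkAreaTail(area, offset, 0);
		if (error != B_OK)
			return error;
		unmap_pages(area, address, size);
		return B_OK;
	}

	if (area->Base() == address) {
		// Case 3: cut at the front -- shrink the head, shift the packed
		// page_protections down (line 844), unmap, and advance the
		// area's cache_offset by the cut size (line 871).
		status_t error = addressSpace->ShrinkAreaHead(area,
			area->Size() - size, 0);
		if (error != B_OK)
			return error;
		unmap_pages(area, address, size);
		area->cache_offset += size;
		return B_OK;
	}

	// Case 4: cut in the middle -- shrink the area to the part before the
	// cut and create a second area for the tail: the backing RAM cache is
	// split when this area is its only user (line 776), otherwise the
	// same cache is mapped again via map_backing_store() (lines 984-987).
	addr_t firstNewSize = offset;
	addr_t secondBase = address + size;
	addr_t secondSize = area->Size() - offset - size;
	unmap_pages(area, address, area->Size() - firstNewSize);
	status_t error = addressSpace->ShrinkAreaTail(area, firstNewSize, 0);
	if (error != B_OK)
		return error;
	// Second-area creation elided in this sketch.
	(void)secondBase; (void)secondSize; (void)kernel; (void)_secondArea;
	return B_OK;
}
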
1066 VMArea* area = it.Next();) { in unmap_address_range()
1068 if ((area->protection & B_KERNEL_AREA) != 0) { in unmap_address_range()
1071 team_get_current_team_id(), area->id, area->name); in unmap_address_range()
1079 VMArea* area = it.Next();) { in unmap_address_range()
1081 status_t error = cut_area(addressSpace, area, address, size, NULL, in unmap_address_range()
1094 discard_area_range(VMArea* area, addr_t address, addr_t size) in discard_area_range() argument
1097 if (!intersect_area(area, address, size, offset)) in discard_area_range()
1102 VMCache* cache = vm_area_get_locked_cache(area); in discard_area_range()
1103 if (cache->areas != area || area->cache_next != NULL in discard_area_range()
1111 unmap_pages(area, address, size); in discard_area_range()
1129 VMArea* area = it.Next();) { in discard_address_range()
1130 status_t error = discard_area_range(area, address, size); in discard_address_range()
1183 VMArea* area = addressSpace->CreateArea(areaName, wiring, protection, in map_backing_store() local
1186 if (area == NULL) in map_backing_store()
1187 area->protection_max = protectionMax & B_USER_PROTECTION; in map_backing_store()
1243 status = addressSpace->InsertArea(area, size, addressRestrictions, in map_backing_store()
1255 area->cache = cache; in map_backing_store()
1256 area->cache_offset = offset; in map_backing_store()
1259 cache->InsertAreaLocked(area); in map_backing_store()
1264 status = VMAreas::Insert(area); in map_backing_store()
1274 *_area = area; in map_backing_store()
1279 cache->RemoveArea(area); in map_backing_store()
1280 area->cache = NULL; in map_backing_store()
1291 addressSpace->DeleteArea(area, allocationFlags); in map_backing_store()
1301 wait_if_area_is_wired(VMArea* area, LockerType1* locker1, LockerType2* locker2) in wait_if_area_is_wired() argument
1303 area->cache->AssertLocked(); in wait_if_area_is_wired()
1306 if (!area->AddWaiterIfWired(&waiter)) in wait_if_area_is_wired()
1343 wait_if_area_range_is_wired(VMArea* area, addr_t base, size_t size, in wait_if_area_range_is_wired() argument
1346 area->cache->AssertLocked(); in wait_if_area_range_is_wired()
1349 if (!area->AddWaiterIfWired(&waiter, base, size)) in wait_if_area_range_is_wired()
1382 VMArea* area = it.Next();) { in wait_if_address_range_is_wired()
1384 AreaCacheLocker cacheLocker(vm_area_get_locked_cache(area)); in wait_if_address_range_is_wired()
1386 if (wait_if_area_range_is_wired(area, base, size, locker, &cacheLocker)) in wait_if_address_range_is_wired()
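
Lines 1301-1386 show the wired-wait pattern used throughout: register a waiter if the area (or a range of it) is wired, drop all locks, block until unwired, and have the caller restart. A sketch of the template at line 1301; the unlock-and-wait tail is an assumption, and VMAreaUnwiredWaiter is the waiter type VMArea::AddWaiterIfWired() expects:

template<typename LockerType1, typename LockerType2>
static inline bool
wait_if_area_is_wired(VMArea* area, LockerType1* locker1, LockerType2* locker2)
{
	area->cache->AssertLocked();

	VMAreaUnwiredWaiter waiter;
	if (!area->AddWaiterIfWired(&waiter))
		return false;	// not wired -- no need to wait

	// The area is wired: unlock everything and wait. Returning true tells
	// the caller to re-acquire its locks and retry, since any state may
	// have changed while blocked.
	if (locker1 != NULL)
		locker1->Unlock();
	if (locker2 != NULL)
		locker2->Unlock();

	waiter.waitEntry.Wait();
	return true;
}
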
1402 VMArea* area; in vm_prepare_kernel_area_debug_protection() local
1403 status_t status = locker.SetFromArea(id, area); in vm_prepare_kernel_area_debug_protection()
1407 if (area->page_protections == NULL) { in vm_prepare_kernel_area_debug_protection()
1408 status = allocate_area_page_protections(area); in vm_prepare_kernel_area_debug_protection()
1413 *cookie = (void*)area; in vm_prepare_kernel_area_debug_protection()
1453 VMArea* area = (VMArea*)cookie; in vm_set_kernel_area_debug_protection() local
1455 addr_t offset = address - area->Base(); in vm_set_kernel_area_debug_protection()
1456 if (area->Size() - offset < size) { in vm_set_kernel_area_debug_protection()
1461 if (area->page_protections == NULL) { in vm_set_kernel_area_debug_protection()
1476 set_area_page_protection(area, pageAddress, protection); in vm_set_kernel_area_debug_protection()
1507 VMArea* area; in vm_block_address_range() local
1513 true, &area, NULL); in vm_block_address_range()
1520 area->cache_type = CACHE_TYPE_RAM; in vm_block_address_range()
1521 return area->id; in vm_block_address_range()
1568 VMArea* area; in vm_create_anonymous_area() local
1783 virtualAddressRestrictions, kernel, &area, _address); in vm_create_anonymous_area()
1803 for (addr_t address = area->Base(); in vm_create_anonymous_area()
1804 address < area->Base() + (area->Size() - 1); in vm_create_anonymous_area()
1808 if (isStack && address < area->Base() in vm_create_anonymous_area()
1811 if (isStack && address >= area->Base() + area->Size() in vm_create_anonymous_area()
1819 map_page(area, page, address, protection, &reservation); in vm_create_anonymous_area()
1840 for (addr_t virtualAddress = area->Base(); in vm_create_anonymous_area()
1841 virtualAddress < area->Base() + (area->Size() - 1); in vm_create_anonymous_area()
1877 addr_t virtualAddress = area->Base(); in vm_create_anonymous_area()
1882 for (virtualAddress = area->Base(); virtualAddress < area->Base() in vm_create_anonymous_area()
1883 + (area->Size() - 1); virtualAddress += B_PAGE_SIZE, in vm_create_anonymous_area()
1890 area->MemoryType(), &reservation); in vm_create_anonymous_area()
1915 area->cache_type = CACHE_TYPE_RAM; in vm_create_anonymous_area()
1916 return area->id; in vm_create_anonymous_area()
1947 VMArea* area; in vm_map_physical_memory() local
1985 &addressRestrictions, true, &area, _address); in vm_map_physical_memory()
2000 status = arch_vm_set_memory_type(area, physicalAddress, memoryType, in vm_map_physical_memory()
2003 area->SetMemoryType(memoryType); in vm_map_physical_memory()
2006 delete_area(locker.AddressSpace(), area, false); in vm_map_physical_memory()
2018 map->ProtectArea(area, area->protection); in vm_map_physical_memory()
2024 size_t reservePages = map->MaxPagesNeededToMap(area->Base(), in vm_map_physical_memory()
2025 area->Base() + (size - 1)); in vm_map_physical_memory()
2034 map->Map(area->Base() + offset, physicalAddress + offset, in vm_map_physical_memory()
2035 protection, area->MemoryType(), &reservation); in vm_map_physical_memory()
2047 area->cache_type = CACHE_TYPE_DEVICE; in vm_map_physical_memory()
2048 return area->id; in vm_map_physical_memory()
2099 VMArea* area; in vm_map_physical_memory_vecs() local
2105 &addressRestrictions, true, &area, _address); in vm_map_physical_memory_vecs()
2116 size_t reservePages = map->MaxPagesNeededToMap(area->Base(), in vm_map_physical_memory_vecs()
2117 area->Base() + (size - 1)); in vm_map_physical_memory_vecs()
2136 map->Map(area->Base() + offset, vecs[vecIndex].base + vecOffset, in vm_map_physical_memory_vecs()
2137 protection, area->MemoryType(), &reservation); in vm_map_physical_memory_vecs()
2148 area->cache_type = CACHE_TYPE_DEVICE; in vm_map_physical_memory_vecs()
2149 return area->id; in vm_map_physical_memory_vecs()
2184 VMArea* area; in vm_create_null_area() local
2191 &addressRestrictions, true, &area, address); in vm_create_null_area()
2200 area->cache_type = CACHE_TYPE_NULL; in vm_create_null_area()
2201 return area->id; in vm_create_null_area()
2218 pre_map_area_pages(VMArea* area, VMCache* cache, in pre_map_area_pages() argument
2221 addr_t baseAddress = area->Base(); in pre_map_area_pages()
2222 addr_t cacheOffset = area->cache_offset; in pre_map_area_pages()
2224 page_num_t endPage = firstPage + area->Size() / B_PAGE_SIZE; in pre_map_area_pages()
2237 map_page(area, page, in pre_map_area_pages()
2374 VMArea* area; in _vm_map_file() local
2380 &addressRestrictions, kernel, &area, _address); in _vm_map_file()
2389 pre_map_area_pages(area, cache, &reservation, in _vm_map_file()
2409 area->cache_type = CACHE_TYPE_VNODE; in _vm_map_file()
2410 return area->id; in _vm_map_file()
2428 vm_area_get_locked_cache(VMArea* area) in vm_area_get_locked_cache() argument
2433 VMCache* cache = area->cache; in vm_area_get_locked_cache()
2443 if (cache == area->cache) { in vm_area_get_locked_cache()
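
Lines 2428-2443 show a lock-and-revalidate loop: area->cache is re-read after the lock is acquired, because the area may have been moved to another cache while the caller blocked. A simplified sketch of the idiom; the real function additionally guards the traversal with a global area-cache lock and returns the cache referenced:

VMCache*
vm_area_get_locked_cache(VMArea* area)
{
	while (true) {
		VMCache* cache = area->cache;
		cache->Lock();	// may block while the area switches caches

		if (cache == area->cache)
			return cache;	// still current: return it locked

		// The area was re-cached in the meantime; retry with the new one.
		cache->Unlock();
	}
}
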
2626 delete_area(VMAddressSpace* addressSpace, VMArea* area, in delete_area() argument
2629 ASSERT(!area->IsWired()); in delete_area()
2631 if (area->id >= 0 && !alreadyRemoved) in delete_area()
2632 VMAreas::Remove(area); in delete_area()
2640 VMCache* topCache = vm_area_get_locked_cache(area); in delete_area()
2651 area->address_space->TranslationMap()->UnmapArea(area, in delete_area()
2655 if (!area->cache->temporary) in delete_area()
2656 area->cache->WriteModified(); in delete_area()
2661 arch_vm_unset_memory_type(area); in delete_area()
2662 addressSpace->RemoveArea(area, allocationFlags); in delete_area()
2665 area->cache->RemoveArea(area); in delete_area()
2666 area->cache->ReleaseRef(); in delete_area()
2668 addressSpace->DeleteArea(area, allocationFlags); in delete_area()
2680 VMArea* area; in vm_delete_area() local
2684 status_t status = locker.SetFromArea(team, id, area); in vm_delete_area()
2688 cacheLocker.SetTo(area); in vm_delete_area()
2689 } while (wait_if_area_is_wired(area, &locker, &cacheLocker)); in vm_delete_area()
2693 if (!kernel && (area->protection & B_KERNEL_AREA) != 0) in vm_delete_area()
2696 delete_area(locker.AddressSpace(), area, false); in vm_delete_area()
3022 VMArea* area; in vm_set_area_protection() local
3032 status = locker.AddAreaCacheAndLock(areaID, true, false, area, &cache); in vm_set_area_protection()
3038 if (!kernel && (area->address_space == VMAddressSpace::Kernel() in vm_set_area_protection()
3039 || (area->protection & B_KERNEL_AREA) != 0)) { in vm_set_area_protection()
3042 " (%s)\n", team, newProtection, areaID, area->name); in vm_set_area_protection()
3045 if (!kernel && area->protection_max != 0 in vm_set_area_protection()
3046 && (newProtection & area->protection_max) in vm_set_area_protection()
3051 area->protection_max, areaID, area->name); in vm_set_area_protection()
3056 && area->address_space->ID() != team) { in vm_set_area_protection()
3062 if (area->protection == newProtection) in vm_set_area_protection()
3066 = (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0; in vm_set_area_protection()
3080 if (wait_if_area_is_wired(area, &locker, &cacheLocker)) in vm_set_area_protection()
3085 if (area->page_protections != NULL) { in vm_set_area_protection()
3087 free_etc(area->page_protections, in vm_set_area_protection()
3088 area->address_space == VMAddressSpace::Kernel() ? HEAP_DONT_LOCK_KERNEL_SPACE : 0); in vm_set_area_protection()
3089 area->page_protections = NULL; in vm_set_area_protection()
3102 if (cache->CountWritableAreas(area) == 0) { in vm_set_area_protection()
3122 == (area->protection & ~(B_WRITE_AREA | B_KERNEL_WRITE_AREA)) in vm_set_area_protection()
3123 && cache->page_count * 2 < area->Size() / B_PAGE_SIZE) { in vm_set_area_protection()
3157 VMTranslationMap* map = area->address_space->TranslationMap(); in vm_set_area_protection()
3161 page_num_t firstPageOffset = area->cache_offset / B_PAGE_SIZE; in vm_set_area_protection()
3163 = firstPageOffset + area->Size() / B_PAGE_SIZE; in vm_set_area_protection()
3168 addr_t address = virtual_page_address(area, page); in vm_set_area_protection()
3169 map->ProtectPage(area, address, newProtection); in vm_set_area_protection()
3173 map->ProtectArea(area, newProtection); in vm_set_area_protection()
3178 area->protection = newProtection; in vm_set_area_protection()
3215 VMArea* area = mapping->area; in vm_test_map_modification() local
3216 VMTranslationMap* map = area->address_space->TranslationMap(); in vm_test_map_modification()
3221 map->Query(virtual_page_address(area, page), &physicalAddress, &flags); in vm_test_map_modification()
3245 VMArea* area = mapping->area; in vm_clear_map_flags() local
3246 VMTranslationMap* map = area->address_space->TranslationMap(); in vm_clear_map_flags()
3249 map->ClearFlags(virtual_page_address(area, page), flags); in vm_clear_map_flags()
3265 VMArea* area = mapping->area; in vm_remove_all_page_mappings() local
3266 VMTranslationMap* map = area->address_space->TranslationMap(); in vm_remove_all_page_mappings()
3267 addr_t address = virtual_page_address(area, page); in vm_remove_all_page_mappings()
3268 map->UnmapPage(area, address, false); in vm_remove_all_page_mappings()
3281 VMArea* area = mapping->area; in vm_clear_page_mapping_accessed_flags() local
3282 VMTranslationMap* map = area->address_space->TranslationMap(); in vm_clear_page_mapping_accessed_flags()
3285 if (map->ClearAccessedAndModified(area, in vm_clear_page_mapping_accessed_flags()
3286 virtual_page_address(area, page), false, modified)) { in vm_clear_page_mapping_accessed_flags()
3323 VMArea* area = mapping->area; in vm_remove_all_page_mappings_if_unaccessed() local
3324 VMTranslationMap* map = area->address_space->TranslationMap(); in vm_remove_all_page_mappings_if_unaccessed()
3325 addr_t address = virtual_page_address(area, page); in vm_remove_all_page_mappings_if_unaccessed()
3327 if (map->ClearAccessedAndModified(area, address, true, modified)) { in vm_remove_all_page_mappings_if_unaccessed()
3362 while (VMArea* area = it.Next()) in vm_delete_areas() local
3363 VMAreas::Remove(area); in vm_delete_areas()
3368 while (VMArea* area = addressSpace->FirstArea()) { in vm_delete_areas() local
3369 ASSERT(!area->IsWired()); in vm_delete_areas()
3370 delete_area(addressSpace, area, deletingAddressSpace, true); in vm_delete_areas()
3393 VMArea* area = locker.AddressSpace()->LookupArea(address); in vm_area_for() local
3394 if (area != NULL) { in vm_area_for()
3395 if (!kernel && (area->protection & (B_READ_AREA | B_WRITE_AREA)) == 0 in vm_area_for()
3396 && (area->protection & B_KERNEL_AREA) != 0) in vm_area_for()
3399 return area->id; in vm_area_for()
3453 VMArea* area = it.Next();) { in vm_free_unused_boot_loader_range()
3454 addr_t areaStart = area->Base(); in vm_free_unused_boot_loader_range()
3455 addr_t areaEnd = areaStart + (area->Size() - 1); in vm_free_unused_boot_loader_range()
3539 area_id area = area_for((void*)(addr_t)args->kernel_args_range[i].start); in vm_free_kernel_args() local
3540 if (area >= B_OK) in vm_free_kernel_args()
3541 delete_area(area); in vm_free_kernel_args()
4095 VMArea* area = NULL; in vm_page_fault() local
4098 area = addressSpace->LookupArea(faultAddress); in vm_page_fault()
4106 faultAddress, area ? area->name : "???", faultAddress - (area ? in vm_page_fault()
4107 area->Base() : 0x0)); in vm_page_fault()
4375 VMArea* area = addressSpace->LookupArea(address); in vm_soft_fault() local
4376 if (area == NULL) { in vm_soft_fault()
4388 uint32 protection = get_area_page_protection(area, address); in vm_soft_fault()
4390 && (area->protection & B_KERNEL_AREA) != 0) { in vm_soft_fault()
4393 area->id, (void*)originalAddress); in vm_soft_fault()
4395 TPF(PageFaultError(area->id, in vm_soft_fault()
4404 B_PRIx32 " at %p\n", area->id, (void*)originalAddress); in vm_soft_fault()
4406 TPF(PageFaultError(area->id, in vm_soft_fault()
4414 B_PRIx32 " at %p\n", area->id, (void*)originalAddress); in vm_soft_fault()
4416 TPF(PageFaultError(area->id, in vm_soft_fault()
4424 " at %p\n", area->id, (void*)originalAddress); in vm_soft_fault()
4426 TPF(PageFaultError(area->id, in vm_soft_fault()
4436 context.Prepare(vm_area_get_locked_cache(area), in vm_soft_fault()
4437 address - area->Base() + area->cache_offset); in vm_soft_fault()
4455 TPF(PageFaultError(area->id, status)); in vm_soft_fault()
4464 TPF(PageFaultDone(area->id, context.topCache, context.page->Cache(), in vm_soft_fault()
4490 context.map->ProtectPage(area, address, newProtection); in vm_soft_fault()
4509 if (area->AddWaiterIfWired(&waiter, address, B_PAGE_SIZE, in vm_soft_fault()
4538 unmap_page(area, address); in vm_soft_fault()
4543 if (map_page(area, context.page, address, newProtection, in vm_soft_fault()
4753 VMArea* area; in vm_set_area_memory_type() local
4754 status_t status = locker.SetFromArea(id, area); in vm_set_area_memory_type()
4759 uint32 oldType = area->MemoryType(); in vm_set_area_memory_type()
4764 VMTranslationMap* map = area->address_space->TranslationMap(); in vm_set_area_memory_type()
4766 area->SetMemoryType(type); in vm_set_area_memory_type()
4767 map->ProtectArea(area, area->protection); in vm_set_area_memory_type()
4771 status_t error = arch_vm_set_memory_type(area, physicalBase, type, NULL); in vm_set_area_memory_type()
4775 area->SetMemoryType(oldType); in vm_set_area_memory_type()
4776 map->ProtectArea(area, area->protection); in vm_set_area_memory_type()
4810 fill_area_info(struct VMArea* area, area_info* info, size_t size) in fill_area_info() argument
4812 strlcpy(info->name, area->name, B_OS_NAME_LENGTH); in fill_area_info()
4813 info->area = area->id; in fill_area_info()
4814 info->address = (void*)area->Base(); in fill_area_info()
4815 info->size = area->Size(); in fill_area_info()
4816 info->protection = area->protection; in fill_area_info()
4817 info->lock = area->wiring; in fill_area_info()
4818 info->team = area->address_space->ID(); in fill_area_info()
4824 VMCache* cache = vm_area_get_locked_cache(area); in fill_area_info()
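
fill_area_info() is almost fully recoverable from the matches; only the ram_size computation behind the cache lock at line 4824 needs filling in (the counters the real function zeroes out are omitted):

static void
fill_area_info(struct VMArea* area, area_info* info, size_t size)
{
	strlcpy(info->name, area->name, B_OS_NAME_LENGTH);
	info->area = area->id;
	info->address = (void*)area->Base();
	info->size = area->Size();
	info->protection = area->protection;
	info->lock = area->wiring;
	info->team = area->address_space->ID();

	// ram_size is derived from the backing cache's resident pages, which
	// requires taking the cache locked (line 4824).
	VMCache* cache = vm_area_get_locked_cache(area);
	info->ram_size = cache->page_count * B_PAGE_SIZE;
	vm_area_put_locked_cache(cache);
}
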
4841 VMArea* area; in vm_resize_area() local
4857 status = locker.AddAreaCacheAndLock(areaID, true, true, area, &cache); in vm_resize_area()
4863 if (!kernel && (area->address_space == VMAddressSpace::Kernel() in vm_resize_area()
4864 || (area->protection & B_KERNEL_AREA) != 0)) { in vm_resize_area()
4867 team_get_current_team_id(), areaID, area->name); in vm_resize_area()
4872 oldSize = area->Size(); in vm_resize_area()
4942 if (area->page_protections != NULL) { in vm_resize_area()
4945 = (uint8*)realloc(area->page_protections, bytes); in vm_resize_area()
4949 area->page_protections = newProtections; in vm_resize_area()
4954 uint32 areaProtection = area->protection in vm_resize_area()
4956 memset(area->page_protections + offset, in vm_resize_area()
4959 uint8& entry = area->page_protections[offset - 1]; in vm_resize_area()
5148 VMArea* area = addressSpace->LookupArea(pageAddress); in vm_wire_page() local
5149 if (area == NULL) { in vm_wire_page()
5155 VMCacheChainLocker cacheChainLocker(vm_area_get_locked_cache(area)); in vm_wire_page()
5158 area->Wire(&info->range); in vm_wire_page()
5193 VMCache* cache = vm_area_get_locked_cache(area); in vm_wire_page()
5194 area->Unwire(&info->range); in vm_wire_page()
5218 VMArea* area = info->range.area; in vm_unwire_page() local
5219 AddressSpaceReadLocker addressSpaceLocker(area->address_space, false); in vm_unwire_page()
5223 VMCache* cache = vm_area_get_locked_cache(area); in vm_unwire_page()
5235 area->Unwire(&info->range); in vm_unwire_page()
5307 VMArea* area = addressSpace->LookupArea(nextAddress); in lock_memory_etc() local
5308 if (area == NULL) { in lock_memory_etc()
5314 addr_t areaEnd = std::min(lockEndAddress, area->Base() + area->Size()); in lock_memory_etc()
5326 VMCacheChainLocker cacheChainLocker(vm_area_get_locked_cache(area)); in lock_memory_etc()
5329 area->Wire(range); in lock_memory_etc()
5333 if (area->cache_type == CACHE_TYPE_NULL in lock_memory_etc()
5334 || area->cache_type == CACHE_TYPE_DEVICE in lock_memory_etc()
5335 || area->wiring == B_FULL_LOCK in lock_memory_etc()
5336 || area->wiring == B_CONTIGUOUS) { in lock_memory_etc()
5372 cacheChainLocker.SetTo(vm_area_get_locked_cache(area)); in lock_memory_etc()
5390 area->Unwire(range); in lock_memory_etc()
5466 VMArea* area = addressSpace->LookupArea(nextAddress); in unlock_memory_etc() local
5467 if (area == NULL) { in unlock_memory_etc()
5473 addr_t areaEnd = std::min(lockEndAddress, area->Base() + area->Size()); in unlock_memory_etc()
5477 VMCacheChainLocker cacheChainLocker(vm_area_get_locked_cache(area)); in unlock_memory_etc()
5481 if (area->cache_type == CACHE_TYPE_NULL in unlock_memory_etc()
5482 || area->cache_type == CACHE_TYPE_DEVICE in unlock_memory_etc()
5483 || area->wiring == B_FULL_LOCK in unlock_memory_etc()
5484 || area->wiring == B_CONTIGUOUS) { in unlock_memory_etc()
5488 VMAreaWiredRange* range = area->Unwire(areaStart, in unlock_memory_etc()
5529 VMAreaWiredRange* range = area->Unwire(areaStart, in unlock_memory_etc()
5711 VMArea* area; in _get_area_info() local
5712 status_t status = locker.SetFromArea(id, area); in _get_area_info()
5716 fill_area_info(area, info, size); in _get_area_info()
5737 VMArea* area = locker.AddressSpace()->FindClosestArea(nextBase, false); in _get_next_area_info() local
5738 if (area == NULL) { in _get_next_area_info()
5743 fill_area_info(area, info, size); in _get_next_area_info()
5744 *cookie = (ssize_t)(area->Base() + 1); in _get_next_area_info()
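
The iteration cookie at line 5744 is simply the last area's base address plus one; FindClosestArea(nextBase, false) then returns the next area at or above that address, so enumeration stays consistent even if areas are deleted between calls. A userland usage sketch against the public get_next_area_info() API from OS.h:

#include <OS.h>
#include <stdio.h>

int
main()
{
	ssize_t cookie = 0;	// 0 starts the walk at the lowest area
	area_info info;
	while (get_next_area_info(0, &cookie, &info) == B_OK) {
		// team 0 means the current team; the kernel advances the cookie
		// to the returned area's base address + 1.
		printf("area %" B_PRId32 " '%s' at %p, size %lu\n",
			info.area, info.name, info.address, (unsigned long)info.size);
	}
	return 0;
}
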
5751 set_area_protection(area_id area, uint32 newProtection) in set_area_protection() argument
5753 return vm_set_area_protection(VMAddressSpace::KernelID(), area, in set_area_protection()
5865 delete_area(area_id area) in delete_area() argument
5867 return vm_delete_area(VMAddressSpace::KernelID(), area, true); in delete_area()
5936 _user_get_area_info(area_id area, area_info* userInfo) in _user_get_area_info() argument
5942 status_t status = get_area_info(area, &info); in _user_get_area_info()
5983 _user_set_area_protection(area_id area, uint32 newProtection) in _user_set_area_protection() argument
5988 return vm_set_area_protection(VMAddressSpace::CurrentID(), area, in _user_set_area_protection()
5994 _user_resize_area(area_id area, size_t newSize) in _user_resize_area() argument
5998 return vm_resize_area(area, newSize, false); in _user_resize_area()
6003 _user_transfer_area(area_id area, void** userAddress, uint32 addressSpec, in _user_transfer_area() argument
6018 area_id newArea = transfer_area(area, &address, addressSpec, target, false); in _user_transfer_area()
6105 area_id area = vm_create_anonymous_area(VMAddressSpace::CurrentID(), name, in _user_create_area() local
6109 if (area >= B_OK in _user_create_area()
6111 delete_area(area); in _user_create_area()
6115 return area; in _user_create_area()
6120 _user_delete_area(area_id area) in _user_delete_area() argument
6126 return vm_delete_area(VMAddressSpace::CurrentID(), area, false); in _user_delete_area()
6139 area_id area; in _user_map_file() local
6162 area = _vm_map_file(VMAddressSpace::CurrentID(), name, &address, in _user_map_file()
6165 if (area < B_OK) in _user_map_file()
6166 return area; in _user_map_file()
6171 return area; in _user_map_file()
6243 VMArea* area = locker.AddressSpace()->LookupArea(currentAddress); in _user_set_memory_protection() local
6244 if (area == NULL) in _user_set_memory_protection()
6247 if ((area->protection & B_KERNEL_AREA) != 0) in _user_set_memory_protection()
6249 if (area->protection_max != 0 in _user_set_memory_protection()
6250 && (protection & area->protection_max) != (protection & B_USER_PROTECTION)) { in _user_set_memory_protection()
6254 addr_t offset = currentAddress - area->Base(); in _user_set_memory_protection()
6255 size_t rangeSize = min_c(area->Size() - offset, sizeLeft); in _user_set_memory_protection()
6257 AreaCacheLocker cacheLocker(area); in _user_set_memory_protection()
6259 if (wait_if_area_range_is_wired(area, currentAddress, rangeSize, in _user_set_memory_protection()
6278 VMArea* area = locker.AddressSpace()->LookupArea(currentAddress); in _user_set_memory_protection() local
6279 if (area == NULL) in _user_set_memory_protection()
6282 addr_t offset = currentAddress - area->Base(); in _user_set_memory_protection()
6283 size_t rangeSize = min_c(area->Size() - offset, sizeLeft); in _user_set_memory_protection()
6288 if (area->page_protections == NULL) { in _user_set_memory_protection()
6289 if (area->protection == protection) in _user_set_memory_protection()
6291 if (offset == 0 && rangeSize == area->Size()) { in _user_set_memory_protection()
6293 status_t status = vm_set_area_protection(area->address_space->ID(), in _user_set_memory_protection()
6294 area->id, protection, false); in _user_set_memory_protection()
6300 status_t status = allocate_area_page_protections(area); in _user_set_memory_protection()
6307 VMCache* topCache = vm_area_get_locked_cache(area); in _user_set_memory_protection()
6315 const off_t areaCacheBase = area->Base() - area->cache_offset; in _user_set_memory_protection()
6316 for (addr_t pageAddress = area->Base() + offset; in _user_set_memory_protection()
6324 = (get_area_page_protection(area, pageAddress) & B_WRITE_AREA) != 0; in _user_set_memory_protection()
6341 for (addr_t pageAddress = area->Base() + offset; in _user_set_memory_protection()
6345 set_area_page_protection(area, pageAddress, protection); in _user_set_memory_protection()
6359 "\n", area, physicalAddress); in _user_set_memory_protection()
6371 map->ProtectPage(area, pageAddress, protection); in _user_set_memory_protection()
6377 unmap_page(area, pageAddress); in _user_set_memory_protection()
6418 VMArea* area = locker.AddressSpace()->LookupArea(address); in _user_sync_memory() local
6419 if (area == NULL) in _user_sync_memory()
6422 uint32 offset = address - area->Base(); in _user_sync_memory()
6423 size_t rangeSize = min_c(area->Size() - offset, size); in _user_sync_memory()
6424 offset += area->cache_offset; in _user_sync_memory()
6427 AreaCacheLocker cacheLocker(area); in _user_sync_memory()
6430 VMCache* cache = area->cache; in _user_sync_memory()
6522 VMArea* area = locker.AddressSpace()->LookupArea((addr_t)address); in _user_get_memory_properties() local
6523 if (area == NULL) in _user_get_memory_properties()
6526 uint32 protection = get_area_page_protection(area, (addr_t)address); in _user_get_memory_properties()
6527 uint32 wiring = area->wiring; in _user_get_memory_properties()
6566 VMArea* area = addressSpace->LookupArea(nextAddress); in user_set_memory_swappable() local
6567 if (area == NULL) { in user_set_memory_swappable()
6573 const addr_t areaEnd = std::min(endAddress, area->Base() + area->Size()); in user_set_memory_swappable()
6582 VMCacheChainLocker cacheChainLocker(vm_area_get_locked_cache(area)); in user_set_memory_swappable()
6584 if (dynamic_cast<VMAnonymousNoSwapCache*>(area->cache) != NULL) { in user_set_memory_swappable()
6586 } else if ((anonCache = dynamic_cast<VMAnonymousCache*>(area->cache)) != NULL) { in user_set_memory_swappable()
6587 error = anonCache->SetCanSwapPages(areaStart - area->Base(), in user_set_memory_swappable()