Lines Matching refs:protection

267 	int protection, int protectionMax, int mapping, uint32 flags,
270 static void fix_protection(uint32* protection);
534 uint32 areaProtection = area->protection in allocate_area_page_protections()
539 area->protection &= ~(B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA in allocate_area_page_protections()
555 set_area_page_protection(VMArea* area, addr_t pageAddress, uint32 protection) in set_area_page_protection() argument
557 protection &= B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA; in set_area_page_protection()
561 entry = (entry & 0xf0) | protection; in set_area_page_protection()
563 entry = (entry & 0x0f) | (protection << 4); in set_area_page_protection()
571 return area->protection; in get_area_page_protection()
574 uint32 protection = area->page_protections[pageIndex / 2]; in get_area_page_protection() local
576 protection &= 0x0f; in get_area_page_protection()
578 protection >>= 4; in get_area_page_protection()
581 if ((protection & B_READ_AREA) != 0) in get_area_page_protection()
583 if ((protection & B_WRITE_AREA) != 0) in get_area_page_protection()
590 return protection | kernelProtection; in get_area_page_protection()
604 const uint8 protection = area->page_protections[i]; in compute_area_page_commitment() local
609 pages += ((protection & (B_WRITE_AREA << 0)) != 0) ? 1 : 0; in compute_area_page_commitment()
617 pages += ((protection & (B_WRITE_AREA << 4)) != 0) ? 1 : 0; in compute_area_page_commitment()
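The allocate/set/get_area_page_protection() and compute_area_page_commitment() hits above all revolve around one data structure: area->page_protections[] packs two pages per byte, a 4-bit entry each, masked to B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA (line 557). A minimal sketch of that packing, reassembled from the fragments; which nibble holds even page indices is an assumption, since the listing hides the index test:

static void
set_page_protection_sketch(uint8* pageProtections, size_t pageIndex, uint32 protection)
{
	// Only the user bits fit into a nibble.
	protection &= B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA;
	uint8& entry = pageProtections[pageIndex / 2];
	if (pageIndex % 2 == 0)
		entry = (entry & 0xf0) | protection;		// low nibble
	else
		entry = (entry & 0x0f) | (protection << 4);	// high nibble
}

static uint32
get_page_protection_sketch(const uint8* pageProtections, size_t pageIndex)
{
	uint32 protection = pageProtections[pageIndex / 2];
	return (pageIndex % 2 == 0) ? (protection & 0x0f) : (protection >> 4);
}

The real getter additionally mirrors the stored user bits into the matching kernel bits before returning (lines 581-590), and compute_area_page_commitment() counts a page as committed when the write bit of either nibble is set (lines 609 and 617).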
628 map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection, in map_page() argument
650 map->Map(address, page->physical_page_number * B_PAGE_SIZE, protection, in map_page()
665 map->Map(address, page->physical_page_number * B_PAGE_SIZE, protection, in map_page()
920 area->protection & B_OVERCOMMITTING_AREA, 0, 0, in cut_area()
952 area->protection, area->protection_max, REGION_NO_PRIVATE_MAP, 0, in cut_area()
986 area->name, secondSize, area->wiring, area->protection, in cut_area()
1068 if ((area->protection & B_KERNEL_AREA) != 0) { in unmap_address_range()
1149 const char* areaName, addr_t size, int wiring, int protection, in map_backing_store() argument
1158 addressRestrictions->address_specification, wiring, protection, in map_backing_store()
1183 VMArea* area = addressSpace->CreateArea(areaName, wiring, protection, in map_backing_store()
1200 (protection & B_STACK_AREA) != 0 in map_backing_store()
1201 || (protection & B_OVERCOMMITTING_AREA) != 0, 0, in map_backing_store()
1432 uint32 protection) in vm_set_kernel_area_debug_protection() argument
1446 if ((protection & B_KERNEL_READ_AREA) != 0) in vm_set_kernel_area_debug_protection()
1447 protection |= B_READ_AREA; in vm_set_kernel_area_debug_protection()
1448 if ((protection & B_KERNEL_WRITE_AREA) != 0) in vm_set_kernel_area_debug_protection()
1449 protection |= B_WRITE_AREA; in vm_set_kernel_area_debug_protection()
1469 map->DebugMarkRangePresent(address, address + size, protection != 0); in vm_set_kernel_area_debug_protection()
1476 set_area_page_protection(area, pageAddress, protection); in vm_set_kernel_area_debug_protection()
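vm_set_kernel_area_debug_protection() first translates kernel bits into user bits (lines 1446-1449) because the per-page nibble can only hold the user-space flags (see the mask on line 557). A sketch of the whole path, with the loop structure assumed rather than shown by the listing:

	// Fold kernel protections into the bits the per-page store understands.
	if ((protection & B_KERNEL_READ_AREA) != 0)
		protection |= B_READ_AREA;
	if ((protection & B_KERNEL_WRITE_AREA) != 0)
		protection |= B_WRITE_AREA;

	// Mark the range (not) present for debugging, then remember the
	// protection for every page in it.
	map->DebugMarkRangePresent(address, address + size, protection != 0);
	for (addr_t pageAddress = address; pageAddress < address + size;
			pageAddress += B_PAGE_SIZE)
		set_area_page_protection(area, pageAddress, protection);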
1563 uint32 wiring, uint32 protection, uint32 flags, addr_t guardSize, in vm_create_anonymous_area() argument
1571 bool isStack = (protection & B_STACK_AREA) != 0; in vm_create_anonymous_area()
1586 if (!arch_vm_supports_protection(protection)) in vm_create_anonymous_area()
1594 if (isStack || (protection & B_OVERCOMMITTING_AREA) != 0) in vm_create_anonymous_area()
1598 if ((protection & B_KERNEL_STACK_AREA) != 0) in vm_create_anonymous_area()
1782 protection, 0, REGION_NO_PRIVATE_MAP, flags, in vm_create_anonymous_area()
1819 map_page(area, page, address, protection, &reservation); in vm_create_anonymous_area()
1889 status = map->Map(virtualAddress, physicalAddress, protection, in vm_create_anonymous_area()
1944 uint32 addressSpec, addr_t size, uint32 protection, in vm_map_physical_memory() argument
1954 addressSpec, size, protection, physicalAddress)); in vm_map_physical_memory()
1956 if (!arch_vm_supports_protection(protection)) in vm_map_physical_memory()
1984 B_FULL_LOCK, protection, 0, REGION_NO_PRIVATE_MAP, CREATE_AREA_DONT_COMMIT_MEMORY, in vm_map_physical_memory()
2018 map->ProtectArea(area, area->protection); in vm_map_physical_memory()
2035 protection, area->MemoryType(), &reservation); in vm_map_physical_memory()
2059 uint32 addressSpec, addr_t* _size, uint32 protection, in vm_map_physical_memory_vecs() argument
2065 addressSpec, _size, protection, vecs, vecCount)); in vm_map_physical_memory_vecs()
2067 if (!arch_vm_supports_protection(protection) in vm_map_physical_memory_vecs()
2104 B_FULL_LOCK, protection, 0, REGION_NO_PRIVATE_MAP, CREATE_AREA_DONT_COMMIT_MEMORY, in vm_map_physical_memory_vecs()
2137 protection, area->MemoryType(), &reservation); in vm_map_physical_memory_vecs()
2252 uint32 addressSpec, size_t size, uint32 protection, uint32 mapping, in _vm_map_file() argument
2266 protection |= B_SHARED_AREA; in _vm_map_file()
2279 return vm_create_anonymous_area(team, name, size, B_NO_LOCK, protection, in _vm_map_file()
2295 && (protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0 in _vm_map_file()
2303 protectionMax = protection | B_USER_PROTECTION; in _vm_map_file()
2305 protectionMax = protection | (B_USER_PROTECTION & ~B_WRITE_AREA); in _vm_map_file()
2309 if ((protection & B_WRITE_AREA) == 0) in _vm_map_file()
2324 if ((protection & B_READ_AREA) != 0) { in _vm_map_file()
2379 0, protection, protectionMax, mapping, mappingFlags, in _vm_map_file()
2387 if (status == B_OK && (protection & B_READ_AREA) != 0) { in _vm_map_file()
2416 addr_t size, uint32 protection, uint32 mapping, bool unmapAddressRange, in vm_map_file() argument
2419 if (!arch_vm_supports_protection(protection)) in vm_map_file()
2422 return _vm_map_file(aid, name, address, addressSpec, size, protection, in vm_map_file()
2464 uint32 addressSpec, uint32 protection, uint32 mapping, area_id sourceID, in vm_clone_area() argument
2478 if (!kernel && (sourceArea->protection & B_KERNEL_AREA) != 0) in vm_clone_area()
2481 sourceArea->protection |= B_SHARED_AREA; in vm_clone_area()
2482 protection |= B_SHARED_AREA; in vm_clone_area()
2506 if (!kernel && (sourceArea->protection & B_KERNEL_AREA) != 0) in vm_clone_area()
2512 && (sourceArea->protection & B_CLONEABLE_AREA) == 0) { in vm_clone_area()
2531 sourceArea->wiring, protection, sourceArea->protection_max, in vm_clone_area()
2569 protection, newArea->MemoryType(), &reservation); in vm_clone_area()
2591 protection, &reservation); in vm_clone_area()
2693 if (!kernel && (area->protection & B_KERNEL_AREA) != 0) in vm_delete_area()
2782 uint32 protection = 0; in vm_copy_on_write_area() local
2785 protection |= B_KERNEL_READ_AREA; in vm_copy_on_write_area()
2787 protection |= B_READ_AREA; in vm_copy_on_write_area()
2792 map->ProtectPage(tempArea, address, protection); in vm_copy_on_write_area()
2815 uint32 protection = 0; in vm_copy_on_write_area() local
2818 protection |= B_KERNEL_READ_AREA; in vm_copy_on_write_area()
2820 protection |= B_READ_AREA; in vm_copy_on_write_area()
2822 map->ProtectPage(tempArea, address, protection); in vm_copy_on_write_area()
2829 uint32 protection = 0; in vm_copy_on_write_area() local
2830 if ((tempArea->protection & B_KERNEL_READ_AREA) != 0) in vm_copy_on_write_area()
2831 protection |= B_KERNEL_READ_AREA; in vm_copy_on_write_area()
2832 if ((tempArea->protection & B_READ_AREA) != 0) in vm_copy_on_write_area()
2833 protection |= B_READ_AREA; in vm_copy_on_write_area()
2837 map->ProtectArea(tempArea, protection); in vm_copy_on_write_area()
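All three vm_copy_on_write_area() blocks above repeat one idea: build a protection mask that keeps only the read bits of the area, then push it into the translation map so the next write faults and the fault path can copy the page. Condensed sketch; the surrounding loop and cache locking are omitted:

	// Strip write permission; keep whatever read access the area already had.
	uint32 protection = 0;
	if ((tempArea->protection & B_KERNEL_READ_AREA) != 0)
		protection |= B_KERNEL_READ_AREA;
	if ((tempArea->protection & B_READ_AREA) != 0)
		protection |= B_READ_AREA;
	map->ProtectPage(tempArea, address, protection);	// or ProtectArea() for the whole area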
2880 sharedArea = (source->protection & B_SHARED_AREA) != 0; in vm_copy_area()
2925 = (source->protection & (B_KERNEL_WRITE_AREA | B_WRITE_AREA)) != 0; in vm_copy_area()
2961 name, source->Size(), source->wiring, source->protection, in vm_copy_area()
3039 || (area->protection & B_KERNEL_AREA) != 0)) { in vm_set_area_protection()
3062 if (area->protection == newProtection) in vm_set_area_protection()
3066 = (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0; in vm_set_area_protection()
3122 == (area->protection & ~(B_WRITE_AREA | B_KERNEL_WRITE_AREA)) in vm_set_area_protection()
3178 area->protection = newProtection; in vm_set_area_protection()
3395 if (!kernel && (area->protection & (B_READ_AREA | B_WRITE_AREA)) == 0 in vm_area_for()
3396 && (area->protection & B_KERNEL_AREA) != 0) in vm_area_for()
4388 uint32 protection = get_area_page_protection(area, address); in vm_soft_fault() local
4389 if (isUser && (protection & B_USER_PROTECTION) == 0 in vm_soft_fault()
4390 && (area->protection & B_KERNEL_AREA) != 0) { in vm_soft_fault()
4400 if (isWrite && (protection in vm_soft_fault()
4410 } else if (isExecute && (protection in vm_soft_fault()
4420 } else if (!isWrite && !isExecute && (protection in vm_soft_fault()
4470 uint32 newProtection = protection; in vm_soft_fault()
4767 map->ProtectArea(area, area->protection); in vm_set_area_memory_type()
4776 map->ProtectArea(area, area->protection); in vm_set_area_memory_type()
4792 fix_protection(uint32* protection) in fix_protection() argument
4794 if ((*protection & B_KERNEL_EXECUTE_AREA) != 0 in fix_protection()
4795 && ((*protection & B_KERNEL_WRITE_AREA) != 0 in fix_protection()
4796 || (*protection & B_WRITE_AREA) != 0) in fix_protection()
4800 if ((*protection & B_KERNEL_PROTECTION) == 0) { in fix_protection()
4801 if ((*protection & B_WRITE_AREA) != 0) in fix_protection()
4802 *protection |= B_KERNEL_WRITE_AREA; in fix_protection()
4803 if ((*protection & B_READ_AREA) != 0) in fix_protection()
4804 *protection |= B_KERNEL_READ_AREA; in fix_protection()
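Pieced together, the fix_protection() hits (lines 4794-4804) read as: refuse writable-and-executable kernel mappings, and if the caller supplied no kernel bits at all, derive them from the user bits. A hedged reconstruction; the body of the first branch is not shown by the listing and is assumed here:

static void
fix_protection(uint32* protection)
{
	// Assumption: the hidden branch body rejects the write+execute combination.
	if ((*protection & B_KERNEL_EXECUTE_AREA) != 0
		&& ((*protection & B_KERNEL_WRITE_AREA) != 0
			|| (*protection & B_WRITE_AREA) != 0))
		panic("kernel areas cannot be both writable and executable");

	// No kernel bits given: mirror the user bits into the kernel ones.
	if ((*protection & B_KERNEL_PROTECTION) == 0) {
		if ((*protection & B_WRITE_AREA) != 0)
			*protection |= B_KERNEL_WRITE_AREA;
		if ((*protection & B_READ_AREA) != 0)
			*protection |= B_KERNEL_READ_AREA;
	}
}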
4816 info->protection = area->protection; in fill_area_info()
4864 || (area->protection & B_KERNEL_AREA) != 0)) { in vm_resize_area()
4954 uint32 areaProtection = area->protection in vm_resize_area()
5781 status = set_area_protection(id, info.protection | B_CLONEABLE_AREA); in transfer_area()
5786 addressSpec, info.protection, REGION_NO_PRIVATE_MAP, id, kernel); in transfer_area()
5797 set_area_protection(clonedArea, info.protection); in transfer_area()
5807 size_t numBytes, uint32 addressSpec, uint32 protection, in __map_physical_memory_haiku() argument
5810 if (!arch_vm_supports_protection(protection)) in __map_physical_memory_haiku()
5813 fix_protection(&protection); in __map_physical_memory_haiku()
5816 _virtualAddress, addressSpec, numBytes, protection, physicalAddress, in __map_physical_memory_haiku()
5823 uint32 protection, area_id source) in clone_area() argument
5825 if ((protection & B_KERNEL_PROTECTION) == 0) in clone_area()
5826 protection |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA; in clone_area()
5829 addressSpec, protection, REGION_NO_PRIVATE_MAP, source, true); in clone_area()
5835 uint32 protection, uint32 flags, uint32 guardSize, in create_area_etc() argument
5840 fix_protection(&protection); in create_area_etc()
5842 return vm_create_anonymous_area(team, name, size, lock, protection, flags, in create_area_etc()
5850 size_t size, uint32 lock, uint32 protection) in __create_area_haiku() argument
5852 fix_protection(&protection); in __create_area_haiku()
5859 lock, protection, 0, 0, &virtualRestrictions, &physicalRestrictions, in __create_area_haiku()
6031 uint32 protection, area_id sourceArea) in _user_clone_area() argument
6042 if ((protection & ~B_USER_AREA_FLAGS) != 0) in _user_clone_area()
6051 fix_protection(&protection); in _user_clone_area()
6054 &address, addressSpec, protection, REGION_NO_PRIVATE_MAP, sourceArea, in _user_clone_area()
6070 size_t size, uint32 lock, uint32 protection) in _user_create_area() argument
6081 if ((protection & ~B_USER_AREA_FLAGS) != 0) in _user_create_area()
6099 fix_protection(&protection); in _user_create_area()
6106 size, lock, protection, 0, 0, &virtualRestrictions, in _user_create_area()
6134 size_t size, uint32 protection, uint32 mapping, bool unmapAddressRange, in _user_map_file() argument
6141 if ((protection & ~B_USER_AREA_FLAGS) != 0) in _user_map_file()
6144 fix_protection(&protection); in _user_map_file()
6163 addressSpec, size, protection, mapping, unmapAddressRange, fd, offset, in _user_map_file()
6206 _user_set_memory_protection(void* _address, size_t size, uint32 protection) in _user_set_memory_protection() argument
6220 if ((protection & ~B_USER_PROTECTION) != 0) in _user_set_memory_protection()
6223 fix_protection(&protection); in _user_set_memory_protection()
6247 if ((area->protection & B_KERNEL_AREA) != 0) in _user_set_memory_protection()
6250 && (protection & area->protection_max) != (protection & B_USER_PROTECTION)) { in _user_set_memory_protection()
6289 if (area->protection == protection) in _user_set_memory_protection()
6294 area->id, protection, false); in _user_set_memory_protection()
6313 const bool becomesWritable = (protection & B_WRITE_AREA) != 0; in _user_set_memory_protection()
6345 set_area_page_protection(area, pageAddress, protection); in _user_set_memory_protection()
6368 && (protection & B_WRITE_AREA) != 0; in _user_set_memory_protection()
6371 map->ProtectPage(area, pageAddress, protection); in _user_set_memory_protection()
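The tail of _user_set_memory_protection() applies the new protection page by page: the bits are remembered in the per-page store and, for pages that are actually mapped, also pushed into the hardware map. A much-simplified sketch, assuming a plain loop and ignoring the commitment and copy-on-write bookkeeping hinted at around lines 6313-6368:

	for (addr_t pageAddress = areaStart; pageAddress < areaEnd;
			pageAddress += B_PAGE_SIZE) {
		set_area_page_protection(area, pageAddress, protection);	// bookkeeping copy
		map->ProtectPage(area, pageAddress, protection);		// live mapping
	}

Here areaStart and areaEnd are hypothetical stand-ins for the locked, page-aligned range the real code derives from the user-supplied address and size.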
6526 uint32 protection = get_area_page_protection(area, (addr_t)address); in _user_get_memory_properties() local
6531 error = user_memcpy(_protected, &protection, sizeof(protection)); in _user_get_memory_properties()
6688 size_t numBytes, uint32 addressSpec, uint32 protection, in __map_physical_memory_beos() argument
6692 addressSpec, protection, _virtualAddress); in __map_physical_memory_beos()
6701 size_t size, uint32 lock, uint32 protection) in __create_area_beos() argument
6716 protection); in __create_area_beos()