/*
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_VM_H
#define _KERNEL_VM_VM_H

#include <OS.h>

#include <arch/vm.h>
#include <vm_defs.h>

struct iovec;
struct kernel_args;
struct ObjectCache;
struct system_memory_info;
struct team;
struct VMAddressSpace;
struct VMArea;
struct VMCache;
struct vm_page;
struct vnode;

// area creation flags
#define CREATE_AREA_DONT_WAIT			0x01
#define CREATE_AREA_UNMAP_ADDRESS_RANGE	0x02
#define CREATE_AREA_DONT_CLEAR			0x04
#define CREATE_AREA_PRIORITY_VIP		0x08

// memory/page allocation priorities
#define VM_PRIORITY_USER	0
#define VM_PRIORITY_SYSTEM	1
#define VM_PRIORITY_VIP		2

// page reserves
#define VM_PAGE_RESERVE_USER	512
#define VM_PAGE_RESERVE_SYSTEM	128

// memory reserves
#define VM_MEMORY_RESERVE_USER		(VM_PAGE_RESERVE_USER * B_PAGE_SIZE)
#define VM_MEMORY_RESERVE_SYSTEM	(VM_PAGE_RESERVE_SYSTEM * B_PAGE_SIZE)

extern struct ObjectCache* gPageMappingsObjectCache;


#ifdef __cplusplus
extern "C" {
#endif

// startup only
status_t vm_init(struct kernel_args *args);
status_t vm_init_post_sem(struct kernel_args *args);
status_t vm_init_post_thread(struct kernel_args *args);
status_t vm_init_post_modules(struct kernel_args *args);
void vm_free_kernel_args(struct kernel_args *args);
void vm_free_unused_boot_loader_range(addr_t start, addr_t end);
addr_t vm_allocate_early(struct kernel_args *args, size_t virtualSize,
			size_t physicalSize, uint32 attributes, bool blockAlign);

void slab_init(struct kernel_args *args);
void slab_init_post_area();
void slab_init_post_sem();
void slab_init_post_thread();

// to protect code regions with interrupts turned on
void permit_page_faults(void);
void forbid_page_faults(void);
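// A usage sketch, not part of this interface: presumably the two calls above
// are used as a balanced pair around a code region, for example one in which
// a page fault would indicate a bug while interrupts remain enabled:
//
//		forbid_page_faults();
//		// ... touch memory that must not go through the normal fault path ...
//		permit_page_faults();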

// private kernel-only extension (should be moved somewhere else):
area_id create_area_etc(team_id team, const char *name, void **address,
			uint32 addressSpec, uint32 size, uint32 lock, uint32 protection,
			addr_t physicalAddress, uint32 flags);
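// Illustrative only (an assumption, not dictated by this header): creating a
// fully locked kernel area; the name and size are hypothetical, the B_*
// constants come from OS.h:
//
//		void* address;
//		area_id area = create_area_etc(B_SYSTEM_TEAM, "example buffer",
//			&address, B_ANY_KERNEL_ADDRESS, B_PAGE_SIZE * 4, B_FULL_LOCK,
//			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
//			CREATE_AREA_DONT_WAIT);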
area_id transfer_area(area_id id, void** _address, uint32 addressSpec,
			team_id target, bool kernel);

status_t vm_block_address_range(const char* name, void* address, addr_t size);
status_t vm_unreserve_address_range(team_id team, void *address, addr_t size);
status_t vm_reserve_address_range(team_id team, void **_address,
			uint32 addressSpec, addr_t size, uint32 flags);
area_id vm_create_anonymous_area(team_id team, const char *name, void **address,
			uint32 addressSpec, addr_t size, uint32 wiring, uint32 protection,
			addr_t physicalAddress, uint32 flags, bool kernel);
area_id vm_map_physical_memory(team_id team, const char *name, void **address,
			uint32 addressSpec, addr_t size, uint32 protection,
			addr_t physicalAddress, bool alreadyWired);
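// A sketch of a hypothetical mapping of one page of device registers;
// deviceRegsBase stands in for a page-aligned physical address supplied by
// the caller, the B_* constants come from OS.h:
//
//		void* registers;
//		area_id area = vm_map_physical_memory(B_SYSTEM_TEAM, "device registers",
//			&registers, B_ANY_KERNEL_ADDRESS, B_PAGE_SIZE,
//			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, deviceRegsBase, false);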
area_id vm_map_physical_memory_vecs(team_id team, const char* name,
			void** _address, uint32 addressSpec, addr_t* _size,
			uint32 protection, struct iovec* vecs, uint32 vecCount);
area_id vm_map_file(team_id aid, const char *name, void **address,
			uint32 addressSpec, addr_t size, uint32 protection, uint32 mapping,
			bool unmapAddressRange, int fd, off_t offset);
struct VMCache *vm_area_get_locked_cache(struct VMArea *area);
void vm_area_put_locked_cache(struct VMCache *cache);
area_id vm_create_null_area(team_id team, const char *name, void **address,
			uint32 addressSpec, addr_t size, uint32 flags);
area_id vm_copy_area(team_id team, const char *name, void **_address,
			uint32 addressSpec, uint32 protection, area_id sourceID);
area_id vm_clone_area(team_id team, const char *name, void **address,
			uint32 addressSpec, uint32 protection, uint32 mapping,
			area_id sourceArea, bool kernel);
status_t vm_delete_area(team_id teamID, area_id areaID, bool kernel);
status_t vm_create_vnode_cache(struct vnode *vnode, struct VMCache **_cache);
status_t vm_set_area_memory_type(area_id id, addr_t physicalBase, uint32 type);
status_t vm_get_page_mapping(team_id team, addr_t vaddr, addr_t *paddr);
bool vm_test_map_modification(struct vm_page *page);
void vm_clear_map_flags(struct vm_page *page, uint32 flags);
void vm_remove_all_page_mappings(struct vm_page *page);
int32 vm_clear_page_mapping_accessed_flags(struct vm_page *page);
int32 vm_remove_all_page_mappings_if_unaccessed(struct vm_page *page);

status_t vm_get_physical_page(addr_t paddr, addr_t* vaddr, void** _handle);
status_t vm_put_physical_page(addr_t vaddr, void* handle);
status_t vm_get_physical_page_current_cpu(addr_t paddr, addr_t* vaddr,
			void** _handle);
status_t vm_put_physical_page_current_cpu(addr_t vaddr, void* handle);
status_t vm_get_physical_page_debug(addr_t paddr, addr_t* vaddr,
			void** _handle);
status_t vm_put_physical_page_debug(addr_t vaddr, void* handle);
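// A minimal sketch of the expected get/put pairing, derived only from the
// signatures above; physicalAddress stands in for a page-aligned physical
// address chosen by the caller:
//
//		addr_t virtualAddress;
//		void* handle;
//		if (vm_get_physical_page(physicalAddress, &virtualAddress, &handle)
//				== B_OK) {
//			// ... access the page through virtualAddress ...
//			vm_put_physical_page(virtualAddress, handle);
//		}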

void vm_get_info(struct system_memory_info *info);
uint32 vm_num_page_faults(void);
off_t vm_available_memory(void);
off_t vm_available_not_needed_memory(void);
size_t vm_kernel_address_space_left(void);

status_t vm_memset_physical(addr_t address, int value, size_t length);
status_t vm_memcpy_from_physical(void* to, addr_t from, size_t length,
			bool user);
status_t vm_memcpy_to_physical(addr_t to, const void* from, size_t length,
			bool user);
void vm_memcpy_physical_page(addr_t to, addr_t from);

// user syscalls
area_id _user_create_area(const char *name, void **address, uint32 addressSpec,
			size_t size, uint32 lock, uint32 protection);
status_t _user_delete_area(area_id area);

area_id _user_map_file(const char *uname, void **uaddress, int addressSpec,
			size_t size, int protection, int mapping, bool unmapAddressRange,
			int fd, off_t offset);
status_t _user_unmap_memory(void *address, size_t size);
status_t _user_set_memory_protection(void* address, size_t size,
			int protection);
status_t _user_sync_memory(void *address, size_t size, int flags);
status_t _user_memory_advice(void* address, size_t size, int advice);

area_id _user_area_for(void *address);
area_id _user_find_area(const char *name);
status_t _user_get_area_info(area_id area, area_info *info);
status_t _user_get_next_area_info(team_id team, int32 *cookie, area_info *info);
status_t _user_resize_area(area_id area, size_t newSize);
area_id _user_transfer_area(area_id area, void **_address, uint32 addressSpec,
			team_id target);
status_t _user_set_area_protection(area_id area, uint32 newProtection);
area_id _user_clone_area(const char *name, void **_address, uint32 addressSpec,
			uint32 protection, area_id sourceArea);
status_t _user_reserve_address_range(addr_t* userAddress, uint32 addressSpec,
			addr_t size);
status_t _user_unreserve_address_range(addr_t address, addr_t size);

#ifdef __cplusplus
}
#endif

#endif	/* _KERNEL_VM_VM_H */