/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "VMAnonymousNoSwapCache.h"

#include <stdlib.h>

#include <arch_config.h>
#include <heap.h>
#include <KernelExport.h>
#include <slab/Slab.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>


//#define TRACE_STORE
#ifdef TRACE_STORE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

// The stack functionality looks like a good candidate to put into its own
// store. I have not done this because once we have a swap file backing the
// memory, it would probably no longer be a good idea to separate it.


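// Hands any memory still committed to this cache back to the system.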
VMAnonymousNoSwapCache::~VMAnonymousNoSwapCache()
{
	vm_unreserve_memory(committed_size);
}


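// Sets up the cache as an anonymous RAM cache. The number of pages kept for
// pre-committing is capped at 255; the guarded range is stored as a byte size
// so Fault() can later refuse accesses that hit a guard page.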
status_t
VMAnonymousNoSwapCache::Init(bool canOvercommit, int32 numPrecommittedPages,
	int32 numGuardPages, uint32 allocationFlags)
{
	TRACE(("VMAnonymousNoSwapCache::Init(canOvercommit = %s, numGuardPages = %"
		B_PRId32 ") at %p\n", canOvercommit ? "yes" : "no", numGuardPages,
		this));

	status_t error = VMCache::Init(CACHE_TYPE_RAM, allocationFlags);
	if (error != B_OK)
		return error;

	fCanOvercommit = canOvercommit;
	fHasPrecommitted = false;
	fPrecommittedPages = min_c(numPrecommittedPages, 255);
	fGuardedSize = numGuardPages * B_PAGE_SIZE;

	return B_OK;
}


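// Adjusts the amount of memory reserved for this cache. For overcommitting
// caches most of the reservation is deferred to Fault(); only a small amount
// is pre-committed once, up front, to make later faults less likely to fail.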
status_t
VMAnonymousNoSwapCache::Commit(off_t size, int priority)
{
	// If we can overcommit, we don't commit here, but in Fault(). We do,
	// however, always unreserve memory if we're asked to shrink our
	// commitment.
	if (fCanOvercommit && size > committed_size) {
		if (fHasPrecommitted)
			return B_OK;

		// pre-commit some pages to make a later failure less probable
		fHasPrecommitted = true;
		uint32 precommitted = fPrecommittedPages * B_PAGE_SIZE;
		if (size > precommitted)
			size = precommitted;
	}

	// Check to see how much we could commit -- we need real memory

	if (size > committed_size) {
		// try to commit; wait up to one second for memory to become available
		if (vm_try_reserve_memory(size - committed_size, priority, 1000000)
				!= B_OK) {
			return B_NO_MEMORY;
		}
	} else {
		// we can release some
		vm_unreserve_memory(committed_size - size);
	}

	committed_size = size;
	return B_OK;
}


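// An anonymous cache has no backing store, so a page can never be "present"
// in it.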
bool
VMAnonymousNoSwapCache::HasPage(off_t offset)
{
	return false;
}


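// Must never be reached: HasPage() always returns false, so the fault code
// has no reason to read from this cache; hence the panic().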
status_t
VMAnonymousNoSwapCache::Read(off_t offset, const generic_io_vec* vecs,
	size_t count, uint32 flags, generic_size_t* _numBytes)
{
	panic("anonymous_store: read called. Invalid!\n");
	return B_ERROR;
}


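// The counterpart to Read(): without a swap file there is nowhere to write
// pages back to, so modified pages of this cache cannot be evicted.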
status_t
VMAnonymousNoSwapCache::Write(off_t offset, const generic_io_vec* vecs,
	size_t count, uint32 flags, generic_size_t* _numBytes)
{
	// no place to write; this will cause the page daemon to skip this store
	return B_ERROR;
}


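// Handles a page fault in this cache. Accesses falling into the guarded
// range are rejected as stack overflows. For overcommitting caches, one more
// page's worth of memory is committed (or taken from the pre-committed pool)
// before the generic fault code is allowed to map in a fresh page; returning
// B_BAD_HANDLER tells vm_soft_fault() to go on with its default handling.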
status_t
VMAnonymousNoSwapCache::Fault(struct VMAddressSpace* aspace, off_t offset)
{
	if (fGuardedSize > 0) {
		uint32 guardOffset;

#ifdef STACK_GROWS_DOWNWARDS
		guardOffset = 0;
#elif defined(STACK_GROWS_UPWARDS)
		guardOffset = virtual_end - virtual_base - fGuardedSize;
#else
#	error Stack direction has not been defined in arch_config.h
#endif

		// report stack fault, guard page hit!
		if (offset >= guardOffset && offset < guardOffset + fGuardedSize) {
			TRACE(("stack overflow!\n"));
			return B_BAD_ADDRESS;
		}
	}

	if (fCanOvercommit) {
		if (fPrecommittedPages == 0) {
			// never commit more than needed
			if (committed_size / B_PAGE_SIZE > page_count)
				return B_BAD_HANDLER;

			// try to commit additional memory; don't wait if none is available
			int priority = aspace == VMAddressSpace::Kernel()
				? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER;
			if (vm_try_reserve_memory(B_PAGE_SIZE, priority, 0) != B_OK) {
				dprintf("%p->VMAnonymousNoSwapCache::Fault(): Failed to "
					"reserve %d bytes of RAM.\n", this, (int)B_PAGE_SIZE);
				return B_NO_MEMORY;
			}

			committed_size += B_PAGE_SIZE;
		} else
			fPrecommittedPages--;
	}

	// This will cause vm_soft_fault() to handle the fault.
	return B_BAD_HANDLER;
}


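// Called when this cache takes over the pages of another anonymous cache.
// The source's commitment is taken over as well, and anything exceeding this
// cache's actual size is unreserved again.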
void
VMAnonymousNoSwapCache::MergeStore(VMCache* _source)
{
	VMAnonymousNoSwapCache* source
		= dynamic_cast<VMAnonymousNoSwapCache*>(_source);
	if (source == NULL) {
		panic("VMAnonymousNoSwapCache::MergeStore(): merge with incompatible "
			"cache %p requested", _source);
		return;
	}

	// take over the source's committed size
	committed_size += source->committed_size;
	source->committed_size = 0;

	off_t actualSize = virtual_end - virtual_base;
	if (committed_size > actualSize) {
		vm_unreserve_memory(committed_size - actualSize);
		committed_size = actualSize;
	}
}


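// Returns the object to the dedicated slab object cache it was allocated
// from.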
void
VMAnonymousNoSwapCache::DeleteObject()
{
	object_cache_delete(gAnonymousNoSwapCacheObjectCache, this);
}
193