/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "VMAnonymousNoSwapCache.h"

#include <stdlib.h>

#include <arch_config.h>
#include <heap.h>
#include <KernelExport.h>
#include <slab/Slab.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>


//#define TRACE_STORE
#ifdef TRACE_STORE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

// The stack functionality looks like a good candidate to put into its own
// store. This hasn't been done, because once a swap file backs up the
// memory, it would probably no longer be a good idea to separate it.


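// On destruction, return the memory this cache still has committed to the
// system's commitment pool.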
VMAnonymousNoSwapCache::~VMAnonymousNoSwapCache()
{
	vm_unreserve_memory(committed_size);
}


status_t
VMAnonymousNoSwapCache::Init(bool canOvercommit, int32 numPrecommittedPages,
	int32 numGuardPages, uint32 allocationFlags)
{
	TRACE(("VMAnonymousNoSwapCache::Init(canOvercommit = %s, numGuardPages = %"
		B_PRId32 ") at %p\n", canOvercommit ? "yes" : "no", numGuardPages,
		this));

	status_t error = VMCache::Init(CACHE_TYPE_RAM, allocationFlags);
	if (error != B_OK)
		return error;

	fCanOvercommit = canOvercommit;
	fHasPrecommitted = false;
	fPrecommittedPages = min_c(numPrecommittedPages, 255);
	fGuardedSize = numGuardPages * B_PAGE_SIZE;

	return B_OK;
}


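// Make sure the given amount of memory is accounted for against the system's
// memory reservation. For overcommitting caches, only a small number of pages
// is pre-committed here; the remainder is committed page by page in Fault().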
status_t
VMAnonymousNoSwapCache::Commit(off_t size, int priority)
{
	// If we can overcommit, we don't commit here, but in Fault(). We do
	// unreserve memory if we're asked to shrink our commitment, though.
	if (fCanOvercommit && size > committed_size) {
		if (fHasPrecommitted)
			return B_OK;

		// pre-commit some pages to make a later failure less probable
		fHasPrecommitted = true;
		uint32 precommitted = fPrecommittedPages * B_PAGE_SIZE;
		if (size > precommitted)
			size = precommitted;
	}

	// Check how much we actually have to commit or can release; this needs
	// real memory.
	if (size > committed_size) {
		// try to commit
		if (vm_try_reserve_memory(size - committed_size, priority, 1000000)
				!= B_OK) {
			return B_NO_MEMORY;
		}
	} else {
		// we can release some
		vm_unreserve_memory(committed_size - size);
	}

	committed_size = size;
	return B_OK;
}


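// This cache has no backing store, so it can never provide a page; every page
// has to be newly allocated on fault.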
bool
VMAnonymousNoSwapCache::HasPage(off_t offset)
{
	return false;
}


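// Reading from an anonymous no-swap cache is invalid: there is no backing
// store to page in from.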
status_t
VMAnonymousNoSwapCache::Read(off_t offset, const iovec* vecs, size_t count,
	uint32 flags, size_t* _numBytes)
{
	panic("anonymous_store: read called. Invalid!\n");
	return B_ERROR;
}


status_t
VMAnonymousNoSwapCache::Write(off_t offset, const iovec* vecs, size_t count,
	uint32 flags, size_t* _numBytes)
{
	// There's no place to write to; returning an error makes the page daemon
	// skip this store.
	return B_ERROR;
}


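// A fault in this cache was not satisfied by an existing page. Check whether
// a guard page was hit and, for overcommitting caches, commit memory for the
// new page. Returning B_BAD_HANDLER lets vm_soft_fault() allocate the page.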
status_t
VMAnonymousNoSwapCache::Fault(struct VMAddressSpace* aspace, off_t offset)
{
	if (fCanOvercommit) {
		if (fGuardedSize > 0) {
			uint32 guardOffset;

#ifdef STACK_GROWS_DOWNWARDS
			guardOffset = 0;
#elif defined(STACK_GROWS_UPWARDS)
			guardOffset = virtual_size - fGuardedSize;
#else
#	error Stack direction has not been defined in arch_config.h
#endif

			// report stack fault, guard page hit!
			if (offset >= guardOffset && offset < guardOffset + fGuardedSize) {
				TRACE(("stack overflow!\n"));
				return B_BAD_ADDRESS;
			}
		}

		if (fPrecommittedPages == 0) {
			// never commit more than needed; the current commitment already
			// covers the page about to be instantiated
			if (committed_size / B_PAGE_SIZE > page_count)
				return B_BAD_HANDLER;

			// try to commit additional memory
			int priority = aspace == VMAddressSpace::Kernel()
				? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER;
			if (vm_try_reserve_memory(B_PAGE_SIZE, priority, 0) != B_OK) {
				dprintf("%p->VMAnonymousNoSwapCache::Fault(): Failed to "
					"reserve %d bytes of RAM.\n", this, (int)B_PAGE_SIZE);
				return B_NO_MEMORY;
			}

			committed_size += B_PAGE_SIZE;
		} else
			fPrecommittedPages--;
	}

	// This will cause vm_soft_fault() to handle the fault
	return B_BAD_HANDLER;
}


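// Merge the source cache's memory commitment into this cache's. Commitment
// exceeding what this cache's actual size requires is returned to the system.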
void
VMAnonymousNoSwapCache::MergeStore(VMCache* _source)
{
	VMAnonymousNoSwapCache* source
		= dynamic_cast<VMAnonymousNoSwapCache*>(_source);
	if (source == NULL) {
		panic("VMAnonymousNoSwapCache::MergeStore(): merge with incompatible "
			"cache %p requested", _source);
		return;
	}

	// take over the source's committed size
	committed_size += source->committed_size;
	source->committed_size = 0;

	off_t actualSize = virtual_end - virtual_base;
	if (committed_size > actualSize) {
		vm_unreserve_memory(committed_size - actualSize);
		committed_size = actualSize;
	}
}


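// Return this object to the slab object cache it was allocated from.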
void
VMAnonymousNoSwapCache::DeleteObject()
{
	object_cache_delete(gAnonymousNoSwapCacheObjectCache, this);
}