/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "VMAnonymousNoSwapCache.h"

#include <stdlib.h>

#include <arch_config.h>
#include <heap.h>
#include <KernelExport.h>
#include <slab/Slab.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>


//#define TRACE_STORE
#ifdef TRACE_STORE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

// The stack functionality looks like a good candidate to put into its own
// store. I have not done this because once we have a swap file backing the
// memory, it would probably not be a good idea to separate it anymore.
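//
// This cache type backs anonymous memory (heaps, stacks, areas cloned from
// them) on builds without swap support. It has no backing store: HasPage()
// always returns false and Read() must never be reached. What it manages is
// purely the commitment charged against the system through
// vm_try_reserve_memory() and vm_unreserve_memory(). Instances are normally
// created through VMCacheFactory rather than directly.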


VMAnonymousNoSwapCache::~VMAnonymousNoSwapCache()
{
	vm_unreserve_memory(committed_size);
}


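// Initializes the cache. canOvercommit defers most committing to Fault(),
// where the charging then happens page by page. Up to 255 precommitted
// pages are reserved on the first commit to make later faults less likely
// to fail, and numGuardPages pages form an inaccessible guard range at the
// stack end of the area.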
status_t
VMAnonymousNoSwapCache::Init(bool canOvercommit, int32 numPrecommittedPages,
	int32 numGuardPages, uint32 allocationFlags)
{
	TRACE(("VMAnonymousNoSwapCache::Init(canOvercommit = %s, numGuardPages = "
		"%" B_PRId32 ") at %p\n", canOvercommit ? "yes" : "no", numGuardPages,
		this));

	status_t error = VMCache::Init(CACHE_TYPE_RAM, allocationFlags);
	if (error != B_OK)
		return error;

	fCanOvercommit = canOvercommit;
	fHasPrecommitted = false;
	fPrecommittedPages = min_c(numPrecommittedPages, 255);
	fGuardedSize = numGuardPages * B_PAGE_SIZE;

	return B_OK;
}
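
// A minimal creation sketch (for illustration only; in the real kernel,
// VMCacheFactory::CreateAnonymousCache() performs these steps, assuming the
// slab placement new used by the VM):
//
//	VMAnonymousNoSwapCache* cache = new(gAnonymousNoSwapCacheObjectCache,
//		allocationFlags) VMAnonymousNoSwapCache;
//	if (cache->Init(true, 1, 0, allocationFlags) != B_OK) {
//		// clean up ...
//	}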


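// Adjusts the commitment to size. For an overcommitting cache, growing
// requests only reserve the precommitted pages, and only once; e.g. with
// fPrecommittedPages == 4, a first Commit(1 * 1024 * 1024) reserves just
// 4 * B_PAGE_SIZE, leaving the rest to be charged page by page in Fault().
// Shrinking requests always release the difference immediately.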
status_t
VMAnonymousNoSwapCache::Commit(off_t size, int priority)
{
	AssertLocked();

	// If we can overcommit, we don't commit here but in Fault(). We always
	// unreserve memory if we're asked to shrink our commitment, though.
	if (fCanOvercommit && size > committed_size) {
		if (fHasPrecommitted)
			return B_OK;

		// pre-commit some pages to make a later failure less probable
		fHasPrecommitted = true;
		uint32 precommitted = fPrecommittedPages * B_PAGE_SIZE;
		if (size > precommitted)
			size = precommitted;
	}

	// Check how much we can actually commit; we need real memory.

	if (size > committed_size) {
		// try to commit
		if (vm_try_reserve_memory(size - committed_size, priority, 1000000)
				!= B_OK) {
			return B_NO_MEMORY;
		}
	} else {
		// we can release some
		vm_unreserve_memory(committed_size - size);
	}

	committed_size = size;
	return B_OK;
}


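// Since there is no backing store, no page is ever "present" in it. The
// consistent answer is false: every page of this cache has to be produced
// anew (zero-filled) by the VM on first access.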
bool
VMAnonymousNoSwapCache::HasPage(off_t offset)
{
	return false;
}


status_t
VMAnonymousNoSwapCache::Read(off_t offset, const generic_io_vec* vecs,
	size_t count, uint32 flags, generic_size_t* _numBytes)
{
	panic("VMAnonymousNoSwapCache::Read() called!");
	return B_ERROR;
}


status_t
VMAnonymousNoSwapCache::Write(off_t offset, const generic_io_vec* vecs,
	size_t count, uint32 flags, generic_size_t* _numBytes)
{
	// No place to write; this will cause the page daemon to skip this store.
	return B_ERROR;
}


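// Handles a soft fault at offset. Guard-range hits are rejected with
// B_BAD_ADDRESS (a stack overflow). For an overcommitting cache, one more
// page is charged against the system first, unless the current commitment
// already covers it or a precommitted page is still available. Returning
// B_BAD_HANDLER is the normal case: it tells vm_soft_fault() to proceed and
// supply the page itself.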
status_t
VMAnonymousNoSwapCache::Fault(struct VMAddressSpace* aspace, off_t offset)
{
	if (fGuardedSize > 0) {
		// off_t rather than uint32: for large areas with an upward-growing
		// stack, virtual_size - fGuardedSize may not fit into 32 bits
		off_t guardOffset;

#ifdef STACK_GROWS_DOWNWARDS
		guardOffset = 0;
#elif defined(STACK_GROWS_UPWARDS)
		guardOffset = virtual_size - fGuardedSize;
#else
#	error Stack direction has not been defined in arch_config.h
#endif
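
		// For example, with STACK_GROWS_DOWNWARDS and fGuardedSize
		// == 4 * B_PAGE_SIZE, the guard range is [0, 4 * B_PAGE_SIZE); a
		// fault at any offset inside it is rejected below.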
		// report stack fault, guard page hit!
		if (offset >= guardOffset && offset < guardOffset + fGuardedSize) {
			TRACE(("stack overflow!\n"));
			return B_BAD_ADDRESS;
		}
	}

	if (fCanOvercommit) {
		if (fPrecommittedPages == 0) {
			// Never commit more than needed: if the current commitment
			// already exceeds the number of pages in the cache, it covers
			// the page about to be supplied as well.
			if (committed_size / B_PAGE_SIZE > page_count)
				return B_BAD_HANDLER;

			// try to commit additional memory
			int priority = aspace == VMAddressSpace::Kernel()
				? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER;
			if (vm_try_reserve_memory(B_PAGE_SIZE, priority, 0) != B_OK) {
				dprintf("%p->VMAnonymousNoSwapCache::Fault(): Failed to "
					"reserve %d bytes of RAM.\n", this, (int)B_PAGE_SIZE);
				return B_NO_MEMORY;
			}

			committed_size += B_PAGE_SIZE;
		} else
			fPrecommittedPages--;
	}

	// This will cause vm_soft_fault() to handle the fault.
	return B_BAD_HANDLER;
}


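// Takes over the commitment of the source cache being merged into this one.
// For example, if this cache has 3 pages committed and the source 2, the
// combined commitment is 5 pages; if the merged cache only spans 4 pages,
// the surplus page is unreserved immediately so nothing stays charged
// twice.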
void
VMAnonymousNoSwapCache::MergeStore(VMCache* _source)
{
	VMAnonymousNoSwapCache* source
		= dynamic_cast<VMAnonymousNoSwapCache*>(_source);
	if (source == NULL) {
		panic("VMAnonymousNoSwapCache::MergeStore(): merge with incompatible "
			"cache %p requested", _source);
		return;
	}

	// take over the source's committed size
	committed_size += source->committed_size;
	source->committed_size = 0;

	off_t actualSize = virtual_end - virtual_base;
	if (committed_size > actualSize) {
		vm_unreserve_memory(committed_size - actualSize);
		committed_size = actualSize;
	}
}


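// Returns the object to the slab cache it was allocated from; caches of
// this type live in gAnonymousNoSwapCacheObjectCache rather than on the
// kernel heap.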
void
VMAnonymousNoSwapCache::DeleteObject()
{
	object_cache_delete(gAnonymousNoSwapCacheObjectCache, this);
}
195