xref: /haiku/src/system/kernel/vm/VMAnonymousNoSwapCache.cpp (revision 125183f9e5c136781f71c879faaeab43fdc3ea7b)
/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "VMAnonymousNoSwapCache.h"

#include <stdlib.h>

#include <arch_config.h>
#include <heap.h>
#include <KernelExport.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>


//#define TRACE_STORE
#ifdef TRACE_STORE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
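
// Usage note (illustrative, not from the original file): TRACE() takes its
// entire dprintf() argument list as one parenthesized argument, hence the
// double parentheses at call sites, e.g.:
//
//   TRACE(("VMAnonymousNoSwapCache::Commit(%lld)\n", size));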

// The stack functionality looks like a good candidate to put into its own
// store. I have not done this because, once we have a swap file backing up
// the memory, it would probably no longer be a good idea to keep it separate.


VMAnonymousNoSwapCache::~VMAnonymousNoSwapCache()
{
	// return our remaining commitment to the global memory pool
	vm_unreserve_memory(committed_size);
}


status_t
VMAnonymousNoSwapCache::Init(bool canOvercommit, int32 numPrecommittedPages,
	int32 numGuardPages, uint32 allocationFlags)
{
	TRACE(("VMAnonymousNoSwapCache::Init(canOvercommit = %s, numGuardPages = %ld) "
		"at %p\n", canOvercommit ? "yes" : "no", (long)numGuardPages, this));

	status_t error = VMCache::Init(CACHE_TYPE_RAM, allocationFlags);
	if (error != B_OK)
		return error;

	fCanOvercommit = canOvercommit;
	fHasPrecommitted = false;
	fPrecommittedPages = min_c(numPrecommittedPages, 255);
	fGuardedSize = numGuardPages * B_PAGE_SIZE;

	return B_OK;
}
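
// A minimal usage sketch (hypothetical caller, not part of this file): a
// stack area might set up an overcommitting cache with a few precommitted
// and guard pages roughly like this, assuming the object itself has already
// been constructed:
//
//   VMAnonymousNoSwapCache* cache = ...;
//   status_t status = cache->Init(true /* canOvercommit */,
//       4 /* numPrecommittedPages */, 4 /* numGuardPages */, 0);
//   if (status != B_OK)
//       return status;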


status_t
VMAnonymousNoSwapCache::Commit(off_t size, int priority)
{
	// If we can overcommit, we don't commit here, but in Fault(). We do
	// always unreserve memory, though, if we're asked to shrink our
	// commitment.
	if (fCanOvercommit && size > committed_size) {
		if (fHasPrecommitted)
			return B_OK;

		// pre-commit some pages to make a later failure less probable
		fHasPrecommitted = true;
		off_t precommitted = fPrecommittedPages * B_PAGE_SIZE;
		if (size > precommitted)
			size = precommitted;
	}

	// Check how much we can actually commit -- we need real memory.

	if (size > committed_size) {
		// try to commit more
		if (vm_try_reserve_memory(size - committed_size, priority, 1000000)
				!= B_OK) {
			return B_NO_MEMORY;
		}
	} else {
		// we can release some of the reserved memory
		vm_unreserve_memory(committed_size - size);
	}

	committed_size = size;
	return B_OK;
}
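
// Illustrative numbers (not from the original file): with fCanOvercommit set,
// fPrecommittedPages == 4, and nothing committed yet, Commit(1024 * 1024, ...)
// only reserves 4 * B_PAGE_SIZE bytes up front; the remaining pages get
// committed one at a time in Fault(). Any later growing Commit() returns B_OK
// without reserving more, since fHasPrecommitted is then set.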


bool
VMAnonymousNoSwapCache::HasPage(off_t offset)
{
	// we never have pages in any backing store
	return false;
}


status_t
VMAnonymousNoSwapCache::Read(off_t offset, const iovec *vecs, size_t count,
	uint32 flags, size_t *_numBytes)
{
	panic("VMAnonymousNoSwapCache::Read() called. Invalid!\n");
	return B_ERROR;
}


status_t
VMAnonymousNoSwapCache::Write(off_t offset, const iovec *vecs, size_t count,
	uint32 flags, size_t *_numBytes)
{
	// no place to write to -- this will cause the page daemon to skip this cache
	return B_ERROR;
}


status_t
VMAnonymousNoSwapCache::Fault(struct VMAddressSpace *aspace, off_t offset)
{
	if (fCanOvercommit) {
		if (fGuardedSize > 0) {
			off_t guardOffset;

#ifdef STACK_GROWS_DOWNWARDS
			guardOffset = 0;
#elif defined(STACK_GROWS_UPWARDS)
			guardOffset = virtual_end - fGuardedSize;
#else
#	error Stack direction has not been defined in arch_config.h
#endif

			// report a stack fault if a guard page was hit
			if (offset >= guardOffset && offset < guardOffset + fGuardedSize) {
				TRACE(("stack overflow!\n"));
				return B_BAD_ADDRESS;
			}
		}

		if (fPrecommittedPages == 0) {
			// never commit more than needed
			if (committed_size / B_PAGE_SIZE > page_count)
				return B_BAD_HANDLER;

			// try to commit one more page of memory
			int priority = aspace == VMAddressSpace::Kernel()
				? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER;
			if (vm_try_reserve_memory(B_PAGE_SIZE, priority, 0) != B_OK) {
				dprintf("%p->VMAnonymousNoSwapCache::Fault(): Failed to "
					"reserve %d bytes of RAM.\n", this, (int)B_PAGE_SIZE);
				return B_NO_MEMORY;
			}

			committed_size += B_PAGE_SIZE;
		} else
			fPrecommittedPages--;
	}

	// This will cause vm_soft_fault() to handle the fault.
	return B_BAD_HANDLER;
}
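
// Worked example (illustrative numbers, assuming 4 KiB pages and a
// downward-growing stack): with fGuardedSize == 4 * B_PAGE_SIZE, the guard
// range covers cache offsets [0, 16384). A fault at offset 8192 thus returns
// B_BAD_ADDRESS (stack overflow), while a fault at, say, offset 32768 commits
// one more page (or consumes a precommitted one) and returns B_BAD_HANDLER,
// letting vm_soft_fault() map the page as usual.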


void
VMAnonymousNoSwapCache::MergeStore(VMCache* _source)
{
	VMAnonymousNoSwapCache* source
		= dynamic_cast<VMAnonymousNoSwapCache*>(_source);
	if (source == NULL) {
		panic("VMAnonymousNoSwapCache::MergeStore(): merge with incompatible "
			"cache %p requested", _source);
		return;
	}

	// take over the source's committed size
	committed_size += source->committed_size;
	source->committed_size = 0;

	// never keep more committed than this cache can actually contain
	off_t actualSize = virtual_end - virtual_base;
	if (committed_size > actualSize) {
		vm_unreserve_memory(committed_size - actualSize);
		committed_size = actualSize;
	}
}
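
// Illustrative numbers (not from the original file): if this cache has
// 4 pages committed, the source has 8, and the merged cache spans 10 pages
// (virtual_end - virtual_base == 10 * B_PAGE_SIZE), the combined commitment
// of 12 pages exceeds the span, so 2 pages' worth of reservation is returned
// via vm_unreserve_memory() and committed_size ends up at 10 * B_PAGE_SIZE.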
186