/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "VMAnonymousNoSwapCache.h"

#include <stdlib.h>

#include <arch_config.h>
#include <heap.h>
#include <KernelExport.h>
#include <vm_priv.h>


//#define TRACE_STORE
#ifdef TRACE_STORE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

// The stack functionality looks like a good candidate to put into its own
// store. I have not done this, because once we have a swap file backing up
// the memory, it would probably not be a good idea to separate this
// anymore.


VMAnonymousNoSwapCache::~VMAnonymousNoSwapCache()
{
	vm_unreserve_memory(committed_size);
}


status_t
VMAnonymousNoSwapCache::Init(bool canOvercommit, int32 numPrecommittedPages,
	int32 numGuardPages)
{
	TRACE(("VMAnonymousNoSwapCache::Init(canOvercommit = %s, "
		"numGuardPages = %ld) at %p\n", canOvercommit ? "yes" : "no",
		numGuardPages, this));

	status_t error = VMCache::Init(CACHE_TYPE_RAM);
	if (error != B_OK)
		return error;

	fCanOvercommit = canOvercommit;
	fHasPrecommitted = false;
	fPrecommittedPages = min_c(numPrecommittedPages, 255);
	fGuardedSize = numGuardPages * B_PAGE_SIZE;

	return B_OK;
}


status_t
VMAnonymousNoSwapCache::Commit(off_t size)
{
	// If we can overcommit, we don't commit here, but in Fault().
	if (fCanOvercommit) {
		if (fHasPrecommitted)
			return B_OK;

		// pre-commit some pages to make a later failure less probable
		fHasPrecommitted = true;
		uint32 precommitted = fPrecommittedPages * B_PAGE_SIZE;
		if (size > precommitted)
			size = precommitted;
	}

	// Check how much we can commit - we need real memory.

	if (size > committed_size) {
		// try to commit, waiting up to one second for the memory
		if (vm_try_reserve_memory(size - committed_size, 1000000) != B_OK)
			return B_NO_MEMORY;
	} else {
		// we can release some
		vm_unreserve_memory(committed_size - size);
	}

	committed_size = size;
	return B_OK;
}


bool
VMAnonymousNoSwapCache::HasPage(off_t offset)
{
	// We have no backing store, so a page is never "present" in it.
	return false;
}


status_t
VMAnonymousNoSwapCache::Read(off_t offset, const iovec* vecs, size_t count,
	uint32 flags, size_t* _numBytes)
{
	panic("anonymous_store: read called. Invalid!\n");
	return B_ERROR;
}


status_t
VMAnonymousNoSwapCache::Write(off_t offset, const iovec* vecs, size_t count,
	uint32 flags, size_t* _numBytes)
{
	// no place to write to - this will cause the page daemon to skip this
	// store
	return B_ERROR;
}


status_t
VMAnonymousNoSwapCache::Fault(struct vm_address_space* aspace, off_t offset)
{
	if (fCanOvercommit) {
		if (fGuardedSize > 0) {
			uint32 guardOffset;

#ifdef STACK_GROWS_DOWNWARDS
			guardOffset = 0;
#elif defined(STACK_GROWS_UPWARDS)
			guardOffset = virtual_end - fGuardedSize;
#else
#	error Stack direction has not been defined in arch_config.h
#endif

			// report stack fault, guard page hit!
			if (offset >= guardOffset
				&& offset < guardOffset + fGuardedSize) {
				TRACE(("stack overflow!\n"));
				return B_BAD_ADDRESS;
			}
		}

		if (fPrecommittedPages == 0) {
			// try to commit additional memory
			if (vm_try_reserve_memory(B_PAGE_SIZE, 0) != B_OK)
				return B_NO_MEMORY;

			committed_size += B_PAGE_SIZE;
		} else {
			// consume one of the pages pre-committed in Init()
			fPrecommittedPages--;
		}
	}

	// This will cause vm_soft_fault() to handle the fault.
	return B_BAD_HANDLER;
}


void
VMAnonymousNoSwapCache::MergeStore(VMCache* _source)
{
	VMAnonymousNoSwapCache* source
		= dynamic_cast<VMAnonymousNoSwapCache*>(_source);
	if (source == NULL) {
		panic("VMAnonymousNoSwapCache::MergeStore(): merge with incompatible "
			"cache %p requested", _source);
		return;
	}

	// take over the source's committed size
	committed_size += source->committed_size;
	source->committed_size = 0;

	// don't keep more committed than this cache can actually hold
	off_t actualSize = virtual_end - virtual_base;
	if (committed_size > actualSize) {
		vm_unreserve_memory(committed_size - actualSize);
		committed_size = actualSize;
	}
}