/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_VM_CACHE_H
#define _KERNEL_VM_VM_CACHE_H


#include <debug.h>
#include <kernel.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include "kernel_debug_config.h"


struct kernel_args;


enum {
    CACHE_TYPE_RAM = 0,
    CACHE_TYPE_VNODE,
    CACHE_TYPE_DEVICE,
    CACHE_TYPE_NULL
};

enum {
    PAGE_EVENT_NOT_BUSY = 0x01  // page not busy anymore
};


struct VMCachePagesTreeDefinition {
    typedef page_num_t KeyType;
    typedef vm_page NodeType;

    static page_num_t GetKey(const NodeType* node)
    {
        return node->cache_offset;
    }

    static SplayTreeLink<NodeType>* GetLink(NodeType* node)
    {
        return &node->cache_link;
    }

    static int Compare(page_num_t key, const NodeType* node)
    {
        return key == node->cache_offset ? 0
            : (key < node->cache_offset ? -1 : 1);
    }

    static NodeType** GetListLink(NodeType* node)
    {
        return &node->cache_next;
    }
};

typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;


struct VMCache {
public:
    VMCache();
    virtual ~VMCache();

    status_t Init(uint32 cacheType, uint32 allocationFlags);

    virtual void Delete();

    inline bool Lock();
    inline bool TryLock();
    inline bool SwitchLock(mutex* from);
    inline bool SwitchFromReadLock(rw_lock* from);
    void Unlock(bool consumerLocked = false);
    inline void AssertLocked();

    inline void AcquireRefLocked();
    inline void AcquireRef();
    inline void ReleaseRefLocked();
    inline void ReleaseRef();
    inline void ReleaseRefAndUnlock(bool consumerLocked = false);

    inline VMCacheRef* CacheRef() const { return fCacheRef; }

    void WaitForPageEvents(vm_page* page, uint32 events, bool relock);
    void NotifyPageEvents(vm_page* page, uint32 events)
        { if (fPageEventWaiters != NULL) _NotifyPageEvents(page, events); }
    inline void MarkPageUnbusy(vm_page* page);

    vm_page* LookupPage(off_t offset);
    void InsertPage(vm_page* page, off_t offset);
    void RemovePage(vm_page* page);
    void MovePage(vm_page* page);
    void MoveAllPages(VMCache* fromCache);

    void AddConsumer(VMCache* consumer);

    status_t InsertAreaLocked(VMArea* area);
    status_t RemoveArea(VMArea* area);
    void TransferAreas(VMCache* fromCache);
    uint32 CountWritableAreas(VMArea* ignoreArea) const;

    status_t WriteModified();
    status_t SetMinimalCommitment(off_t commitment, int priority);
    status_t Resize(off_t newSize, int priority);

    status_t FlushAndRemoveAllPages();

    void* UserData() { return fUserData; }
    void SetUserData(void* data) { fUserData = data; }
        // Settable by the lock owner and valid as long as the lock is owned.

    // for debugging only
    mutex* GetLock() { return &fLock; }
    int32 RefCount() const { return fRefCount; }

    // backing store operations
    virtual status_t Commit(off_t size, int priority);
    virtual bool HasPage(off_t offset);

    virtual status_t Read(off_t offset, const iovec *vecs, size_t count,
        uint32 flags, size_t *_numBytes);
    virtual status_t Write(off_t offset, const iovec *vecs, size_t count,
        uint32 flags, size_t *_numBytes);
    virtual status_t WriteAsync(off_t offset, const iovec* vecs, size_t count,
        size_t numBytes, uint32 flags, AsyncIOCallback* callback);
    virtual bool CanWritePage(off_t offset);

    virtual int32 MaxPagesPerWrite() const { return -1; }
        // no restriction
    virtual int32 MaxPagesPerAsyncWrite() const { return -1; }
        // no restriction

    virtual status_t Fault(struct VMAddressSpace *aspace, off_t offset);

    virtual void Merge(VMCache* source);

    virtual status_t AcquireUnreferencedStoreRef();
    virtual void AcquireStoreRef();
    virtual void ReleaseStoreRef();

public:
    VMArea* areas;
    list_link consumer_link;
    list consumers;
        // list of caches that use this cache as a source
    VMCachePagesTree pages;
    VMCache* source;
    off_t virtual_base;
    off_t virtual_end;
    off_t committed_size;
        // TODO: Remove!
    uint32 page_count;
    uint32 temporary : 1;
    uint32 scan_skip : 1;
    uint32 type : 6;

#if DEBUG_CACHE_LIST
    VMCache* debug_previous;
    VMCache* debug_next;
#endif

private:
    struct PageEventWaiter;
    friend struct VMCacheRef;

private:
    void _NotifyPageEvents(vm_page* page, uint32 events);

    inline bool _IsMergeable() const;

    void _MergeWithOnlyConsumer(bool consumerLocked);
    void _RemoveConsumer(VMCache* consumer);

private:
    int32 fRefCount;
    mutex fLock;
    PageEventWaiter* fPageEventWaiters;
    void* fUserData;
    VMCacheRef* fCacheRef;
};


#if DEBUG_CACHE_LIST
extern VMCache* gDebugCacheList;
#endif


class VMCacheFactory {
public:
    static status_t CreateAnonymousCache(VMCache*& cache, bool canOvercommit,
        int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
        int priority);
    static status_t CreateVnodeCache(VMCache*& cache, struct vnode* vnode);
    static status_t CreateDeviceCache(VMCache*& cache, addr_t baseAddress);
    static status_t CreateNullCache(int priority, VMCache*& cache);
};


bool
VMCache::Lock()
{
    return mutex_lock(&fLock) == B_OK;
}


bool
VMCache::TryLock()
{
    return mutex_trylock(&fLock) == B_OK;
}


bool
VMCache::SwitchLock(mutex* from)
{
    return mutex_switch_lock(from, &fLock) == B_OK;
}


bool
VMCache::SwitchFromReadLock(rw_lock* from)
{
    return mutex_switch_from_read_lock(from, &fLock) == B_OK;
}


void
VMCache::AssertLocked()
{
    ASSERT_LOCKED_MUTEX(&fLock);
}


void
VMCache::AcquireRefLocked()
{
    ASSERT_LOCKED_MUTEX(&fLock);

    fRefCount++;
}


void
VMCache::AcquireRef()
{
    Lock();
    fRefCount++;
    Unlock();
}


void
VMCache::ReleaseRefLocked()
{
    ASSERT_LOCKED_MUTEX(&fLock);

    fRefCount--;
}


void
VMCache::ReleaseRef()
{
    Lock();
    fRefCount--;
    Unlock();
}


void
VMCache::ReleaseRefAndUnlock(bool consumerLocked)
{
    ReleaseRefLocked();
    Unlock(consumerLocked);
}


void
VMCache::MarkPageUnbusy(vm_page* page)
{
    ASSERT(page->busy);
    page->busy = false;
    NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
}


#ifdef __cplusplus
extern "C" {
#endif

status_t vm_cache_init(struct kernel_args *args);
void vm_cache_init_post_heap();
struct VMCache *vm_cache_acquire_locked_page_cache(struct vm_page *page,
    bool dontWait);

#ifdef __cplusplus
}
#endif


#endif  /* _KERNEL_VM_VM_CACHE_H */
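
// Illustrative usage sketch (hypothetical caller code; 'cache' and 'offset'
// are placeholders, not declared by this header): a cache is locked first,
// a reference is taken while the lock is held, pages are looked up or
// modified, and ReleaseRefAndUnlock() then drops the reference and the lock
// in one step:
//
//     cache->Lock();
//     cache->AcquireRefLocked();
//     vm_page* page = cache->LookupPage(offset);
//     // ... work with the page while the cache stays locked ...
//     cache->ReleaseRefAndUnlock();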