/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_VM_CACHE_H
#define _KERNEL_VM_VM_CACHE_H


#include <debug.h>
#include <kernel.h>
#include <util/DoublyLinkedList.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include "kernel_debug_config.h"


struct kernel_args;
class ObjectCache;


// Backing-store type of a cache. Matches the VMCacheFactory::Create*()
// methods below; stored in VMCache::type (6-bit field).
enum {
	CACHE_TYPE_RAM = 0,
	CACHE_TYPE_VNODE,
	CACHE_TYPE_DEVICE,
	CACHE_TYPE_NULL
};

// Event bits for WaitForPageEvents()/NotifyPageEvents().
enum {
	PAGE_EVENT_NOT_BUSY = 0x01	// page not busy anymore
};


// Slab object caches from which the cache objects of the respective types
// (and the VMCacheRef objects) are allocated.
extern ObjectCache* gCacheRefObjectCache;
extern ObjectCache* gAnonymousCacheObjectCache;
extern ObjectCache* gAnonymousNoSwapCacheObjectCache;
extern ObjectCache* gVnodeCacheObjectCache;
extern ObjectCache* gDeviceCacheObjectCache;
extern ObjectCache* gNullCacheObjectCache;


// Tree definition for the splay tree that holds a cache's pages, keyed by
// the page's cache_offset.
struct VMCachePagesTreeDefinition {
	typedef page_num_t KeyType;
	typedef vm_page NodeType;

	static page_num_t GetKey(const NodeType* node)
	{
		return node->cache_offset;
	}

	static SplayTreeLink<NodeType>* GetLink(NodeType* node)
	{
		return &node->cache_link;
	}

	// Three-way comparison of a key against a node's cache_offset
	// (< 0, 0, > 0 like strcmp()).
	static int Compare(page_num_t key, const NodeType* node)
	{
		return key == node->cache_offset ? 0
			: (key < node->cache_offset ? -1 : 1);
	}

	// Secondary singly-linked list link used by the iteratable tree.
	static NodeType** GetListLink(NodeType* node)
	{
		return &node->cache_next;
	}
};

typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;


// A VMCache manages the set of vm_pages belonging to one backing store.
// Subclasses (allocated from the object caches above) implement the
// actual store operations (Commit(), Read(), Write(), Fault(), ...).
// Caches form chains via the "source" pointer, with the "consumers" list
// pointing the other way; most operations require the cache's lock to be
// held (cf. Lock()/AssertLocked()).
struct VMCache : public DoublyLinkedListLinkImpl<VMCache> {
public:
	typedef DoublyLinkedList<VMCache> ConsumerList;

public:
								VMCache();
	virtual						~VMCache();

			status_t			Init(uint32 cacheType, uint32 allocationFlags);

	virtual	void				Delete();

	// Locking wrappers around fLock (definitions below).
	inline	bool				Lock();
	inline	bool				TryLock();
	inline	bool				SwitchLock(mutex* from);
	inline	bool				SwitchFromReadLock(rw_lock* from);
			void				Unlock(bool consumerLocked = false);
	inline	void				AssertLocked();

	// Reference counting. The *Locked() variants require the cache to be
	// locked already.
	inline	void				AcquireRefLocked();
	inline	void				AcquireRef();
	inline	void				ReleaseRefLocked();
	inline	void				ReleaseRef();
	inline	void				ReleaseRefAndUnlock(
									bool consumerLocked = false);

	inline	VMCacheRef*			CacheRef() const	{ return fCacheRef; }

	// Blocks until one of the given PAGE_EVENT_* events occurs on the page.
	// If "relock" is true, the cache is relocked before returning.
			void				WaitForPageEvents(vm_page* page, uint32 events,
									bool relock);
			// Wakes waiters; cheap inline fast path when nobody waits.
			void				NotifyPageEvents(vm_page* page, uint32 events)
									{ if (fPageEventWaiters != NULL)
										_NotifyPageEvents(page, events); }
	inline	void				MarkPageUnbusy(vm_page* page);

	// Page management. Offsets are byte offsets into the cache.
			vm_page*			LookupPage(off_t offset);
			void				InsertPage(vm_page* page, off_t offset);
			void				RemovePage(vm_page* page);
			void				MovePage(vm_page* page);
			void				MoveAllPages(VMCache* fromCache);

	inline	page_num_t			WiredPagesCount() const;
	inline	void				IncrementWiredPagesCount();
	inline	void				DecrementWiredPagesCount();

			void				AddConsumer(VMCache* consumer);

	// Area management (areas mapping this cache).
			status_t			InsertAreaLocked(VMArea* area);
			status_t			RemoveArea(VMArea* area);
			void				TransferAreas(VMCache* fromCache);
			uint32				CountWritableAreas(VMArea* ignoreArea) const;

			status_t			WriteModified();
			status_t			SetMinimalCommitment(off_t commitment,
									int priority);
	virtual	status_t			Resize(off_t newSize, int priority);

			status_t			FlushAndRemoveAllPages();

			void*				UserData()	{ return fUserData; }
			void				SetUserData(void* data)	{ fUserData = data; }
									// Settable by the lock owner and valid as
									// long as the lock is owned.

			// for debugging only
			int32				RefCount() const
									{ return fRefCount; }

	// backing store operations
	virtual	status_t			Commit(off_t size, int priority);
	virtual	bool				HasPage(off_t offset);

	virtual	status_t			Read(off_t offset, const generic_io_vec *vecs,
									size_t count, uint32 flags,
									generic_size_t *_numBytes);
	virtual	status_t			Write(off_t offset, const generic_io_vec *vecs,
									size_t count, uint32 flags,
									generic_size_t *_numBytes);
	virtual	status_t			WriteAsync(off_t offset,
									const generic_io_vec* vecs, size_t count,
									generic_size_t numBytes, uint32 flags,
									AsyncIOCallback* callback);
	virtual	bool				CanWritePage(off_t offset);

	virtual	int32				MaxPagesPerWrite() const
									{ return -1; }	// no restriction
	virtual	int32				MaxPagesPerAsyncWrite() const
									{ return -1; }	// no restriction

	virtual	status_t			Fault(struct VMAddressSpace *aspace,
									off_t offset);

	virtual	void				Merge(VMCache* source);

	virtual	status_t			AcquireUnreferencedStoreRef();
	virtual	void				AcquireStoreRef();
	virtual	void				ReleaseStoreRef();

	virtual	bool				DebugHasPage(off_t offset);
			vm_page*			DebugLookupPage(off_t offset);

	virtual	void				Dump(bool showPages) const;

protected:
	// Actually frees the object (e.g. returns it to its slab cache);
	// implemented by the derived classes, invoked via Delete().
	virtual	void				DeleteObject() = 0;

public:
			VMArea*				areas;
			ConsumerList		consumers;
				// list of caches that use this cache as a source
			VMCachePagesTree	pages;
			VMCache*			source;
			off_t				virtual_base;
			off_t				virtual_end;
			off_t				committed_size;
				// TODO: Remove!
			uint32				page_count;
			uint32				temporary : 1;
			uint32				type : 6;	// CACHE_TYPE_* constant

#if DEBUG_CACHE_LIST
			// links into the global gDebugCacheList
			VMCache*			debug_previous;
			VMCache*			debug_next;
#endif

private:
			struct PageEventWaiter;
			friend struct VMCacheRef;

private:
			void				_NotifyPageEvents(vm_page* page, uint32 events);

	inline	bool				_IsMergeable() const;

			void				_MergeWithOnlyConsumer();
			void				_RemoveConsumer(VMCache* consumer);

private:
			int32				fRefCount;
			mutex				fLock;
			PageEventWaiter*	fPageEventWaiters;	// singly-linked waiter list
			void*				fUserData;
			VMCacheRef*			fCacheRef;
			page_num_t			fWiredPagesCount;	// never exceeds page_count
};


#if DEBUG_CACHE_LIST
extern VMCache* gDebugCacheList;
#endif


// Factory creating the concrete VMCache subclasses, one Create*() method
// per CACHE_TYPE_* constant. On success the new cache is returned via the
// "cache" reference parameter.
class VMCacheFactory {
public:
	static	status_t		CreateAnonymousCache(VMCache*& cache,
								bool canOvercommit, int32 numPrecommittedPages,
								int32 numGuardPages, bool swappable,
								int priority);
	static	status_t		CreateVnodeCache(VMCache*& cache,
								struct vnode* vnode);
	static	status_t		CreateDeviceCache(VMCache*& cache,
								addr_t baseAddress);
	static	status_t		CreateNullCache(int priority, VMCache*& cache);
};



bool
VMCache::Lock()
{
	return mutex_lock(&fLock) == B_OK;
}


bool
VMCache::TryLock()
{
	return mutex_trylock(&fLock) == B_OK;
}


// Atomically unlocks "from" and locks this cache's mutex.
bool
VMCache::SwitchLock(mutex* from)
{
	return mutex_switch_lock(from, &fLock) == B_OK;
}


// Atomically releases the read lock "from" and locks this cache's mutex.
bool
VMCache::SwitchFromReadLock(rw_lock* from)
{
	return mutex_switch_from_read_lock(from, &fLock) == B_OK;
}


void
VMCache::AssertLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);
}


void
VMCache::AcquireRefLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount++;
}


void
VMCache::AcquireRef()
{
	Lock();
	fRefCount++;
	Unlock();
}


void
VMCache::ReleaseRefLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount--;
}


void
VMCache::ReleaseRef()
{
	Lock();
	fRefCount--;
	Unlock();
}


void
VMCache::ReleaseRefAndUnlock(bool consumerLocked)
{
	ReleaseRefLocked();
	Unlock(consumerLocked);
}


// Clears the page's busy flag and wakes any thread waiting for the
// PAGE_EVENT_NOT_BUSY event on it.
void
VMCache::MarkPageUnbusy(vm_page* page)
{
	ASSERT(page->busy);
	page->busy = false;
	NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
}


page_num_t
VMCache::WiredPagesCount() const
{
	return fWiredPagesCount;
}


void
VMCache::IncrementWiredPagesCount()
{
	ASSERT(fWiredPagesCount < page_count);

	fWiredPagesCount++;
}


void
VMCache::DecrementWiredPagesCount()
{
	ASSERT(fWiredPagesCount > 0);

	fWiredPagesCount--;
}


// vm_page methods implemented here to avoid VMCache.h inclusion in vm_types.h

// Keeps the owning cache's wired pages count in sync: only the 0 -> 1
// transition of the page's own wired count bumps the cache-level counter.
inline void
vm_page::IncrementWiredCount()
{
	if (fWiredCount++ == 0)
		cache_ref->cache->IncrementWiredPagesCount();
}


// Counterpart to IncrementWiredCount(): only the 1 -> 0 transition
// decrements the cache-level counter.
inline void
vm_page::DecrementWiredCount()
{
	ASSERT(fWiredCount > 0);

	if (--fWiredCount == 0)
		cache_ref->cache->DecrementWiredPagesCount();
}


#ifdef __cplusplus
extern "C" {
#endif

status_t vm_cache_init(struct kernel_args *args);
void vm_cache_init_post_heap();
struct VMCache *vm_cache_acquire_locked_page_cache(struct vm_page *page,
	bool dontWait);

#ifdef __cplusplus
}
#endif


#endif	/* _KERNEL_VM_VM_CACHE_H */