/*
 * Copyright 2010-2013, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <file_systems/ram_disk/ram_disk.h>

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <algorithm>

#include <device_manager.h>
#include <Drivers.h>

#include <AutoDeleter.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>

#include <fs/KPath.h>
#include <lock.h>
#include <util/fs_trim_support.h>
#include <vm/vm.h>
#include <vm/VMCache.h>
#include <vm/vm_page.h>

#include "dma_resources.h"
#include "io_requests.h"
#include "IOSchedulerSimple.h"


//#define TRACE_RAM_DISK
#ifdef TRACE_RAM_DISK
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) do {} while (false)
#endif


static const unsigned char kRamdiskIcon[] = {
	0x6e, 0x63, 0x69, 0x66, 0x0e, 0x03, 0x01, 0x00, 0x00, 0x02, 0x00, 0x16,
	0x02, 0x3c, 0xc7, 0xee, 0x38, 0x9b, 0xc0, 0xba, 0x16, 0x57, 0x3e, 0x39,
	0xb0, 0x49, 0x77, 0xc8, 0x42, 0xad, 0xc7, 0x00, 0xff, 0xff, 0xd3, 0x02,
	0x00, 0x06, 0x02, 0x3c, 0x96, 0x32, 0x3a, 0x4d, 0x3f, 0xba, 0xfc, 0x01,
	0x3d, 0x5a, 0x97, 0x4b, 0x57, 0xa5, 0x49, 0x84, 0x4d, 0x00, 0x47, 0x47,
	0x47, 0xff, 0xa5, 0xa0, 0xa0, 0x02, 0x00, 0x16, 0x02, 0xbc, 0x59, 0x2f,
	0xbb, 0x29, 0xa7, 0x3c, 0x0c, 0xe4, 0xbd, 0x0b, 0x7c, 0x48, 0x92, 0xc0,
	0x4b, 0x79, 0x66, 0x00, 0x7d, 0xff, 0xd4, 0x02, 0x00, 0x06, 0x02, 0x38,
	0xdb, 0xb4, 0x39, 0x97, 0x33, 0xbc, 0x4a, 0x33, 0x3b, 0xa5, 0x42, 0x48,
	0x6e, 0x66, 0x49, 0xee, 0x7b, 0x00, 0x59, 0x67, 0x56, 0xff, 0xeb, 0xb2,
	0xb2, 0x03, 0xa7, 0xff, 0x00, 0x03, 0xff, 0x00, 0x00, 0x04, 0x01, 0x80,
	0x03, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x6a, 0x05, 0x33, 0x02,
	0x00, 0x06, 0x02, 0x3a, 0x5d, 0x2c, 0x39, 0xf8, 0xb1, 0xb9, 0xdb, 0xf1,
	0x3a, 0x4c, 0x0f, 0x48, 0xae, 0xea, 0x4a, 0xc0, 0x91, 0x00, 0x74, 0x74,
	0x74, 0xff, 0x3e, 0x3d, 0x3d, 0x02, 0x00, 0x16, 0x02, 0x38, 0x22, 0x1b,
	0x3b, 0x11, 0x73, 0xbc, 0x5e, 0xb5, 0x39, 0x4b, 0xaa, 0x4a, 0x47, 0xf1,
	0x49, 0xc2, 0x1d, 0x00, 0xb0, 0xff, 0x83, 0x02, 0x00, 0x16, 0x03, 0x36,
	0xed, 0xe9, 0x36, 0xb9, 0x49, 0xba, 0x0a, 0xf6, 0x3a, 0x32, 0x6f, 0x4a,
	0x79, 0xef, 0x4b, 0x03, 0xe7, 0x00, 0x5a, 0x38, 0xdc, 0xff, 0x7e, 0x0d,
	0x0a, 0x06, 0x22, 0x3c, 0x22, 0x49, 0x44, 0x5b, 0x5a, 0x3e, 0x5a, 0x31,
	0x39, 0x25, 0x0a, 0x04, 0x22, 0x3c, 0x44, 0x4b, 0x5a, 0x31, 0x39, 0x25,
	0x0a, 0x04, 0x44, 0x4b, 0x44, 0x5b, 0x5a, 0x3e, 0x5a, 0x31, 0x0a, 0x04,
	0x22, 0x3c, 0x22, 0x49, 0x44, 0x5b, 0x44, 0x4b, 0x08, 0x02, 0x27, 0x43,
	0xb8, 0x14, 0xc1, 0xf1, 0x08, 0x02, 0x26, 0x43, 0x29, 0x44, 0x0a, 0x05,
	0x44, 0x5d, 0x49, 0x5d, 0x60, 0x3e, 0x5a, 0x3b, 0x5b, 0x3f, 0x0a, 0x04,
	0x3c, 0x5a, 0x5a, 0x3c, 0x5a, 0x36, 0x3c, 0x52, 0x0a, 0x04, 0x24, 0x4e,
	0x3c, 0x5a, 0x3c, 0x52, 0x24, 0x48, 0x06, 0x07, 0xaa, 0x3f, 0x42, 0x2e,
	0x24, 0x48, 0x3c, 0x52, 0x5a, 0x36, 0x51, 0x33, 0x51, 0x33, 0x50, 0x34,
	0x4b, 0x33, 0x4d, 0x34, 0x49, 0x32, 0x49, 0x30, 0x48, 0x31, 0x49, 0x30,
	0x06, 0x08, 0xfa, 0xfa, 0x42, 0x50, 0x3e, 0x54, 0x40, 0x55, 0x3f, 0xc7,
	0xeb, 0x41, 0xc8, 0x51, 0x42, 0xc9, 0x4f, 0x42, 0xc8, 0xda, 0x42, 0xca,
	0x41, 0xc0, 0xf1, 0x5d, 0x45, 0xca, 0x81, 0x46, 0xc7, 0xb7, 0x46, 0xc8,
	0xa9, 0x46, 0xc7, 0x42, 0x44, 0x51, 0x45, 0xc6, 0xb9, 0x43, 0xc6, 0x53,
	0x0a, 0x07, 0x3c, 0x5c, 0x40, 0x5c, 0x42, 0x5e, 0x48, 0x5e, 0x4a, 0x5c,
	0x46, 0x5a, 0x45, 0x4b, 0x06, 0x09, 0x9a, 0xf6, 0x03, 0x42, 0x2e, 0x24,
	0x48, 0x4e, 0x3c, 0x5a, 0x5a, 0x3c, 0x36, 0x51, 0x33, 0x51, 0x33, 0x50,
	0x34, 0x4b, 0x33, 0x4d, 0x34, 0x49, 0x32, 0x49, 0x30, 0x48, 0x31, 0x49,
	0x30, 0x18, 0x0a, 0x07, 0x01, 0x06, 0x00, 0x0a, 0x00, 0x01, 0x00, 0x10,
	0x01, 0x17, 0x84, 0x00, 0x04, 0x0a, 0x01, 0x01, 0x01, 0x00, 0x0a, 0x02,
	0x01, 0x02, 0x00, 0x0a, 0x03, 0x01, 0x03, 0x00, 0x0a, 0x04, 0x01, 0x04,
	0x10, 0x01, 0x17, 0x85, 0x20, 0x04, 0x0a, 0x06, 0x01, 0x05, 0x30, 0x24,
	0xb3, 0x99, 0x01, 0x17, 0x82, 0x00, 0x04, 0x0a, 0x05, 0x01, 0x05, 0x30,
	0x20, 0xb2, 0xe6, 0x01, 0x17, 0x82, 0x00, 0x04, 0x0a, 0x09, 0x01, 0x0b,
	0x02, 0x3e, 0x9b, 0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0,
	0x21, 0x48, 0xed, 0x4d, 0xc8, 0x5a, 0x02, 0x0a, 0x09, 0x01, 0x0b, 0x02,
	0x3e, 0x9b, 0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21,
	0x48, 0x4c, 0xd4, 0xc7, 0x9c, 0x11, 0x0a, 0x09, 0x01, 0x0b, 0x02, 0x3e,
	0x9b, 0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x47,
	0x5c, 0xe7, 0xc6, 0x2c, 0x1a, 0x0a, 0x09, 0x01, 0x0b, 0x02, 0x3e, 0x9b,
	0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x46, 0x1b,
	0xf5, 0xc4, 0x28, 0x4e, 0x0a, 0x08, 0x01, 0x0c, 0x12, 0x3e, 0xc0, 0x21,
	0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x45, 0xb6, 0x34,
	0xc4, 0x22, 0x1f, 0x01, 0x17, 0x84, 0x00, 0x04, 0x0a, 0x0a, 0x01, 0x07,
	0x02, 0x3e, 0xc0, 0x21, 0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0,
	0x21, 0x45, 0xb6, 0x34, 0xc4, 0x22, 0x1f, 0x0a, 0x0b, 0x01, 0x08, 0x02,
	0x3e, 0xc0, 0x21, 0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21,
	0x45, 0xb6, 0x34, 0xc4, 0x22, 0x1f, 0x0a, 0x0c, 0x01, 0x09, 0x02, 0x3e,
	0xc0, 0x21, 0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x45,
	0xb6, 0x34, 0xc4, 0x22, 0x1f, 0x0a, 0x08, 0x01, 0x0a, 0x12, 0x3e, 0x98,
	0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x48, 0xf2,
	0x4e, 0xc7, 0xee, 0x3f, 0x01, 0x17, 0x84, 0x22, 0x04, 0x0a, 0x0d, 0x01,
	0x0a, 0x02, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e,
	0x7b, 0x5e, 0x48, 0xf2, 0x4e, 0xc7, 0xee, 0x3f, 0x0a, 0x08, 0x01, 0x0a,
	0x12, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b,
	0x5e, 0x48, 0x53, 0xa1, 0xc6, 0xa0, 0xb6, 0x01, 0x17, 0x84, 0x22, 0x04,
	0x0a, 0x0d, 0x01, 0x0a, 0x02, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35,
	0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x48, 0x53, 0xa1, 0xc6, 0xa0, 0xb6, 0x0a,
	0x08, 0x01, 0x0a, 0x12, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9,
	0x3d, 0x3e, 0x7b, 0x5e, 0x47, 0x69, 0xe9, 0xc4, 0xa6, 0x5a, 0x01, 0x17,
	0x84, 0x22, 0x04, 0x0a, 0x0d, 0x01, 0x0a, 0x02, 0x3e, 0x98, 0xfd, 0xb5,
	0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x47, 0x69, 0xe9, 0xc4,
	0xa6, 0x5a, 0x0a, 0x08, 0x01, 0x0a, 0x12, 0x3e, 0x98, 0xfd, 0xb5, 0xf6,
	0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x46, 0x2c, 0x90, 0xb8, 0xd1,
	0xff, 0x01, 0x17, 0x84, 0x22, 0x04, 0x0a, 0x0d, 0x01, 0x0a, 0x02, 0x3e,
	0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x46,
	0x2c, 0x90, 0xb8, 0xd1, 0xff
};


// parameters for the DMA resource
static const uint32 kDMAResourceBufferCount = 16;
static const uint32 kDMAResourceBounceBufferCount = 16;

static const char* const kDriverModuleName
	= "drivers/disk/virtual/ram_disk/driver_v1";
static const char* const kControlDeviceModuleName
	= "drivers/disk/virtual/ram_disk/control/device_v1";
static const char* const kRawDeviceModuleName
	= "drivers/disk/virtual/ram_disk/raw/device_v1";

static const char* const kControlDeviceName = RAM_DISK_CONTROL_DEVICE_NAME;
static const char* const kRawDeviceBaseName = RAM_DISK_RAW_DEVICE_BASE_NAME;

static const char* const kFilePathItem = "ram_disk/file_path";
static const char* const kDeviceSizeItem = "ram_disk/device_size";
static const char* const kDeviceIDItem = "ram_disk/id";


struct RawDevice;
typedef DoublyLinkedList<RawDevice> RawDeviceList;

struct device_manager_info* sDeviceManager;

static RawDeviceList sDeviceList;
static mutex sDeviceListLock = MUTEX_INITIALIZER("ram disk device list");
static uint64 sUsedRawDeviceIDs = 0;


static int32 allocate_raw_device_id();
static void free_raw_device_id(int32 id);


struct Device {
	Device(device_node* node)
		:
		fNode(node)
	{
		mutex_init(&fLock, "ram disk device");
	}

	virtual ~Device()
	{
		mutex_destroy(&fLock);
	}

	bool Lock() { mutex_lock(&fLock); return true; }
	void Unlock() { mutex_unlock(&fLock); }

	device_node* Node() const { return fNode; }

	virtual status_t PublishDevice() = 0;

protected:
	mutex			fLock;
	device_node*	fNode;
};


struct ControlDevice : Device {
	ControlDevice(device_node* node)
		:
		Device(node)
	{
	}

	status_t Register(const char* filePath, uint64 deviceSize, int32& _id)
	{
		int32 id = allocate_raw_device_id();
		if (id < 0)
			return B_BUSY;

		device_attr attrs[] = {
			{B_DEVICE_PRETTY_NAME, B_STRING_TYPE,
				{string: "RAM Disk Raw Device"}},
			{kDeviceSizeItem, B_UINT64_TYPE, {ui64: deviceSize}},
			{kDeviceIDItem, B_UINT32_TYPE, {ui32: (uint32)id}},
			{kFilePathItem, B_STRING_TYPE, {string: filePath}},
			{NULL}
		};

		// If filePath is NULL, remove the attribute.
		if (filePath == NULL) {
			size_t count = sizeof(attrs) / sizeof(attrs[0]);
			memset(attrs + count - 2, 0, sizeof(attrs[0]));
		}

		status_t error = sDeviceManager->register_node(
			sDeviceManager->get_parent_node(Node()), kDriverModuleName, attrs,
			NULL, NULL);
		if (error != B_OK) {
			free_raw_device_id(id);
			return error;
		}

		_id = id;
		return B_OK;
	}

	virtual status_t PublishDevice()
	{
		return sDeviceManager->publish_device(Node(), kControlDeviceName,
			kControlDeviceModuleName);
	}
};


struct RawDevice : Device, DoublyLinkedListLinkImpl<RawDevice> {
	RawDevice(device_node* node)
		:
		Device(node),
		fID(-1),
		fUnregistered(false),
		fDeviceSize(0),
		fDeviceName(NULL),
		fFilePath(NULL),
		fCache(NULL),
		fDMAResource(NULL),
		fIOScheduler(NULL)
	{
	}

	virtual ~RawDevice()
	{
		if (fID >= 0) {
			MutexLocker locker(sDeviceListLock);
			sDeviceList.Remove(this);
		}

		free(fDeviceName);
		free(fFilePath);
	}

	int32 ID() const { return fID; }
	off_t DeviceSize() const { return fDeviceSize; }
	const char* DeviceName() const { return fDeviceName; }

	bool IsUnregistered() const { return fUnregistered; }

	void SetUnregistered(bool unregistered)
	{
		fUnregistered = unregistered;
	}

	status_t Init(int32 id, const char* filePath, uint64 deviceSize)
	{
		fID = id;
		fFilePath = filePath != NULL ? strdup(filePath) : NULL;
		if (filePath != NULL && fFilePath == NULL)
			return B_NO_MEMORY;

		fDeviceSize = (deviceSize + B_PAGE_SIZE - 1) / B_PAGE_SIZE
			* B_PAGE_SIZE;

		if (fDeviceSize < B_PAGE_SIZE
			|| (uint64)fDeviceSize / B_PAGE_SIZE
				> vm_page_num_pages() * 2 / 3) {
			return B_BAD_VALUE;
		}

		// construct our device path
		KPath path(kRawDeviceBaseName);
		char buffer[32];
		snprintf(buffer, sizeof(buffer), "%" B_PRId32 "/raw", fID);

		status_t error = path.Append(buffer);
		if (error != B_OK)
			return error;

		fDeviceName = path.DetachBuffer();

		// insert into device list
		RawDevice* nextDevice = NULL;
		MutexLocker locker(sDeviceListLock);
		for (RawDeviceList::Iterator it = sDeviceList.GetIterator();
				(nextDevice = it.Next()) != NULL;) {
			if (nextDevice->ID() > fID)
				break;
		}

		sDeviceList.InsertBefore(nextDevice, this);

		return B_OK;
	}

	status_t Prepare()
	{
		status_t error = VMCacheFactory::CreateAnonymousCache(fCache, false, 0,
			0, false, VM_PRIORITY_SYSTEM);
		if (error != B_OK) {
			Unprepare();
			return error;
		}

		fCache->temporary = 1;
		fCache->virtual_end = fDeviceSize;

		error = fCache->Commit(fDeviceSize, VM_PRIORITY_SYSTEM);
		if (error != B_OK) {
			Unprepare();
			return error;
		}

		if (fFilePath != NULL) {
			error = _LoadFile();
			if (error != B_OK) {
				Unprepare();
				return error;
			}
		}

		// no DMA restrictions
		const dma_restrictions restrictions = {};

		fDMAResource = new(std::nothrow) DMAResource;
		if (fDMAResource == NULL) {
			Unprepare();
			return B_NO_MEMORY;
		}

		error = fDMAResource->Init(restrictions, B_PAGE_SIZE,
			kDMAResourceBufferCount, kDMAResourceBounceBufferCount);
		if (error != B_OK) {
			Unprepare();
			return error;
		}

		fIOScheduler = new(std::nothrow) IOSchedulerSimple(fDMAResource);
		if (fIOScheduler == NULL) {
			Unprepare();
			return B_NO_MEMORY;
		}

		error = fIOScheduler->Init("ram disk device scheduler");
		if (error != B_OK) {
			Unprepare();
			return error;
		}

		fIOScheduler->SetCallback(&_DoIOEntry, this);

		return B_OK;
	}

	void Unprepare()
	{
		delete fIOScheduler;
		fIOScheduler = NULL;

		delete fDMAResource;
		fDMAResource = NULL;

		if (fCache != NULL) {
			fCache->Lock();
			fCache->ReleaseRefAndUnlock();
			fCache = NULL;
		}
	}

	void GetInfo(ram_disk_ioctl_info& _info) const
	{
		_info.id = fID;
		_info.size = fDeviceSize;
		memset(&_info.path, 0, sizeof(_info.path));
		if (fFilePath != NULL)
			strlcpy(_info.path, fFilePath, sizeof(_info.path));
	}

	status_t Flush()
	{
		static const size_t kPageCountPerIteration = 1024;
		static const size_t kMaxGapSize = 15;

		FileDescriptorCloser fd(open(fFilePath, O_WRONLY));
		if (!fd.IsSet())
			return errno;

		vm_page** pages = new(std::nothrow) vm_page*[kPageCountPerIteration];
		ArrayDeleter<vm_page*> pagesDeleter(pages);

		uint8* buffer = (uint8*)malloc(kPageCountPerIteration * B_PAGE_SIZE);
		MemoryDeleter bufferDeleter(buffer);

		if (pages == NULL || buffer == NULL)
			return B_NO_MEMORY;

		// Iterate through all pages of the cache and write those back that
		// have been modified.
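		// To keep the number of pwrite() calls low, modified pages are written
		// in runs: starting at a modified page, subsequent cache pages are
		// collected as long as no more than kMaxGapSize unmodified pages lie
		// between two modified ones and the pages[] array has room. Pages
		// missing from the cache within a run are written out as zeros.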
		AutoLocker<VMCache> locker(fCache);

		status_t error = B_OK;

		for (off_t offset = 0; offset < fDeviceSize;) {
			// find the first modified page at or after the current offset
			VMCachePagesTree::Iterator it
				= fCache->pages.GetIterator(offset / B_PAGE_SIZE, true, true);
			vm_page* firstModified;
			while ((firstModified = it.Next()) != NULL
				&& !firstModified->modified) {
			}

			if (firstModified == NULL)
				break;

			if (firstModified->busy) {
				fCache->WaitForPageEvents(firstModified, PAGE_EVENT_NOT_BUSY,
					true);
				continue;
			}

			pages[0] = firstModified;
			page_num_t firstPageIndex = firstModified->cache_offset;
			offset = firstPageIndex * B_PAGE_SIZE;

			// Collect more pages until the gap between two modified pages gets
			// too large or we hit the end of our array.
			size_t previousModifiedIndex = 0;
			size_t previousIndex = 0;
			while (vm_page* page = it.Next()) {
				page_num_t index = page->cache_offset - firstPageIndex;
				if (page->busy
					|| index >= kPageCountPerIteration
					|| index - previousModifiedIndex > kMaxGapSize) {
					break;
				}

				pages[index] = page;

				// clear page array gap since the previous page
				if (previousIndex + 1 < index) {
					memset(pages + previousIndex + 1, 0,
						(index - previousIndex - 1) * sizeof(vm_page*));
				}

				previousIndex = index;
				if (page->modified)
					previousModifiedIndex = index;
			}

			// mark all pages we want to write busy
			size_t pagesToWrite = previousModifiedIndex + 1;
			for (size_t i = 0; i < pagesToWrite; i++) {
				if (vm_page* page = pages[i]) {
					DEBUG_PAGE_ACCESS_START(page);
					page->busy = true;
				}
			}

			locker.Unlock();

			// copy the pages to our buffer
			for (size_t i = 0; i < pagesToWrite; i++) {
				if (vm_page* page = pages[i]) {
					error = vm_memcpy_from_physical(buffer + i * B_PAGE_SIZE,
						page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE,
						false);
					if (error != B_OK) {
						dprintf("ramdisk: error copying page %" B_PRIu64
							" data: %s\n", (uint64)page->physical_page_number,
							strerror(error));
						break;
					}
				} else
					memset(buffer + i * B_PAGE_SIZE, 0, B_PAGE_SIZE);
			}

			// write the buffer
			if (error == B_OK) {
				ssize_t bytesWritten = pwrite(fd.Get(), buffer,
					pagesToWrite * B_PAGE_SIZE, offset);
				if (bytesWritten < 0) {
					dprintf("ramdisk: error writing pages to file: %s\n",
						strerror(bytesWritten));
					error = bytesWritten;
				} else if ((size_t)bytesWritten != pagesToWrite * B_PAGE_SIZE) {
					dprintf("ramdisk: error writing pages to file: short "
						"write (%zd/%zu)\n", bytesWritten,
						pagesToWrite * B_PAGE_SIZE);
					error = B_ERROR;
				}
			}

			// mark the pages unbusy, on success also unmodified
			locker.Lock();

			for (size_t i = 0; i < pagesToWrite; i++) {
				if (vm_page* page = pages[i]) {
					if (error == B_OK)
						page->modified = false;
					fCache->MarkPageUnbusy(page);
					DEBUG_PAGE_ACCESS_END(page);
				}
			}

			if (error != B_OK)
				break;

			offset += pagesToWrite * B_PAGE_SIZE;
		}

		return error;
	}

	status_t Trim(fs_trim_data* trimData)
	{
		TRACE("trim_device()\n");

		trimData->trimmed_size = 0;

		const off_t deviceSize = fDeviceSize; // in bytes
		if (deviceSize < 0)
			return B_BAD_VALUE;

		STATIC_ASSERT(sizeof(deviceSize) <= sizeof(uint64));
		ASSERT(deviceSize >= 0);

		// Do not trim past device end
		for (uint32 i = 0; i < trimData->range_count; i++) {
			uint64 offset = trimData->ranges[i].offset;
			uint64& size = trimData->ranges[i].size;

			if (offset >= (uint64)deviceSize)
				return B_BAD_VALUE;
			size = min_c(size, (uint64)deviceSize - offset);
		}

		status_t result = B_OK;
		uint64 trimmedSize = 0;
		for (uint32 i = 0; i < trimData->range_count; i++) {
			uint64 offset = trimData->ranges[i].offset;
			uint64 length = trimData->ranges[i].size;

			// Round up offset and length to multiple of the page size
			// The offset is rounded up, so some space may be left
			// (not trimmed) at the start of the range.
			offset = (offset + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
			// Adjust the length for the possibly skipped range
			length -= offset - trimData->ranges[i].offset;
			// The length is rounded down, so some space at the end may also
			// be left (not trimmed).
			length &= ~(B_PAGE_SIZE - 1);

			if (length == 0)
				continue;

			TRACE("ramdisk: trim %" B_PRIu64 " bytes from %" B_PRIu64 "\n",
				length, offset);

			ASSERT(offset % B_PAGE_SIZE == 0);
			ASSERT(length % B_PAGE_SIZE == 0);

			vm_page** pages = new(std::nothrow) vm_page*[length / B_PAGE_SIZE];
			if (pages == NULL) {
				result = B_NO_MEMORY;
				break;
			}
			ArrayDeleter<vm_page*> pagesDeleter(pages);

			_GetPages((off_t)offset, (off_t)length, false, pages);

			AutoLocker<VMCache> locker(fCache);
			uint64 j;
			for (j = 0; j < length / B_PAGE_SIZE; j++) {
				// If we run out of pages (some may already be trimmed), stop.
				if (pages[j] == NULL)
					break;

				TRACE("free range %" B_PRIu32 ", page %" B_PRIu64 ", offset %"
					B_PRIu64 "\n", i, j, offset);
				if (pages[j]->Cache())
					fCache->RemovePage(pages[j]);
				vm_page_free(NULL, pages[j]);
				trimmedSize += B_PAGE_SIZE;
			}
		}

		trimData->trimmed_size = trimmedSize;

		return result;
	}

	status_t DoIO(IORequest* request)
	{
		return fIOScheduler->ScheduleRequest(request);
	}

	virtual status_t PublishDevice()
	{
		return sDeviceManager->publish_device(Node(), fDeviceName,
			kRawDeviceModuleName);
	}

private:
	static status_t _DoIOEntry(void* data, IOOperation* operation)
	{
		return ((RawDevice*)data)->_DoIO(operation);
	}

	status_t _DoIO(IOOperation* operation)
	{
		off_t offset = operation->Offset();
		generic_size_t length = operation->Length();

		ASSERT(offset % B_PAGE_SIZE == 0);
		ASSERT(length % B_PAGE_SIZE == 0);

		const generic_io_vec* vecs = operation->Vecs();
		generic_size_t vecOffset = 0;
		bool isWrite = operation->IsWrite();

		vm_page** pages = new(std::nothrow) vm_page*[length / B_PAGE_SIZE];
		if (pages == NULL)
			return B_NO_MEMORY;
		ArrayDeleter<vm_page*> pagesDeleter(pages);

		_GetPages(offset, length, isWrite, pages);

		status_t error = B_OK;
		size_t index = 0;

		while (length > 0) {
			vm_page* page = pages[index];

			if (isWrite)
				page->modified = true;

			error = _CopyData(page, vecs, vecOffset, isWrite);
			if (error != B_OK)
				break;

			offset += B_PAGE_SIZE;
			length -= B_PAGE_SIZE;
			index++;
		}

		_PutPages(operation->Offset(), operation->Length(), pages,
			error == B_OK);

		if (error != B_OK) {
			fIOScheduler->OperationCompleted(operation, error, 0);
			return error;
		}

		fIOScheduler->OperationCompleted(operation, B_OK, operation->Length());
		return B_OK;
	}

	void _GetPages(off_t offset, off_t length, bool isWrite, vm_page** pages)
	{
		// TODO: This method is duplicated in ramfs' DataContainer. Perhaps it
		// should be put into a common location?

		// get the pages we already have
		AutoLocker<VMCache> locker(fCache);

		size_t pageCount = length / B_PAGE_SIZE;
		size_t index = 0;
		size_t missingPages = 0;

		while (length > 0) {
			vm_page* page = fCache->LookupPage(offset);
			if (page != NULL) {
				if (page->busy) {
					fCache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
					continue;
				}

				DEBUG_PAGE_ACCESS_START(page);
				page->busy = true;
			} else
				missingPages++;

			pages[index++] = page;
			offset += B_PAGE_SIZE;
			length -= B_PAGE_SIZE;
		}

		locker.Unlock();

		// For a write we need to reserve the missing pages.
		if (isWrite && missingPages > 0) {
			vm_page_reservation reservation;
			vm_page_reserve_pages(&reservation, missingPages,
				VM_PRIORITY_SYSTEM);

			for (size_t i = 0; i < pageCount; i++) {
				if (pages[i] != NULL)
					continue;

				pages[i] = vm_page_allocate_page(&reservation,
					PAGE_STATE_WIRED | VM_PAGE_ALLOC_BUSY);

				if (--missingPages == 0)
					break;
			}

			vm_page_unreserve_pages(&reservation);
		}
	}

	void _PutPages(off_t offset, off_t length, vm_page** pages, bool success)
	{
		// TODO: This method is duplicated in ramfs' DataContainer. Perhaps it
		// should be put into a common location?

		AutoLocker<VMCache> locker(fCache);

		// Mark all pages unbusy. On error free the newly allocated pages.
		size_t index = 0;

		while (length > 0) {
			vm_page* page = pages[index++];
			if (page != NULL) {
				if (page->CacheRef() == NULL) {
					if (success) {
						fCache->InsertPage(page, offset);
						fCache->MarkPageUnbusy(page);
						DEBUG_PAGE_ACCESS_END(page);
					} else
						vm_page_free(NULL, page);
				} else {
					fCache->MarkPageUnbusy(page);
					DEBUG_PAGE_ACCESS_END(page);
				}
			}

			offset += B_PAGE_SIZE;
			length -= B_PAGE_SIZE;
		}
	}

	status_t _CopyData(vm_page* page, const generic_io_vec*& vecs,
		generic_size_t& vecOffset, bool toPage)
	{
		// map page to virtual memory
		Thread* thread = thread_get_current_thread();
		uint8* pageData = NULL;
		void* handle;
		if (page != NULL) {
			thread_pin_to_current_cpu(thread);
			addr_t virtualAddress;
			status_t error = vm_get_physical_page_current_cpu(
				page->physical_page_number * B_PAGE_SIZE, &virtualAddress,
				&handle);
			if (error != B_OK) {
				thread_unpin_from_current_cpu(thread);
				return error;
			}

			pageData = (uint8*)virtualAddress;
		}

		status_t error = B_OK;
		size_t length = B_PAGE_SIZE;
		while (length > 0) {
			size_t toCopy = std::min((generic_size_t)length,
				vecs->length - vecOffset);

			if (toCopy == 0) {
				vecs++;
				vecOffset = 0;
				continue;
			}

			phys_addr_t vecAddress = vecs->base + vecOffset;

			error = toPage
				? vm_memcpy_from_physical(pageData, vecAddress, toCopy, false)
				: (page != NULL
					? vm_memcpy_to_physical(vecAddress, pageData, toCopy, false)
					: vm_memset_physical(vecAddress, 0, toCopy));
			if (error != B_OK)
				break;

			pageData += toCopy;
			length -= toCopy;
			vecOffset += toCopy;
		}

		if (page != NULL) {
			vm_put_physical_page_current_cpu((addr_t)pageData, handle);
			thread_unpin_from_current_cpu(thread);
		}

		return error;
	}

	status_t _LoadFile()
	{
		static const size_t kPageCountPerIteration = 1024;

		FileDescriptorCloser fd(open(fFilePath, O_RDONLY));
		if (!fd.IsSet())
			return errno;

		ArrayDeleter<vm_page*> pages(
			new(std::nothrow) vm_page*[kPageCountPerIteration]);

		ArrayDeleter<uint8> buffer(
			new(std::nothrow) uint8[kPageCountPerIteration * B_PAGE_SIZE]);
			// TODO: Ideally we wouldn't use a buffer to read the file content,
			// but read into the pages we allocated directly. Unfortunately
			// there's no API to do that yet.

		if (!pages.IsSet() || !buffer.IsSet())
			return B_NO_MEMORY;

		status_t error = B_OK;

		page_num_t allocatedPages = 0;
		off_t offset = 0;
		off_t sizeRemaining = fDeviceSize;
		while (sizeRemaining > 0) {
			// Note: fDeviceSize is B_PAGE_SIZE aligned.
			size_t pagesToRead = std::min(kPageCountPerIteration,
				size_t(sizeRemaining / B_PAGE_SIZE));

			// allocate the missing pages
			if (allocatedPages < pagesToRead) {
				vm_page_reservation reservation;
				vm_page_reserve_pages(&reservation,
					pagesToRead - allocatedPages, VM_PRIORITY_SYSTEM);

				while (allocatedPages < pagesToRead) {
					pages[allocatedPages++]
						= vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
				}

				vm_page_unreserve_pages(&reservation);
			}

			// read from the file
			size_t bytesToRead = pagesToRead * B_PAGE_SIZE;
			ssize_t bytesRead = pread(fd.Get(), buffer.Get(), bytesToRead,
				offset);
			if (bytesRead < 0) {
				error = bytesRead;
				break;
			}
			size_t pagesRead = (bytesRead + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
			if (pagesRead < pagesToRead) {
				error = B_ERROR;
				break;
			}

			// clear the last read page, if partial
			if ((size_t)bytesRead < pagesRead * B_PAGE_SIZE) {
				memset(buffer.Get() + bytesRead, 0,
					pagesRead * B_PAGE_SIZE - bytesRead);
			}

			// copy data to allocated pages
			for (size_t i = 0; i < pagesRead; i++) {
				vm_page* page = pages[i];
				error = vm_memcpy_to_physical(
					page->physical_page_number * B_PAGE_SIZE,
					buffer.Get() + i * B_PAGE_SIZE, B_PAGE_SIZE, false);
				if (error != B_OK)
					break;
			}

			if (error != B_OK)
				break;

			// Add pages to cache. Ignore clear pages, though. Move those to
			// the beginning of the array, so we can reuse them in the next
			// iteration.
			AutoLocker<VMCache> locker(fCache);

			size_t clearPages = 0;
			for (size_t i = 0; i < pagesRead; i++) {
				uint64* pageData = (uint64*)(buffer.Get() + i * B_PAGE_SIZE);
				bool isClear = true;
				for (size_t k = 0; isClear && k < B_PAGE_SIZE / 8; k++)
					isClear = pageData[k] == 0;

				if (isClear) {
					pages[clearPages++] = pages[i];
				} else {
					fCache->InsertPage(pages[i], offset + i * B_PAGE_SIZE);
					DEBUG_PAGE_ACCESS_END(pages[i]);
				}
			}

			locker.Unlock();

			// Move any left-over allocated pages to the end of the empty pages
			// and compute the new allocated pages count.
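			// (Clear pages are not inserted into the cache at all; a page that
			// is absent from the cache reads back as zeros, so keeping their
			// frames around for reuse in the next iteration loses no data.)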
			if (pagesRead < allocatedPages) {
				size_t count = allocatedPages - pagesRead;
				memcpy(pages.Get() + clearPages, pages.Get() + pagesRead,
					count * sizeof(vm_page*));
				clearPages += count;
			}
			allocatedPages = clearPages;

			offset += pagesRead * B_PAGE_SIZE;
			sizeRemaining -= pagesRead * B_PAGE_SIZE;
		}

		// free left-over allocated pages
		for (size_t i = 0; i < allocatedPages; i++)
			vm_page_free(NULL, pages[i]);

		return error;
	}

private:
	int32			fID;
	bool			fUnregistered;
	off_t			fDeviceSize;
	char*			fDeviceName;
	char*			fFilePath;
	VMCache*		fCache;
	DMAResource*	fDMAResource;
	IOScheduler*	fIOScheduler;
};


struct RawDeviceCookie {
	RawDeviceCookie(RawDevice* device, int openMode)
		:
		fDevice(device),
		fOpenMode(openMode)
	{
	}

	RawDevice* Device() const { return fDevice; }
	int OpenMode() const { return fOpenMode; }

private:
	RawDevice*	fDevice;
	int			fOpenMode;
};


// #pragma mark -


static int32
allocate_raw_device_id()
{
	MutexLocker deviceListLocker(sDeviceListLock);
	for (size_t i = 0; i < sizeof(sUsedRawDeviceIDs) * 8; i++) {
		if ((sUsedRawDeviceIDs & ((uint64)1 << i)) == 0) {
			sUsedRawDeviceIDs |= (uint64)1 << i;
			return (int32)i;
		}
	}

	return -1;
}


static void
free_raw_device_id(int32 id)
{
	MutexLocker deviceListLocker(sDeviceListLock);
	sUsedRawDeviceIDs &= ~((uint64)1 << id);
}


static RawDevice*
find_raw_device(int32 id)
{
	for (RawDeviceList::Iterator it = sDeviceList.GetIterator();
			RawDevice* device = it.Next();) {
		if (device->ID() == id)
			return device;
	}

	return NULL;
}


static status_t
ioctl_register(ControlDevice* controlDevice, ram_disk_ioctl_register* request)
{
	KPath path;
	uint64 deviceSize = 0;

	if (request->path[0] != '\0') {
		// check if the path is null-terminated
		if (strnlen(request->path, sizeof(request->path))
				== sizeof(request->path)) {
			return B_BAD_VALUE;
		}

		// get a normalized file path
		status_t error = path.SetTo(request->path, true);
		if (error != B_OK) {
			dprintf("ramdisk: register: Invalid path \"%s\": %s\n",
				request->path, strerror(error));
			return B_BAD_VALUE;
		}

		struct stat st;
		if (lstat(path.Path(), &st) != 0) {
			dprintf("ramdisk: register: Failed to stat \"%s\": %s\n",
				path.Path(), strerror(errno));
			return errno;
		}

		if (!S_ISREG(st.st_mode)) {
			dprintf("ramdisk: register: \"%s\" is not a file!\n", path.Path());
			return B_BAD_VALUE;
		}

		deviceSize = st.st_size;
	} else {
		deviceSize = request->size;
	}

	return controlDevice->Register(path.Length() > 0 ? path.Path() : NULL,
		deviceSize, request->id);
}


static status_t
ioctl_unregister(ControlDevice* controlDevice,
	ram_disk_ioctl_unregister* request)
{
	// find the device in the list and unregister it
	MutexLocker locker(sDeviceListLock);
	RawDevice* device = find_raw_device(request->id);
	if (device == NULL)
		return B_ENTRY_NOT_FOUND;

	// mark unregistered before we unlock
	if (device->IsUnregistered())
		return B_BUSY;
	device->SetUnregistered(true);
	locker.Unlock();

	device_node* node = device->Node();
	status_t error = sDeviceManager->unpublish_device(node,
		device->DeviceName());
	if (error != B_OK) {
		dprintf("ramdisk: unregister: Failed to unpublish device \"%s\": %s\n",
			device->DeviceName(), strerror(error));
		return error;
	}

	error = sDeviceManager->unregister_node(node);
		// Note: B_BUSY is OK. The node will be removed as soon as possible.
	if (error != B_OK && error != B_BUSY) {
		dprintf("ramdisk: unregister: Failed to unregister node for device %"
			B_PRId32 ": %s\n", request->id, strerror(error));
		return error;
	}

	return B_OK;
}


static status_t
ioctl_info(RawDevice* device, ram_disk_ioctl_info* request)
{
	device->GetInfo(*request);
	return B_OK;
}


template<typename DeviceType, typename Request>
static status_t
handle_ioctl(DeviceType* device,
	status_t (*handler)(DeviceType*, Request*), void* buffer)
{
	// copy request to the kernel heap
	if (buffer == NULL || !IS_USER_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	Request* request = new(std::nothrow) Request;
	if (request == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<Request> requestDeleter(request);

	if (user_memcpy(request, buffer, sizeof(Request)) != B_OK)
		return B_BAD_ADDRESS;

	// handle the ioctl
	status_t error = handler(device, request);
	if (error != B_OK)
		return error;

	// copy the request back to userland
	if (user_memcpy(buffer, request, sizeof(Request)) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}


// #pragma mark - driver


static float
ram_disk_driver_supports_device(device_node* parent)
{
	const char* bus = NULL;
	if (sDeviceManager->get_attr_string(parent, B_DEVICE_BUS, &bus, false)
			== B_OK
		&& strcmp(bus, "generic") == 0) {
		return 0.8;
	}

	return -1;
}


static status_t
ram_disk_driver_register_device(device_node* parent)
{
	device_attr attrs[] = {
		{B_DEVICE_PRETTY_NAME, B_STRING_TYPE,
			{string: "RAM Disk Control Device"}},
		{NULL}
	};

	return sDeviceManager->register_node(parent, kDriverModuleName, attrs, NULL,
		NULL);
}


static status_t
ram_disk_driver_init_driver(device_node* node, void** _driverCookie)
{
	uint64 deviceSize;
	if (sDeviceManager->get_attr_uint64(node, kDeviceSizeItem, &deviceSize,
			false) == B_OK) {
		int32 id = -1;
		sDeviceManager->get_attr_uint32(node, kDeviceIDItem, (uint32*)&id,
			false);
		if (id < 0)
			return B_ERROR;

		const char* filePath = NULL;
		sDeviceManager->get_attr_string(node, kFilePathItem, &filePath, false);

		RawDevice* device = new(std::nothrow) RawDevice(node);
		if (device == NULL)
			return B_NO_MEMORY;

		status_t error = device->Init(id, filePath, deviceSize);
		if (error != B_OK) {
			delete device;
			return error;
		}

		*_driverCookie = (Device*)device;
	} else {
		ControlDevice* device = new(std::nothrow) ControlDevice(node);
		if (device == NULL)
			return B_NO_MEMORY;

		*_driverCookie = (Device*)device;
	}

	return B_OK;
}


static void
ram_disk_driver_uninit_driver(void* driverCookie)
{
	Device* device = (Device*)driverCookie;
	if (RawDevice* rawDevice = dynamic_cast<RawDevice*>(device))
		free_raw_device_id(rawDevice->ID());
	delete device;
}


static status_t
ram_disk_driver_register_child_devices(void* driverCookie)
{
	Device* device = (Device*)driverCookie;
	return device->PublishDevice();
}


// #pragma mark - control device


static status_t
ram_disk_control_device_init_device(void* driverCookie, void** _deviceCookie)
{
	*_deviceCookie = driverCookie;
	return B_OK;
}


static void
ram_disk_control_device_uninit_device(void* deviceCookie)
{
}


static status_t
ram_disk_control_device_open(void* deviceCookie, const char* path, int openMode,
	void** _cookie)
{
	*_cookie = deviceCookie;
	return B_OK;
}


static status_t
ram_disk_control_device_close(void* cookie)
{
	return B_OK;
}


static status_t
ram_disk_control_device_free(void* cookie)
{
	return B_OK;
}


static status_t
ram_disk_control_device_read(void* cookie, off_t position, void* buffer,
	size_t* _length)
{
	return B_BAD_VALUE;
}


static status_t
ram_disk_control_device_write(void* cookie, off_t position, const void* data,
	size_t* _length)
{
	return B_BAD_VALUE;
}


static status_t
ram_disk_control_device_control(void* cookie, uint32 op, void* buffer,
	size_t length)
{
	ControlDevice* device = (ControlDevice*)cookie;

	switch (op) {
		case RAM_DISK_IOCTL_REGISTER:
			return handle_ioctl(device, &ioctl_register, buffer);

		case RAM_DISK_IOCTL_UNREGISTER:
			return handle_ioctl(device, &ioctl_unregister, buffer);
	}

	return B_BAD_VALUE;
}


// #pragma mark - raw device


static status_t
ram_disk_raw_device_init_device(void* driverCookie, void** _deviceCookie)
{
	RawDevice* device = static_cast<RawDevice*>((Device*)driverCookie);

	status_t error = device->Prepare();
	if (error != B_OK)
		return error;

	*_deviceCookie = device;
	return B_OK;
}


static void
ram_disk_raw_device_uninit_device(void* deviceCookie)
{
	RawDevice* device = (RawDevice*)deviceCookie;
	device->Unprepare();
}


static status_t
ram_disk_raw_device_open(void* deviceCookie, const char* path, int openMode,
	void** _cookie)
{
	RawDevice* device = (RawDevice*)deviceCookie;

	RawDeviceCookie* cookie = new(std::nothrow) RawDeviceCookie(device,
		openMode);
	if (cookie == NULL)
		return B_NO_MEMORY;

	*_cookie = cookie;
	return B_OK;
}


static status_t
ram_disk_raw_device_close(void* cookie)
{
	return B_OK;
}


static status_t
ram_disk_raw_device_free(void* _cookie)
{
	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
	delete cookie;
	return B_OK;
}


static status_t
ram_disk_raw_device_read(void* _cookie, off_t pos, void* buffer,
	size_t* _length)
{
	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
	RawDevice* device = cookie->Device();

	size_t length = *_length;

	if (pos >= device->DeviceSize())
		return B_BAD_VALUE;
	if (pos + (off_t)length > device->DeviceSize())
		length = device->DeviceSize() - pos;

	IORequest request;
	status_t status = request.Init(pos, (addr_t)buffer, length, false, 0);
	if (status != B_OK)
		return status;

	status = device->DoIO(&request);
	if (status != B_OK)
		return status;

	status = request.Wait(0, 0);
	if (status == B_OK)
		*_length = length;
	return status;
}


static status_t
ram_disk_raw_device_write(void* _cookie, off_t pos, const void* buffer,
	size_t* _length)
{
	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
	RawDevice* device = cookie->Device();

	size_t length = *_length;

	if (pos >= device->DeviceSize())
		return B_BAD_VALUE;
	if (pos + (off_t)length > device->DeviceSize())
		length = device->DeviceSize() - pos;

	IORequest request;
	status_t status = request.Init(pos, (addr_t)buffer, length, true, 0);
	if (status != B_OK)
		return status;

	status = device->DoIO(&request);
	if (status != B_OK)
		return status;

	status = request.Wait(0, 0);
	if (status == B_OK)
		*_length = length;

	return status;
}


static status_t
ram_disk_raw_device_io(void* _cookie, io_request* request)
{
	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
	RawDevice* device = cookie->Device();

	return device->DoIO(request);
}


static status_t
ram_disk_raw_device_control(void* _cookie, uint32 op, void* buffer,
	size_t length)
{
	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
	RawDevice* device = cookie->Device();

	switch (op) {
		case B_GET_DEVICE_SIZE:
		{
			size_t size = device->DeviceSize();
			return user_memcpy(buffer, &size, sizeof(size_t));
		}

		case B_SET_NONBLOCKING_IO:
		case B_SET_BLOCKING_IO:
			return B_OK;

		case B_GET_READ_STATUS:
		case B_GET_WRITE_STATUS:
		{
			bool value = true;
			return user_memcpy(buffer, &value, sizeof(bool));
		}

		case B_GET_GEOMETRY:
		case B_GET_BIOS_GEOMETRY:
		{
			device_geometry geometry;
			geometry.bytes_per_sector = B_PAGE_SIZE;
			geometry.sectors_per_track = 1;
			geometry.cylinder_count = device->DeviceSize() / B_PAGE_SIZE;
				// TODO: We're limited to 2^32 * B_PAGE_SIZE, if we don't use
				// sectors_per_track and head_count.
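			// With one head and one sector per track, the reported cylinder
			// count equals the device size in pages.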
			geometry.head_count = 1;
			geometry.device_type = B_DISK;
			geometry.removable = true;
			geometry.read_only = false;
			geometry.write_once = false;

			return user_memcpy(buffer, &geometry, sizeof(device_geometry));
		}

		case B_GET_MEDIA_STATUS:
		{
			status_t status = B_OK;
			return user_memcpy(buffer, &status, sizeof(status_t));
		}

		case B_GET_ICON_NAME:
			return user_strlcpy((char*)buffer, "devices/drive-ramdisk",
				B_FILE_NAME_LENGTH);

		case B_GET_VECTOR_ICON:
		{
			device_icon iconData;
			if (length != sizeof(device_icon))
				return B_BAD_VALUE;
			if (user_memcpy(&iconData, buffer, sizeof(device_icon)) != B_OK)
				return B_BAD_ADDRESS;

			if (iconData.icon_size >= (int32)sizeof(kRamdiskIcon)) {
				if (user_memcpy(iconData.icon_data, kRamdiskIcon,
						sizeof(kRamdiskIcon)) != B_OK)
					return B_BAD_ADDRESS;
			}

			iconData.icon_size = sizeof(kRamdiskIcon);
			return user_memcpy(buffer, &iconData, sizeof(device_icon));
		}

		case B_SET_UNINTERRUPTABLE_IO:
		case B_SET_INTERRUPTABLE_IO:
		case B_FLUSH_DRIVE_CACHE:
			return B_OK;

		case RAM_DISK_IOCTL_FLUSH:
		{
			status_t error = device->Flush();
			if (error != B_OK) {
				dprintf("ramdisk: flush: Failed to flush device: %s\n",
					strerror(error));
				return error;
			}

			return B_OK;
		}

		case B_TRIM_DEVICE:
		{
			// We know the buffer is kernel-side because it has been
			// preprocessed in devfs
			ASSERT(IS_KERNEL_ADDRESS(buffer));
			return device->Trim((fs_trim_data*)buffer);
		}

		case RAM_DISK_IOCTL_INFO:
			return handle_ioctl(device, &ioctl_info, buffer);
	}

	return B_BAD_VALUE;
}


// #pragma mark -


module_dependency module_dependencies[] = {
	{B_DEVICE_MANAGER_MODULE_NAME, (module_info**)&sDeviceManager},
	{}
};


static const struct driver_module_info sChecksumDeviceDriverModule = {
	{
		kDriverModuleName,
		0,
		NULL
	},

	ram_disk_driver_supports_device,
	ram_disk_driver_register_device,
	ram_disk_driver_init_driver,
	ram_disk_driver_uninit_driver,
	ram_disk_driver_register_child_devices
};

static const struct device_module_info sChecksumControlDeviceModule = {
	{
		kControlDeviceModuleName,
		0,
		NULL
	},

	ram_disk_control_device_init_device,
	ram_disk_control_device_uninit_device,
	NULL,

	ram_disk_control_device_open,
	ram_disk_control_device_close,
	ram_disk_control_device_free,

	ram_disk_control_device_read,
	ram_disk_control_device_write,
	NULL,	// io

	ram_disk_control_device_control,

	NULL,	// select
	NULL	// deselect
};

static const struct device_module_info sChecksumRawDeviceModule = {
	{
		kRawDeviceModuleName,
		0,
		NULL
	},

	ram_disk_raw_device_init_device,
	ram_disk_raw_device_uninit_device,
	NULL,

	ram_disk_raw_device_open,
	ram_disk_raw_device_close,
	ram_disk_raw_device_free,

	ram_disk_raw_device_read,
	ram_disk_raw_device_write,
	ram_disk_raw_device_io,

	ram_disk_raw_device_control,

	NULL,	// select
	NULL	// deselect
};

const module_info* modules[] = {
	(module_info*)&sChecksumDeviceDriverModule,
	(module_info*)&sChecksumControlDeviceModule,
	(module_info*)&sChecksumRawDeviceModule,
	NULL
};