1 /*
2 * Copyright 2010-2013, Ingo Weinhold, ingo_weinhold@gmx.de.
3 * Distributed under the terms of the MIT License.
4 */
5
6
7 #include <file_systems/ram_disk/ram_disk.h>
8
9 #include <ctype.h>
10 #include <errno.h>
11 #include <fcntl.h>
12 #include <stdio.h>
13 #include <string.h>
14 #include <unistd.h>
15
16 #include <algorithm>
17
18 #include <device_manager.h>
19 #include <Drivers.h>
20
21 #include <AutoDeleter.h>
22 #include <StackOrHeapArray.h>
23 #include <util/AutoLock.h>
24 #include <util/DoublyLinkedList.h>
25
26 #include <fs/KPath.h>
27 #include <lock.h>
28 #include <util/fs_trim_support.h>
29 #include <vm/vm.h>
30 #include <vm/VMCache.h>
31 #include <vm/vm_page.h>
32
33 #include "cache_support.h"
34 #include "dma_resources.h"
35 #include "io_requests.h"
36 #include "IOSchedulerSimple.h"
37
38
39 //#define TRACE_RAM_DISK
40 #ifdef TRACE_RAM_DISK
41 # define TRACE(x...) dprintf(x)
42 #else
43 # define TRACE(x...) do {} while (false)
44 #endif
45
46
// Device icon in Haiku's HVIF (vector icon) format; the leading bytes
// 0x6e 0x63 0x69 0x66 are the "ncif" magic. Served to userland via
// B_GET_ICON-style device ioctls. Do not edit by hand -- regenerate with
// the Icon-O-Matic export instead.
static const unsigned char kRamdiskIcon[] = {
	0x6e, 0x63, 0x69, 0x66, 0x0e, 0x03, 0x01, 0x00, 0x00, 0x02, 0x00, 0x16,
	0x02, 0x3c, 0xc7, 0xee, 0x38, 0x9b, 0xc0, 0xba, 0x16, 0x57, 0x3e, 0x39,
	0xb0, 0x49, 0x77, 0xc8, 0x42, 0xad, 0xc7, 0x00, 0xff, 0xff, 0xd3, 0x02,
	0x00, 0x06, 0x02, 0x3c, 0x96, 0x32, 0x3a, 0x4d, 0x3f, 0xba, 0xfc, 0x01,
	0x3d, 0x5a, 0x97, 0x4b, 0x57, 0xa5, 0x49, 0x84, 0x4d, 0x00, 0x47, 0x47,
	0x47, 0xff, 0xa5, 0xa0, 0xa0, 0x02, 0x00, 0x16, 0x02, 0xbc, 0x59, 0x2f,
	0xbb, 0x29, 0xa7, 0x3c, 0x0c, 0xe4, 0xbd, 0x0b, 0x7c, 0x48, 0x92, 0xc0,
	0x4b, 0x79, 0x66, 0x00, 0x7d, 0xff, 0xd4, 0x02, 0x00, 0x06, 0x02, 0x38,
	0xdb, 0xb4, 0x39, 0x97, 0x33, 0xbc, 0x4a, 0x33, 0x3b, 0xa5, 0x42, 0x48,
	0x6e, 0x66, 0x49, 0xee, 0x7b, 0x00, 0x59, 0x67, 0x56, 0xff, 0xeb, 0xb2,
	0xb2, 0x03, 0xa7, 0xff, 0x00, 0x03, 0xff, 0x00, 0x00, 0x04, 0x01, 0x80,
	0x03, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x6a, 0x05, 0x33, 0x02,
	0x00, 0x06, 0x02, 0x3a, 0x5d, 0x2c, 0x39, 0xf8, 0xb1, 0xb9, 0xdb, 0xf1,
	0x3a, 0x4c, 0x0f, 0x48, 0xae, 0xea, 0x4a, 0xc0, 0x91, 0x00, 0x74, 0x74,
	0x74, 0xff, 0x3e, 0x3d, 0x3d, 0x02, 0x00, 0x16, 0x02, 0x38, 0x22, 0x1b,
	0x3b, 0x11, 0x73, 0xbc, 0x5e, 0xb5, 0x39, 0x4b, 0xaa, 0x4a, 0x47, 0xf1,
	0x49, 0xc2, 0x1d, 0x00, 0xb0, 0xff, 0x83, 0x02, 0x00, 0x16, 0x03, 0x36,
	0xed, 0xe9, 0x36, 0xb9, 0x49, 0xba, 0x0a, 0xf6, 0x3a, 0x32, 0x6f, 0x4a,
	0x79, 0xef, 0x4b, 0x03, 0xe7, 0x00, 0x5a, 0x38, 0xdc, 0xff, 0x7e, 0x0d,
	0x0a, 0x06, 0x22, 0x3c, 0x22, 0x49, 0x44, 0x5b, 0x5a, 0x3e, 0x5a, 0x31,
	0x39, 0x25, 0x0a, 0x04, 0x22, 0x3c, 0x44, 0x4b, 0x5a, 0x31, 0x39, 0x25,
	0x0a, 0x04, 0x44, 0x4b, 0x44, 0x5b, 0x5a, 0x3e, 0x5a, 0x31, 0x0a, 0x04,
	0x22, 0x3c, 0x22, 0x49, 0x44, 0x5b, 0x44, 0x4b, 0x08, 0x02, 0x27, 0x43,
	0xb8, 0x14, 0xc1, 0xf1, 0x08, 0x02, 0x26, 0x43, 0x29, 0x44, 0x0a, 0x05,
	0x44, 0x5d, 0x49, 0x5d, 0x60, 0x3e, 0x5a, 0x3b, 0x5b, 0x3f, 0x0a, 0x04,
	0x3c, 0x5a, 0x5a, 0x3c, 0x5a, 0x36, 0x3c, 0x52, 0x0a, 0x04, 0x24, 0x4e,
	0x3c, 0x5a, 0x3c, 0x52, 0x24, 0x48, 0x06, 0x07, 0xaa, 0x3f, 0x42, 0x2e,
	0x24, 0x48, 0x3c, 0x52, 0x5a, 0x36, 0x51, 0x33, 0x51, 0x33, 0x50, 0x34,
	0x4b, 0x33, 0x4d, 0x34, 0x49, 0x32, 0x49, 0x30, 0x48, 0x31, 0x49, 0x30,
	0x06, 0x08, 0xfa, 0xfa, 0x42, 0x50, 0x3e, 0x54, 0x40, 0x55, 0x3f, 0xc7,
	0xeb, 0x41, 0xc8, 0x51, 0x42, 0xc9, 0x4f, 0x42, 0xc8, 0xda, 0x42, 0xca,
	0x41, 0xc0, 0xf1, 0x5d, 0x45, 0xca, 0x81, 0x46, 0xc7, 0xb7, 0x46, 0xc8,
	0xa9, 0x46, 0xc7, 0x42, 0x44, 0x51, 0x45, 0xc6, 0xb9, 0x43, 0xc6, 0x53,
	0x0a, 0x07, 0x3c, 0x5c, 0x40, 0x5c, 0x42, 0x5e, 0x48, 0x5e, 0x4a, 0x5c,
	0x46, 0x5a, 0x45, 0x4b, 0x06, 0x09, 0x9a, 0xf6, 0x03, 0x42, 0x2e, 0x24,
	0x48, 0x4e, 0x3c, 0x5a, 0x5a, 0x3c, 0x36, 0x51, 0x33, 0x51, 0x33, 0x50,
	0x34, 0x4b, 0x33, 0x4d, 0x34, 0x49, 0x32, 0x49, 0x30, 0x48, 0x31, 0x49,
	0x30, 0x18, 0x0a, 0x07, 0x01, 0x06, 0x00, 0x0a, 0x00, 0x01, 0x00, 0x10,
	0x01, 0x17, 0x84, 0x00, 0x04, 0x0a, 0x01, 0x01, 0x01, 0x00, 0x0a, 0x02,
	0x01, 0x02, 0x00, 0x0a, 0x03, 0x01, 0x03, 0x00, 0x0a, 0x04, 0x01, 0x04,
	0x10, 0x01, 0x17, 0x85, 0x20, 0x04, 0x0a, 0x06, 0x01, 0x05, 0x30, 0x24,
	0xb3, 0x99, 0x01, 0x17, 0x82, 0x00, 0x04, 0x0a, 0x05, 0x01, 0x05, 0x30,
	0x20, 0xb2, 0xe6, 0x01, 0x17, 0x82, 0x00, 0x04, 0x0a, 0x09, 0x01, 0x0b,
	0x02, 0x3e, 0x9b, 0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0,
	0x21, 0x48, 0xed, 0x4d, 0xc8, 0x5a, 0x02, 0x0a, 0x09, 0x01, 0x0b, 0x02,
	0x3e, 0x9b, 0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21,
	0x48, 0x4c, 0xd4, 0xc7, 0x9c, 0x11, 0x0a, 0x09, 0x01, 0x0b, 0x02, 0x3e,
	0x9b, 0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x47,
	0x5c, 0xe7, 0xc6, 0x2c, 0x1a, 0x0a, 0x09, 0x01, 0x0b, 0x02, 0x3e, 0x9b,
	0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x46, 0x1b,
	0xf5, 0xc4, 0x28, 0x4e, 0x0a, 0x08, 0x01, 0x0c, 0x12, 0x3e, 0xc0, 0x21,
	0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x45, 0xb6, 0x34,
	0xc4, 0x22, 0x1f, 0x01, 0x17, 0x84, 0x00, 0x04, 0x0a, 0x0a, 0x01, 0x07,
	0x02, 0x3e, 0xc0, 0x21, 0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0,
	0x21, 0x45, 0xb6, 0x34, 0xc4, 0x22, 0x1f, 0x0a, 0x0b, 0x01, 0x08, 0x02,
	0x3e, 0xc0, 0x21, 0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21,
	0x45, 0xb6, 0x34, 0xc4, 0x22, 0x1f, 0x0a, 0x0c, 0x01, 0x09, 0x02, 0x3e,
	0xc0, 0x21, 0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x45,
	0xb6, 0x34, 0xc4, 0x22, 0x1f, 0x0a, 0x08, 0x01, 0x0a, 0x12, 0x3e, 0x98,
	0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x48, 0xf2,
	0x4e, 0xc7, 0xee, 0x3f, 0x01, 0x17, 0x84, 0x22, 0x04, 0x0a, 0x0d, 0x01,
	0x0a, 0x02, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e,
	0x7b, 0x5e, 0x48, 0xf2, 0x4e, 0xc7, 0xee, 0x3f, 0x0a, 0x08, 0x01, 0x0a,
	0x12, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b,
	0x5e, 0x48, 0x53, 0xa1, 0xc6, 0xa0, 0xb6, 0x01, 0x17, 0x84, 0x22, 0x04,
	0x0a, 0x0d, 0x01, 0x0a, 0x02, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35,
	0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x48, 0x53, 0xa1, 0xc6, 0xa0, 0xb6, 0x0a,
	0x08, 0x01, 0x0a, 0x12, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9,
	0x3d, 0x3e, 0x7b, 0x5e, 0x47, 0x69, 0xe9, 0xc4, 0xa6, 0x5a, 0x01, 0x17,
	0x84, 0x22, 0x04, 0x0a, 0x0d, 0x01, 0x0a, 0x02, 0x3e, 0x98, 0xfd, 0xb5,
	0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x47, 0x69, 0xe9, 0xc4,
	0xa6, 0x5a, 0x0a, 0x08, 0x01, 0x0a, 0x12, 0x3e, 0x98, 0xfd, 0xb5, 0xf6,
	0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x46, 0x2c, 0x90, 0xb8, 0xd1,
	0xff, 0x01, 0x17, 0x84, 0x22, 0x04, 0x0a, 0x0d, 0x01, 0x0a, 0x02, 0x3e,
	0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x46,
	0x2c, 0x90, 0xb8, 0xd1, 0xff
};
125
126
// parameters for the DMA resource
static const uint32 kDMAResourceBufferCount = 16;
static const uint32 kDMAResourceBounceBufferCount = 16;

// module names under which this driver and its devices register with the
// device manager
static const char* const kDriverModuleName
	= "drivers/disk/virtual/ram_disk/driver_v1";
static const char* const kControlDeviceModuleName
	= "drivers/disk/virtual/ram_disk/control/device_v1";
static const char* const kRawDeviceModuleName
	= "drivers/disk/virtual/ram_disk/raw/device_v1";

// names of the published /dev entries
static const char* const kControlDeviceName = RAM_DISK_CONTROL_DEVICE_NAME;
static const char* const kRawDeviceBaseName = RAM_DISK_RAW_DEVICE_BASE_NAME;

// device node attribute names attached to raw device nodes
static const char* const kFilePathItem = "ram_disk/file_path";
static const char* const kDeviceSizeItem = "ram_disk/device_size";
static const char* const kDeviceIDItem = "ram_disk/id";


struct RawDevice;
typedef DoublyLinkedList<RawDevice> RawDeviceList;

// device manager interface, set up when the module is initialized
struct device_manager_info* sDeviceManager;

// list of all registered raw devices; protected by sDeviceListLock
static RawDeviceList sDeviceList;
static mutex sDeviceListLock = MUTEX_INITIALIZER("ram disk device list");
static uint64 sUsedRawDeviceIDs = 0;
	// bitmap of allocated raw device IDs (bit i set <=> ID i in use);
	// also protected by sDeviceListLock


static int32 allocate_raw_device_id();
static void free_raw_device_id(int32 id);
158
159
// Base class for both ram disk device flavors (control device and raw
// devices). Wraps the device_node and provides a per-device mutex.
struct Device {
	Device(device_node* node)
		:
		fNode(node)
	{
		mutex_init(&fLock, "ram disk device");
	}

	virtual ~Device()
	{
		mutex_destroy(&fLock);
	}

	// Lock() cannot fail; the bool return merely makes it usable in
	// conditional expressions.
	bool Lock()		{ mutex_lock(&fLock); return true; }
	void Unlock()	{ mutex_unlock(&fLock); }

	device_node* Node() const	{ return fNode; }

	// Publish the device's entry in /dev (implemented by the subclasses).
	virtual status_t PublishDevice() = 0;

protected:
	mutex			fLock;
	device_node*	fNode;
};
184
185
// The control device. Userland talks to it via ioctls to register and
// unregister raw ram disk devices.
struct ControlDevice : Device {
	ControlDevice(device_node* node)
		:
		Device(node)
	{
	}

	// Register a new raw device node with the given size and optional
	// backing file path (may be NULL). On success the newly allocated
	// device ID is returned in _id. Returns B_BUSY when all IDs are taken.
	status_t Register(const char* filePath, uint64 deviceSize, int32& _id)
	{
		int32 id = allocate_raw_device_id();
		if (id < 0)
			return B_BUSY;

		device_attr attrs[] = {
			{B_DEVICE_PRETTY_NAME, B_STRING_TYPE,
				{.string = "RAM Disk Raw Device"}},
			{kDeviceSizeItem, B_UINT64_TYPE, {.ui64 = deviceSize}},
			{kDeviceIDItem, B_UINT32_TYPE, {.ui32 = (uint32)id}},
			{kFilePathItem, B_STRING_TYPE, {.string = filePath}},
			{NULL}
		};

		// If filePath is NULL, remove the attribute.
		if (filePath == NULL) {
			size_t count = sizeof(attrs) / sizeof(attrs[0]);
			memset(attrs + count - 2, 0, sizeof(attrs[0]));
				// zeroing the kFilePathItem entry turns it into the
				// NULL terminator of the array
		}

		// The new node becomes a sibling of our own node.
		status_t error = sDeviceManager->register_node(
			sDeviceManager->get_parent_node(Node()), kDriverModuleName, attrs,
			NULL, NULL);
		if (error != B_OK) {
			free_raw_device_id(id);
			return error;
		}

		_id = id;
		return B_OK;
	}

	virtual status_t PublishDevice()
	{
		return sDeviceManager->publish_device(Node(), kControlDeviceName,
			kControlDeviceModuleName);
	}
};
232
233
// A raw ram disk device. The disk contents live in an anonymous VMCache;
// the disk can optionally be backed by a file, which is loaded into the
// cache in Prepare() and written back on demand by Flush(). I/O requests
// are executed through an IOSchedulerSimple whose callback copies between
// the cache pages and the request's I/O vectors.
struct RawDevice : Device, DoublyLinkedListLinkImpl<RawDevice> {
	RawDevice(device_node* node)
		:
		Device(node),
		fID(-1),
		fUnregistered(false),
		fDeviceSize(0),
		fDeviceName(NULL),
		fFilePath(NULL),
		fCache(NULL),
		fDMAResource(NULL),
		fIOScheduler(NULL)
	{
	}

	virtual ~RawDevice()
	{
		// Init() inserted us into the global list iff we got a valid ID
		if (fID >= 0) {
			MutexLocker locker(sDeviceListLock);
			sDeviceList.Remove(this);
		}

		free(fDeviceName);
		free(fFilePath);
	}

	int32 ID() const				{ return fID; }
	off_t DeviceSize() const		{ return fDeviceSize; }
	const char* DeviceName() const	{ return fDeviceName; }

	bool IsUnregistered() const		{ return fUnregistered; }

	void SetUnregistered(bool unregistered)
	{
		fUnregistered = unregistered;
	}

	// Set ID, backing file path, and size (rounded up to a multiple of
	// B_PAGE_SIZE), build the device name, and insert the device into the
	// global list, keeping the list sorted by ID.
	status_t Init(int32 id, const char* filePath, uint64 deviceSize)
	{
		fID = id;
		fFilePath = filePath != NULL ? strdup(filePath) : NULL;
		if (filePath != NULL && fFilePath == NULL)
			return B_NO_MEMORY;

		fDeviceSize = (deviceSize + B_PAGE_SIZE - 1) / B_PAGE_SIZE
			* B_PAGE_SIZE;

		// Refuse zero-sized disks and disks larger than 2/3 of the
		// machine's physical memory.
		if (fDeviceSize < B_PAGE_SIZE
			|| (uint64)fDeviceSize / B_PAGE_SIZE
				> vm_page_num_pages() * 2 / 3) {
			return B_BAD_VALUE;
		}

		// construct our device path
		KPath path(kRawDeviceBaseName);
		char buffer[32];
		snprintf(buffer, sizeof(buffer), "%" B_PRId32 "/raw", fID);

		status_t error = path.Append(buffer);
		if (error != B_OK)
			return error;

		fDeviceName = path.DetachBuffer();

		// insert into device list (sorted by ID)
		RawDevice* nextDevice = NULL;
		MutexLocker locker(sDeviceListLock);
		for (RawDeviceList::Iterator it = sDeviceList.GetIterator();
				(nextDevice = it.Next()) != NULL;) {
			if (nextDevice->ID() > fID)
				break;
		}

		sDeviceList.InsertBefore(nextDevice, this);

		return B_OK;
	}

	// Allocate the backing cache, optionally load the backing file, and
	// set up the DMA resource and I/O scheduler. On any failure all state
	// created so far is torn down via Unprepare() before returning.
	status_t Prepare()
	{
		status_t error = VMCacheFactory::CreateAnonymousCache(fCache, false, 0,
			0, false, VM_PRIORITY_SYSTEM);
		if (error != B_OK) {
			Unprepare();
			return error;
		}

		fCache->temporary = 1;
		fCache->virtual_end = fDeviceSize;

		fCache->Lock();
		error = fCache->Commit(fDeviceSize, VM_PRIORITY_USER);
		fCache->Unlock();
		if (error != B_OK) {
			Unprepare();
			return error;
		}

		// populate the cache from the backing file, if we have one
		if (fFilePath != NULL) {
			error = _LoadFile();
			if (error != B_OK) {
				Unprepare();
				return error;
			}
		}

		// no DMA restrictions
		const dma_restrictions restrictions = {};

		fDMAResource = new(std::nothrow) DMAResource;
		if (fDMAResource == NULL) {
			Unprepare();
			return B_NO_MEMORY;
		}

		error = fDMAResource->Init(restrictions, B_PAGE_SIZE,
			kDMAResourceBufferCount, kDMAResourceBounceBufferCount);
		if (error != B_OK) {
			Unprepare();
			return error;
		}

		fIOScheduler = new(std::nothrow) IOSchedulerSimple(fDMAResource);
		if (fIOScheduler == NULL) {
			Unprepare();
			return B_NO_MEMORY;
		}

		error = fIOScheduler->Init("ram disk device scheduler");
		if (error != B_OK) {
			Unprepare();
			return error;
		}

		fIOScheduler->SetCallback(&_DoIOEntry, this);

		return B_OK;
	}

	// Tear down everything Prepare() set up. Safe to call on partially
	// initialized state (all members are NULL-checked or deletable).
	void Unprepare()
	{
		delete fIOScheduler;
		fIOScheduler = NULL;

		delete fDMAResource;
		fDMAResource = NULL;

		if (fCache != NULL) {
			fCache->Lock();
			fCache->ReleaseRefAndUnlock();
			fCache = NULL;
		}
	}

	// Fill in the ioctl info structure: ID, size, and backing file path
	// (zero-filled when there is no backing file).
	void GetInfo(ram_disk_ioctl_info& _info) const
	{
		_info.id = fID;
		_info.size = fDeviceSize;
		memset(&_info.path, 0, sizeof(_info.path));
		if (fFilePath != NULL)
			strlcpy(_info.path, fFilePath, sizeof(_info.path));
	}

	// Write all modified cache pages back to the backing file. Pages are
	// gathered in batches of up to kPageCountPerIteration, allowing gaps
	// of at most kMaxGapSize non-modified pages; each batch is marked
	// busy, copied into a bounce buffer with the cache unlocked, and
	// written with a single pwrite().
	status_t Flush()
	{
		static const size_t kPageCountPerIteration = 1024;
		static const size_t kMaxGapSize = 15;

		FileDescriptorCloser fd(open(fFilePath, O_WRONLY));
		if (!fd.IsSet())
			return errno;

		vm_page** pages = new(std::nothrow) vm_page*[kPageCountPerIteration];
		ArrayDeleter<vm_page*> pagesDeleter(pages);

		uint8* buffer = (uint8*)malloc(kPageCountPerIteration * B_PAGE_SIZE);
		MemoryDeleter bufferDeleter(buffer);

		if (pages == NULL || buffer == NULL)
			return B_NO_MEMORY;

		// Iterate through all pages of the cache and write those back that have
		// been modified.
		AutoLocker<VMCache> locker(fCache);

		status_t error = B_OK;

		for (off_t offset = 0; offset < fDeviceSize;) {
			// find the first modified page at or after the current offset
			VMCachePagesTree::Iterator it
				= fCache->pages.GetIterator(offset / B_PAGE_SIZE, true, true);
			vm_page* firstModified;
			while ((firstModified = it.Next()) != NULL
				&& !firstModified->modified) {
			}

			if (firstModified == NULL)
				break;

			// if the page is busy, wait for it and retry from the same offset
			if (firstModified->busy) {
				fCache->WaitForPageEvents(firstModified, PAGE_EVENT_NOT_BUSY,
					true);
				continue;
			}

			pages[0] = firstModified;
			page_num_t firstPageIndex = firstModified->cache_offset;
			offset = firstPageIndex * B_PAGE_SIZE;

			// Collect more pages until the gap between two modified pages gets
			// too large or we hit the end of our array.
			size_t previousModifiedIndex = 0;
			size_t previousIndex = 0;
			while (vm_page* page = it.Next()) {
				page_num_t index = page->cache_offset - firstPageIndex;
				if (page->busy
					|| index >= kPageCountPerIteration
					|| index - previousModifiedIndex > kMaxGapSize) {
					break;
				}

				pages[index] = page;

				// clear page array gap since the previous page
				if (previousIndex + 1 < index) {
					memset(pages + previousIndex + 1, 0,
						(index - previousIndex - 1) * sizeof(vm_page*));
				}

				previousIndex = index;
				if (page->modified)
					previousModifiedIndex = index;
			}

			// mark all pages we want to write busy
			size_t pagesToWrite = previousModifiedIndex + 1;
			for (size_t i = 0; i < pagesToWrite; i++) {
				if (vm_page* page = pages[i]) {
					DEBUG_PAGE_ACCESS_START(page);
					page->busy = true;
				}
			}

			locker.Unlock();

			// copy the pages to our buffer
			for (size_t i = 0; i < pagesToWrite; i++) {
				if (vm_page* page = pages[i]) {
					error = vm_memcpy_from_physical(buffer + i * B_PAGE_SIZE,
						page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE,
						false);
					if (error != B_OK) {
						dprintf("ramdisk: error copying page %" B_PRIu64
							" data: %s\n", (uint64)page->physical_page_number,
							strerror(error));
						break;
					}
				} else
					memset(buffer + i * B_PAGE_SIZE, 0, B_PAGE_SIZE);
						// gap in the batch -- write zeros
			}

			// write the buffer
			if (error == B_OK) {
				ssize_t bytesWritten = pwrite(fd.Get(), buffer,
					pagesToWrite * B_PAGE_SIZE, offset);
				if (bytesWritten < 0) {
					dprintf("ramdisk: error writing pages to file: %s\n",
						strerror(bytesWritten));
					error = bytesWritten;
				}
				else if ((size_t)bytesWritten != pagesToWrite * B_PAGE_SIZE) {
					dprintf("ramdisk: error writing pages to file: short "
						"write (%zd/%zu)\n", bytesWritten,
						pagesToWrite * B_PAGE_SIZE);
					error = B_ERROR;
				}
			}

			// mark the pages unbusy, on success also unmodified
			locker.Lock();

			for (size_t i = 0; i < pagesToWrite; i++) {
				if (vm_page* page = pages[i]) {
					if (error == B_OK)
						page->modified = false;
					fCache->MarkPageUnbusy(page);
					DEBUG_PAGE_ACCESS_END(page);
				}
			}

			if (error != B_OK)
				break;

			offset += pagesToWrite * B_PAGE_SIZE;
		}

		return error;
	}

	// Free the cache pages of all whole-page ranges covered by the trim
	// request. Range offsets are rounded up and lengths rounded down to
	// page boundaries, so partial pages at the edges stay untouched.
	// Reports the number of actually freed bytes in trimData->trimmed_size.
	status_t Trim(fs_trim_data* trimData)
	{
		TRACE("trim_device()\n");

		trimData->trimmed_size = 0;

		const off_t deviceSize = fDeviceSize; // in bytes
		if (deviceSize < 0)
			return B_BAD_VALUE;

		STATIC_ASSERT(sizeof(deviceSize) <= sizeof(uint64));
		ASSERT(deviceSize >= 0);

		// Do not trim past device end
		for (uint32 i = 0; i < trimData->range_count; i++) {
			uint64 offset = trimData->ranges[i].offset;
			uint64& size = trimData->ranges[i].size;

			if (offset >= (uint64)deviceSize)
				return B_BAD_VALUE;
			size = min_c(size, (uint64)deviceSize - offset);
		}

		status_t result = B_OK;
		uint64 trimmedSize = 0;
		for (uint32 i = 0; i < trimData->range_count; i++) {
			uint64 offset = trimData->ranges[i].offset;
			uint64 length = trimData->ranges[i].size;

			// Round up offset and length to multiple of the page size
			// The offset is rounded up, so some space may be left
			// (not trimmed) at the start of the range.
			offset = (offset + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
			// Adjust the length for the possibly skipped range
			length -= offset - trimData->ranges[i].offset;
			// The length is rounded down, so some space at the end may also
			// be left (not trimmed).
			length &= ~(B_PAGE_SIZE - 1);

			if (length == 0)
				continue;

			TRACE("ramdisk: trim %" B_PRIu64 " bytes from %" B_PRIu64 "\n",
				length, offset);

			ASSERT(offset % B_PAGE_SIZE == 0);
			ASSERT(length % B_PAGE_SIZE == 0);

			BStackOrHeapArray<vm_page*, 16> pages(length / B_PAGE_SIZE);
			if (!pages.IsValid()) {
				result = B_NO_MEMORY;
				break;
			}

			cache_get_pages(fCache, (off_t)offset, (off_t)length, false, pages);

			AutoLocker<VMCache> locker(fCache);
			uint64 j;
			for (j = 0; j < length / B_PAGE_SIZE; j++) {
				// If we run out of pages (some may already be trimmed), stop.
				if (pages[j] == NULL)
					break;

				TRACE("free range %" B_PRIu32 ", page %" B_PRIu64 ", offset %"
					B_PRIu64 "\n", i, j, offset);
				if (pages[j]->Cache())
					fCache->RemovePage(pages[j]);
				vm_page_free(NULL, pages[j]);
				trimmedSize += B_PAGE_SIZE;
			}
		}

		trimData->trimmed_size = trimmedSize;

		return result;
	}

	// Hand an I/O request to the scheduler; the actual work happens in
	// _DoIO(), invoked via the _DoIOEntry() callback.
	status_t DoIO(IORequest* request)
	{
		return fIOScheduler->ScheduleRequest(request);
	}

	virtual status_t PublishDevice()
	{
		return sDeviceManager->publish_device(Node(), fDeviceName,
			kRawDeviceModuleName);
	}

private:
	// I/O scheduler callback trampoline; data is the RawDevice.
	static status_t _DoIOEntry(void* data, IOOperation* operation)
	{
		return ((RawDevice*)data)->_DoIO(operation);
	}

	// Execute one page-aligned I/O operation by copying page by page
	// between the cache and the operation's I/O vectors. Always calls
	// OperationCompleted() on the scheduler, with the error on failure.
	status_t _DoIO(IOOperation* operation)
	{
		off_t offset = operation->Offset();
		generic_size_t length = operation->Length();

		ASSERT(offset % B_PAGE_SIZE == 0);
		ASSERT(length % B_PAGE_SIZE == 0);

		const generic_io_vec* vecs = operation->Vecs();
		generic_size_t vecOffset = 0;
		bool isWrite = operation->IsWrite();

		BStackOrHeapArray<vm_page*, 16> pages(length / B_PAGE_SIZE);
		if (!pages.IsValid())
			return B_NO_MEMORY;

		cache_get_pages(fCache, offset, length, isWrite, pages);

		status_t error = B_OK;
		size_t index = 0;

		while (length > 0) {
			vm_page* page = pages[index];

			if (isWrite)
				page->modified = true;

			error = _CopyData(page, vecs, vecOffset, isWrite);
			if (error != B_OK)
				break;

			offset += B_PAGE_SIZE;
			length -= B_PAGE_SIZE;
			index++;
		}

		cache_put_pages(fCache, operation->Offset(), operation->Length(), pages,
			error == B_OK);

		if (error != B_OK) {
			fIOScheduler->OperationCompleted(operation, error, 0);
			return error;
		}

		fIOScheduler->OperationCompleted(operation, B_OK, operation->Length());
		return B_OK;
	}

	// Copy one page worth of data between the given page and the I/O
	// vectors: vecs -> page when toPage is true, page -> vecs otherwise.
	// A NULL page stands for a not-present (all-zero) page, in which case
	// reading fills the vecs with zeros. vecs/vecOffset are advanced so
	// consecutive calls continue where the previous one stopped.
	status_t _CopyData(vm_page* page, const generic_io_vec*& vecs,
		generic_size_t& vecOffset, bool toPage)
	{
		// map page to virtual memory
		Thread* thread = thread_get_current_thread();
		uint8* pageData = NULL;
		void* handle;
		if (page != NULL) {
			// pin to this CPU while we hold the CPU-local page mapping
			thread_pin_to_current_cpu(thread);
			addr_t virtualAddress;
			status_t error = vm_get_physical_page_current_cpu(
				page->physical_page_number * B_PAGE_SIZE, &virtualAddress,
				&handle);
			if (error != B_OK) {
				thread_unpin_from_current_cpu(thread);
				return error;
			}

			pageData = (uint8*)virtualAddress;
		}

		status_t error = B_OK;
		size_t length = B_PAGE_SIZE;
		while (length > 0) {
			size_t toCopy = std::min((generic_size_t)length,
				vecs->length - vecOffset);

			if (toCopy == 0) {
				// current vec exhausted -- move on to the next one
				vecs++;
				vecOffset = 0;
				continue;
			}

			phys_addr_t vecAddress = vecs->base + vecOffset;

			error = toPage
				? vm_memcpy_from_physical(pageData, vecAddress, toCopy, false)
				: (page != NULL
					? vm_memcpy_to_physical(vecAddress, pageData, toCopy, false)
					: vm_memset_physical(vecAddress, 0, toCopy));
			if (error != B_OK)
				break;

			pageData += toCopy;
			length -= toCopy;
			vecOffset += toCopy;
		}

		if (page != NULL) {
			vm_put_physical_page_current_cpu((addr_t)pageData, handle);
			thread_unpin_from_current_cpu(thread);
		}

		return error;
	}

	// Read the backing file into the cache in batches. All-zero pages are
	// not inserted (they are recycled for the next batch), keeping the
	// ram disk sparse for sparse files.
	status_t _LoadFile()
	{
		static const size_t kPageCountPerIteration = 1024;

		FileDescriptorCloser fd(open(fFilePath, O_RDONLY));
		if (!fd.IsSet())
			return errno;

		ArrayDeleter<vm_page*> pages(
			new(std::nothrow) vm_page*[kPageCountPerIteration]);

		ArrayDeleter<uint8> buffer(
			new(std::nothrow) uint8[kPageCountPerIteration * B_PAGE_SIZE]);
			// TODO: Ideally we wouldn't use a buffer to read the file content,
			// but read into the pages we allocated directly. Unfortunately
			// there's no API to do that yet.

		if (!pages.IsSet() || !buffer.IsSet())
			return B_NO_MEMORY;

		status_t error = B_OK;

		page_num_t allocatedPages = 0;
		off_t offset = 0;
		off_t sizeRemaining = fDeviceSize;
		while (sizeRemaining > 0) {
			// Note: fDeviceSize is B_PAGE_SIZE aligned.
			size_t pagesToRead = std::min(kPageCountPerIteration,
				size_t(sizeRemaining / B_PAGE_SIZE));

			// allocate the missing pages
			if (allocatedPages < pagesToRead) {
				vm_page_reservation reservation;
				vm_page_reserve_pages(&reservation,
					pagesToRead - allocatedPages, VM_PRIORITY_SYSTEM);

				while (allocatedPages < pagesToRead) {
					pages[allocatedPages++]
						= vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
				}

				vm_page_unreserve_pages(&reservation);
			}

			// read from the file
			size_t bytesToRead = pagesToRead * B_PAGE_SIZE;
			ssize_t bytesRead = pread(fd.Get(), buffer.Get(), bytesToRead,
				offset);
			if (bytesRead < 0) {
				error = bytesRead;
				break;
			}
			size_t pagesRead = (bytesRead + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
			if (pagesRead < pagesToRead) {
				// short read -- bail out
				error = B_ERROR;
				break;
			}

			// clear the last read page, if partial
			if ((size_t)bytesRead < pagesRead * B_PAGE_SIZE) {
				memset(buffer.Get() + bytesRead, 0,
					pagesRead * B_PAGE_SIZE - bytesRead);
			}

			// copy data to allocated pages
			for (size_t i = 0; i < pagesRead; i++) {
				vm_page* page = pages[i];
				error = vm_memcpy_to_physical(
					page->physical_page_number * B_PAGE_SIZE,
					buffer.Get() + i * B_PAGE_SIZE, B_PAGE_SIZE, false);
				if (error != B_OK)
					break;
			}

			if (error != B_OK)
				break;

			// Add pages to cache. Ignore clear pages, though. Move those to the
			// beginning of the array, so we can reuse them in the next
			// iteration.
			AutoLocker<VMCache> locker(fCache);

			size_t clearPages = 0;
			for (size_t i = 0; i < pagesRead; i++) {
				// scan the page in 8-byte strides for any non-zero data
				uint64* pageData = (uint64*)(buffer.Get() + i * B_PAGE_SIZE);
				bool isClear = true;
				for (size_t k = 0; isClear && k < B_PAGE_SIZE / 8; k++)
					isClear = pageData[k] == 0;

				if (isClear) {
					pages[clearPages++] = pages[i];
				} else {
					fCache->InsertPage(pages[i], offset + i * B_PAGE_SIZE);
					DEBUG_PAGE_ACCESS_END(pages[i]);
				}
			}

			locker.Unlock();

			// Move any left-over allocated pages to the end of the empty pages
			// and compute the new allocated pages count.
			if (pagesRead < allocatedPages) {
				size_t count = allocatedPages - pagesRead;
				memcpy(pages.Get() + clearPages, pages.Get() + pagesRead,
					count * sizeof(vm_page*));
				clearPages += count;
			}
			allocatedPages = clearPages;

			offset += pagesRead * B_PAGE_SIZE;
			sizeRemaining -= pagesRead * B_PAGE_SIZE;
		}

		// free left-over allocated pages
		for (size_t i = 0; i < allocatedPages; i++)
			vm_page_free(NULL, pages[i]);

		return error;
	}

private:
	int32			fID;
	bool			fUnregistered;
	off_t			fDeviceSize;
	char*			fDeviceName;
	char*			fFilePath;
	VMCache*		fCache;
	DMAResource*	fDMAResource;
	IOScheduler*	fIOScheduler;
};
861
862
// Per-open cookie for a raw device: remembers the device and the mode the
// file descriptor was opened with.
struct RawDeviceCookie {
	RawDeviceCookie(RawDevice* device, int openMode)
		:
		fDevice(device),
		fOpenMode(openMode)
	{
	}

	RawDevice* Device() const	{ return fDevice; }
	int OpenMode() const		{ return fOpenMode; }

private:
	RawDevice*	fDevice;
	int			fOpenMode;
};
878
879
880 // #pragma mark -
881
882
883 static int32
allocate_raw_device_id()884 allocate_raw_device_id()
885 {
886 MutexLocker deviceListLocker(sDeviceListLock);
887 for (size_t i = 0; i < sizeof(sUsedRawDeviceIDs) * 8; i++) {
888 if ((sUsedRawDeviceIDs & ((uint64)1 << i)) == 0) {
889 sUsedRawDeviceIDs |= (uint64)1 << i;
890 return (int32)i;
891 }
892 }
893
894 return -1;
895 }
896
897
898 static void
free_raw_device_id(int32 id)899 free_raw_device_id(int32 id)
900 {
901 MutexLocker deviceListLocker(sDeviceListLock);
902 sUsedRawDeviceIDs &= ~((uint64)1 << id);
903 }
904
905
906 static RawDevice*
find_raw_device(int32 id)907 find_raw_device(int32 id)
908 {
909 for (RawDeviceList::Iterator it = sDeviceList.GetIterator();
910 RawDevice* device = it.Next();) {
911 if (device->ID() == id)
912 return device;
913 }
914
915 return NULL;
916 }
917
918
919 static status_t
ioctl_register(ControlDevice * controlDevice,ram_disk_ioctl_register * request)920 ioctl_register(ControlDevice* controlDevice, ram_disk_ioctl_register* request)
921 {
922 KPath path;
923 uint64 deviceSize = 0;
924
925 if (request->path[0] != '\0') {
926 // check if the path is null-terminated
927 if (strnlen(request->path, sizeof(request->path))
928 == sizeof(request->path)) {
929 return B_BAD_VALUE;
930 }
931
932 // get a normalized file path
933 status_t error = path.SetTo(request->path, true);
934 if (error != B_OK) {
935 dprintf("ramdisk: register: Invalid path \"%s\": %s\n",
936 request->path, strerror(error));
937 return B_BAD_VALUE;
938 }
939
940 struct stat st;
941 if (lstat(path.Path(), &st) != 0) {
942 dprintf("ramdisk: register: Failed to stat \"%s\": %s\n",
943 path.Path(), strerror(errno));
944 return errno;
945 }
946
947 if (!S_ISREG(st.st_mode)) {
948 dprintf("ramdisk: register: \"%s\" is not a file!\n", path.Path());
949 return B_BAD_VALUE;
950 }
951
952 deviceSize = st.st_size;
953 } else {
954 deviceSize = request->size;
955 }
956
957 return controlDevice->Register(path.Length() > 0 ? path.Path() : NULL,
958 deviceSize, request->id);
959 }
960
961
962 static status_t
ioctl_unregister(ControlDevice * controlDevice,ram_disk_ioctl_unregister * request)963 ioctl_unregister(ControlDevice* controlDevice,
964 ram_disk_ioctl_unregister* request)
965 {
966 // find the device in the list and unregister it
967 MutexLocker locker(sDeviceListLock);
968 RawDevice* device = find_raw_device(request->id);
969 if (device == NULL)
970 return B_ENTRY_NOT_FOUND;
971
972 // mark unregistered before we unlock
973 if (device->IsUnregistered())
974 return B_BUSY;
975 device->SetUnregistered(true);
976 locker.Unlock();
977
978 device_node* node = device->Node();
979 status_t error = sDeviceManager->unpublish_device(node,
980 device->DeviceName());
981 if (error != B_OK) {
982 dprintf("ramdisk: unregister: Failed to unpublish device \"%s\": %s\n",
983 device->DeviceName(), strerror(error));
984 return error;
985 }
986
987 error = sDeviceManager->unregister_node(node);
988 // Note: B_BUSY is OK. The node will removed as soon as possible.
989 if (error != B_OK && error != B_BUSY) {
990 dprintf("ramdisk: unregister: Failed to unregister node for device %"
991 B_PRId32 ": %s\n", request->id, strerror(error));
992 return error;
993 }
994
995 return B_OK;
996 }
997
998
// Handle the "get info" ioctl: fill the request structure with the
// device's ID, size, and backing file path.
static status_t
ioctl_info(RawDevice* device, ram_disk_ioctl_info* request)
{
	device->GetInfo(*request);
	return B_OK;
}
1005
1006
1007 template<typename DeviceType, typename Request>
1008 static status_t
handle_ioctl(DeviceType * device,status_t (* handler)(DeviceType *,Request *),void * buffer)1009 handle_ioctl(DeviceType* device,
1010 status_t (*handler)(DeviceType*, Request*), void* buffer)
1011 {
1012 // copy request to the kernel heap
1013 if (buffer == NULL || !IS_USER_ADDRESS(buffer))
1014 return B_BAD_ADDRESS;
1015
1016 Request* request = new(std::nothrow) Request;
1017 if (request == NULL)
1018 return B_NO_MEMORY;
1019 ObjectDeleter<Request> requestDeleter(request);
1020
1021 if (user_memcpy(request, buffer, sizeof(Request)) != B_OK)
1022 return B_BAD_ADDRESS;
1023
1024 // handle the ioctl
1025 status_t error = handler(device, request);
1026 if (error != B_OK)
1027 return error;
1028
1029 // copy the request back to userland
1030 if (user_memcpy(buffer, request, sizeof(Request)) != B_OK)
1031 return B_BAD_ADDRESS;
1032
1033 return B_OK;
1034 }
1035
1036
1037 // #pragma mark - driver
1038
1039
1040 static float
ram_disk_driver_supports_device(device_node * parent)1041 ram_disk_driver_supports_device(device_node* parent)
1042 {
1043 const char* bus = NULL;
1044 if (sDeviceManager->get_attr_string(parent, B_DEVICE_BUS, &bus, false)
1045 == B_OK
1046 && strcmp(bus, "generic") == 0) {
1047 return 0.8;
1048 }
1049
1050 return -1;
1051 }
1052
1053
1054 static status_t
ram_disk_driver_register_device(device_node * parent)1055 ram_disk_driver_register_device(device_node* parent)
1056 {
1057 device_attr attrs[] = {
1058 {B_DEVICE_PRETTY_NAME, B_STRING_TYPE,
1059 {.string = "RAM Disk Control Device"}},
1060 {NULL}
1061 };
1062
1063 return sDeviceManager->register_node(parent, kDriverModuleName, attrs, NULL,
1064 NULL);
1065 }
1066
1067
1068 static status_t
ram_disk_driver_init_driver(device_node * node,void ** _driverCookie)1069 ram_disk_driver_init_driver(device_node* node, void** _driverCookie)
1070 {
1071 uint64 deviceSize;
1072 if (sDeviceManager->get_attr_uint64(node, kDeviceSizeItem, &deviceSize,
1073 false) == B_OK) {
1074 int32 id = -1;
1075 sDeviceManager->get_attr_uint32(node, kDeviceIDItem, (uint32*)&id,
1076 false);
1077 if (id < 0)
1078 return B_ERROR;
1079
1080 const char* filePath = NULL;
1081 sDeviceManager->get_attr_string(node, kFilePathItem, &filePath, false);
1082
1083 RawDevice* device = new(std::nothrow) RawDevice(node);
1084 if (device == NULL)
1085 return B_NO_MEMORY;
1086
1087 status_t error = device->Init(id, filePath, deviceSize);
1088 if (error != B_OK) {
1089 delete device;
1090 return error;
1091 }
1092
1093 *_driverCookie = (Device*)device;
1094 } else {
1095 ControlDevice* device = new(std::nothrow) ControlDevice(node);
1096 if (device == NULL)
1097 return B_NO_MEMORY;
1098
1099 *_driverCookie = (Device*)device;
1100 }
1101
1102 return B_OK;
1103 }
1104
1105
1106 static void
ram_disk_driver_uninit_driver(void * driverCookie)1107 ram_disk_driver_uninit_driver(void* driverCookie)
1108 {
1109 Device* device = (Device*)driverCookie;
1110 if (RawDevice* rawDevice = dynamic_cast<RawDevice*>(device))
1111 free_raw_device_id(rawDevice->ID());
1112 delete device;
1113 }
1114
1115
1116 static status_t
ram_disk_driver_register_child_devices(void * driverCookie)1117 ram_disk_driver_register_child_devices(void* driverCookie)
1118 {
1119 Device* device = (Device*)driverCookie;
1120 return device->PublishDevice();
1121 }
1122
1123
1124 // #pragma mark - control device
1125
1126
1127 static status_t
ram_disk_control_device_init_device(void * driverCookie,void ** _deviceCookie)1128 ram_disk_control_device_init_device(void* driverCookie, void** _deviceCookie)
1129 {
1130 *_deviceCookie = driverCookie;
1131 return B_OK;
1132 }
1133
1134
static void
ram_disk_control_device_uninit_device(void* deviceCookie)
{
	// Nothing to tear down; the cookie is owned by the driver.
}
1139
1140
1141 static status_t
ram_disk_control_device_open(void * deviceCookie,const char * path,int openMode,void ** _cookie)1142 ram_disk_control_device_open(void* deviceCookie, const char* path, int openMode,
1143 void** _cookie)
1144 {
1145 *_cookie = deviceCookie;
1146 return B_OK;
1147 }
1148
1149
1150 static status_t
ram_disk_control_device_close(void * cookie)1151 ram_disk_control_device_close(void* cookie)
1152 {
1153 return B_OK;
1154 }
1155
1156
1157 static status_t
ram_disk_control_device_free(void * cookie)1158 ram_disk_control_device_free(void* cookie)
1159 {
1160 return B_OK;
1161 }
1162
1163
1164 static status_t
ram_disk_control_device_read(void * cookie,off_t position,void * buffer,size_t * _length)1165 ram_disk_control_device_read(void* cookie, off_t position, void* buffer,
1166 size_t* _length)
1167 {
1168 return B_BAD_VALUE;
1169 }
1170
1171
1172 static status_t
ram_disk_control_device_write(void * cookie,off_t position,const void * data,size_t * _length)1173 ram_disk_control_device_write(void* cookie, off_t position, const void* data,
1174 size_t* _length)
1175 {
1176 return B_BAD_VALUE;
1177 }
1178
1179
1180 static status_t
ram_disk_control_device_control(void * cookie,uint32 op,void * buffer,size_t length)1181 ram_disk_control_device_control(void* cookie, uint32 op, void* buffer,
1182 size_t length)
1183 {
1184 ControlDevice* device = (ControlDevice*)cookie;
1185
1186 switch (op) {
1187 case RAM_DISK_IOCTL_REGISTER:
1188 return handle_ioctl(device, &ioctl_register, buffer);
1189
1190 case RAM_DISK_IOCTL_UNREGISTER:
1191 return handle_ioctl(device, &ioctl_unregister, buffer);
1192 }
1193
1194 return B_BAD_VALUE;
1195 }
1196
1197
1198 // #pragma mark - raw device
1199
1200
1201 static status_t
ram_disk_raw_device_init_device(void * driverCookie,void ** _deviceCookie)1202 ram_disk_raw_device_init_device(void* driverCookie, void** _deviceCookie)
1203 {
1204 RawDevice* device = static_cast<RawDevice*>((Device*)driverCookie);
1205
1206 status_t error = device->Prepare();
1207 if (error != B_OK)
1208 return error;
1209
1210 *_deviceCookie = device;
1211 return B_OK;
1212 }
1213
1214
1215 static void
ram_disk_raw_device_uninit_device(void * deviceCookie)1216 ram_disk_raw_device_uninit_device(void* deviceCookie)
1217 {
1218 RawDevice* device = (RawDevice*)deviceCookie;
1219 device->Unprepare();
1220 }
1221
1222
1223 static status_t
ram_disk_raw_device_open(void * deviceCookie,const char * path,int openMode,void ** _cookie)1224 ram_disk_raw_device_open(void* deviceCookie, const char* path, int openMode,
1225 void** _cookie)
1226 {
1227 RawDevice* device = (RawDevice*)deviceCookie;
1228
1229 RawDeviceCookie* cookie = new(std::nothrow) RawDeviceCookie(device,
1230 openMode);
1231 if (cookie == NULL)
1232 return B_NO_MEMORY;
1233
1234 *_cookie = cookie;
1235 return B_OK;
1236 }
1237
1238
1239 static status_t
ram_disk_raw_device_close(void * cookie)1240 ram_disk_raw_device_close(void* cookie)
1241 {
1242 return B_OK;
1243 }
1244
1245
1246 static status_t
ram_disk_raw_device_free(void * _cookie)1247 ram_disk_raw_device_free(void* _cookie)
1248 {
1249 RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1250 delete cookie;
1251 return B_OK;
1252 }
1253
1254
1255 static status_t
ram_disk_raw_device_read(void * _cookie,off_t pos,void * buffer,size_t * _length)1256 ram_disk_raw_device_read(void* _cookie, off_t pos, void* buffer,
1257 size_t* _length)
1258 {
1259 RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1260 RawDevice* device = cookie->Device();
1261
1262 size_t length = *_length;
1263
1264 if (pos >= device->DeviceSize())
1265 return B_BAD_VALUE;
1266 if (pos + (off_t)length > device->DeviceSize())
1267 length = device->DeviceSize() - pos;
1268
1269 IORequest request;
1270 status_t status = request.Init(pos, (addr_t)buffer, length, false, 0);
1271 if (status != B_OK)
1272 return status;
1273
1274 status = device->DoIO(&request);
1275 if (status != B_OK)
1276 return status;
1277
1278 status = request.Wait(0, 0);
1279 if (status == B_OK)
1280 *_length = length;
1281 return status;
1282 }
1283
1284
1285 static status_t
ram_disk_raw_device_write(void * _cookie,off_t pos,const void * buffer,size_t * _length)1286 ram_disk_raw_device_write(void* _cookie, off_t pos, const void* buffer,
1287 size_t* _length)
1288 {
1289 RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1290 RawDevice* device = cookie->Device();
1291
1292 size_t length = *_length;
1293
1294 if (pos >= device->DeviceSize())
1295 return B_BAD_VALUE;
1296 if (pos + (off_t)length > device->DeviceSize())
1297 length = device->DeviceSize() - pos;
1298
1299 IORequest request;
1300 status_t status = request.Init(pos, (addr_t)buffer, length, true, 0);
1301 if (status != B_OK)
1302 return status;
1303
1304 status = device->DoIO(&request);
1305 if (status != B_OK)
1306 return status;
1307
1308 status = request.Wait(0, 0);
1309 if (status == B_OK)
1310 *_length = length;
1311
1312 return status;
1313 }
1314
1315
1316 static status_t
ram_disk_raw_device_io(void * _cookie,io_request * request)1317 ram_disk_raw_device_io(void* _cookie, io_request* request)
1318 {
1319 RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1320 RawDevice* device = cookie->Device();
1321
1322 return device->DoIO(request);
1323 }
1324
1325
// ioctl hook for the raw RAM disk device: standard Haiku disk driver
// opcodes plus the RAM-disk-specific flush/info requests. Unknown
// opcodes yield B_BAD_VALUE.
static status_t
ram_disk_raw_device_control(void* _cookie, uint32 op, void* buffer,
	size_t length)
{
	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
	RawDevice* device = cookie->Device();

	switch (op) {
		case B_GET_DEVICE_SIZE:
		{
			// NOTE(review): DeviceSize() is narrowed to size_t here,
			// which would truncate on 32-bit hosts for devices
			// >= 4 GiB — confirm this is intended.
			size_t size = device->DeviceSize();
			return user_memcpy(buffer, &size, sizeof(size_t));
		}

		// Blocking mode is irrelevant for a RAM-backed device; accept
		// both settings as no-ops.
		case B_SET_NONBLOCKING_IO:
		case B_SET_BLOCKING_IO:
			return B_OK;

		case B_GET_READ_STATUS:
		case B_GET_WRITE_STATUS:
		{
			// The device is always ready for I/O in both directions.
			bool value = true;
			return user_memcpy(buffer, &value, sizeof(bool));
		}

		case B_GET_GEOMETRY:
		case B_GET_BIOS_GEOMETRY:
		{
			// The caller may ask for fewer bytes than the full
			// structure; only `length` bytes are copied out below.
			if (buffer == NULL || length > sizeof(device_geometry))
				return B_BAD_VALUE;

			// Report a flat geometry: one head, one sector per track,
			// page-sized sectors, one cylinder per page.
			device_geometry geometry;
			geometry.bytes_per_sector = B_PAGE_SIZE;
			geometry.sectors_per_track = 1;
			geometry.cylinder_count = device->DeviceSize() / B_PAGE_SIZE;
				// TODO: We're limited to 2^32 * B_PAGE_SIZE, if we don't use
				// sectors_per_track and head_count.
			geometry.head_count = 1;
			geometry.device_type = B_DISK;
			geometry.removable = true;
			geometry.read_only = false;
			geometry.write_once = false;
			geometry.bytes_per_physical_sector = B_PAGE_SIZE;

			return user_memcpy(buffer, &geometry, length);
		}

		case B_GET_MEDIA_STATUS:
		{
			// RAM disk media is always present and healthy.
			status_t status = B_OK;
			return user_memcpy(buffer, &status, sizeof(status_t));
		}

		case B_GET_ICON_NAME:
			return user_strlcpy((char*)buffer, "devices/drive-ramdisk",
				B_FILE_NAME_LENGTH);

		case B_GET_VECTOR_ICON:
		{
			// Two-step protocol: the caller passes a device_icon with
			// its buffer size; we copy the icon only if the buffer is
			// large enough, and always report the required size back.
			device_icon iconData;
			if (length != sizeof(device_icon))
				return B_BAD_VALUE;
			if (user_memcpy(&iconData, buffer, sizeof(device_icon)) != B_OK)
				return B_BAD_ADDRESS;

			if (iconData.icon_size >= (int32)sizeof(kRamdiskIcon)) {
				if (user_memcpy(iconData.icon_data, kRamdiskIcon,
						sizeof(kRamdiskIcon)) != B_OK)
					return B_BAD_ADDRESS;
			}

			iconData.icon_size = sizeof(kRamdiskIcon);
			return user_memcpy(buffer, &iconData, sizeof(device_icon));
		}

		// Interruptible-I/O mode and cache flushing need no action
		// for a purely RAM-backed device; acknowledge and move on.
		case B_SET_UNINTERRUPTABLE_IO:
		case B_SET_INTERRUPTABLE_IO:
		case B_FLUSH_DRIVE_CACHE:
			return B_OK;

		case RAM_DISK_IOCTL_FLUSH:
		{
			// Write dirty pages back to the backing file (if any).
			status_t error = device->Flush();
			if (error != B_OK) {
				dprintf("ramdisk: flush: Failed to flush device: %s\n",
					strerror(error));
				return error;
			}

			return B_OK;
		}

		case B_TRIM_DEVICE:
		{
			// We know the buffer is kernel-side because it has been
			// preprocessed in devfs
			ASSERT(IS_KERNEL_ADDRESS(buffer));
			return device->Trim((fs_trim_data*)buffer);
		}

		case RAM_DISK_IOCTL_INFO:
			// Copies the request from/to userland via handle_ioctl().
			return handle_ioctl(device, &ioctl_info, buffer);
	}

	return B_BAD_VALUE;
}
1432
1433
1434 // #pragma mark -
1435
1436
// Kernel modules this driver depends on; the device manager module
// pointer is stored into sDeviceManager when the driver is loaded.
module_dependency module_dependencies[] = {
	{B_DEVICE_MANAGER_MODULE_NAME, (module_info**)&sDeviceManager},
	{}
};
1441
1442
// Driver hooks for the device manager.
// NOTE(review): the "Checksum" in these table names looks like a
// leftover from the checksum_device driver this code was derived
// from — harmless, since the tables are static.
static const struct driver_module_info sChecksumDeviceDriverModule = {
	{
		kDriverModuleName,
		0,
		NULL
	},

	ram_disk_driver_supports_device,
	ram_disk_driver_register_device,
	ram_disk_driver_init_driver,
	ram_disk_driver_uninit_driver,
	ram_disk_driver_register_child_devices
};
1456
// Device hooks for the ioctl-only control device (register/unregister
// RAM disks); read/write exist but reject all requests.
static const struct device_module_info sChecksumControlDeviceModule = {
	{
		kControlDeviceModuleName,
		0,
		NULL
	},

	ram_disk_control_device_init_device,
	ram_disk_control_device_uninit_device,
	NULL,	// device removed

	ram_disk_control_device_open,
	ram_disk_control_device_close,
	ram_disk_control_device_free,

	ram_disk_control_device_read,
	ram_disk_control_device_write,
	NULL,	// io

	ram_disk_control_device_control,

	NULL,	// select
	NULL	// deselect
};
1481
// Device hooks for the raw RAM disk device: full read/write/io support
// plus the disk-driver ioctls handled in ram_disk_raw_device_control().
static const struct device_module_info sChecksumRawDeviceModule = {
	{
		kRawDeviceModuleName,
		0,
		NULL
	},

	ram_disk_raw_device_init_device,
	ram_disk_raw_device_uninit_device,
	NULL,	// device removed

	ram_disk_raw_device_open,
	ram_disk_raw_device_close,
	ram_disk_raw_device_free,

	ram_disk_raw_device_read,
	ram_disk_raw_device_write,
	ram_disk_raw_device_io,

	ram_disk_raw_device_control,

	NULL,	// select
	NULL	// deselect
};
1506
// Export table scanned by the kernel module loader: the driver module
// and its two device modules (control + raw), NULL-terminated.
const module_info* modules[] = {
	(module_info*)&sChecksumDeviceDriverModule,
	(module_info*)&sChecksumControlDeviceModule,
	(module_info*)&sChecksumRawDeviceModule,
	NULL
};
1513