xref: /haiku/src/add-ons/kernel/drivers/disk/virtual/ram_disk/ram_disk.cpp (revision 1deede7388b04dbeec5af85cae7164735ea9e70d)
1 /*
2  * Copyright 2010-2013, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include <file_systems/ram_disk/ram_disk.h>
8 
9 #include <ctype.h>
10 #include <errno.h>
11 #include <fcntl.h>
12 #include <stdio.h>
13 #include <string.h>
14 #include <unistd.h>
15 
16 #include <algorithm>
17 
18 #include <device_manager.h>
19 #include <Drivers.h>
20 
21 #include <AutoDeleter.h>
22 #include <util/AutoLock.h>
23 #include <util/DoublyLinkedList.h>
24 
25 #include <fs/KPath.h>
26 #include <lock.h>
27 #include <util/fs_trim_support.h>
28 #include <vm/vm.h>
29 #include <vm/VMCache.h>
30 #include <vm/vm_page.h>
31 
32 #include "dma_resources.h"
33 #include "io_requests.h"
34 #include "IOSchedulerSimple.h"
35 
36 
37 //#define TRACE_RAM_DISK
38 #ifdef TRACE_RAM_DISK
39 #	define TRACE(x...)	dprintf(x)
40 #else
41 #	define TRACE(x...) do {} while (false)
42 #endif
43 
44 
45 static const unsigned char kRamdiskIcon[] = {
46 	0x6e, 0x63, 0x69, 0x66, 0x0e, 0x03, 0x01, 0x00, 0x00, 0x02, 0x00, 0x16,
47 	0x02, 0x3c, 0xc7, 0xee, 0x38, 0x9b, 0xc0, 0xba, 0x16, 0x57, 0x3e, 0x39,
48 	0xb0, 0x49, 0x77, 0xc8, 0x42, 0xad, 0xc7, 0x00, 0xff, 0xff, 0xd3, 0x02,
49 	0x00, 0x06, 0x02, 0x3c, 0x96, 0x32, 0x3a, 0x4d, 0x3f, 0xba, 0xfc, 0x01,
50 	0x3d, 0x5a, 0x97, 0x4b, 0x57, 0xa5, 0x49, 0x84, 0x4d, 0x00, 0x47, 0x47,
51 	0x47, 0xff, 0xa5, 0xa0, 0xa0, 0x02, 0x00, 0x16, 0x02, 0xbc, 0x59, 0x2f,
52 	0xbb, 0x29, 0xa7, 0x3c, 0x0c, 0xe4, 0xbd, 0x0b, 0x7c, 0x48, 0x92, 0xc0,
53 	0x4b, 0x79, 0x66, 0x00, 0x7d, 0xff, 0xd4, 0x02, 0x00, 0x06, 0x02, 0x38,
54 	0xdb, 0xb4, 0x39, 0x97, 0x33, 0xbc, 0x4a, 0x33, 0x3b, 0xa5, 0x42, 0x48,
55 	0x6e, 0x66, 0x49, 0xee, 0x7b, 0x00, 0x59, 0x67, 0x56, 0xff, 0xeb, 0xb2,
56 	0xb2, 0x03, 0xa7, 0xff, 0x00, 0x03, 0xff, 0x00, 0x00, 0x04, 0x01, 0x80,
57 	0x03, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x6a, 0x05, 0x33, 0x02,
58 	0x00, 0x06, 0x02, 0x3a, 0x5d, 0x2c, 0x39, 0xf8, 0xb1, 0xb9, 0xdb, 0xf1,
59 	0x3a, 0x4c, 0x0f, 0x48, 0xae, 0xea, 0x4a, 0xc0, 0x91, 0x00, 0x74, 0x74,
60 	0x74, 0xff, 0x3e, 0x3d, 0x3d, 0x02, 0x00, 0x16, 0x02, 0x38, 0x22, 0x1b,
61 	0x3b, 0x11, 0x73, 0xbc, 0x5e, 0xb5, 0x39, 0x4b, 0xaa, 0x4a, 0x47, 0xf1,
62 	0x49, 0xc2, 0x1d, 0x00, 0xb0, 0xff, 0x83, 0x02, 0x00, 0x16, 0x03, 0x36,
63 	0xed, 0xe9, 0x36, 0xb9, 0x49, 0xba, 0x0a, 0xf6, 0x3a, 0x32, 0x6f, 0x4a,
64 	0x79, 0xef, 0x4b, 0x03, 0xe7, 0x00, 0x5a, 0x38, 0xdc, 0xff, 0x7e, 0x0d,
65 	0x0a, 0x06, 0x22, 0x3c, 0x22, 0x49, 0x44, 0x5b, 0x5a, 0x3e, 0x5a, 0x31,
66 	0x39, 0x25, 0x0a, 0x04, 0x22, 0x3c, 0x44, 0x4b, 0x5a, 0x31, 0x39, 0x25,
67 	0x0a, 0x04, 0x44, 0x4b, 0x44, 0x5b, 0x5a, 0x3e, 0x5a, 0x31, 0x0a, 0x04,
68 	0x22, 0x3c, 0x22, 0x49, 0x44, 0x5b, 0x44, 0x4b, 0x08, 0x02, 0x27, 0x43,
69 	0xb8, 0x14, 0xc1, 0xf1, 0x08, 0x02, 0x26, 0x43, 0x29, 0x44, 0x0a, 0x05,
70 	0x44, 0x5d, 0x49, 0x5d, 0x60, 0x3e, 0x5a, 0x3b, 0x5b, 0x3f, 0x0a, 0x04,
71 	0x3c, 0x5a, 0x5a, 0x3c, 0x5a, 0x36, 0x3c, 0x52, 0x0a, 0x04, 0x24, 0x4e,
72 	0x3c, 0x5a, 0x3c, 0x52, 0x24, 0x48, 0x06, 0x07, 0xaa, 0x3f, 0x42, 0x2e,
73 	0x24, 0x48, 0x3c, 0x52, 0x5a, 0x36, 0x51, 0x33, 0x51, 0x33, 0x50, 0x34,
74 	0x4b, 0x33, 0x4d, 0x34, 0x49, 0x32, 0x49, 0x30, 0x48, 0x31, 0x49, 0x30,
75 	0x06, 0x08, 0xfa, 0xfa, 0x42, 0x50, 0x3e, 0x54, 0x40, 0x55, 0x3f, 0xc7,
76 	0xeb, 0x41, 0xc8, 0x51, 0x42, 0xc9, 0x4f, 0x42, 0xc8, 0xda, 0x42, 0xca,
77 	0x41, 0xc0, 0xf1, 0x5d, 0x45, 0xca, 0x81, 0x46, 0xc7, 0xb7, 0x46, 0xc8,
78 	0xa9, 0x46, 0xc7, 0x42, 0x44, 0x51, 0x45, 0xc6, 0xb9, 0x43, 0xc6, 0x53,
79 	0x0a, 0x07, 0x3c, 0x5c, 0x40, 0x5c, 0x42, 0x5e, 0x48, 0x5e, 0x4a, 0x5c,
80 	0x46, 0x5a, 0x45, 0x4b, 0x06, 0x09, 0x9a, 0xf6, 0x03, 0x42, 0x2e, 0x24,
81 	0x48, 0x4e, 0x3c, 0x5a, 0x5a, 0x3c, 0x36, 0x51, 0x33, 0x51, 0x33, 0x50,
82 	0x34, 0x4b, 0x33, 0x4d, 0x34, 0x49, 0x32, 0x49, 0x30, 0x48, 0x31, 0x49,
83 	0x30, 0x18, 0x0a, 0x07, 0x01, 0x06, 0x00, 0x0a, 0x00, 0x01, 0x00, 0x10,
84 	0x01, 0x17, 0x84, 0x00, 0x04, 0x0a, 0x01, 0x01, 0x01, 0x00, 0x0a, 0x02,
85 	0x01, 0x02, 0x00, 0x0a, 0x03, 0x01, 0x03, 0x00, 0x0a, 0x04, 0x01, 0x04,
86 	0x10, 0x01, 0x17, 0x85, 0x20, 0x04, 0x0a, 0x06, 0x01, 0x05, 0x30, 0x24,
87 	0xb3, 0x99, 0x01, 0x17, 0x82, 0x00, 0x04, 0x0a, 0x05, 0x01, 0x05, 0x30,
88 	0x20, 0xb2, 0xe6, 0x01, 0x17, 0x82, 0x00, 0x04, 0x0a, 0x09, 0x01, 0x0b,
89 	0x02, 0x3e, 0x9b, 0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0,
90 	0x21, 0x48, 0xed, 0x4d, 0xc8, 0x5a, 0x02, 0x0a, 0x09, 0x01, 0x0b, 0x02,
91 	0x3e, 0x9b, 0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21,
92 	0x48, 0x4c, 0xd4, 0xc7, 0x9c, 0x11, 0x0a, 0x09, 0x01, 0x0b, 0x02, 0x3e,
93 	0x9b, 0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x47,
94 	0x5c, 0xe7, 0xc6, 0x2c, 0x1a, 0x0a, 0x09, 0x01, 0x0b, 0x02, 0x3e, 0x9b,
95 	0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x46, 0x1b,
96 	0xf5, 0xc4, 0x28, 0x4e, 0x0a, 0x08, 0x01, 0x0c, 0x12, 0x3e, 0xc0, 0x21,
97 	0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x45, 0xb6, 0x34,
98 	0xc4, 0x22, 0x1f, 0x01, 0x17, 0x84, 0x00, 0x04, 0x0a, 0x0a, 0x01, 0x07,
99 	0x02, 0x3e, 0xc0, 0x21, 0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0,
100 	0x21, 0x45, 0xb6, 0x34, 0xc4, 0x22, 0x1f, 0x0a, 0x0b, 0x01, 0x08, 0x02,
101 	0x3e, 0xc0, 0x21, 0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21,
102 	0x45, 0xb6, 0x34, 0xc4, 0x22, 0x1f, 0x0a, 0x0c, 0x01, 0x09, 0x02, 0x3e,
103 	0xc0, 0x21, 0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x45,
104 	0xb6, 0x34, 0xc4, 0x22, 0x1f, 0x0a, 0x08, 0x01, 0x0a, 0x12, 0x3e, 0x98,
105 	0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x48, 0xf2,
106 	0x4e, 0xc7, 0xee, 0x3f, 0x01, 0x17, 0x84, 0x22, 0x04, 0x0a, 0x0d, 0x01,
107 	0x0a, 0x02, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e,
108 	0x7b, 0x5e, 0x48, 0xf2, 0x4e, 0xc7, 0xee, 0x3f, 0x0a, 0x08, 0x01, 0x0a,
109 	0x12, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b,
110 	0x5e, 0x48, 0x53, 0xa1, 0xc6, 0xa0, 0xb6, 0x01, 0x17, 0x84, 0x22, 0x04,
111 	0x0a, 0x0d, 0x01, 0x0a, 0x02, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35,
112 	0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x48, 0x53, 0xa1, 0xc6, 0xa0, 0xb6, 0x0a,
113 	0x08, 0x01, 0x0a, 0x12, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9,
114 	0x3d, 0x3e, 0x7b, 0x5e, 0x47, 0x69, 0xe9, 0xc4, 0xa6, 0x5a, 0x01, 0x17,
115 	0x84, 0x22, 0x04, 0x0a, 0x0d, 0x01, 0x0a, 0x02, 0x3e, 0x98, 0xfd, 0xb5,
116 	0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x47, 0x69, 0xe9, 0xc4,
117 	0xa6, 0x5a, 0x0a, 0x08, 0x01, 0x0a, 0x12, 0x3e, 0x98, 0xfd, 0xb5, 0xf6,
118 	0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x46, 0x2c, 0x90, 0xb8, 0xd1,
119 	0xff, 0x01, 0x17, 0x84, 0x22, 0x04, 0x0a, 0x0d, 0x01, 0x0a, 0x02, 0x3e,
120 	0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x46,
121 	0x2c, 0x90, 0xb8, 0xd1, 0xff
122 };
123 
124 
125 // parameters for the DMA resource
126 static const uint32 kDMAResourceBufferCount			= 16;
127 static const uint32 kDMAResourceBounceBufferCount	= 16;
128 
129 static const char* const kDriverModuleName
130 	= "drivers/disk/virtual/ram_disk/driver_v1";
131 static const char* const kControlDeviceModuleName
132 	= "drivers/disk/virtual/ram_disk/control/device_v1";
133 static const char* const kRawDeviceModuleName
134 	= "drivers/disk/virtual/ram_disk/raw/device_v1";
135 
136 static const char* const kControlDeviceName = RAM_DISK_CONTROL_DEVICE_NAME;
137 static const char* const kRawDeviceBaseName = RAM_DISK_RAW_DEVICE_BASE_NAME;
138 
139 static const char* const kFilePathItem = "ram_disk/file_path";
140 static const char* const kDeviceSizeItem = "ram_disk/device_size";
141 static const char* const kDeviceIDItem = "ram_disk/id";
142 
143 
144 struct RawDevice;
145 typedef DoublyLinkedList<RawDevice> RawDeviceList;
146 
147 struct device_manager_info* sDeviceManager;
148 
149 static RawDeviceList sDeviceList;
150 static mutex sDeviceListLock = MUTEX_INITIALIZER("ram disk device list");
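// Bit mask of raw device IDs currently in use; bit i set means ID i is taken
// (so at most 64 RAM disks can be registered at a time).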
151 static uint64 sUsedRawDeviceIDs = 0;
152 
153 
154 static int32	allocate_raw_device_id();
155 static void		free_raw_device_id(int32 id);
156 
157 
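// Common base class of the control device and the raw (disk) devices.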
158 struct Device {
159 	Device(device_node* node)
160 		:
161 		fNode(node)
162 	{
163 		mutex_init(&fLock, "ram disk device");
164 	}
165 
166 	virtual ~Device()
167 	{
168 		mutex_destroy(&fLock);
169 	}
170 
171 	bool Lock()		{ mutex_lock(&fLock); return true; }
172 	void Unlock()	{ mutex_unlock(&fLock); }
173 
174 	device_node* Node() const	{ return fNode; }
175 
176 	virtual status_t PublishDevice() = 0;
177 
178 protected:
179 	mutex			fLock;
180 	device_node*	fNode;
181 };
182 
183 
184 struct ControlDevice : Device {
185 	ControlDevice(device_node* node)
186 		:
187 		Device(node)
188 	{
189 	}
190 
191 	status_t Register(const char* filePath, uint64 deviceSize, int32& _id)
192 	{
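		// Registering a RAM disk means registering a new sibling device node
		// with the size, ID, and (optional) file path attached as attributes.
		// ram_disk_driver_init_driver() then creates a RawDevice for it.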
193 		int32 id = allocate_raw_device_id();
194 		if (id < 0)
195 			return B_BUSY;
196 
197 		device_attr attrs[] = {
198 			{B_DEVICE_PRETTY_NAME, B_STRING_TYPE,
199 				{string: "RAM Disk Raw Device"}},
200 			{kDeviceSizeItem, B_UINT64_TYPE, {ui64: deviceSize}},
201 			{kDeviceIDItem, B_UINT32_TYPE, {ui32: (uint32)id}},
202 			{kFilePathItem, B_STRING_TYPE, {string: filePath}},
203 			{NULL}
204 		};
205 
206 		// If filePath is NULL, turn the file path attribute into the terminator.
207 		if (filePath == NULL) {
208 			size_t count = sizeof(attrs) / sizeof(attrs[0]);
209 			memset(attrs + count - 2, 0, sizeof(attrs[0]));
210 		}
211 
212 		status_t error = sDeviceManager->register_node(
213 			sDeviceManager->get_parent_node(Node()), kDriverModuleName, attrs,
214 			NULL, NULL);
215 		if (error != B_OK) {
216 			free_raw_device_id(id);
217 			return error;
218 		}
219 
220 		_id = id;
221 		return B_OK;
222 	}
223 
224 	virtual status_t PublishDevice()
225 	{
226 		return sDeviceManager->publish_device(Node(), kControlDeviceName,
227 			kControlDeviceModuleName);
228 	}
229 };
230 
231 
232 struct RawDevice : Device, DoublyLinkedListLinkImpl<RawDevice> {
233 	RawDevice(device_node* node)
234 		:
235 		Device(node),
236 		fID(-1),
237 		fUnregistered(false),
238 		fDeviceSize(0),
239 		fDeviceName(NULL),
240 		fFilePath(NULL),
241 		fCache(NULL),
242 		fDMAResource(NULL),
243 		fIOScheduler(NULL)
244 	{
245 	}
246 
247 	virtual ~RawDevice()
248 	{
249 		if (fID >= 0) {
250 			MutexLocker locker(sDeviceListLock);
251 			sDeviceList.Remove(this);
252 		}
253 
254 		free(fDeviceName);
255 		free(fFilePath);
256 	}
257 
258 	int32 ID() const				{ return fID; }
259 	off_t DeviceSize() const		{ return fDeviceSize; }
260 	const char* DeviceName() const	{ return fDeviceName; }
261 
262 	bool IsUnregistered() const		{ return fUnregistered; }
263 
264 	void SetUnregistered(bool unregistered)
265 	{
266 		fUnregistered = unregistered;
267 	}
268 
269 	status_t Init(int32 id, const char* filePath, uint64 deviceSize)
270 	{
271 		fID = id;
272 		fFilePath = filePath != NULL ? strdup(filePath) : NULL;
273 		if (filePath != NULL && fFilePath == NULL)
274 			return B_NO_MEMORY;
275 
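		// Round the requested size up to a multiple of the page size and
		// reject sizes below one page or above two thirds of the physical
		// memory in the system.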
276 		fDeviceSize = (deviceSize + B_PAGE_SIZE - 1) / B_PAGE_SIZE
277 			* B_PAGE_SIZE;
278 
279 		if (fDeviceSize < B_PAGE_SIZE
280 			|| (uint64)fDeviceSize / B_PAGE_SIZE
281 				> vm_page_num_pages() * 2 / 3) {
282 			return B_BAD_VALUE;
283 		}
284 
285 		// construct our device path
286 		KPath path(kRawDeviceBaseName);
287 		char buffer[32];
288 		snprintf(buffer, sizeof(buffer), "%" B_PRId32 "/raw", fID);
289 
290 		status_t error = path.Append(buffer);
291 		if (error != B_OK)
292 			return error;
293 
294 		fDeviceName = path.DetachBuffer();
295 
296 		// insert into the device list, sorted by ID
297 		RawDevice* nextDevice = NULL;
298 		MutexLocker locker(sDeviceListLock);
299 		for (RawDeviceList::Iterator it = sDeviceList.GetIterator();
300 				(nextDevice = it.Next()) != NULL;) {
301 			if (nextDevice->ID() > fID)
302 				break;
303 		}
304 
305 		sDeviceList.InsertBefore(nextDevice, this);
306 
307 		return B_OK;
308 	}
309 
310 	status_t Prepare()
311 	{
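		// The device content is backed by an anonymous VMCache. Committing the
		// full device size reserves the memory, but actual pages are only
		// allocated when written to (or preloaded from the backing file).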
312 		status_t error = VMCacheFactory::CreateAnonymousCache(fCache, false, 0,
313 			0, false, VM_PRIORITY_SYSTEM);
314 		if (error != B_OK) {
315 			Unprepare();
316 			return error;
317 		}
318 
319 		fCache->temporary = 1;
320 		fCache->virtual_end = fDeviceSize;
321 
322 		error = fCache->Commit(fDeviceSize, VM_PRIORITY_SYSTEM);
323 		if (error != B_OK) {
324 			Unprepare();
325 			return error;
326 		}
327 
328 		if (fFilePath != NULL) {
329 			error = _LoadFile();
330 			if (error != B_OK) {
331 				Unprepare();
332 				return error;
333 			}
334 		}
335 
336 		// no DMA restrictions
337 		const dma_restrictions restrictions = {};
338 
339 		fDMAResource = new(std::nothrow) DMAResource;
340 		if (fDMAResource == NULL) {
341 			Unprepare();
342 			return B_NO_MEMORY;
343 		}
344 
345 		error = fDMAResource->Init(restrictions, B_PAGE_SIZE,
346 			kDMAResourceBufferCount, kDMAResourceBounceBufferCount);
347 		if (error != B_OK) {
348 			Unprepare();
349 			return error;
350 		}
351 
352 		fIOScheduler = new(std::nothrow) IOSchedulerSimple(fDMAResource);
353 		if (fIOScheduler == NULL) {
354 			Unprepare();
355 			return B_NO_MEMORY;
356 		}
357 
358 		error = fIOScheduler->Init("ram disk device scheduler");
359 		if (error != B_OK) {
360 			Unprepare();
361 			return error;
362 		}
363 
364 		fIOScheduler->SetCallback(&_DoIOEntry, this);
365 
366 		return B_OK;
367 	}
368 
369 	void Unprepare()
370 	{
371 		delete fIOScheduler;
372 		fIOScheduler = NULL;
373 
374 		delete fDMAResource;
375 		fDMAResource = NULL;
376 
377 		if (fCache != NULL) {
378 			fCache->Lock();
379 			fCache->ReleaseRefAndUnlock();
380 			fCache = NULL;
381 		}
382 	}
383 
384 	void GetInfo(ram_disk_ioctl_info& _info) const
385 	{
386 		_info.id = fID;
387 		_info.size = fDeviceSize;
388 		memset(&_info.path, 0, sizeof(_info.path));
389 		if (fFilePath != NULL)
390 			strlcpy(_info.path, fFilePath, sizeof(_info.path));
391 	}
392 
393 	status_t Flush()
394 	{
395 		static const size_t kPageCountPerIteration = 1024;
396 		static const size_t kMaxGapSize = 15;
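		// Modified pages are collected in batches of up to
		// kPageCountPerIteration pages. Up to kMaxGapSize non-modified (or
		// missing) pages in between are tolerated, so each batch can be
		// written with a single pwrite().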
397 
398 		int fd = open(fFilePath, O_WRONLY);
399 		if (fd < 0)
400 			return errno;
401 		FileDescriptorCloser fdCloser(fd);
402 
403 		vm_page** pages = new(std::nothrow) vm_page*[kPageCountPerIteration];
404 		ArrayDeleter<vm_page*> pagesDeleter(pages);
405 
406 		uint8* buffer = (uint8*)malloc(kPageCountPerIteration * B_PAGE_SIZE);
407 		MemoryDeleter bufferDeleter(buffer);
408 
409 		if (pages == NULL || buffer == NULL)
410 			return B_NO_MEMORY;
411 
412 		// Iterate through all pages of the cache and write back those that
413 		// have been modified.
414 		AutoLocker<VMCache> locker(fCache);
415 
416 		status_t error = B_OK;
417 
418 		for (off_t offset = 0; offset < fDeviceSize;) {
419 			// find the first modified page at or after the current offset
420 			VMCachePagesTree::Iterator it
421 				= fCache->pages.GetIterator(offset / B_PAGE_SIZE, true, true);
422 			vm_page* firstModified;
423 			while ((firstModified = it.Next()) != NULL
424 				&& !firstModified->modified) {
425 			}
426 
427 			if (firstModified == NULL)
428 				break;
429 
430 			if (firstModified->busy) {
431 				fCache->WaitForPageEvents(firstModified, PAGE_EVENT_NOT_BUSY,
432 					true);
433 				continue;
434 			}
435 
436 			pages[0] = firstModified;
437 			page_num_t firstPageIndex = firstModified->cache_offset;
438 			offset = firstPageIndex * B_PAGE_SIZE;
439 
440 			// Collect more pages until the gap between two modified pages gets
441 			// too large or we hit the end of our array.
442 			size_t previousModifiedIndex = 0;
443 			size_t previousIndex = 0;
444 			while (vm_page* page = it.Next()) {
445 				page_num_t index = page->cache_offset - firstPageIndex;
446 				if (page->busy
447 					|| index >= kPageCountPerIteration
448 					|| index - previousModifiedIndex > kMaxGapSize) {
449 					break;
450 				}
451 
452 				pages[index] = page;
453 
454 				// clear the gap in the page array since the previous page
455 				if (previousIndex + 1 < index) {
456 					memset(pages + previousIndex + 1, 0,
457 						(index - previousIndex - 1) * sizeof(vm_page*));
458 				}
459 
460 				previousIndex = index;
461 				if (page->modified)
462 					previousModifiedIndex = index;
463 			}
464 
465 			// mark all pages we want to write busy
466 			size_t pagesToWrite = previousModifiedIndex + 1;
467 			for (size_t i = 0; i < pagesToWrite; i++) {
468 				if (vm_page* page = pages[i]) {
469 					DEBUG_PAGE_ACCESS_START(page);
470 					page->busy = true;
471 				}
472 			}
473 
474 			locker.Unlock();
475 
476 			// copy the pages to our buffer
477 			for (size_t i = 0; i < pagesToWrite; i++) {
478 				if (vm_page* page = pages[i]) {
479 					error = vm_memcpy_from_physical(buffer + i * B_PAGE_SIZE,
480 						page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE,
481 						false);
482 					if (error != B_OK) {
483 						dprintf("ramdisk: error copying page %" B_PRIu64
484 							" data: %s\n", (uint64)page->physical_page_number,
485 							strerror(error));
486 						break;
487 					}
488 				} else
489 					memset(buffer + i * B_PAGE_SIZE, 0, B_PAGE_SIZE);
490 			}
491 
492 			// write the buffer
493 			if (error == B_OK) {
494 				ssize_t bytesWritten = pwrite(fd, buffer,
495 					pagesToWrite * B_PAGE_SIZE, offset);
496 				if (bytesWritten < 0) {
497 					dprintf("ramdisk: error writing pages to file: %s\n",
498 						strerror(bytesWritten));
499 					error = bytesWritten;
500 				}
501 				else if ((size_t)bytesWritten != pagesToWrite * B_PAGE_SIZE) {
502 					dprintf("ramdisk: error writing pages to file: short "
503 						"write (%zd/%zu)\n", bytesWritten,
504 						pagesToWrite * B_PAGE_SIZE);
505 					error = B_ERROR;
506 				}
507 			}
508 
509 			// mark the pages unbusy; on success, also clear their modified flag
510 			locker.Lock();
511 
512 			for (size_t i = 0; i < pagesToWrite; i++) {
513 				if (vm_page* page = pages[i]) {
514 					if (error == B_OK)
515 						page->modified = false;
516 					fCache->MarkPageUnbusy(page);
517 					DEBUG_PAGE_ACCESS_END(page);
518 				}
519 			}
520 
521 			if (error != B_OK)
522 				break;
523 
524 			offset += pagesToWrite * B_PAGE_SIZE;
525 		}
526 
527 		return error;
528 	}
529 
530 	status_t Trim(fs_trim_data* trimData)
531 	{
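		// For a RAM disk trimming simply means freeing the backing pages of
		// the given ranges, returning the memory to the system.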
532 		TRACE("trim_device()\n");
533 
534 		trimData->trimmed_size = 0;
535 
536 		const off_t deviceSize = fDeviceSize; // in bytes
537 		if (deviceSize < 0)
538 			return B_BAD_VALUE;
539 
540 		STATIC_ASSERT(sizeof(deviceSize) <= sizeof(uint64));
541 		ASSERT(deviceSize >= 0);
542 
543 		// Do not trim past device end
544 		for (uint32 i = 0; i < trimData->range_count; i++) {
545 			uint64 offset = trimData->ranges[i].offset;
546 			uint64& size = trimData->ranges[i].size;
547 
548 			if (offset >= (uint64)deviceSize)
549 				return B_BAD_VALUE;
550 			size = min_c(size, (uint64)deviceSize - offset);
551 		}
552 
553 		status_t result = B_OK;
554 		uint64 trimmedSize = 0;
555 		for (uint32 i = 0; i < trimData->range_count; i++) {
556 			uint64 offset = trimData->ranges[i].offset;
557 			uint64 length = trimData->ranges[i].size;
558 
559 			// Round the offset up and the length down to page size multiples.
560 			// The offset is rounded up, so some space may be left
561 			// (not trimmed) at the start of the range.
562 			offset = (offset + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
563 			// Adjust the length for the possibly skipped range
564 			length -= offset - trimData->ranges[i].offset;
565 			// The length is rounded down, so some space at the end may also
566 			// be left (not trimmed).
567 			length &= ~(B_PAGE_SIZE - 1);
568 
569 			if (length == 0)
570 				continue;
571 
572 			TRACE("ramdisk: trim %" B_PRIu64 " bytes from %" B_PRIu64 "\n",
573 				length, offset);
574 
575 			ASSERT(offset % B_PAGE_SIZE == 0);
576 			ASSERT(length % B_PAGE_SIZE == 0);
577 
578 			vm_page** pages = new(std::nothrow) vm_page*[length / B_PAGE_SIZE];
579 			if (pages == NULL) {
580 				result = B_NO_MEMORY;
581 				break;
582 			}
583 			ArrayDeleter<vm_page*> pagesDeleter(pages);
584 
585 			_GetPages((off_t)offset, (off_t)length, false, pages);
586 
587 			AutoLocker<VMCache> locker(fCache);
588 			uint64 j;
589 			for (j = 0; j < length / B_PAGE_SIZE; j++) {
590 				// If we run out of pages (some may already be trimmed), stop.
591 				if (pages[j] == NULL)
592 					break;
593 
594 				TRACE("free range %" B_PRIu32 ", page %" B_PRIu64 ", offset %"
595 					B_PRIu64 "\n", i, j, offset);
596 				if (pages[j]->Cache())
597 					fCache->RemovePage(pages[j]);
598 				vm_page_free(NULL, pages[j]);
599 				trimmedSize += B_PAGE_SIZE;
600 			}
601 		}
602 
603 		trimData->trimmed_size = trimmedSize;
604 
605 		return result;
606 	}
607 
610 	status_t DoIO(IORequest* request)
611 	{
612 		return fIOScheduler->ScheduleRequest(request);
613 	}
614 
615 	virtual status_t PublishDevice()
616 	{
617 		return sDeviceManager->publish_device(Node(), fDeviceName,
618 			kRawDeviceModuleName);
619 	}
620 
621 private:
622 	static status_t _DoIOEntry(void* data, IOOperation* operation)
623 	{
624 		return ((RawDevice*)data)->_DoIO(operation);
625 	}
626 
627 	status_t _DoIO(IOOperation* operation)
628 	{
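		// Get (and for writes allocate) all pages covered by the operation,
		// copy the data between them and the I/O vectors, then release the
		// pages again.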
629 		off_t offset = operation->Offset();
630 		generic_size_t length = operation->Length();
631 
632 		ASSERT(offset % B_PAGE_SIZE == 0);
633 		ASSERT(length % B_PAGE_SIZE == 0);
634 
635 		const generic_io_vec* vecs = operation->Vecs();
636 		generic_size_t vecOffset = 0;
637 		bool isWrite = operation->IsWrite();
638 
639 		vm_page** pages = new(std::nothrow) vm_page*[length / B_PAGE_SIZE];
640 		if (pages == NULL)
641 			return B_NO_MEMORY;
642 		ArrayDeleter<vm_page*> pagesDeleter(pages);
643 
644 		_GetPages(offset, length, isWrite, pages);
645 
646 		status_t error = B_OK;
647 		size_t index = 0;
648 
649 		while (length > 0) {
650 			vm_page* page = pages[index];
651 
652 			if (isWrite)
653 				page->modified = true;
654 
655 			error = _CopyData(page, vecs, vecOffset, isWrite);
656 			if (error != B_OK)
657 				break;
658 
659 			offset += B_PAGE_SIZE;
660 			length -= B_PAGE_SIZE;
661 			index++;
662 		}
663 
664 		_PutPages(operation->Offset(), operation->Length(), pages,
665 			error == B_OK);
666 
667 		if (error != B_OK) {
668 			fIOScheduler->OperationCompleted(operation, error, 0);
669 			return error;
670 		}
671 
672 		fIOScheduler->OperationCompleted(operation, B_OK, operation->Length());
673 		return B_OK;
674 	}
675 
676 	void _GetPages(off_t offset, off_t length, bool isWrite, vm_page** pages)
677 	{
678 		// TODO: This method is duplicated in ramfs' DataContainer. Perhaps it
679 		// should be put into a common location?
680 
681 		// get the pages we already have in the cache
682 		AutoLocker<VMCache> locker(fCache);
683 
684 		size_t pageCount = length / B_PAGE_SIZE;
685 		size_t index = 0;
686 		size_t missingPages = 0;
687 
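		// Mark every page we find busy, so it can neither be freed nor
		// modified while we use it without holding the cache lock.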
688 		while (length > 0) {
689 			vm_page* page = fCache->LookupPage(offset);
690 			if (page != NULL) {
691 				if (page->busy) {
692 					fCache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
693 					continue;
694 				}
695 
696 				DEBUG_PAGE_ACCESS_START(page);
697 				page->busy = true;
698 			} else
699 				missingPages++;
700 
701 			pages[index++] = page;
702 			offset += B_PAGE_SIZE;
703 			length -= B_PAGE_SIZE;
704 		}
705 
706 		locker.Unlock();
707 
708 		// For a write we need to reserve the missing pages.
709 		if (isWrite && missingPages > 0) {
710 			vm_page_reservation reservation;
711 			vm_page_reserve_pages(&reservation, missingPages,
712 				VM_PRIORITY_SYSTEM);
713 
714 			for (size_t i = 0; i < pageCount; i++) {
715 				if (pages[i] != NULL)
716 					continue;
717 
718 				pages[i] = vm_page_allocate_page(&reservation,
719 					PAGE_STATE_WIRED | VM_PAGE_ALLOC_BUSY);
720 
721 				if (--missingPages == 0)
722 					break;
723 			}
724 
725 			vm_page_unreserve_pages(&reservation);
726 		}
727 	}
728 
729 	void _PutPages(off_t offset, off_t length, vm_page** pages, bool success)
730 	{
731 		// TODO: This method is duplicated in ramfs' DataContainer. Perhaps it
732 		// should be put into a common location?
733 
734 		AutoLocker<VMCache> locker(fCache);
735 
736 		// Mark all pages unbusy. On error, free the newly allocated pages.
737 		size_t index = 0;
738 
739 		while (length > 0) {
740 			vm_page* page = pages[index++];
741 			if (page != NULL) {
742 				if (page->CacheRef() == NULL) {
743 					if (success) {
744 						fCache->InsertPage(page, offset);
745 						fCache->MarkPageUnbusy(page);
746 						DEBUG_PAGE_ACCESS_END(page);
747 					} else
748 						vm_page_free(NULL, page);
749 				} else {
750 					fCache->MarkPageUnbusy(page);
751 					DEBUG_PAGE_ACCESS_END(page);
752 				}
753 			}
754 
755 			offset += B_PAGE_SIZE;
756 			length -= B_PAGE_SIZE;
757 		}
758 	}
759 
760 	status_t _CopyData(vm_page* page, const generic_io_vec*& vecs,
761 		generic_size_t& vecOffset, bool toPage)
762 	{
763 		// map the page into virtual memory (pinned to the current CPU)
764 		Thread* thread = thread_get_current_thread();
765 		uint8* pageData = NULL;
766 		void* handle;
767 		if (page != NULL) {
768 			thread_pin_to_current_cpu(thread);
769 			addr_t virtualAddress;
770 			status_t error = vm_get_physical_page_current_cpu(
771 				page->physical_page_number * B_PAGE_SIZE, &virtualAddress,
772 				&handle);
773 			if (error != B_OK) {
774 				thread_unpin_from_current_cpu(thread);
775 				return error;
776 			}
777 
778 			pageData = (uint8*)virtualAddress;
779 		}
780 
781 		status_t error = B_OK;
782 		size_t length = B_PAGE_SIZE;
783 		while (length > 0) {
784 			size_t toCopy = std::min((generic_size_t)length,
785 				vecs->length - vecOffset);
786 
787 			if (toCopy == 0) {
788 				vecs++;
789 				vecOffset = 0;
790 				continue;
791 			}
792 
793 			phys_addr_t vecAddress = vecs->base + vecOffset;
794 
795 			error = toPage
796 				? vm_memcpy_from_physical(pageData, vecAddress, toCopy, false)
797 				: (page != NULL
798 					? vm_memcpy_to_physical(vecAddress, pageData, toCopy, false)
799 					: vm_memset_physical(vecAddress, 0, toCopy));
800 			if (error != B_OK)
801 				break;
802 
803 			pageData += toCopy;
804 			length -= toCopy;
805 			vecOffset += toCopy;
806 		}
807 
808 		if (page != NULL) {
809 			vm_put_physical_page_current_cpu((addr_t)pageData, handle);
810 			thread_unpin_from_current_cpu(thread);
811 		}
812 
813 		return error;
814 	}
815 
816 	status_t _LoadFile()
817 	{
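		// Pre-populate the cache with the contents of the backing file. Pages
		// that turn out to be completely zero are not inserted into the cache;
		// they are kept and reused for the next chunk instead.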
818 		static const size_t kPageCountPerIteration = 1024;
819 
820 		int fd = open(fFilePath, O_RDONLY);
821 		if (fd < 0)
822 			return errno;
823 		FileDescriptorCloser fdCloser(fd);
824 
825 		vm_page** pages = new(std::nothrow) vm_page*[kPageCountPerIteration];
826 		ArrayDeleter<vm_page*> pagesDeleter(pages);
827 
828 		uint8* buffer = (uint8*)malloc(kPageCountPerIteration * B_PAGE_SIZE);
829 		MemoryDeleter bufferDeleter(buffer);
830 			// TODO: Ideally we wouldn't use a buffer to read the file content,
831 			// but read into the pages we allocated directly. Unfortunately
832 			// there's no API to do that yet.
833 
834 		if (pages == NULL || buffer == NULL)
835 			return B_NO_MEMORY;
836 
837 		status_t error = B_OK;
838 
839 		page_num_t allocatedPages = 0;
840 		off_t offset = 0;
841 		off_t sizeRemaining = fDeviceSize;
842 		while (sizeRemaining > 0) {
843 			// Note: fDeviceSize is B_PAGE_SIZE aligned.
844 			size_t pagesToRead = std::min(kPageCountPerIteration,
845 				size_t(sizeRemaining / B_PAGE_SIZE));
846 
847 			// allocate the missing pages
848 			if (allocatedPages < pagesToRead) {
849 				vm_page_reservation reservation;
850 				vm_page_reserve_pages(&reservation,
851 					pagesToRead - allocatedPages, VM_PRIORITY_SYSTEM);
852 
853 				while (allocatedPages < pagesToRead) {
854 					pages[allocatedPages++]
855 						= vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
856 				}
857 
858 				vm_page_unreserve_pages(&reservation);
859 			}
860 
861 			// read from the file
862 			size_t bytesToRead = pagesToRead * B_PAGE_SIZE;
863 			ssize_t bytesRead = pread(fd, buffer, bytesToRead, offset);
864 			if (bytesRead < 0) {
865 				error = bytesRead;
866 				break;
867 			}
868 			size_t pagesRead = (bytesRead + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
869 			if (pagesRead < pagesToRead) {
870 				error = B_ERROR;
871 				break;
872 			}
873 
874 			// clear the last read page, if partial
875 			if ((size_t)bytesRead < pagesRead * B_PAGE_SIZE) {
876 				memset(buffer + bytesRead, 0,
877 					pagesRead * B_PAGE_SIZE - bytesRead);
878 			}
879 
880 			// copy data to allocated pages
881 			for (size_t i = 0; i < pagesRead; i++) {
882 				vm_page* page = pages[i];
883 				error = vm_memcpy_to_physical(
884 					page->physical_page_number * B_PAGE_SIZE,
885 					buffer + i * B_PAGE_SIZE, B_PAGE_SIZE, false);
886 				if (error != B_OK)
887 					break;
888 			}
889 
890 			if (error != B_OK)
891 				break;
892 
893 			// Add pages to cache. Ignore clear pages, though. Move those to the
894 			// beginning of the array, so we can reuse them in the next
895 			// iteration.
896 			AutoLocker<VMCache> locker(fCache);
897 
898 			size_t clearPages = 0;
899 			for (size_t i = 0; i < pagesRead; i++) {
900 				uint64* pageData = (uint64*)(buffer + i * B_PAGE_SIZE);
901 				bool isClear = true;
902 				for (size_t k = 0; isClear && k < B_PAGE_SIZE / 8; k++)
903 					isClear = pageData[k] == 0;
904 
905 				if (isClear) {
906 					pages[clearPages++] = pages[i];
907 				} else {
908 					fCache->InsertPage(pages[i], offset + i * B_PAGE_SIZE);
909 					DEBUG_PAGE_ACCESS_END(pages[i]);
910 				}
911 			}
912 
913 			locker.Unlock();
914 
915 			// Move any left-over allocated pages directly behind the clear pages
916 			// and compute the new count of reusable (allocated) pages.
917 			if (pagesRead < allocatedPages) {
918 				size_t count = allocatedPages - pagesRead;
919 				memcpy(pages + clearPages, pages + pagesRead,
920 					count * sizeof(vm_page*));
921 				clearPages += count;
922 			}
923 			allocatedPages = clearPages;
924 
925 			offset += pagesRead * B_PAGE_SIZE;
926 			sizeRemaining -= pagesRead * B_PAGE_SIZE;
927 		}
928 
929 		// free left-over allocated pages
930 		for (size_t i = 0; i < allocatedPages; i++)
931 			vm_page_free(NULL, pages[i]);
932 
933 		return error;
934 	}
935 
936 private:
937 	int32			fID;
938 	bool			fUnregistered;
939 	off_t			fDeviceSize;
940 	char*			fDeviceName;
941 	char*			fFilePath;
942 	VMCache*		fCache;
943 	DMAResource*	fDMAResource;
944 	IOScheduler*	fIOScheduler;
945 };
946 
947 
948 struct RawDeviceCookie {
949 	RawDeviceCookie(RawDevice* device, int openMode)
950 		:
951 		fDevice(device),
952 		fOpenMode(openMode)
953 	{
954 	}
955 
956 	RawDevice* Device() const	{ return fDevice; }
957 	int OpenMode() const		{ return fOpenMode; }
958 
959 private:
960 	RawDevice*	fDevice;
961 	int			fOpenMode;
962 };
963 
964 
965 // #pragma mark -
966 
967 
968 static int32
969 allocate_raw_device_id()
970 {
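	// Find and claim the first unused bit in the sUsedRawDeviceIDs bit mask.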
971 	MutexLocker deviceListLocker(sDeviceListLock);
972 	for (size_t i = 0; i < sizeof(sUsedRawDeviceIDs) * 8; i++) {
973 		if ((sUsedRawDeviceIDs & ((uint64)1 << i)) == 0) {
974 			sUsedRawDeviceIDs |= (uint64)1 << i;
975 			return (int32)i;
976 		}
977 	}
978 
979 	return -1;
980 }
981 
982 
983 static void
984 free_raw_device_id(int32 id)
985 {
986 	MutexLocker deviceListLocker(sDeviceListLock);
987 	sUsedRawDeviceIDs &= ~((uint64)1 << id);
988 }
989 
990 
991 static RawDevice*
992 find_raw_device(int32 id)
993 {
994 	for (RawDeviceList::Iterator it = sDeviceList.GetIterator();
995 			RawDevice* device = it.Next();) {
996 		if (device->ID() == id)
997 			return device;
998 	}
999 
1000 	return NULL;
1001 }
1002 
1003 
1004 static status_t
1005 ioctl_register(ControlDevice* controlDevice, ram_disk_ioctl_register* request)
1006 {
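	// A non-empty path means the RAM disk shall be backed by a file, whose
	// size also determines the device size. Otherwise the requested size is
	// used and the disk starts out empty.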
1007 	KPath path;
1008 	uint64 deviceSize = 0;
1009 
1010 	if (request->path[0] != '\0') {
1011 		// check if the path is null-terminated
1012 		if (strnlen(request->path, sizeof(request->path))
1013 				== sizeof(request->path)) {
1014 			return B_BAD_VALUE;
1015 		}
1016 
1017 		// get a normalized file path
1018 		status_t error = path.SetTo(request->path, true);
1019 		if (error != B_OK) {
1020 			dprintf("ramdisk: register: Invalid path \"%s\": %s\n",
1021 				request->path, strerror(error));
1022 			return B_BAD_VALUE;
1023 		}
1024 
1025 		struct stat st;
1026 		if (lstat(path.Path(), &st) != 0) {
1027 			dprintf("ramdisk: register: Failed to stat \"%s\": %s\n",
1028 				path.Path(), strerror(errno));
1029 			return errno;
1030 		}
1031 
1032 		if (!S_ISREG(st.st_mode)) {
1033 			dprintf("ramdisk: register: \"%s\" is not a file!\n", path.Path());
1034 			return B_BAD_VALUE;
1035 		}
1036 
1037 		deviceSize = st.st_size;
1038 	} else {
1039 		deviceSize = request->size;
1040 	}
1041 
1042 	return controlDevice->Register(path.Length() > 0 ? path.Path() : NULL,
1043 		deviceSize, request->id);
1044 }
1045 
1046 
1047 static status_t
1048 ioctl_unregister(ControlDevice* controlDevice,
1049 	ram_disk_ioctl_unregister* request)
1050 {
1051 	// find the device in the list and unregister it
1052 	MutexLocker locker(sDeviceListLock);
1053 	RawDevice* device = find_raw_device(request->id);
1054 	if (device == NULL)
1055 		return B_ENTRY_NOT_FOUND;
1056 
1057 	// mark unregistered before we unlock
1058 	if (device->IsUnregistered())
1059 		return B_BUSY;
1060 	device->SetUnregistered(true);
1061 	locker.Unlock();
1062 
1063 	device_node* node = device->Node();
1064 	status_t error = sDeviceManager->unpublish_device(node,
1065 		device->DeviceName());
1066 	if (error != B_OK) {
1067 		dprintf("ramdisk: unregister: Failed to unpublish device \"%s\": %s\n",
1068 			device->DeviceName(), strerror(error));
1069 		return error;
1070 	}
1071 
1072 	error = sDeviceManager->unregister_node(node);
1073 	// Note: B_BUSY is OK. The node will be removed as soon as possible.
1074 	if (error != B_OK && error != B_BUSY) {
1075 		dprintf("ramdisk: unregister: Failed to unregister node for device %"
1076 			B_PRId32 ": %s\n", request->id, strerror(error));
1077 		return error;
1078 	}
1079 
1080 	return B_OK;
1081 }
1082 
1083 
1084 static status_t
1085 ioctl_info(RawDevice* device, ram_disk_ioctl_info* request)
1086 {
1087 	device->GetInfo(*request);
1088 	return B_OK;
1089 }
1090 
1091 
1092 template<typename DeviceType, typename Request>
1093 static status_t
1094 handle_ioctl(DeviceType* device,
1095 	status_t (*handler)(DeviceType*, Request*), void* buffer)
1096 {
1097 	// copy request to the kernel heap
1098 	if (buffer == NULL || !IS_USER_ADDRESS(buffer))
1099 		return B_BAD_ADDRESS;
1100 
1101 	Request* request = new(std::nothrow) Request;
1102 	if (request == NULL)
1103 		return B_NO_MEMORY;
1104 	ObjectDeleter<Request> requestDeleter(request);
1105 
1106 	if (user_memcpy(request, buffer, sizeof(Request)) != B_OK)
1107 		return B_BAD_ADDRESS;
1108 
1109 	// handle the ioctl
1110 	status_t error = handler(device, request);
1111 	if (error != B_OK)
1112 		return error;
1113 
1114 	// copy the request back to userland
1115 	if (user_memcpy(buffer, request, sizeof(Request)) != B_OK)
1116 		return B_BAD_ADDRESS;
1117 
1118 	return B_OK;
1119 }
1120 
1121 
1122 //	#pragma mark - driver
1123 
1124 
1125 static float
1126 ram_disk_driver_supports_device(device_node* parent)
1127 {
1128 	const char* bus = NULL;
1129 	if (sDeviceManager->get_attr_string(parent, B_DEVICE_BUS, &bus, false)
1130 			== B_OK
1131 		&& strcmp(bus, "generic") == 0) {
1132 		return 0.8;
1133 	}
1134 
1135 	return -1;
1136 }
1137 
1138 
1139 static status_t
1140 ram_disk_driver_register_device(device_node* parent)
1141 {
1142 	device_attr attrs[] = {
1143 		{B_DEVICE_PRETTY_NAME, B_STRING_TYPE,
1144 			{string: "RAM Disk Control Device"}},
1145 		{NULL}
1146 	};
1147 
1148 	return sDeviceManager->register_node(parent, kDriverModuleName, attrs, NULL,
1149 		NULL);
1150 }
1151 
1152 
1153 static status_t
1154 ram_disk_driver_init_driver(device_node* node, void** _driverCookie)
1155 {
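	// A node with a device size attribute represents a registered RAM disk
	// (raw device); the node without one is the control device.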
1156 	uint64 deviceSize;
1157 	if (sDeviceManager->get_attr_uint64(node, kDeviceSizeItem, &deviceSize,
1158 			false) == B_OK) {
1159 		int32 id = -1;
1160 		sDeviceManager->get_attr_uint32(node, kDeviceIDItem, (uint32*)&id,
1161 			false);
1162 		if (id < 0)
1163 			return B_ERROR;
1164 
1165 		const char* filePath = NULL;
1166 		sDeviceManager->get_attr_string(node, kFilePathItem, &filePath, false);
1167 
1168 		RawDevice* device = new(std::nothrow) RawDevice(node);
1169 		if (device == NULL)
1170 			return B_NO_MEMORY;
1171 
1172 		status_t error = device->Init(id, filePath, deviceSize);
1173 		if (error != B_OK) {
1174 			delete device;
1175 			return error;
1176 		}
1177 
1178 		*_driverCookie = (Device*)device;
1179 	} else {
1180 		ControlDevice* device = new(std::nothrow) ControlDevice(node);
1181 		if (device == NULL)
1182 			return B_NO_MEMORY;
1183 
1184 		*_driverCookie = (Device*)device;
1185 	}
1186 
1187 	return B_OK;
1188 }
1189 
1190 
1191 static void
1192 ram_disk_driver_uninit_driver(void* driverCookie)
1193 {
1194 	Device* device = (Device*)driverCookie;
1195 	if (RawDevice* rawDevice = dynamic_cast<RawDevice*>(device))
1196 		free_raw_device_id(rawDevice->ID());
1197 	delete device;
1198 }
1199 
1200 
1201 static status_t
1202 ram_disk_driver_register_child_devices(void* driverCookie)
1203 {
1204 	Device* device = (Device*)driverCookie;
1205 	return device->PublishDevice();
1206 }
1207 
1208 
1209 //	#pragma mark - control device
1210 
1211 
1212 static status_t
1213 ram_disk_control_device_init_device(void* driverCookie, void** _deviceCookie)
1214 {
1215 	*_deviceCookie = driverCookie;
1216 	return B_OK;
1217 }
1218 
1219 
1220 static void
1221 ram_disk_control_device_uninit_device(void* deviceCookie)
1222 {
1223 }
1224 
1225 
1226 static status_t
1227 ram_disk_control_device_open(void* deviceCookie, const char* path, int openMode,
1228 	void** _cookie)
1229 {
1230 	*_cookie = deviceCookie;
1231 	return B_OK;
1232 }
1233 
1234 
1235 static status_t
1236 ram_disk_control_device_close(void* cookie)
1237 {
1238 	return B_OK;
1239 }
1240 
1241 
1242 static status_t
1243 ram_disk_control_device_free(void* cookie)
1244 {
1245 	return B_OK;
1246 }
1247 
1248 
1249 static status_t
1250 ram_disk_control_device_read(void* cookie, off_t position, void* buffer,
1251 	size_t* _length)
1252 {
1253 	return B_BAD_VALUE;
1254 }
1255 
1256 
1257 static status_t
1258 ram_disk_control_device_write(void* cookie, off_t position, const void* data,
1259 	size_t* _length)
1260 {
1261 	return B_BAD_VALUE;
1262 }
1263 
1264 
1265 static status_t
1266 ram_disk_control_device_control(void* cookie, uint32 op, void* buffer,
1267 	size_t length)
1268 {
1269 	ControlDevice* device = (ControlDevice*)cookie;
1270 
1271 	switch (op) {
1272 		case RAM_DISK_IOCTL_REGISTER:
1273 			return handle_ioctl(device, &ioctl_register, buffer);
1274 
1275 		case RAM_DISK_IOCTL_UNREGISTER:
1276 			return handle_ioctl(device, &ioctl_unregister, buffer);
1277 	}
1278 
1279 	return B_BAD_VALUE;
1280 }
1281 
1282 
1283 //	#pragma mark - raw device
1284 
1285 
1286 static status_t
1287 ram_disk_raw_device_init_device(void* driverCookie, void** _deviceCookie)
1288 {
1289 	RawDevice* device = static_cast<RawDevice*>((Device*)driverCookie);
1290 
1291 	status_t error = device->Prepare();
1292 	if (error != B_OK)
1293 		return error;
1294 
1295 	*_deviceCookie = device;
1296 	return B_OK;
1297 }
1298 
1299 
1300 static void
1301 ram_disk_raw_device_uninit_device(void* deviceCookie)
1302 {
1303 	RawDevice* device = (RawDevice*)deviceCookie;
1304 	device->Unprepare();
1305 }
1306 
1307 
1308 static status_t
1309 ram_disk_raw_device_open(void* deviceCookie, const char* path, int openMode,
1310 	void** _cookie)
1311 {
1312 	RawDevice* device = (RawDevice*)deviceCookie;
1313 
1314 	RawDeviceCookie* cookie = new(std::nothrow) RawDeviceCookie(device,
1315 		openMode);
1316 	if (cookie == NULL)
1317 		return B_NO_MEMORY;
1318 
1319 	*_cookie = cookie;
1320 	return B_OK;
1321 }
1322 
1323 
1324 static status_t
1325 ram_disk_raw_device_close(void* cookie)
1326 {
1327 	return B_OK;
1328 }
1329 
1330 
1331 static status_t
1332 ram_disk_raw_device_free(void* _cookie)
1333 {
1334 	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1335 	delete cookie;
1336 	return B_OK;
1337 }
1338 
1339 
1340 static status_t
1341 ram_disk_raw_device_read(void* _cookie, off_t pos, void* buffer,
1342 	size_t* _length)
1343 {
1344 	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1345 	RawDevice* device = cookie->Device();
1346 
1347 	size_t length = *_length;
1348 
1349 	if (pos >= device->DeviceSize())
1350 		return B_BAD_VALUE;
1351 	if (pos + (off_t)length > device->DeviceSize())
1352 		length = device->DeviceSize() - pos;
1353 
1354 	IORequest request;
1355 	status_t status = request.Init(pos, (addr_t)buffer, length, false, 0);
1356 	if (status != B_OK)
1357 		return status;
1358 
1359 	status = device->DoIO(&request);
1360 	if (status != B_OK)
1361 		return status;
1362 
1363 	status = request.Wait(0, 0);
1364 	if (status == B_OK)
1365 		*_length = length;
1366 	return status;
1367 }
1368 
1369 
1370 static status_t
1371 ram_disk_raw_device_write(void* _cookie, off_t pos, const void* buffer,
1372 	size_t* _length)
1373 {
1374 	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1375 	RawDevice* device = cookie->Device();
1376 
1377 	size_t length = *_length;
1378 
1379 	if (pos >= device->DeviceSize())
1380 		return B_BAD_VALUE;
1381 	if (pos + (off_t)length > device->DeviceSize())
1382 		length = device->DeviceSize() - pos;
1383 
1384 	IORequest request;
1385 	status_t status = request.Init(pos, (addr_t)buffer, length, true, 0);
1386 	if (status != B_OK)
1387 		return status;
1388 
1389 	status = device->DoIO(&request);
1390 	if (status != B_OK)
1391 		return status;
1392 
1393 	status = request.Wait(0, 0);
1394 	if (status == B_OK)
1395 		*_length = length;
1396 
1397 	return status;
1398 }
1399 
1400 
1401 static status_t
1402 ram_disk_raw_device_io(void* _cookie, io_request* request)
1403 {
1404 	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1405 	RawDevice* device = cookie->Device();
1406 
1407 	return device->DoIO(request);
1408 }
1409 
1410 
1411 static status_t
1412 ram_disk_raw_device_control(void* _cookie, uint32 op, void* buffer,
1413 	size_t length)
1414 {
1415 	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1416 	RawDevice* device = cookie->Device();
1417 
1418 	switch (op) {
1419 		case B_GET_DEVICE_SIZE:
1420 		{
1421 			size_t size = device->DeviceSize();
1422 			return user_memcpy(buffer, &size, sizeof(size_t));
1423 		}
1424 
1425 		case B_SET_NONBLOCKING_IO:
1426 		case B_SET_BLOCKING_IO:
1427 			return B_OK;
1428 
1429 		case B_GET_READ_STATUS:
1430 		case B_GET_WRITE_STATUS:
1431 		{
1432 			bool value = true;
1433 			return user_memcpy(buffer, &value, sizeof(bool));
1434 		}
1435 
1436 		case B_GET_GEOMETRY:
1437 		case B_GET_BIOS_GEOMETRY:
1438 		{
1439 			device_geometry geometry;
1440 			geometry.bytes_per_sector = B_PAGE_SIZE;
1441 			geometry.sectors_per_track = 1;
1442 			geometry.cylinder_count = device->DeviceSize() / B_PAGE_SIZE;
1443 				// TODO: We're limited to 2^32 * B_PAGE_SIZE, if we don't use
1444 				// sectors_per_track and head_count.
1445 			geometry.head_count = 1;
1446 			geometry.device_type = B_DISK;
1447 			geometry.removable = true;
1448 			geometry.read_only = false;
1449 			geometry.write_once = false;
1450 
1451 			return user_memcpy(buffer, &geometry, sizeof(device_geometry));
1452 		}
1453 
1454 		case B_GET_MEDIA_STATUS:
1455 		{
1456 			status_t status = B_OK;
1457 			return user_memcpy(buffer, &status, sizeof(status_t));
1458 		}
1459 
1460 		case B_GET_ICON_NAME:
1461 			return user_strlcpy((char*)buffer, "devices/drive-ramdisk",
1462 				B_FILE_NAME_LENGTH);
1463 
1464 		case B_GET_VECTOR_ICON:
1465 		{
1466 			device_icon iconData;
1467 			if (length != sizeof(device_icon))
1468 				return B_BAD_VALUE;
1469 			if (user_memcpy(&iconData, buffer, sizeof(device_icon)) != B_OK)
1470 				return B_BAD_ADDRESS;
1471 
1472 			if (iconData.icon_size >= (int32)sizeof(kRamdiskIcon)) {
1473 				if (user_memcpy(iconData.icon_data, kRamdiskIcon,
1474 						sizeof(kRamdiskIcon)) != B_OK)
1475 					return B_BAD_ADDRESS;
1476 			}
1477 
1478 			iconData.icon_size = sizeof(kRamdiskIcon);
1479 			return user_memcpy(buffer, &iconData, sizeof(device_icon));
1480 		}
1481 
1482 		case B_SET_UNINTERRUPTABLE_IO:
1483 		case B_SET_INTERRUPTABLE_IO:
1484 		case B_FLUSH_DRIVE_CACHE:
1485 			return B_OK;
1486 
1487 		case RAM_DISK_IOCTL_FLUSH:
1488 		{
1489 			status_t error = device->Flush();
1490 			if (error != B_OK) {
1491 				dprintf("ramdisk: flush: Failed to flush device: %s\n",
1492 					strerror(error));
1493 				return error;
1494 			}
1495 
1496 			return B_OK;
1497 		}
1498 
1499 		case B_TRIM_DEVICE:
1500 		{
1501 			// We know the buffer is kernel-side because it has been
1502 			// preprocessed in devfs
1503 			ASSERT(IS_KERNEL_ADDRESS(buffer));
1504 			return device->Trim((fs_trim_data*)buffer);
1505 		}
1506 
1507 		case RAM_DISK_IOCTL_INFO:
1508 			return handle_ioctl(device, &ioctl_info, buffer);
1509 	}
1510 
1511 	return B_BAD_VALUE;
1512 }
1513 
1514 
1515 // #pragma mark -
1516 
1517 
1518 module_dependency module_dependencies[] = {
1519 	{B_DEVICE_MANAGER_MODULE_NAME, (module_info**)&sDeviceManager},
1520 	{}
1521 };
1522 
1523 
1524 static const struct driver_module_info sChecksumDeviceDriverModule = {
1525 	{
1526 		kDriverModuleName,
1527 		0,
1528 		NULL
1529 	},
1530 
1531 	ram_disk_driver_supports_device,
1532 	ram_disk_driver_register_device,
1533 	ram_disk_driver_init_driver,
1534 	ram_disk_driver_uninit_driver,
1535 	ram_disk_driver_register_child_devices
1536 };
1537 
1538 static const struct device_module_info sChecksumControlDeviceModule = {
1539 	{
1540 		kControlDeviceModuleName,
1541 		0,
1542 		NULL
1543 	},
1544 
1545 	ram_disk_control_device_init_device,
1546 	ram_disk_control_device_uninit_device,
1547 	NULL,
1548 
1549 	ram_disk_control_device_open,
1550 	ram_disk_control_device_close,
1551 	ram_disk_control_device_free,
1552 
1553 	ram_disk_control_device_read,
1554 	ram_disk_control_device_write,
1555 	NULL,	// io
1556 
1557 	ram_disk_control_device_control,
1558 
1559 	NULL,	// select
1560 	NULL	// deselect
1561 };
1562 
1563 static const struct device_module_info sChecksumRawDeviceModule = {
1564 	{
1565 		kRawDeviceModuleName,
1566 		0,
1567 		NULL
1568 	},
1569 
1570 	ram_disk_raw_device_init_device,
1571 	ram_disk_raw_device_uninit_device,
1572 	NULL,
1573 
1574 	ram_disk_raw_device_open,
1575 	ram_disk_raw_device_close,
1576 	ram_disk_raw_device_free,
1577 
1578 	ram_disk_raw_device_read,
1579 	ram_disk_raw_device_write,
1580 	ram_disk_raw_device_io,
1581 
1582 	ram_disk_raw_device_control,
1583 
1584 	NULL,	// select
1585 	NULL	// deselect
1586 };
1587 
1588 const module_info* modules[] = {
1589 	(module_info*)&sChecksumDeviceDriverModule,
1590 	(module_info*)&sChecksumControlDeviceModule,
1591 	(module_info*)&sChecksumRawDeviceModule,
1592 	NULL
1593 };
1594