xref: /haiku/src/add-ons/kernel/drivers/disk/virtual/ram_disk/ram_disk.cpp (revision 909af08f4328301fbdef1ffb41f566c3b5bec0c7)
1 /*
2  * Copyright 2010-2013, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include <file_systems/ram_disk/ram_disk.h>
8 
9 #include <ctype.h>
10 #include <errno.h>
11 #include <fcntl.h>
12 #include <stdio.h>
13 #include <string.h>
14 #include <unistd.h>
15 
16 #include <algorithm>
17 
18 #include <device_manager.h>
19 #include <Drivers.h>
20 
21 #include <AutoDeleter.h>
22 #include <StackOrHeapArray.h>
23 #include <util/AutoLock.h>
24 #include <util/DoublyLinkedList.h>
25 
26 #include <fs/KPath.h>
27 #include <lock.h>
28 #include <util/fs_trim_support.h>
29 #include <vm/vm.h>
30 #include <vm/VMCache.h>
31 #include <vm/vm_page.h>
32 
33 #include "cache_support.h"
34 #include "dma_resources.h"
35 #include "io_requests.h"
36 #include "IOSchedulerSimple.h"
37 
38 
39 //#define TRACE_RAM_DISK
40 #ifdef TRACE_RAM_DISK
41 #	define TRACE(x...)	dprintf(x)
42 #else
43 #	define TRACE(x...) do {} while (false)
44 #endif
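// To get trace output from this driver, uncomment the TRACE_RAM_DISK define
// above; TRACE() then forwards to the kernel log via dprintf().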
45 
46 
47 static const unsigned char kRamdiskIcon[] = {
48 	0x6e, 0x63, 0x69, 0x66, 0x0e, 0x03, 0x01, 0x00, 0x00, 0x02, 0x00, 0x16,
49 	0x02, 0x3c, 0xc7, 0xee, 0x38, 0x9b, 0xc0, 0xba, 0x16, 0x57, 0x3e, 0x39,
50 	0xb0, 0x49, 0x77, 0xc8, 0x42, 0xad, 0xc7, 0x00, 0xff, 0xff, 0xd3, 0x02,
51 	0x00, 0x06, 0x02, 0x3c, 0x96, 0x32, 0x3a, 0x4d, 0x3f, 0xba, 0xfc, 0x01,
52 	0x3d, 0x5a, 0x97, 0x4b, 0x57, 0xa5, 0x49, 0x84, 0x4d, 0x00, 0x47, 0x47,
53 	0x47, 0xff, 0xa5, 0xa0, 0xa0, 0x02, 0x00, 0x16, 0x02, 0xbc, 0x59, 0x2f,
54 	0xbb, 0x29, 0xa7, 0x3c, 0x0c, 0xe4, 0xbd, 0x0b, 0x7c, 0x48, 0x92, 0xc0,
55 	0x4b, 0x79, 0x66, 0x00, 0x7d, 0xff, 0xd4, 0x02, 0x00, 0x06, 0x02, 0x38,
56 	0xdb, 0xb4, 0x39, 0x97, 0x33, 0xbc, 0x4a, 0x33, 0x3b, 0xa5, 0x42, 0x48,
57 	0x6e, 0x66, 0x49, 0xee, 0x7b, 0x00, 0x59, 0x67, 0x56, 0xff, 0xeb, 0xb2,
58 	0xb2, 0x03, 0xa7, 0xff, 0x00, 0x03, 0xff, 0x00, 0x00, 0x04, 0x01, 0x80,
59 	0x03, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x6a, 0x05, 0x33, 0x02,
60 	0x00, 0x06, 0x02, 0x3a, 0x5d, 0x2c, 0x39, 0xf8, 0xb1, 0xb9, 0xdb, 0xf1,
61 	0x3a, 0x4c, 0x0f, 0x48, 0xae, 0xea, 0x4a, 0xc0, 0x91, 0x00, 0x74, 0x74,
62 	0x74, 0xff, 0x3e, 0x3d, 0x3d, 0x02, 0x00, 0x16, 0x02, 0x38, 0x22, 0x1b,
63 	0x3b, 0x11, 0x73, 0xbc, 0x5e, 0xb5, 0x39, 0x4b, 0xaa, 0x4a, 0x47, 0xf1,
64 	0x49, 0xc2, 0x1d, 0x00, 0xb0, 0xff, 0x83, 0x02, 0x00, 0x16, 0x03, 0x36,
65 	0xed, 0xe9, 0x36, 0xb9, 0x49, 0xba, 0x0a, 0xf6, 0x3a, 0x32, 0x6f, 0x4a,
66 	0x79, 0xef, 0x4b, 0x03, 0xe7, 0x00, 0x5a, 0x38, 0xdc, 0xff, 0x7e, 0x0d,
67 	0x0a, 0x06, 0x22, 0x3c, 0x22, 0x49, 0x44, 0x5b, 0x5a, 0x3e, 0x5a, 0x31,
68 	0x39, 0x25, 0x0a, 0x04, 0x22, 0x3c, 0x44, 0x4b, 0x5a, 0x31, 0x39, 0x25,
69 	0x0a, 0x04, 0x44, 0x4b, 0x44, 0x5b, 0x5a, 0x3e, 0x5a, 0x31, 0x0a, 0x04,
70 	0x22, 0x3c, 0x22, 0x49, 0x44, 0x5b, 0x44, 0x4b, 0x08, 0x02, 0x27, 0x43,
71 	0xb8, 0x14, 0xc1, 0xf1, 0x08, 0x02, 0x26, 0x43, 0x29, 0x44, 0x0a, 0x05,
72 	0x44, 0x5d, 0x49, 0x5d, 0x60, 0x3e, 0x5a, 0x3b, 0x5b, 0x3f, 0x0a, 0x04,
73 	0x3c, 0x5a, 0x5a, 0x3c, 0x5a, 0x36, 0x3c, 0x52, 0x0a, 0x04, 0x24, 0x4e,
74 	0x3c, 0x5a, 0x3c, 0x52, 0x24, 0x48, 0x06, 0x07, 0xaa, 0x3f, 0x42, 0x2e,
75 	0x24, 0x48, 0x3c, 0x52, 0x5a, 0x36, 0x51, 0x33, 0x51, 0x33, 0x50, 0x34,
76 	0x4b, 0x33, 0x4d, 0x34, 0x49, 0x32, 0x49, 0x30, 0x48, 0x31, 0x49, 0x30,
77 	0x06, 0x08, 0xfa, 0xfa, 0x42, 0x50, 0x3e, 0x54, 0x40, 0x55, 0x3f, 0xc7,
78 	0xeb, 0x41, 0xc8, 0x51, 0x42, 0xc9, 0x4f, 0x42, 0xc8, 0xda, 0x42, 0xca,
79 	0x41, 0xc0, 0xf1, 0x5d, 0x45, 0xca, 0x81, 0x46, 0xc7, 0xb7, 0x46, 0xc8,
80 	0xa9, 0x46, 0xc7, 0x42, 0x44, 0x51, 0x45, 0xc6, 0xb9, 0x43, 0xc6, 0x53,
81 	0x0a, 0x07, 0x3c, 0x5c, 0x40, 0x5c, 0x42, 0x5e, 0x48, 0x5e, 0x4a, 0x5c,
82 	0x46, 0x5a, 0x45, 0x4b, 0x06, 0x09, 0x9a, 0xf6, 0x03, 0x42, 0x2e, 0x24,
83 	0x48, 0x4e, 0x3c, 0x5a, 0x5a, 0x3c, 0x36, 0x51, 0x33, 0x51, 0x33, 0x50,
84 	0x34, 0x4b, 0x33, 0x4d, 0x34, 0x49, 0x32, 0x49, 0x30, 0x48, 0x31, 0x49,
85 	0x30, 0x18, 0x0a, 0x07, 0x01, 0x06, 0x00, 0x0a, 0x00, 0x01, 0x00, 0x10,
86 	0x01, 0x17, 0x84, 0x00, 0x04, 0x0a, 0x01, 0x01, 0x01, 0x00, 0x0a, 0x02,
87 	0x01, 0x02, 0x00, 0x0a, 0x03, 0x01, 0x03, 0x00, 0x0a, 0x04, 0x01, 0x04,
88 	0x10, 0x01, 0x17, 0x85, 0x20, 0x04, 0x0a, 0x06, 0x01, 0x05, 0x30, 0x24,
89 	0xb3, 0x99, 0x01, 0x17, 0x82, 0x00, 0x04, 0x0a, 0x05, 0x01, 0x05, 0x30,
90 	0x20, 0xb2, 0xe6, 0x01, 0x17, 0x82, 0x00, 0x04, 0x0a, 0x09, 0x01, 0x0b,
91 	0x02, 0x3e, 0x9b, 0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0,
92 	0x21, 0x48, 0xed, 0x4d, 0xc8, 0x5a, 0x02, 0x0a, 0x09, 0x01, 0x0b, 0x02,
93 	0x3e, 0x9b, 0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21,
94 	0x48, 0x4c, 0xd4, 0xc7, 0x9c, 0x11, 0x0a, 0x09, 0x01, 0x0b, 0x02, 0x3e,
95 	0x9b, 0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x47,
96 	0x5c, 0xe7, 0xc6, 0x2c, 0x1a, 0x0a, 0x09, 0x01, 0x0b, 0x02, 0x3e, 0x9b,
97 	0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x46, 0x1b,
98 	0xf5, 0xc4, 0x28, 0x4e, 0x0a, 0x08, 0x01, 0x0c, 0x12, 0x3e, 0xc0, 0x21,
99 	0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x45, 0xb6, 0x34,
100 	0xc4, 0x22, 0x1f, 0x01, 0x17, 0x84, 0x00, 0x04, 0x0a, 0x0a, 0x01, 0x07,
101 	0x02, 0x3e, 0xc0, 0x21, 0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0,
102 	0x21, 0x45, 0xb6, 0x34, 0xc4, 0x22, 0x1f, 0x0a, 0x0b, 0x01, 0x08, 0x02,
103 	0x3e, 0xc0, 0x21, 0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21,
104 	0x45, 0xb6, 0x34, 0xc4, 0x22, 0x1f, 0x0a, 0x0c, 0x01, 0x09, 0x02, 0x3e,
105 	0xc0, 0x21, 0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x45,
106 	0xb6, 0x34, 0xc4, 0x22, 0x1f, 0x0a, 0x08, 0x01, 0x0a, 0x12, 0x3e, 0x98,
107 	0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x48, 0xf2,
108 	0x4e, 0xc7, 0xee, 0x3f, 0x01, 0x17, 0x84, 0x22, 0x04, 0x0a, 0x0d, 0x01,
109 	0x0a, 0x02, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e,
110 	0x7b, 0x5e, 0x48, 0xf2, 0x4e, 0xc7, 0xee, 0x3f, 0x0a, 0x08, 0x01, 0x0a,
111 	0x12, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b,
112 	0x5e, 0x48, 0x53, 0xa1, 0xc6, 0xa0, 0xb6, 0x01, 0x17, 0x84, 0x22, 0x04,
113 	0x0a, 0x0d, 0x01, 0x0a, 0x02, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35,
114 	0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x48, 0x53, 0xa1, 0xc6, 0xa0, 0xb6, 0x0a,
115 	0x08, 0x01, 0x0a, 0x12, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9,
116 	0x3d, 0x3e, 0x7b, 0x5e, 0x47, 0x69, 0xe9, 0xc4, 0xa6, 0x5a, 0x01, 0x17,
117 	0x84, 0x22, 0x04, 0x0a, 0x0d, 0x01, 0x0a, 0x02, 0x3e, 0x98, 0xfd, 0xb5,
118 	0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x47, 0x69, 0xe9, 0xc4,
119 	0xa6, 0x5a, 0x0a, 0x08, 0x01, 0x0a, 0x12, 0x3e, 0x98, 0xfd, 0xb5, 0xf6,
120 	0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x46, 0x2c, 0x90, 0xb8, 0xd1,
121 	0xff, 0x01, 0x17, 0x84, 0x22, 0x04, 0x0a, 0x0d, 0x01, 0x0a, 0x02, 0x3e,
122 	0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x46,
123 	0x2c, 0x90, 0xb8, 0xd1, 0xff
124 };
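// Vector icon served to userland through the B_GET_VECTOR_ICON ioctl below.
// The data appears to be in Haiku's flat vector icon format (HVIF); note the
// "ncif" magic in the first four bytes.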
125 
126 
127 // parameters for the DMA resource
128 static const uint32 kDMAResourceBufferCount			= 16;
129 static const uint32 kDMAResourceBounceBufferCount	= 16;
130 
131 static const char* const kDriverModuleName
132 	= "drivers/disk/virtual/ram_disk/driver_v1";
133 static const char* const kControlDeviceModuleName
134 	= "drivers/disk/virtual/ram_disk/control/device_v1";
135 static const char* const kRawDeviceModuleName
136 	= "drivers/disk/virtual/ram_disk/raw/device_v1";
137 
138 static const char* const kControlDeviceName = RAM_DISK_CONTROL_DEVICE_NAME;
139 static const char* const kRawDeviceBaseName = RAM_DISK_RAW_DEVICE_BASE_NAME;
140 
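// Custom device_node attribute names. ControlDevice::Register() attaches these
// to the node it creates for a new RAM disk, and ram_disk_driver_init_driver()
// reads them back to set up the corresponding RawDevice.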
141 static const char* const kFilePathItem = "ram_disk/file_path";
142 static const char* const kDeviceSizeItem = "ram_disk/device_size";
143 static const char* const kDeviceIDItem = "ram_disk/id";
144 
145 
146 struct RawDevice;
147 typedef DoublyLinkedList<RawDevice> RawDeviceList;
148 
149 struct device_manager_info* sDeviceManager;
150 
151 static RawDeviceList sDeviceList;
152 static mutex sDeviceListLock = MUTEX_INITIALIZER("ram disk device list");
153 static uint64 sUsedRawDeviceIDs = 0;
154 
155 
156 static int32	allocate_raw_device_id();
157 static void		free_raw_device_id(int32 id);
158 
159 
160 struct Device {
161 	Device(device_node* node)
162 		:
163 		fNode(node)
164 	{
165 		mutex_init(&fLock, "ram disk device");
166 	}
167 
168 	virtual ~Device()
169 	{
170 		mutex_destroy(&fLock);
171 	}
172 
173 	bool Lock()		{ mutex_lock(&fLock); return true; }
174 	void Unlock()	{ mutex_unlock(&fLock); }
175 
176 	device_node* Node() const	{ return fNode; }
177 
178 	virtual status_t PublishDevice() = 0;
179 
180 protected:
181 	mutex			fLock;
182 	device_node*	fNode;
183 };
184 
185 
186 struct ControlDevice : Device {
187 	ControlDevice(device_node* node)
188 		:
189 		Device(node)
190 	{
191 	}
192 
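	// Registers a new RAM disk by creating a sibling device node under our
	// parent. The attributes set here (size, ID, optional backing file path)
	// are what ram_disk_driver_init_driver() later uses to recognize the node
	// as a raw device and to initialize it.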
193 	status_t Register(const char* filePath, uint64 deviceSize, int32& _id)
194 	{
195 		int32 id = allocate_raw_device_id();
196 		if (id < 0)
197 			return B_BUSY;
198 
199 		device_attr attrs[] = {
200 			{B_DEVICE_PRETTY_NAME, B_STRING_TYPE,
201 				{.string = "RAM Disk Raw Device"}},
202 			{kDeviceSizeItem, B_UINT64_TYPE, {.ui64 = deviceSize}},
203 			{kDeviceIDItem, B_UINT32_TYPE, {.ui32 = (uint32)id}},
204 			{kFilePathItem, B_STRING_TYPE, {.string = filePath}},
205 			{NULL}
206 		};
207 
208 		// If filePath is NULL, remove the attribute.
209 		if (filePath == NULL) {
210 			size_t count = sizeof(attrs) / sizeof(attrs[0]);
211 			memset(attrs + count - 2, 0, sizeof(attrs[0]));
212 		}
213 
214 		status_t error = sDeviceManager->register_node(
215 			sDeviceManager->get_parent_node(Node()), kDriverModuleName, attrs,
216 			NULL, NULL);
217 		if (error != B_OK) {
218 			free_raw_device_id(id);
219 			return error;
220 		}
221 
222 		_id = id;
223 		return B_OK;
224 	}
225 
226 	virtual status_t PublishDevice()
227 	{
228 		return sDeviceManager->publish_device(Node(), kControlDeviceName,
229 			kControlDeviceModuleName);
230 	}
231 };
232 
233 
234 struct RawDevice : Device, DoublyLinkedListLinkImpl<RawDevice> {
235 	RawDevice(device_node* node)
236 		:
237 		Device(node),
238 		fID(-1),
239 		fUnregistered(false),
240 		fDeviceSize(0),
241 		fDeviceName(NULL),
242 		fFilePath(NULL),
243 		fCache(NULL),
244 		fDMAResource(NULL),
245 		fIOScheduler(NULL)
246 	{
247 	}
248 
249 	virtual ~RawDevice()
250 	{
251 		if (fID >= 0) {
252 			MutexLocker locker(sDeviceListLock);
253 			sDeviceList.Remove(this);
254 		}
255 
256 		free(fDeviceName);
257 		free(fFilePath);
258 	}
259 
260 	int32 ID() const				{ return fID; }
261 	off_t DeviceSize() const		{ return fDeviceSize; }
262 	const char* DeviceName() const	{ return fDeviceName; }
263 
264 	bool IsUnregistered() const		{ return fUnregistered; }
265 
266 	void SetUnregistered(bool unregistered)
267 	{
268 		fUnregistered = unregistered;
269 	}
270 
271 	status_t Init(int32 id, const char* filePath, uint64 deviceSize)
272 	{
273 		fID = id;
274 		fFilePath = filePath != NULL ? strdup(filePath) : NULL;
275 		if (filePath != NULL && fFilePath == NULL)
276 			return B_NO_MEMORY;
277 
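		// Round the requested size up to a whole number of pages. For
		// example, with 4 KiB pages a request of 1 MiB + 1 byte becomes
		// 257 pages (1 MiB + 4 KiB).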
278 		fDeviceSize = (deviceSize + B_PAGE_SIZE - 1) / B_PAGE_SIZE
279 			* B_PAGE_SIZE;
280 
281 		if (fDeviceSize < B_PAGE_SIZE
282 			|| (uint64)fDeviceSize / B_PAGE_SIZE
283 				> vm_page_num_pages() * 2 / 3) {
284 			return B_BAD_VALUE;
285 		}
286 
287 		// construct our device path
288 		KPath path(kRawDeviceBaseName);
289 		char buffer[32];
290 		snprintf(buffer, sizeof(buffer), "%" B_PRId32 "/raw", fID);
291 
292 		status_t error = path.Append(buffer);
293 		if (error != B_OK)
294 			return error;
295 
296 		fDeviceName = path.DetachBuffer();
297 
298 		// insert into device list
299 		RawDevice* nextDevice = NULL;
300 		MutexLocker locker(sDeviceListLock);
301 		for (RawDeviceList::Iterator it = sDeviceList.GetIterator();
302 				(nextDevice = it.Next()) != NULL;) {
303 			if (nextDevice->ID() > fID)
304 				break;
305 		}
306 
307 		sDeviceList.InsertBefore(nextDevice, this);
308 
309 		return B_OK;
310 	}
311 
312 	status_t Prepare()
313 	{
314 		status_t error = VMCacheFactory::CreateAnonymousCache(fCache, false, 0,
315 			0, false, VM_PRIORITY_SYSTEM);
316 		if (error != B_OK) {
317 			Unprepare();
318 			return error;
319 		}
320 
321 		fCache->temporary = 1;
322 		fCache->virtual_end = fDeviceSize;
323 
324 		error = fCache->Commit(fDeviceSize, VM_PRIORITY_SYSTEM);
325 		if (error != B_OK) {
326 			Unprepare();
327 			return error;
328 		}
329 
330 		if (fFilePath != NULL) {
331 			error = _LoadFile();
332 			if (error != B_OK) {
333 				Unprepare();
334 				return error;
335 			}
336 		}
337 
338 		// no DMA restrictions
339 		const dma_restrictions restrictions = {};
340 
341 		fDMAResource = new(std::nothrow) DMAResource;
342 		if (fDMAResource == NULL) {
343 			Unprepare();
344 			return B_NO_MEMORY;
345 		}
346 
347 		error = fDMAResource->Init(restrictions, B_PAGE_SIZE,
348 			kDMAResourceBufferCount, kDMAResourceBounceBufferCount);
349 		if (error != B_OK) {
350 			Unprepare();
351 			return error;
352 		}
353 
354 		fIOScheduler = new(std::nothrow) IOSchedulerSimple(fDMAResource);
355 		if (fIOScheduler == NULL) {
356 			Unprepare();
357 			return B_NO_MEMORY;
358 		}
359 
360 		error = fIOScheduler->Init("ram disk device scheduler");
361 		if (error != B_OK) {
362 			Unprepare();
363 			return error;
364 		}
365 
366 		fIOScheduler->SetCallback(&_DoIOEntry, this);
367 
368 		return B_OK;
369 	}
370 
371 	void Unprepare()
372 	{
373 		delete fIOScheduler;
374 		fIOScheduler = NULL;
375 
376 		delete fDMAResource;
377 		fDMAResource = NULL;
378 
379 		if (fCache != NULL) {
380 			fCache->Lock();
381 			fCache->ReleaseRefAndUnlock();
382 			fCache = NULL;
383 		}
384 	}
385 
386 	void GetInfo(ram_disk_ioctl_info& _info) const
387 	{
388 		_info.id = fID;
389 		_info.size = fDeviceSize;
390 		memset(&_info.path, 0, sizeof(_info.path));
391 		if (fFilePath != NULL)
392 			strlcpy(_info.path, fFilePath, sizeof(_info.path));
393 	}
394 
395 	status_t Flush()
396 	{
397 		static const size_t kPageCountPerIteration = 1024;
398 		static const size_t kMaxGapSize = 15;
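		// Modified pages are written back in batches of up to
		// kPageCountPerIteration pages. Up to kMaxGapSize pages that need no
		// writing (clean pages, or cache holes written out as zeros) may be
		// included between two modified pages, so a batch can still go out as
		// a single contiguous pwrite().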
399 
400 		FileDescriptorCloser fd(open(fFilePath, O_WRONLY));
401 		if (!fd.IsSet())
402 			return errno;
403 
404 		vm_page** pages = new(std::nothrow) vm_page*[kPageCountPerIteration];
405 		ArrayDeleter<vm_page*> pagesDeleter(pages);
406 
407 		uint8* buffer = (uint8*)malloc(kPageCountPerIteration * B_PAGE_SIZE);
408 		MemoryDeleter bufferDeleter(buffer);
409 
410 		if (pages == NULL || buffer == NULL)
411 			return B_NO_MEMORY;
412 
413 		// Iterate through all pages of the cache and write back those that
414 		// have been modified.
415 		AutoLocker<VMCache> locker(fCache);
416 
417 		status_t error = B_OK;
418 
419 		for (off_t offset = 0; offset < fDeviceSize;) {
420 			// find the first modified page at or after the current offset
421 			VMCachePagesTree::Iterator it
422 				= fCache->pages.GetIterator(offset / B_PAGE_SIZE, true, true);
423 			vm_page* firstModified;
424 			while ((firstModified = it.Next()) != NULL
425 				&& !firstModified->modified) {
426 			}
427 
428 			if (firstModified == NULL)
429 				break;
430 
431 			if (firstModified->busy) {
432 				fCache->WaitForPageEvents(firstModified, PAGE_EVENT_NOT_BUSY,
433 					true);
434 				continue;
435 			}
436 
437 			pages[0] = firstModified;
438 			page_num_t firstPageIndex = firstModified->cache_offset;
439 			offset = firstPageIndex * B_PAGE_SIZE;
440 
441 			// Collect more pages until the gap between two modified pages gets
442 			// too large or we hit the end of our array.
443 			size_t previousModifiedIndex = 0;
444 			size_t previousIndex = 0;
445 			while (vm_page* page = it.Next()) {
446 				page_num_t index = page->cache_offset - firstPageIndex;
447 				if (page->busy
448 					|| index >= kPageCountPerIteration
449 					|| index - previousModifiedIndex > kMaxGapSize) {
450 					break;
451 				}
452 
453 				pages[index] = page;
454 
455 				// NULL out the page array entries in the gap since the previous page
456 				if (previousIndex + 1 < index) {
457 					memset(pages + previousIndex + 1, 0,
458 						(index - previousIndex - 1) * sizeof(vm_page*));
459 				}
460 
461 				previousIndex = index;
462 				if (page->modified)
463 					previousModifiedIndex = index;
464 			}
465 
466 			// mark all pages we want to write busy
467 			size_t pagesToWrite = previousModifiedIndex + 1;
468 			for (size_t i = 0; i < pagesToWrite; i++) {
469 				if (vm_page* page = pages[i]) {
470 					DEBUG_PAGE_ACCESS_START(page);
471 					page->busy = true;
472 				}
473 			}
474 
475 			locker.Unlock();
476 
477 			// copy the pages to our buffer
478 			for (size_t i = 0; i < pagesToWrite; i++) {
479 				if (vm_page* page = pages[i]) {
480 					error = vm_memcpy_from_physical(buffer + i * B_PAGE_SIZE,
481 						page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE,
482 						false);
483 					if (error != B_OK) {
484 						dprintf("ramdisk: error copying page %" B_PRIu64
485 							" data: %s\n", (uint64)page->physical_page_number,
486 							strerror(error));
487 						break;
488 					}
489 				} else
490 					memset(buffer + i * B_PAGE_SIZE, 0, B_PAGE_SIZE);
491 			}
492 
493 			// write the buffer
494 			if (error == B_OK) {
495 				ssize_t bytesWritten = pwrite(fd.Get(), buffer,
496 					pagesToWrite * B_PAGE_SIZE, offset);
497 				if (bytesWritten < 0) {
498 					dprintf("ramdisk: error writing pages to file: %s\n",
499 						strerror(bytesWritten));
500 					error = bytesWritten;
501 				} else if ((size_t)bytesWritten
502 					!= pagesToWrite * B_PAGE_SIZE) {
503 					dprintf("ramdisk: error writing pages to file: short "
504 						"write (%zd/%zu)\n", bytesWritten,
505 						pagesToWrite * B_PAGE_SIZE);
506 					error = B_ERROR;
507 				}
508 			}
509 
510 			// mark the pages unbusy, on success also unmodified
511 			locker.Lock();
512 
513 			for (size_t i = 0; i < pagesToWrite; i++) {
514 				if (vm_page* page = pages[i]) {
515 					if (error == B_OK)
516 						page->modified = false;
517 					fCache->MarkPageUnbusy(page);
518 					DEBUG_PAGE_ACCESS_END(page);
519 				}
520 			}
521 
522 			if (error != B_OK)
523 				break;
524 
525 			offset += pagesToWrite * B_PAGE_SIZE;
526 		}
527 
528 		return error;
529 	}
530 
531 	status_t Trim(fs_trim_data* trimData)
532 	{
533 		TRACE("trim_device()\n");
534 
535 		trimData->trimmed_size = 0;
536 
537 		const off_t deviceSize = fDeviceSize; // in bytes
538 		if (deviceSize < 0)
539 			return B_BAD_VALUE;
540 
541 		STATIC_ASSERT(sizeof(deviceSize) <= sizeof(uint64));
542 		ASSERT(deviceSize >= 0);
543 
544 		// Do not trim past device end
545 		for (uint32 i = 0; i < trimData->range_count; i++) {
546 			uint64 offset = trimData->ranges[i].offset;
547 			uint64& size = trimData->ranges[i].size;
548 
549 			if (offset >= (uint64)deviceSize)
550 				return B_BAD_VALUE;
551 			size = min_c(size, (uint64)deviceSize - offset);
552 		}
553 
554 		status_t result = B_OK;
555 		uint64 trimmedSize = 0;
556 		for (uint32 i = 0; i < trimData->range_count; i++) {
557 			uint64 offset = trimData->ranges[i].offset;
558 			uint64 length = trimData->ranges[i].size;
559 
560 			// Round up offset and length to multiple of the page size
561 			// The offset is rounded up, so some space may be left
562 			// (not trimmed) at the start of the range.
563 			offset = (offset + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
564 			// Adjust the length for the possibly skipped range
565 			length -= offset - trimData->ranges[i].offset;
566 			// The length is rounded down, so some space at the end may also
567 			// be left (not trimmed).
568 			length &= ~(B_PAGE_SIZE - 1);
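			// For example, with 4 KiB pages a range at offset 0x1800 with
			// size 0x3000 is reduced to [0x2000, 0x4000); the 0x800 bytes at
			// either end stay untouched.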
569 
570 			if (length == 0)
571 				continue;
572 
573 			TRACE("ramdisk: trim %" B_PRIu64 " bytes from %" B_PRIu64 "\n",
574 				length, offset);
575 
576 			ASSERT(offset % B_PAGE_SIZE == 0);
577 			ASSERT(length % B_PAGE_SIZE == 0);
578 
579 			BStackOrHeapArray<vm_page*, 16> pages(length / B_PAGE_SIZE);
580 			if (!pages.IsValid()) {
581 				result = B_NO_MEMORY;
582 				break;
583 			}
584 
585 			cache_get_pages(fCache, (off_t)offset, (off_t)length, false, pages);
586 
587 			AutoLocker<VMCache> locker(fCache);
588 			uint64 j;
589 			for (j = 0; j < length / B_PAGE_SIZE; j++) {
590 				// If we run out of pages (some may already be trimmed), stop.
591 				if (pages[j] == NULL)
592 					break;
593 
594 				TRACE("free range %" B_PRIu32 ", page %" B_PRIu64 ", offset %"
595 					B_PRIu64 "\n", i, j, offset);
596 				if (pages[j]->Cache())
597 					fCache->RemovePage(pages[j]);
598 				vm_page_free(NULL, pages[j]);
599 				trimmedSize += B_PAGE_SIZE;
600 			}
601 		}
602 
603 		trimData->trimmed_size = trimmedSize;
604 
605 		return result;
606 	}
607 
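	// Entry point for the raw device's read/write/io hooks: requests are
	// handed to the IOScheduler created in Prepare(), which splits them into
	// IOOperations and calls back into _DoIO() via _DoIOEntry().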
608 	status_t DoIO(IORequest* request)
609 	{
610 		return fIOScheduler->ScheduleRequest(request);
611 	}
612 
613 	virtual status_t PublishDevice()
614 	{
615 		return sDeviceManager->publish_device(Node(), fDeviceName,
616 			kRawDeviceModuleName);
617 	}
618 
619 private:
620 	static status_t _DoIOEntry(void* data, IOOperation* operation)
621 	{
622 		return ((RawDevice*)data)->_DoIO(operation);
623 	}
624 
625 	status_t _DoIO(IOOperation* operation)
626 	{
627 		off_t offset = operation->Offset();
628 		generic_size_t length = operation->Length();
629 
630 		ASSERT(offset % B_PAGE_SIZE == 0);
631 		ASSERT(length % B_PAGE_SIZE == 0);
632 
633 		const generic_io_vec* vecs = operation->Vecs();
634 		generic_size_t vecOffset = 0;
635 		bool isWrite = operation->IsWrite();
636 
637 		BStackOrHeapArray<vm_page*, 16> pages(length / B_PAGE_SIZE);
638 		if (!pages.IsValid())
639 			return B_NO_MEMORY;
640 
641 		cache_get_pages(fCache, offset, length, isWrite, pages);
642 
643 		status_t error = B_OK;
644 		size_t index = 0;
645 
646 		while (length > 0) {
647 			vm_page* page = pages[index];
648 
649 			if (isWrite)
650 				page->modified = true;
651 
652 			error = _CopyData(page, vecs, vecOffset, isWrite);
653 			if (error != B_OK)
654 				break;
655 
656 			offset += B_PAGE_SIZE;
657 			length -= B_PAGE_SIZE;
658 			index++;
659 		}
660 
661 		cache_put_pages(fCache, operation->Offset(), operation->Length(), pages,
662 			error == B_OK);
663 
664 		if (error != B_OK) {
665 			fIOScheduler->OperationCompleted(operation, error, 0);
666 			return error;
667 		}
668 
669 		fIOScheduler->OperationCompleted(operation, B_OK, operation->Length());
670 		return B_OK;
671 	}
672 
673 	status_t _CopyData(vm_page* page, const generic_io_vec*& vecs,
674 		generic_size_t& vecOffset, bool toPage)
675 	{
676 		// map page to virtual memory
677 		Thread* thread = thread_get_current_thread();
678 		uint8* pageData = NULL;
679 		void* handle;
680 		if (page != NULL) {
681 			thread_pin_to_current_cpu(thread);
682 			addr_t virtualAddress;
683 			status_t error = vm_get_physical_page_current_cpu(
684 				page->physical_page_number * B_PAGE_SIZE, &virtualAddress,
685 				&handle);
686 			if (error != B_OK) {
687 				thread_unpin_from_current_cpu(thread);
688 				return error;
689 			}
690 
691 			pageData = (uint8*)virtualAddress;
692 		}
693 
694 		status_t error = B_OK;
695 		size_t length = B_PAGE_SIZE;
696 		while (length > 0) {
697 			size_t toCopy = std::min((generic_size_t)length,
698 				vecs->length - vecOffset);
699 
700 			if (toCopy == 0) {
701 				vecs++;
702 				vecOffset = 0;
703 				continue;
704 			}
705 
706 			phys_addr_t vecAddress = vecs->base + vecOffset;
707 
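			// toPage == true: copy request data (physical vecs) into the
			// mapped cache page. Otherwise copy the page out to the request;
			// a missing page (NULL, which can only happen for reads) simply
			// yields zeros.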
708 			error = toPage
709 				? vm_memcpy_from_physical(pageData, vecAddress, toCopy, false)
710 				: (page != NULL
711 					? vm_memcpy_to_physical(vecAddress, pageData, toCopy, false)
712 					: vm_memset_physical(vecAddress, 0, toCopy));
713 			if (error != B_OK)
714 				break;
715 
716 			pageData += toCopy;
717 			length -= toCopy;
718 			vecOffset += toCopy;
719 		}
720 
721 		if (page != NULL) {
722 			vm_put_physical_page_current_cpu((addr_t)pageData, handle);
723 			thread_unpin_from_current_cpu(thread);
724 		}
725 
726 		return error;
727 	}
728 
729 	status_t _LoadFile()
730 	{
731 		static const size_t kPageCountPerIteration = 1024;
732 
733 		FileDescriptorCloser fd(open(fFilePath, O_RDONLY));
734 		if (!fd.IsSet())
735 			return errno;
736 
737 		ArrayDeleter<vm_page*> pages(
738 			new(std::nothrow) vm_page*[kPageCountPerIteration]);
739 
740 		ArrayDeleter<uint8> buffer(
741 			new(std::nothrow) uint8[kPageCountPerIteration * B_PAGE_SIZE]);
742 			// TODO: Ideally we wouldn't use a buffer to read the file content,
743 			// but read into the pages we allocated directly. Unfortunately
744 			// there's no API to do that yet.
745 
746 		if (!pages.IsSet() || !buffer.IsSet())
747 			return B_NO_MEMORY;
748 
749 		status_t error = B_OK;
750 
751 		page_num_t allocatedPages = 0;
752 		off_t offset = 0;
753 		off_t sizeRemaining = fDeviceSize;
754 		while (sizeRemaining > 0) {
755 			// Note: fDeviceSize is B_PAGE_SIZE aligned.
756 			size_t pagesToRead = std::min(kPageCountPerIteration,
757 				size_t(sizeRemaining / B_PAGE_SIZE));
758 
759 			// allocate the missing pages
760 			if (allocatedPages < pagesToRead) {
761 				vm_page_reservation reservation;
762 				vm_page_reserve_pages(&reservation,
763 					pagesToRead - allocatedPages, VM_PRIORITY_SYSTEM);
764 
765 				while (allocatedPages < pagesToRead) {
766 					pages[allocatedPages++]
767 						= vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
768 				}
769 
770 				vm_page_unreserve_pages(&reservation);
771 			}
772 
773 			// read from the file
774 			size_t bytesToRead = pagesToRead * B_PAGE_SIZE;
775 			ssize_t bytesRead = pread(fd.Get(), buffer.Get(), bytesToRead,
776 				offset);
777 			if (bytesRead < 0) {
778 				error = bytesRead;
779 				break;
780 			}
781 			size_t pagesRead = (bytesRead + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
782 			if (pagesRead < pagesToRead) {
783 				error = B_ERROR;
784 				break;
785 			}
786 
787 			// clear the last read page, if partial
788 			if ((size_t)bytesRead < pagesRead * B_PAGE_SIZE) {
789 				memset(buffer.Get() + bytesRead, 0,
790 					pagesRead * B_PAGE_SIZE - bytesRead);
791 			}
792 
793 			// copy data to allocated pages
794 			for (size_t i = 0; i < pagesRead; i++) {
795 				vm_page* page = pages[i];
796 				error = vm_memcpy_to_physical(
797 					page->physical_page_number * B_PAGE_SIZE,
798 					buffer.Get() + i * B_PAGE_SIZE, B_PAGE_SIZE, false);
799 				if (error != B_OK)
800 					break;
801 			}
802 
803 			if (error != B_OK)
804 				break;
805 
806 			// Add pages to cache. Ignore clear pages, though. Move those to the
807 			// beginning of the array, so we can reuse them in the next
808 			// iteration.
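			// (A page of all zeros does not need to be in the cache at all:
			// _DoIO() treats missing pages as zero-filled on reads, so sparse
			// regions of the backing file stay cheap.)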
809 			AutoLocker<VMCache> locker(fCache);
810 
811 			size_t clearPages = 0;
812 			for (size_t i = 0; i < pagesRead; i++) {
813 				uint64* pageData = (uint64*)(buffer.Get() + i * B_PAGE_SIZE);
814 				bool isClear = true;
815 				for (size_t k = 0; isClear && k < B_PAGE_SIZE / 8; k++)
816 					isClear = pageData[k] == 0;
817 
818 				if (isClear) {
819 					pages[clearPages++] = pages[i];
820 				} else {
821 					fCache->InsertPage(pages[i], offset + i * B_PAGE_SIZE);
822 					DEBUG_PAGE_ACCESS_END(pages[i]);
823 				}
824 			}
825 
826 			locker.Unlock();
827 
828 			// Move any left-over allocated pages to the end of the empty pages
829 			// and compute the new allocated pages count.
830 			if (pagesRead < allocatedPages) {
831 				size_t count = allocatedPages - pagesRead;
832 				memcpy(pages.Get() + clearPages, pages.Get() + pagesRead,
833 					count * sizeof(vm_page*));
834 				clearPages += count;
835 			}
836 			allocatedPages = clearPages;
837 
838 			offset += pagesRead * B_PAGE_SIZE;
839 			sizeRemaining -= pagesRead * B_PAGE_SIZE;
840 		}
841 
842 		// free left-over allocated pages
843 		for (size_t i = 0; i < allocatedPages; i++)
844 			vm_page_free(NULL, pages[i]);
845 
846 		return error;
847 	}
848 
849 private:
850 	int32			fID;
851 	bool			fUnregistered;
852 	off_t			fDeviceSize;
853 	char*			fDeviceName;
854 	char*			fFilePath;
855 	VMCache*		fCache;
856 	DMAResource*	fDMAResource;
857 	IOScheduler*	fIOScheduler;
858 };
859 
860 
861 struct RawDeviceCookie {
862 	RawDeviceCookie(RawDevice* device, int openMode)
863 		:
864 		fDevice(device),
865 		fOpenMode(openMode)
866 	{
867 	}
868 
869 	RawDevice* Device() const	{ return fDevice; }
870 	int OpenMode() const		{ return fOpenMode; }
871 
872 private:
873 	RawDevice*	fDevice;
874 	int			fOpenMode;
875 };
876 
877 
878 // #pragma mark -
879 
880 
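// Raw device IDs come from a single uint64 bitmap, so at most 64 RAM disks
// can exist at any one time; ControlDevice::Register() maps a failed
// allocation to B_BUSY.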
881 static int32
882 allocate_raw_device_id()
883 {
884 	MutexLocker deviceListLocker(sDeviceListLock);
885 	for (size_t i = 0; i < sizeof(sUsedRawDeviceIDs) * 8; i++) {
886 		if ((sUsedRawDeviceIDs & ((uint64)1 << i)) == 0) {
887 			sUsedRawDeviceIDs |= (uint64)1 << i;
888 			return (int32)i;
889 		}
890 	}
891 
892 	return -1;
893 }
894 
895 
896 static void
897 free_raw_device_id(int32 id)
898 {
899 	MutexLocker deviceListLocker(sDeviceListLock);
900 	sUsedRawDeviceIDs &= ~((uint64)1 << id);
901 }
902 
903 
904 static RawDevice*
905 find_raw_device(int32 id)
906 {
907 	for (RawDeviceList::Iterator it = sDeviceList.GetIterator();
908 			RawDevice* device = it.Next();) {
909 		if (device->ID() == id)
910 			return device;
911 	}
912 
913 	return NULL;
914 }
915 
916 
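// Handles RAM_DISK_IOCTL_REGISTER. With a backing file the device size is
// taken from the file; otherwise the requested size is used and the disk
// starts out zero-filled. A minimal userland sketch (illustrative only; it
// assumes the ram_disk.h header included above is available to userland and
// that the control device is published under /dev):
//
//	int fd = open("/dev/" RAM_DISK_CONTROL_DEVICE_NAME, O_RDONLY);
//	ram_disk_ioctl_register request = {};
//	request.size = 64 * 1024 * 1024;	// 64 MiB, no backing file
//	if (ioctl(fd, RAM_DISK_IOCTL_REGISTER, &request, sizeof(request)) == 0) {
//		// request.id now holds the new ID; the raw device shows up as
//		// /dev/<RAM_DISK_RAW_DEVICE_BASE_NAME>/<id>/raw (see RawDevice::Init()).
//	}
//	close(fd);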
917 static status_t
918 ioctl_register(ControlDevice* controlDevice, ram_disk_ioctl_register* request)
919 {
920 	KPath path;
921 	uint64 deviceSize = 0;
922 
923 	if (request->path[0] != '\0') {
924 		// check if the path is null-terminated
925 		if (strnlen(request->path, sizeof(request->path))
926 				== sizeof(request->path)) {
927 			return B_BAD_VALUE;
928 		}
929 
930 		// get a normalized file path
931 		status_t error = path.SetTo(request->path, true);
932 		if (error != B_OK) {
933 			dprintf("ramdisk: register: Invalid path \"%s\": %s\n",
934 				request->path, strerror(error));
935 			return B_BAD_VALUE;
936 		}
937 
938 		struct stat st;
939 		if (lstat(path.Path(), &st) != 0) {
940 			dprintf("ramdisk: register: Failed to stat \"%s\": %s\n",
941 				path.Path(), strerror(errno));
942 			return errno;
943 		}
944 
945 		if (!S_ISREG(st.st_mode)) {
946 			dprintf("ramdisk: register: \"%s\" is not a file!\n", path.Path());
947 			return B_BAD_VALUE;
948 		}
949 
950 		deviceSize = st.st_size;
951 	} else {
952 		deviceSize = request->size;
953 	}
954 
955 	return controlDevice->Register(path.Length() > 0 ? path.Path() : NULL,
956 		deviceSize, request->id);
957 }
958 
959 
960 static status_t
961 ioctl_unregister(ControlDevice* controlDevice,
962 	ram_disk_ioctl_unregister* request)
963 {
964 	// find the device in the list and unregister it
965 	MutexLocker locker(sDeviceListLock);
966 	RawDevice* device = find_raw_device(request->id);
967 	if (device == NULL)
968 		return B_ENTRY_NOT_FOUND;
969 
970 	// mark unregistered before we unlock
971 	if (device->IsUnregistered())
972 		return B_BUSY;
973 	device->SetUnregistered(true);
974 	locker.Unlock();
975 
976 	device_node* node = device->Node();
977 	status_t error = sDeviceManager->unpublish_device(node,
978 		device->DeviceName());
979 	if (error != B_OK) {
980 		dprintf("ramdisk: unregister: Failed to unpublish device \"%s\": %s\n",
981 			device->DeviceName(), strerror(error));
982 		return error;
983 	}
984 
985 	error = sDeviceManager->unregister_node(node);
986 	// Note: B_BUSY is OK. The node will be removed as soon as possible.
987 	if (error != B_OK && error != B_BUSY) {
988 		dprintf("ramdisk: unregister: Failed to unregister node for device %"
989 			B_PRId32 ": %s\n", request->id, strerror(error));
990 		return error;
991 	}
992 
993 	return B_OK;
994 }
995 
996 
997 static status_t
998 ioctl_info(RawDevice* device, ram_disk_ioctl_info* request)
999 {
1000 	device->GetInfo(*request);
1001 	return B_OK;
1002 }
1003 
1004 
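// Common ioctl plumbing: copies the request structure from userland into a
// kernel-side copy, runs the handler on it, and copies the (possibly updated)
// structure back, so handlers can return results such as the new device ID
// in place.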
1005 template<typename DeviceType, typename Request>
1006 static status_t
1007 handle_ioctl(DeviceType* device,
1008 	status_t (*handler)(DeviceType*, Request*), void* buffer)
1009 {
1010 	// copy request to the kernel heap
1011 	if (buffer == NULL || !IS_USER_ADDRESS(buffer))
1012 		return B_BAD_ADDRESS;
1013 
1014 	Request* request = new(std::nothrow) Request;
1015 	if (request == NULL)
1016 		return B_NO_MEMORY;
1017 	ObjectDeleter<Request> requestDeleter(request);
1018 
1019 	if (user_memcpy(request, buffer, sizeof(Request)) != B_OK)
1020 		return B_BAD_ADDRESS;
1021 
1022 	// handle the ioctl
1023 	status_t error = handler(device, request);
1024 	if (error != B_OK)
1025 		return error;
1026 
1027 	// copy the request back to userland
1028 	if (user_memcpy(buffer, request, sizeof(Request)) != B_OK)
1029 		return B_BAD_ADDRESS;
1030 
1031 	return B_OK;
1032 }
1033 
1034 
1035 //	#pragma mark - driver
1036 
1037 
1038 static float
1039 ram_disk_driver_supports_device(device_node* parent)
1040 {
1041 	const char* bus = NULL;
1042 	if (sDeviceManager->get_attr_string(parent, B_DEVICE_BUS, &bus, false)
1043 			== B_OK
1044 		&& strcmp(bus, "generic") == 0) {
1045 		return 0.8;
1046 	}
1047 
1048 	return -1;
1049 }
1050 
1051 
1052 static status_t
1053 ram_disk_driver_register_device(device_node* parent)
1054 {
1055 	device_attr attrs[] = {
1056 		{B_DEVICE_PRETTY_NAME, B_STRING_TYPE,
1057 			{.string = "RAM Disk Control Device"}},
1058 		{NULL}
1059 	};
1060 
1061 	return sDeviceManager->register_node(parent, kDriverModuleName, attrs, NULL,
1062 		NULL);
1063 }
1064 
1065 
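// A node gets the kDeviceSizeItem attribute only when it was created by
// ControlDevice::Register(), so its presence is what tells a raw-device node
// apart from the control-device node registered in
// ram_disk_driver_register_device().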
1066 static status_t
1067 ram_disk_driver_init_driver(device_node* node, void** _driverCookie)
1068 {
1069 	uint64 deviceSize;
1070 	if (sDeviceManager->get_attr_uint64(node, kDeviceSizeItem, &deviceSize,
1071 			false) == B_OK) {
1072 		int32 id = -1;
1073 		sDeviceManager->get_attr_uint32(node, kDeviceIDItem, (uint32*)&id,
1074 			false);
1075 		if (id < 0)
1076 			return B_ERROR;
1077 
1078 		const char* filePath = NULL;
1079 		sDeviceManager->get_attr_string(node, kFilePathItem, &filePath, false);
1080 
1081 		RawDevice* device = new(std::nothrow) RawDevice(node);
1082 		if (device == NULL)
1083 			return B_NO_MEMORY;
1084 
1085 		status_t error = device->Init(id, filePath, deviceSize);
1086 		if (error != B_OK) {
1087 			delete device;
1088 			return error;
1089 		}
1090 
1091 		*_driverCookie = (Device*)device;
1092 	} else {
1093 		ControlDevice* device = new(std::nothrow) ControlDevice(node);
1094 		if (device == NULL)
1095 			return B_NO_MEMORY;
1096 
1097 		*_driverCookie = (Device*)device;
1098 	}
1099 
1100 	return B_OK;
1101 }
1102 
1103 
1104 static void
1105 ram_disk_driver_uninit_driver(void* driverCookie)
1106 {
1107 	Device* device = (Device*)driverCookie;
1108 	if (RawDevice* rawDevice = dynamic_cast<RawDevice*>(device))
1109 		free_raw_device_id(rawDevice->ID());
1110 	delete device;
1111 }
1112 
1113 
1114 static status_t
1115 ram_disk_driver_register_child_devices(void* driverCookie)
1116 {
1117 	Device* device = (Device*)driverCookie;
1118 	return device->PublishDevice();
1119 }
1120 
1121 
1122 //	#pragma mark - control device
1123 
1124 
1125 static status_t
1126 ram_disk_control_device_init_device(void* driverCookie, void** _deviceCookie)
1127 {
1128 	*_deviceCookie = driverCookie;
1129 	return B_OK;
1130 }
1131 
1132 
1133 static void
1134 ram_disk_control_device_uninit_device(void* deviceCookie)
1135 {
1136 }
1137 
1138 
1139 static status_t
1140 ram_disk_control_device_open(void* deviceCookie, const char* path, int openMode,
1141 	void** _cookie)
1142 {
1143 	*_cookie = deviceCookie;
1144 	return B_OK;
1145 }
1146 
1147 
1148 static status_t
1149 ram_disk_control_device_close(void* cookie)
1150 {
1151 	return B_OK;
1152 }
1153 
1154 
1155 static status_t
1156 ram_disk_control_device_free(void* cookie)
1157 {
1158 	return B_OK;
1159 }
1160 
1161 
1162 static status_t
1163 ram_disk_control_device_read(void* cookie, off_t position, void* buffer,
1164 	size_t* _length)
1165 {
1166 	return B_BAD_VALUE;
1167 }
1168 
1169 
1170 static status_t
1171 ram_disk_control_device_write(void* cookie, off_t position, const void* data,
1172 	size_t* _length)
1173 {
1174 	return B_BAD_VALUE;
1175 }
1176 
1177 
1178 static status_t
1179 ram_disk_control_device_control(void* cookie, uint32 op, void* buffer,
1180 	size_t length)
1181 {
1182 	ControlDevice* device = (ControlDevice*)cookie;
1183 
1184 	switch (op) {
1185 		case RAM_DISK_IOCTL_REGISTER:
1186 			return handle_ioctl(device, &ioctl_register, buffer);
1187 
1188 		case RAM_DISK_IOCTL_UNREGISTER:
1189 			return handle_ioctl(device, &ioctl_unregister, buffer);
1190 	}
1191 
1192 	return B_BAD_VALUE;
1193 }
1194 
1195 
1196 //	#pragma mark - raw device
1197 
1198 
1199 static status_t
1200 ram_disk_raw_device_init_device(void* driverCookie, void** _deviceCookie)
1201 {
1202 	RawDevice* device = static_cast<RawDevice*>((Device*)driverCookie);
1203 
1204 	status_t error = device->Prepare();
1205 	if (error != B_OK)
1206 		return error;
1207 
1208 	*_deviceCookie = device;
1209 	return B_OK;
1210 }
1211 
1212 
1213 static void
1214 ram_disk_raw_device_uninit_device(void* deviceCookie)
1215 {
1216 	RawDevice* device = (RawDevice*)deviceCookie;
1217 	device->Unprepare();
1218 }
1219 
1220 
1221 static status_t
1222 ram_disk_raw_device_open(void* deviceCookie, const char* path, int openMode,
1223 	void** _cookie)
1224 {
1225 	RawDevice* device = (RawDevice*)deviceCookie;
1226 
1227 	RawDeviceCookie* cookie = new(std::nothrow) RawDeviceCookie(device,
1228 		openMode);
1229 	if (cookie == NULL)
1230 		return B_NO_MEMORY;
1231 
1232 	*_cookie = cookie;
1233 	return B_OK;
1234 }
1235 
1236 
1237 static status_t
1238 ram_disk_raw_device_close(void* cookie)
1239 {
1240 	return B_OK;
1241 }
1242 
1243 
1244 static status_t
1245 ram_disk_raw_device_free(void* _cookie)
1246 {
1247 	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1248 	delete cookie;
1249 	return B_OK;
1250 }
1251 
1252 
1253 static status_t
1254 ram_disk_raw_device_read(void* _cookie, off_t pos, void* buffer,
1255 	size_t* _length)
1256 {
1257 	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1258 	RawDevice* device = cookie->Device();
1259 
1260 	size_t length = *_length;
1261 
1262 	if (pos >= device->DeviceSize())
1263 		return B_BAD_VALUE;
1264 	if (pos + (off_t)length > device->DeviceSize())
1265 		length = device->DeviceSize() - pos;
1266 
1267 	IORequest request;
1268 	status_t status = request.Init(pos, (addr_t)buffer, length, false, 0);
1269 	if (status != B_OK)
1270 		return status;
1271 
1272 	status = device->DoIO(&request);
1273 	if (status != B_OK)
1274 		return status;
1275 
1276 	status = request.Wait(0, 0);
1277 	if (status == B_OK)
1278 		*_length = length;
1279 	return status;
1280 }
1281 
1282 
1283 static status_t
1284 ram_disk_raw_device_write(void* _cookie, off_t pos, const void* buffer,
1285 	size_t* _length)
1286 {
1287 	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1288 	RawDevice* device = cookie->Device();
1289 
1290 	size_t length = *_length;
1291 
1292 	if (pos >= device->DeviceSize())
1293 		return B_BAD_VALUE;
1294 	if (pos + (off_t)length > device->DeviceSize())
1295 		length = device->DeviceSize() - pos;
1296 
1297 	IORequest request;
1298 	status_t status = request.Init(pos, (addr_t)buffer, length, true, 0);
1299 	if (status != B_OK)
1300 		return status;
1301 
1302 	status = device->DoIO(&request);
1303 	if (status != B_OK)
1304 		return status;
1305 
1306 	status = request.Wait(0, 0);
1307 	if (status == B_OK)
1308 		*_length = length;
1309 
1310 	return status;
1311 }
1312 
1313 
1314 static status_t
1315 ram_disk_raw_device_io(void* _cookie, io_request* request)
1316 {
1317 	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1318 	RawDevice* device = cookie->Device();
1319 
1320 	return device->DoIO(request);
1321 }
1322 
1323 
1324 static status_t
1325 ram_disk_raw_device_control(void* _cookie, uint32 op, void* buffer,
1326 	size_t length)
1327 {
1328 	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1329 	RawDevice* device = cookie->Device();
1330 
1331 	switch (op) {
1332 		case B_GET_DEVICE_SIZE:
1333 		{
1334 			size_t size = device->DeviceSize();
1335 			return user_memcpy(buffer, &size, sizeof(size_t));
1336 		}
1337 
1338 		case B_SET_NONBLOCKING_IO:
1339 		case B_SET_BLOCKING_IO:
1340 			return B_OK;
1341 
1342 		case B_GET_READ_STATUS:
1343 		case B_GET_WRITE_STATUS:
1344 		{
1345 			bool value = true;
1346 			return user_memcpy(buffer, &value, sizeof(bool));
1347 		}
1348 
1349 		case B_GET_GEOMETRY:
1350 		case B_GET_BIOS_GEOMETRY:
1351 		{
1352 			if (buffer == NULL || length > sizeof(device_geometry))
1353 				return B_BAD_VALUE;
1354 
1355 			device_geometry geometry;
1356 			geometry.bytes_per_sector = B_PAGE_SIZE;
1357 			geometry.sectors_per_track = 1;
1358 			geometry.cylinder_count = device->DeviceSize() / B_PAGE_SIZE;
1359 				// TODO: We're limited to 2^32 * B_PAGE_SIZE, if we don't use
1360 				// sectors_per_track and head_count.
1361 			geometry.head_count = 1;
1362 			geometry.device_type = B_DISK;
1363 			geometry.removable = true;
1364 			geometry.read_only = false;
1365 			geometry.write_once = false;
1366 			geometry.bytes_per_physical_sector = B_PAGE_SIZE;
1367 
1368 			return user_memcpy(buffer, &geometry, length);
1369 		}
1370 
1371 		case B_GET_MEDIA_STATUS:
1372 		{
1373 			status_t status = B_OK;
1374 			return user_memcpy(buffer, &status, sizeof(status_t));
1375 		}
1376 
1377 		case B_GET_ICON_NAME:
1378 			return user_strlcpy((char*)buffer, "devices/drive-ramdisk",
1379 				B_FILE_NAME_LENGTH);
1380 
1381 		case B_GET_VECTOR_ICON:
1382 		{
1383 			device_icon iconData;
1384 			if (length != sizeof(device_icon))
1385 				return B_BAD_VALUE;
1386 			if (user_memcpy(&iconData, buffer, sizeof(device_icon)) != B_OK)
1387 				return B_BAD_ADDRESS;
1388 
1389 			if (iconData.icon_size >= (int32)sizeof(kRamdiskIcon)) {
1390 				if (user_memcpy(iconData.icon_data, kRamdiskIcon,
1391 						sizeof(kRamdiskIcon)) != B_OK)
1392 					return B_BAD_ADDRESS;
1393 			}
1394 
1395 			iconData.icon_size = sizeof(kRamdiskIcon);
1396 			return user_memcpy(buffer, &iconData, sizeof(device_icon));
1397 		}
1398 
1399 		case B_SET_UNINTERRUPTABLE_IO:
1400 		case B_SET_INTERRUPTABLE_IO:
1401 		case B_FLUSH_DRIVE_CACHE:
1402 			return B_OK;
1403 
1404 		case RAM_DISK_IOCTL_FLUSH:
1405 		{
1406 			status_t error = device->Flush();
1407 			if (error != B_OK) {
1408 				dprintf("ramdisk: flush: Failed to flush device: %s\n",
1409 					strerror(error));
1410 				return error;
1411 			}
1412 
1413 			return B_OK;
1414 		}
1415 
1416 		case B_TRIM_DEVICE:
1417 		{
1418 			// We know the buffer is kernel-side because it has been
1419 			// preprocessed in devfs
1420 			ASSERT(IS_KERNEL_ADDRESS(buffer));
1421 			return device->Trim((fs_trim_data*)buffer);
1422 		}
1423 
1424 		case RAM_DISK_IOCTL_INFO:
1425 			return handle_ioctl(device, &ioctl_info, buffer);
1426 	}
1427 
1428 	return B_BAD_VALUE;
1429 }
1430 
1431 
1432 // #pragma mark -
1433 
1434 
1435 module_dependency module_dependencies[] = {
1436 	{B_DEVICE_MANAGER_MODULE_NAME, (module_info**)&sDeviceManager},
1437 	{}
1438 };
1439 
1440 
1441 static const struct driver_module_info sRamDiskDriverModule = {
1442 	{
1443 		kDriverModuleName,
1444 		0,
1445 		NULL
1446 	},
1447 
1448 	ram_disk_driver_supports_device,
1449 	ram_disk_driver_register_device,
1450 	ram_disk_driver_init_driver,
1451 	ram_disk_driver_uninit_driver,
1452 	ram_disk_driver_register_child_devices
1453 };
1454 
1455 static const struct device_module_info sRamDiskControlDeviceModule = {
1456 	{
1457 		kControlDeviceModuleName,
1458 		0,
1459 		NULL
1460 	},
1461 
1462 	ram_disk_control_device_init_device,
1463 	ram_disk_control_device_uninit_device,
1464 	NULL,
1465 
1466 	ram_disk_control_device_open,
1467 	ram_disk_control_device_close,
1468 	ram_disk_control_device_free,
1469 
1470 	ram_disk_control_device_read,
1471 	ram_disk_control_device_write,
1472 	NULL,	// io
1473 
1474 	ram_disk_control_device_control,
1475 
1476 	NULL,	// select
1477 	NULL	// deselect
1478 };
1479 
1480 static const struct device_module_info sRamDiskRawDeviceModule = {
1481 	{
1482 		kRawDeviceModuleName,
1483 		0,
1484 		NULL
1485 	},
1486 
1487 	ram_disk_raw_device_init_device,
1488 	ram_disk_raw_device_uninit_device,
1489 	NULL,
1490 
1491 	ram_disk_raw_device_open,
1492 	ram_disk_raw_device_close,
1493 	ram_disk_raw_device_free,
1494 
1495 	ram_disk_raw_device_read,
1496 	ram_disk_raw_device_write,
1497 	ram_disk_raw_device_io,
1498 
1499 	ram_disk_raw_device_control,
1500 
1501 	NULL,	// select
1502 	NULL	// deselect
1503 };
1504 
1505 const module_info* modules[] = {
1506 	(module_info*)&sRamDiskDriverModule,
1507 	(module_info*)&sRamDiskControlDeviceModule,
1508 	(module_info*)&sRamDiskRawDeviceModule,
1509 	NULL
1510 };
1511