xref: /haiku/src/add-ons/kernel/drivers/disk/virtual/ram_disk/ram_disk.cpp (revision be9a70562e3c6552efb0caa53bd26965e7e1bed7)
1 /*
2  * Copyright 2010-2013, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include <file_systems/ram_disk/ram_disk.h>
8 
9 #include <ctype.h>
10 #include <errno.h>
11 #include <fcntl.h>
12 #include <stdio.h>
13 #include <string.h>
14 #include <unistd.h>
15 
16 #include <algorithm>
17 
18 #include <device_manager.h>
19 #include <Drivers.h>
20 
21 #include <AutoDeleter.h>
22 #include <util/AutoLock.h>
23 #include <util/DoublyLinkedList.h>
24 
25 #include <fs/KPath.h>
26 #include <lock.h>
27 #include <util/fs_trim_support.h>
28 #include <vm/vm.h>
29 #include <vm/VMCache.h>
30 #include <vm/vm_page.h>
31 
32 #include "dma_resources.h"
33 #include "io_requests.h"
34 #include "IOSchedulerSimple.h"
35 
36 
37 //#define TRACE_RAM_DISK
38 #ifdef TRACE_RAM_DISK
39 #	define TRACE(x...)	dprintf(x)
40 #else
41 #	define TRACE(x...) do {} while (false)
42 #endif
43 
44 
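// Flat vector icon ("ncif"/HVIF data) returned by the raw device's
// B_GET_VECTOR_ICON ioctl (see ram_disk_raw_device_control() below).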
45 static const unsigned char kRamdiskIcon[] = {
46 	0x6e, 0x63, 0x69, 0x66, 0x0e, 0x03, 0x01, 0x00, 0x00, 0x02, 0x00, 0x16,
47 	0x02, 0x3c, 0xc7, 0xee, 0x38, 0x9b, 0xc0, 0xba, 0x16, 0x57, 0x3e, 0x39,
48 	0xb0, 0x49, 0x77, 0xc8, 0x42, 0xad, 0xc7, 0x00, 0xff, 0xff, 0xd3, 0x02,
49 	0x00, 0x06, 0x02, 0x3c, 0x96, 0x32, 0x3a, 0x4d, 0x3f, 0xba, 0xfc, 0x01,
50 	0x3d, 0x5a, 0x97, 0x4b, 0x57, 0xa5, 0x49, 0x84, 0x4d, 0x00, 0x47, 0x47,
51 	0x47, 0xff, 0xa5, 0xa0, 0xa0, 0x02, 0x00, 0x16, 0x02, 0xbc, 0x59, 0x2f,
52 	0xbb, 0x29, 0xa7, 0x3c, 0x0c, 0xe4, 0xbd, 0x0b, 0x7c, 0x48, 0x92, 0xc0,
53 	0x4b, 0x79, 0x66, 0x00, 0x7d, 0xff, 0xd4, 0x02, 0x00, 0x06, 0x02, 0x38,
54 	0xdb, 0xb4, 0x39, 0x97, 0x33, 0xbc, 0x4a, 0x33, 0x3b, 0xa5, 0x42, 0x48,
55 	0x6e, 0x66, 0x49, 0xee, 0x7b, 0x00, 0x59, 0x67, 0x56, 0xff, 0xeb, 0xb2,
56 	0xb2, 0x03, 0xa7, 0xff, 0x00, 0x03, 0xff, 0x00, 0x00, 0x04, 0x01, 0x80,
57 	0x03, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x6a, 0x05, 0x33, 0x02,
58 	0x00, 0x06, 0x02, 0x3a, 0x5d, 0x2c, 0x39, 0xf8, 0xb1, 0xb9, 0xdb, 0xf1,
59 	0x3a, 0x4c, 0x0f, 0x48, 0xae, 0xea, 0x4a, 0xc0, 0x91, 0x00, 0x74, 0x74,
60 	0x74, 0xff, 0x3e, 0x3d, 0x3d, 0x02, 0x00, 0x16, 0x02, 0x38, 0x22, 0x1b,
61 	0x3b, 0x11, 0x73, 0xbc, 0x5e, 0xb5, 0x39, 0x4b, 0xaa, 0x4a, 0x47, 0xf1,
62 	0x49, 0xc2, 0x1d, 0x00, 0xb0, 0xff, 0x83, 0x02, 0x00, 0x16, 0x03, 0x36,
63 	0xed, 0xe9, 0x36, 0xb9, 0x49, 0xba, 0x0a, 0xf6, 0x3a, 0x32, 0x6f, 0x4a,
64 	0x79, 0xef, 0x4b, 0x03, 0xe7, 0x00, 0x5a, 0x38, 0xdc, 0xff, 0x7e, 0x0d,
65 	0x0a, 0x06, 0x22, 0x3c, 0x22, 0x49, 0x44, 0x5b, 0x5a, 0x3e, 0x5a, 0x31,
66 	0x39, 0x25, 0x0a, 0x04, 0x22, 0x3c, 0x44, 0x4b, 0x5a, 0x31, 0x39, 0x25,
67 	0x0a, 0x04, 0x44, 0x4b, 0x44, 0x5b, 0x5a, 0x3e, 0x5a, 0x31, 0x0a, 0x04,
68 	0x22, 0x3c, 0x22, 0x49, 0x44, 0x5b, 0x44, 0x4b, 0x08, 0x02, 0x27, 0x43,
69 	0xb8, 0x14, 0xc1, 0xf1, 0x08, 0x02, 0x26, 0x43, 0x29, 0x44, 0x0a, 0x05,
70 	0x44, 0x5d, 0x49, 0x5d, 0x60, 0x3e, 0x5a, 0x3b, 0x5b, 0x3f, 0x0a, 0x04,
71 	0x3c, 0x5a, 0x5a, 0x3c, 0x5a, 0x36, 0x3c, 0x52, 0x0a, 0x04, 0x24, 0x4e,
72 	0x3c, 0x5a, 0x3c, 0x52, 0x24, 0x48, 0x06, 0x07, 0xaa, 0x3f, 0x42, 0x2e,
73 	0x24, 0x48, 0x3c, 0x52, 0x5a, 0x36, 0x51, 0x33, 0x51, 0x33, 0x50, 0x34,
74 	0x4b, 0x33, 0x4d, 0x34, 0x49, 0x32, 0x49, 0x30, 0x48, 0x31, 0x49, 0x30,
75 	0x06, 0x08, 0xfa, 0xfa, 0x42, 0x50, 0x3e, 0x54, 0x40, 0x55, 0x3f, 0xc7,
76 	0xeb, 0x41, 0xc8, 0x51, 0x42, 0xc9, 0x4f, 0x42, 0xc8, 0xda, 0x42, 0xca,
77 	0x41, 0xc0, 0xf1, 0x5d, 0x45, 0xca, 0x81, 0x46, 0xc7, 0xb7, 0x46, 0xc8,
78 	0xa9, 0x46, 0xc7, 0x42, 0x44, 0x51, 0x45, 0xc6, 0xb9, 0x43, 0xc6, 0x53,
79 	0x0a, 0x07, 0x3c, 0x5c, 0x40, 0x5c, 0x42, 0x5e, 0x48, 0x5e, 0x4a, 0x5c,
80 	0x46, 0x5a, 0x45, 0x4b, 0x06, 0x09, 0x9a, 0xf6, 0x03, 0x42, 0x2e, 0x24,
81 	0x48, 0x4e, 0x3c, 0x5a, 0x5a, 0x3c, 0x36, 0x51, 0x33, 0x51, 0x33, 0x50,
82 	0x34, 0x4b, 0x33, 0x4d, 0x34, 0x49, 0x32, 0x49, 0x30, 0x48, 0x31, 0x49,
83 	0x30, 0x18, 0x0a, 0x07, 0x01, 0x06, 0x00, 0x0a, 0x00, 0x01, 0x00, 0x10,
84 	0x01, 0x17, 0x84, 0x00, 0x04, 0x0a, 0x01, 0x01, 0x01, 0x00, 0x0a, 0x02,
85 	0x01, 0x02, 0x00, 0x0a, 0x03, 0x01, 0x03, 0x00, 0x0a, 0x04, 0x01, 0x04,
86 	0x10, 0x01, 0x17, 0x85, 0x20, 0x04, 0x0a, 0x06, 0x01, 0x05, 0x30, 0x24,
87 	0xb3, 0x99, 0x01, 0x17, 0x82, 0x00, 0x04, 0x0a, 0x05, 0x01, 0x05, 0x30,
88 	0x20, 0xb2, 0xe6, 0x01, 0x17, 0x82, 0x00, 0x04, 0x0a, 0x09, 0x01, 0x0b,
89 	0x02, 0x3e, 0x9b, 0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0,
90 	0x21, 0x48, 0xed, 0x4d, 0xc8, 0x5a, 0x02, 0x0a, 0x09, 0x01, 0x0b, 0x02,
91 	0x3e, 0x9b, 0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21,
92 	0x48, 0x4c, 0xd4, 0xc7, 0x9c, 0x11, 0x0a, 0x09, 0x01, 0x0b, 0x02, 0x3e,
93 	0x9b, 0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x47,
94 	0x5c, 0xe7, 0xc6, 0x2c, 0x1a, 0x0a, 0x09, 0x01, 0x0b, 0x02, 0x3e, 0x9b,
95 	0x12, 0xb5, 0xf9, 0x99, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x46, 0x1b,
96 	0xf5, 0xc4, 0x28, 0x4e, 0x0a, 0x08, 0x01, 0x0c, 0x12, 0x3e, 0xc0, 0x21,
97 	0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x45, 0xb6, 0x34,
98 	0xc4, 0x22, 0x1f, 0x01, 0x17, 0x84, 0x00, 0x04, 0x0a, 0x0a, 0x01, 0x07,
99 	0x02, 0x3e, 0xc0, 0x21, 0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0,
100 	0x21, 0x45, 0xb6, 0x34, 0xc4, 0x22, 0x1f, 0x0a, 0x0b, 0x01, 0x08, 0x02,
101 	0x3e, 0xc0, 0x21, 0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21,
102 	0x45, 0xb6, 0x34, 0xc4, 0x22, 0x1f, 0x0a, 0x0c, 0x01, 0x09, 0x02, 0x3e,
103 	0xc0, 0x21, 0xb6, 0x19, 0x10, 0x36, 0x19, 0x10, 0x3e, 0xc0, 0x21, 0x45,
104 	0xb6, 0x34, 0xc4, 0x22, 0x1f, 0x0a, 0x08, 0x01, 0x0a, 0x12, 0x3e, 0x98,
105 	0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x48, 0xf2,
106 	0x4e, 0xc7, 0xee, 0x3f, 0x01, 0x17, 0x84, 0x22, 0x04, 0x0a, 0x0d, 0x01,
107 	0x0a, 0x02, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e,
108 	0x7b, 0x5e, 0x48, 0xf2, 0x4e, 0xc7, 0xee, 0x3f, 0x0a, 0x08, 0x01, 0x0a,
109 	0x12, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b,
110 	0x5e, 0x48, 0x53, 0xa1, 0xc6, 0xa0, 0xb6, 0x01, 0x17, 0x84, 0x22, 0x04,
111 	0x0a, 0x0d, 0x01, 0x0a, 0x02, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35,
112 	0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x48, 0x53, 0xa1, 0xc6, 0xa0, 0xb6, 0x0a,
113 	0x08, 0x01, 0x0a, 0x12, 0x3e, 0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9,
114 	0x3d, 0x3e, 0x7b, 0x5e, 0x47, 0x69, 0xe9, 0xc4, 0xa6, 0x5a, 0x01, 0x17,
115 	0x84, 0x22, 0x04, 0x0a, 0x0d, 0x01, 0x0a, 0x02, 0x3e, 0x98, 0xfd, 0xb5,
116 	0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x47, 0x69, 0xe9, 0xc4,
117 	0xa6, 0x5a, 0x0a, 0x08, 0x01, 0x0a, 0x12, 0x3e, 0x98, 0xfd, 0xb5, 0xf6,
118 	0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x46, 0x2c, 0x90, 0xb8, 0xd1,
119 	0xff, 0x01, 0x17, 0x84, 0x22, 0x04, 0x0a, 0x0d, 0x01, 0x0a, 0x02, 0x3e,
120 	0x98, 0xfd, 0xb5, 0xf6, 0x6c, 0x35, 0xc9, 0x3d, 0x3e, 0x7b, 0x5e, 0x46,
121 	0x2c, 0x90, 0xb8, 0xd1, 0xff
122 };
123 
124 
125 // parameters for the DMA resource
126 static const uint32 kDMAResourceBufferCount			= 16;
127 static const uint32 kDMAResourceBounceBufferCount	= 16;
128 
129 static const char* const kDriverModuleName
130 	= "drivers/disk/virtual/ram_disk/driver_v1";
131 static const char* const kControlDeviceModuleName
132 	= "drivers/disk/virtual/ram_disk/control/device_v1";
133 static const char* const kRawDeviceModuleName
134 	= "drivers/disk/virtual/ram_disk/raw/device_v1";
135 
136 static const char* const kControlDeviceName = RAM_DISK_CONTROL_DEVICE_NAME;
137 static const char* const kRawDeviceBaseName = RAM_DISK_RAW_DEVICE_BASE_NAME;
138 
139 static const char* const kFilePathItem = "ram_disk/file_path";
140 static const char* const kDeviceSizeItem = "ram_disk/device_size";
141 static const char* const kDeviceIDItem = "ram_disk/id";
142 
143 
144 struct RawDevice;
145 typedef DoublyLinkedList<RawDevice> RawDeviceList;
146 
147 struct device_manager_info* sDeviceManager;
148 
149 static RawDeviceList sDeviceList;
150 static mutex sDeviceListLock = MUTEX_INITIALIZER("ram disk device list");
151 static uint64 sUsedRawDeviceIDs = 0;
152 
153 
154 static int32	allocate_raw_device_id();
155 static void		free_raw_device_id(int32 id);
156 
157 
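// Common base class for the control device and the raw devices: it keeps the
// device_manager node and a lock serializing access to the device.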
158 struct Device {
159 	Device(device_node* node)
160 		:
161 		fNode(node)
162 	{
163 		mutex_init(&fLock, "ram disk device");
164 	}
165 
166 	virtual ~Device()
167 	{
168 		mutex_destroy(&fLock);
169 	}
170 
171 	bool Lock()		{ mutex_lock(&fLock); return true; }
172 	void Unlock()	{ mutex_unlock(&fLock); }
173 
174 	device_node* Node() const	{ return fNode; }
175 
176 	virtual status_t PublishDevice() = 0;
177 
178 protected:
179 	mutex			fLock;
180 	device_node*	fNode;
181 };
182 
183 
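// The single control device. Its main job is Register(): it registers a new
// driver node for a raw device, passing the requested size, the allocated ID,
// and (optionally) the backing file path as node attributes.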
184 struct ControlDevice : Device {
185 	ControlDevice(device_node* node)
186 		:
187 		Device(node)
188 	{
189 	}
190 
191 	status_t Register(const char* filePath, uint64 deviceSize, int32& _id)
192 	{
193 		int32 id = allocate_raw_device_id();
194 		if (id < 0)
195 			return B_BUSY;
196 
197 		device_attr attrs[] = {
198 			{B_DEVICE_PRETTY_NAME, B_STRING_TYPE,
199 				{string: "RAM Disk Raw Device"}},
200 			{kDeviceSizeItem, B_UINT64_TYPE, {ui64: deviceSize}},
201 			{kDeviceIDItem, B_UINT32_TYPE, {ui32: (uint32)id}},
202 			{kFilePathItem, B_STRING_TYPE, {string: filePath}},
203 			{NULL}
204 		};
205 
206 		// If filePath is NULL, remove the attribute.
207 		if (filePath == NULL) {
208 			size_t count = sizeof(attrs) / sizeof(attrs[0]);
209 			memset(attrs + count - 2, 0, sizeof(attrs[0]));
210 		}
211 
212 		status_t error = sDeviceManager->register_node(
213 			sDeviceManager->get_parent_node(Node()), kDriverModuleName, attrs,
214 			NULL, NULL);
215 		if (error != B_OK) {
216 			free_raw_device_id(id);
217 			return error;
218 		}
219 
220 		_id = id;
221 		return B_OK;
222 	}
223 
224 	virtual status_t PublishDevice()
225 	{
226 		return sDeviceManager->publish_device(Node(), kControlDeviceName,
227 			kControlDeviceModuleName);
228 	}
229 };
230 
231 
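// A registered RAM disk. The disk contents live in an anonymous VMCache; I/O
// is funneled through an IOSchedulerSimple/DMAResource pair and served
// directly from/to the cache pages. An optional backing file is loaded in
// Prepare() and can be written back via RAM_DISK_IOCTL_FLUSH.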
232 struct RawDevice : Device, DoublyLinkedListLinkImpl<RawDevice> {
233 	RawDevice(device_node* node)
234 		:
235 		Device(node),
236 		fID(-1),
237 		fUnregistered(false),
238 		fDeviceSize(0),
239 		fDeviceName(NULL),
240 		fFilePath(NULL),
241 		fCache(NULL),
242 		fDMAResource(NULL),
243 		fIOScheduler(NULL)
244 	{
245 	}
246 
247 	virtual ~RawDevice()
248 	{
249 		if (fID >= 0) {
250 			MutexLocker locker(sDeviceListLock);
251 			sDeviceList.Remove(this);
252 		}
253 
254 		free(fDeviceName);
255 		free(fFilePath);
256 	}
257 
258 	int32 ID() const				{ return fID; }
259 	off_t DeviceSize() const		{ return fDeviceSize; }
260 	const char* DeviceName() const	{ return fDeviceName; }
261 
262 	bool IsUnregistered() const		{ return fUnregistered; }
263 
264 	void SetUnregistered(bool unregistered)
265 	{
266 		fUnregistered = unregistered;
267 	}
268 
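	// Stores the ID, backing file path, and page-aligned device size, builds
	// the device path ("<base>/<id>/raw"), and inserts the device into the
	// global list, sorted by ID.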
269 	status_t Init(int32 id, const char* filePath, uint64 deviceSize)
270 	{
271 		fID = id;
272 		fFilePath = filePath != NULL ? strdup(filePath) : NULL;
273 		if (filePath != NULL && fFilePath == NULL)
274 			return B_NO_MEMORY;
275 
276 		fDeviceSize = (deviceSize + B_PAGE_SIZE - 1) / B_PAGE_SIZE
277 			* B_PAGE_SIZE;
278 
279 		if (fDeviceSize < B_PAGE_SIZE
280 			|| (uint64)fDeviceSize / B_PAGE_SIZE
281 				> vm_page_num_pages() * 2 / 3) {
282 			return B_BAD_VALUE;
283 		}
284 
285 		// construct our device path
286 		KPath path(kRawDeviceBaseName);
287 		char buffer[32];
288 		snprintf(buffer, sizeof(buffer), "%" B_PRId32 "/raw", fID);
289 
290 		status_t error = path.Append(buffer);
291 		if (error != B_OK)
292 			return error;
293 
294 		fDeviceName = path.DetachBuffer();
295 
296 		// insert into device list
297 		RawDevice* nextDevice = NULL;
298 		MutexLocker locker(sDeviceListLock);
299 		for (RawDeviceList::Iterator it = sDeviceList.GetIterator();
300 				(nextDevice = it.Next()) != NULL;) {
301 			if (nextDevice->ID() > fID)
302 				break;
303 		}
304 
305 		sDeviceList.InsertBefore(nextDevice, this);
306 
307 		return B_OK;
308 	}
309 
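	// Called from ram_disk_raw_device_init_device(): creates and commits the
	// anonymous VMCache backing the disk, loads the backing file if one was
	// given, and sets up the DMAResource and IOSchedulerSimple.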
310 	status_t Prepare()
311 	{
312 		status_t error = VMCacheFactory::CreateAnonymousCache(fCache, false, 0,
313 			0, false, VM_PRIORITY_SYSTEM);
314 		if (error != B_OK) {
315 			Unprepare();
316 			return error;
317 		}
318 
319 		fCache->temporary = 1;
320 		fCache->virtual_end = fDeviceSize;
321 
322 		error = fCache->Commit(fDeviceSize, VM_PRIORITY_SYSTEM);
323 		if (error != B_OK) {
324 			Unprepare();
325 			return error;
326 		}
327 
328 		if (fFilePath != NULL) {
329 			error = _LoadFile();
330 			if (error != B_OK) {
331 				Unprepare();
332 				return error;
333 			}
334 		}
335 
336 		// no DMA restrictions
337 		const dma_restrictions restrictions = {};
338 
339 		fDMAResource = new(std::nothrow) DMAResource;
340 		if (fDMAResource == NULL) {
341 			Unprepare();
342 			return B_NO_MEMORY;
343 		}
344 
345 		error = fDMAResource->Init(restrictions, B_PAGE_SIZE,
346 			kDMAResourceBufferCount, kDMAResourceBounceBufferCount);
347 		if (error != B_OK) {
348 			Unprepare();
349 			return error;
350 		}
351 
352 		fIOScheduler = new(std::nothrow) IOSchedulerSimple(fDMAResource);
353 		if (fIOScheduler == NULL) {
354 			Unprepare();
355 			return B_NO_MEMORY;
356 		}
357 
358 		error = fIOScheduler->Init("ram disk device scheduler");
359 		if (error != B_OK) {
360 			Unprepare();
361 			return error;
362 		}
363 
364 		fIOScheduler->SetCallback(&_DoIOEntry, this);
365 
366 		return B_OK;
367 	}
368 
369 	void Unprepare()
370 	{
371 		delete fIOScheduler;
372 		fIOScheduler = NULL;
373 
374 		delete fDMAResource;
375 		fDMAResource = NULL;
376 
377 		if (fCache != NULL) {
378 			fCache->Lock();
379 			fCache->ReleaseRefAndUnlock();
380 			fCache = NULL;
381 		}
382 	}
383 
384 	void GetInfo(ram_disk_ioctl_info& _info) const
385 	{
386 		_info.id = fID;
387 		_info.size = fDeviceSize;
388 		memset(&_info.path, 0, sizeof(_info.path));
389 		if (fFilePath != NULL)
390 			strlcpy(_info.path, fFilePath, sizeof(_info.path));
391 	}
392 
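	// Writes all modified cache pages back to the backing file (fFilePath).
	// Pages are collected in batches of up to kPageCountPerIteration, copied
	// into a temporary buffer, and written with one pwrite() per batch; small
	// runs of unmodified pages (up to kMaxGapSize) are written along to keep
	// the writes contiguous.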
393 	status_t Flush()
394 	{
395 		static const size_t kPageCountPerIteration = 1024;
396 		static const size_t kMaxGapSize = 15;
397 
398 		int fd = open(fFilePath, O_WRONLY);
399 		if (fd < 0)
400 			return errno;
401 		FileDescriptorCloser fdCloser(fd);
402 
403 		vm_page** pages = new(std::nothrow) vm_page*[kPageCountPerIteration];
404 		ArrayDeleter<vm_page*> pagesDeleter(pages);
405 
406 		uint8* buffer = (uint8*)malloc(kPageCountPerIteration * B_PAGE_SIZE);
407 		MemoryDeleter bufferDeleter(buffer);
408 
409 		if (pages == NULL || buffer == NULL)
410 			return B_NO_MEMORY;
411 
412 		// Iterate through all pages of the cache and write those back that have
413 		// been modified.
414 		AutoLocker<VMCache> locker(fCache);
415 
416 		status_t error = B_OK;
417 
418 		for (off_t offset = 0; offset < fDeviceSize;) {
419 			// find the first modified page at or after the current offset
420 			VMCachePagesTree::Iterator it
421 				= fCache->pages.GetIterator(offset / B_PAGE_SIZE, true, true);
422 			vm_page* firstModified;
423 			while ((firstModified = it.Next()) != NULL
424 				&& !firstModified->modified) {
425 			}
426 
427 			if (firstModified == NULL)
428 				break;
429 
430 			if (firstModified->busy) {
431 				fCache->WaitForPageEvents(firstModified, PAGE_EVENT_NOT_BUSY,
432 					true);
433 				continue;
434 			}
435 
436 			pages[0] = firstModified;
437 			page_num_t firstPageIndex = firstModified->cache_offset;
438 			offset = firstPageIndex * B_PAGE_SIZE;
439 
440 			// Collect more pages until the gap between two modified pages gets
441 			// too large or we hit the end of our array.
442 			size_t previousModifiedIndex = 0;
443 			size_t previousIndex = 0;
444 			while (vm_page* page = it.Next()) {
445 				page_num_t index = page->cache_offset - firstPageIndex;
446 				if (page->busy
447 					|| index >= kPageCountPerIteration
448 					|| index - previousModifiedIndex > kMaxGapSize) {
449 					break;
450 				}
451 
452 				pages[index] = page;
453 
454 				// clear the page array entries in the gap since the previous page
455 				if (previousIndex + 1 < index) {
456 					memset(pages + previousIndex + 1, 0,
457 						(index - previousIndex - 1) * sizeof(vm_page*));
458 				}
459 
460 				previousIndex = index;
461 				if (page->modified)
462 					previousModifiedIndex = index;
463 			}
464 
465 			// mark all pages we want to write busy
466 			size_t pagesToWrite = previousModifiedIndex + 1;
467 			for (size_t i = 0; i < pagesToWrite; i++) {
468 				if (vm_page* page = pages[i]) {
469 					DEBUG_PAGE_ACCESS_START(page);
470 					page->busy = true;
471 				}
472 			}
473 
474 			locker.Unlock();
475 
476 			// copy the pages to our buffer
477 			for (size_t i = 0; i < pagesToWrite; i++) {
478 				if (vm_page* page = pages[i]) {
479 					error = vm_memcpy_from_physical(buffer + i * B_PAGE_SIZE,
480 						page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE,
481 						false);
482 					if (error != B_OK) {
483 						dprintf("ramdisk: error copying page %" B_PRIu64
484 							" data: %s\n", (uint64)page->physical_page_number,
485 							strerror(error));
486 						break;
487 					}
488 				} else
489 					memset(buffer + i * B_PAGE_SIZE, 0, B_PAGE_SIZE);
490 			}
491 
492 			// write the buffer
493 			if (error == B_OK) {
494 				ssize_t bytesWritten = pwrite(fd, buffer,
495 					pagesToWrite * B_PAGE_SIZE, offset);
496 				if (bytesWritten < 0) {
497 					dprintf("ramdisk: error writing pages to file: %s\n",
498 						strerror(bytesWritten));
499 					error = bytesWritten;
500 				} else if ((size_t)bytesWritten
501 						!= pagesToWrite * B_PAGE_SIZE) {
502 					dprintf("ramdisk: error writing pages to file: short "
503 						"write (%zd/%zu)\n", bytesWritten,
504 						pagesToWrite * B_PAGE_SIZE);
505 					error = B_ERROR;
506 				}
507 			}
508 
509 			// mark the pages unbusy, on success also unmodified
510 			locker.Lock();
511 
512 			for (size_t i = 0; i < pagesToWrite; i++) {
513 				if (vm_page* page = pages[i]) {
514 					if (error == B_OK)
515 						page->modified = false;
516 					fCache->MarkPageUnbusy(page);
517 					DEBUG_PAGE_ACCESS_END(page);
518 				}
519 			}
520 
521 			if (error != B_OK)
522 				break;
523 
524 			offset += pagesToWrite * B_PAGE_SIZE;
525 		}
526 
527 		return error;
528 	}
529 
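	// Handles B_TRIM_DEVICE: frees the cache pages backing the given ranges,
	// after rounding each range inward to page boundaries.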
530 	status_t Trim(fs_trim_data* trimData)
531 	{
532 		TRACE("trim_device()\n");
533 
534 		uint64 trimmedSize = 0;
535 		for (uint32 i = 0; i < trimData->range_count; i++) {
536 			trimmedSize += trimData->ranges[i].size;
537 
538 			off_t offset = trimData->ranges[i].offset;
539 			off_t length = trimData->ranges[i].size;
540 
541 			// Round the offset up and the length down to multiples of the
542 			// page size. The offset is rounded up, so some space may be left
543 			// (not trimmed) at the start of the range.
544 			offset = (offset + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
545 			// Adjust the length for the possibly skipped start of the range.
546 			length -= offset - trimData->ranges[i].offset;
547 			// The length is rounded down, so some space at the end may also
548 			// be left (not trimmed).
549 			length &= ~(B_PAGE_SIZE - 1);
550 
551 			TRACE("ramdisk: trim %" B_PRIdOFF " bytes from %" B_PRIdOFF "\n",
552 				length, offset);
553 
554 			ASSERT(offset % B_PAGE_SIZE == 0);
555 			ASSERT(length % B_PAGE_SIZE == 0);
556 
557 			vm_page** pages = new(std::nothrow) vm_page*[length / B_PAGE_SIZE];
558 			if (pages == NULL)
559 				return B_NO_MEMORY;
560 			ArrayDeleter<vm_page*> pagesDeleter(pages);
561 
562 			_GetPages(offset, length, false, pages);
563 
564 			AutoLocker<VMCache> locker(fCache);
565 			uint32 j;
566 			for (j = 0; j < length / B_PAGE_SIZE; j++) {
567 				// If we run out of pages (some may already be trimmed), stop.
568 				if (pages[j] == NULL)
569 					break;
570 
571 				TRACE("free range %" B_PRIu32 ", page %" B_PRIu32 ", offset %"
572 					B_PRIdOFF "\n", i, j, offset);
573 				if (pages[j]->Cache())
574 					fCache->RemovePage(pages[j]);
575 				vm_page_free(NULL, pages[j]);
576 			}
577 		}
578 
579 		trimData->trimmed_size = trimmedSize;
580 
581 		return B_OK;
582 	}
583 
584 
585 
586 	status_t DoIO(IORequest* request)
587 	{
588 		return fIOScheduler->ScheduleRequest(request);
589 	}
590 
591 	virtual status_t PublishDevice()
592 	{
593 		return sDeviceManager->publish_device(Node(), fDeviceName,
594 			kRawDeviceModuleName);
595 	}
596 
597 private:
598 	static status_t _DoIOEntry(void* data, IOOperation* operation)
599 	{
600 		return ((RawDevice*)data)->_DoIO(operation);
601 	}
602 
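	// I/O scheduler callback: processes one IOOperation page by page, copying
	// between the operation's physical vecs and the cache pages looked up
	// (or, for writes, newly allocated) by _GetPages().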
603 	status_t _DoIO(IOOperation* operation)
604 	{
605 		off_t offset = operation->Offset();
606 		generic_size_t length = operation->Length();
607 
608 		ASSERT(offset % B_PAGE_SIZE == 0);
609 		ASSERT(length % B_PAGE_SIZE == 0);
610 
611 		const generic_io_vec* vecs = operation->Vecs();
612 		generic_size_t vecOffset = 0;
613 		bool isWrite = operation->IsWrite();
614 
615 		vm_page** pages = new(std::nothrow) vm_page*[length / B_PAGE_SIZE];
616 		if (pages == NULL)
617 			return B_NO_MEMORY;
618 		ArrayDeleter<vm_page*> pagesDeleter(pages);
619 
620 		_GetPages(offset, length, isWrite, pages);
621 
622 		status_t error = B_OK;
623 		size_t index = 0;
624 
625 		while (length > 0) {
626 			vm_page* page = pages[index];
627 
628 			if (isWrite)
629 				page->modified = true;
630 
631 			error = _CopyData(page, vecs, vecOffset, isWrite);
632 			if (error != B_OK)
633 				break;
634 
635 			offset += B_PAGE_SIZE;
636 			length -= B_PAGE_SIZE;
637 			index++;
638 		}
639 
640 		_PutPages(operation->Offset(), operation->Length(), pages,
641 			error == B_OK);
642 
643 		if (error != B_OK) {
644 			fIOScheduler->OperationCompleted(operation, error, 0);
645 			return error;
646 		}
647 
648 		fIOScheduler->OperationCompleted(operation, B_OK, operation->Length());
649 		return B_OK;
650 	}
651 
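	// Looks up the cache pages for [offset, offset + length), marking found
	// pages busy. For writes, missing pages are allocated (wired and busy)
	// but not yet inserted into the cache; for reads, missing entries stay
	// NULL.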
652 	void _GetPages(off_t offset, off_t length, bool isWrite, vm_page** pages)
653 	{
654 		// get the pages we already have in the cache
655 		AutoLocker<VMCache> locker(fCache);
656 
657 		size_t pageCount = length / B_PAGE_SIZE;
658 		size_t index = 0;
659 		size_t missingPages = 0;
660 
661 		while (length > 0) {
662 			vm_page* page = fCache->LookupPage(offset);
663 			if (page != NULL) {
664 				if (page->busy) {
665 					fCache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
666 					continue;
667 				}
668 
669 				DEBUG_PAGE_ACCESS_START(page);
670 				page->busy = true;
671 			} else
672 				missingPages++;
673 
674 			pages[index++] = page;
675 			offset += B_PAGE_SIZE;
676 			length -= B_PAGE_SIZE;
677 		}
678 
679 		locker.Unlock();
680 
681 		// For a write we need to reserve the missing pages.
682 		if (isWrite && missingPages > 0) {
683 			vm_page_reservation reservation;
684 			vm_page_reserve_pages(&reservation, missingPages,
685 				VM_PRIORITY_SYSTEM);
686 
687 			for (size_t i = 0; i < pageCount; i++) {
688 				if (pages[i] != NULL)
689 					continue;
690 
691 				pages[i] = vm_page_allocate_page(&reservation,
692 					PAGE_STATE_WIRED | VM_PAGE_ALLOC_BUSY);
693 
694 				if (--missingPages == 0)
695 					break;
696 			}
697 
698 			vm_page_unreserve_pages(&reservation);
699 		}
700 	}
701 
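	// Counterpart to _GetPages(): marks the pages unbusy again. Newly
	// allocated pages (no cache ref yet) are inserted into the cache on
	// success or freed on failure.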
702 	void _PutPages(off_t offset, off_t length, vm_page** pages, bool success)
703 	{
704 		AutoLocker<VMCache> locker(fCache);
705 
706 		// Mark all pages unbusy. On error free the newly allocated pages.
707 		size_t index = 0;
708 
709 		while (length > 0) {
710 			vm_page* page = pages[index++];
711 			if (page != NULL) {
712 				if (page->CacheRef() == NULL) {
713 					if (success) {
714 						fCache->InsertPage(page, offset);
715 						fCache->MarkPageUnbusy(page);
716 						DEBUG_PAGE_ACCESS_END(page);
717 					} else
718 						vm_page_free(NULL, page);
719 				} else {
720 					fCache->MarkPageUnbusy(page);
721 					DEBUG_PAGE_ACCESS_END(page);
722 				}
723 			}
724 
725 			offset += B_PAGE_SIZE;
726 			length -= B_PAGE_SIZE;
727 		}
728 	}
729 
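	// Copies one page worth of data between the given page and the I/O vecs,
	// mapping the page via vm_get_physical_page_current_cpu(). With toPage
	// the data flows into the page (write); otherwise out of it (read), or
	// the vecs are zero-filled if the page doesn't exist.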
730 	status_t _CopyData(vm_page* page, const generic_io_vec*& vecs,
731 		generic_size_t& vecOffset, bool toPage)
732 	{
733 		// map page to virtual memory
734 		Thread* thread = thread_get_current_thread();
735 		uint8* pageData = NULL;
736 		void* handle;
737 		if (page != NULL) {
738 			thread_pin_to_current_cpu(thread);
739 			addr_t virtualAddress;
740 			status_t error = vm_get_physical_page_current_cpu(
741 				page->physical_page_number * B_PAGE_SIZE, &virtualAddress,
742 				&handle);
743 			if (error != B_OK) {
744 				thread_unpin_from_current_cpu(thread);
745 				return error;
746 			}
747 
748 			pageData = (uint8*)virtualAddress;
749 		}
750 
751 		status_t error = B_OK;
752 		size_t length = B_PAGE_SIZE;
753 		while (length > 0) {
754 			size_t toCopy = std::min((generic_size_t)length,
755 				vecs->length - vecOffset);
756 
757 			if (toCopy == 0) {
758 				vecs++;
759 				vecOffset = 0;
760 				continue;
761 			}
762 
763 			phys_addr_t vecAddress = vecs->base + vecOffset;
764 
765 			error = toPage
766 				? vm_memcpy_from_physical(pageData, vecAddress, toCopy, false)
767 				: (page != NULL
768 					? vm_memcpy_to_physical(vecAddress, pageData, toCopy, false)
769 					: vm_memset_physical(vecAddress, 0, toCopy));
770 			if (error != B_OK)
771 				break;
772 
773 			pageData += toCopy;
774 			length -= toCopy;
775 			vecOffset += toCopy;
776 		}
777 
778 		if (page != NULL) {
779 			vm_put_physical_page_current_cpu((addr_t)pageData, handle);
780 			thread_unpin_from_current_cpu(thread);
781 		}
782 
783 		return error;
784 	}
785 
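	// Pre-populates the cache from the backing file: reads the file in chunks
	// into a temporary buffer, copies the data into freshly allocated pages,
	// and inserts only non-zero pages into the cache (all-zero pages are
	// recycled for the next chunk).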
786 	status_t _LoadFile()
787 	{
788 		static const size_t kPageCountPerIteration = 1024;
789 
790 		int fd = open(fFilePath, O_RDONLY);
791 		if (fd < 0)
792 			return errno;
793 		FileDescriptorCloser fdCloser(fd);
794 
795 		vm_page** pages = new(std::nothrow) vm_page*[kPageCountPerIteration];
796 		ArrayDeleter<vm_page*> pagesDeleter(pages);
797 
798 		uint8* buffer = (uint8*)malloc(kPageCountPerIteration * B_PAGE_SIZE);
799 		MemoryDeleter bufferDeleter(buffer);
800 			// TODO: Ideally we wouldn't use a buffer to read the file content,
801 			// but read into the pages we allocated directly. Unfortunately
802 			// there's no API to do that yet.
803 
804 		if (pages == NULL || buffer == NULL)
805 			return B_NO_MEMORY;
806 
807 		status_t error = B_OK;
808 
809 		page_num_t allocatedPages = 0;
810 		off_t offset = 0;
811 		off_t sizeRemaining = fDeviceSize;
812 		while (sizeRemaining > 0) {
813 			// Note: fDeviceSize is B_PAGE_SIZE aligned.
814 			size_t pagesToRead = std::min(kPageCountPerIteration,
815 				size_t(sizeRemaining / B_PAGE_SIZE));
816 
817 			// allocate the missing pages
818 			if (allocatedPages < pagesToRead) {
819 				vm_page_reservation reservation;
820 				vm_page_reserve_pages(&reservation,
821 					pagesToRead - allocatedPages, VM_PRIORITY_SYSTEM);
822 
823 				while (allocatedPages < pagesToRead) {
824 					pages[allocatedPages++]
825 						= vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
826 				}
827 
828 				vm_page_unreserve_pages(&reservation);
829 			}
830 
831 			// read from the file
832 			size_t bytesToRead = pagesToRead * B_PAGE_SIZE;
833 			ssize_t bytesRead = pread(fd, buffer, bytesToRead, offset);
834 			if (bytesRead < 0) {
835 				error = bytesRead;
836 				break;
837 			}
838 			size_t pagesRead = (bytesRead + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
839 			if (pagesRead < pagesToRead) {
840 				error = B_ERROR;
841 				break;
842 			}
843 
844 			// clear the last read page, if partial
845 			if ((size_t)bytesRead < pagesRead * B_PAGE_SIZE) {
846 				memset(buffer + bytesRead, 0,
847 					pagesRead * B_PAGE_SIZE - bytesRead);
848 			}
849 
850 			// copy data to allocated pages
851 			for (size_t i = 0; i < pagesRead; i++) {
852 				vm_page* page = pages[i];
853 				error = vm_memcpy_to_physical(
854 					page->physical_page_number * B_PAGE_SIZE,
855 					buffer + i * B_PAGE_SIZE, B_PAGE_SIZE, false);
856 				if (error != B_OK)
857 					break;
858 			}
859 
860 			if (error != B_OK)
861 				break;
862 
863 			// Add pages to cache. Ignore clear pages, though. Move those to the
864 			// beginning of the array, so we can reuse them in the next
865 			// iteration.
866 			AutoLocker<VMCache> locker(fCache);
867 
868 			size_t clearPages = 0;
869 			for (size_t i = 0; i < pagesRead; i++) {
870 				uint64* pageData = (uint64*)(buffer + i * B_PAGE_SIZE);
871 				bool isClear = true;
872 				for (size_t k = 0; isClear && k < B_PAGE_SIZE / 8; k++)
873 					isClear = pageData[k] == 0;
874 
875 				if (isClear) {
876 					pages[clearPages++] = pages[i];
877 				} else {
878 					fCache->InsertPage(pages[i], offset + i * B_PAGE_SIZE);
879 					DEBUG_PAGE_ACCESS_END(pages[i]);
880 				}
881 			}
882 
883 			locker.Unlock();
884 
885 			// Move any left-over allocated pages to the end of the empty pages
886 			// and compute the new allocated pages count.
887 			if (pagesRead < allocatedPages) {
888 				size_t count = allocatedPages - pagesRead;
889 				memcpy(pages + clearPages, pages + pagesRead,
890 					count * sizeof(vm_page*));
891 				clearPages += count;
892 			}
893 			allocatedPages = clearPages;
894 
895 			offset += pagesRead * B_PAGE_SIZE;
896 			sizeRemaining -= pagesRead * B_PAGE_SIZE;
897 		}
898 
899 		// free left-over allocated pages
900 		for (size_t i = 0; i < allocatedPages; i++)
901 			vm_page_free(NULL, pages[i]);
902 
903 		return error;
904 	}
905 
906 private:
907 	int32			fID;
908 	bool			fUnregistered;
909 	off_t			fDeviceSize;
910 	char*			fDeviceName;
911 	char*			fFilePath;
912 	VMCache*		fCache;
913 	DMAResource*	fDMAResource;
914 	IOScheduler*	fIOScheduler;
915 };
916 
917 
918 struct RawDeviceCookie {
919 	RawDeviceCookie(RawDevice* device, int openMode)
920 		:
921 		fDevice(device),
922 		fOpenMode(openMode)
923 	{
924 	}
925 
926 	RawDevice* Device() const	{ return fDevice; }
927 	int OpenMode() const		{ return fOpenMode; }
928 
929 private:
930 	RawDevice*	fDevice;
931 	int			fOpenMode;
932 };
933 
934 
935 // #pragma mark -
936 
937 
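// Raw device IDs come from a 64-bit bitmap (sUsedRawDeviceIDs), so at most 64
// RAM disks can be registered at a time; allocate_raw_device_id() returns -1
// when all IDs are in use.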
938 static int32
939 allocate_raw_device_id()
940 {
941 	MutexLocker deviceListLocker(sDeviceListLock);
942 	for (size_t i = 0; i < sizeof(sUsedRawDeviceIDs) * 8; i++) {
943 		if ((sUsedRawDeviceIDs & ((uint64)1 << i)) == 0) {
944 			sUsedRawDeviceIDs |= (uint64)1 << i;
945 			return (int32)i;
946 		}
947 	}
948 
949 	return -1;
950 }
951 
952 
953 static void
954 free_raw_device_id(int32 id)
955 {
956 	MutexLocker deviceListLocker(sDeviceListLock);
957 	sUsedRawDeviceIDs &= ~((uint64)1 << id);
958 }
959 
960 
961 static RawDevice*
962 find_raw_device(int32 id)
963 {
964 	for (RawDeviceList::Iterator it = sDeviceList.GetIterator();
965 			RawDevice* device = it.Next();) {
966 		if (device->ID() == id)
967 			return device;
968 	}
969 
970 	return NULL;
971 }
972 
973 
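// RAM_DISK_IOCTL_REGISTER handler: validates the optional backing file path,
// derives the device size from the file (or from request->size), and lets the
// control device register a new raw device node. The new device's ID is
// returned to the caller in request->id.
//
// Illustrative userland sketch only (not part of this driver), assuming
// RAM_DISK_CONTROL_DEVICE_NAME is a string literal naming the control device
// below /dev and <file_systems/ram_disk/ram_disk.h> is available:
//
//	ram_disk_ioctl_register request = {};
//	request.size = 64 * 1024 * 1024;	// 64 MiB RAM disk, no backing file
//	int fd = open("/dev/" RAM_DISK_CONTROL_DEVICE_NAME, O_RDONLY);
//	if (fd >= 0 && ioctl(fd, RAM_DISK_IOCTL_REGISTER, &request,
//			sizeof(request)) == 0) {
//		// request.id now identifies the newly registered raw device
//	}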
974 static status_t
975 ioctl_register(ControlDevice* controlDevice, ram_disk_ioctl_register* request)
976 {
977 	KPath path;
978 	uint64 deviceSize = 0;
979 
980 	if (request->path[0] != '\0') {
981 		// check if the path is null-terminated
982 		if (strnlen(request->path, sizeof(request->path))
983 				== sizeof(request->path)) {
984 			return B_BAD_VALUE;
985 		}
986 
987 		// get a normalized file path
988 		status_t error = path.SetTo(request->path, true);
989 		if (error != B_OK) {
990 			dprintf("ramdisk: register: Invalid path \"%s\": %s\n",
991 				request->path, strerror(error));
992 			return B_BAD_VALUE;
993 		}
994 
995 		struct stat st;
996 		if (lstat(path.Path(), &st) != 0) {
997 			dprintf("ramdisk: register: Failed to stat \"%s\": %s\n",
998 				path.Path(), strerror(errno));
999 			return errno;
1000 		}
1001 
1002 		if (!S_ISREG(st.st_mode)) {
1003 			dprintf("ramdisk: register: \"%s\" is not a file!\n", path.Path());
1004 			return B_BAD_VALUE;
1005 		}
1006 
1007 		deviceSize = st.st_size;
1008 	} else {
1009 		deviceSize = request->size;
1010 	}
1011 
1012 	return controlDevice->Register(path.Length() > 0 ? path.Path() : NULL,
1013 		deviceSize, request->id);
1014 }
1015 
1016 
1017 static status_t
1018 ioctl_unregister(ControlDevice* controlDevice,
1019 	ram_disk_ioctl_unregister* request)
1020 {
1021 	// find the device in the list and unregister it
1022 	MutexLocker locker(sDeviceListLock);
1023 	RawDevice* device = find_raw_device(request->id);
1024 	if (device == NULL)
1025 		return B_ENTRY_NOT_FOUND;
1026 
1027 	// mark unregistered before we unlock
1028 	if (device->IsUnregistered())
1029 		return B_BUSY;
1030 	device->SetUnregistered(true);
1031 	locker.Unlock();
1032 
1033 	device_node* node = device->Node();
1034 	status_t error = sDeviceManager->unpublish_device(node,
1035 		device->DeviceName());
1036 	if (error != B_OK) {
1037 		dprintf("ramdisk: unregister: Failed to unpublish device \"%s\": %s\n",
1038 			device->DeviceName(), strerror(error));
1039 		return error;
1040 	}
1041 
1042 	error = sDeviceManager->unregister_node(node);
1043 	// Note: B_BUSY is OK. The node will be removed as soon as possible.
1044 	if (error != B_OK && error != B_BUSY) {
1045 		dprintf("ramdisk: unregister: Failed to unregister node for device %"
1046 			B_PRId32 ": %s\n", request->id, strerror(error));
1047 		return error;
1048 	}
1049 
1050 	return B_OK;
1051 }
1052 
1053 
1054 static status_t
1055 ioctl_info(RawDevice* device, ram_disk_ioctl_info* request)
1056 {
1057 	device->GetInfo(*request);
1058 	return B_OK;
1059 }
1060 
1061 
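// Helper shared by the control and raw device ioctls: copies the request
// structure from userland into a kernel-heap copy, runs the given handler on
// it, and copies the (possibly updated) structure back on success.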
1062 template<typename DeviceType, typename Request>
1063 static status_t
1064 handle_ioctl(DeviceType* device,
1065 	status_t (*handler)(DeviceType*, Request*), void* buffer)
1066 {
1067 	// copy request to the kernel heap
1068 	if (buffer == NULL || !IS_USER_ADDRESS(buffer))
1069 		return B_BAD_ADDRESS;
1070 
1071 	Request* request = new(std::nothrow) Request;
1072 	if (request == NULL)
1073 		return B_NO_MEMORY;
1074 	ObjectDeleter<Request> requestDeleter(request);
1075 
1076 	if (user_memcpy(request, buffer, sizeof(Request)) != B_OK)
1077 		return B_BAD_ADDRESS;
1078 
1079 	// handle the ioctl
1080 	status_t error = handler(device, request);
1081 	if (error != B_OK)
1082 		return error;
1083 
1084 	// copy the request back to userland
1085 	if (user_memcpy(buffer, request, sizeof(Request)) != B_OK)
1086 		return B_BAD_ADDRESS;
1087 
1088 	return B_OK;
1089 }
1090 
1091 
1092 //	#pragma mark - driver
1093 
1094 
1095 static float
1096 ram_disk_driver_supports_device(device_node* parent)
1097 {
1098 	const char* bus = NULL;
1099 	if (sDeviceManager->get_attr_string(parent, B_DEVICE_BUS, &bus, false)
1100 			== B_OK
1101 		&& strcmp(bus, "generic") == 0) {
1102 		return 0.8;
1103 	}
1104 
1105 	return -1;
1106 }
1107 
1108 
1109 static status_t
1110 ram_disk_driver_register_device(device_node* parent)
1111 {
1112 	device_attr attrs[] = {
1113 		{B_DEVICE_PRETTY_NAME, B_STRING_TYPE,
1114 			{string: "RAM Disk Control Device"}},
1115 		{NULL}
1116 	};
1117 
1118 	return sDeviceManager->register_node(parent, kDriverModuleName, attrs, NULL,
1119 		NULL);
1120 }
1121 
1122 
1123 static status_t
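// A node carrying a kDeviceSizeItem attribute describes a registered raw
// device; without it, the node is the driver's root node and gets the control
// device.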
1124 ram_disk_driver_init_driver(device_node* node, void** _driverCookie)
1125 {
1126 	uint64 deviceSize;
1127 	if (sDeviceManager->get_attr_uint64(node, kDeviceSizeItem, &deviceSize,
1128 			false) == B_OK) {
1129 		int32 id = -1;
1130 		sDeviceManager->get_attr_uint32(node, kDeviceIDItem, (uint32*)&id,
1131 			false);
1132 		if (id < 0)
1133 			return B_ERROR;
1134 
1135 		const char* filePath = NULL;
1136 		sDeviceManager->get_attr_string(node, kFilePathItem, &filePath, false);
1137 
1138 		RawDevice* device = new(std::nothrow) RawDevice(node);
1139 		if (device == NULL)
1140 			return B_NO_MEMORY;
1141 
1142 		status_t error = device->Init(id, filePath, deviceSize);
1143 		if (error != B_OK) {
1144 			delete device;
1145 			return error;
1146 		}
1147 
1148 		*_driverCookie = (Device*)device;
1149 	} else {
1150 		ControlDevice* device = new(std::nothrow) ControlDevice(node);
1151 		if (device == NULL)
1152 			return B_NO_MEMORY;
1153 
1154 		*_driverCookie = (Device*)device;
1155 	}
1156 
1157 	return B_OK;
1158 }
1159 
1160 
1161 static void
1162 ram_disk_driver_uninit_driver(void* driverCookie)
1163 {
1164 	Device* device = (Device*)driverCookie;
1165 	if (RawDevice* rawDevice = dynamic_cast<RawDevice*>(device))
1166 		free_raw_device_id(rawDevice->ID());
1167 	delete device;
1168 }
1169 
1170 
1171 static status_t
1172 ram_disk_driver_register_child_devices(void* driverCookie)
1173 {
1174 	Device* device = (Device*)driverCookie;
1175 	return device->PublishDevice();
1176 }
1177 
1178 
1179 //	#pragma mark - control device
1180 
1181 
1182 static status_t
1183 ram_disk_control_device_init_device(void* driverCookie, void** _deviceCookie)
1184 {
1185 	*_deviceCookie = driverCookie;
1186 	return B_OK;
1187 }
1188 
1189 
1190 static void
1191 ram_disk_control_device_uninit_device(void* deviceCookie)
1192 {
1193 }
1194 
1195 
1196 static status_t
1197 ram_disk_control_device_open(void* deviceCookie, const char* path, int openMode,
1198 	void** _cookie)
1199 {
1200 	*_cookie = deviceCookie;
1201 	return B_OK;
1202 }
1203 
1204 
1205 static status_t
1206 ram_disk_control_device_close(void* cookie)
1207 {
1208 	return B_OK;
1209 }
1210 
1211 
1212 static status_t
1213 ram_disk_control_device_free(void* cookie)
1214 {
1215 	return B_OK;
1216 }
1217 
1218 
1219 static status_t
1220 ram_disk_control_device_read(void* cookie, off_t position, void* buffer,
1221 	size_t* _length)
1222 {
1223 	return B_BAD_VALUE;
1224 }
1225 
1226 
1227 static status_t
1228 ram_disk_control_device_write(void* cookie, off_t position, const void* data,
1229 	size_t* _length)
1230 {
1231 	return B_BAD_VALUE;
1232 }
1233 
1234 
1235 static status_t
1236 ram_disk_control_device_control(void* cookie, uint32 op, void* buffer,
1237 	size_t length)
1238 {
1239 	ControlDevice* device = (ControlDevice*)cookie;
1240 
1241 	switch (op) {
1242 		case RAM_DISK_IOCTL_REGISTER:
1243 			return handle_ioctl(device, &ioctl_register, buffer);
1244 
1245 		case RAM_DISK_IOCTL_UNREGISTER:
1246 			return handle_ioctl(device, &ioctl_unregister, buffer);
1247 	}
1248 
1249 	return B_BAD_VALUE;
1250 }
1251 
1252 
1253 //	#pragma mark - raw device
1254 
1255 
1256 static status_t
1257 ram_disk_raw_device_init_device(void* driverCookie, void** _deviceCookie)
1258 {
1259 	RawDevice* device = static_cast<RawDevice*>((Device*)driverCookie);
1260 
1261 	status_t error = device->Prepare();
1262 	if (error != B_OK)
1263 		return error;
1264 
1265 	*_deviceCookie = device;
1266 	return B_OK;
1267 }
1268 
1269 
1270 static void
1271 ram_disk_raw_device_uninit_device(void* deviceCookie)
1272 {
1273 	RawDevice* device = (RawDevice*)deviceCookie;
1274 	device->Unprepare();
1275 }
1276 
1277 
1278 static status_t
1279 ram_disk_raw_device_open(void* deviceCookie, const char* path, int openMode,
1280 	void** _cookie)
1281 {
1282 	RawDevice* device = (RawDevice*)deviceCookie;
1283 
1284 	RawDeviceCookie* cookie = new(std::nothrow) RawDeviceCookie(device,
1285 		openMode);
1286 	if (cookie == NULL)
1287 		return B_NO_MEMORY;
1288 
1289 	*_cookie = cookie;
1290 	return B_OK;
1291 }
1292 
1293 
1294 static status_t
1295 ram_disk_raw_device_close(void* cookie)
1296 {
1297 	return B_OK;
1298 }
1299 
1300 
1301 static status_t
1302 ram_disk_raw_device_free(void* _cookie)
1303 {
1304 	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1305 	delete cookie;
1306 	return B_OK;
1307 }
1308 
1309 
1310 static status_t
1311 ram_disk_raw_device_read(void* _cookie, off_t pos, void* buffer,
1312 	size_t* _length)
1313 {
1314 	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1315 	RawDevice* device = cookie->Device();
1316 
1317 	size_t length = *_length;
1318 
1319 	if (pos >= device->DeviceSize())
1320 		return B_BAD_VALUE;
1321 	if (pos + (off_t)length > device->DeviceSize())
1322 		length = device->DeviceSize() - pos;
1323 
1324 	IORequest request;
1325 	status_t status = request.Init(pos, (addr_t)buffer, length, false, 0);
1326 	if (status != B_OK)
1327 		return status;
1328 
1329 	status = device->DoIO(&request);
1330 	if (status != B_OK)
1331 		return status;
1332 
1333 	status = request.Wait(0, 0);
1334 	if (status == B_OK)
1335 		*_length = length;
1336 	return status;
1337 }
1338 
1339 
1340 static status_t
1341 ram_disk_raw_device_write(void* _cookie, off_t pos, const void* buffer,
1342 	size_t* _length)
1343 {
1344 	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1345 	RawDevice* device = cookie->Device();
1346 
1347 	size_t length = *_length;
1348 
1349 	if (pos >= device->DeviceSize())
1350 		return B_BAD_VALUE;
1351 	if (pos + (off_t)length > device->DeviceSize())
1352 		length = device->DeviceSize() - pos;
1353 
1354 	IORequest request;
1355 	status_t status = request.Init(pos, (addr_t)buffer, length, true, 0);
1356 	if (status != B_OK)
1357 		return status;
1358 
1359 	status = device->DoIO(&request);
1360 	if (status != B_OK)
1361 		return status;
1362 
1363 	status = request.Wait(0, 0);
1364 	if (status == B_OK)
1365 		*_length = length;
1366 
1367 	return status;
1368 }
1369 
1370 
1371 static status_t
1372 ram_disk_raw_device_io(void* _cookie, io_request* request)
1373 {
1374 	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1375 	RawDevice* device = cookie->Device();
1376 
1377 	return device->DoIO(request);
1378 }
1379 
1380 
1381 static status_t
1382 ram_disk_raw_device_control(void* _cookie, uint32 op, void* buffer,
1383 	size_t length)
1384 {
1385 	RawDeviceCookie* cookie = (RawDeviceCookie*)_cookie;
1386 	RawDevice* device = cookie->Device();
1387 
1388 	switch (op) {
1389 		case B_GET_DEVICE_SIZE:
1390 		{
1391 			size_t size = device->DeviceSize();
1392 			return user_memcpy(buffer, &size, sizeof(size_t));
1393 		}
1394 
1395 		case B_SET_NONBLOCKING_IO:
1396 		case B_SET_BLOCKING_IO:
1397 			return B_OK;
1398 
1399 		case B_GET_READ_STATUS:
1400 		case B_GET_WRITE_STATUS:
1401 		{
1402 			bool value = true;
1403 			return user_memcpy(buffer, &value, sizeof(bool));
1404 		}
1405 
1406 		case B_GET_GEOMETRY:
1407 		case B_GET_BIOS_GEOMETRY:
1408 		{
1409 			device_geometry geometry;
1410 			geometry.bytes_per_sector = B_PAGE_SIZE;
1411 			geometry.sectors_per_track = 1;
1412 			geometry.cylinder_count = device->DeviceSize() / B_PAGE_SIZE;
1413 				// TODO: We're limited to 2^32 * B_PAGE_SIZE, if we don't use
1414 				// sectors_per_track and head_count.
1415 			geometry.head_count = 1;
1416 			geometry.device_type = B_DISK;
1417 			geometry.removable = true;
1418 			geometry.read_only = false;
1419 			geometry.write_once = false;
1420 
1421 			return user_memcpy(buffer, &geometry, sizeof(device_geometry));
1422 		}
1423 
1424 		case B_GET_MEDIA_STATUS:
1425 		{
1426 			status_t status = B_OK;
1427 			return user_memcpy(buffer, &status, sizeof(status_t));
1428 		}
1429 
1430 		case B_GET_ICON_NAME:
1431 			return user_strlcpy((char*)buffer, "devices/drive-ramdisk",
1432 				B_FILE_NAME_LENGTH);
1433 
1434 		case B_GET_VECTOR_ICON:
1435 		{
1436 			device_icon iconData;
1437 			if (length != sizeof(device_icon))
1438 				return B_BAD_VALUE;
1439 			if (user_memcpy(&iconData, buffer, sizeof(device_icon)) != B_OK)
1440 				return B_BAD_ADDRESS;
1441 
1442 			if (iconData.icon_size >= (int32)sizeof(kRamdiskIcon)) {
1443 				if (user_memcpy(iconData.icon_data, kRamdiskIcon,
1444 						sizeof(kRamdiskIcon)) != B_OK)
1445 					return B_BAD_ADDRESS;
1446 			}
1447 
1448 			iconData.icon_size = sizeof(kRamdiskIcon);
1449 			return user_memcpy(buffer, &iconData, sizeof(device_icon));
1450 		}
1451 
1452 		case B_SET_UNINTERRUPTABLE_IO:
1453 		case B_SET_INTERRUPTABLE_IO:
1454 		case B_FLUSH_DRIVE_CACHE:
1455 			return B_OK;
1456 
1457 		case RAM_DISK_IOCTL_FLUSH:
1458 		{
1459 			status_t error = device->Flush();
1460 			if (error != B_OK) {
1461 				dprintf("ramdisk: flush: Failed to flush device: %s\n",
1462 					strerror(error));
1463 				return error;
1464 			}
1465 
1466 			return B_OK;
1467 		}
1468 
1469 		case B_TRIM_DEVICE:
1470 		{
1471 			fs_trim_data* trimData;
1472 			MemoryDeleter deleter;
1473 			status_t status = get_trim_data_from_user(buffer, length, deleter,
1474 				trimData);
1475 			if (status != B_OK)
1476 				return status;
1477 
1478 			status = device->Trim(trimData);
1479 			if (status != B_OK)
1480 				return status;
1481 
1482 			return copy_trim_data_to_user(buffer, trimData);
1483 		}
1484 
1485 		case RAM_DISK_IOCTL_INFO:
1486 			return handle_ioctl(device, &ioctl_info, buffer);
1487 	}
1488 
1489 	return B_BAD_VALUE;
1490 }
1491 
1492 
1493 // #pragma mark -
1494 
1495 
1496 module_dependency module_dependencies[] = {
1497 	{B_DEVICE_MANAGER_MODULE_NAME, (module_info**)&sDeviceManager},
1498 	{}
1499 };
1500 
1501 
1502 static const struct driver_module_info sChecksumDeviceDriverModule = {
1503 	{
1504 		kDriverModuleName,
1505 		0,
1506 		NULL
1507 	},
1508 
1509 	ram_disk_driver_supports_device,
1510 	ram_disk_driver_register_device,
1511 	ram_disk_driver_init_driver,
1512 	ram_disk_driver_uninit_driver,
1513 	ram_disk_driver_register_child_devices
1514 };
1515 
1516 static const struct device_module_info sChecksumControlDeviceModule = {
1517 	{
1518 		kControlDeviceModuleName,
1519 		0,
1520 		NULL
1521 	},
1522 
1523 	ram_disk_control_device_init_device,
1524 	ram_disk_control_device_uninit_device,
1525 	NULL,
1526 
1527 	ram_disk_control_device_open,
1528 	ram_disk_control_device_close,
1529 	ram_disk_control_device_free,
1530 
1531 	ram_disk_control_device_read,
1532 	ram_disk_control_device_write,
1533 	NULL,	// io
1534 
1535 	ram_disk_control_device_control,
1536 
1537 	NULL,	// select
1538 	NULL	// deselect
1539 };
1540 
1541 static const struct device_module_info sChecksumRawDeviceModule = {
1542 	{
1543 		kRawDeviceModuleName,
1544 		0,
1545 		NULL
1546 	},
1547 
1548 	ram_disk_raw_device_init_device,
1549 	ram_disk_raw_device_uninit_device,
1550 	NULL,
1551 
1552 	ram_disk_raw_device_open,
1553 	ram_disk_raw_device_close,
1554 	ram_disk_raw_device_free,
1555 
1556 	ram_disk_raw_device_read,
1557 	ram_disk_raw_device_write,
1558 	ram_disk_raw_device_io,
1559 
1560 	ram_disk_raw_device_control,
1561 
1562 	NULL,	// select
1563 	NULL	// deselect
1564 };
1565 
1566 const module_info* modules[] = {
1567 	(module_info*)&sChecksumDeviceDriverModule,
1568 	(module_info*)&sChecksumControlDeviceModule,
1569 	(module_info*)&sChecksumRawDeviceModule,
1570 	NULL
1571 };
1572