/*
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Copyright 2002/03, Thomas Kurschel. All rights reserved.
 *
 * Distributed under the terms of the MIT License.
 */

/*
	DMA buffer handling.

	If the peripheral driver hasn't made sure that the data of a request
	is DMA safe, we check that and copy data to a buffer if needed.
	The buffer is enlarged on demand and destroyed after a time-out
	by a daemon. Obviously, it's a good idea to avoid all this, therefore
	blkman takes care of that for read/write requests.

	To be able to copy data back after the request has finished, we need a
	S/G list to the original data, as the copying is done in a different
	thread/process context (namely the service thread).

	Currently, there is only one buffer per device; in the future,
	we may support multiple buffers, especially if we want to support
	more than 4 GB memory, which leads to trouble with 32-bit PCI cards.
*/
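
/*
	Rough sketch of the intended call sequence (simplified illustration,
	not taken from a real caller):

		if (!scsi_get_dma_buffer(request)) {
			// fail the request - its data cannot be made DMA safe
		}
		// ...execute the request...
		if (request->buffered)
			scsi_release_dma_buffer(request);
*/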


#include "scsi_internal.h"
#include "KernelExport_ext.h"

#include <vm/vm.h>

#include <string.h>


/*!	Check whether the S/G list of the request is supported by the DMA controller */
static bool
is_sg_list_dma_safe(scsi_ccb *request)
{
	scsi_bus_info *bus = request->bus;
	const physical_entry *sg_list = request->sg_list;
	uint32 sg_count = request->sg_count;
	uint32 dma_boundary = bus->dma_params.dma_boundary;
	uint32 alignment = bus->dma_params.alignment;
	uint32 max_sg_block_size = bus->dma_params.max_sg_block_size;
	uint32 cur_idx;

	// not too many S/G list entries
	if (sg_count > bus->dma_params.max_sg_blocks) {
		SHOW_FLOW0(1, "S/G-list too long");
		return false;
	}

	// if there are no further restrictions - be happy
	if (dma_boundary == ~(uint32)0 && alignment == 0 && max_sg_block_size == 0)
		return true;

	// the controller is a bit picky, so make sure it likes us
	for (cur_idx = sg_count; cur_idx >= 1; --cur_idx, ++sg_list) {
		phys_addr_t max_len;

		// calculate the space up to the next DMA boundary and
		// verify that the entry doesn't cross it
		max_len = (dma_boundary + 1) - (sg_list->address & dma_boundary);

		if (max_len < sg_list->size) {
			SHOW_FLOW(0, "S/G-entry crosses DMA boundary @%" B_PRIxPHYSADDR,
				sg_list->address + max_len);
			return false;
		}

		// check both begin and end of entry for alignment
		if ((sg_list->address & alignment) != 0) {
			SHOW_FLOW(0, "S/G-entry has bad alignment @%#" B_PRIxPHYSADDR,
				sg_list->address);
			return false;
		}

		if (((sg_list->address + sg_list->size) & alignment) != 0) {
			SHOW_FLOW(0, "end of S/G-entry has bad alignment @%" B_PRIxPHYSADDR,
				sg_list->address + sg_list->size);
			return false;
		}

		// verify entry size
		if (sg_list->size > max_sg_block_size) {
			SHOW_FLOW(0, "S/G-entry is too long (%d/%d bytes)",
				(int)sg_list->size, (int)max_sg_block_size);
			return false;
		}
	}

	return true;
}
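
/*	Worked example of the boundary check above (illustration only): with
	dma_boundary = 0xffff, i.e. a transfer must not cross a 64 KB boundary,
	an entry at address 0x1fffe gives max_len = 0x10000 - 0xfffe = 2, so any
	entry larger than 2 bytes would cross the boundary at 0x20000 and the
	list is rejected.
*/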


/** copy data from/to DMA buffer */

static bool
scsi_copy_dma_buffer(scsi_ccb *request, uint32 size, bool to_buffer)
{
	dma_buffer *buffer = request->dma_buffer;
	const physical_entry *sg_list = buffer->sg_list_orig;
	uint32 num_vecs = buffer->sg_count_orig;
	uchar *buffer_data = buffer->address;

	SHOW_FLOW(1, "to_buffer=%d, %d bytes", to_buffer, (int)size);

	// survive even if the controller returned an invalid data size
	size = min_c(size, request->data_length);

	// we have to use the S/G list to the original data; the DMA buffer
	// itself was allocated in the kernel and is thus visible even if the
	// thread has changed
	for (; size > 0 && num_vecs > 0; ++sg_list, --num_vecs) {
		size_t bytes;

		bytes = min_c(size, sg_list->size);

		if (to_buffer) {
			vm_memcpy_from_physical(buffer_data, sg_list->address, bytes,
				false);
		} else
			vm_memcpy_to_physical(sg_list->address, buffer_data, bytes, false);

		buffer_data += bytes;
		size -= bytes;
	}

	return true;
}


static void
scsi_free_dma_buffer(dma_buffer *buffer)
{
	if (buffer->area > 0) {
		SHOW_FLOW0(1, "Destroying buffer");

		delete_area(buffer->area);
		buffer->area = 0;
		buffer->size = 0;
	}

	if (buffer->sg_list_area > 0) {
		delete_area(buffer->sg_list_area);
		buffer->sg_list_area = 0;
	}
}


/**	allocate DMA buffer for the given device, deleting the old one
 *	size - buffer size in bytes
 */

static bool
scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
{
	size_t sg_list_size, sg_list_entries;

	// free old buffer first
	scsi_free_dma_buffer(buffer);

	// just in case alignment is ridiculously huge
	size = (size + dma_params->alignment) & ~dma_params->alignment;

	size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
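	// e.g. with an alignment mask of 0x1ff (512 byte alignment), a request of
	// 1000 bytes is first rounded up to 1024 bytes and then to one full page
	// (illustration only - the actual values come from dma_params)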

	// calculate the worst case number of S/G entries, i.e. if they are
	// non-contiguous; there is a controller limit and a limit by our own
	// S/G manager to check
	if (size / B_PAGE_SIZE > dma_params->max_sg_blocks
		|| size / B_PAGE_SIZE > MAX_TEMP_SG_FRAGMENTS) {
		uint32 boundary = dma_params->dma_boundary;

		// alright - a contiguous buffer is required to keep the S/G table short
		SHOW_INFO(1, "need to setup contiguous DMA buffer of size %d",
			(int)size);

		// verify that we don't get problems with the DMA boundary
		if (boundary != ~(uint32)0) {
			if (size > boundary + 1) {
				SHOW_ERROR(2, "data is longer than maximum DMA transfer length (%d/%d bytes)",
					(int)size, (int)boundary + 1);
				return false;
			}
		}

		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		if (dma_params->alignment != ~(uint32)0)
			physicalRestrictions.alignment = dma_params->alignment + 1;
		if (boundary != ~(uint32)0)
			physicalRestrictions.boundary = boundary + 1;
#if B_HAIKU_PHYSICAL_BITS > 32
		physicalRestrictions.high_address = 0x100000000ULL;
			// TODO: Use 64 bit addresses, if possible!
#endif
		buffer->area = create_area_etc(B_SYSTEM_TEAM, "DMA buffer", size,
			B_CONTIGUOUS, 0, 0, &virtualRestrictions, &physicalRestrictions,
			(void**)&buffer->address);

		if (buffer->area < 0) {
			SHOW_ERROR(2, "Cannot create contiguous DMA buffer of %d bytes",
				(int)size);
			return false;
		}

		buffer->size = size;
	} else {
		// we can live with a fragmented buffer - very nice
		buffer->area = create_area("DMA buffer",
			(void **)&buffer->address, B_ANY_KERNEL_ADDRESS, size,
			B_32_BIT_FULL_LOCK, 0);
				// TODO: Use B_FULL_LOCK, if possible!
		if (buffer->area < 0) {
			SHOW_ERROR(2, "Cannot create DMA buffer of %d bytes",
				(int)size);
			return false;
		}

		buffer->size = size;
	}

	// create S/G list
	// worst case is one entry per page, and size is page-aligned
	sg_list_size = buffer->size / B_PAGE_SIZE * sizeof(physical_entry);
	// create_area has page granularity
	sg_list_size = (sg_list_size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	buffer->sg_list_area = create_area("DMA buffer S/G table",
		(void **)&buffer->sg_list, B_ANY_KERNEL_ADDRESS, sg_list_size,
		B_32_BIT_FULL_LOCK, 0);
			// TODO: Use B_FULL_LOCK, if possible!
	if (buffer->sg_list_area < 0) {
		SHOW_ERROR(2, "Cannot create DMA buffer S/G list of %d bytes",
			(int)sg_list_size);

		delete_area(buffer->area);
		buffer->area = 0;
		return false;
	}

	sg_list_entries = sg_list_size / sizeof(physical_entry);

	{
		size_t mapped_len;
		status_t res;
		iovec vec = {
			buffer->address,
			buffer->size
		};

		res = get_iovec_memory_map(
			&vec, 1, 0, buffer->size,
			buffer->sg_list, sg_list_entries, &buffer->sg_count,
			&mapped_len);

		if (res != B_OK || mapped_len != buffer->size) {
			SHOW_ERROR(0, "Error creating S/G list for DMA buffer (%s; wanted %d, got %d bytes)",
				strerror(res), (int)buffer->size, (int)mapped_len);
		}
	}

	return true;
}


static void
scsi_free_dma_buffer_sg_orig(dma_buffer *buffer)
{
	if (buffer->sg_orig > 0) {
		delete_area(buffer->sg_orig);
		buffer->sg_orig = 0;
		buffer->sg_count_max_orig = 0;
	}
}


/** allocate S/G list to original data */

static bool
scsi_alloc_dma_buffer_sg_orig(dma_buffer *buffer, int size)
{
	// free old list first
	scsi_free_dma_buffer_sg_orig(buffer);

	size = (size * sizeof(physical_entry) + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
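	// e.g. 20 requested entries need 320 bytes (assuming a 16 byte
	// physical_entry on a 64-bit build), rounded up to one 4 KB page,
	// which then holds up to 256 entries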

	buffer->sg_orig = create_area("S/G to original data",
		(void **)&buffer->sg_list_orig,
		B_ANY_KERNEL_ADDRESS, size,
		B_NO_LOCK, 0);
	if (buffer->sg_orig < 0) {
		SHOW_ERROR(2, "Cannot create S/G list to original data of %d bytes",
			(int)size);
		return false;
	}

	buffer->sg_count_max_orig = size / sizeof(physical_entry);

	SHOW_INFO(3, "Got up to %d S/G entries to original data",
		(int)buffer->sg_count_max_orig);

	return true;
}


/*!	dump S/G table */
static void
dump_sg_table(const physical_entry *sg_list,
	uint32 sg_list_count)
{
	uint32 cur_idx;

	SHOW_FLOW(1, "count=%d", (int)sg_list_count);

	for (cur_idx = sg_list_count; cur_idx >= 1; --cur_idx, ++sg_list) {
		SHOW_FLOW(1, "addr=%" B_PRIxPHYSADDR ", size=%d", sg_list->address,
			(int)sg_list->size);
	}
}


/**	compose S/G list to original data of request */

static bool
scsi_dma_buffer_compose_sg_orig(dma_buffer *buffer, scsi_ccb *request)
{
	// enlarge buffer if required
	if (buffer->sg_count_max_orig < request->sg_count) {
		if (!scsi_alloc_dma_buffer_sg_orig(buffer, request->sg_count))
			return false;
	}

	SHOW_FLOW0(1, "copy S/G list");

	memcpy(buffer->sg_list_orig, request->sg_list,
		request->sg_count * sizeof(physical_entry));

	buffer->sg_count_orig = request->sg_count;
	return true;
}


/**	init DMA buffer and copy data to it if required
 *	note: the S/G list of the request must already be set up
 */

bool
scsi_get_dma_buffer(scsi_ccb *request)
{
	scsi_device_info *device = request->device;
	dma_buffer *buffer;

	request->buffered = false;

	// perhaps we have luck and no buffering is needed
	if (is_sg_list_dma_safe(request))
		return true;

	SHOW_FLOW0(1, "Buffer is not DMA safe");

	dump_sg_table(request->sg_list, request->sg_count);

	// only one buffer at a time
	acquire_sem(device->dma_buffer_owner);

	// make sure the cleanup daemon doesn't bother us
	ACQUIRE_BEN(&device->dma_buffer_lock);

	// there is only one buffer, so no further management
	buffer = &device->dma_buffer;

	buffer->inuse = true;

	RELEASE_BEN(&device->dma_buffer_lock);

	// memorize buffer for cleanup
	request->dma_buffer = buffer;

	// enlarge buffer if too small
	if (buffer->size < request->data_length) {
		if (!scsi_alloc_dma_buffer(buffer, &device->bus->dma_params,
				request->data_length))
			goto err;
	}

	// create S/G list to the original data (necessary for the from-buffer copy
	// at the end of the request, but also used in a moment for the to-buffer
	// copy, out of laziness)
	if (!scsi_dma_buffer_compose_sg_orig(&device->dma_buffer, request))
		goto err;

	// copy data to buffer
	if ((request->flags & SCSI_DIR_MASK) == SCSI_DIR_OUT) {
		if (!scsi_copy_dma_buffer(request, request->data_length, true))
			goto err;
	}

	// replace the data address, so no one notices that a buffer is used
	buffer->orig_data = request->data;
	buffer->orig_sg_list = request->sg_list;
	buffer->orig_sg_count = request->sg_count;

	request->data = buffer->address;
	request->sg_list = buffer->sg_list;
	request->sg_count = buffer->sg_count;

	SHOW_INFO(1, "bytes: %d", (int)request->data_length);
	SHOW_INFO0(3, "we can start now");

	request->buffered = true;
	return true;

err:
	SHOW_INFO0(3, "error setting up DMA buffer");

	ACQUIRE_BEN(&device->dma_buffer_lock);

	// some of this is probably not required, but I'm paranoid
	buffer->inuse = false;

	RELEASE_BEN(&device->dma_buffer_lock);
	release_sem(device->dma_buffer_owner);

	return false;
}


/*!	Copy data back and release the DMA buffer;
	you must have called cleanup_tmp_sg before.
*/
void
scsi_release_dma_buffer(scsi_ccb *request)
{
	scsi_device_info *device = request->device;
	dma_buffer *buffer = request->dma_buffer;

	SHOW_FLOW(1, "Buffering finished, %x, %x",
		request->subsys_status & SCSI_SUBSYS_STATUS_MASK,
		(int)(request->flags & SCSI_DIR_MASK));

	// copy data from the buffer if required and the operation succeeded
	if ((request->subsys_status & SCSI_SUBSYS_STATUS_MASK) == SCSI_REQ_CMP
		&& (request->flags & SCSI_DIR_MASK) == SCSI_DIR_IN) {
		scsi_copy_dma_buffer(request,
			request->data_length - request->data_resid, false);
	}
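	// (e.g. with data_length == 4096 and data_resid == 512 reported by the
	// device, only the 3584 bytes actually transferred are copied back)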

	// restore request
	request->data = buffer->orig_data;
	request->sg_list = buffer->orig_sg_list;
	request->sg_count = buffer->orig_sg_count;

	// free buffer
	ACQUIRE_BEN(&device->dma_buffer_lock);

	buffer->last_use = system_time();
	buffer->inuse = false;

	RELEASE_BEN(&device->dma_buffer_lock);

	release_sem(device->dma_buffer_owner);

	request->buffered = false;
}


/** daemon that deletes the DMA buffer if it hasn't been used for some time */

void
scsi_dma_buffer_daemon(void *dev, int counter)
{
	scsi_device_info *device = (scsi_device_info*)dev;
	dma_buffer *buffer;

	ACQUIRE_BEN(&device->dma_buffer_lock);

	buffer = &device->dma_buffer;

	if (!buffer->inuse
		&& system_time() - buffer->last_use > SCSI_DMA_BUFFER_CLEANUP_DELAY) {
		scsi_free_dma_buffer(buffer);
		scsi_free_dma_buffer_sg_orig(buffer);
	}

	RELEASE_BEN(&device->dma_buffer_lock);
}


void
scsi_dma_buffer_free(dma_buffer *buffer)
{
	scsi_free_dma_buffer(buffer);
	scsi_free_dma_buffer_sg_orig(buffer);
}


void
scsi_dma_buffer_init(dma_buffer *buffer)
{
	buffer->area = 0;
	buffer->size = 0;
	buffer->sg_orig = 0;
	buffer->sg_count_max_orig = 0;
}
503