/*
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Copyright 2002/03, Thomas Kurschel. All rights reserved.
 *
 * Distributed under the terms of the MIT License.
 */

/*
	DMA buffer handling.

	If the peripheral driver hasn't made sure that the data of a request
	is DMA safe, we check that and copy the data to a buffer if needed.
	The buffer is enlarged on demand and destroyed after a time-out
	by a daemon. Obviously, it's a good idea to avoid all this, which is
	why blkman takes care of that for read/write requests.

	To be able to copy data back after the request has finished, we need an
	S/G list to the original data, as the copying is done in a different
	thread/process context (namely the service thread).

	Currently, there is only one buffer per device; in the future,
	we may support multiple buffers, especially if we want to support
	more than 4 GB of memory, which leads to trouble with 32-bit PCI cards.
*/
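
/*
	Illustrative sketch (not from the original file): how the two public
	hooks below are typically paired around a request whose data might not
	be DMA safe; the actual call sites live elsewhere in the bus manager.

		// before the request is passed to the SIM
		if (!scsi_get_dma_buffer(request)) {
			// no DMA-safe buffer available - fail the request
		}

		// if request->buffered is now true, request->data and
		// request->sg_list point into the bounce buffer

		// after completion (and after cleanup_tmp_sg())
		if (request->buffered)
			scsi_release_dma_buffer(request);
*/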


#include "scsi_internal.h"
#include "KernelExport_ext.h"

#include <vm/vm.h>

#include <string.h>


/*!	Check whether the S/G list of a request is supported by the DMA controller */
static bool
is_sg_list_dma_safe(scsi_ccb *request)
{
	scsi_bus_info *bus = request->bus;
	const physical_entry *sg_list = request->sg_list;
	uint32 sg_count = request->sg_count;
	const uint32 dma_boundary = bus->dma_params.dma_boundary;
	const uint32 alignment = bus->dma_params.alignment;
	const uint32 max_sg_block_size = bus->dma_params.max_sg_block_size;
	const uint64 high_address = bus->dma_params.high_address;

	// not too many S/G list entries
	if (sg_count > bus->dma_params.max_sg_blocks) {
		SHOW_FLOW0(1, "S/G-list too long");
		return false;
	}

	// if there are no further restrictions - be happy
	if (dma_boundary == ~(uint32)0 && alignment == 0 && max_sg_block_size == 0)
		return true;

	// argh - controller is a bit picky, so make sure it likes us
	for (uint32 cur_idx = sg_count; cur_idx >= 1; --cur_idx, ++sg_list) {
		phys_addr_t max_len;

		// calculate the space up to the next DMA boundary and
		// verify that the boundary isn't crossed
		max_len = (dma_boundary + 1) - (sg_list->address & dma_boundary);
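		// e.g. with dma_boundary = 0xffff (64 KB) and address = 0x1f000,
		// max_len = 0x10000 - 0xf000 = 0x1000, so any entry larger than
		// 4 KB would cross the boundary at 0x20000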

		if (max_len < sg_list->size) {
			SHOW_FLOW(0, "S/G-entry crosses DMA boundary @%" B_PRIxPHYSADDR,
				sg_list->address + max_len);
			return false;
		}

		// check both begin and end of entry for alignment
		if ((sg_list->address & alignment) != 0) {
			SHOW_FLOW(0, "S/G-entry has bad alignment @%#" B_PRIxPHYSADDR,
				sg_list->address);
			return false;
		}

		if (((sg_list->address + sg_list->size) & alignment) != 0) {
			SHOW_FLOW(0, "end of S/G-entry has bad alignment @%" B_PRIxPHYSADDR,
				sg_list->address + sg_list->size);
			return false;
		}

		if ((sg_list->address + sg_list->size) > high_address) {
			SHOW_FLOW(0, "S/G-entry above high address @%" B_PRIxPHYSADDR,
				sg_list->address + sg_list->size);
			return false;
		}

		// verify entry size
		if (sg_list->size > max_sg_block_size) {
			SHOW_FLOW(0, "S/G-entry is too long (%" B_PRIuPHYSADDR "/%" B_PRIu32
				" bytes)", sg_list->size, max_sg_block_size);
			return false;
		}
	}

	return true;
}


/** copy data from/to the DMA buffer;
 *	to_buffer = true copies from the original data into the DMA buffer
 */

static bool
scsi_copy_dma_buffer(scsi_ccb *request, uint32 size, bool to_buffer)
{
	dma_buffer *buffer = request->dma_buffer;
	const physical_entry *sg_list = buffer->sg_list_orig;
	uint32 num_vecs = buffer->sg_count_orig;
	uchar *buffer_data = buffer->address;

	SHOW_FLOW(1, "to_buffer=%d, %" B_PRIu32 " bytes", to_buffer, size);

	// survive even if the controller returned an invalid data size
	size = min_c(size, request->data_length);

	// we have to use the S/G list to the original data; the DMA buffer
	// was allocated in the kernel and is thus visible even if the thread
	// was changed
	for (; size > 0 && num_vecs > 0; ++sg_list, --num_vecs) {
		size_t bytes;

		bytes = min_c(size, sg_list->size);

		if (to_buffer) {
			vm_memcpy_from_physical(buffer_data, sg_list->address, bytes,
				false);
		} else
			vm_memcpy_to_physical(sg_list->address, buffer_data, bytes, false);

		buffer_data += bytes;
		size -= bytes;
	}

	return true;
}


static void
scsi_free_dma_buffer(dma_buffer *buffer)
{
	if (buffer->area > 0) {
		SHOW_FLOW0(1, "Destroying buffer");

		delete_area(buffer->area);
		buffer->area = 0;
		buffer->size = 0;
	}

	if (buffer->sg_list_area > 0) {
		delete_area(buffer->sg_list_area);
		buffer->sg_list_area = 0;
	}
}


/**	allocate a DMA buffer for the given device, deleting the old one
 *	size - buffer size in bytes
 */

static bool
scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
{
	// free the old buffer first
	scsi_free_dma_buffer(buffer);

	// just in case alignment is ridiculously huge
	size = (size + dma_params->alignment) & ~dma_params->alignment;

	size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
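	// both roundings assume masks of the form 2^n - 1, as elsewhere in this
	// file: (size + mask) & ~mask rounds size up to the next multiple of
	// mask + 1, e.g. alignment = 0x3 and size = 9 gives (9 + 3) & ~3 = 12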

	// calculate the worst case number of S/G entries, i.e. if they are
	// non-contiguous; there is a controller limit and a limit by our own
	// S/G manager to check
	if (size / B_PAGE_SIZE > dma_params->max_sg_blocks
		|| size / B_PAGE_SIZE > MAX_TEMP_SG_FRAGMENTS) {
		uint32 boundary = dma_params->dma_boundary;

		// alright - a contiguous buffer is required to keep the S/G table short
		SHOW_INFO(1, "need to setup contiguous DMA buffer of size %" B_PRIu32,
			size);

		// verify that we don't get problems with the DMA boundary
		if (boundary != ~(uint32)0) {
			if (size > boundary + 1) {
				SHOW_ERROR(2, "data is longer than the maximum DMA transfer "
					"length (%" B_PRIu32 "/%" B_PRIu32 " bytes)", size,
					boundary + 1);
				return false;
			}
		}

		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		if (dma_params->alignment != ~(uint32)0)
			physicalRestrictions.alignment = dma_params->alignment + 1;
		if (boundary != ~(uint32)0)
			physicalRestrictions.boundary = boundary + 1;
#if B_HAIKU_PHYSICAL_BITS > 32
		physicalRestrictions.high_address = 0x100000000ULL;
			// TODO: Use 64 bit addresses, if possible!
#endif
		buffer->area = create_area_etc(B_SYSTEM_TEAM, "DMA buffer", size,
			B_CONTIGUOUS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, 0,
			&virtualRestrictions, &physicalRestrictions,
			(void**)&buffer->address);

		if (buffer->area < 0) {
			SHOW_ERROR(2, "Cannot create contiguous DMA buffer of %" B_PRIu32
				" bytes", size);
			return false;
		}

		buffer->size = size;
	} else {
		// we can live with a fragmented buffer - very nice
		buffer->area = create_area("DMA buffer",
			(void **)&buffer->address, B_ANY_KERNEL_ADDRESS, size,
			B_32_BIT_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
				// TODO: Use B_FULL_LOCK, if possible!
		if (buffer->area < 0) {
			SHOW_ERROR(2, "Cannot create DMA buffer of %" B_PRIu32 " bytes",
				size);
			return false;
		}

		buffer->size = size;
	}

	// create the S/G list
	// worst case is one entry per page, and size is page-aligned
	size_t sg_list_size = buffer->size / B_PAGE_SIZE * sizeof(physical_entry);
	// create_area has page granularity
	sg_list_size = (sg_list_size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
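	// e.g. a 256 KB buffer with 4 KB pages needs at most 64 entries; with a
	// 16 byte physical_entry (address plus size, 8 bytes each on a 64 bit
	// build) that is 1 KB of table, rounded up to one page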

	buffer->sg_list_area = create_area("DMA buffer S/G table",
		(void **)&buffer->sg_list, B_ANY_KERNEL_ADDRESS, sg_list_size,
		B_32_BIT_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
			// TODO: Use B_FULL_LOCK, if possible!
	if (buffer->sg_list_area < 0) {
		SHOW_ERROR(2, "Cannot create DMA buffer S/G list of %" B_PRIuSIZE
			" bytes", sg_list_size);

		delete_area(buffer->area);
		buffer->area = 0;
		return false;
	}

	size_t sg_list_entries = sg_list_size / sizeof(physical_entry);

	{
		size_t mapped_len;
		status_t res;
		iovec vec = {
			buffer->address,
			buffer->size
		};

		res = get_iovec_memory_map(
			&vec, 1, 0, buffer->size,
			buffer->sg_list, sg_list_entries, &buffer->sg_count,
			&mapped_len);

		if (res != B_OK || mapped_len != buffer->size) {
			SHOW_ERROR(0, "Error creating S/G list for DMA buffer (%s; wanted "
				"%" B_PRIuSIZE ", got %" B_PRIuSIZE " bytes)", strerror(res),
				buffer->size, mapped_len);

			// don't hand out a buffer without a valid S/G list
			scsi_free_dma_buffer(buffer);
			return false;
		}
	}

	return true;
}


static void
scsi_free_dma_buffer_sg_orig(dma_buffer *buffer)
{
	if (buffer->sg_orig > 0) {
		delete_area(buffer->sg_orig);
		buffer->sg_orig = 0;
		buffer->sg_count_max_orig = 0;
	}
}


/** allocate an S/G list to the original data */

static bool
scsi_alloc_dma_buffer_sg_orig(dma_buffer *buffer, size_t size)
{
	// free the old list first
	scsi_free_dma_buffer_sg_orig(buffer);

	size = (size * sizeof(physical_entry) + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
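	// "size" comes in as a number of S/G entries and leaves this line as a
	// byte count, rounded up to whole pages (create_area has page granularity)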

	buffer->sg_orig = create_area("S/G to original data",
		(void **)&buffer->sg_list_orig,
		B_ANY_KERNEL_ADDRESS, size,
		B_NO_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (buffer->sg_orig < 0) {
		SHOW_ERROR(2, "Cannot create S/G list to original data of %" B_PRIuSIZE
			" bytes", size);
		return false;
	}

	buffer->sg_count_max_orig = size / sizeof(physical_entry);

	SHOW_INFO(3, "Got up to %" B_PRIu32 " S/G entries to original data",
		buffer->sg_count_max_orig);

	return true;
}


/*!	dump the S/G table */
static void
dump_sg_table(const physical_entry *sg_list, uint32 sg_list_count)
{
	uint32 cur_idx;

	SHOW_FLOW(1, "count=%" B_PRIu32, sg_list_count);

	for (cur_idx = sg_list_count; cur_idx >= 1; --cur_idx, ++sg_list) {
		SHOW_FLOW(1, "addr=%" B_PRIxPHYSADDR ", size=%" B_PRIuPHYSADDR,
			sg_list->address, sg_list->size);
	}
}


/**	compose the S/G list to the original data of a request */

static bool
scsi_dma_buffer_compose_sg_orig(dma_buffer *buffer, scsi_ccb *request)
{
	// enlarge the buffer if required
	if (buffer->sg_count_max_orig < request->sg_count) {
		if (!scsi_alloc_dma_buffer_sg_orig(buffer, request->sg_count))
			return false;
	}

	SHOW_FLOW0(1, "copy S/G list");

	memcpy(buffer->sg_list_orig, request->sg_list,
		request->sg_count * sizeof(physical_entry));

	buffer->sg_count_orig = request->sg_count;
	return true;
}


/**	init the DMA buffer and copy data to it if required
 *	note: the S/G list of the request must already be set up
 */

bool
scsi_get_dma_buffer(scsi_ccb *request)
{
	scsi_device_info *device = request->device;
	dma_buffer *buffer;

	request->buffered = false;

	// perhaps we have luck and no buffering is needed
	if (is_sg_list_dma_safe(request))
		return true;

	SHOW_FLOW0(1, "Buffer is not DMA safe");

	dump_sg_table(request->sg_list, request->sg_count);

	// only one buffer at a time
	acquire_sem(device->dma_buffer_owner);

	// make sure the clean-up daemon doesn't bother us
	mutex_lock(&device->dma_buffer_lock);

	// there is only one buffer, so no further management
	buffer = &device->dma_buffer;

	buffer->inuse = true;

	mutex_unlock(&device->dma_buffer_lock);

	// memorize the buffer for cleanup
	request->dma_buffer = buffer;

	// enlarge the buffer if it is too small
	if (buffer->size < request->data_length) {
		if (!scsi_alloc_dma_buffer(buffer, &device->bus->dma_params,
				request->data_length))
			goto err;
	}

	// create the S/G list to the original data (necessary for copying from
	// the buffer at the end of the request, but out of laziness also used
	// for copying to the buffer in a moment)
	if (!scsi_dma_buffer_compose_sg_orig(&device->dma_buffer, request))
		goto err;

	// copy data to the buffer
	if ((request->flags & SCSI_DIR_MASK) == SCSI_DIR_OUT) {
		if (!scsi_copy_dma_buffer(request, request->data_length, true))
			goto err;
	}

	// replace the data address, so no one notices that a buffer is used
	buffer->orig_data = request->data;
	buffer->orig_sg_list = request->sg_list;
	buffer->orig_sg_count = request->sg_count;

	request->data = buffer->address;
	request->sg_list = buffer->sg_list;
	request->sg_count = buffer->sg_count;

	SHOW_INFO(1, "bytes: %" B_PRIu32, request->data_length);
	SHOW_INFO0(3, "we can start now");

	request->buffered = true;
	return true;

err:
	SHOW_INFO0(3, "error setting up DMA buffer");

	mutex_lock(&device->dma_buffer_lock);

	// some of this is probably not required, but I'm paranoid
	buffer->inuse = false;

	mutex_unlock(&device->dma_buffer_lock);
	release_sem(device->dma_buffer_owner);

	return false;
}


/*!	Copy data back and release the DMA buffer;
	you must have called cleanup_tmp_sg before
*/
void
scsi_release_dma_buffer(scsi_ccb *request)
{
	scsi_device_info *device = request->device;
	dma_buffer *buffer = request->dma_buffer;

	SHOW_FLOW(1, "Buffering finished, %x, %" B_PRIx32,
		request->subsys_status & SCSI_SUBSYS_STATUS_MASK,
		(request->flags & SCSI_DIR_MASK));

	// copy data from the buffer if required and if the operation succeeded
	if ((request->subsys_status & SCSI_SUBSYS_STATUS_MASK) == SCSI_REQ_CMP
		&& (request->flags & SCSI_DIR_MASK) == SCSI_DIR_IN) {
		scsi_copy_dma_buffer(request,
			request->data_length - request->data_resid, false);
	}

	// restore the request
	request->data = buffer->orig_data;
	request->sg_list = buffer->orig_sg_list;
	request->sg_count = buffer->orig_sg_count;

	// free the buffer
	mutex_lock(&device->dma_buffer_lock);

	buffer->last_use = system_time();
	buffer->inuse = false;

	mutex_unlock(&device->dma_buffer_lock);

	release_sem(device->dma_buffer_owner);

	request->buffered = false;
	request->dma_buffer = NULL;
}


/** daemon that deletes the DMA buffer if it hasn't been used for some time */

void
scsi_dma_buffer_daemon(void *dev, int counter)
{
	scsi_device_info *device = (scsi_device_info*)dev;
	dma_buffer *buffer;

	mutex_lock(&device->dma_buffer_lock);

	buffer = &device->dma_buffer;

	// free the buffer if it has been idle for longer than the cleanup delay
	if (!buffer->inuse
		&& system_time() - buffer->last_use > SCSI_DMA_BUFFER_CLEANUP_DELAY) {
		scsi_free_dma_buffer(buffer);
		scsi_free_dma_buffer_sg_orig(buffer);
	}

	mutex_unlock(&device->dma_buffer_lock);
}


void
scsi_dma_buffer_free(dma_buffer *buffer)
{
	scsi_free_dma_buffer(buffer);
	scsi_free_dma_buffer_sg_orig(buffer);
}


void
scsi_dma_buffer_init(dma_buffer *buffer)
{
	buffer->area = 0;
	buffer->size = 0;
	buffer->sg_orig = 0;
	buffer->sg_count_max_orig = 0;
}