xref: /haiku/src/add-ons/kernel/generic/scsi_periph/block.cpp (revision cbe0a0c436162d78cc3f92a305b64918c839d079)
1 /*
2  * Copyright 2021 David Sebek, dasebek@gmail.com
3  * Copyright 2004-2013 Haiku, Inc.
4  * Copyright 2002-2003 Thomas Kurschel
5  * All rights reserved. Distributed under the terms of the MIT License.
6  */
7 
8 
9 //!	Handling of block devices
10 
11 
12 #include <string.h>
13 
14 #include <AutoDeleter.h>
15 
16 #include "scsi_periph_int.h"
17 
18 
19 // UNMAP command limits
20 #define UNMAP_MAX_LBA_VALUE				UINT64_MAX
21 #define UNMAP_MAX_BLOCK_COUNT_VALUE		UINT32_MAX
22 #define UNMAP_MAX_DESCRIPTORS			4095
23 	// Limit imposed by the UNMAP command structure
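	// (The CDB's 2-byte PARAMETER LIST LENGTH field allows at most 65535
	// bytes; with the 8-byte parameter list header and 16 bytes per block
	// descriptor this works out to (65535 - 8) / 16 = 4095 descriptors.)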
24 #define UNMAP_DEFAULT_DESCRIPTORS		255
25 	// Reasonable default used when the device does not specify a limit
26 
27 // WRITE SAME (16) command limits
28 #define WS16_MAX_LBA_VALUE				UINT64_MAX
29 #define WS16_MAX_BLOCK_COUNT_VALUE		UINT32_MAX
30 
31 // WRITE SAME (10) command limits
32 #define WS10_MAX_LBA_VALUE				UINT32_MAX
33 #define WS10_MAX_BLOCK_COUNT_VALUE		UINT16_MAX
34 
35 
36 struct CapacityInfo {
37 	// Result of the READ CAPACITY command
38 	bool capacityFilled;
39 	uint64 lastLba;
40 	uint32 blockSize;
41 
42 	// Provisioning info from READ CAPACITY
43 	bool provisioningFilled;
44 	bool lbpme;
45 	bool lbprz;
46 };
47 
48 
49 struct UnmapSupport {
50 	// UNMAP commands supported by the device
51 	bool commandSupportFilled;
52 	bool unmapSupported;
53 	bool ws16Supported;
54 	bool ws10Supported;
55 
56 	// Block limits for UNMAP commands
57 	bool blockLimitsFilled;
58 	uint32 maxUnmapLbaCount;
59 	uint32 maxUnmapDescriptorCount;
60 	uint64 maxWritesameLength;
61 };
62 
63 
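// Decides whether READ CAPACITY (16) should be tried before READ CAPACITY
// (10): the 16-byte variant is required for devices that use protection
// information and is preferred for devices reporting conformance to a
// standard newer than SPC-2.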
64 static bool
65 prefer_read_capacity_16(scsi_periph_device_info* device)
66 {
67 	const scsi_res_inquiry* inquiryData = NULL;
68 	size_t inquiryDataLength;
69 
70 	if (gDeviceManager->get_attr_raw(device->node, SCSI_DEVICE_INQUIRY_ITEM,
71 				(const void**)&inquiryData, &inquiryDataLength, true) != B_OK
72 		|| inquiryDataLength != sizeof(*inquiryData)) {
73 		return false;
74 	}
75 
76 	if (inquiryData->protect)
77 		return true;
78 
79 	if (inquiryData->ansi_version > 0x04 /* SPC-2 */)
80 		return true;
81 
82 	return false;
83 }
84 
85 
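// Returns whether the device can be expected to support VPD pages (needed
// for the Logical Block Provisioning and Block Limits data below), which is
// assumed for devices conforming to SPC-2 or later.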
86 static bool
87 vpd_pages_supported(scsi_periph_device_info* device)
88 {
89 	const scsi_res_inquiry* inquiryData = NULL;
90 	size_t inquiryDataLength;
91 
92 	if (gDeviceManager->get_attr_raw(device->node, SCSI_DEVICE_INQUIRY_ITEM,
93 				(const void**)&inquiryData, &inquiryDataLength, true) != B_OK
94 		|| inquiryDataLength != sizeof(*inquiryData)) {
95 		return false;
96 	}
97 
98 	if (inquiryData->ansi_version >= 0x04 /* SPC-2 */)
99 		return true;
100 
101 	return false;
102 }
103 
104 
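// Issues READ CAPACITY (10) and, on success, fills in the last LBA and the
// block size. This variant cannot report a last LBA larger than UINT32_MAX.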
105 static status_t
106 read_capacity_10(scsi_periph_device_info* device, scsi_ccb* request,
107 	CapacityInfo* capacityInfo)
108 {
109 	capacityInfo->capacityFilled = false;
110 	capacityInfo->provisioningFilled = false;
111 
112 	scsi_res_read_capacity capacityResult;
113 	memset(&capacityResult, 0, sizeof(capacityResult));
114 
115 	scsi_cmd_read_capacity* cmd = (scsi_cmd_read_capacity*)request->cdb;
116 	memset(cmd, 0, sizeof(*cmd));
117 	cmd->opcode = SCSI_OP_READ_CAPACITY;
118 	// we don't set PMI (partial medium indicator) as we want the whole capacity;
119 	// in this case, all other parameters must be zero
120 
121 	request->flags = SCSI_DIR_IN;
122 	request->cdb_length = sizeof(*cmd);
123 	request->sort = -1;
124 	request->timeout = device->std_timeout;
125 
126 	request->data = (uint8*)&capacityResult;
127 	request->data_length = sizeof(capacityResult);
128 	request->sg_list = NULL;
129 
130 	status_t res = periph_safe_exec(device, request);
131 
132 	if (res == B_OK && request->data_resid == 0) {
133 		capacityInfo->capacityFilled = true;
134 		capacityInfo->lastLba
135 			= (uint32)B_BENDIAN_TO_HOST_INT32(capacityResult.lba);
136 		capacityInfo->blockSize
137 			= B_BENDIAN_TO_HOST_INT32(capacityResult.block_size);
138 	}
139 
140 	return res;
141 }
142 
143 
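// Issues READ CAPACITY (16), which reports a 64-bit last LBA and, in the
// same parameter data, the logical block provisioning bits (LBPME/LBPRZ).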
144 static status_t
145 read_capacity_16(scsi_periph_device_info* device, scsi_ccb* request,
146 	CapacityInfo* capacityInfo)
147 {
148 	capacityInfo->capacityFilled = false;
149 	capacityInfo->provisioningFilled = false;
150 
151 	scsi_res_read_capacity_long capacityLongResult;
152 	memset(&capacityLongResult, 0, sizeof(capacityLongResult));
153 
154 	scsi_cmd_read_capacity_long* cmd
155 		= (scsi_cmd_read_capacity_long*)request->cdb;
156 	memset(cmd, 0, sizeof(*cmd));
157 	cmd->opcode = SCSI_OP_SERVICE_ACTION_IN;
158 	cmd->service_action = SCSI_SAI_READ_CAPACITY_16;
159 	cmd->alloc_length = B_HOST_TO_BENDIAN_INT32(sizeof(capacityLongResult));
160 
161 	request->flags = SCSI_DIR_IN;
162 	request->cdb_length = sizeof(*cmd);
163 	request->sort = -1;
164 	request->timeout = device->std_timeout;
165 
166 	request->data = (uint8*)&capacityLongResult;
167 	request->data_length = sizeof(capacityLongResult);
168 	request->sg_list = NULL;
169 
170 	status_t res = periph_safe_exec(device, request);
171 
172 	if (res == B_OK && request->data_resid
173 			<= (int32)sizeof(scsi_res_read_capacity_long) - 12) {
174 		// At least the last LBA and sector size have been transferred
175 		capacityInfo->capacityFilled = true;
176 		capacityInfo->lastLba
177 			= B_BENDIAN_TO_HOST_INT64(capacityLongResult.lba);
178 		capacityInfo->blockSize
179 			= B_BENDIAN_TO_HOST_INT32(capacityLongResult.block_size);
180 	}
181 
182 	if (res == B_OK && request->data_resid
183 			<= (int32)sizeof(scsi_res_read_capacity_long) - 15) {
184 		// lbpme and lbprz bits were received too
185 		capacityInfo->provisioningFilled = true;
186 		capacityInfo->lbpme = capacityLongResult.lbpme;
187 		capacityInfo->lbprz = capacityLongResult.lbprz;
188 	}
189 
190 	return res;
191 }
192 
193 
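// Queries the Logical Block Provisioning VPD page to find out which unmap
// commands (UNMAP, WRITE SAME (16), WRITE SAME (10)) the device claims to
// support. A B_BAD_VALUE result from vpd_page_get() (for example, an
// unsupported page) is translated into B_ERROR, which the caller treats as
// "not supported" rather than as a fatal error.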
194 static status_t
195 get_unmap_commands(scsi_periph_device_info* device, scsi_ccb* request,
196 	UnmapSupport* unmapSupport)
197 {
198 	unmapSupport->commandSupportFilled = false;
199 
200 	scsi_page_lb_provisioning vpdProvisioning;
201 	memset(&vpdProvisioning, 0, sizeof(vpdProvisioning));
202 	status_t vpdStatus = vpd_page_get(device, request,
203 		SCSI_PAGE_LB_PROVISIONING, &vpdProvisioning, sizeof(vpdProvisioning));
204 
205 	if (vpdStatus == B_OK
206 		&& request->data_resid <= (int32)sizeof(scsi_page_lb_provisioning) - 6
207 		&& vpdProvisioning.page_code == SCSI_PAGE_LB_PROVISIONING
208 		&& B_BENDIAN_TO_HOST_INT16(vpdProvisioning.page_length) >= 2) {
209 		unmapSupport->commandSupportFilled = true;
210 		unmapSupport->unmapSupported = vpdProvisioning.lbpu;
211 		unmapSupport->ws16Supported = vpdProvisioning.lbpws;
212 		unmapSupport->ws10Supported = vpdProvisioning.lbpws10;
213 	}
214 
215 	if (vpdStatus == B_BAD_VALUE)
216 		return B_ERROR;
217 
218 	return vpdStatus;
219 }
220 
221 
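// Queries the Block Limits VPD page for the UNMAP and WRITE SAME limits.
// The page is only accepted if it reports the expected fixed page length of
// 0x3c bytes. As above, B_BAD_VALUE is mapped to the non-fatal B_ERROR.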
222 static status_t
223 get_unmap_limits(scsi_periph_device_info* device, scsi_ccb* request,
224 	UnmapSupport* unmapSupport)
225 {
226 	unmapSupport->blockLimitsFilled = false;
227 
228 	scsi_page_block_limits vpdBlockLimits;
229 	memset(&vpdBlockLimits, 0, sizeof(vpdBlockLimits));
230 	status_t vpdStatus = vpd_page_get(device, request,
231 		SCSI_PAGE_BLOCK_LIMITS, &vpdBlockLimits, sizeof(vpdBlockLimits));
232 
233 	if (vpdStatus == B_OK
234 		&& request->data_resid <= (int32)sizeof(scsi_page_block_limits) - 44
235 		&& vpdBlockLimits.page_code == SCSI_PAGE_BLOCK_LIMITS
236 		&& B_BENDIAN_TO_HOST_INT16(vpdBlockLimits.page_length) == 0x3c) {
237 		unmapSupport->blockLimitsFilled = true;
238 		unmapSupport->maxUnmapLbaCount = B_BENDIAN_TO_HOST_INT32(
239 			vpdBlockLimits.max_unmap_lba_count);
240 		unmapSupport->maxUnmapDescriptorCount = B_BENDIAN_TO_HOST_INT32(
241 			vpdBlockLimits.max_unmap_blk_count);
242 		unmapSupport->maxWritesameLength = B_BENDIAN_TO_HOST_INT64(
243 			vpdBlockLimits.max_write_same_length);
244 	}
245 
246 	if (vpdStatus == B_BAD_VALUE)
247 		return B_ERROR;
248 
249 	return vpdStatus;
250 }
251 
252 
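// Picks the TRIM mechanism to use from the gathered VPD data: UNMAP is
// preferred when it is supported and usable limits were reported; otherwise
// WRITE SAME (16) and then WRITE SAME (10) are used as fallbacks, each with
// a single range per command.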
253 static void
254 determine_unmap_support(const UnmapSupport* unmapSupport,
255 	enum trim_command* unmapCommand, uint32* maxLbaCount,
256 	uint32* maxDescriptorCount)
257 {
258 #ifdef DEBUG_TRIM
259 	if (unmapSupport->commandSupportFilled)
260 		dprintf("TRIM: device reports (LBP VPD): LBPU = %d, LBPWS = %d,"
261 			" LBPWS10 = %d\n", unmapSupport->unmapSupported,
262 			unmapSupport->ws16Supported, unmapSupport->ws10Supported);
263 	else
264 		dprintf("TRIM: could not get the LBP VPD of the device\n");
265 	if (unmapSupport->blockLimitsFilled)
266 		dprintf("TRIM: device reports (Block Limits VPD):"
267 			"\nTRIM: MAXIMUM UNMAP LBA COUNT = %" B_PRIu32
268 			"\nTRIM: MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT = %" B_PRIu32
269 			"\nTRIM: MAXIMUM WRITESAME LENGTH = %" B_PRIu64 "\n",
270 			unmapSupport->maxUnmapLbaCount,
271 			unmapSupport->maxUnmapDescriptorCount,
272 			unmapSupport->maxWritesameLength);
273 	else
274 		dprintf("TRIM: could not get Block Limits VPD of the device\n");
275 #endif
276 
277 	*unmapCommand = TRIM_NONE;
278 	*maxLbaCount = 0;
279 	*maxDescriptorCount = 0;
280 
281 	if (!unmapSupport->commandSupportFilled
282 		|| !unmapSupport->blockLimitsFilled)
283 		return;
284 
285 	if (unmapSupport->unmapSupported
286 		&& unmapSupport->maxUnmapLbaCount > 0
287 		&& unmapSupport->maxUnmapDescriptorCount > 0) {
288 		*unmapCommand = TRIM_UNMAP;
289 		*maxLbaCount = unmapSupport->maxUnmapLbaCount;
290 		if (unmapSupport->maxUnmapDescriptorCount == UINT32_MAX
291 			|| unmapSupport->maxUnmapDescriptorCount > UNMAP_MAX_DESCRIPTORS) {
292 			// Choose a reasonable value instead
293 			*maxDescriptorCount = UNMAP_DEFAULT_DESCRIPTORS;
294 		} else {
295 			*maxDescriptorCount = unmapSupport->maxUnmapDescriptorCount;
296 		}
297 	}
298 
299 	if (*unmapCommand == TRIM_NONE && unmapSupport->ws16Supported) {
300 		uint64 maxLength = unmapSupport->maxWritesameLength;
301 		if (maxLength == 0) {
302 			// WRITE SAME limit not reported, try UNMAP limit instead
303 			if (unmapSupport->maxUnmapLbaCount > 0)
304 				maxLength = unmapSupport->maxUnmapLbaCount;
305 			else
306 				maxLength = WS16_MAX_BLOCK_COUNT_VALUE;
307 		}
308 		*unmapCommand = TRIM_WRITESAME16;
309 		*maxLbaCount = min_c(maxLength, WS16_MAX_BLOCK_COUNT_VALUE);
310 		*maxDescriptorCount = 1;
311 	}
312 
313 	if (*unmapCommand == TRIM_NONE && unmapSupport->ws10Supported) {
314 		uint64 maxLength = unmapSupport->maxWritesameLength;
315 		if (maxLength == 0) {
316 			// WRITE SAME limit not reported, try UNMAP limit instead
317 			if (unmapSupport->maxUnmapLbaCount > 0)
318 				maxLength = unmapSupport->maxUnmapLbaCount;
319 			else
320 				maxLength = WS10_MAX_BLOCK_COUNT_VALUE;
321 		}
322 		*unmapCommand = TRIM_WRITESAME10;
323 		*maxLbaCount = min_c(maxLength, WS10_MAX_BLOCK_COUNT_VALUE);
324 		*maxDescriptorCount = 1;
325 	}
326 }
327 
328 
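// Determines the capacity and block size of the device via READ CAPACITY
// (10)/(16), finds out whether and how TRIM can be performed, stores the
// results in the device structure and reports the capacity to the driver
// through the set_capacity callback.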
329 status_t
330 periph_check_capacity(scsi_periph_device_info* device, scsi_ccb* request)
331 {
332 	CapacityInfo capacityInfo = {0};
333 	status_t res;
334 
335 	SHOW_FLOW(3, "%p, %p", device, request);
336 
337 	// The driver doesn't provide a capacity callback - apparently it isn't
338 	// a block device driver, so there is nothing to do
339 	if (device->callbacks->set_capacity == NULL)
340 		return B_OK;
341 
342 	if (prefer_read_capacity_16(device)) {
343 		SHOW_FLOW0(3, "READ CAPACITY 16 tried first");
344 		res = read_capacity_16(device, request, &capacityInfo);
345 
346 		if (res == B_ERROR) {
347 			SHOW_FLOW0(3, "READ CAPACITY 16 failed, trying READ CAPACITY 10");
348 			res = read_capacity_10(device, request, &capacityInfo);
349 		}
350 	} else {
351 		SHOW_FLOW0(3, "READ CAPACITY 10 tried first");
352 		res = read_capacity_10(device, request, &capacityInfo);
353 
354 		if (res == B_OK && capacityInfo.capacityFilled
355 			&& capacityInfo.lastLba == UINT32_MAX) {
356 			SHOW_FLOW0(3, "Device is too large, trying READ CAPACITY 16");
357 			res = read_capacity_16(device, request, &capacityInfo);
358 		}
359 	}
360 
361 	uint64 capacity;
362 	uint32 blockSize;
363 
364 	if (capacityInfo.capacityFilled) {
365 		capacity = capacityInfo.lastLba + 1;
366 		blockSize = capacityInfo.blockSize;
367 	} else {
368 		capacity = 0;
369 		blockSize = 0;
370 	}
371 
372 	enum trim_command unmapCommand = TRIM_NONE;
373 	uint32 maxLbaCount = 0;
374 	uint32 maxDescriptorCount = 0;
375 
376 	if (capacityInfo.provisioningFilled
377 		&& capacityInfo.lbpme
378 		&& vpd_pages_supported(device)) {
379 		UnmapSupport unmapSupport = {0};
380 
381 		// Don't fail if the device doesn't support the command
382 		// but fail if some other error happens
383 		if (res == B_OK) {
384 			status_t vpdStatus = get_unmap_commands(device, request,
385 				&unmapSupport);
386 			if (vpdStatus != B_OK && vpdStatus != B_ERROR)
387 				res = vpdStatus;
388 		}
389 
390 		if (res == B_OK) {
391 			status_t vpdStatus = get_unmap_limits(device, request,
392 				&unmapSupport);
393 			if (vpdStatus != B_OK && vpdStatus != B_ERROR)
394 				res = vpdStatus;
395 		}
396 
397 		determine_unmap_support(&unmapSupport, &unmapCommand,
398 				&maxLbaCount, &maxDescriptorCount);
399 
400 		if (maxLbaCount == 0 || maxDescriptorCount == 0)
401 			unmapCommand = TRIM_NONE;
402 	}
403 
404 	if (res == B_DEV_MEDIA_CHANGED) {
405 		// in this case, the error handler has already called check_capacity
406 		// recursively, so we ignore our (invalid) result
407 		SHOW_FLOW0(3, "ignore result because medium change");
408 		return B_DEV_MEDIA_CHANGED;
409 	}
410 
411 	if (res == B_OK && !capacityInfo.capacityFilled)
412 		// Although the capacity and block size will be set to 0 in this case,
413 		// it is also better to inform the caller that these values were not
414 		// reported by the device
415 		res = B_ERROR;
416 
417 	SHOW_FLOW(3, "capacity = %" B_PRIu64 ", block_size = %" B_PRIu32
418 		" (%sreported)", capacity, blockSize,
419 		capacityInfo.capacityFilled ? "" : "not ");
420 	SHOW_INFO(1, "TRIM: Setting trim support to %s",
421 		unmapCommand == TRIM_NONE ? "disabled"
422 			: unmapCommand == TRIM_UNMAP ? "UNMAP"
423 			: unmapCommand == TRIM_WRITESAME16 ? "WRITE SAME (16)"
424 			: unmapCommand == TRIM_WRITESAME10 ? "WRITE SAME (10)"
425 			: "unknown");
426 	SHOW_FLOW(3, "TRIM: Block limits: size = %" B_PRIu32
427 		", descriptors = %" B_PRIu32, maxLbaCount, maxDescriptorCount);
428 
429 	mutex_lock(&device->mutex);
430 		// TODO: Find out whether there was a reason why this mutex used to
431 		// be acquired much earlier.
432 
433 	device->unmap_command = unmapCommand;
434 	device->max_unmap_lba_count = maxLbaCount;
435 	device->max_unmap_descriptor_count = maxDescriptorCount;
436 
437 	device->block_size = blockSize;
438 
439 	device->callbacks->set_capacity(device->periph_device,
440 		capacity, blockSize);
441 
442 /*	device->byte2blk_shift = log2( device->block_size );
443 	if( device->byte2blk_shift < 0 ) {
444 		// this may be too restrictive...
445 		device->capacity = -1;
446 		return ERR_DEV_GENERAL;
447 	}*/
448 
449 	mutex_unlock(&device->mutex);
450 
451 	SHOW_FLOW(3, "done (%s)", strerror(res));
452 
453 	return res;
454 }
455 
456 
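// Trims the given ranges with UNMAP. The ranges are packed into UNMAP block
// descriptors and sent in as few commands as possible while respecting the
// device's MAXIMUM UNMAP LBA COUNT and MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
// limits.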
457 static status_t
458 trim_unmap(scsi_periph_device_info* device, scsi_ccb* request,
459 	scsi_block_range* ranges, uint32 rangeCount, uint64* trimmedBlocks)
460 {
461 	uint64 maxLength = UNMAP_MAX_BLOCK_COUNT_VALUE;
462 	uint64 maxBlocksInRequest = device->max_unmap_lba_count;
463 	uint32 maxDescriptors = device->max_unmap_descriptor_count;
464 
465 	*trimmedBlocks = 0;
466 
467 	// Allocate a single buffer and re-use it between requests
468 	size_t expectedDescriptorCount = 0;
469 	for (uint32 i = 0; i < rangeCount; i++) {
470 		expectedDescriptorCount += ranges[i].size / maxLength;
471 		if (ranges[i].size % maxLength != 0)
472 			expectedDescriptorCount++;
473 	}
474 	expectedDescriptorCount = min_c(expectedDescriptorCount, maxDescriptors);
	// Avoid a size_t underflow in the allocation size below when there is
	// nothing to trim
	if (expectedDescriptorCount == 0)
		return B_OK;
475 
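	// Note: scsi_unmap_parameter_list is declared with room for one block
	// descriptor, which is why one descriptor is subtracted from the count
	// in the size calculation below.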
476 	size_t unmapListAllocatedSize = (expectedDescriptorCount - 1)
477 			* sizeof(scsi_unmap_block_descriptor)
478 		+ sizeof(scsi_unmap_parameter_list);
479 
480 	scsi_unmap_parameter_list* unmapList
481 		= (scsi_unmap_parameter_list*)malloc(unmapListAllocatedSize);
482 	if (unmapList == NULL)
483 		return B_NO_MEMORY;
484 
485 	MemoryDeleter deleter(unmapList);
486 
487 	status_t status = B_OK;
488 	uint32 descriptorIndex = 0;
489 	uint64 trimmedBlocksInRequest = 0;
490 	memset(unmapList, 0, unmapListAllocatedSize);
491 	for (uint32 i = 0; i < rangeCount; i++) {
492 		uint64 lba = ranges[i].lba;
493 		uint64 length = ranges[i].size;
494 
495 		if (length == 0)
496 			continue; // Length of 0 would be ignored by the device anyway
497 
498 		if (lba > UNMAP_MAX_LBA_VALUE) {
499 			SHOW_ERROR0(1, "LBA value is too large!"
500 				" This unmap range will be skipped.");
501 			continue;
502 		}
503 
504 		// Split large ranges if needed.
505 		// Range length is limited by:
506 		//   - the UNMAP_MAX_BLOCK_COUNT_VALUE constant
507 		//   - the total number of LBAs in one UNMAP command is limited by
508 		//     the MAX UNMAP LBA COUNT field in the Block Limits VPD page
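		// For example (ignoring MAX UNMAP LBA COUNT for a moment), a single
		// range of 0x300000000 blocks would be split into three descriptors
		// of 0xFFFFFFFF blocks each plus one descriptor of the remaining
		// 3 blocks.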
509 		while (length > 0) {
510 			uint64 trimLength = min_c(length, maxLength);
511 			trimLength = min_c(trimLength,
512 					maxBlocksInRequest - trimmedBlocksInRequest);
513 			unmapList->blocks[descriptorIndex].lba
514 				= B_HOST_TO_BENDIAN_INT64(lba);
515 			unmapList->blocks[descriptorIndex].block_count
516 				= B_HOST_TO_BENDIAN_INT32(trimLength);
517 			descriptorIndex++;
518 			trimmedBlocksInRequest += trimLength;
519 
520 			// Split into multiple requests if needed.
521 			// The number of UNMAP block descriptors is limited by:
522 			//   - the number of block descriptors cannot exceed the MAXIMUM
523 			//     UNMAP BLOCK DESCRIPTOR COUNT value in the Block Limits VPD
524 			//   - the size of our buffer
525 			//   - what fits in one UNMAP command
526 			//   - the total number of LBAs in one UNMAP command is limited by
527 			//     the MAX UNMAP LBA COUNT field in the Block Limits VPD page
528 			if (descriptorIndex >= maxDescriptors
529 				|| descriptorIndex >= expectedDescriptorCount
530 				|| descriptorIndex >= UNMAP_MAX_DESCRIPTORS
531 				|| trimmedBlocksInRequest >= maxBlocksInRequest
532 				|| (i == rangeCount - 1 && length <= maxLength))
533 			{
534 				uint16 unmapListSize = (descriptorIndex - 1)
535 						* sizeof(scsi_unmap_block_descriptor)
536 					+ sizeof(scsi_unmap_parameter_list);
537 				unmapList->data_length = B_HOST_TO_BENDIAN_INT16(unmapListSize
538 					- offsetof(scsi_unmap_parameter_list, block_data_length));
539 				unmapList->block_data_length
540 					= B_HOST_TO_BENDIAN_INT16(unmapListSize
541 						- offsetof(scsi_unmap_parameter_list, blocks));
542 
543 				scsi_cmd_unmap* cmd = (scsi_cmd_unmap*)request->cdb;
544 				memset(cmd, 0, sizeof(*cmd));
545 				cmd->opcode = SCSI_OP_UNMAP;
546 				cmd->length = B_HOST_TO_BENDIAN_INT16(unmapListSize);
547 
548 				request->flags = SCSI_DIR_OUT;
549 				request->cdb_length = sizeof(*cmd);
550 				request->sort = B_BENDIAN_TO_HOST_INT64(
551 					unmapList->blocks[0].lba);
552 				request->timeout = device->std_timeout;
553 
554 				request->data = (uint8*)unmapList;
555 				request->data_length = unmapListSize;
556 				request->sg_list = NULL;
557 
558 				SHOW_FLOW(3, "UNMAP data used %" B_PRIu16
559 					" of %" B_PRIuSIZE " allocated bytes",
560 					unmapListSize, unmapListAllocatedSize);
561 
562 #ifdef DEBUG_TRIM
563 				uint16 scsiRangeCount = (uint16)B_BENDIAN_TO_HOST_INT16(
564 					unmapList->block_data_length)
565 					/ sizeof(scsi_unmap_block_descriptor);
566 				uint64 count = 0;
567 				dprintf("TRIM: SCSI: sending an UNMAP command to"
568 					" the device (blocks):\n");
569 				for (uint16 i = 0; i < scsiRangeCount; i++) {
570 					dprintf("[%3" B_PRIu16 "] %" B_PRIu64 " : %" B_PRIu32 "\n",
571 						i, (uint64)B_BENDIAN_TO_HOST_INT64(
572 							unmapList->blocks[i].lba),
573 						(uint32)B_BENDIAN_TO_HOST_INT32(
574 							unmapList->blocks[i].block_count));
575 					count += (uint32)B_BENDIAN_TO_HOST_INT32(
576 							unmapList->blocks[i].block_count);
577 				}
578 				if (device->max_unmap_lba_count >= count)
579 					dprintf("TRIM: SCSI: Previous UNMAP command would fit %"
580 						B_PRIu64 " more LBAs\n",
581 						device->max_unmap_lba_count - count);
582 				else
583 					dprintf("TRIM: SCSI: Previous UNMAP ranges exceed the"
584 						" device limit!\n");
585 #endif /* DEBUG_TRIM */
586 
587 				status = periph_safe_exec(device, request);
588 
589 				// The peripheral layer only generates "read" errors
590 				if (status == B_DEV_READ_ERROR)
591 					return B_DEV_WRITE_ERROR;
592 				else if (status != B_OK)
593 					return status;
594 
595 				*trimmedBlocks += trimmedBlocksInRequest;
596 
597 				descriptorIndex = 0;
598 				trimmedBlocksInRequest = 0;
599 				memset(unmapList, 0, unmapListSize);
600 			}
601 
602 			length -= trimLength;
603 			lba += trimLength;
604 		}
605 	}
606 
607 	return status;
608 }
609 
610 
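// Trims the given ranges with WRITE SAME (16). With the UNMAP bit set the
// device is asked to deallocate the blocks instead of actually writing the
// all-zero data block supplied as the data-out buffer. Each range is split
// according to the device's reported limit (or the 32-bit block count limit
// of the command).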
611 static status_t
612 trim_writesame16(scsi_periph_device_info* device, scsi_ccb* request,
613 	scsi_block_range* ranges, uint32 rangeCount, uint64* trimmedBlocks)
614 {
615 	status_t status = B_OK;
616 	*trimmedBlocks = 0;
617 
618 	for (uint32 i = 0; i < rangeCount; i++) {
619 		uint64 lba = ranges[i].lba;
620 		uint64 length = ranges[i].size;
621 
622 		if (length == 0)
623 			continue; // length of 0 would mean the rest of the device!
624 
625 		if (lba > WS16_MAX_LBA_VALUE) {
626 			SHOW_ERROR0(1, "LBA value is too large!"
627 				" This unmap range will be skipped.");
628 			continue;
629 		}
630 
631 		// Split the range into multiple requests if needed
632 		uint64 maxLength = min_c(device->max_unmap_lba_count,
633 				WS16_MAX_BLOCK_COUNT_VALUE);
634 		while (length > 0) {
635 			uint64 trimLength = min_c(length, maxLength);
636 			if (trimLength == 0) {
637 				SHOW_ERROR0(1,
638 					"Error: Length of zero in WRITE SAME (16) detected");
639 				break;
640 			}
641 
642 			void* block = malloc(device->block_size);
643 			if (block == NULL)
644 				return B_NO_MEMORY;
645 			MemoryDeleter deleter(block);
646 			memset(block, 0, device->block_size);
647 
648 			scsi_cmd_wsame_16* cmd = (scsi_cmd_wsame_16*)request->cdb;
649 			memset(cmd, 0, sizeof(*cmd));
650 			cmd->opcode = SCSI_OP_WRITE_SAME_16;
651 			cmd->unmap = 1;
652 			cmd->lba = B_HOST_TO_BENDIAN_INT64(lba);
653 			cmd->length = B_HOST_TO_BENDIAN_INT32(trimLength);
654 			//cmd->ndob = 1; // no data is needed if this bit is enabled
655 
656 			request->flags = SCSI_DIR_OUT;
657 			request->cdb_length = sizeof(*cmd);
658 			request->sort = lba;
659 			request->timeout = device->std_timeout;
660 
661 			request->data = (uint8*)block;
662 			request->data_length = device->block_size;
663 			request->sg_list = NULL;
664 
665 #ifdef DEBUG_TRIM
666 			dprintf("TRIM: SCSI: sending a WRITE SAME (16) command to"
667 				" the device (blocks):\n");
668 			dprintf("%" B_PRIu64 " : %" B_PRIu32 "\n",
669 				(uint64)B_BENDIAN_TO_HOST_INT64(cmd->lba),
670 				(uint32)B_BENDIAN_TO_HOST_INT32(cmd->length));
671 #endif
672 
673 			status = periph_safe_exec(device, request);
674 
675 			// The peripheral layer only generates "read" errors
676 			if (status == B_DEV_READ_ERROR)
677 				return B_DEV_WRITE_ERROR;
678 			else if (status != B_OK)
679 				return status;
680 
681 			*trimmedBlocks += trimLength;
682 			length -= trimLength;
683 			lba += trimLength;
684 		}
685 	}
686 
687 	return status;
688 }
689 
690 
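// Same as trim_writesame16(), but using the 10-byte CDB, which limits the
// LBA to 32 bits and the block count per command to 16 bits.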
691 static status_t
692 trim_writesame10(scsi_periph_device_info* device, scsi_ccb* request,
693 	scsi_block_range* ranges, uint32 rangeCount, uint64* trimmedBlocks)
694 {
695 	status_t status = B_OK;
696 	*trimmedBlocks = 0;
697 
698 	for (uint32 i = 0; i < rangeCount; i++) {
699 		uint64 lba = ranges[i].lba;
700 		uint64 length = ranges[i].size;
701 
702 		if (length == 0)
703 			continue; // length of 0 would mean the rest of the device!
704 
705 		if (lba > WS10_MAX_LBA_VALUE) {
706 			SHOW_ERROR0(1, "LBA value is too large!"
707 				" This unmap range will be skipped.");
708 			continue;
709 		}
710 
711 		// Split the range into multiple requests if needed
712 		uint64 maxLength = min_c(device->max_unmap_lba_count,
713 				WS10_MAX_BLOCK_COUNT_VALUE);
714 		while (length > 0) {
715 			uint64 trimLength = min_c(length, maxLength);
716 			if (trimLength == 0) {
717 				SHOW_ERROR0(1,
718 					"Error: Length of zero in WRITE SAME (10) detected");
719 				break;
720 			}
721 
722 			void* block = malloc(device->block_size);
723 			if (block == NULL)
724 				return B_NO_MEMORY;
725 			MemoryDeleter deleter(block);
726 			memset(block, 0, device->block_size);
727 
728 			scsi_cmd_wsame_10* cmd = (scsi_cmd_wsame_10*)request->cdb;
729 			memset(cmd, 0, sizeof(*cmd));
730 			cmd->opcode = SCSI_OP_WRITE_SAME_10;
731 			cmd->unmap = 1;
732 			cmd->lba = B_HOST_TO_BENDIAN_INT32(lba);
733 			cmd->length = B_HOST_TO_BENDIAN_INT16(trimLength);
734 
735 			request->flags = SCSI_DIR_OUT;
736 			request->cdb_length = sizeof(*cmd);
737 			request->sort = lba;
738 			request->timeout = device->std_timeout;
739 
740 			request->data = (uint8*)block;
741 			request->data_length = device->block_size;
742 			request->sg_list = NULL;
743 
744 #ifdef DEBUG_TRIM
745 			dprintf("TRIM: SCSI: sending a WRITE SAME (10) command to"
746 				" the device (blocks):\n");
747 			dprintf("%" B_PRIu32 " : %" B_PRIu16 "\n",
748 				(uint32)B_BENDIAN_TO_HOST_INT32(cmd->lba),
749 				(uint16)B_BENDIAN_TO_HOST_INT16(cmd->length));
750 #endif
751 
752 			status = periph_safe_exec(device, request);
753 
754 			// The peripheral layer only generates "read" errors
755 			if (status == B_DEV_READ_ERROR)
756 				return B_DEV_WRITE_ERROR;
757 			else if (status != B_OK)
758 				return status;
759 
760 			*trimmedBlocks += trimLength;
761 			length -= trimLength;
762 			lba += trimLength;
763 		}
764 	}
765 
766 	return status;
767 }
768 
769 
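// Entry point used by block device drivers to trim (unmap) ranges of
// blocks. Dispatches to the TRIM mechanism selected in
// periph_check_capacity(). A minimal, purely illustrative call from a
// driver that already owns its scsi_periph_device_info and a scsi_ccb could
// look like this (the values are hypothetical):
//
//	scsi_block_range ranges[2];
//	ranges[0].lba = 2048;
//	ranges[0].size = 1024;		// length in blocks
//	ranges[1].lba = 8192;
//	ranges[1].size = 4096;
//	uint64 trimmedBlocks;
//	status_t status = periph_trim_device(device, request, ranges, 2,
//		&trimmedBlocks);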
770 status_t
771 periph_trim_device(scsi_periph_device_info* device, scsi_ccb* request,
772 	scsi_block_range* ranges, uint32 rangeCount, uint64* trimmedBlocks)
773 {
774 	*trimmedBlocks = 0;
775 
776 	if (device->unmap_command == TRIM_NONE
777 		|| device->max_unmap_lba_count == 0
778 		|| device->max_unmap_descriptor_count == 0)
779 		return B_UNSUPPORTED;
780 
781 	switch (device->unmap_command) {
782 		case TRIM_UNMAP:
783 			return trim_unmap(device, request, ranges, rangeCount,
784 				trimmedBlocks);
785 		case TRIM_WRITESAME16:
786 			return trim_writesame16(device, request, ranges, rangeCount,
787 				trimmedBlocks);
788 		case TRIM_WRITESAME10:
789 			return trim_writesame10(device, request, ranges, rangeCount,
790 				trimmedBlocks);
791 		default:
792 			return B_UNSUPPORTED;
793 	}
794 }
795