xref: /haiku/src/add-ons/kernel/bus_managers/scsi/queuing.cpp (revision 7749d0bb0c358a3279b1b9cc76d8376e900130a5)
1 /*
2 ** Copyright 2002/03, Thomas Kurschel. All rights reserved.
3 ** Distributed under the terms of the OpenBeOS License.
4 */
5 
6 /*
7 	Part of Open SCSI bus manager
8 
9 	Queuing of SCSI request
10 
11 	Some words about queuing, i.e. making sure that no request
12 	gets stuck in some queue forever
13 
14 	As long as the SIM accepts new reqs, XPT doesn't take care of
15 	stuck requests - thus, peripheral drivers should use synced reqs
16 	once in a while (or even always) to force the HBA to finish
17 	previously submitted reqs. This is also important for non-queuing
18 	HBAs as in this case the queuing is done by the SCSI bus manager
19 	which uses elevator sort.
20 
21 	Requests can be blocked by SIM or by the SCSI bus manager on a per-device
or per-bus basis. There are four possible reasons:
23 
24 	1. the hardware queue is too small
25 	   - detected by bus manager automatically, or
26 	   - detected by SIM which calls "requeue" for affected request
	2. SIM blocks bus/device explicitly
	3. bus manager blocks bus/device explicitly (currently used for
	   ordered requests)
30 	4. the device has queued requests or the bus has waiting devices resp.
31 
32 	The first condition is automatically cleared when a request has
33 	been completed, but can be reset manually by the SIM. In the second and
	third cases, the SIM/bus manager must explicitly unblock the bus/device.
	For easier testing, the lock_count is the sum of the overflow bit, the
36 	SIM lock count and the bus manager lock count. The blocking test is in
37 	scsi_check_enqueue_request() in scsi_io.c.
38 
39 	If a bus/device is blocked or has waiting requests/devices, new requests
40 	are added to a device-specific request queue in an elevator-sort style,
	taking care that no ordered requests are overtaken. Exceptions are
	requeued requests and autosense-requests, which are added first. Each bus
43 	has a queue of non-blocked devices that have waiting requests. If a
44 	device is to be added to this queue, it is always appended at tail
45 	to ensure fair processing.
46 */
47 
48 #include "scsi_internal.h"
49 #include "queuing.h"
50 
51 
// Add a request to the device queue, using elevator sort.
//
// The device queue (device->queued_reqs points at its head) is a circular,
// doubly linked list. Requests are kept sorted ascendingly by their "sort"
// key (a media position hint), with two restrictions: ordered requests may
// neither overtake nor be overtaken by other requests, and unsortable
// requests (sort < 0) are simply appended at the tail.
static void scsi_insert_new_request( scsi_device_info *device,
	scsi_ccb *new_request )
{
	scsi_ccb *first, *last, *before, *next;

	SHOW_FLOW( 3, "inserting new_request=%p, pos=%Ld", new_request, new_request->sort );

	first = device->queued_reqs;

	if( first == NULL ) {
		// empty queue - the new request becomes the head
		SHOW_FLOW0( 1, "no other queued request" );
		scsi_add_req_queue_first( new_request );
		return;
	}

	SHOW_FLOW( 3, "first=%p, pos=%Ld, last_pos=%Ld",
		first, first->sort, device->last_sort );

	// don't let syncs bypass others
	if( new_request->ordered ) {
		SHOW_FLOW0( 1, "adding synced request to tail" );
		scsi_add_req_queue_last( new_request );
		return;
	}

	// requests without a valid sort position cannot take part in the
	// elevator sort - append them at the tail
	if( new_request->sort < 0 ) {
		SHOW_FLOW0( 1, "adding unsortable request to tail" );
		scsi_add_req_queue_last( new_request );
		return;
	}

	// to reduce head seek time, we have two goals:
	// - sort requests ascendingly according to head position
	//   as disks tend to read ahead and not backwards
	// - ordered accesses can neither get overtaken by nor overtake other requests
	//
	// in general, we only have the block position, so head, track or
	// whatever specific optimizations can only be done by the disk's
	// firmware;
	//
	// thus, sorting is done ascendingly with only a few exceptions:
	// - if the position of the request to be inserted is between the current
	//   (i.e. last) position and the position of the first queued request,
	//   insert it as first queue entry; i.e. we get descending order
	// - if the position of the first queued request is before the current
	//   position and the position of the new request is before the first
	//   queued request, add it as first queue entry; i.e. the new and the
	//   (previously) first request are sorted monotonically increasing
	//
	// the first exception should help if the queue is short (not sure
	// whether this actually hurts if we have a long queue), the
	// second one maximizes monotonic ranges
	last = first->prev;

	if( (device->last_sort <= new_request->sort &&
		 new_request->sort <= first->sort) ||
		(first->sort < device->last_sort &&
		 new_request->sort <= first->sort) )
	{
		// these are the exceptions described above
		SHOW_FLOW0( 3, "trying to insert req at head of device req queue" );

		// we would get a new first request, so make sure we don't bypass
		// syncs: walk the whole circular list backwards from the tail,
		// stopping at the first ordered request (if any)
		for( before = last; !before->ordered; ) {
			before = before->prev;
			if( before == last )
				break;
		}

		if( !before->ordered ) {
			// no ordered request anywhere in the queue - safe to prepend
			SHOW_FLOW0( 1, "scheduled request in front of all other reqs of device" );
			scsi_add_req_queue_first( new_request );
			return;
		} else
			// an ordered request exists; fall through to normal insertion
			SHOW_FLOW0( 1, "req would bypass ordered request" );
	}

	// the insertion sort loop below ignores the ordered flag of the last
	// request, so check that here
	if( last->ordered ) {
		SHOW_FLOW0( 1, "last entry is ordered, adding new request as last" );
		scsi_add_req_queue_last( new_request );
		return;
	}

	SHOW_FLOW0( 3, "performing insertion sort" );

	// insertion sort starts with the last entry to avoid unnecessary overtaking
	for( before = last->prev, next = last;
		 before != last && !before->ordered;
		 next = before, before = before->prev )
	{
		if( before->sort <= new_request->sort && new_request->sort <= next->sort )
			break;
	}

	// if we bumped into an ordered request, append the new request at the tail
	if( before->ordered ) {
		SHOW_FLOW0( 1, "overtaking ordered request in sorting - adding as last" );
		scsi_add_req_queue_last( new_request );
		return;
	}

	SHOW_FLOW( 1, "inserting after %p (pos=%Ld) and before %p (pos=%Ld)",
		before, before->sort, next, next->sort );

	// if we haven't found a proper position, we automatically insert the
	// new request as last because the request list is circular;
	// don't check whether we added the request as first as this is impossible
	new_request->next = next;
	new_request->prev = before;
	next->prev = new_request;
	before->next = new_request;
}
167 
168 
169 // add request to end of device queue and device to bus queue
170 // used for normal requests
171 void scsi_add_queued_request( scsi_ccb *request )
172 {
173 	scsi_device_info *device = request->device;
174 
175 	SHOW_FLOW0( 3, "" );
176 
177 	request->state = SCSI_STATE_QUEUED;
178 	scsi_insert_new_request( device, request );
179 
180 /*	{
181 		scsi_ccb *tmp = device->queued_reqs;
182 
183 		dprintf( "pos=%Ld, to_insert=%Ld; ", device->last_sort,
184 			request->sort );
185 
186 		do {
187 			dprintf( "%Ld, %s", tmp->sort, tmp->next == device->queued_reqs ? "\n" : "" );
188 			tmp = tmp->next;
189 		} while( tmp != device->queued_reqs );
190 	}*/
191 
192 	// if device is not deliberately locked, mark it as waiting
193 	if( device->lock_count == 0 ) {
194 		SHOW_FLOW0( 3, "mark device as waiting" );
195 		scsi_add_device_queue_last( device );
196 	}
197 }
198 
199 
200 // add request to begin of device queue and device to bus queue
201 // used only for auto-sense request
202 void scsi_add_queued_request_first( scsi_ccb *request )
203 {
204 	scsi_device_info *device = request->device;
205 
206 	SHOW_FLOW0( 3, "" );
207 
208 	request->state = SCSI_STATE_QUEUED;
209 	scsi_add_req_queue_first( request );
210 
211 	// if device is not deliberately locked, mark it as waiting
212 	if( device->lock_count == 0 ) {
213 		SHOW_FLOW0( 3, "mark device as waiting" );
214 		// make device first in bus queue to execute sense ASAP
215 		scsi_add_device_queue_first( device );
216 	}
217 }
218 
219 
220 // remove requests from queue, removing device from queue if idle
221 void scsi_remove_queued_request( scsi_ccb *request )
222 {
223 	scsi_remove_req_queue( request );
224 
225 	if( request->device->queued_reqs == NULL )
226 		scsi_remove_device_queue( request->device );
227 }
228 
229 
230 // explictely unblock bus
231 static void scsi_unblock_bus_int( scsi_bus_info *bus, bool by_SIM )
232 {
233 	bool was_servicable, start_retry;
234 
235 	SHOW_FLOW0( 3, "" );
236 
237 	ACQUIRE_BEN( &bus->mutex );
238 
239 	was_servicable = scsi_can_service_bus( bus );
240 
241 	scsi_unblock_bus_noresume( bus, by_SIM );
242 
243 	start_retry = !was_servicable && scsi_can_service_bus( bus );
244 
245 	RELEASE_BEN( &bus->mutex );
246 
247 	if( start_retry )
248 		release_sem( bus->start_service );
249 }
250 
251 
// Explicitly unblock the bus as requested by the SIM
// (thin wrapper; "true" marks the SIM as the lock owner).
void scsi_unblock_bus( scsi_bus_info *bus )
{
	scsi_unblock_bus_int( bus, true );
}
257 
258 
259 // explicitly unblock device
260 static void scsi_unblock_device_int( scsi_device_info *device, bool by_SIM )
261 {
262 	scsi_bus_info *bus = device->bus;
263 	bool was_servicable, start_retry;
264 
265 	SHOW_FLOW0( 3, "" );
266 
267 	ACQUIRE_BEN( &bus->mutex );
268 
269 	was_servicable = scsi_can_service_bus( bus );
270 
271 	scsi_unblock_device_noresume( device, by_SIM );
272 
273 	// add to bus queue if not locked explicitly anymore and requests are waiting
274 	if( device->lock_count == 0 && device->queued_reqs != NULL )
275 		scsi_add_device_queue_last( device );
276 
277 	start_retry = !was_servicable && scsi_can_service_bus( bus );
278 
279 	RELEASE_BEN( &bus->mutex );
280 
281 	if( start_retry )
282 		release_sem( bus->start_service );
283 }
284 
285 
286 // explicitely unblock device as requested by SIM
287 void scsi_unblock_device( scsi_device_info *device )
288 {
289 	return scsi_unblock_device_int( device, true );
290 }
291 
292 
293 // SIM signals that it can handle further requests for this bus
294 void scsi_cont_send_bus( scsi_bus_info *bus )
295 {
296 	bool was_servicable, start_retry;
297 
298 	SHOW_FLOW0( 3, "" );
299 
300 	ACQUIRE_BEN( &bus->mutex );
301 
302 	was_servicable = scsi_can_service_bus( bus );
303 
304 	scsi_clear_bus_overflow( bus );
305 
306 	start_retry = !was_servicable && scsi_can_service_bus( bus );
307 
308 	RELEASE_BEN( &bus->mutex );
309 
310 	if( start_retry )
311 		release_sem_etc( bus->start_service, 1, 0/*B_DO_NOT_RESCHEDULE*/ );
312 }
313 
314 
315 // SIM signals that it can handle further requests for this device
316 void scsi_cont_send_device( scsi_device_info *device )
317 {
318 	scsi_bus_info *bus = device->bus;
319 	bool was_servicable, start_retry;
320 
321 	SHOW_FLOW0( 3, "" );
322 
323 	ACQUIRE_BEN( &bus->mutex );
324 
325 	was_servicable = scsi_can_service_bus( bus );
326 
327 	if( device->sim_overflow ) {
328 		device->sim_overflow = false;
329 		--device->lock_count;
330 
331 		// add to bus queue if not locked explicitly anymore and requests are waiting
332 		if( device->lock_count == 0 && device->queued_reqs != NULL )
333 			scsi_add_device_queue_last( device );
334 	}
335 
336 	// no device overflow implicits no bus overflow
337 	// (and if not, we'll detect that on next submit)
338 	scsi_clear_bus_overflow( bus );
339 
340 	start_retry = !was_servicable && scsi_can_service_bus( bus );
341 
342 	RELEASE_BEN( &bus->mutex );
343 
344 	// tell service thread if there are pending requests which
345 	// weren't pending before
346 	if( start_retry )
347 		release_sem_etc( bus->start_service, 1, 0/*B_DO_NOT_RESCHEDULE*/ );
348 }
349 
350 
351 // explicitly block bus
352 static void scsi_block_bus_int( scsi_bus_info *bus, bool by_SIM )
353 {
354 	SHOW_FLOW0( 3, "" );
355 
356 	ACQUIRE_BEN( &bus->mutex );
357 
358 	scsi_block_bus_nolock( bus, by_SIM );
359 
360 	RELEASE_BEN( &bus->mutex );
361 }
362 
363 
364 // explicitly block bus as requested by SIM
365 void scsi_block_bus( scsi_bus_info *bus )
366 {
367 	return scsi_block_bus_int( bus, true );
368 }
369 
370 
371 // explicitly block device
372 static void scsi_block_device_int( scsi_device_info *device, bool by_SIM )
373 {
374 	scsi_bus_info *bus = device->bus;
375 
376 	SHOW_FLOW0( 3, "" );
377 
378 	ACQUIRE_BEN( &bus->mutex );
379 
380 	scsi_block_device_nolock( device, by_SIM );
381 
382 	// remove device from bus queue as it cannot be processed anymore
383 	scsi_remove_device_queue( device );
384 
385 	RELEASE_BEN( &bus->mutex );
386 }
387 
388 
389 // explicitly block device as requested by SIM
390 void scsi_block_device( scsi_device_info *device )
391 {
392 	return scsi_block_device_int( device, true );
393 }
394