1 /* 2 ** Copyright 2002/03, Thomas Kurschel. All rights reserved. 3 ** Distributed under the terms of the MIT License. 4 */ 5 6 /* 7 Part of Open SCSI bus manager 8 9 Queuing of SCSI request 10 11 Some words about queuing, i.e. making sure that no request 12 gets stuck in some queue forever 13 14 As long as the SIM accepts new reqs, XPT doesn't take care of 15 stuck requests - thus, peripheral drivers should use synced reqs 16 once in a while (or even always) to force the HBA to finish 17 previously submitted reqs. This is also important for non-queuing 18 HBAs as in this case the queuing is done by the SCSI bus manager 19 which uses elevator sort. 20 21 Requests can be blocked by SIM or by the SCSI bus manager on a per-device 22 or per-bus basis. There are three possible reasons: 23 24 1. the hardware queue is too small 25 - detected by bus manager automatically, or 26 - detected by SIM which calls "requeue" for affected request 27 2. SIM blocks bus/device explictely 28 3. bus manager blocks bus/device explictly (currently used for 29 ordered requests) 30 4. the device has queued requests or the bus has waiting devices resp. 31 32 The first condition is automatically cleared when a request has 33 been completed, but can be reset manually by the SIM. In the second and 34 third cases, the SIM/bus manager must explictely unblock the bus/device. 35 For easier testing, the lock_count is the sum of the overflow bit, the 36 SIM lock count and the bus manager lock count. The blocking test is in 37 scsi_check_enqueue_request() in scsi_io.c. 38 39 If a bus/device is blocked or has waiting requests/devices, new requests 40 are added to a device-specific request queue in an elevator-sort style, 41 taking care that no ordered requests are overtaken. Exceptions are 42 requeued request and autosense-requests, which are added first. Each bus 43 has a queue of non-blocked devices that have waiting requests. If a 44 device is to be added to this queue, it is always appended at tail 45 to ensure fair processing. 46 */ 47 48 #include "scsi_internal.h" 49 #include "queuing.h" 50 51 52 // add request to device queue, using evelvator sort 53 static void scsi_insert_new_request( scsi_device_info *device, 54 scsi_ccb *new_request ) 55 { 56 scsi_ccb *first, *last, *before, *next; 57 58 SHOW_FLOW( 3, "inserting new_request=%p, pos=%" B_PRId64, new_request, 59 new_request->sort ); 60 61 first = device->queued_reqs; 62 63 if( first == NULL ) { 64 SHOW_FLOW0( 1, "no other queued request" ); 65 scsi_add_req_queue_first( new_request ); 66 return; 67 } 68 69 SHOW_FLOW( 3, "first=%p, pos=%" B_PRId64 ", last_pos=%" B_PRId64, 70 first, first->sort, device->last_sort ); 71 72 // don't let syncs bypass others 73 if( new_request->ordered ) { 74 SHOW_FLOW0( 1, "adding synced request to tail" ); 75 scsi_add_req_queue_last( new_request ); 76 return; 77 } 78 79 if( new_request->sort < 0 ) { 80 SHOW_FLOW0( 1, "adding unsortable request to tail" ); 81 scsi_add_req_queue_last( new_request ); 82 return; 83 } 84 85 // to reduce head seek time, we have three goals: 86 // - sort request accendingly according to head position 87 // as as disks use to read ahead and not backwards 88 // - ordered accesses can neither get overtaken by or overtake other requests 89 // 90 // in general, we only have block position, so head, track or 91 // whatever specific optimizations can only be done by the disks 92 // firmware; 93 // 94 // thus, sorting is done ascendingly with only a few exceptions: 95 // - if position of request to be inserted is between current 96 // (i.e. last) position and position of first queued request, 97 // insert it as first queue entry; i.e. we get descending order 98 // - if position of first queued request is before current position 99 // and position of new req is before first queued request, add it 100 // as first queue entry; i.e. the new and the (previously) first 101 // request are sorted monotically increasing 102 // 103 // the first exception should help if the queue is short (not sure 104 // whether this actually hurts if we have a long queue), the 105 // second one maximizes monotonic ranges 106 last = first->prev; 107 108 if( (device->last_sort <= new_request->sort && 109 new_request->sort <= first->sort) || 110 (first->sort < device->last_sort && 111 new_request->sort <= first->sort) ) 112 { 113 // these are the exceptions described above 114 SHOW_FLOW0( 3, "trying to insert req at head of device req queue" ); 115 116 // we should have a new first request, make sure we don't bypass syncs 117 for( before = last; !before->ordered; ) { 118 before = before->prev; 119 if( before == last ) 120 break; 121 } 122 123 if( !before->ordered ) { 124 SHOW_FLOW0( 1, "scheduled request in front of all other reqs of device" ); 125 scsi_add_req_queue_first( new_request ); 126 return; 127 } else 128 SHOW_FLOW0( 1, "req would bypass ordered request" ); 129 } 130 131 // the insertion sort loop ignores ordered flag of last request, 132 // so check that here 133 if( last->ordered ) { 134 SHOW_FLOW0( 1, "last entry is ordered, adding new request as last" ); 135 scsi_add_req_queue_last( new_request ); 136 return; 137 } 138 139 SHOW_FLOW0( 3, "performing insertion sort" ); 140 141 // insertion sort starts with last entry to avoid unnecessary overtaking 142 for( before = last->prev, next = last; 143 before != last && !before->ordered; 144 next = before, before = before->prev ) 145 { 146 if( before->sort <= new_request->sort && new_request->sort <= next->sort ) 147 break; 148 } 149 150 // if we bumped into ordered request, append new request at tail 151 if( before->ordered ) { 152 SHOW_FLOW0( 1, "overtaking ordered request in sorting - adding as last" ); 153 scsi_add_req_queue_last( new_request ); 154 return; 155 } 156 157 SHOW_FLOW( 1, "inserting after %p (pos=%" B_PRId64 ") and before %p (pos=%" B_PRId64 ")", 158 before, before->sort, next, next->sort ); 159 160 // if we haven't found a proper position, we automatically insert 161 // new request as last because request list is circular; 162 // don't check whether we added request as first as this is impossible 163 new_request->next = next; 164 new_request->prev = before; 165 next->prev = new_request; 166 before->next = new_request; 167 } 168 169 170 // add request to end of device queue and device to bus queue 171 // used for normal requests 172 void scsi_add_queued_request( scsi_ccb *request ) 173 { 174 scsi_device_info *device = request->device; 175 176 SHOW_FLOW0( 3, "" ); 177 178 request->state = SCSI_STATE_QUEUED; 179 scsi_insert_new_request( device, request ); 180 181 /* { 182 scsi_ccb *tmp = device->queued_reqs; 183 184 dprintf( "pos=%Ld, to_insert=%Ld; ", device->last_sort, 185 request->sort ); 186 187 do { 188 dprintf( "%Ld, %s", tmp->sort, tmp->next == device->queued_reqs ? "\n" : "" ); 189 tmp = tmp->next; 190 } while( tmp != device->queued_reqs ); 191 }*/ 192 193 // if device is not deliberately locked, mark it as waiting 194 if( device->lock_count == 0 ) { 195 SHOW_FLOW0( 3, "mark device as waiting" ); 196 scsi_add_device_queue_last( device ); 197 } 198 } 199 200 201 // add request to begin of device queue and device to bus queue 202 // used only for auto-sense request 203 void scsi_add_queued_request_first( scsi_ccb *request ) 204 { 205 scsi_device_info *device = request->device; 206 207 SHOW_FLOW0( 3, "" ); 208 209 request->state = SCSI_STATE_QUEUED; 210 scsi_add_req_queue_first( request ); 211 212 // if device is not deliberately locked, mark it as waiting 213 if( device->lock_count == 0 ) { 214 SHOW_FLOW0( 3, "mark device as waiting" ); 215 // make device first in bus queue to execute sense ASAP 216 scsi_add_device_queue_first( device ); 217 } 218 } 219 220 221 // remove requests from queue, removing device from queue if idle 222 void scsi_remove_queued_request( scsi_ccb *request ) 223 { 224 scsi_remove_req_queue( request ); 225 226 if( request->device->queued_reqs == NULL ) 227 scsi_remove_device_queue( request->device ); 228 } 229 230 231 // explictely unblock bus 232 static void scsi_unblock_bus_int( scsi_bus_info *bus, bool by_SIM ) 233 { 234 bool was_servicable, start_retry; 235 236 SHOW_FLOW0( 3, "" ); 237 238 mutex_lock( &bus->mutex ); 239 240 was_servicable = scsi_can_service_bus( bus ); 241 242 scsi_unblock_bus_noresume( bus, by_SIM ); 243 244 start_retry = !was_servicable && scsi_can_service_bus( bus ); 245 246 mutex_unlock( &bus->mutex ); 247 248 if( start_retry ) 249 release_sem( bus->start_service ); 250 } 251 252 253 // explicitely unblock bus as requested by SIM 254 void scsi_unblock_bus( scsi_bus_info *bus ) 255 { 256 scsi_unblock_bus_int( bus, true ); 257 } 258 259 260 // explicitly unblock device 261 static void scsi_unblock_device_int( scsi_device_info *device, bool by_SIM ) 262 { 263 scsi_bus_info *bus = device->bus; 264 bool was_servicable, start_retry; 265 266 SHOW_FLOW0( 3, "" ); 267 268 mutex_lock( &bus->mutex ); 269 270 was_servicable = scsi_can_service_bus( bus ); 271 272 scsi_unblock_device_noresume( device, by_SIM ); 273 274 // add to bus queue if not locked explicitly anymore and requests are waiting 275 if( device->lock_count == 0 && device->queued_reqs != NULL ) 276 scsi_add_device_queue_last( device ); 277 278 start_retry = !was_servicable && scsi_can_service_bus( bus ); 279 280 mutex_unlock( &bus->mutex ); 281 282 if( start_retry ) 283 release_sem( bus->start_service ); 284 } 285 286 287 // explicitely unblock device as requested by SIM 288 void scsi_unblock_device( scsi_device_info *device ) 289 { 290 return scsi_unblock_device_int( device, true ); 291 } 292 293 294 // SIM signals that it can handle further requests for this bus 295 void scsi_cont_send_bus( scsi_bus_info *bus ) 296 { 297 bool was_servicable, start_retry; 298 299 SHOW_FLOW0( 3, "" ); 300 301 mutex_lock( &bus->mutex ); 302 303 was_servicable = scsi_can_service_bus( bus ); 304 305 scsi_clear_bus_overflow( bus ); 306 307 start_retry = !was_servicable && scsi_can_service_bus( bus ); 308 309 mutex_unlock( &bus->mutex ); 310 311 if( start_retry ) 312 release_sem_etc( bus->start_service, 1, 0/*B_DO_NOT_RESCHEDULE*/ ); 313 } 314 315 316 // SIM signals that it can handle further requests for this device 317 void scsi_cont_send_device( scsi_device_info *device ) 318 { 319 scsi_bus_info *bus = device->bus; 320 bool was_servicable, start_retry; 321 322 SHOW_FLOW0( 3, "" ); 323 324 mutex_lock( &bus->mutex ); 325 326 was_servicable = scsi_can_service_bus( bus ); 327 328 if( device->sim_overflow ) { 329 device->sim_overflow = false; 330 --device->lock_count; 331 332 // add to bus queue if not locked explicitly anymore and requests are waiting 333 if( device->lock_count == 0 && device->queued_reqs != NULL ) 334 scsi_add_device_queue_last( device ); 335 } 336 337 // no device overflow implicits no bus overflow 338 // (and if not, we'll detect that on next submit) 339 scsi_clear_bus_overflow( bus ); 340 341 start_retry = !was_servicable && scsi_can_service_bus( bus ); 342 343 mutex_unlock( &bus->mutex ); 344 345 // tell service thread if there are pending requests which 346 // weren't pending before 347 if( start_retry ) 348 release_sem_etc( bus->start_service, 1, 0/*B_DO_NOT_RESCHEDULE*/ ); 349 } 350 351 352 // explicitly block bus 353 static void scsi_block_bus_int( scsi_bus_info *bus, bool by_SIM ) 354 { 355 SHOW_FLOW0( 3, "" ); 356 357 mutex_lock( &bus->mutex ); 358 359 scsi_block_bus_nolock( bus, by_SIM ); 360 361 mutex_unlock( &bus->mutex ); 362 } 363 364 365 // explicitly block bus as requested by SIM 366 void scsi_block_bus( scsi_bus_info *bus ) 367 { 368 return scsi_block_bus_int( bus, true ); 369 } 370 371 372 // explicitly block device 373 static void scsi_block_device_int( scsi_device_info *device, bool by_SIM ) 374 { 375 scsi_bus_info *bus = device->bus; 376 377 SHOW_FLOW0( 3, "" ); 378 379 mutex_lock( &bus->mutex ); 380 381 scsi_block_device_nolock( device, by_SIM ); 382 383 // remove device from bus queue as it cannot be processed anymore 384 scsi_remove_device_queue( device ); 385 386 mutex_unlock( &bus->mutex ); 387 } 388 389 390 // explicitly block device as requested by SIM 391 void scsi_block_device( scsi_device_info *device ) 392 { 393 return scsi_block_device_int( device, true ); 394 } 395