@@ -67,6 +67,13 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
 
 struct kmem_cache *scsi_sdb_cache;
 
+/*
+ * When to reinvoke queueing after a resource shortage. It's 3 msecs to
+ * not change behaviour from the previous unplug mechanism, experimentation
+ * may prove this needs changing.
+ */
+#define SCSI_QUEUE_DELAY	3
+
 static void scsi_run_queue(struct request_queue *q);
 
 /*
@@ -149,14 +156,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
 	/*
 	 * Requeue this command.  It will go before all other commands
 	 * that are already in the queue.
-	 *
-	 * NOTE: there is magic here about the way the queue is plugged if
-	 * we have no outstanding commands.
-	 *
-	 * Although we *don't* plug the queue, we call the request
-	 * function.  The SCSI request function detects the blocked condition
-	 * and plugs the queue appropriately.
-	 */
+	 */
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_requeue_request(q, cmd->request);
 	spin_unlock_irqrestore(q->queue_lock, flags);
@@ -1194,11 +1194,11 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 	case BLKPREP_DEFER:
 		/*
 		 * If we defer, the blk_peek_request() returns NULL, but the
-		 * queue must be restarted, so we plug here if no returning
-		 * command will automatically do that.
+		 * queue must be restarted, so we schedule a callback to happen
+		 * shortly.
 		 */
 		if (sdev->device_busy == 0)
-			blk_plug_device(q);
+			blk_delay_queue(q, SCSI_QUEUE_DELAY);
 		break;
 	default:
 		req->cmd_flags |= REQ_DONTPREP;
@@ -1237,7 +1237,7 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
 				   sdev_printk(KERN_INFO, sdev,
 				   "unblocking device at zero depth\n"));
 		} else {
-			blk_plug_device(q);
+			blk_delay_queue(q, SCSI_QUEUE_DELAY);
 			return 0;
 		}
 	}
@@ -1467,7 +1467,7 @@ static void scsi_request_fn(struct request_queue *q)
 	 * the host is no longer able to accept any more requests.
 	 */
 	shost = sdev->host;
-	while (!blk_queue_plugged(q)) {
+	for (;;) {
 		int rtn;
 		/*
 		 * get next queueable request.  We do this early to make sure
@@ -1546,15 +1546,8 @@ static void scsi_request_fn(struct request_queue *q)
 		 */
 		rtn = scsi_dispatch_cmd(cmd);
 		spin_lock_irq(q->queue_lock);
-		if (rtn) {
-			/* we're refusing the command; because of
-			 * the way locks get dropped, we need to
-			 * check here if plugging is required */
-			if (sdev->device_busy == 0)
-				blk_plug_device(q);
-
-			break;
-		}
+		if (rtn)
+			goto out_delay;
 	}
 
 	goto out;
@@ -1573,9 +1566,10 @@ static void scsi_request_fn(struct request_queue *q)
 	spin_lock_irq(q->queue_lock);
 	blk_requeue_request(q, req);
 	sdev->device_busy--;
-	if (sdev->device_busy == 0)
-		blk_plug_device(q);
- out:
+ out_delay:
+	if (sdev->device_busy == 0)
+		blk_delay_queue(q, SCSI_QUEUE_DELAY);
+ out:
 	/* must be careful here...if we trigger the ->remove() function
 	 * we cannot be holding the q lock */
 	spin_unlock_irq(q->queue_lock);
0 commit comments