@@ -198,6 +198,19 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
+/*
+ * Make sure that plugs that were pending when this function was entered
+ * are now complete and requests pushed to the queue.
+ */
+static inline void queue_sync_plugs(struct request_queue *q)
+{
+	/*
+	 * If the current process is plugged and has barriers submitted,
+	 * we will livelock if we don't unplug first.
+	 */
+	blk_flush_plug(current);
+}
+
 static void blk_delay_work(struct work_struct *work)
 {
 	struct request_queue *q;
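
The per-task plugs that queue_sync_plugs() flushes come from the on-stack plugging API added earlier in this series. A minimal usage sketch from a submitter's point of view, assuming blk_start_plug()/blk_finish_plug() as introduced there:

	struct blk_plug plug;

	blk_start_plug(&plug);		/* current->plug now points at this stack object */
	submit_bio(READ, bio);		/* requests collect on the plug list, not the queue */
	blk_finish_plug(&plug);		/* flush: push the held requests to their queue(s) */

Flushing only current's plug suffices because a plug lives on its owner's stack and is never touched by another task.
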
@@ -224,137 +237,6 @@ void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
-/*
- * "plug" the device if there are no outstanding requests: this will
- * force the transfer to start only after we have put all the requests
- * on the list.
- *
- * This is called with interrupts off and no requests on the queue and
- * with the queue lock held.
- */
-void blk_plug_device(struct request_queue *q)
-{
-	WARN_ON(!irqs_disabled());
-
-	/*
-	 * don't plug a stopped queue, it must be paired with blk_start_queue()
-	 * which will restart the queueing
-	 */
-	if (blk_queue_stopped(q))
-		return;
-
-	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
-		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-		trace_block_plug(q);
-	}
-}
-EXPORT_SYMBOL(blk_plug_device);
-
-/**
- * blk_plug_device_unlocked - plug a device without queue lock held
- * @q: The &struct request_queue to plug
- *
- * Description:
- *   Like @blk_plug_device(), but grabs the queue lock and disables
- *   interrupts.
- **/
-void blk_plug_device_unlocked(struct request_queue *q)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_plug_device(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_plug_device_unlocked);
-
-/*
- * remove the queue from the plugged list, if present. called with
- * queue lock held and interrupts disabled.
- */
-int blk_remove_plug(struct request_queue *q)
-{
-	WARN_ON(!irqs_disabled());
-
-	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
-		return 0;
-
-	del_timer(&q->unplug_timer);
-	return 1;
-}
-EXPORT_SYMBOL(blk_remove_plug);
-
-/*
- * remove the plug and let it rip..
- */
-void __generic_unplug_device(struct request_queue *q)
-{
-	if (unlikely(blk_queue_stopped(q)))
-		return;
-	if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
-		return;
-
-	q->request_fn(q);
-}
-
-/**
- * generic_unplug_device - fire a request queue
- * @q: The &struct request_queue in question
- *
- * Description:
- *   Linux uses plugging to build bigger requests queues before letting
- *   the device have at them. If a queue is plugged, the I/O scheduler
- *   is still adding and merging requests on the queue. Once the queue
- *   gets unplugged, the request_fn defined for the queue is invoked and
- *   transfers started.
- **/
-void generic_unplug_device(struct request_queue *q)
-{
-	if (blk_queue_plugged(q)) {
-		spin_lock_irq(q->queue_lock);
-		__generic_unplug_device(q);
-		spin_unlock_irq(q->queue_lock);
-	}
-}
-EXPORT_SYMBOL(generic_unplug_device);
-
-static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
-				   struct page *page)
-{
-	struct request_queue *q = bdi->unplug_io_data;
-
-	blk_unplug(q);
-}
-
-void blk_unplug_work(struct work_struct *work)
-{
-	struct request_queue *q =
-		container_of(work, struct request_queue, unplug_work);
-
-	trace_block_unplug_io(q);
-	q->unplug_fn(q);
-}
-
-void blk_unplug_timeout(unsigned long data)
-{
-	struct request_queue *q = (struct request_queue *)data;
-
-	trace_block_unplug_timer(q);
-	kblockd_schedule_work(q, &q->unplug_work);
-}
-
-void blk_unplug(struct request_queue *q)
-{
-	/*
-	 * devices don't necessarily have an ->unplug_fn defined
-	 */
-	if (q->unplug_fn) {
-		trace_block_unplug_io(q);
-		q->unplug_fn(q);
-	}
-}
-EXPORT_SYMBOL(blk_unplug);
-
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q: The &struct request_queue in question
@@ -389,7 +271,6 @@ EXPORT_SYMBOL(blk_start_queue);
  **/
 void blk_stop_queue(struct request_queue *q)
 {
-	blk_remove_plug(q);
 	cancel_delayed_work(&q->delay_work);
 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
@@ -411,11 +292,10 @@ EXPORT_SYMBOL(blk_stop_queue);
  */
 void blk_sync_queue(struct request_queue *q)
 {
-	del_timer_sync(&q->unplug_timer);
 	del_timer_sync(&q->timeout);
-	cancel_work_sync(&q->unplug_work);
 	throtl_shutdown_timer_wq(q);
 	cancel_delayed_work_sync(&q->delay_work);
+	queue_sync_plugs(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
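
For reference, the plug drained by queue_sync_plugs() is a small stack-resident object. A sketch of its shape, assumed from the on-stack plugging patch in this series (field names may differ):

	struct blk_plug {
		unsigned long magic;		/* guards against stack corruption */
		struct list_head list;		/* requests held back by the owning task */
		unsigned int should_sort;	/* sort by queue before flushing? */
	};
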
@@ -430,25 +310,18 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 void __blk_run_queue(struct request_queue *q)
 {
-	blk_remove_plug(q);
-
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
-	if (elv_queue_empty(q))
-		return;
-
 	/*
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
 	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
-	} else {
-		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
-		kblockd_schedule_work(q, &q->unplug_work);
-	}
+	} else
+		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
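
The else branch now kicks the queue's delay work with a zero delay instead of the old unplug work. The first hunk shows only the opening of blk_delay_work(); presumably it just re-runs the queue under the lock, roughly like this sketch (an assumption, since the body is truncated above):

	static void blk_delay_work(struct work_struct *work)
	{
		struct request_queue *q;

		q = container_of(work, struct request_queue, delay_work.work);

		spin_lock_irq(q->queue_lock);
		__blk_run_queue(q);
		spin_unlock_irq(q->queue_lock);
	}
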
@@ -535,8 +408,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q)
 		return NULL;
 
-	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
-	q->backing_dev_info.unplug_io_data = q;
 	q->backing_dev_info.ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
@@ -556,13 +427,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
-	init_timer(&q->unplug_timer);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->timeout_list);
 	INIT_LIST_HEAD(&q->flush_queue[0]);
 	INIT_LIST_HEAD(&q->flush_queue[1]);
 	INIT_LIST_HEAD(&q->flush_data_in_flight);
-	INIT_WORK(&q->unplug_work, blk_unplug_work);
 	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
@@ -652,7 +521,6 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
 	q->unprep_rq_fn		= NULL;
-	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= QUEUE_FLAG_DEFAULT;
 	q->queue_lock		= lock;
 
@@ -910,8 +778,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 }
 
 /*
- * No available requests for this queue, unplug the device and wait for some
- * requests to become available.
+ * No available requests for this queue, wait for some requests to become
+ * available.
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
@@ -932,7 +800,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 
 	trace_block_sleeprq(q, bio, rw_flags & 1);
 
-	__generic_unplug_device(q);
 	spin_unlock_irq(q->queue_lock);
 	io_schedule();
 
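
Dropping the explicit unplug before sleeping is safe because io_schedule() itself now flushes the sleeping task's plug, so held-back requests cannot be stranded while the task waits for a free request. A sketch of that hook on the sched.c side of this series (not part of this diff):

	void __sched io_schedule(void)
	{
		struct rq *rq = raw_rq();

		delayacct_blkio_start();
		atomic_inc(&rq->nr_iowait);
		blk_flush_plug(current);	/* push plugged I/O before blocking */
		current->in_iowait = 1;
		schedule();
		current->in_iowait = 0;
		atomic_dec(&rq->nr_iowait);
		delayacct_blkio_end();
	}
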
@@ -1058,7 +925,7 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
 			    int where)
 {
 	drive_stat_acct(rq, 1);
-	__elv_add_request(q, rq, where, 0);
+	__elv_add_request(q, rq, where);
 }
 
 /**
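
The call loses its trailing argument because __elv_add_request() no longer takes the old int plug flag, which told the elevator to plug the queue on insert. Assuming the matching elevator.h change elsewhere in this series, the prototype goes from

	void __elv_add_request(struct request_queue *q, struct request *rq,
			       int where, int plug);

to

	void __elv_add_request(struct request_queue *q, struct request *rq,
			       int where);
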
@@ -2798,7 +2665,7 @@ static void flush_plug_list(struct blk_plug *plug)
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
-		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT, 0);
+		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT);
 	}
 
 	if (q) {