@@ -465,6 +465,7 @@ static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
 static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
 {
 	struct rcar_dmac_desc_page *page;
+	unsigned long flags;
 	LIST_HEAD(list);
 	unsigned int i;
 
@@ -482,10 +483,10 @@ static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
 		list_add_tail(&desc->node, &list);
 	}
 
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 	list_splice_tail(&list, &chan->desc.free);
 	list_add_tail(&page->node, &chan->desc.pages);
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return 0;
 }
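
Why this conversion matters: spin_unlock_irq() unconditionally re-enables local interrupts, so the old spin_lock_irq()/spin_unlock_irq() pairs were only safe when these helpers were guaranteed to be entered with interrupts enabled. The irqsave/irqrestore variants save the caller's interrupt state and put it back exactly. A minimal sketch of the difference, using a pared-down demo_chan type rather than the real struct rcar_dmac_chan:

#include <linux/spinlock.h>

/* Pared-down stand-in for the real channel structure. */
struct demo_chan {
	spinlock_t lock;
};

static void demo_unsafe(struct demo_chan *chan)
{
	spin_lock_irq(&chan->lock);	/* disables local interrupts */
	/* ... critical section ... */
	spin_unlock_irq(&chan->lock);	/* re-enables them unconditionally */
}

static void demo_safe(struct demo_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);	/* saves the prior IRQ state */
	/* ... critical section ... */
	spin_unlock_irqrestore(&chan->lock, flags);	/* restores it exactly */
}

If demo_unsafe() is called with interrupts already disabled, it returns with them enabled behind the caller's back; demo_safe() cannot.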
@@ -516,6 +517,7 @@ static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
 static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
 {
 	struct rcar_dmac_desc *desc, *_desc;
+	unsigned long flags;
 	LIST_HEAD(list);
 
 	/*
@@ -524,9 +526,9 @@ static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
 	 * list_for_each_entry_safe, isn't safe if we release the channel lock
 	 * around the rcar_dmac_desc_put() call.
 	 */
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 	list_splice_init(&chan->desc.wait, &list);
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	list_for_each_entry_safe(desc, _desc, &list, node) {
 		if (async_tx_test_ack(&desc->async_tx)) {
@@ -539,9 +541,9 @@ static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
 		return;
 
 	/* Put the remaining descriptors back in the wait list. */
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 	list_splice(&list, &chan->desc.wait);
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /*
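
For context, rcar_dmac_desc_recycle_acked() relies on a splice-out/process/splice-back pattern: the wait list is emptied onto a private list under the lock, walked with the lock dropped, and whatever was not recycled is spliced back at the end. A hedged sketch of that pattern with the new locking, using a hypothetical demo_item type in place of the driver's descriptors:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for the driver's descriptor type. */
struct demo_item {
	struct list_head node;
	bool acked;
};

static void demo_recycle(spinlock_t *lock, struct list_head *wait,
			 struct list_head *free)
{
	struct demo_item *item, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Take everything off the wait list in one shot. */
	spin_lock_irqsave(lock, flags);
	list_splice_init(wait, &list);
	spin_unlock_irqrestore(lock, flags);

	/* Walk the private list with the lock dropped, so moving an
	 * item back to the free list can retake it safely. */
	list_for_each_entry_safe(item, tmp, &list, node) {
		if (item->acked) {
			list_del(&item->node);
			spin_lock_irqsave(lock, flags);
			list_add_tail(&item->node, free);
			spin_unlock_irqrestore(lock, flags);
		}
	}

	/* Put anything not yet acked back on the wait list. */
	spin_lock_irqsave(lock, flags);
	list_splice(&list, wait);
	spin_unlock_irqrestore(lock, flags);
}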
@@ -556,12 +558,13 @@ static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
 static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
 {
 	struct rcar_dmac_desc *desc;
+	unsigned long flags;
 	int ret;
 
 	/* Recycle acked descriptors before attempting allocation. */
 	rcar_dmac_desc_recycle_acked(chan);
 
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 
 	while (list_empty(&chan->desc.free)) {
 		/*
@@ -570,17 +573,17 @@ static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
 		 * allocated descriptors. If the allocation fails return an
 		 * error.
 		 */
-		spin_unlock_irq(&chan->lock);
+		spin_unlock_irqrestore(&chan->lock, flags);
 		ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
 		if (ret < 0)
 			return NULL;
-		spin_lock_irq(&chan->lock);
+		spin_lock_irqsave(&chan->lock, flags);
 	}
 
 	desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
 	list_del(&desc->node);
 
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return desc;
 }
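
rcar_dmac_desc_get() must drop the channel lock before calling rcar_dmac_desc_alloc(), which takes the same lock internally to publish the new descriptors, and must then retake it and recheck the free list because another thread may have consumed the new entries in the meantime. Note that flags only carries state between one matched save/restore pair, so saving again after the allocation is correct. A condensed sketch of the loop on a bare list_head free list (demo_alloc_more() is a hypothetical allocator):

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical allocator that, like rcar_dmac_desc_alloc(), takes the
 * same lock internally to splice new entries onto the free list. */
int demo_alloc_more(spinlock_t *lock, struct list_head *free, gfp_t gfp);

static struct list_head *demo_get(spinlock_t *lock, struct list_head *free)
{
	struct list_head *node;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	while (list_empty(free)) {
		/* Drop the lock: the allocator takes it itself, and we
		 * must recheck afterwards in case another thread raced
		 * us to the freshly allocated entries. */
		spin_unlock_irqrestore(lock, flags);
		if (demo_alloc_more(lock, free, GFP_NOWAIT) < 0)
			return NULL;
		spin_lock_irqsave(lock, flags);
	}
	node = free->next;
	list_del(node);
	spin_unlock_irqrestore(lock, flags);

	return node;
}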
@@ -593,6 +596,7 @@ static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
 static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
 {
 	struct rcar_dmac_desc_page *page;
+	unsigned long flags;
 	LIST_HEAD(list);
 	unsigned int i;
 
@@ -606,10 +610,10 @@ static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
 		list_add_tail(&chunk->node, &list);
 	}
 
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 	list_splice_tail(&list, &chan->desc.chunks_free);
 	list_add_tail(&page->node, &chan->desc.pages);
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return 0;
 }
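
Both allocation helpers follow a prepare-unlocked/publish-locked shape: entries are built up on a stack-local list with no lock held, then made visible to the rest of the driver in one short critical section via list_splice_tail(). A sketch of that shape under the new locking (demo_entry and the per-entry kzalloc() are simplifications; the driver actually carves its entries out of a single page):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_entry {
	struct list_head node;
};

static int demo_alloc_batch(spinlock_t *lock, struct list_head *free,
			    unsigned int count, gfp_t gfp)
{
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	/* Build the whole batch with no lock held. */
	for (i = 0; i < count; i++) {
		struct demo_entry *e = kzalloc(sizeof(*e), gfp);

		if (!e) {
			struct demo_entry *t, *n;

			/* Abandon the private list; free what we built. */
			list_for_each_entry_safe(t, n, &list, node)
				kfree(t);
			return -ENOMEM;
		}
		list_add_tail(&e->node, &list);
	}

	/* One short critical section to publish the batch. */
	spin_lock_irqsave(lock, flags);
	list_splice_tail(&list, free);
	spin_unlock_irqrestore(lock, flags);

	return 0;
}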
@@ -627,9 +631,10 @@ static struct rcar_dmac_xfer_chunk *
 rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
 {
 	struct rcar_dmac_xfer_chunk *chunk;
+	unsigned long flags;
 	int ret;
 
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 
 	while (list_empty(&chan->desc.chunks_free)) {
 		/*
@@ -638,18 +643,18 @@ rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
 		 * allocated descriptors. If the allocation fails return an
 		 * error.
 		 */
-		spin_unlock_irq(&chan->lock);
+		spin_unlock_irqrestore(&chan->lock, flags);
 		ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
 		if (ret < 0)
 			return NULL;
-		spin_lock_irq(&chan->lock);
+		spin_lock_irqsave(&chan->lock, flags);
 	}
 
 	chunk = list_first_entry(&chan->desc.chunks_free,
 				 struct rcar_dmac_xfer_chunk, node);
 	list_del(&chunk->node);
 
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return chunk;
 }
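
The net effect of the hunks above: every one of these helpers is now safe to reach from a context that already has interrupts disabled, because each restores exactly the interrupt state it found. A small self-contained illustration, assuming a hypothetical demo_lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock for the sketch */

/* Written like the converted helpers: correct regardless of the
 * caller's interrupt state. */
static void demo_helper(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... manipulate shared lists ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

static void demo_caller(void)
{
	unsigned long flags;

	local_irq_save(flags);	/* interrupts already off here */
	demo_helper();		/* does not turn them back on behind our back */
	local_irq_restore(flags);
}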