@@ -20,6 +20,13 @@
 
 static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
 
+#ifdef CONFIG_SND_DMA_SGBUF
+static void *do_alloc_fallback_pages(struct device *dev, size_t size,
+                                     dma_addr_t *addr, bool wc);
+static void do_free_fallback_pages(void *p, size_t size, bool wc);
+static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
+#endif
+
 /* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
 static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
                                           gfp_t default_gfp)
@@ -277,16 +284,21 @@ EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
 /*
  * Continuous pages allocator
  */
-static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
+static void *do_alloc_pages(size_t size, dma_addr_t *addr, gfp_t gfp)
 {
-        gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
         void *p = alloc_pages_exact(size, gfp);
 
         if (p)
-                dmab->addr = page_to_phys(virt_to_page(p));
+                *addr = page_to_phys(virt_to_page(p));
         return p;
 }
 
+static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+        return do_alloc_pages(size, &dmab->addr,
+                              snd_mem_get_gfp_flags(dmab, GFP_KERNEL));
+}
+
 static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
 {
         free_pages_exact(dmab->area, dmab->bytes);
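
The hunk above splits the old snd_dma_continuous_alloc() into a reusable do_alloc_pages() core plus a thin wrapper, so the same physically contiguous allocation (with the physical address returned through *addr) can be driven with different gfp policies. A minimal sketch of a hypothetical in-file caller, assuming only what the hunk shows (example_alloc_quiet is an illustrative name, not part of the patch):

static void *example_alloc_quiet(size_t size, dma_addr_t *addr)
{
        /* same helper, different policy: silent, no-retry allocation */
        return do_alloc_pages(size, addr,
                              GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
}

do_alloc_fallback_pages(), added later in this patch, is exactly such a caller.
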
@@ -463,6 +475,25 @@ static const struct snd_malloc_ops snd_dma_dev_ops = {
 /*
  * Write-combined pages
  */
+/* x86-specific allocations */
+#ifdef CONFIG_SND_DMA_SGBUF
+static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+        return do_alloc_fallback_pages(dmab->dev.dev, size, &dmab->addr, true);
+}
+
+static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
+{
+        do_free_fallback_pages(dmab->area, dmab->bytes, true);
+}
+
+static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
+                           struct vm_area_struct *area)
+{
+        area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+        return snd_dma_continuous_mmap(dmab, area);
+}
+#else
 static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
         return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
@@ -479,17 +510,14 @@ static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
         return dma_mmap_wc(dmab->dev.dev, area,
                            dmab->area, dmab->addr, dmab->bytes);
 }
+#endif /* CONFIG_SND_DMA_SGBUF */
 
 static const struct snd_malloc_ops snd_dma_wc_ops = {
         .alloc = snd_dma_wc_alloc,
         .free = snd_dma_wc_free,
         .mmap = snd_dma_wc_mmap,
 };
 
-#ifdef CONFIG_SND_DMA_SGBUF
-static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
-#endif
-
 /*
  * Non-contiguous pages allocator
  */
@@ -669,6 +697,37 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
         .get_chunk_size = snd_dma_noncontig_get_chunk_size,
 };
 
+/* manual page allocations with wc setup */
+static void *do_alloc_fallback_pages(struct device *dev, size_t size,
+                                     dma_addr_t *addr, bool wc)
+{
+        gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
+        void *p;
+
+ again:
+        p = do_alloc_pages(size, addr, gfp);
+        if (!p || (*addr + size - 1) & ~dev->coherent_dma_mask) {
+                if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
+                        gfp |= GFP_DMA32;
+                        goto again;
+                }
+                if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
+                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+                        goto again;
+                }
+        }
+        if (p && wc)
+                set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
+        return p;
+}
+
+static void do_free_fallback_pages(void *p, size_t size, bool wc)
+{
+        if (wc)
+                set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
+        free_pages_exact(p, size);
+}
+
 /* Fallback SG-buffer allocations for x86 */
 struct snd_dma_sg_fallback {
         size_t count;
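
do_alloc_fallback_pages() retries from progressively lower memory zones whenever the buffer's last byte lies outside the device's coherent DMA mask. To make the mask test concrete, here is the predicate in isolation as a small user-space C program (all values invented for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t mask = 0xffffffffULL;  /* 32-bit coherent_dma_mask */
        uint64_t addr = 0x123456000ULL; /* buffer placed above 4 GiB */
        uint64_t size = 4096;

        /* same predicate as the patch: any set bit outside the mask
         * in the buffer's last byte address means the device cannot
         * reach the buffer */
        if ((addr + size - 1) & ~mask)
                printf("unreachable -> retry with GFP_DMA32, then GFP_DMA\n");
        return 0;
}

Note that the second retry drops GFP_DMA32 before adding GFP_DMA, since the two zone modifiers must not be combined in one gfp mask.
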
@@ -679,14 +738,11 @@ struct snd_dma_sg_fallback {
 static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
                                        struct snd_dma_sg_fallback *sgbuf)
 {
+        bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
         size_t i;
 
-        if (sgbuf->count && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
-                set_pages_array_wb(sgbuf->pages, sgbuf->count);
         for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
-                dma_free_coherent(dmab->dev.dev, PAGE_SIZE,
-                                  page_address(sgbuf->pages[i]),
-                                  sgbuf->addrs[i]);
+                do_free_fallback_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
         kvfree(sgbuf->pages);
         kvfree(sgbuf->addrs);
         kfree(sgbuf);
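
The free path now restores the write-back attribute page by page before the memory returns to the page allocator; on x86, releasing pages still marked WC would let later, unrelated users inherit the wrong cache attribute. A sketch of the symmetric pattern, assuming an x86 build where set_memory_wc()/set_memory_wb() are available (example_wc_page_cycle is an illustrative name):

static void example_wc_page_cycle(void)
{
        dma_addr_t addr;
        void *p = do_alloc_pages(PAGE_SIZE, &addr, GFP_KERNEL);

        if (!p)
                return;
        set_memory_wc((unsigned long)p, 1);     /* one page -> WC */
        /* ... hand the page to the device ... */
        set_memory_wb((unsigned long)p, 1);     /* restore before freeing */
        free_pages_exact(p, PAGE_SIZE);
}
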
@@ -698,6 +754,7 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
         struct page **pages;
         size_t i, count;
         void *p;
+        bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
 
         sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
         if (!sgbuf)
@@ -712,15 +769,13 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
                 goto error;
 
         for (i = 0; i < count; sgbuf->count++, i++) {
-                p = dma_alloc_coherent(dmab->dev.dev, PAGE_SIZE,
-                                       &sgbuf->addrs[i], DEFAULT_GFP);
+                p = do_alloc_fallback_pages(dmab->dev.dev, PAGE_SIZE,
+                                            &sgbuf->addrs[i], wc);
                 if (!p)
                         goto error;
                 sgbuf->pages[i] = virt_to_page(p);
         }
 
-        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
-                set_pages_array_wc(pages, count);
         p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
         if (!p)
                 goto error;
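
Taken together, the last three hunks make the SG fallback allocate each page through do_alloc_fallback_pages(), so the WC attribute is applied per page at allocation time and the separate set_pages_array_wc()/set_pages_array_wb() passes become unnecessary. A condensed sketch of the resulting flow, assuming pages[] and addrs[] are preallocated arrays of count entries (example_sg_fill is an illustrative name; error unwinding is left to the caller, as in the patch):

static void *example_sg_fill(struct device *dev, struct page **pages,
                             dma_addr_t *addrs, size_t count, bool wc)
{
        size_t i;
        void *p;

        for (i = 0; i < count; i++) {
                /* one page at a time, each checked against the DMA
                 * mask and already flipped to WC when requested */
                p = do_alloc_fallback_pages(dev, PAGE_SIZE, &addrs[i], wc);
                if (!p)
                        return NULL;
                pages[i] = virt_to_page(p);
        }
        /* stitch the pages into one virtually contiguous buffer */
        return vmap(pages, count, VM_MAP, PAGE_KERNEL);
}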