
Commit cc26516

ALSA: memalloc: Allocate more contiguous pages for fallback case
Currently the fallback SG allocation tries to allocate each page individually, and this tends to result in a reverse order of memory addresses when a large space is available at boot, as the kernel hands out free pages from the top to the bottom of the zone. The end result looks non-contiguous (although it actually is contiguous). What's worse, it leads to an overflow of BDL entries for HD-audio.

To avoid such a problem, this patch modifies the allocation code slightly: it now tries to allocate the largest possible contiguous chunks, and falls back to smaller chunks only when an allocation fails -- a strategy similar to the existing snd_dma_alloc_pages_fallback() function.

Along with that trick, drop the unused address array from the snd_dma_sg_fallback object. It was needed in the past when dma_alloc_coherent() was used, but with the standard page allocator it became superfluous and is never referenced.

Fixes: a8d302a ("ALSA: memalloc: Revive x86-specific WC page allocations again")
Reviewed-by: Kai Vehmanen <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Takashi Iwai <[email protected]>
1 parent d69d137 commit cc26516
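To make the strategy concrete before reading the diff, here is a minimal userspace sketch of the halving loop described above, assuming a hypothetical contiguous allocator; try_alloc_chunk() and alloc_in_chunks() are illustrative stand-ins, not the kernel's do_alloc_pages() path, and the sketch omits the power-of-two rounding that the actual patch applies via get_order().

/*
 * A minimal sketch of the chunk-halving strategy (hypothetical helpers,
 * not the kernel API): prefer one big contiguous block, halve on failure.
 */
#include <stddef.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

/* Stand-in for a physically contiguous allocator that may fail. */
static void *try_alloc_chunk(size_t bytes)
{
	return malloc(bytes);
}

/*
 * Fill `chunks`/`lens` with up to `max_chunks` allocations covering
 * `size` bytes. Start with the whole size as one chunk and halve the
 * request whenever an allocation fails, down to a single page.
 * Returns the number of chunks used, or -1 if even one page failed.
 */
static int alloc_in_chunks(size_t size, void **chunks, size_t *lens,
			   size_t max_chunks)
{
	size_t chunk = size;
	size_t n = 0;

	while (size > 0 && n < max_chunks) {
		if (chunk > size)
			chunk = size;	/* don't overshoot the tail */
		void *p = try_alloc_chunk(chunk);
		if (!p) {
			if (chunk <= PAGE_SIZE)
				return -1;	/* even one page failed */
			chunk /= 2;		/* retry with a smaller chunk */
			continue;
		}
		chunks[n] = p;
		lens[n] = chunk;
		n++;
		size -= chunk;
	}
	return (int)n;
}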


sound/core/memalloc.c

Lines changed: 27 additions & 17 deletions
@@ -720,7 +720,6 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
 struct snd_dma_sg_fallback {
 	size_t count;
 	struct page **pages;
-	dma_addr_t *addrs;
 };
 
 static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
@@ -732,38 +731,49 @@ static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
 	for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
 		do_free_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
 	kvfree(sgbuf->pages);
-	kvfree(sgbuf->addrs);
 	kfree(sgbuf);
 }
 
 static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
 	struct snd_dma_sg_fallback *sgbuf;
-	struct page **pages;
-	size_t i, count;
+	struct page **pagep, *curp;
+	size_t chunk, npages;
+	dma_addr_t addr;
 	void *p;
 	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
 
 	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
 	if (!sgbuf)
 		return NULL;
-	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
-	if (!pages)
-		goto error;
-	sgbuf->pages = pages;
-	sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
-	if (!sgbuf->addrs)
+	size = PAGE_ALIGN(size);
+	sgbuf->count = size >> PAGE_SHIFT;
+	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
+	if (!sgbuf->pages)
 		goto error;
 
-	for (i = 0; i < count; sgbuf->count++, i++) {
-		p = do_alloc_pages(dmab->dev.dev, PAGE_SIZE, &sgbuf->addrs[i], wc);
-		if (!p)
-			goto error;
-		sgbuf->pages[i] = virt_to_page(p);
+	pagep = sgbuf->pages;
+	chunk = size;
+	while (size > 0) {
+		chunk = min(size, chunk);
+		p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
+		if (!p) {
+			if (chunk <= PAGE_SIZE)
+				goto error;
+			chunk >>= 1;
+			chunk = PAGE_SIZE << get_order(chunk);
+			continue;
+		}
+
+		size -= chunk;
+		/* fill pages */
+		npages = chunk >> PAGE_SHIFT;
+		curp = virt_to_page(p);
+		while (npages--)
+			*pagep++ = curp++;
 	}
 
-	p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
+	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
 	if (!p)
 		goto error;
 	dmab->private_data = sgbuf;
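Two details of the new loop are worth spelling out. After a failed allocation, the chunk size is halved and then rounded back up to a power-of-two number of pages via get_order(); for example, with 4 KiB pages, a failed 96 KiB request halves to 48 KiB and is rounded up to 64 KiB (order 4), while the min(size, chunk) at the top of the loop keeps the request from overshooting the remaining size. Once all chunks are allocated, vmap() maps the collected pages into a single virtually contiguous buffer, so callers are unaffected by how the backing memory was chunked.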
