@@ -383,44 +383,160 @@ static inline
 	}
 }
 
+/* Check if a DMA region fits the device constraints.
+ * Returns true if the region is OK for use with this device. */
+static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
+				      dma_addr_t addr, size_t size)
+{
+	switch (ring->type) {
+	case B43_DMA_30BIT:
+		if ((u64)addr + size > (1ULL << 30))
+			return 0;
+		break;
+	case B43_DMA_32BIT:
+		if ((u64)addr + size > (1ULL << 32))
+			return 0;
+		break;
+	case B43_DMA_64BIT:
+		/* Currently we can't have addresses beyond
+		 * 64bit in the kernel. */
+		break;
+	}
+	return 1;
+}
+
+#define is_4k_aligned(addr)	(((u64)(addr) & 0x0FFFull) == 0)
+#define is_8k_aligned(addr)	(((u64)(addr) & 0x1FFFull) == 0)
+
+static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
+				       dma_addr_t dmaaddr, size_t size)
+{
+	ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
+	free_pages((unsigned long)base, get_order(size));
+}
+
+static void *__b43_get_and_map_ringmem(struct b43_dmaring *ring,
+				       dma_addr_t *dmaaddr, size_t size,
+				       gfp_t gfp_flags)
+{
+	void *base;
+
+	base = (void *)__get_free_pages(gfp_flags, get_order(size));
+	if (!base)
+		return NULL;
+	memset(base, 0, size);
+	*dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
+				      DMA_TO_DEVICE);
+	if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
+		free_pages((unsigned long)base, get_order(size));
+		return NULL;
+	}
+
+	return base;
+}
+
+static void *b43_get_and_map_ringmem(struct b43_dmaring *ring,
+				     dma_addr_t *dmaaddr, size_t size)
+{
+	void *base;
+
+	base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
+					 GFP_KERNEL);
+	if (!base) {
+		b43err(ring->dev->wl, "Failed to allocate or map pages "
+		       "for DMA ringmemory\n");
+		return NULL;
+	}
+	if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
+		/* The memory does not fit our device constraints.
+		 * Retry with GFP_DMA set to get lower memory. */
+		b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+		base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
+						 GFP_KERNEL | GFP_DMA);
+		if (!base) {
+			b43err(ring->dev->wl, "Failed to allocate or map pages "
+			       "in the GFP_DMA region for DMA ringmemory\n");
+			return NULL;
+		}
+		if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
+			b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+			b43err(ring->dev->wl, "Failed to allocate DMA "
+			       "ringmemory that fits device constraints\n");
+			return NULL;
+		}
+	}
+	/* We expect the memory to be 4k aligned, at least. */
+	if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
+		b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+		return NULL;
+	}
+
+	return base;
+}
+
 static int alloc_ringmemory(struct b43_dmaring *ring)
 {
-	gfp_t flags = GFP_KERNEL;
-
-	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
-	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
-	 * has shown that 4K is sufficient for the latter as long as the buffer
-	 * does not cross an 8K boundary.
-	 *
-	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
-	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
-	 * which accounts for the GFP_DMA flag below.
-	 *
-	 * The flags here must match the flags in free_ringmemory below!
+	unsigned int required;
+	void *base;
+	dma_addr_t dmaaddr;
+
+	/* There are several requirements for the descriptor ring memory:
+	 * - The memory region needs to fit the address constraints for the
+	 *   device (same as for frame buffers).
+	 * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned.
+	 * - For 64bit DMA devices, the descriptor ring must be 8k aligned.
 	 */
+
 	if (ring->type == B43_DMA_64BIT)
-		flags |= GFP_DMA;
-	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
-						  B43_DMA_RINGMEMSIZE,
-						  &(ring->dmabase), flags);
-	if (!ring->descbase) {
-		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
+		required = ring->nr_slots * sizeof(struct b43_dmadesc64);
+	else
+		required = ring->nr_slots * sizeof(struct b43_dmadesc32);
+	if (B43_WARN_ON(required > 0x1000))
+		return -ENOMEM;
+
+	ring->alloc_descsize = 0x1000;
+	base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
+	if (!base)
+		return -ENOMEM;
+	ring->alloc_descbase = base;
+	ring->alloc_dmabase = dmaaddr;
+
+	if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
+		/* We're on <=32bit DMA, or we already got 8k aligned memory.
+		 * That's all we need, so we're fine. */
+		ring->descbase = base;
+		ring->dmabase = dmaaddr;
+		return 0;
+	}
+	b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
+
+	/* OK, we failed the 8k alignment requirement.
+	 * Try to force-align the memory region now. */
+	ring->alloc_descsize = 0x2000;
+	base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
+	if (!base)
 		return -ENOMEM;
+	ring->alloc_descbase = base;
+	ring->alloc_dmabase = dmaaddr;
+
+	if (is_8k_aligned(dmaaddr)) {
+		/* We're already 8k aligned. That's OK, too. */
+		ring->descbase = base;
+		ring->dmabase = dmaaddr;
+		return 0;
 	}
-	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
+	/* Force-align it to 8k */
+	ring->descbase = (void *)((u8 *)base + 0x1000);
+	ring->dmabase = dmaaddr + 0x1000;
+	B43_WARN_ON(!is_8k_aligned(ring->dmabase));
 
 	return 0;
 }
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-	gfp_t flags = GFP_KERNEL;
-
-	if (ring->type == B43_DMA_64BIT)
-		flags |= GFP_DMA;
-
-	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
-				ring->descbase, ring->dmabase, flags);
+	b43_unmap_and_free_ringmem(ring, ring->alloc_descbase,
+				   ring->alloc_dmabase, ring->alloc_descsize);
 }
 
 /* Reset the RX DMA channel */
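
The alloc_ringmemory() rewrite above leans on two properties: __get_free_pages() returns page (4k) aligned memory, whose mapped bus address the B43_WARN_ON in b43_get_and_map_ringmem() double-checks, and a 4k-aligned address that is not 8k-aligned becomes 8k-aligned once 0x1000 is added. So when a 64-bit DMA ring's first 0x1000 allocation misses the 8k requirement, the code allocates 0x2000 bytes instead and uses the second 4k page; independently, b43_get_and_map_ringmem() falls back to GFP_DMA when GFP_KERNEL memory violates the ring's address constraints. The user-space sketch below only illustrates the alignment arithmetic; the address value is made up and the macros simply mirror is_4k_aligned()/is_8k_aligned() from the patch.

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors is_4k_aligned()/is_8k_aligned() from the patch. */
	#define IS_4K_ALIGNED(a)	((((uint64_t)(a)) & 0x0FFFull) == 0)
	#define IS_8K_ALIGNED(a)	((((uint64_t)(a)) & 0x1FFFull) == 0)

	int main(void)
	{
		/* Made-up bus address of a 0x2000 byte allocation: page (4k)
		 * aligned, but not 8k aligned. */
		uint64_t dmabase = 0x0000F000ull;

		if (!IS_8K_ALIGNED(dmabase)) {
			/* Skip the first 4k page. Bit 12 was set, so adding
			 * 0x1000 clears it and carries upward; the result is
			 * 8k aligned, and 0x1000 bytes of descriptors still
			 * fit in the second half of the 0x2000 region. */
			dmabase += 0x1000;
		}
		printf("descriptor ring at %#llx, 8k aligned: %d\n",
		       (unsigned long long)dmabase, IS_8K_ALIGNED(dmabase));
		return 0;
	}
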
@@ -530,29 +646,14 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
 		return 1;
 
-	switch (ring->type) {
-	case B43_DMA_30BIT:
-		if ((u64)addr + buffersize > (1ULL << 30))
-			goto address_error;
-		break;
-	case B43_DMA_32BIT:
-		if ((u64)addr + buffersize > (1ULL << 32))
-			goto address_error;
-		break;
-	case B43_DMA_64BIT:
-		/* Currently we can't have addresses beyond
-		 * 64bit in the kernel. */
-		break;
+	if (!b43_dma_address_ok(ring, addr, buffersize)) {
+		/* We can't support this address. Unmap it again. */
+		unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+		return 1;
 	}
 
 	/* The address is OK. */
 	return 0;
-
-address_error:
-	/* We can't support this address. Unmap it again. */
-	unmap_descbuffer(ring, addr, buffersize, dma_to_device);
-
-	return 1;
 }
 
 static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
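
With the range check moved into b43_dma_address_ok(), b43_dma_mapping_error() now covers both failure modes in one boolean: a failed ssb_dma_map_single() and a bus address outside the ring's 30/32-bit window, which it also unmaps itself. A hypothetical caller, sketched from the parameter list visible in this hunk (ring, addr, buffersize, dma_to_device), might look like the following; the helper name, the error code and the trailing direction flag are illustrative only and not part of the patch.

	/* Hypothetical caller; not part of the patch. */
	static int example_map_buffer(struct b43_dmaring *ring,
				      void *buf, size_t len)
	{
		dma_addr_t dmaaddr;

		dmaaddr = ssb_dma_map_single(ring->dev->dev, buf, len,
					     DMA_TO_DEVICE);
		if (b43_dma_mapping_error(ring, dmaaddr, len, 1)) {
			/* Either the mapping itself failed or the bus address
			 * violates the ring's 30/32bit constraint; in the
			 * latter case the helper already unmapped the buffer,
			 * so the caller can retry with GFP_DMA memory or
			 * simply give up. */
			return -EIO;
		}
		/* ... hand dmaaddr to fill_descriptor() ... */
		return 0;
	}
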
@@ -614,6 +715,9 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 	meta->dmaaddr = dmaaddr;
 	ring->ops->fill_descriptor(ring, desc, dmaaddr,
 				   ring->rx_buffersize, 0, 0, 0);
+	ssb_dma_sync_single_for_device(ring->dev->dev,
+				       ring->alloc_dmabase,
+				       ring->alloc_descsize, DMA_TO_DEVICE);
 
 	return 0;
 }
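
Because the descriptor ring is now a streaming mapping created with ssb_dma_map_single() rather than a coherent ssb_dma_alloc_consistent() buffer, descriptor writes made by the CPU are not automatically visible to the device. That is what the ssb_dma_sync_single_for_device() calls added here and in the dma_tx_fragment() hunk below are for: flush the whole ring to the device before the hardware is told to fetch from it. A condensed sketch of the ordering, with hypothetical b43_write_descriptor()/b43_kick_hardware() standing in for fill_descriptor() and ops->poke_tx():

	/* Sketch only; the two helpers are placeholders, not driver API. */
	static void b43_commit_descriptor(struct b43_dmaring *ring, int slot)
	{
		b43_write_descriptor(ring, slot);	/* CPU fills the descriptor */
		wmb();					/* order the CPU stores */
		ssb_dma_sync_single_for_device(ring->dev->dev,
					       ring->alloc_dmabase,
					       ring->alloc_descsize,
					       DMA_TO_DEVICE);	/* flush to the device */
		b43_kick_hardware(ring, slot);		/* device may now fetch it */
	}
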
@@ -1246,6 +1350,9 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 	}
 	/* Now transfer the whole frame. */
 	wmb();
+	ssb_dma_sync_single_for_device(ring->dev->dev,
+				       ring->alloc_dmabase,
+				       ring->alloc_descsize, DMA_TO_DEVICE);
 	ops->poke_tx(ring, next_slot(ring, slot));
 	return 0;
 