Commit f5db19e

ARC: dma: ioremap: use phys_addr_t consistently in code paths

To support DMA in physical memory beyond 4GB with PAE40.

Signed-off-by: Vineet Gupta <[email protected]>

1 parent 971573c  commit f5db19e
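Why this matters, in brief: with PAE40 the ARC MMU can address 40-bit physical memory, but unsigned long on this 32-bit port stays 32 bits wide, so any cache-maintenance or ioremap path that takes a physical address as unsigned long would silently truncate buffers located above 4GB. The following stand-alone C sketch (illustrative stand-in types and helpers, not kernel code) shows the truncation:

/*
 * Sketch only: kernel_ulong and phys_addr_sketch stand in for
 * unsigned long and phys_addr_t on a 32-bit ARC kernel with PAE40.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t kernel_ulong;     /* unsigned long on a 32-bit kernel        */
typedef uint64_t phys_addr_sketch; /* phys_addr_t once PAE40 makes it 64-bit  */

static void sync_via_ulong(kernel_ulong start)
{
	printf("unsigned long param: %#llx\n", (unsigned long long)start);
}

static void sync_via_phys(phys_addr_sketch start)
{
	printf("phys_addr_t param:   %#llx\n", (unsigned long long)start);
}

int main(void)
{
	/* A DMA buffer sitting just above the 4GB boundary */
	phys_addr_sketch paddr = 0x100000000ULL + 0x1000;

	sync_via_ulong((kernel_ulong)paddr); /* prints 0x1000: wrong lines would be flushed */
	sync_via_phys(paddr);                /* prints 0x100001000: address preserved       */
	return 0;
}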

File tree

5 files changed: +37 -35 lines

arch/arc/include/asm/cacheflush.h

Lines changed: 3 additions & 3 deletions
@@ -40,9 +40,9 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
 
 void flush_dcache_page(struct page *page);
 
-void dma_cache_wback_inv(unsigned long start, unsigned long sz);
-void dma_cache_inv(unsigned long start, unsigned long sz);
-void dma_cache_wback(unsigned long start, unsigned long sz);
+void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_wback(phys_addr_t start, unsigned long sz);
 
 #define flush_dcache_mmap_lock(mapping) do { } while (0)
 #define flush_dcache_mmap_unlock(mapping) do { } while (0)

arch/arc/include/asm/io.h

Lines changed: 2 additions & 2 deletions
@@ -13,8 +13,8 @@
 #include <asm/byteorder.h>
 #include <asm/page.h>
 
-extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
-extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 				  unsigned long flags);
 extern void iounmap(const void __iomem *addr);
 

arch/arc/mm/cache.c

Lines changed: 15 additions & 15 deletions
@@ -28,9 +28,9 @@ volatile int slc_enable = 1, ioc_enable = 1;
 void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
 			       unsigned long sz, const int cacheop);
 
-void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
-void (*__dma_cache_inv)(unsigned long start, unsigned long sz);
-void (*__dma_cache_wback)(unsigned long start, unsigned long sz);
+void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
+void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
+void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
 
 char *arc_cache_mumbojumbo(int c, char *buf, int len)
 {
@@ -633,17 +633,17 @@ EXPORT_SYMBOL(flush_dcache_page);
  * DMA ops for systems with L1 cache only
  * Make memory coherent with L1 cache by flushing/invalidating L1 lines
  */
-static void __dma_cache_wback_inv_l1(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
 }
 
-static void __dma_cache_inv_l1(unsigned long start, unsigned long sz)
+static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_INV);
 }
 
-static void __dma_cache_wback_l1(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH);
 }
@@ -652,19 +652,19 @@ static void __dma_cache_wback_l1(unsigned long start, unsigned long sz)
  * DMA ops for systems with both L1 and L2 caches, but without IOC
  * Both L1 and L2 lines need to be explicitly flushed/invalidated
  */
-static void __dma_cache_wback_inv_slc(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
 	slc_op(start, sz, OP_FLUSH_N_INV);
 }
 
-static void __dma_cache_inv_slc(unsigned long start, unsigned long sz)
+static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_INV);
 	slc_op(start, sz, OP_INV);
 }
 
-static void __dma_cache_wback_slc(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH);
 	slc_op(start, sz, OP_FLUSH);
@@ -675,26 +675,26 @@ static void __dma_cache_wback_slc(unsigned long start, unsigned long sz)
  * IOC hardware snoops all DMA traffic keeping the caches consistent with
  * memory - eliding need for any explicit cache maintenance of DMA buffers
  */
-static void __dma_cache_wback_inv_ioc(unsigned long start, unsigned long sz) {}
-static void __dma_cache_inv_ioc(unsigned long start, unsigned long sz) {}
-static void __dma_cache_wback_ioc(unsigned long start, unsigned long sz) {}
+static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
+static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
+static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
 
 /*
  * Exported DMA API
  */
-void dma_cache_wback_inv(unsigned long start, unsigned long sz)
+void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
 {
 	__dma_cache_wback_inv(start, sz);
 }
 EXPORT_SYMBOL(dma_cache_wback_inv);
 
-void dma_cache_inv(unsigned long start, unsigned long sz)
+void dma_cache_inv(phys_addr_t start, unsigned long sz)
 {
 	__dma_cache_inv(start, sz);
 }
 EXPORT_SYMBOL(dma_cache_inv);
 
-void dma_cache_wback(unsigned long start, unsigned long sz)
+void dma_cache_wback(phys_addr_t start, unsigned long sz)
 {
 	__dma_cache_wback(start, sz);
 }
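For readers unfamiliar with this file: the exported dma_cache_*() helpers simply call through the __dma_cache_* function pointers, which the cache-init code binds to the _l1, _slc, or _ioc variant depending on the hardware detected. A minimal user-space sketch of that dispatch pattern (stub bodies and binding chosen by hand, illustrative only):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t; /* 64-bit, as with PAE40 */

/* Per-configuration implementations (stubs standing in for real cache ops) */
static void wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	printf("L1 flush+inv     %#llx (+%lu)\n", (unsigned long long)start, sz);
}

static void wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	printf("L1+SLC flush+inv %#llx (+%lu)\n", (unsigned long long)start, sz);
}

static void wback_inv_ioc(phys_addr_t start, unsigned long sz)
{
	(void)start; (void)sz; /* IOC snoops DMA traffic: nothing to do */
}

/* The pointer the exported API dispatches through */
static void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);

void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}

int main(void)
{
	/* In the kernel this binding happens once at cache init, based on
	 * whether the system has only L1, L1 plus SLC, or an IOC port;
	 * here we just try each variant in turn. */
	void (*variants[])(phys_addr_t, unsigned long) = {
		wback_inv_l1, wback_inv_slc, wback_inv_ioc
	};

	for (int i = 0; i < 3; i++) {
		__dma_cache_wback_inv = variants[i];
		dma_cache_wback_inv(0x100001000ULL, 4096);
	}
	return 0;
}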

arch/arc/mm/dma.c

Lines changed: 6 additions & 6 deletions
@@ -65,13 +65,13 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 
 	/* This is kernel Virtual address (0x7000_0000 based) */
 	if (need_kvaddr) {
-		kvaddr = ioremap_nocache((unsigned long)paddr, size);
+		kvaddr = ioremap_nocache(paddr, size);
 		if (kvaddr == NULL) {
 			__free_pages(page, order);
 			return NULL;
 		}
 	} else {
-		kvaddr = (void *)paddr;
+		kvaddr = (void *)(u32)paddr;
 	}
 
 	/*
@@ -85,7 +85,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 	 * will be optimized as a separate commit
 	 */
 	if (need_coh)
-		dma_cache_wback_inv((unsigned long)paddr, size);
+		dma_cache_wback_inv(paddr, size);
 
 	return kvaddr;
 }
@@ -110,7 +110,7 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
  * CPU accesses page via normal paddr, thus needs to explicitly made
  * consistent before each use
  */
-static void _dma_cache_sync(unsigned long paddr, size_t size,
+static void _dma_cache_sync(phys_addr_t paddr, size_t size,
 			    enum dma_data_direction dir)
 {
 	switch (dir) {
@@ -124,15 +124,15 @@ static void _dma_cache_sync(unsigned long paddr, size_t size,
 		dma_cache_wback_inv(paddr, size);
 		break;
 	default:
-		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
 	}
 }
 
 static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	unsigned long paddr = page_to_phys(page) + offset;
+	phys_addr_t paddr = page_to_phys(page) + offset;
 	_dma_cache_sync(paddr, size, dir);
 	return (dma_addr_t)paddr;
 }
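One detail worth calling out in the hunk above: %lx cannot portably print a 64-bit phys_addr_t on a 32-bit kernel, so the message switches to printk's %pa specifier, which prints a phys_addr_t in full and, by convention, takes a pointer to the variable rather than the value. A kernel-style fragment illustrating just that convention (hypothetical helper, not part of the commit):

#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical helper: report a bad DMA direction for a physical address.
 * %pa dereferences &paddr and prints the full phys_addr_t, whether it is
 * 32- or 64-bit on this configuration; %lx would truncate under PAE40.
 */
static void report_bad_dma_dir(int dir, phys_addr_t paddr)
{
	pr_err("Invalid DMA dir [%d] for OP @ %pa\n", dir, &paddr);
}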

arch/arc/mm/ioremap.c

Lines changed: 11 additions & 9 deletions
@@ -14,18 +14,21 @@
 #include <linux/slab.h>
 #include <linux/cache.h>
 
-void __iomem *ioremap(unsigned long paddr, unsigned long size)
+void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
 {
-	unsigned long end;
+	phys_addr_t end;
 
 	/* Don't allow wraparound or zero size */
 	end = paddr + size - 1;
 	if (!size || (end < paddr))
 		return NULL;
 
-	/* If the region is h/w uncached, avoid MMU mappings */
+	/*
+	 * If the region is h/w uncached, MMU mapping can be elided as optim
+	 * The cast to u32 is fine as this region can only be inside 4GB
+	 */
 	if (paddr >= ARC_UNCACHED_ADDR_SPACE)
-		return (void __iomem *)paddr;
+		return (void __iomem *)(u32)paddr;
 
 	return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
 }
@@ -41,9 +44,9 @@ EXPORT_SYMBOL(ioremap);
 void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 			   unsigned long flags)
 {
-	void __iomem *vaddr;
+	unsigned long vaddr;
 	struct vm_struct *area;
-	unsigned long off, end;
+	phys_addr_t off, end;
 	pgprot_t prot = __pgprot(flags);
 
 	/* Don't allow wraparound, zero size */
@@ -70,9 +73,8 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 	if (!area)
 		return NULL;
 	area->phys_addr = paddr;
-	vaddr = (void __iomem *)area->addr;
-	if (ioremap_page_range((unsigned long)vaddr,
-		       (unsigned long)vaddr + size, paddr, prot)) {
+	vaddr = (unsigned long)area->addr;
+	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
 		vunmap((void __force *)vaddr);
 		return NULL;
 	}
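The first ioremap() hunk widens the types while keeping two behaviours intact: the wraparound/zero-size guard now operates on the full physical address space, and addresses in the hardware-uncached window are still returned 1:1 (the u32 cast is safe because that window lies below 4GB). A small user-space sketch of the widened guard (illustrative names and values, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t; /* 64-bit, as with PAE40 */

/* Mirrors the guard at the top of ioremap(): reject zero-size requests and
 * ranges whose end wraps around the physical address space. */
static bool ioremap_range_ok(phys_addr_t paddr, unsigned long size)
{
	phys_addr_t end = paddr + size - 1;

	return size != 0 && end >= paddr;
}

int main(void)
{
	printf("%d\n", ioremap_range_ok(0x1fffff000ULL, 0x1000));        /* 1: region above 4GB, no wrap */
	printf("%d\n", ioremap_range_ok(0xfffffffffffff000ULL, 0x2000)); /* 0: wraps past the top        */
	printf("%d\n", ioremap_range_ok(0x80000000UL, 0));               /* 0: zero size                 */
	return 0;
}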
