
Commit 060ad66

madalinbucur authored and davem330 committed
dpaa_eth: change DMA device
The DPAA Ethernet driver is using the FMan MAC as the device for DMA mapping. This is not actually correct, as the real DMA device is the FMan port (the FMan Rx port for reception and the FMan Tx port for transmission). Change the device used for DMA mapping to the FMan Rx and Tx port devices.

Signed-off-by: Madalin Bucur <[email protected]>
Signed-off-by: Laurentiu Tudor <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 681e383 · commit 060ad66

File tree: 2 files changed, +62 −51 lines

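Before the per-file hunks, here is the gist of the change as a minimal sketch. It condenses the probe-path hunk below into one hypothetical helper — dpaa_dma_devs_init() is a name made up for this sketch — while fman_port_get_device() (introduced by the parent commit 681e383), the mac_dev->port[RX]/port[TX] handles, priv->rx_dma_dev/tx_dma_dev and the 40-bit DMA mask all come from the diff itself:

#include <linux/dma-mapping.h>

#include "fman_port.h"	/* fman_port_get_device() */
#include "mac.h"	/* struct mac_device */
#include "dpaa_eth.h"	/* struct dpaa_priv, RX/TX port indices */

/* Hypothetical helper (name invented for this sketch) condensing the
 * probe-path changes: the FMan Rx/Tx ports, not the MAC, are the devices
 * that actually perform DMA, so look them up once and set their DMA masks.
 */
static int dpaa_dma_devs_init(struct dpaa_priv *priv,
			      struct mac_device *mac_dev)
{
	int err;

	priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
	priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);

	/* FMan ports can address up to 40 bits of DMA space */
	err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
	if (!err)
		err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
						   DMA_BIT_MASK(40));
	return err;
}

Every mapping call site is then rerouted accordingly: the Tx path maps and unmaps against priv->tx_dma_dev, and the Rx path — including the buffer pools, which now carry a dpaa_priv back-pointer instead of a bare struct device — against priv->rx_dma_dev, as the hunks below show.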

drivers/net/ethernet/freescale/dpaa/dpaa_eth.c

Lines changed: 56 additions & 49 deletions
@@ -1335,15 +1335,15 @@ static void dpaa_fd_release(const struct net_device *net_dev,
 	vaddr = phys_to_virt(qm_fd_addr(fd));
 	sgt = vaddr + qm_fd_get_offset(fd);
 
-	dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
-			 DMA_FROM_DEVICE);
+	dma_unmap_single(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
+			 dpaa_bp->size, DMA_FROM_DEVICE);
 
 	dpaa_release_sgt_members(sgt);
 
-	addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
-			      DMA_FROM_DEVICE);
-	if (dma_mapping_error(dpaa_bp->dev, addr)) {
-		dev_err(dpaa_bp->dev, "DMA mapping failed");
+	addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, vaddr,
+			      dpaa_bp->size, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
+		netdev_err(net_dev, "DMA mapping failed\n");
 		return;
 	}
 	bm_buffer_set64(&bmb, addr);
@@ -1488,7 +1488,7 @@ static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
 
 static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
 {
-	struct device *dev = dpaa_bp->dev;
+	struct net_device *net_dev = dpaa_bp->priv->net_dev;
 	struct bm_buffer bmb[8];
 	dma_addr_t addr;
 	void *new_buf;
@@ -1497,16 +1497,18 @@ static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
 	for (i = 0; i < 8; i++) {
 		new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
 		if (unlikely(!new_buf)) {
-			dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
-				dpaa_bp->raw_size);
+			netdev_err(net_dev,
+				   "netdev_alloc_frag() failed, size %zu\n",
+				   dpaa_bp->raw_size);
 			goto release_previous_buffs;
 		}
 		new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
 
-		addr = dma_map_single(dev, new_buf,
+		addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, new_buf,
 				      dpaa_bp->size, DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(dev, addr))) {
-			dev_err(dpaa_bp->dev, "DMA map failed");
+		if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
+					       addr))) {
+			netdev_err(net_dev, "DMA map failed\n");
 			goto release_previous_buffs;
 		}
 
@@ -1634,7 +1636,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 
 	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
 		nr_frags = skb_shinfo(skb)->nr_frags;
-		dma_unmap_single(dev, addr,
+		dma_unmap_single(priv->tx_dma_dev, addr,
 				 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
 				 dma_dir);
 
@@ -1644,21 +1646,21 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 		sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
 
 		/* sgt[0] is from lowmem, was dma_map_single()-ed */
-		dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
+		dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
 				 qm_sg_entry_get_len(&sgt[0]), dma_dir);
 
 		/* remaining pages were mapped with skb_frag_dma_map() */
 		for (i = 1; i <= nr_frags; i++) {
 			WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
 
-			dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
+			dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
 				       qm_sg_entry_get_len(&sgt[i]), dma_dir);
 		}
 
 		/* Free the page frag that we allocated on Tx */
 		skb_free_frag(phys_to_virt(addr));
 	} else {
-		dma_unmap_single(dev, addr,
+		dma_unmap_single(priv->tx_dma_dev, addr,
 				 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
 	}
 
@@ -1762,8 +1764,8 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 			goto free_buffers;
 
 		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-		dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
-				 DMA_FROM_DEVICE);
+		dma_unmap_single(dpaa_bp->priv->rx_dma_dev, sg_addr,
+				 dpaa_bp->size, DMA_FROM_DEVICE);
 		if (!skb) {
 			sz = dpaa_bp->size +
 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1853,7 +1855,6 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
 			    int *offset)
 {
 	struct net_device *net_dev = priv->net_dev;
-	struct device *dev = net_dev->dev.parent;
 	enum dma_data_direction dma_dir;
 	unsigned char *buffer_start;
 	struct sk_buff **skbh;
@@ -1889,9 +1890,9 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
 		fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
 
 	/* Map the entire buffer size that may be seen by FMan, but no more */
-	addr = dma_map_single(dev, skbh,
+	addr = dma_map_single(priv->tx_dma_dev, skbh,
 			      skb_tail_pointer(skb) - buffer_start, dma_dir);
-	if (unlikely(dma_mapping_error(dev, addr))) {
+	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
 		if (net_ratelimit())
 			netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
 		return -EINVAL;
@@ -1907,7 +1908,6 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
 	const int nr_frags = skb_shinfo(skb)->nr_frags;
 	struct net_device *net_dev = priv->net_dev;
-	struct device *dev = net_dev->dev.parent;
 	struct qm_sg_entry *sgt;
 	struct sk_buff **skbh;
 	int i, j, err, sz;
@@ -1946,10 +1946,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	qm_sg_entry_set_len(&sgt[0], frag_len);
 	sgt[0].bpid = FSL_DPAA_BPID_INV;
 	sgt[0].offset = 0;
-	addr = dma_map_single(dev, skb->data,
+	addr = dma_map_single(priv->tx_dma_dev, skb->data,
 			      skb_headlen(skb), dma_dir);
-	if (unlikely(dma_mapping_error(dev, addr))) {
-		dev_err(dev, "DMA mapping failed");
+	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+		netdev_err(priv->net_dev, "DMA mapping failed\n");
 		err = -EINVAL;
 		goto sg0_map_failed;
 	}
@@ -1960,10 +1960,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 		frag = &skb_shinfo(skb)->frags[i];
 		frag_len = skb_frag_size(frag);
 		WARN_ON(!skb_frag_page(frag));
-		addr = skb_frag_dma_map(dev, frag, 0,
+		addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
 					frag_len, dma_dir);
-		if (unlikely(dma_mapping_error(dev, addr))) {
-			dev_err(dev, "DMA mapping failed");
+		if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+			netdev_err(priv->net_dev, "DMA mapping failed\n");
 			err = -EINVAL;
 			goto sg_map_failed;
 		}
@@ -1986,10 +1986,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	skbh = (struct sk_buff **)buffer_start;
 	*skbh = skb;
 
-	addr = dma_map_single(dev, buffer_start,
+	addr = dma_map_single(priv->tx_dma_dev, buffer_start,
 			      priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
-	if (unlikely(dma_mapping_error(dev, addr))) {
-		dev_err(dev, "DMA mapping failed");
+	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+		netdev_err(priv->net_dev, "DMA mapping failed\n");
 		err = -EINVAL;
 		goto sgt_map_failed;
 	}
@@ -2003,7 +2003,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 sgt_map_failed:
 sg_map_failed:
 	for (j = 0; j < i; j++)
-		dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
+		dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
 			       qm_sg_entry_get_len(&sgt[j]), dma_dir);
 sg0_map_failed:
 csum_failed:
@@ -2304,7 +2304,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 		return qman_cb_dqrr_consume;
 	}
 
-	dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
+	dma_unmap_single(dpaa_bp->priv->rx_dma_dev, addr, dpaa_bp->size,
+			 DMA_FROM_DEVICE);
 
 	/* prefetch the first 64 bytes of the frame or the SGT start */
 	vaddr = phys_to_virt(addr);
@@ -2659,7 +2660,7 @@ static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
 {
 	dma_addr_t addr = bm_buf_addr(bmb);
 
-	dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
+	dma_unmap_single(bp->priv->rx_dma_dev, addr, bp->size, DMA_FROM_DEVICE);
 
 	skb_free_frag(phys_to_virt(addr));
 }
@@ -2769,45 +2770,39 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	int err = 0, i, channel;
 	struct device *dev;
 
+	dev = &pdev->dev;
+
 	err = bman_is_probed();
 	if (!err)
 		return -EPROBE_DEFER;
 	if (err < 0) {
-		dev_err(&pdev->dev, "failing probe due to bman probe error\n");
+		dev_err(dev, "failing probe due to bman probe error\n");
 		return -ENODEV;
 	}
 	err = qman_is_probed();
 	if (!err)
 		return -EPROBE_DEFER;
 	if (err < 0) {
-		dev_err(&pdev->dev, "failing probe due to qman probe error\n");
+		dev_err(dev, "failing probe due to qman probe error\n");
 		return -ENODEV;
 	}
 	err = bman_portals_probed();
 	if (!err)
 		return -EPROBE_DEFER;
 	if (err < 0) {
-		dev_err(&pdev->dev,
+		dev_err(dev,
 			"failing probe due to bman portals probe error\n");
 		return -ENODEV;
 	}
 	err = qman_portals_probed();
 	if (!err)
 		return -EPROBE_DEFER;
 	if (err < 0) {
-		dev_err(&pdev->dev,
+		dev_err(dev,
 			"failing probe due to qman portals probe error\n");
 		return -ENODEV;
 	}
 
-	/* device used for DMA mapping */
-	dev = pdev->dev.parent;
-	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
-	if (err) {
-		dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
-		return err;
-	}
-
 	/* Allocate this early, so we can store relevant information in
 	 * the private area
 	 */
@@ -2828,11 +2823,23 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 
 	mac_dev = dpaa_mac_dev_get(pdev);
 	if (IS_ERR(mac_dev)) {
-		dev_err(dev, "dpaa_mac_dev_get() failed\n");
+		netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
 		err = PTR_ERR(mac_dev);
 		goto free_netdev;
 	}
 
+	/* Devices used for DMA mapping */
+	priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
+	priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);
+	err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
+	if (!err)
+		err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
+						   DMA_BIT_MASK(40));
+	if (err) {
+		netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
+		return err;
+	}
+
 	/* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
 	 * we choose conservatively and let the user explicitly set a higher
 	 * MTU via ifconfig. Otherwise, the user may end up with different MTUs
@@ -2859,7 +2866,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 		dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
 		/* avoid runtime computations by keeping the usable size here */
 		dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
-		dpaa_bps[i]->dev = dev;
+		dpaa_bps[i]->priv = priv;
 
 		err = dpaa_bp_alloc_pool(dpaa_bps[i]);
 		if (err < 0)
@@ -2982,7 +2989,7 @@ static int dpaa_remove(struct platform_device *pdev)
 	struct device *dev;
 	int err;
 
-	dev = pdev->dev.parent;
+	dev = &pdev->dev;
 	net_dev = dev_get_drvdata(dev);
 
 	priv = netdev_priv(net_dev);

drivers/net/ethernet/freescale/dpaa/dpaa_eth.h

Lines changed: 6 additions & 2 deletions
@@ -80,9 +80,11 @@ struct dpaa_fq_cbs {
 	struct qman_fq egress_ern;
 };
 
+struct dpaa_priv;
+
 struct dpaa_bp {
-	/* device used in the DMA mapping operations */
-	struct device *dev;
+	/* used in the DMA mapping operations */
+	struct dpaa_priv *priv;
 	/* current number of buffers in the buffer pool alloted to each CPU */
 	int __percpu *percpu_count;
 	/* all buffers allocated for this pool have this raw size */
@@ -153,6 +155,8 @@ struct dpaa_priv {
 	u16 tx_headroom;
 	struct net_device *net_dev;
 	struct mac_device *mac_dev;
+	struct device *rx_dma_dev;
+	struct device *tx_dma_dev;
 	struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
 	struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
 