Skip to content

Commit 503a646

Browse files
committed
Merge branch 'DPAA-Ethernet-changes'
Madalin Bucur says: ==================== DPAA Ethernet changes v3: add newline at the end of error messages v2: resending with From: field matching signed-off-by Here's a series of changes for the DPAA Ethernet, addressing minor or unapparent issues in the codebase, adding probe ordering based on a recently added DPAA QMan API, removing some redundant code. ==================== Signed-off-by: David S. Miller <[email protected]>
2 parents fb8d1d7 + 6e6583c commit 503a646

File tree

5 files changed

+109
-54
lines changed

5 files changed

+109
-54
lines changed

drivers/net/ethernet/freescale/dpaa/dpaa_eth.c

Lines changed: 82 additions & 48 deletions
Original file line number | Diff line number | Diff line change
@@ -901,7 +901,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
901901

902902
if (num_portals == 0)
903903
dev_err(priv->net_dev->dev.parent,
904-
"No Qman software (affine) channels found");
904+
"No Qman software (affine) channels found\n");
905905

906906
/* Initialize each FQ in the list */
907907
list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
@@ -1335,15 +1335,15 @@ static void dpaa_fd_release(const struct net_device *net_dev,
13351335
vaddr = phys_to_virt(qm_fd_addr(fd));
13361336
sgt = vaddr + qm_fd_get_offset(fd);
13371337

1338-
dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
1339-
DMA_FROM_DEVICE);
1338+
dma_unmap_single(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
1339+
dpaa_bp->size, DMA_FROM_DEVICE);
13401340

13411341
dpaa_release_sgt_members(sgt);
13421342

1343-
addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
1344-
DMA_FROM_DEVICE);
1345-
if (dma_mapping_error(dpaa_bp->dev, addr)) {
1346-
dev_err(dpaa_bp->dev, "DMA mapping failed");
1343+
addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, vaddr,
1344+
dpaa_bp->size, DMA_FROM_DEVICE);
1345+
if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
1346+
netdev_err(net_dev, "DMA mapping failed\n");
13471347
return;
13481348
}
13491349
bm_buffer_set64(&bmb, addr);
@@ -1488,7 +1488,7 @@ static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
14881488

14891489
static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
14901490
{
1491-
struct device *dev = dpaa_bp->dev;
1491+
struct net_device *net_dev = dpaa_bp->priv->net_dev;
14921492
struct bm_buffer bmb[8];
14931493
dma_addr_t addr;
14941494
void *new_buf;
@@ -1497,16 +1497,18 @@ static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
14971497
for (i = 0; i < 8; i++) {
14981498
new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
14991499
if (unlikely(!new_buf)) {
1500-
dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
1501-
dpaa_bp->raw_size);
1500+
netdev_err(net_dev,
1501+
"netdev_alloc_frag() failed, size %zu\n",
1502+
dpaa_bp->raw_size);
15021503
goto release_previous_buffs;
15031504
}
15041505
new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
15051506

1506-
addr = dma_map_single(dev, new_buf,
1507+
addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, new_buf,
15071508
dpaa_bp->size, DMA_FROM_DEVICE);
1508-
if (unlikely(dma_mapping_error(dev, addr))) {
1509-
dev_err(dpaa_bp->dev, "DMA map failed");
1509+
if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
1510+
addr))) {
1511+
netdev_err(net_dev, "DMA map failed\n");
15101512
goto release_previous_buffs;
15111513
}
15121514

@@ -1634,7 +1636,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
16341636

16351637
if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
16361638
nr_frags = skb_shinfo(skb)->nr_frags;
1637-
dma_unmap_single(dev, addr,
1639+
dma_unmap_single(priv->tx_dma_dev, addr,
16381640
qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
16391641
dma_dir);
16401642

@@ -1644,21 +1646,21 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
16441646
sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
16451647

16461648
/* sgt[0] is from lowmem, was dma_map_single()-ed */
1647-
dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
1649+
dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
16481650
qm_sg_entry_get_len(&sgt[0]), dma_dir);
16491651

16501652
/* remaining pages were mapped with skb_frag_dma_map() */
16511653
for (i = 1; i <= nr_frags; i++) {
16521654
WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
16531655

1654-
dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
1656+
dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
16551657
qm_sg_entry_get_len(&sgt[i]), dma_dir);
16561658
}
16571659

16581660
/* Free the page frag that we allocated on Tx */
16591661
skb_free_frag(phys_to_virt(addr));
16601662
} else {
1661-
dma_unmap_single(dev, addr,
1663+
dma_unmap_single(priv->tx_dma_dev, addr,
16621664
skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
16631665
}
16641666

@@ -1762,8 +1764,8 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
17621764
goto free_buffers;
17631765

17641766
count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1765-
dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
1766-
DMA_FROM_DEVICE);
1767+
dma_unmap_single(dpaa_bp->priv->rx_dma_dev, sg_addr,
1768+
dpaa_bp->size, DMA_FROM_DEVICE);
17671769
if (!skb) {
17681770
sz = dpaa_bp->size +
17691771
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1853,7 +1855,6 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
18531855
int *offset)
18541856
{
18551857
struct net_device *net_dev = priv->net_dev;
1856-
struct device *dev = net_dev->dev.parent;
18571858
enum dma_data_direction dma_dir;
18581859
unsigned char *buffer_start;
18591860
struct sk_buff **skbh;
@@ -1889,9 +1890,9 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
18891890
fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
18901891

18911892
/* Map the entire buffer size that may be seen by FMan, but no more */
1892-
addr = dma_map_single(dev, skbh,
1893+
addr = dma_map_single(priv->tx_dma_dev, skbh,
18931894
skb_tail_pointer(skb) - buffer_start, dma_dir);
1894-
if (unlikely(dma_mapping_error(dev, addr))) {
1895+
if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
18951896
if (net_ratelimit())
18961897
netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
18971898
return -EINVAL;
@@ -1907,7 +1908,6 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
19071908
const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
19081909
const int nr_frags = skb_shinfo(skb)->nr_frags;
19091910
struct net_device *net_dev = priv->net_dev;
1910-
struct device *dev = net_dev->dev.parent;
19111911
struct qm_sg_entry *sgt;
19121912
struct sk_buff **skbh;
19131913
int i, j, err, sz;
@@ -1946,10 +1946,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
19461946
qm_sg_entry_set_len(&sgt[0], frag_len);
19471947
sgt[0].bpid = FSL_DPAA_BPID_INV;
19481948
sgt[0].offset = 0;
1949-
addr = dma_map_single(dev, skb->data,
1949+
addr = dma_map_single(priv->tx_dma_dev, skb->data,
19501950
skb_headlen(skb), dma_dir);
1951-
if (unlikely(dma_mapping_error(dev, addr))) {
1952-
dev_err(dev, "DMA mapping failed");
1951+
if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
1952+
netdev_err(priv->net_dev, "DMA mapping failed\n");
19531953
err = -EINVAL;
19541954
goto sg0_map_failed;
19551955
}
@@ -1960,10 +1960,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
19601960
frag = &skb_shinfo(skb)->frags[i];
19611961
frag_len = skb_frag_size(frag);
19621962
WARN_ON(!skb_frag_page(frag));
1963-
addr = skb_frag_dma_map(dev, frag, 0,
1963+
addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
19641964
frag_len, dma_dir);
1965-
if (unlikely(dma_mapping_error(dev, addr))) {
1966-
dev_err(dev, "DMA mapping failed");
1965+
if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
1966+
netdev_err(priv->net_dev, "DMA mapping failed\n");
19671967
err = -EINVAL;
19681968
goto sg_map_failed;
19691969
}
@@ -1986,10 +1986,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
19861986
skbh = (struct sk_buff **)buffer_start;
19871987
*skbh = skb;
19881988

1989-
addr = dma_map_single(dev, buffer_start,
1989+
addr = dma_map_single(priv->tx_dma_dev, buffer_start,
19901990
priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
1991-
if (unlikely(dma_mapping_error(dev, addr))) {
1992-
dev_err(dev, "DMA mapping failed");
1991+
if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
1992+
netdev_err(priv->net_dev, "DMA mapping failed\n");
19931993
err = -EINVAL;
19941994
goto sgt_map_failed;
19951995
}
@@ -2003,7 +2003,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
20032003
sgt_map_failed:
20042004
sg_map_failed:
20052005
for (j = 0; j < i; j++)
2006-
dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
2006+
dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
20072007
qm_sg_entry_get_len(&sgt[j]), dma_dir);
20082008
sg0_map_failed:
20092009
csum_failed:
@@ -2304,11 +2304,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
23042304
return qman_cb_dqrr_consume;
23052305
}
23062306

2307-
dpaa_bp = dpaa_bpid2pool(fd->bpid);
2308-
if (!dpaa_bp)
2309-
return qman_cb_dqrr_consume;
2310-
2311-
dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
2307+
dma_unmap_single(dpaa_bp->priv->rx_dma_dev, addr, dpaa_bp->size,
2308+
DMA_FROM_DEVICE);
23122309

23132310
/* prefetch the first 64 bytes of the frame or the SGT start */
23142311
vaddr = phys_to_virt(addr);
@@ -2663,7 +2660,7 @@ static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
26632660
{
26642661
dma_addr_t addr = bm_buf_addr(bmb);
26652662

2666-
dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
2663+
dma_unmap_single(bp->priv->rx_dma_dev, addr, bp->size, DMA_FROM_DEVICE);
26672664

26682665
skb_free_frag(phys_to_virt(addr));
26692666
}
@@ -2773,12 +2770,37 @@ static int dpaa_eth_probe(struct platform_device *pdev)
27732770
int err = 0, i, channel;
27742771
struct device *dev;
27752772

2776-
/* device used for DMA mapping */
2777-
dev = pdev->dev.parent;
2778-
err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
2779-
if (err) {
2780-
dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
2781-
return err;
2773+
dev = &pdev->dev;
2774+
2775+
err = bman_is_probed();
2776+
if (!err)
2777+
return -EPROBE_DEFER;
2778+
if (err < 0) {
2779+
dev_err(dev, "failing probe due to bman probe error\n");
2780+
return -ENODEV;
2781+
}
2782+
err = qman_is_probed();
2783+
if (!err)
2784+
return -EPROBE_DEFER;
2785+
if (err < 0) {
2786+
dev_err(dev, "failing probe due to qman probe error\n");
2787+
return -ENODEV;
2788+
}
2789+
err = bman_portals_probed();
2790+
if (!err)
2791+
return -EPROBE_DEFER;
2792+
if (err < 0) {
2793+
dev_err(dev,
2794+
"failing probe due to bman portals probe error\n");
2795+
return -ENODEV;
2796+
}
2797+
err = qman_portals_probed();
2798+
if (!err)
2799+
return -EPROBE_DEFER;
2800+
if (err < 0) {
2801+
dev_err(dev,
2802+
"failing probe due to qman portals probe error\n");
2803+
return -ENODEV;
27822804
}
27832805

27842806
/* Allocate this early, so we can store relevant information in
@@ -2801,11 +2823,23 @@ static int dpaa_eth_probe(struct platform_device *pdev)
28012823

28022824
mac_dev = dpaa_mac_dev_get(pdev);
28032825
if (IS_ERR(mac_dev)) {
2804-
dev_err(dev, "dpaa_mac_dev_get() failed\n");
2826+
netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
28052827
err = PTR_ERR(mac_dev);
28062828
goto free_netdev;
28072829
}
28082830

2831+
/* Devices used for DMA mapping */
2832+
priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
2833+
priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);
2834+
err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
2835+
if (!err)
2836+
err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
2837+
DMA_BIT_MASK(40));
2838+
if (err) {
2839+
netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
2840+
return err;
2841+
}
2842+
28092843
/* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
28102844
* we choose conservatively and let the user explicitly set a higher
28112845
* MTU via ifconfig. Otherwise, the user may end up with different MTUs
@@ -2832,7 +2866,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
28322866
dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
28332867
/* avoid runtime computations by keeping the usable size here */
28342868
dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
2835-
dpaa_bps[i]->dev = dev;
2869+
dpaa_bps[i]->priv = priv;
28362870

28372871
err = dpaa_bp_alloc_pool(dpaa_bps[i]);
28382872
if (err < 0)
@@ -2955,7 +2989,7 @@ static int dpaa_remove(struct platform_device *pdev)
29552989
struct device *dev;
29562990
int err;
29572991

2958-
dev = pdev->dev.parent;
2992+
dev = &pdev->dev;
29592993
net_dev = dev_get_drvdata(dev);
29602994

29612995
priv = netdev_priv(net_dev);

drivers/net/ethernet/freescale/dpaa/dpaa_eth.h

Lines changed: 6 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -80,9 +80,11 @@ struct dpaa_fq_cbs {
8080
struct qman_fq egress_ern;
8181
};
8282

83+
struct dpaa_priv;
84+
8385
struct dpaa_bp {
84-
/* device used in the DMA mapping operations */
85-
struct device *dev;
86+
/* used in the DMA mapping operations */
87+
struct dpaa_priv *priv;
8688
/* current number of buffers in the buffer pool alloted to each CPU */
8789
int __percpu *percpu_count;
8890
/* all buffers allocated for this pool have this raw size */
@@ -153,6 +155,8 @@ struct dpaa_priv {
153155
u16 tx_headroom;
154156
struct net_device *net_dev;
155157
struct mac_device *mac_dev;
158+
struct device *rx_dma_dev;
159+
struct device *tx_dma_dev;
156160
struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
157161
struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
158162

drivers/net/ethernet/freescale/fman/fman.c

Lines changed: 5 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -634,6 +634,9 @@ static void set_port_liodn(struct fman *fman, u8 port_id,
634634
{
635635
u32 tmp;
636636

637+
iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
638+
if (!IS_ENABLED(CONFIG_FSL_PAMU))
639+
return;
637640
/* set LIODN base for this port */
638641
tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]);
639642
if (port_id % 2) {
@@ -644,7 +647,6 @@ static void set_port_liodn(struct fman *fman, u8 port_id,
644647
tmp |= liodn_base << DMA_LIODN_SHIFT;
645648
}
646649
iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
647-
iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
648650
}
649651

650652
static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
@@ -1942,6 +1944,8 @@ static int fman_init(struct fman *fman)
19421944

19431945
fman->liodn_offset[i] =
19441946
ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]);
1947+
if (!IS_ENABLED(CONFIG_FSL_PAMU))
1948+
continue;
19451949
liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]);
19461950
if (i % 2) {
19471951
/* FMDM_PLR LSB holds LIODN base for odd ports */

drivers/net/ethernet/freescale/fman/fman_port.c

Lines changed: 14 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -435,7 +435,6 @@ struct fman_port_cfg {
435435

436436
struct fman_port_rx_pools_params {
437437
u8 num_of_pools;
438-
u16 second_largest_buf_size;
439438
u16 largest_buf_size;
440439
};
441440

@@ -946,8 +945,6 @@ static int set_ext_buffer_pools(struct fman_port *port)
946945
port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used;
947946
port->rx_pools_params.largest_buf_size =
948947
sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]];
949-
port->rx_pools_params.second_largest_buf_size =
950-
sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 2]];
951948

952949
/* FMBM_RMPD reg. - pool depletion */
953950
if (buf_pool_depletion->pools_grp_mode_enable) {
@@ -1728,6 +1725,20 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port)
17281725
}
17291726
EXPORT_SYMBOL(fman_port_get_qman_channel_id);
17301727

1728+
/**
1729+
* fman_port_get_device
1730+
* port: Pointer to the FMan port device
1731+
*
1732+
* Get the 'struct device' associated to the specified FMan port device
1733+
*
1734+
* Return: pointer to associated 'struct device'
1735+
*/
1736+
struct device *fman_port_get_device(struct fman_port *port)
1737+
{
1738+
return port->dev;
1739+
}
1740+
EXPORT_SYMBOL(fman_port_get_device);
1741+
17311742
int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset)
17321743
{
17331744
if (port->buffer_offsets.hash_result_offset == ILLEGAL_BASE)

drivers/net/ethernet/freescale/fman/fman_port.h

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -157,4 +157,6 @@ int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp);
157157

158158
struct fman_port *fman_port_bind(struct device *dev);
159159

160+
struct device *fman_port_get_device(struct fman_port *port);
161+
160162
#endif /* __FMAN_PORT_H */

0 commit comments

Comments
 (0)