[PATCH net-next v2 02/13] net: renesas: rswitch: use device instead of net_device
From: Michael Dege
Date: Fri Mar 27 2026 - 04:30:30 EST
In upcoming changes adding VLAN support, struct net_device will not
be available in all cases; therefore use struct device instead.
Since rswitch_gwca_queue_ext_ts_fill() no longer receives the
net_device, it can no longer look up the rswitch_device via
netdev_priv(), so the per-port INFO1_SPN descriptor setup is dropped
from this function as well.
Signed-off-by: Michael Dege <michael.dege@xxxxxxxxxxx>
---
drivers/net/ethernet/renesas/rswitch_main.c | 34 ++++++++++++++---------------
1 file changed, 16 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ethernet/renesas/rswitch_main.c b/drivers/net/ethernet/renesas/rswitch_main.c
index f6d1e610e7fa..f10e188bc0bd 100644
--- a/drivers/net/ethernet/renesas/rswitch_main.c
+++ b/drivers/net/ethernet/renesas/rswitch_main.c
@@ -302,13 +302,13 @@ static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq,
return -ENOMEM;
}
-static void rswitch_gwca_queue_free(struct net_device *ndev,
+static void rswitch_gwca_queue_free(struct device *dev,
struct rswitch_gwca_queue *gq)
{
unsigned int i;
if (!gq->dir_tx) {
- dma_free_coherent(ndev->dev.parent,
+ dma_free_coherent(dev,
sizeof(struct rswitch_ext_ts_desc) *
(gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
gq->rx_ring = NULL;
@@ -318,7 +318,7 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
kfree(gq->rx_bufs);
gq->rx_bufs = NULL;
} else {
- dma_free_coherent(ndev->dev.parent,
+ dma_free_coherent(dev,
sizeof(struct rswitch_ext_desc) *
(gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
gq->tx_ring = NULL;
@@ -357,7 +357,7 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
goto out;
- gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
+ gq->rx_ring = dma_alloc_coherent(&priv->pdev->dev,
sizeof(struct rswitch_ext_ts_desc) *
(gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
} else {
@@ -367,7 +367,7 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
gq->unmap_addrs = kzalloc_objs(*gq->unmap_addrs, gq->ring_size);
if (!gq->unmap_addrs)
goto out;
- gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
+ gq->tx_ring = dma_alloc_coherent(&priv->pdev->dev,
sizeof(struct rswitch_ext_desc) *
(gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
}
@@ -385,7 +385,7 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
return 0;
out:
- rswitch_gwca_queue_free(ndev, gq);
+ rswitch_gwca_queue_free(&priv->pdev->dev, gq);
return -ENOMEM;
}
@@ -467,12 +467,11 @@ static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
}
}
-static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
+static int rswitch_gwca_queue_ext_ts_fill(struct device *dev,
struct rswitch_gwca_queue *gq,
unsigned int start_index,
unsigned int num)
{
- struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_ext_ts_desc *desc;
unsigned int i, index;
dma_addr_t dma_addr;
@@ -481,18 +480,17 @@ static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
index = (i + start_index) % gq->ring_size;
desc = &gq->rx_ring[index];
if (!gq->dir_tx) {
- dma_addr = dma_map_single(ndev->dev.parent,
+ dma_addr = dma_map_single(dev,
gq->rx_bufs[index] + RSWITCH_HEADROOM,
RSWITCH_MAP_BUF_SIZE,
DMA_FROM_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ if (dma_mapping_error(dev, dma_addr))
goto err;
desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
rswitch_desc_set_dptr(&desc->desc, dma_addr);
dma_wmb();
desc->desc.die_dt = DT_FEMPTY | DIE;
- desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
} else {
desc->desc.die_dt = DT_EEMPTY | DIE;
}
@@ -506,7 +504,7 @@ static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
index = (i + start_index) % gq->ring_size;
desc = &gq->rx_ring[index];
dma_addr = rswitch_desc_get_dptr(&desc->desc);
- dma_unmap_single(ndev->dev.parent, dma_addr,
+ dma_unmap_single(dev, dma_addr,
RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
}
}
@@ -514,7 +512,7 @@ static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
return -ENOMEM;
}
-static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
+static int rswitch_gwca_queue_ext_ts_format(struct device *dev,
struct rswitch_private *priv,
struct rswitch_gwca_queue *gq)
{
@@ -524,7 +522,7 @@ static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
int err;
memset(gq->rx_ring, 0, ring_size);
- err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
+ err = rswitch_gwca_queue_ext_ts_fill(dev, gq, 0, gq->ring_size);
if (err < 0)
return err;
@@ -636,7 +634,7 @@ static void rswitch_txdmac_free(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
- rswitch_gwca_queue_free(ndev, rdev->tx_queue);
+ rswitch_gwca_queue_free(ndev->dev.parent, rdev->tx_queue);
rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}
@@ -670,7 +668,7 @@ static void rswitch_rxdmac_free(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
- rswitch_gwca_queue_free(ndev, rdev->rx_queue);
+ rswitch_gwca_queue_free(ndev->dev.parent, rdev->rx_queue);
rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}
@@ -679,7 +677,7 @@ static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index)
struct rswitch_device *rdev = priv->rdev[index];
struct net_device *ndev = rdev->ndev;
- return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
+ return rswitch_gwca_queue_ext_ts_format(ndev->dev.parent, priv, rdev->rx_queue);
}
static int rswitch_gwca_hw_init(struct rswitch_private *priv)
@@ -870,7 +868,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
if (ret < 0)
goto err;
- ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
+ ret = rswitch_gwca_queue_ext_ts_fill(ndev->dev.parent, gq, gq->dirty, num);
if (ret < 0)
goto err;
gq->dirty = rswitch_next_queue_index(gq, false, num);
--
2.43.0