
Commit 23233e5

LorenzoBianconi authored and davem330 committed
net: ethernet: mtk_eth_soc: rely on page_pool for single page buffers
Rely on page_pool allocator for single page buffers in order to keep
them dma mapped and add skb recycling support.

Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 502c6f8 commit 23233e5
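
The patch moves the non-LRO RX rings from per-buffer napi_alloc_frag()/netdev_alloc_frag() plus dma_map_single()/dma_unmap_single() to buffers owned by a page_pool, which keeps pages DMA-mapped for their whole lifetime and lets freed skbs recycle their pages back into the pool. For context, below is a minimal sketch of the page_pool lifecycle this relies on; the helper names (rx_pool_create, rx_buf_get, rx_buf_put) and the parameter values are illustrative, not the driver's exact ones.

/* Minimal page_pool lifecycle sketch (illustrative names and values). */
#include <linux/bpf.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <net/page_pool.h>

static struct page_pool *rx_pool_create(struct device *dev, int ring_size)
{
        struct page_pool_params pp_params = {
                .order     = 0,                         /* single pages */
                .flags     = PP_FLAG_DMA_MAP |          /* pool maps pages once */
                             PP_FLAG_DMA_SYNC_DEV,      /* pool syncs for device on recycle */
                .pool_size = ring_size,
                .nid       = NUMA_NO_NODE,
                .dev       = dev,
                .dma_dir   = DMA_FROM_DEVICE,
                .offset    = XDP_PACKET_HEADROOM,       /* headroom reserved in each page */
                .max_len   = PAGE_SIZE - XDP_PACKET_HEADROOM,
        };

        return page_pool_create(&pp_params);            /* ERR_PTR() on failure */
}

/* RX refill: the page comes back already DMA-mapped by the pool. */
static void *rx_buf_get(struct page_pool *pp, dma_addr_t *dma_addr)
{
        struct page *page = page_pool_alloc_pages(pp, GFP_ATOMIC | __GFP_NOWARN);

        if (!page)
                return NULL;

        *dma_addr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
        return page_address(page);
}

/* Return a buffer to the pool instead of freeing it; page_pool_destroy()
 * is only safe once all outstanding pages have been returned.
 */
static void rx_buf_put(struct page_pool *pp, void *data, bool napi_context)
{
        page_pool_put_full_page(pp, virt_to_head_page(data), napi_context);
}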

3 files changed, 156 insertions(+), 40 deletions(-)

drivers/net/ethernet/mediatek/Kconfig

Lines changed: 1 addition & 0 deletions

@@ -17,6 +17,7 @@ config NET_MEDIATEK_SOC
         select PINCTRL
         select PHYLINK
         select DIMLIB
+        select PAGE_POOL
         help
           This driver supports the gigabit ethernet MACs in the
           MediaTek SoC family.

drivers/net/ethernet/mediatek/mtk_eth_soc.c

Lines changed: 145 additions & 40 deletions

@@ -1432,6 +1432,68 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
         }
 }
 
+static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
+                                              struct xdp_rxq_info *xdp_q,
+                                              int id, int size)
+{
+        struct page_pool_params pp_params = {
+                .order = 0,
+                .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+                .pool_size = size,
+                .nid = NUMA_NO_NODE,
+                .dev = eth->dma_dev,
+                .dma_dir = DMA_FROM_DEVICE,
+                .offset = MTK_PP_HEADROOM,
+                .max_len = MTK_PP_MAX_BUF_SIZE,
+        };
+        struct page_pool *pp;
+        int err;
+
+        pp = page_pool_create(&pp_params);
+        if (IS_ERR(pp))
+                return pp;
+
+        err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
+                                 id, PAGE_SIZE);
+        if (err < 0)
+                goto err_free_pp;
+
+        err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
+        if (err)
+                goto err_unregister_rxq;
+
+        return pp;
+
+err_unregister_rxq:
+        xdp_rxq_info_unreg(xdp_q);
+err_free_pp:
+        page_pool_destroy(pp);
+
+        return ERR_PTR(err);
+}
+
+static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+                                    gfp_t gfp_mask)
+{
+        struct page *page;
+
+        page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
+        if (!page)
+                return NULL;
+
+        *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
+        return page_address(page);
+}
+
+static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
+{
+        if (ring->page_pool)
+                page_pool_put_full_page(ring->page_pool,
+                                        virt_to_head_page(data), napi);
+        else
+                skb_free_frag(data);
+}
+
 static int mtk_poll_rx(struct napi_struct *napi, int budget,
                        struct mtk_eth *eth)
 {
@@ -1445,9 +1507,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 
         while (done < budget) {
                 unsigned int pktlen, *rxdcsum;
+                u32 hash, reason, reserve_len;
                 struct net_device *netdev;
                 dma_addr_t dma_addr;
-                u32 hash, reason;
                 int mac = 0;
 
                 ring = mtk_get_rx_ring(eth);
@@ -1478,36 +1540,54 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                         goto release_desc;
 
                 /* alloc new buffer */
-                if (ring->frag_size <= PAGE_SIZE)
-                        new_data = napi_alloc_frag(ring->frag_size);
-                else
-                        new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
-                if (unlikely(!new_data)) {
-                        netdev->stats.rx_dropped++;
-                        goto release_desc;
-                }
-                dma_addr = dma_map_single(eth->dma_dev,
-                                          new_data + NET_SKB_PAD +
-                                          eth->ip_align,
-                                          ring->buf_size,
-                                          DMA_FROM_DEVICE);
-                if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
-                        skb_free_frag(new_data);
-                        netdev->stats.rx_dropped++;
-                        goto release_desc;
-                }
+                if (ring->page_pool) {
+                        new_data = mtk_page_pool_get_buff(ring->page_pool,
+                                                          &dma_addr,
+                                                          GFP_ATOMIC);
+                        if (unlikely(!new_data)) {
+                                netdev->stats.rx_dropped++;
+                                goto release_desc;
+                        }
+                } else {
+                        if (ring->frag_size <= PAGE_SIZE)
+                                new_data = napi_alloc_frag(ring->frag_size);
+                        else
+                                new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
+
+                        if (unlikely(!new_data)) {
+                                netdev->stats.rx_dropped++;
+                                goto release_desc;
+                        }
 
-                dma_unmap_single(eth->dma_dev, trxd.rxd1,
-                                 ring->buf_size, DMA_FROM_DEVICE);
+                        dma_addr = dma_map_single(eth->dma_dev,
+                                new_data + NET_SKB_PAD + eth->ip_align,
+                                ring->buf_size, DMA_FROM_DEVICE);
+                        if (unlikely(dma_mapping_error(eth->dma_dev,
+                                                       dma_addr))) {
+                                skb_free_frag(new_data);
+                                netdev->stats.rx_dropped++;
+                                goto release_desc;
+                        }
+
+                        dma_unmap_single(eth->dma_dev, trxd.rxd1,
+                                         ring->buf_size, DMA_FROM_DEVICE);
+                }
 
                 /* receive data */
                 skb = build_skb(data, ring->frag_size);
                 if (unlikely(!skb)) {
-                        skb_free_frag(data);
+                        mtk_rx_put_buff(ring, data, true);
                         netdev->stats.rx_dropped++;
                         goto skip_rx;
                 }
-                skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+                if (ring->page_pool) {
+                        reserve_len = MTK_PP_HEADROOM;
+                        skb_mark_for_recycle(skb);
+                } else {
+                        reserve_len = NET_SKB_PAD + NET_IP_ALIGN;
+                }
+                skb_reserve(skb, reserve_len);
 
                 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
                 skb->dev = netdev;
@@ -1561,15 +1641,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 skip_rx:
                 ring->data[idx] = new_data;
                 rxd->rxd1 = (unsigned int)dma_addr;
-
 release_desc:
                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
                         rxd->rxd2 = RX_DMA_LSO;
                 else
                         rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
 
                 ring->calc_idx = idx;
-
                 done++;
         }
 
@@ -1933,13 +2011,15 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
         if (!ring->data)
                 return -ENOMEM;
 
-        for (i = 0; i < rx_dma_size; i++) {
-                if (ring->frag_size <= PAGE_SIZE)
-                        ring->data[i] = netdev_alloc_frag(ring->frag_size);
-                else
-                        ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
-                if (!ring->data[i])
-                        return -ENOMEM;
+        if (!eth->hwlro) {
+                struct page_pool *pp;
+
+                pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
+                                          rx_dma_size);
+                if (IS_ERR(pp))
+                        return PTR_ERR(pp);
+
+                ring->page_pool = pp;
         }
 
         ring->dma = dma_alloc_coherent(eth->dma_dev,
@@ -1950,16 +2030,33 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 
         for (i = 0; i < rx_dma_size; i++) {
                 struct mtk_rx_dma_v2 *rxd;
-
-                dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
-                                ring->data[i] + NET_SKB_PAD + eth->ip_align,
-                                ring->buf_size,
-                                DMA_FROM_DEVICE);
-                if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
-                        return -ENOMEM;
+                dma_addr_t dma_addr;
+                void *data;
 
                 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+                if (ring->page_pool) {
+                        data = mtk_page_pool_get_buff(ring->page_pool,
+                                                      &dma_addr, GFP_KERNEL);
+                        if (!data)
+                                return -ENOMEM;
+                } else {
+                        if (ring->frag_size <= PAGE_SIZE)
+                                data = netdev_alloc_frag(ring->frag_size);
+                        else
+                                data = mtk_max_lro_buf_alloc(GFP_KERNEL);
+
+                        if (!data)
+                                return -ENOMEM;
+
+                        dma_addr = dma_map_single(eth->dma_dev,
+                                data + NET_SKB_PAD + eth->ip_align,
+                                ring->buf_size, DMA_FROM_DEVICE);
+                        if (unlikely(dma_mapping_error(eth->dma_dev,
+                                                       dma_addr)))
+                                return -ENOMEM;
+                }
                 rxd->rxd1 = (unsigned int)dma_addr;
+                ring->data[i] = data;
 
                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
                         rxd->rxd2 = RX_DMA_LSO;
@@ -1975,6 +2072,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
                         rxd->rxd8 = 0;
                 }
         }
+
         ring->dma_size = rx_dma_size;
         ring->calc_idx_update = false;
         ring->calc_idx = rx_dma_size - 1;
@@ -2026,7 +2124,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
 
                 dma_unmap_single(eth->dma_dev, rxd->rxd1,
                                  ring->buf_size, DMA_FROM_DEVICE);
-                skb_free_frag(ring->data[i]);
+                mtk_rx_put_buff(ring, ring->data[i], false);
         }
         kfree(ring->data);
         ring->data = NULL;
@@ -2038,6 +2136,13 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
                                   ring->dma, ring->phys);
                 ring->dma = NULL;
         }
+
+        if (ring->page_pool) {
+                if (xdp_rxq_info_is_reg(&ring->xdp_q))
+                        xdp_rxq_info_unreg(&ring->xdp_q);
+                page_pool_destroy(ring->page_pool);
+                ring->page_pool = NULL;
+        }
 }
 
 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
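
With the pool in place, the RX completion path builds the skb directly on the pooled page and calls skb_mark_for_recycle(), so the skb free path returns the page to its page_pool instead of freeing it. A minimal sketch of that pattern under the same illustrative assumptions as the earlier snippet (rx_build_recyclable_skb, headroom and pktlen are placeholders, not driver code):

#include <linux/skbuff.h>
#include <net/page_pool.h>

/* "data" is a page_pool buffer obtained as in the earlier sketch. */
static struct sk_buff *rx_build_recyclable_skb(struct page_pool *pp, void *data,
                                               unsigned int headroom,
                                               unsigned int pktlen)
{
        struct sk_buff *skb = build_skb(data, PAGE_SIZE);

        if (unlikely(!skb)) {
                /* On failure hand the page straight back to the pool
                 * (allow_direct = true, since we run in NAPI context).
                 */
                page_pool_put_full_page(pp, virt_to_head_page(data), true);
                return NULL;
        }

        skb_mark_for_recycle(skb);      /* skb free path recycles the page into pp */
        skb_reserve(skb, headroom);     /* e.g. the pool's .offset headroom */
        skb_put(skb, pktlen);

        return skb;
}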

drivers/net/ethernet/mediatek/mtk_eth_soc.h

Lines changed: 10 additions & 0 deletions

@@ -18,6 +18,8 @@
 #include <linux/rhashtable.h>
 #include <linux/dim.h>
 #include <linux/bitfield.h>
+#include <net/page_pool.h>
+#include <linux/bpf_trace.h>
 #include "mtk_ppe.h"
 
 #define MTK_QDMA_PAGE_SIZE      2048
@@ -49,6 +51,11 @@
 #define MTK_HW_FEATURES_MT7628  (NETIF_F_SG | NETIF_F_RXCSUM)
 #define NEXT_DESP_IDX(X, Y)     (((X) + 1) & ((Y) - 1))
 
+#define MTK_PP_HEADROOM         XDP_PACKET_HEADROOM
+#define MTK_PP_PAD              (MTK_PP_HEADROOM + \
+                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define MTK_PP_MAX_BUF_SIZE     (PAGE_SIZE - MTK_PP_PAD)
+
 #define MTK_QRX_OFFSET          0x10
 
 #define MTK_MAX_RX_RING_NUM     4
@@ -745,6 +752,9 @@ struct mtk_rx_ring {
         bool calc_idx_update;
         u16 calc_idx;
         u32 crx_idx_reg;
+        /* page_pool */
+        struct page_pool *page_pool;
+        struct xdp_rxq_info xdp_q;
 };
 
 enum mkt_eth_capabilities {
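
For a sense of scale: assuming 4 KiB pages and the default XDP_PACKET_HEADROOM of 256 bytes, and with SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) coming to roughly 320 bytes on a typical 64-bit build, MTK_PP_MAX_BUF_SIZE works out to about 4096 - 256 - 320 = 3520 bytes of usable frame data per pooled page (the exact figure depends on architecture and kernel configuration).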
