@@ -1432,6 +1432,68 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
 	}
 }
 
+static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
+					      struct xdp_rxq_info *xdp_q,
+					      int id, int size)
+{
+	struct page_pool_params pp_params = {
+		.order = 0,
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+		.pool_size = size,
+		.nid = NUMA_NO_NODE,
+		.dev = eth->dma_dev,
+		.dma_dir = DMA_FROM_DEVICE,
+		.offset = MTK_PP_HEADROOM,
+		.max_len = MTK_PP_MAX_BUF_SIZE,
+	};
+	struct page_pool *pp;
+	int err;
+
+	pp = page_pool_create(&pp_params);
+	if (IS_ERR(pp))
+		return pp;
+
+	err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
+				 id, PAGE_SIZE);
+	if (err < 0)
+		goto err_free_pp;
+
+	err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
+	if (err)
+		goto err_unregister_rxq;
+
+	return pp;
+
+err_unregister_rxq:
+	xdp_rxq_info_unreg(xdp_q);
+err_free_pp:
+	page_pool_destroy(pp);
+
+	return ERR_PTR(err);
+}
+
+static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+				    gfp_t gfp_mask)
+{
+	struct page *page;
+
+	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
+	if (!page)
+		return NULL;
+
+	*dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
+	return page_address(page);
+}
+
+static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
+{
+	if (ring->page_pool)
+		page_pool_put_full_page(ring->page_pool,
+					virt_to_head_page(data), napi);
+	else
+		skb_free_frag(data);
+}
+
 static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		       struct mtk_eth *eth)
 {
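Note: the three helpers above follow the usual page_pool recycling pattern: the pool DMA-maps pages once up front (PP_FLAG_DMA_MAP) and re-syncs them for the device on reuse (PP_FLAG_DMA_SYNC_DEV), the driver hands out page_address() plus a fixed headroom, and buffers go back to the pool instead of the page allocator. The sketch below shows that pattern in isolation; it is illustrative only, not part of this patch. The demo_* names and the literal 256-byte headroom are assumptions, and it presumes the pre-6.6 header layout (<net/page_pool.h>) that this driver targets here (where it uses MTK_PP_HEADROOM/MTK_PP_MAX_BUF_SIZE instead of the constants below).

```c
#include <linux/dma-mapping.h>
#include <net/page_pool.h>

#define DEMO_HEADROOM	256	/* stand-in for MTK_PP_HEADROOM */

/* Illustrative only: create a small pool, take one buffer, give it back. */
static int demo_page_pool_roundtrip(struct device *dev)
{
	struct page_pool_params pp_params = {
		.order		= 0,			/* single pages */
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= 256,			/* ring-sized in the driver */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,			/* device doing the DMA */
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= DEMO_HEADROOM,
		.max_len	= PAGE_SIZE - DEMO_HEADROOM,
	};
	struct page_pool *pp;
	struct page *page;
	dma_addr_t dma;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp))
		return PTR_ERR(pp);

	page = page_pool_alloc_pages(pp, GFP_KERNEL | __GFP_NOWARN);
	if (!page) {
		page_pool_destroy(pp);
		return -ENOMEM;
	}

	/* The pool already mapped the page; just offset past the headroom. */
	dma = page_pool_get_dma_addr(page) + DEMO_HEADROOM;
	(void)dma;

	/* Recycle the whole page into the pool (false: not in NAPI context). */
	page_pool_put_full_page(pp, page, false);

	page_pool_destroy(pp);
	return 0;
}
```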
@@ -1445,9 +1507,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 
 	while (done < budget) {
 		unsigned int pktlen, *rxdcsum;
+		u32 hash, reason, reserve_len;
 		struct net_device *netdev;
 		dma_addr_t dma_addr;
-		u32 hash, reason;
 		int mac = 0;
 
 		ring = mtk_get_rx_ring(eth);
@@ -1478,36 +1540,54 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			goto release_desc;
 
 		/* alloc new buffer */
-		if (ring->frag_size <= PAGE_SIZE)
-			new_data = napi_alloc_frag(ring->frag_size);
-		else
-			new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
-		if (unlikely(!new_data)) {
-			netdev->stats.rx_dropped++;
-			goto release_desc;
-		}
-		dma_addr = dma_map_single(eth->dma_dev,
-					  new_data + NET_SKB_PAD +
-					  eth->ip_align,
-					  ring->buf_size,
-					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
-			skb_free_frag(new_data);
-			netdev->stats.rx_dropped++;
-			goto release_desc;
-		}
+		if (ring->page_pool) {
+			new_data = mtk_page_pool_get_buff(ring->page_pool,
+							  &dma_addr,
+							  GFP_ATOMIC);
+			if (unlikely(!new_data)) {
+				netdev->stats.rx_dropped++;
+				goto release_desc;
+			}
+		} else {
+			if (ring->frag_size <= PAGE_SIZE)
+				new_data = napi_alloc_frag(ring->frag_size);
+			else
+				new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
+
+			if (unlikely(!new_data)) {
+				netdev->stats.rx_dropped++;
+				goto release_desc;
+			}
 
-		dma_unmap_single(eth->dma_dev, trxd.rxd1,
-				 ring->buf_size, DMA_FROM_DEVICE);
+			dma_addr = dma_map_single(eth->dma_dev,
+				new_data + NET_SKB_PAD + eth->ip_align,
+				ring->buf_size, DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(eth->dma_dev,
+						       dma_addr))) {
+				skb_free_frag(new_data);
+				netdev->stats.rx_dropped++;
+				goto release_desc;
+			}
+
+			dma_unmap_single(eth->dma_dev, trxd.rxd1,
+					 ring->buf_size, DMA_FROM_DEVICE);
+		}
 
 		/* receive data */
 		skb = build_skb(data, ring->frag_size);
 		if (unlikely(!skb)) {
-			skb_free_frag(data);
+			mtk_rx_put_buff(ring, data, true);
 			netdev->stats.rx_dropped++;
 			goto skip_rx;
 		}
-		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		if (ring->page_pool) {
+			reserve_len = MTK_PP_HEADROOM;
+			skb_mark_for_recycle(skb);
+		} else {
+			reserve_len = NET_SKB_PAD + NET_IP_ALIGN;
+		}
+		skb_reserve(skb, reserve_len);
 
 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
 		skb->dev = netdev;
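Note: in the page_pool branch the buffer is already DMA-mapped by the pool, so the per-packet dma_map_single()/dma_unmap_single() pair disappears, and skb_mark_for_recycle() makes the page return to the pool when the skb is consumed. A condensed sketch of that skb-over-pool-buffer pattern is below; it is not the driver's code, the demo_* name is hypothetical, and it assumes the buffer came from a pool created with PP_FLAG_DMA_MAP and that the caller runs in NAPI context.

```c
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>

/* Illustrative sketch: wrap a page_pool buffer in an skb that recycles. */
static struct sk_buff *demo_build_recycling_skb(struct page_pool *pp,
						void *data, unsigned int len,
						unsigned int headroom,
						unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = build_skb(data, frag_size);
	if (unlikely(!skb)) {
		/* On failure, hand the page straight back to the pool
		 * (true: caller is in NAPI/softirq context).
		 */
		page_pool_put_full_page(pp, virt_to_head_page(data), true);
		return NULL;
	}

	/* Freeing this skb now returns its page to the pool rather than
	 * the page allocator, which is what keeps the RX path cheap.
	 */
	skb_mark_for_recycle(skb);
	skb_reserve(skb, headroom);	/* e.g. MTK_PP_HEADROOM in this driver */
	__skb_put(skb, len);

	return skb;
}
```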
@@ -1561,15 +1641,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 skip_rx:
 		ring->data[idx] = new_data;
 		rxd->rxd1 = (unsigned int)dma_addr;
-
 release_desc:
 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
 			rxd->rxd2 = RX_DMA_LSO;
 		else
 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
 
 		ring->calc_idx = idx;
-
 		done++;
 	}
 
@@ -1933,13 +2011,15 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 	if (!ring->data)
 		return -ENOMEM;
 
-	for (i = 0; i < rx_dma_size; i++) {
-		if (ring->frag_size <= PAGE_SIZE)
-			ring->data[i] = netdev_alloc_frag(ring->frag_size);
-		else
-			ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
-		if (!ring->data[i])
-			return -ENOMEM;
+	if (!eth->hwlro) {
+		struct page_pool *pp;
+
+		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
+					  rx_dma_size);
+		if (IS_ERR(pp))
+			return PTR_ERR(pp);
+
+		ring->page_pool = pp;
 	}
 
 	ring->dma = dma_alloc_coherent(eth->dma_dev,
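Note: with hardware LRO disabled, each RX ring now gets its own pool sized to the ring plus an xdp_rxq_info registered against it, so the same buffers can later back XDP programs. The create/destroy ordering the patch relies on (register the rxq, attach the MEM_TYPE_PAGE_POOL memory model, and on teardown unregister before destroying the pool, as mtk_rx_clean() does below) is sketched here in generic form; the demo_* helpers are hypothetical and use the plain xdp_rxq_info_reg() variant rather than the driver's __xdp_rxq_info_reg() call.

```c
#include <net/page_pool.h>
#include <net/xdp.h>

/* Illustrative: pair one RX ring with a pool and an xdp_rxq_info. */
static int demo_ring_attach_pool(struct net_device *dev,
				 struct xdp_rxq_info *xdp_q,
				 struct page_pool *pp,
				 u32 ring_no, unsigned int napi_id)
{
	int err;

	/* Register the RX queue first, then tell XDP that its buffers
	 * come from a page_pool so drops/redirects can recycle them.
	 */
	err = xdp_rxq_info_reg(xdp_q, dev, ring_no, napi_id);
	if (err < 0)
		return err;

	err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
	if (err) {
		xdp_rxq_info_unreg(xdp_q);
		return err;
	}

	return 0;
}

/* Matching teardown order, mirroring mtk_rx_clean(). */
static void demo_ring_detach_pool(struct xdp_rxq_info *xdp_q,
				  struct page_pool *pp)
{
	if (xdp_rxq_info_is_reg(xdp_q))
		xdp_rxq_info_unreg(xdp_q);
	page_pool_destroy(pp);
}
```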
@@ -1950,16 +2030,33 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 
 	for (i = 0; i < rx_dma_size; i++) {
 		struct mtk_rx_dma_v2 *rxd;
-
-		dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
-				ring->data[i] + NET_SKB_PAD + eth->ip_align,
-				ring->buf_size,
-				DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
-			return -ENOMEM;
+		dma_addr_t dma_addr;
+		void *data;
 
 		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+		if (ring->page_pool) {
+			data = mtk_page_pool_get_buff(ring->page_pool,
+						      &dma_addr, GFP_KERNEL);
+			if (!data)
+				return -ENOMEM;
+		} else {
+			if (ring->frag_size <= PAGE_SIZE)
+				data = netdev_alloc_frag(ring->frag_size);
+			else
+				data = mtk_max_lro_buf_alloc(GFP_KERNEL);
+
+			if (!data)
+				return -ENOMEM;
+
+			dma_addr = dma_map_single(eth->dma_dev,
+				data + NET_SKB_PAD + eth->ip_align,
+				ring->buf_size, DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(eth->dma_dev,
+						       dma_addr)))
+				return -ENOMEM;
+		}
 		rxd->rxd1 = (unsigned int)dma_addr;
+		ring->data[i] = data;
 
 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
 			rxd->rxd2 = RX_DMA_LSO;
@@ -1975,6 +2072,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 			rxd->rxd8 = 0;
 		}
 	}
+
 	ring->dma_size = rx_dma_size;
 	ring->calc_idx_update = false;
 	ring->calc_idx = rx_dma_size - 1;
@@ -2026,7 +2124,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
 
 			dma_unmap_single(eth->dma_dev, rxd->rxd1,
 					 ring->buf_size, DMA_FROM_DEVICE);
-			skb_free_frag(ring->data[i]);
+			mtk_rx_put_buff(ring, ring->data[i], false);
 		}
 		kfree(ring->data);
 		ring->data = NULL;
@@ -2038,6 +2136,13 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
 				  ring->dma, ring->phys);
 		ring->dma = NULL;
 	}
+
+	if (ring->page_pool) {
+		if (xdp_rxq_info_is_reg(&ring->xdp_q))
+			xdp_rxq_info_unreg(&ring->xdp_q);
+		page_pool_destroy(ring->page_pool);
+		ring->page_pool = NULL;
+	}
 }
 
 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
0 commit comments