author	Joshua Hay <joshua.a.hay@intel.com>	2025-11-03 13:20:36 -0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2026-01-17 16:35:27 +0100
commit	258e7c55f9395b4711afa590fb0230911c602b8c (patch)
tree	2e515fa3ad19d38b82280d5f8fc5234aac752a8a
parent	bfeb4dfc805003fe4cfb1923f932581bd7e8db92 (diff)
idpf: cap maximum Rx buffer size
[ Upstream commit 086efe0a1ecc36cffe46640ce12649a4cd3ff171 ]

The HW only supports a maximum Rx buffer size of 16K-128. On systems
using large pages, the libeth logic can configure the buffer size to
be larger than this. The upper bound is PAGE_SIZE while the lower
bound is the MTU rounded up to the nearest power of 2. For example,
ARM systems with a 64K page size and an MTU of 9000 will set the Rx
buffer size to 16K, which will cause the config Rx queues message to
fail.

Initialize the bufq/fill queue buf_len field to the maximum supported
size. This will trigger the libeth logic to cap the maximum Rx buffer
size by reducing the upper bound.

Fixes: 74d1412ac8f37 ("idpf: use libeth Rx buffer management for payload buffer")
Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
Acked-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
Reviewed-by: David Decotigny <ddecotig@google.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
 drivers/net/ethernet/intel/idpf/idpf_txrx.c | 8 +++++---
 drivers/net/ethernet/intel/idpf/idpf_txrx.h | 1 +
 2 files changed, 6 insertions(+), 3 deletions(-)
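For context, the bound selection described in the commit message can be
modeled with a short standalone sketch. This is a simplified userspace
illustration under stated assumptions, not the libeth implementation:
roundup_pow2() is a hypothetical stand-in for the kernel's
roundup_pow_of_two(), and the min()-style clamping only approximates how
presetting buf_len lowers libeth's upper bound.

/*
 * Sketch of the Rx buffer size bounds from the commit message, using
 * the 64K-page / 9000-MTU example. roundup_pow2() is a stand-in for
 * the kernel's roundup_pow_of_two(); not the actual libeth code.
 */
#include <stdio.h>

#define IDPF_RX_MAX_BUF_SZ	(16384 - 128)	/* HW maximum: 16K-128 */

static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int page_size = 65536;		/* ARM system, 64K pages */
	unsigned int mtu = 9000;
	unsigned int lower = roundup_pow2(mtu);	/* 16384: exceeds HW max */
	unsigned int upper = page_size;

	printf("uncapped size: %u (HW max %u) -> config Rx queues fails\n",
	       lower, IDPF_RX_MAX_BUF_SZ);

	/* Presetting buf_len to IDPF_RX_MAX_BUF_SZ reduces the upper
	 * bound, keeping the chosen size within what the HW supports. */
	upper = upper < IDPF_RX_MAX_BUF_SZ ? upper : IDPF_RX_MAX_BUF_SZ;
	printf("capped upper bound: %u -> config succeeds\n", upper);

	return 0;
}

With these example values, the uncapped pick of 16384 lands just above
the 16256 hardware limit, which is exactly the failure the patch avoids.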
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 3698979b4c9e..f66948f5de78 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -695,9 +695,10 @@ err:
 static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
 {
 	struct libeth_fq fq = {
-		.count	= rxq->desc_count,
-		.type	= LIBETH_FQE_MTU,
-		.nid	= idpf_q_vector_to_mem(rxq->q_vector),
+		.count		= rxq->desc_count,
+		.type		= LIBETH_FQE_MTU,
+		.buf_len	= IDPF_RX_MAX_BUF_SZ,
+		.nid		= idpf_q_vector_to_mem(rxq->q_vector),
 	};
 	int ret;
 
@@ -754,6 +755,7 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
 		.truesize	= bufq->truesize,
 		.count		= bufq->desc_count,
 		.type		= type,
+		.buf_len	= IDPF_RX_MAX_BUF_SZ,
 		.hsplit		= idpf_queue_has(HSPLIT_EN, bufq),
 		.xdp		= idpf_xdp_enabled(bufq->q_vector->vport),
 		.nid		= idpf_q_vector_to_mem(bufq->q_vector),
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 0472698ca192..423cc9486dce 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -101,6 +101,7 @@ do { \
 		idx = 0;					\
 } while (0)
 
+#define IDPF_RX_MAX_BUF_SZ	(16384 - 128)
 #define IDPF_RX_BUF_STRIDE	32
 #define IDPF_RX_BUF_POST_STRIDE	16
 #define IDPF_LOW_WATERMARK	64