Diffstat (limited to 'net/bpf/test_run.c')
-rw-r--r--	net/bpf/test_run.c	25
1 file changed, 17 insertions, 8 deletions
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 55a79337ac51..6b04f47301c1 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -1231,8 +1231,6 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
batch_size = NAPI_POLL_WEIGHT;
else if (batch_size > TEST_XDP_MAX_BATCH)
return -E2BIG;
-
- headroom += sizeof(struct xdp_page_head);
} else if (batch_size) {
return -EINVAL;
}
@@ -1245,16 +1243,26 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
/* There can't be user provided data before the meta data */
if (ctx->data_meta || ctx->data_end > kattr->test.data_size_in ||
ctx->data > ctx->data_end ||
- unlikely(xdp_metalen_invalid(ctx->data)) ||
(do_live && (kattr->test.data_out || kattr->test.ctx_out)))
goto free_ctx;
- /* Meta data is allocated from the headroom */
- headroom -= ctx->data;
meta_sz = ctx->data;
+ if (xdp_metalen_invalid(meta_sz) || meta_sz > headroom - sizeof(struct xdp_frame))
+ goto free_ctx;
+
+ /* Meta data is allocated from the headroom */
+ headroom -= meta_sz;
linear_sz = ctx->data_end;
}
+ /* The xdp_page_head structure takes up space in each page, limiting the
+ * size of the packet data; add the extra size to headroom here to make
+ * sure it's accounted for in the length checks below, but not in the
+ * metadata size check above.
+ */
+ if (do_live)
+ headroom += sizeof(struct xdp_page_head);
+
max_linear_sz = PAGE_SIZE - headroom - tailroom;
linear_sz = min_t(u32, linear_sz, max_linear_sz);
@@ -1292,13 +1300,13 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
if (sinfo->nr_frags == MAX_SKB_FRAGS) {
ret = -ENOMEM;
- goto out;
+ goto out_put_dev;
}
page = alloc_page(GFP_KERNEL);
if (!page) {
ret = -ENOMEM;
- goto out;
+ goto out_put_dev;
}
frag = &sinfo->frags[sinfo->nr_frags++];
@@ -1310,7 +1318,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
if (copy_from_user(page_address(page), data_in + size,
data_len)) {
ret = -EFAULT;
- goto out;
+ goto out_put_dev;
}
sinfo->xdp_frags_size += data_len;
size += data_len;
@@ -1325,6 +1333,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
else
ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
+out_put_dev:
/* We convert the xdp_buff back to an xdp_md before checking the return
* code so the reference count of any held netdevice will be decremented
* even if the test run failed.
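
As a reading aid for the headroom accounting introduced in the second hunk above: the patch carves the metadata out of the packet headroom first, and only afterwards adds the per-page xdp_page_head overhead (live frames only), so that overhead counts against the linear-data budget but not against the metadata size check. Below is a minimal standalone userspace sketch of that arithmetic. All constants are illustrative placeholders, not the real values of PAGE_SIZE, XDP_PACKET_HEADROOM, sizeof(struct xdp_frame) or sizeof(struct xdp_page_head), and the xdp_metalen_invalid() alignment check is omitted.

/* Standalone sketch of the headroom/metadata accounting performed by
 * bpf_prog_test_run_xdp() after this patch. All sizes below are
 * illustrative placeholders, not the real kernel values.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_SKETCH	4096u	/* placeholder for PAGE_SIZE */
#define HEADROOM_SKETCH		256u	/* placeholder for XDP_PACKET_HEADROOM */
#define TAILROOM_SKETCH		320u	/* placeholder for the skb_shared_info tailroom */
#define XDP_FRAME_SZ_SKETCH	40u	/* placeholder for sizeof(struct xdp_frame) */
#define XDP_PAGE_HEAD_SKETCH	128u	/* placeholder for sizeof(struct xdp_page_head) */

/* Mirrors the patched flow: metadata is carved out of the headroom first,
 * then, for live frames only, the per-page xdp_page_head overhead is added
 * to the headroom so it is counted against the linear-data budget but not
 * against the metadata size check.
 */
static bool compute_linear_budget(uint32_t meta_sz, bool do_live,
				  uint32_t *max_linear_sz)
{
	uint32_t headroom = HEADROOM_SKETCH;

	/* metadata must fit in the headroom alongside the xdp_frame */
	if (meta_sz > headroom - XDP_FRAME_SZ_SKETCH)
		return false;

	headroom -= meta_sz;

	if (do_live)
		headroom += XDP_PAGE_HEAD_SKETCH;

	*max_linear_sz = PAGE_SIZE_SKETCH - headroom - TAILROOM_SKETCH;
	return true;
}

int main(void)
{
	uint32_t budget;

	if (compute_linear_budget(32, true, &budget))
		printf("live-frame linear budget with 32 bytes of metadata: %u\n",
		       budget);
	return 0;
}

Compiling and running the sketch prints the remaining linear budget for a live frame carrying 32 bytes of metadata, mirroring how max_linear_sz caps linear_sz via min_t() in the patched function.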