Diffstat (limited to 'net')
-rw-r--r--  net/bpf/test_run.c              25
-rw-r--r--  net/bridge/br_vlan_tunnel.c     11
-rw-r--r--  net/can/j1939/transport.c        2
-rw-r--r--  net/ceph/messenger_v2.c          2
-rw-r--r--  net/ceph/mon_client.c            2
-rw-r--r--  net/ceph/osd_client.c           14
-rw-r--r--  net/ceph/osdmap.c               24
-rw-r--r--  net/core/skbuff.c                8
-rw-r--r--  net/core/sock.c                  7
-rw-r--r--  net/ipv4/arp.c                   7
-rw-r--r--  net/ipv4/inet_fragment.c         2
-rw-r--r--  net/ipv4/ping.c                  4
-rw-r--r--  net/ipv4/tcp.c                   8
-rw-r--r--  net/ipv4/udp.c                   1
-rw-r--r--  net/mac80211/tx.c                2
-rw-r--r--  net/netfilter/nf_conncount.c     2
-rw-r--r--  net/netfilter/nf_tables_api.c   72
-rw-r--r--  net/netfilter/nft_set_pipapo.c   4
-rw-r--r--  net/netfilter/nft_synproxy.c     6
-rw-r--r--  net/sched/act_api.c              2
-rw-r--r--  net/sched/sch_qfq.c              2
-rw-r--r--  net/unix/af_unix.c               8
-rw-r--r--  net/vmw_vsock/af_vsock.c         4
-rw-r--r--  net/wireless/wext-core.c         4
-rw-r--r--  net/wireless/wext-priv.c         4
25 files changed, 169 insertions, 58 deletions
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 55a79337ac51..6b04f47301c1 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -1231,8 +1231,6 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
batch_size = NAPI_POLL_WEIGHT;
else if (batch_size > TEST_XDP_MAX_BATCH)
return -E2BIG;
-
- headroom += sizeof(struct xdp_page_head);
} else if (batch_size) {
return -EINVAL;
}
@@ -1245,16 +1243,26 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
/* There can't be user provided data before the meta data */
if (ctx->data_meta || ctx->data_end > kattr->test.data_size_in ||
ctx->data > ctx->data_end ||
- unlikely(xdp_metalen_invalid(ctx->data)) ||
(do_live && (kattr->test.data_out || kattr->test.ctx_out)))
goto free_ctx;
- /* Meta data is allocated from the headroom */
- headroom -= ctx->data;
meta_sz = ctx->data;
+ if (xdp_metalen_invalid(meta_sz) || meta_sz > headroom - sizeof(struct xdp_frame))
+ goto free_ctx;
+
+ /* Meta data is allocated from the headroom */
+ headroom -= meta_sz;
linear_sz = ctx->data_end;
}
+ /* The xdp_page_head structure takes up space in each page, limiting the
+ * size of the packet data; add the extra size to headroom here to make
+ * sure it's accounted in the length checks below, but not in the
+ * metadata size check above.
+ */
+ if (do_live)
+ headroom += sizeof(struct xdp_page_head);
+
max_linear_sz = PAGE_SIZE - headroom - tailroom;
linear_sz = min_t(u32, linear_sz, max_linear_sz);
@@ -1292,13 +1300,13 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
if (sinfo->nr_frags == MAX_SKB_FRAGS) {
ret = -ENOMEM;
- goto out;
+ goto out_put_dev;
}
page = alloc_page(GFP_KERNEL);
if (!page) {
ret = -ENOMEM;
- goto out;
+ goto out_put_dev;
}
frag = &sinfo->frags[sinfo->nr_frags++];
@@ -1310,7 +1318,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
if (copy_from_user(page_address(page), data_in + size,
data_len)) {
ret = -EFAULT;
- goto out;
+ goto out_put_dev;
}
sinfo->xdp_frags_size += data_len;
size += data_len;
@@ -1325,6 +1333,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
else
ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
+out_put_dev:
/* We convert the xdp_buff back to an xdp_md before checking the return
* code so the reference count of any held netdevice will be decremented
* even if the test run failed.
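
Note: the reordered headroom accounting above is easiest to follow with concrete numbers. A minimal userspace sketch, using stand-in sizes rather than the kernel's exact struct sizes:

    #include <stdio.h>

    int main(void)
    {
        unsigned int page_size = 4096, tailroom = 320;
        unsigned int headroom = 256;        /* stand-in XDP headroom */
        unsigned int xdp_frame_sz = 40;     /* stand-in sizeof(struct xdp_frame) */
        unsigned int xdp_page_head_sz = 64; /* stand-in sizeof(struct xdp_page_head) */
        unsigned int meta_sz = 32;

        /* metadata is checked against headroom WITHOUT xdp_page_head... */
        if (meta_sz > headroom - xdp_frame_sz)
            return 1;
        headroom -= meta_sz;                /* metadata carved from headroom */

        /* ...which is added only afterwards, so it still caps linear_sz */
        headroom += xdp_page_head_sz;       /* the do_live case */
        printf("max_linear_sz = %u\n", page_size - headroom - tailroom);
        return 0;
    }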
diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c
index a966a6ec8263..257cae9f1569 100644
--- a/net/bridge/br_vlan_tunnel.c
+++ b/net/bridge/br_vlan_tunnel.c
@@ -189,7 +189,6 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct metadata_dst *tunnel_dst;
__be64 tunnel_id;
- int err;
if (!vlan)
return 0;
@@ -199,9 +198,13 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
return 0;
skb_dst_drop(skb);
- err = skb_vlan_pop(skb);
- if (err)
- return err;
+ /* For 802.1ad (QinQ), skb_vlan_pop() incorrectly moves the C-VLAN
+ * from payload to hwaccel after clearing S-VLAN. We only need to
+ * clear the hwaccel S-VLAN; the C-VLAN must stay in payload for
+ * correct VXLAN encapsulation. This is also correct for 802.1Q
+ * where no C-VLAN exists in payload.
+ */
+ __vlan_hwaccel_clear_tag(skb);
if (BR_INPUT_SKB_CB(skb)->backup_nhid) {
__set_bit(IP_TUNNEL_KEY_BIT, flags);
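
Note: a toy userspace model of why __vlan_hwaccel_clear_tag() is the right tool here; the struct and field names are illustrative, not the kernel's:

    #include <stdio.h>

    struct toy_skb {
        int hwaccel_tci;    /* offloaded (outer/S) tag, -1 if none */
        int payload_tci;    /* inner/C tag still in the frame, -1 if none */
    };

    static void vlan_hwaccel_clear_tag(struct toy_skb *skb)
    {
        skb->hwaccel_tci = -1;                /* drop the S-VLAN only */
    }

    static void vlan_pop(struct toy_skb *skb)
    {
        skb->hwaccel_tci = skb->payload_tci;  /* promotes the C-VLAN... */
        skb->payload_tci = -1;                /* ...stripping it from payload */
    }

    int main(void)
    {
        struct toy_skb a = { .hwaccel_tci = 100, .payload_tci = 200 };
        struct toy_skb b = a;

        vlan_pop(&a);                 /* C-VLAN lost before encapsulation */
        vlan_hwaccel_clear_tag(&b);   /* C-VLAN preserved for VXLAN encap */
        printf("pop: payload=%d  clear: payload=%d\n",
               a.payload_tci, b.payload_tci);
        return 0;
    }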
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index fbf5c8001c9d..613a911dda10 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1567,6 +1567,8 @@ int j1939_session_activate(struct j1939_session *session)
if (active) {
j1939_session_put(active);
ret = -EAGAIN;
+ } else if (priv->ndev->reg_state != NETREG_REGISTERED) {
+ ret = -ENODEV;
} else {
WARN_ON_ONCE(session->state != J1939_SESSION_NEW);
list_add_tail(&session->active_session_list_entry,
diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
index 9e48623018a3..061eaa047f76 100644
--- a/net/ceph/messenger_v2.c
+++ b/net/ceph/messenger_v2.c
@@ -2377,7 +2377,9 @@ static int process_auth_done(struct ceph_connection *con, void *p, void *end)
ceph_decode_64_safe(&p, end, global_id, bad);
ceph_decode_32_safe(&p, end, con->v2.con_mode, bad);
+
ceph_decode_32_safe(&p, end, payload_len, bad);
+ ceph_decode_need(&p, end, payload_len, bad);
dout("%s con %p global_id %llu con_mode %d payload_len %d\n",
__func__, con, global_id, con->v2.con_mode, payload_len);
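
Note: the added ceph_decode_need() rejects a payload_len that overruns the buffer before anything dereferences it. A self-contained sketch of the length-then-bounds-check pattern (helper names are illustrative, not the ceph_decode_* macros; assumes a little-endian host):

    #include <stdint.h>
    #include <string.h>
    #include <stdbool.h>

    static bool decode_need(const uint8_t **p, const uint8_t *end, size_t n)
    {
        return (size_t)(end - *p) >= n;
    }

    static bool decode_u32(const uint8_t **p, const uint8_t *end, uint32_t *v)
    {
        if (!decode_need(p, end, sizeof(*v)))
            return false;
        memcpy(v, *p, sizeof(*v));
        *p += sizeof(*v);
        return true;
    }

    static bool parse(const uint8_t *p, const uint8_t *end)
    {
        uint32_t payload_len;

        if (!decode_u32(&p, end, &payload_len))
            return false;
        /* the added check: require payload_len bytes to actually exist */
        return decode_need(&p, end, payload_len);
    }

    int main(void)
    {
        const uint8_t buf[] = { 4, 0, 0, 0, 1, 2, 3, 4 }; /* len=4, then 4 bytes */
        return parse(buf, buf + sizeof(buf)) ? 0 : 1;
    }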
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index c227ececa925..fa8dd2a20f7d 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -1417,7 +1417,7 @@ static int mon_handle_auth_done(struct ceph_connection *con,
if (!ret)
finish_hunting(monc);
mutex_unlock(&monc->mutex);
- return 0;
+ return ret;
}
static int mon_handle_auth_bad_method(struct ceph_connection *con,
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6664ea73ccf8..6d7d8c7d7d3f 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1588,6 +1588,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
struct ceph_pg_pool_info *pi;
struct ceph_pg pgid, last_pgid;
struct ceph_osds up, acting;
+ bool should_be_paused;
bool is_read = t->flags & CEPH_OSD_FLAG_READ;
bool is_write = t->flags & CEPH_OSD_FLAG_WRITE;
bool force_resend = false;
@@ -1656,10 +1657,16 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
&last_pgid))
force_resend = true;
- if (t->paused && !target_should_be_paused(osdc, t, pi)) {
- t->paused = false;
+ should_be_paused = target_should_be_paused(osdc, t, pi);
+ if (t->paused && !should_be_paused) {
unpaused = true;
}
+ if (t->paused != should_be_paused) {
+ dout("%s t %p paused %d -> %d\n", __func__, t, t->paused,
+ should_be_paused);
+ t->paused = should_be_paused;
+ }
+
legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
ceph_osds_changed(&t->acting, &acting,
t->used_replica || any_change);
@@ -4283,6 +4290,9 @@ static void osd_fault(struct ceph_connection *con)
goto out_unlock;
}
+ osd->o_sparse_op_idx = -1;
+ ceph_init_sparse_read(&osd->o_sparse_read);
+
if (!reopen_osd(osd))
kick_osd_requests(osd);
maybe_request_map(osdc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index f5f60deb680a..7c76eb9d6cee 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -241,22 +241,26 @@ static struct crush_choose_arg_map *alloc_choose_arg_map(void)
static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
{
- if (arg_map) {
- int i, j;
+ int i, j;
+
+ if (!arg_map)
+ return;
- WARN_ON(!RB_EMPTY_NODE(&arg_map->node));
+ WARN_ON(!RB_EMPTY_NODE(&arg_map->node));
+ if (arg_map->args) {
for (i = 0; i < arg_map->size; i++) {
struct crush_choose_arg *arg = &arg_map->args[i];
-
- for (j = 0; j < arg->weight_set_size; j++)
- kfree(arg->weight_set[j].weights);
- kfree(arg->weight_set);
+ if (arg->weight_set) {
+ for (j = 0; j < arg->weight_set_size; j++)
+ kfree(arg->weight_set[j].weights);
+ kfree(arg->weight_set);
+ }
kfree(arg->ids);
}
kfree(arg_map->args);
- kfree(arg_map);
}
+ kfree(arg_map);
}
DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,
@@ -1979,11 +1983,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bool msgr2,
sizeof(u64) + sizeof(u32), e_inval);
ceph_decode_copy(p, &fsid, sizeof(fsid));
epoch = ceph_decode_32(p);
- BUG_ON(epoch != map->epoch+1);
ceph_decode_copy(p, &modified, sizeof(modified));
new_pool_max = ceph_decode_64(p);
new_flags = ceph_decode_32(p);
+ if (epoch != map->epoch + 1)
+ goto e_inval;
+
/* full map? */
ceph_decode_32_safe(p, end, len, e_inval);
if (len > 0) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6be01454f262..9a763d120925 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4597,12 +4597,14 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
{
struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
unsigned int tnl_hlen = skb_tnl_header_len(skb);
- unsigned int delta_truesize = 0;
unsigned int delta_len = 0;
struct sk_buff *tail = NULL;
struct sk_buff *nskb, *tmp;
int len_diff, err;
+ /* Only skb_gro_receive_list generated skbs arrive here */
+ DEBUG_NET_WARN_ON_ONCE(!(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST));
+
skb_push(skb, -skb_network_offset(skb) + offset);
/* Ensure the head is writeable before touching the shared info */
@@ -4616,8 +4618,9 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
nskb = list_skb;
list_skb = list_skb->next;
+ DEBUG_NET_WARN_ON_ONCE(nskb->sk);
+
err = 0;
- delta_truesize += nskb->truesize;
if (skb_shared(nskb)) {
tmp = skb_clone(nskb, GFP_ATOMIC);
if (tmp) {
@@ -4660,7 +4663,6 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
goto err_linearize;
}
- skb->truesize = skb->truesize - delta_truesize;
skb->data_len = skb->data_len - delta_len;
skb->len = skb->len - delta_len;
diff --git a/net/core/sock.c b/net/core/sock.c
index dc03d4b5909a..5a38837a5838 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3836,7 +3836,7 @@ void sock_enable_timestamp(struct sock *sk, enum sock_flags flag)
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
int level, int type)
{
- struct sock_exterr_skb *serr;
+ struct sock_extended_err ee;
struct sk_buff *skb;
int copied, err;
@@ -3856,8 +3856,9 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
sock_recv_timestamp(msg, sk, skb);
- serr = SKB_EXT_ERR(skb);
- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
+ /* We must use a bounce buffer for CONFIG_HARDENED_USERCOPY=y */
+ ee = SKB_EXT_ERR(skb)->ee;
+ put_cmsg(msg, level, type, sizeof(ee), &ee);
msg->msg_flags |= MSG_ERRQUEUE;
err = copied;
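
Note: the stack copy matters because hardened usercopy validates the whole region handed to the copy routine, and an interior pointer into a slab-allocated object (here, the skb cb area) can trip that check. A userspace model of the bounce pattern:

    #include <stdio.h>

    struct ee_like { int err_code; int origin; };
    struct cb_like { long hdr; struct ee_like ee; long tail; };

    /* stand-in for put_cmsg()/copy_to_user(): sees only the range given */
    static void copy_out(const void *src, unsigned long n)
    {
        printf("copy %lu bytes from %p\n", n, src);
    }

    int main(void)
    {
        struct cb_like cb = { .ee = { .err_code = 111, .origin = 2 } };

        /* direct: &cb.ee is an interior pointer into a larger object,
         * which a hardened-usercopy check must reason about */
        copy_out(&cb.ee, sizeof(cb.ee));

        /* bounced: a whole stack local, trivially valid as a copy window */
        struct ee_like ee = cb.ee;    /* plain struct assignment */
        copy_out(&ee, sizeof(ee));
        return 0;
    }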
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 833f2cf97178..3ce1664e8632 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -564,7 +564,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
- arp = skb_put(skb, arp_hdr_len(dev));
+ skb_put(skb, arp_hdr_len(dev));
skb->dev = dev;
skb->protocol = htons(ETH_P_ARP);
if (!src_hw)
@@ -572,12 +572,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
if (!dest_hw)
dest_hw = dev->broadcast;
- /*
- * Fill the device header for the ARP frame
+ /* Fill the device header for the ARP frame.
+ * Note: skb->head can be changed.
*/
if (dev_hard_header(skb, dev, ptype, dest_hw, src_hw, skb->len) < 0)
goto out;
+ arp = arp_hdr(skb);
/*
* Fill out the arp protocol part.
*
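
Note: the re-fetch via arp_hdr() is needed because dev_hard_header() may reallocate skb->head, leaving the pointer skb_put() returned dangling. The same hazard in plain C:

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
        char *buf = malloc(16);
        if (!buf)
            return 1;
        strcpy(buf, "head");
        char *p = buf + 4;            /* pointer into the buffer */
        (void)p;

        buf = realloc(buf, 4096);     /* may move the allocation... */
        if (!buf)
            return 1;
        /* ...so 'p' may now dangle, exactly like a pointer taken before
         * dev_hard_header() reallocates skb->head.  Re-derive it from
         * the (possibly new) base instead, as arp_hdr(skb) does: */
        p = buf + 4;
        printf("%s %p\n", buf, (void *)p);
        free(buf);
        return 0;
    }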
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 001ee5c4d962..4e6d7467ed44 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -488,6 +488,8 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
}
FRAG_CB(skb)->ip_defrag_offset = offset;
+ if (offset)
+ nf_reset_ct(skb);
return IPFRAG_OK;
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 5321c5801c64..a5227d23bb0b 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -828,10 +828,8 @@ out:
out_free:
if (free)
kfree(ipc.opt);
- if (!err) {
- icmp_out_count(sock_net(sk), user_icmph.type);
+ if (!err)
return len;
- }
return err;
do_confirm:
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8a18aeca7ab0..74079eab8980 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2651,10 +2651,8 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
if (sk->sk_state == TCP_LISTEN)
goto out;
- if (tp->recvmsg_inq) {
+ if (tp->recvmsg_inq)
*cmsg_flags = TCP_CMSG_INQ;
- msg->msg_get_inq = 1;
- }
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
/* Urgent data needs to be handled specially. */
@@ -2928,10 +2926,10 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags);
release_sock(sk);
- if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) {
+ if ((cmsg_flags | msg->msg_get_inq) && ret >= 0) {
if (cmsg_flags & TCP_CMSG_TS)
tcp_recv_timestamp(msg, sk, &tss);
- if (msg->msg_get_inq) {
+ if ((cmsg_flags & TCP_CMSG_INQ) | msg->msg_get_inq) {
msg->msg_inq = tcp_inq_hint(sk);
if (cmsg_flags & TCP_CMSG_INQ)
put_cmsg(msg, SOL_TCP, TCP_CM_INQ,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 30dfbf73729d..860bd61ff047 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1851,6 +1851,7 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
sk_peek_offset_bwd(sk, len);
if (!skb_shared(skb)) {
+ skb_orphan(skb);
skb_attempt_defer_free(skb);
return;
}
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e7b141c55f7a..160667be3f4d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2395,6 +2395,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
if (chanctx_conf)
chandef = &chanctx_conf->def;
+ else if (local->emulate_chanctx)
+ chandef = &local->hw.conf.chandef;
else
goto fail_rcu;
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 3c1b155f7a0e..828d5c64c68a 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -229,6 +229,7 @@ static int __nf_conncount_add(struct net *net,
nf_ct_put(found_ct);
}
+ list->last_gc = (u32)jiffies;
add_new_node:
if (WARN_ON_ONCE(list->count > INT_MAX)) {
@@ -248,7 +249,6 @@ add_new_node:
conn->jiffies32 = (u32)jiffies;
list_add_tail(&conn->node, &list->head);
list->count++;
- list->last_gc = (u32)jiffies;
out_put:
if (refcounted)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 1a204f6371ad..3cbf2573b9e9 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -123,6 +123,29 @@ static void nft_validate_state_update(struct nft_table *table, u8 new_validate_s
table->validate_state = new_validate_state;
}
+
+static bool nft_chain_vstate_valid(const struct nft_ctx *ctx,
+ const struct nft_chain *chain)
+{
+ const struct nft_base_chain *base_chain;
+ enum nft_chain_types type;
+ u8 hooknum;
+
+ if (WARN_ON_ONCE(!nft_is_base_chain(ctx->chain)))
+ return false;
+
+ base_chain = nft_base_chain(ctx->chain);
+ hooknum = base_chain->ops.hooknum;
+ type = base_chain->type->type;
+
+ /* chain is already validated for this call depth */
+ if (chain->vstate.depth >= ctx->level &&
+ chain->vstate.hook_mask[type] & BIT(hooknum))
+ return true;
+
+ return false;
+}
+
static void nf_tables_trans_destroy_work(struct work_struct *w);
static void nft_trans_gc_work(struct work_struct *work);
@@ -4079,6 +4102,29 @@ static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *r
nf_tables_rule_destroy(ctx, rule);
}
+static void nft_chain_vstate_update(const struct nft_ctx *ctx, struct nft_chain *chain)
+{
+ const struct nft_base_chain *base_chain;
+ enum nft_chain_types type;
+ u8 hooknum;
+
+ /* ctx->chain must hold the calling base chain. */
+ if (WARN_ON_ONCE(!nft_is_base_chain(ctx->chain))) {
+ memset(&chain->vstate, 0, sizeof(chain->vstate));
+ return;
+ }
+
+ base_chain = nft_base_chain(ctx->chain);
+ hooknum = base_chain->ops.hooknum;
+ type = base_chain->type->type;
+
+ BUILD_BUG_ON(BIT(NF_INET_NUMHOOKS) > U8_MAX);
+
+ chain->vstate.hook_mask[type] |= BIT(hooknum);
+ if (chain->vstate.depth < ctx->level)
+ chain->vstate.depth = ctx->level;
+}
+
/** nft_chain_validate - loop detection and hook validation
*
* @ctx: context containing call depth and base chain
@@ -4088,15 +4134,25 @@ static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *r
* and set lookups until either the jump limit is hit or all reachable
* chains have been validated.
*/
-int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
+int nft_chain_validate(const struct nft_ctx *ctx, struct nft_chain *chain)
{
struct nft_expr *expr, *last;
struct nft_rule *rule;
int err;
+ BUILD_BUG_ON(NFT_JUMP_STACK_SIZE > 255);
if (ctx->level == NFT_JUMP_STACK_SIZE)
return -EMLINK;
+ if (ctx->level > 0) {
+ /* jumps to base chains are not allowed. */
+ if (nft_is_base_chain(chain))
+ return -ELOOP;
+
+ if (nft_chain_vstate_valid(ctx, chain))
+ return 0;
+ }
+
list_for_each_entry(rule, &chain->rules, list) {
if (fatal_signal_pending(current))
return -EINTR;
@@ -4117,6 +4173,7 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
}
}
+ nft_chain_vstate_update(ctx, chain);
return 0;
}
EXPORT_SYMBOL_GPL(nft_chain_validate);
@@ -4128,7 +4185,7 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
.net = net,
.family = table->family,
};
- int err;
+ int err = 0;
list_for_each_entry(chain, &table->chains, list) {
if (!nft_is_base_chain(chain))
@@ -4137,12 +4194,16 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
ctx.chain = chain;
err = nft_chain_validate(&ctx, chain);
if (err < 0)
- return err;
+ goto err;
cond_resched();
}
- return 0;
+err:
+ list_for_each_entry(chain, &table->chains, list)
+ memset(&chain->vstate, 0, sizeof(chain->vstate));
+
+ return err;
}
int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
@@ -4378,7 +4439,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
if (!nft_use_inc(&chain->use)) {
err = -EMFILE;
- goto err_release_rule;
+ goto err_destroy_flow;
}
if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
@@ -4428,6 +4489,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
err_destroy_flow_rule:
nft_use_dec_restore(&chain->use);
+err_destroy_flow:
if (flow)
nft_flow_rule_destroy(flow);
err_release_rule:
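
Note: a compact userspace model of the vstate memoisation introduced above. A chain revisited from the same base-chain type and hook, at a call depth it has already been validated for, can be skipped; types and bounds here are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_TYPES 8

    struct vstate {
        uint8_t hook_mask[MAX_TYPES]; /* per chain type, bitmask of hooks */
        unsigned int depth;           /* deepest validated call depth */
    };

    static bool vstate_valid(const struct vstate *vs, unsigned int type,
                             unsigned int hooknum, unsigned int level)
    {
        /* already validated for this hook at this depth (or deeper) */
        return vs->depth >= level &&
               (vs->hook_mask[type] & (1u << hooknum));
    }

    static void vstate_update(struct vstate *vs, unsigned int type,
                              unsigned int hooknum, unsigned int level)
    {
        vs->hook_mask[type] |= 1u << hooknum;
        if (vs->depth < level)
            vs->depth = level;
    }

    int main(void)
    {
        struct vstate vs = { { 0 }, 0 };

        vstate_update(&vs, 0, 3, 2);  /* validated at depth 2, hook 3 */
        /* same hook, shallower or equal depth: safe to skip */
        return vstate_valid(&vs, 0, 3, 1) ? 0 : 1;
    }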
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index 112fe46788b6..6d77a5f0088a 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -1317,8 +1317,8 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
else
dup_end = dup_key;
- if (!memcmp(start, dup_key->data, sizeof(*dup_key->data)) &&
- !memcmp(end, dup_end->data, sizeof(*dup_end->data))) {
+ if (!memcmp(start, dup_key->data, set->klen) &&
+ !memcmp(end, dup_end->data, set->klen)) {
*elem_priv = &dup->priv;
return -EEXIST;
}
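
Note: the bug fixed here is the classic sizeof-of-a-dereference slip: sizeof(*dup_key->data) is the size of a single element, not the key length, so keys differing past the first element looked like duplicates. Distilled:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char a[16] = { 7, 1 }, b[16] = { 7, 2 };
        unsigned char *data = a;

        /* sizeof(*data) == 1: keys differing past byte 0 compare equal */
        printf("buggy dup check: %d\n", !memcmp(data, b, sizeof(*data)));
        /* comparing the full key length, as the fix does via set->klen */
        printf("fixed dup check: %d\n", !memcmp(data, b, sizeof(a)));
        return 0;
    }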
diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
index 5d3e51825985..4d3e5a31b412 100644
--- a/net/netfilter/nft_synproxy.c
+++ b/net/netfilter/nft_synproxy.c
@@ -48,7 +48,7 @@ static void nft_synproxy_eval_v4(const struct nft_synproxy *priv,
struct tcphdr *_tcph,
struct synproxy_options *opts)
{
- struct nf_synproxy_info info = priv->info;
+ struct nf_synproxy_info info = READ_ONCE(priv->info);
struct net *net = nft_net(pkt);
struct synproxy_net *snet = synproxy_pernet(net);
struct sk_buff *skb = pkt->skb;
@@ -79,7 +79,7 @@ static void nft_synproxy_eval_v6(const struct nft_synproxy *priv,
struct tcphdr *_tcph,
struct synproxy_options *opts)
{
- struct nf_synproxy_info info = priv->info;
+ struct nf_synproxy_info info = READ_ONCE(priv->info);
struct net *net = nft_net(pkt);
struct synproxy_net *snet = synproxy_pernet(net);
struct sk_buff *skb = pkt->skb;
@@ -340,7 +340,7 @@ static void nft_synproxy_obj_update(struct nft_object *obj,
struct nft_synproxy *newpriv = nft_obj_data(newobj);
struct nft_synproxy *priv = nft_obj_data(obj);
- priv->info = newpriv->info;
+ WRITE_ONCE(priv->info, newpriv->info);
}
static struct nft_object_type nft_synproxy_obj_type;
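
Note: the annotations mark a genuine data race: nft_synproxy_obj_update() can rewrite priv->info while the datapath reads it. A userspace rendering of the idea (the kernel macros are essentially volatile accesses; for a struct wider than a machine word this documents the race rather than making the copy atomic):

    #include <stdint.h>

    #define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    static uint32_t shared_cfg;

    /* reader path: one load the compiler cannot tear, refetch, or hoist */
    uint32_t read_cfg(void) { return READ_ONCE(shared_cfg); }

    /* writer path: one store, likewise untorn */
    void update_cfg(uint32_t v) { WRITE_ONCE(shared_cfg, v); }

    int main(void)
    {
        update_cfg(42);
        return read_cfg() == 42 ? 0 : 1;
    }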
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index ff6be5cfe2b0..e1ab0faeb811 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -940,6 +940,8 @@ void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
int ret;
idr_for_each_entry_ul(idr, p, tmp, id) {
+ if (IS_ERR(p))
+ continue;
if (tc_act_in_hw(p) && !mutex_taken) {
rtnl_lock();
mutex_taken = true;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 2255355e51d3..a91a5bac8f73 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1481,7 +1481,7 @@ static void qfq_reset_qdisc(struct Qdisc *sch)
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
- if (cl->qdisc->q.qlen > 0)
+ if (cl_is_active(cl))
qfq_deactivate_class(q, cl);
qdisc_reset(cl->qdisc);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f6f01f514933..c634a7fc8609 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2929,7 +2929,6 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
unsigned int last_len;
struct unix_sock *u;
int copied = 0;
- bool do_cmsg;
int err = 0;
long timeo;
int target;
@@ -2955,9 +2954,6 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
u = unix_sk(sk);
- do_cmsg = READ_ONCE(u->recvmsg_inq);
- if (do_cmsg)
- msg->msg_get_inq = 1;
redo:
/* Lock the socket to prevent queue disordering
* while sleeps in memcpy_tomsg
@@ -3115,9 +3111,11 @@ unlock:
mutex_unlock(&u->iolock);
if (msg) {
+ bool do_cmsg = READ_ONCE(u->recvmsg_inq);
+
scm_recv_unix(sock, msg, &scm, flags);
- if (msg->msg_get_inq && (copied ?: err) >= 0) {
+ if ((do_cmsg | msg->msg_get_inq) && (copied ?: err) >= 0) {
msg->msg_inq = READ_ONCE(u->inq_len);
if (do_cmsg)
put_cmsg(msg, SOL_SOCKET, SCM_INQ,
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index a9ca9c3b87b3..cbd649bf0145 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1787,6 +1787,10 @@ static int vsock_accept(struct socket *sock, struct socket *newsock,
} else {
newsock->state = SS_CONNECTED;
sock_graft(connected, newsock);
+
+ set_bit(SOCK_CUSTOM_SOCKOPT,
+ &connected->sk_socket->flags);
+
if (vsock_msgzerocopy_allow(vconnected->transport))
set_bit(SOCK_SUPPORT_ZC,
&connected->sk_socket->flags);
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index c32a7c6903d5..7b8e94214b07 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -1101,6 +1101,10 @@ static int compat_standard_call(struct net_device *dev,
return ioctl_standard_call(dev, iwr, cmd, info, handler);
iwp_compat = (struct compat_iw_point *) &iwr->u.data;
+
+ /* struct iw_point has a 32bit hole on 64bit arches. */
+ memset(&iwp, 0, sizeof(iwp));
+
iwp.pointer = compat_ptr(iwp_compat->pointer);
iwp.length = iwp_compat->length;
iwp.flags = iwp_compat->flags;
diff --git a/net/wireless/wext-priv.c b/net/wireless/wext-priv.c
index 674d426a9d24..37d1147019c2 100644
--- a/net/wireless/wext-priv.c
+++ b/net/wireless/wext-priv.c
@@ -228,6 +228,10 @@ int compat_private_call(struct net_device *dev, struct iwreq *iwr,
struct iw_point iwp;
iwp_compat = (struct compat_iw_point *) &iwr->u.data;
+
+ /* struct iw_point has a 32bit hole on 64bit arches. */
+ memset(&iwp, 0, sizeof(iwp));
+
iwp.pointer = compat_ptr(iwp_compat->pointer);
iwp.length = iwp_compat->length;
iwp.flags = iwp_compat->flags;
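
Note: the memset in both wext hunks closes a kernel-stack infoleak. On 64-bit, struct iw_point has padding after its two 16-bit fields, and field-by-field assignment leaves that hole holding stale stack bytes that would be copied out with the struct. A userspace model of the layout:

    #include <stdio.h>
    #include <string.h>
    #include <stddef.h>

    /* illustrative stand-in for struct iw_point on a 64-bit ABI */
    struct iw_point_like {
        void *pointer;          /* 8 bytes */
        unsigned short length;  /* 2 bytes */
        unsigned short flags;   /* 2 bytes, then 4 bytes of tail padding */
    };

    int main(void)
    {
        struct iw_point_like iwp;

        /* zero the whole object first, as the fix does; assignments
         * alone would leave the padding bytes uninitialised */
        memset(&iwp, 0, sizeof(iwp));
        iwp.pointer = NULL;
        iwp.length = 0;
        iwp.flags = 0;

        printf("sizeof=%zu, fields end at byte %zu\n", sizeof(iwp),
               offsetof(struct iw_point_like, flags) + sizeof(iwp.flags));
        return 0;
    }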