Diffstat (limited to 'net')
 net/bpf/test_run.c             | 60
 net/bridge/br_vlan_tunnel.c    | 11
 net/can/j1939/transport.c      |  2
 net/ceph/messenger_v2.c        |  2
 net/ceph/mon_client.c          |  2
 net/ceph/osd_client.c          | 14
 net/ceph/osdmap.c              | 24
 net/core/skbuff.c              |  8
 net/core/sock.c                |  7
 net/ipv4/arp.c                 |  7
 net/ipv4/ping.c                |  4
 net/mac80211/tx.c              |  2
 net/netfilter/nf_conncount.c   |  2
 net/netfilter/nf_tables_api.c  | 72
 net/netfilter/nft_set_pipapo.c |  4
 net/netfilter/nft_synproxy.c   |  6
 net/sched/sch_qfq.c            |  2
 net/tls/tls_device.c           | 18
 net/vmw_vsock/af_vsock.c       |  4
 net/wireless/wext-core.c       |  4
 net/wireless/wext-priv.c       |  4
21 files changed, 190 insertions, 69 deletions
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 8612023bec60..1ced59980dbc 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -660,7 +660,7 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
 	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
 	void *data;
 
-	if (user_size < ETH_HLEN || user_size > PAGE_SIZE - headroom - tailroom)
+	if (user_size > PAGE_SIZE - headroom - tailroom)
 		return ERR_PTR(-EINVAL);
 
 	size = SKB_DATA_ALIGN(size);
@@ -995,6 +995,9 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 	    kattr->test.cpu || kattr->test.batch_size)
 		return -EINVAL;
 
+	if (size < ETH_HLEN)
+		return -EINVAL;
+
 	data = bpf_test_init(kattr, kattr->test.data_size_in, size,
 			     NET_SKB_PAD + NET_IP_ALIGN,
 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
@@ -1200,9 +1203,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 {
 	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
 	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	u32 retval = 0, meta_sz = 0, duration, max_linear_sz, size;
+	u32 linear_sz = kattr->test.data_size_in;
 	u32 batch_size = kattr->test.batch_size;
-	u32 retval = 0, duration, max_data_sz;
-	u32 size = kattr->test.data_size_in;
 	u32 headroom = XDP_PACKET_HEADROOM;
 	u32 repeat = kattr->test.repeat;
 	struct netdev_rx_queue *rxqueue;
@@ -1227,8 +1230,6 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 			batch_size = NAPI_POLL_WEIGHT;
 		else if (batch_size > TEST_XDP_MAX_BATCH)
 			return -E2BIG;
-
-		headroom += sizeof(struct xdp_page_head);
 	} else if (batch_size) {
 		return -EINVAL;
 	}
@@ -1239,39 +1240,55 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 
 	if (ctx) {
 		/* There can't be user provided data before the meta data */
-		if (ctx->data_meta || ctx->data_end != size ||
+		if (ctx->data_meta || ctx->data_end > kattr->test.data_size_in ||
 		    ctx->data > ctx->data_end ||
-		    unlikely(xdp_metalen_invalid(ctx->data)) ||
 		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
 			goto free_ctx;
-		/* Meta data is allocated from the headroom */
-		headroom -= ctx->data;
-	}
 
-	max_data_sz = 4096 - headroom - tailroom;
-	if (size > max_data_sz) {
-		/* disallow live data mode for jumbo frames */
-		if (do_live)
+		meta_sz = ctx->data;
+		if (xdp_metalen_invalid(meta_sz) || meta_sz > headroom - sizeof(struct xdp_frame))
 			goto free_ctx;
-		size = max_data_sz;
+
+		/* Meta data is allocated from the headroom */
+		headroom -= meta_sz;
+		linear_sz = ctx->data_end;
 	}
 
-	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
+	/* The xdp_page_head structure takes up space in each page, limiting the
+	 * size of the packet data; add the extra size to headroom here to make
+	 * sure it's accounted in the length checks below, but not in the
+	 * metadata size check above.
+	 */
+	if (do_live)
+		headroom += sizeof(struct xdp_page_head);
+
+	max_linear_sz = PAGE_SIZE - headroom - tailroom;
+	linear_sz = min_t(u32, linear_sz, max_linear_sz);
+
+	/* disallow live data mode for jumbo frames */
+	if (do_live && kattr->test.data_size_in > linear_sz)
+		goto free_ctx;
+
+	if (kattr->test.data_size_in - meta_sz < ETH_HLEN)
+		goto free_ctx;
+
+	data = bpf_test_init(kattr, linear_sz, max_linear_sz, headroom, tailroom);
 	if (IS_ERR(data)) {
 		ret = PTR_ERR(data);
 		goto free_ctx;
 	}
 
 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
-	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
+	rxqueue->xdp_rxq.frag_size = PAGE_SIZE;
 	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
-	xdp_prepare_buff(&xdp, data, headroom, size, true);
+	xdp_prepare_buff(&xdp, data, headroom, linear_sz, true);
 	sinfo = xdp_get_shared_info_from_buff(&xdp);
 
 	ret = xdp_convert_md_to_buff(ctx, &xdp);
 	if (ret)
 		goto free_data;
 
+	size = linear_sz;
 	if (unlikely(kattr->test.data_size_in > size)) {
 		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
@@ -1282,13 +1299,13 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 
 		if (sinfo->nr_frags == MAX_SKB_FRAGS) {
 			ret = -ENOMEM;
-			goto out;
+			goto out_put_dev;
 		}
 
 		page = alloc_page(GFP_KERNEL);
 		if (!page) {
 			ret = -ENOMEM;
-			goto out;
+			goto out_put_dev;
 		}
 
 		frag = &sinfo->frags[sinfo->nr_frags++];
@@ -1300,7 +1317,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 
 		if (copy_from_user(page_address(page), data_in + size,
 				   data_len)) {
 			ret = -EFAULT;
-			goto out;
+			goto out_put_dev;
 		}
 		sinfo->xdp_frags_size += data_len;
 		size += data_len;
@@ -1315,6 +1332,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
 	else
 		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
+out_put_dev:
 	/* We convert the xdp_buff back to an xdp_md before checking the return
 	 * code so the reference count of any held netdevice will be decremented
 	 * even if the test run failed.
diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c
index a966a6ec8263..257cae9f1569 100644
--- a/net/bridge/br_vlan_tunnel.c
+++ b/net/bridge/br_vlan_tunnel.c
@@ -189,7 +189,6 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
 	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 	struct metadata_dst *tunnel_dst;
 	__be64 tunnel_id;
-	int err;
 
 	if (!vlan)
 		return 0;
@@ -199,9 +198,13 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
 		return 0;
 
 	skb_dst_drop(skb);
-	err = skb_vlan_pop(skb);
-	if (err)
-		return err;
+	/* For 802.1ad (QinQ), skb_vlan_pop() incorrectly moves the C-VLAN
+	 * from payload to hwaccel after clearing S-VLAN. We only need to
+	 * clear the hwaccel S-VLAN; the C-VLAN must stay in payload for
+	 * correct VXLAN encapsulation. This is also correct for 802.1Q
+	 * where no C-VLAN exists in payload.
+	 */
+	__vlan_hwaccel_clear_tag(skb);
 
 	if (BR_INPUT_SKB_CB(skb)->backup_nhid) {
 		__set_bit(IP_TUNNEL_KEY_BIT, flags);
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index 9b72d118d756..1186326b0f2e 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1571,6 +1571,8 @@ int j1939_session_activate(struct j1939_session *session)
 	if (active) {
 		j1939_session_put(active);
 		ret = -EAGAIN;
+	} else if (priv->ndev->reg_state != NETREG_REGISTERED) {
+		ret = -ENODEV;
 	} else {
 		WARN_ON_ONCE(session->state != J1939_SESSION_NEW);
 		list_add_tail(&session->active_session_list_entry,
diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
index 3ae0cca89ab8..f163283abc4e 100644
--- a/net/ceph/messenger_v2.c
+++ b/net/ceph/messenger_v2.c
@@ -2405,7 +2405,9 @@ static int process_auth_done(struct ceph_connection *con, void *p, void *end)
 
 	ceph_decode_64_safe(&p, end, global_id, bad);
 	ceph_decode_32_safe(&p, end, con->v2.con_mode, bad);
+
 	ceph_decode_32_safe(&p, end, payload_len, bad);
+	ceph_decode_need(&p, end, payload_len, bad);
 
 	dout("%s con %p global_id %llu con_mode %d payload_len %d\n",
 	     __func__, con, global_id, con->v2.con_mode, payload_len);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index ab66b599ac47..3909a8156690 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -1417,7 +1417,7 @@ static int mon_handle_auth_done(struct ceph_connection *con,
 	if (!ret)
 		finish_hunting(monc);
 	mutex_unlock(&monc->mutex);
-	return 0;
+	return ret;
 }
 
 static int mon_handle_auth_bad_method(struct ceph_connection *con,
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index abac770bc0b4..2f1c461e0ffc 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1611,6 +1611,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
 	struct ceph_pg_pool_info *pi;
 	struct ceph_pg pgid, last_pgid;
 	struct ceph_osds up, acting;
+	bool should_be_paused;
 	bool is_read = t->flags & CEPH_OSD_FLAG_READ;
 	bool is_write = t->flags & CEPH_OSD_FLAG_WRITE;
 	bool force_resend = false;
@@ -1679,10 +1680,16 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
 		       &last_pgid))
 		force_resend = true;
 
-	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
-		t->paused = false;
+	should_be_paused = target_should_be_paused(osdc, t, pi);
+	if (t->paused && !should_be_paused) {
 		unpaused = true;
 	}
+	if (t->paused != should_be_paused) {
+		dout("%s t %p paused %d -> %d\n", __func__, t, t->paused,
+		     should_be_paused);
+		t->paused = should_be_paused;
+	}
+
 	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
 			ceph_osds_changed(&t->acting, &acting,
 					  t->used_replica || any_change);
@@ -4306,6 +4313,9 @@ static void osd_fault(struct ceph_connection *con)
 		goto out_unlock;
 	}
 
+	osd->o_sparse_op_idx = -1;
+	ceph_init_sparse_read(&osd->o_sparse_read);
+
 	if (!reopen_osd(osd))
 		kick_osd_requests(osd);
 	maybe_request_map(osdc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index f5f60deb680a..7c76eb9d6cee 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -241,22 +241,26 @@ static struct crush_choose_arg_map *alloc_choose_arg_map(void)
 
 static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
 {
-	if (arg_map) {
-		int i, j;
+	int i, j;
+
+	if (!arg_map)
+		return;
 
-		WARN_ON(!RB_EMPTY_NODE(&arg_map->node));
+	WARN_ON(!RB_EMPTY_NODE(&arg_map->node));
 
+	if (arg_map->args) {
 		for (i = 0; i < arg_map->size; i++) {
 			struct crush_choose_arg *arg = &arg_map->args[i];
 
-			for (j = 0; j < arg->weight_set_size; j++)
-				kfree(arg->weight_set[j].weights);
-			kfree(arg->weight_set);
+			if (arg->weight_set) {
+				for (j = 0; j < arg->weight_set_size; j++)
+					kfree(arg->weight_set[j].weights);
+				kfree(arg->weight_set);
+			}
 			kfree(arg->ids);
 		}
 		kfree(arg_map->args);
-		kfree(arg_map);
 	}
+	kfree(arg_map);
 }
 
 DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,
@@ -1979,11 +1983,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bool msgr2,
 			 sizeof(u64) + sizeof(u32), e_inval);
 	ceph_decode_copy(p, &fsid, sizeof(fsid));
 	epoch = ceph_decode_32(p);
-	BUG_ON(epoch != map->epoch+1);
 	ceph_decode_copy(p, &modified, sizeof(modified));
 	new_pool_max = ceph_decode_64(p);
 	new_flags = ceph_decode_32(p);
 
+	if (epoch != map->epoch + 1)
+		goto e_inval;
+
 	/* full map? */
 	ceph_decode_32_safe(p, end, len, e_inval);
 	if (len > 0) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6a92c03ee6f4..c3e1395d8ac5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4585,12 +4585,14 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
 {
 	struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
 	unsigned int tnl_hlen = skb_tnl_header_len(skb);
-	unsigned int delta_truesize = 0;
 	unsigned int delta_len = 0;
 	struct sk_buff *tail = NULL;
 	struct sk_buff *nskb, *tmp;
 	int len_diff, err;
 
+	/* Only skb_gro_receive_list generated skbs arrive here */
+	DEBUG_NET_WARN_ON_ONCE(!(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST));
+
 	skb_push(skb, -skb_network_offset(skb) + offset);
 
 	/* Ensure the head is writeable before touching the shared info */
@@ -4604,8 +4606,9 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
 		nskb = list_skb;
 		list_skb = list_skb->next;
 
+		DEBUG_NET_WARN_ON_ONCE(nskb->sk);
+
 		err = 0;
-		delta_truesize += nskb->truesize;
 		if (skb_shared(nskb)) {
 			tmp = skb_clone(nskb, GFP_ATOMIC);
 			if (tmp) {
@@ -4648,7 +4651,6 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
 		goto err_linearize;
 	}
 
-	skb->truesize = skb->truesize - delta_truesize;
 	skb->data_len = skb->data_len - delta_len;
 	skb->len = skb->len - delta_len;
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 97cc796a1d33..58f3f0d97954 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3769,7 +3769,7 @@ void sock_enable_timestamp(struct sock *sk, enum sock_flags flag)
 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
 		       int level, int type)
 {
-	struct sock_exterr_skb *serr;
+	struct sock_extended_err ee;
 	struct sk_buff *skb;
 	int copied, err;
 
@@ -3789,8 +3789,9 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
 
 	sock_recv_timestamp(msg, sk, skb);
 
-	serr = SKB_EXT_ERR(skb);
-	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
+	/* We must use a bounce buffer for CONFIG_HARDENED_USERCOPY=y */
+	ee = SKB_EXT_ERR(skb)->ee;
+	put_cmsg(msg, level, type, sizeof(ee), &ee);
 	msg->msg_flags |= MSG_ERRQUEUE;
 
 	err = copied;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 8fb48f42581c..7822b2144514 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -564,7 +564,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
 
 	skb_reserve(skb, hlen);
 	skb_reset_network_header(skb);
-	arp = skb_put(skb, arp_hdr_len(dev));
+	skb_put(skb, arp_hdr_len(dev));
 	skb->dev = dev;
 	skb->protocol = htons(ETH_P_ARP);
 	if (!src_hw)
@@ -572,12 +572,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
 	if (!dest_hw)
 		dest_hw = dev->broadcast;
 
-	/*
-	 *	Fill the device header for the ARP frame
+	/* Fill the device header for the ARP frame.
+	 * Note: skb->head can be changed.
 	 */
 	if (dev_hard_header(skb, dev, ptype, dest_hw, src_hw, skb->len) < 0)
 		goto out;
 
+	arp = arp_hdr(skb);
 	/*
 	 *	Fill out the arp protocol part.
 	 *
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 37a3fa98d904..f62b17f59bb4 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -839,10 +839,8 @@ out:
 out_free:
 	if (free)
 		kfree(ipc.opt);
-	if (!err) {
-		icmp_out_count(sock_net(sk), user_icmph.type);
+	if (!err)
 		return len;
-	}
 	return err;
 
 do_confirm:
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 9c515fb8ebe1..9142d748a6a7 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2395,6 +2395,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
 
 	if (chanctx_conf)
 		chandef = &chanctx_conf->def;
+	else if (local->emulate_chanctx)
+		chandef = &local->hw.conf.chandef;
 	else
 		goto fail_rcu;
 
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 3c1b155f7a0e..828d5c64c68a 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -229,6 +229,7 @@ static int __nf_conncount_add(struct net *net,
 		nf_ct_put(found_ct);
 	}
 
+	list->last_gc = (u32)jiffies;
 add_new_node:
 	if (WARN_ON_ONCE(list->count > INT_MAX)) {
@@ -248,7 +249,6 @@ add_new_node:
 	conn->jiffies32 = (u32)jiffies;
 	list_add_tail(&conn->node, &list->head);
 	list->count++;
-	list->last_gc = (u32)jiffies;
 
 out_put:
 	if (refcounted)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index b4741fb33798..c3613d8e7d72 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -120,6 +120,29 @@ static void nft_validate_state_update(struct nft_table *table, u8 new_validate_state)
 	table->validate_state = new_validate_state;
 }
+
+static bool nft_chain_vstate_valid(const struct nft_ctx *ctx,
+				   const struct nft_chain *chain)
+{
+	const struct nft_base_chain *base_chain;
+	enum nft_chain_types type;
+	u8 hooknum;
+
+	if (WARN_ON_ONCE(!nft_is_base_chain(ctx->chain)))
+		return false;
+
+	base_chain = nft_base_chain(ctx->chain);
+	hooknum = base_chain->ops.hooknum;
+	type = base_chain->type->type;
+
+	/* chain is already validated for this call depth */
+	if (chain->vstate.depth >= ctx->level &&
+	    chain->vstate.hook_mask[type] & BIT(hooknum))
+		return true;
+
+	return false;
+}
+
 static void nf_tables_trans_destroy_work(struct work_struct *w);
 static void nft_trans_gc_work(struct work_struct *work);
@@ -3898,6 +3921,29 @@ static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule)
 	nf_tables_rule_destroy(ctx, rule);
 }
 
+static void nft_chain_vstate_update(const struct nft_ctx *ctx, struct nft_chain *chain)
+{
+	const struct nft_base_chain *base_chain;
+	enum nft_chain_types type;
+	u8 hooknum;
+
+	/* ctx->chain must hold the calling base chain. */
+	if (WARN_ON_ONCE(!nft_is_base_chain(ctx->chain))) {
+		memset(&chain->vstate, 0, sizeof(chain->vstate));
+		return;
+	}
+
+	base_chain = nft_base_chain(ctx->chain);
+	hooknum = base_chain->ops.hooknum;
+	type = base_chain->type->type;
+
+	BUILD_BUG_ON(BIT(NF_INET_NUMHOOKS) > U8_MAX);
+
+	chain->vstate.hook_mask[type] |= BIT(hooknum);
+	if (chain->vstate.depth < ctx->level)
+		chain->vstate.depth = ctx->level;
+}
+
 /**
  * nft_chain_validate - loop detection and hook validation
  *
 * @ctx: context containing call depth and base chain
 * @chain: chain to validate
 *
 * Walk through the rules of the given chain and chase all jumps/gotos
 * and set lookups until either the jump limit is hit or all reachable
 * chains have been validated.
 */
-int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
+int nft_chain_validate(const struct nft_ctx *ctx, struct nft_chain *chain)
 {
 	struct nft_expr *expr, *last;
 	struct nft_rule *rule;
 	int err;
 
+	BUILD_BUG_ON(NFT_JUMP_STACK_SIZE > 255);
 	if (ctx->level == NFT_JUMP_STACK_SIZE)
 		return -EMLINK;
 
+	if (ctx->level > 0) {
+		/* jumps to base chains are not allowed. */
+		if (nft_is_base_chain(chain))
+			return -ELOOP;
+
+		if (nft_chain_vstate_valid(ctx, chain))
+			return 0;
+	}
+
 	list_for_each_entry(rule, &chain->rules, list) {
 		if (fatal_signal_pending(current))
 			return -EINTR;
@@ -3936,6 +3992,7 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
 		}
 	}
 
+	nft_chain_vstate_update(ctx, chain);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nft_chain_validate);
@@ -3947,7 +4004,7 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
 		.net	= net,
 		.family	= table->family,
 	};
-	int err;
+	int err = 0;
 
 	list_for_each_entry(chain, &table->chains, list) {
 		if (!nft_is_base_chain(chain))
@@ -3956,12 +4013,16 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
 		ctx.chain = chain;
 		err = nft_chain_validate(&ctx, chain);
 		if (err < 0)
-			return err;
+			goto err;
 
 		cond_resched();
 	}
 
-	return 0;
+err:
+	list_for_each_entry(chain, &table->chains, list)
+		memset(&chain->vstate, 0, sizeof(chain->vstate));
+
+	return err;
 }
 
 int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
@@ -4196,7 +4257,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
 
 	if (!nft_use_inc(&chain->use)) {
 		err = -EMFILE;
-		goto err_release_rule;
+		goto err_destroy_flow;
 	}
 
 	if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
@@ -4246,6 +4307,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
 
 err_destroy_flow_rule:
 	nft_use_dec_restore(&chain->use);
+err_destroy_flow:
 	if (flow)
 		nft_flow_rule_destroy(flow);
 err_release_rule:
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index 793790d79d13..642152e9c322 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -1303,8 +1303,8 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
 		else
 			dup_end = dup_key;
 
-		if (!memcmp(start, dup_key->data, sizeof(*dup_key->data)) &&
-		    !memcmp(end, dup_end->data, sizeof(*dup_end->data))) {
+		if (!memcmp(start, dup_key->data, set->klen) &&
+		    !memcmp(end, dup_end->data, set->klen)) {
 			*elem_priv = &dup->priv;
 			return -EEXIST;
 		}
diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
index 5d3e51825985..4d3e5a31b412 100644
--- a/net/netfilter/nft_synproxy.c
+++ b/net/netfilter/nft_synproxy.c
@@ -48,7 +48,7 @@ static void nft_synproxy_eval_v4(const struct nft_synproxy *priv,
 				 struct tcphdr *_tcph,
 				 struct synproxy_options *opts)
 {
-	struct nf_synproxy_info info = priv->info;
+	struct nf_synproxy_info info = READ_ONCE(priv->info);
 	struct net *net = nft_net(pkt);
 	struct synproxy_net *snet = synproxy_pernet(net);
 	struct sk_buff *skb = pkt->skb;
@@ -79,7 +79,7 @@ static void nft_synproxy_eval_v6(const struct nft_synproxy *priv,
 				 struct tcphdr *_tcph,
 				 struct synproxy_options *opts)
 {
-	struct nf_synproxy_info info = priv->info;
+	struct nf_synproxy_info info = READ_ONCE(priv->info);
 	struct net *net = nft_net(pkt);
 	struct synproxy_net *snet = synproxy_pernet(net);
 	struct sk_buff *skb = pkt->skb;
@@ -340,7 +340,7 @@ static void nft_synproxy_obj_update(struct nft_object *obj,
 	struct nft_synproxy *newpriv = nft_obj_data(newobj);
 	struct nft_synproxy *priv = nft_obj_data(obj);
 
-	priv->info = newpriv->info;
+	WRITE_ONCE(priv->info, newpriv->info);
 }
 
 static struct nft_object_type nft_synproxy_obj_type;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 5b43578493ef..998030d6ce2d 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1484,7 +1484,7 @@ static void qfq_reset_qdisc(struct Qdisc *sch)
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
-			if (cl->qdisc->q.qlen > 0)
+			if (cl_is_active(cl))
 				qfq_deactivate_class(q, cl);
 
 			qdisc_reset(cl->qdisc);
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 0af7b3c52967..5c6a4c9a2df7 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -123,17 +123,19 @@ static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
 /* We assume that the socket is already connected */
 static struct net_device *get_netdev_for_sock(struct sock *sk)
 {
-	struct dst_entry *dst = sk_dst_get(sk);
-	struct net_device *netdev = NULL;
+	struct net_device *dev, *lowest_dev = NULL;
+	struct dst_entry *dst;
 
-	if (likely(dst)) {
-		netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
-		dev_hold(netdev);
+	rcu_read_lock();
+	dst = __sk_dst_get(sk);
+	dev = dst ? dst_dev_rcu(dst) : NULL;
+	if (likely(dev)) {
+		lowest_dev = netdev_sk_get_lowest_dev(dev, sk);
+		dev_hold(lowest_dev);
 	}
+	rcu_read_unlock();
 
-	dst_release(dst);
-
-	return netdev;
+	return lowest_dev;
 }
 
 static void destroy_record(struct tls_record_info *record)
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 621be9be64f6..282d97323324 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1742,6 +1742,10 @@ static int vsock_accept(struct socket *sock, struct socket *newsock,
 	} else {
 		newsock->state = SS_CONNECTED;
 		sock_graft(connected, newsock);
+
+		set_bit(SOCK_CUSTOM_SOCKOPT,
+			&connected->sk_socket->flags);
+
 		if (vsock_msgzerocopy_allow(vconnected->transport))
 			set_bit(SOCK_SUPPORT_ZC,
 				&connected->sk_socket->flags);
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 838ad6541a17..73a29ee3eff2 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -1103,6 +1103,10 @@ static int compat_standard_call(struct net_device *dev,
 		return ioctl_standard_call(dev, iwr, cmd, info, handler);
 
 	iwp_compat = (struct compat_iw_point *) &iwr->u.data;
+
+	/* struct iw_point has a 32bit hole on 64bit arches. */
+	memset(&iwp, 0, sizeof(iwp));
+
 	iwp.pointer = compat_ptr(iwp_compat->pointer);
 	iwp.length = iwp_compat->length;
 	iwp.flags = iwp_compat->flags;
diff --git a/net/wireless/wext-priv.c b/net/wireless/wext-priv.c
index 674d426a9d24..37d1147019c2 100644
--- a/net/wireless/wext-priv.c
+++ b/net/wireless/wext-priv.c
@@ -228,6 +228,10 @@ int compat_private_call(struct net_device *dev, struct iwreq *iwr,
 	struct iw_point iwp;
 
 	iwp_compat = (struct compat_iw_point *) &iwr->u.data;
+
+	/* struct iw_point has a 32bit hole on 64bit arches. */
+	memset(&iwp, 0, sizeof(iwp));
+
 	iwp.pointer = compat_ptr(iwp_compat->pointer);
 	iwp.length = iwp_compat->length;
 	iwp.flags = iwp_compat->flags;
