diff options
| author | Jakub Kicinski <kuba@kernel.org> | 2026-02-24 17:16:18 -0800 |
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2026-02-24 17:16:19 -0800 |
| commit | 54ef3e6bbeb2b66ddefebc2155bbea6f32ea0bbb (patch) | |
| tree | 2823acb3b64d0ba0e3de6b18c0077fef9c0a729e | |
| parent | f033335937d6f72a13bb38d82422eef30da31972 (diff) | |
| parent | fcd3d039fab693df3d41ac9bcb12fb4e8ddd69fe (diff) | |
Merge branch 'tcp-rework-tcp_v-4-6-_send_check'
Eric Dumazet says:
====================
tcp: rework tcp_v{4,6}_send_check()
tcp_v{4,6}_send_check() are only called from __tcp_transmit_skb()
They currently are in different files (tcp_ipv4.c and tcp_ipv6.c),
and are thus out of line.
This series moves them close to their caller so that the compiler
can inline them.
For all patches in the series:
$ scripts/bloat-o-meter -t vmlinux.0 vmlinux.3
add/remove: 0/2 grow/shrink: 1/3 up/down: 102/-178 (-76)
Function old new delta
__tcp_transmit_skb 3321 3423 +102
tcp_v4_send_check 136 132 -4
__tcp_v4_send_check 130 121 -9
mptcp_subflow_init 777 763 -14
__pfx_tcp_v6_send_check 16 - -16
tcp_v6_send_check 135 - -135
Total: Before=25143100, After=25143024, chg -0.00%
====================
Link: https://patch.msgid.link/20260223100729.3761597-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
| -rw-r--r-- | include/net/inet_connection_sock.h | 3 | ||||
| -rw-r--r-- | include/net/tcp.h | 12 | ||||
| -rw-r--r-- | net/ipv4/tcp_ipv4.c | 19 | ||||
| -rw-r--r-- | net/ipv4/tcp_output.c | 27 | ||||
| -rw-r--r-- | net/ipv6/tcp_ipv6.c | 7 | ||||
| -rw-r--r-- | net/mptcp/subflow.c | 1 | ||||
| -rw-r--r-- | net/tls/tls_device_fallback.c | 3 |
7 files changed, 33 insertions, 39 deletions
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index ecb362025c4e..bbc9355871c7 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -34,7 +34,7 @@ struct tcp_congestion_ops; */ struct inet_connection_sock_af_ops { int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl); - void (*send_check)(struct sock *sk, struct sk_buff *skb); + u16 net_header_len; int (*rebuild_header)(struct sock *sk); void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb); int (*conn_request)(struct sock *sk, struct sk_buff *skb); @@ -43,7 +43,6 @@ struct inet_connection_sock_af_ops { struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req); - u16 net_header_len; int (*setsockopt)(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int (*getsockopt)(struct sock *sk, int level, int optname, diff --git a/include/net/tcp.h b/include/net/tcp.h index 40e72b9cb85f..dfcd38089f11 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -531,7 +531,6 @@ u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops, * TCP v4 functions exported for the inet6 API */ -void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); void tcp_v4_mtu_reduced(struct sock *sk); void tcp_req_err(struct sock *sk, u32 seq, bool abort); void tcp_ld_RTO_revert(struct sock *sk, u32 seq); @@ -1132,7 +1131,6 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb) extern const struct inet_connection_sock_af_ops ipv6_specific; -INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb)); void tcp_v6_early_demux(struct sk_buff *skb); @@ -2382,7 +2380,15 @@ void tcp_gro_complete(struct sk_buff *skb); static inline void tcp_gro_complete(struct sk_buff *skb) { } #endif -void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); +static inline void 
__tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, + __be32 daddr) +{ + struct tcphdr *th = tcp_hdr(skb); + + th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0); + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct tcphdr, check); +} static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) { diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 6264fc0b2be5..bd613e401d48 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -661,24 +661,6 @@ out: return 0; } -void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr) -{ - struct tcphdr *th = tcp_hdr(skb); - - th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0); - skb->csum_start = skb_transport_header(skb) - skb->head; - skb->csum_offset = offsetof(struct tcphdr, check); -} - -/* This routine computes an IPv4 TCP checksum. */ -void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) -{ - const struct inet_sock *inet = inet_sk(sk); - - __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr); -} -EXPORT_IPV6_MOD(tcp_v4_send_check); - #define REPLY_OPTIONS_LEN (MAX_TCP_OPTION_SPACE / sizeof(__be32)) static bool tcp_v4_ao_sign_reset(const struct sock *sk, struct sk_buff *skb, @@ -2423,7 +2405,6 @@ EXPORT_IPV6_MOD(inet_sk_rx_dst_set); const struct inet_connection_sock_af_ops ipv4_specific = { .queue_xmit = ip_queue_xmit, - .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, .sk_rx_dst_set = inet_sk_rx_dst_set, .conn_request = tcp_v4_conn_request, diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 326b58ff1118..1ef419c66a0e 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1496,7 +1496,23 @@ static void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb) INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); 
-INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)); + +/* This routine computes an IPv4 TCP checksum. */ +static void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) +{ + const struct inet_sock *inet = inet_sk(sk); + + __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr); +} + +#if IS_ENABLED(CONFIG_IPV6) +#include <net/ip6_checksum.h> + +static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) +{ + __tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr); +} +#endif /* This routine actually transmits TCP packets queued in by * tcp_do_sendmsg(). This is used by both the initial @@ -1659,9 +1675,12 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, /* BPF prog is the last one writing header option */ bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts); - INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check, - tcp_v6_send_check, tcp_v4_send_check, - sk, skb); +#if IS_ENABLED(CONFIG_IPV6) + if (likely(icsk->icsk_af_ops->net_header_len == sizeof(struct ipv6hdr))) + tcp_v6_send_check(sk, skb); + else +#endif + tcp_v4_send_check(sk, skb); if (likely(tcb->tcp_flags & TCPHDR_ACK)) tcp_event_ack_sent(sk, rcv_nxt); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index d10487b4e5bf..f17da56b449e 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -2015,14 +2015,8 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = { .twsk_obj_size = sizeof(struct tcp6_timewait_sock), }; -INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) -{ - __tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr); -} - const struct inet_connection_sock_af_ops ipv6_specific = { .queue_xmit = inet6_csk_xmit, - .send_check = tcp_v6_send_check, .rebuild_header = inet6_sk_rebuild_header, .sk_rx_dst_set = inet6_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, @@ -2054,7 +2048,6 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = { */ static 
const struct inet_connection_sock_af_ops ipv6_mapped = { .queue_xmit = ip_queue_xmit, - .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, .sk_rx_dst_set = inet_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index f66129f1e649..dd79c5b37a6b 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -2190,7 +2190,6 @@ void __init mptcp_subflow_init(void) subflow_v6m_specific = subflow_v6_specific; subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit; - subflow_v6m_specific.send_check = ipv4_specific.send_check; subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len; subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced; subflow_v6m_specific.rebuild_header = subflow_rebuild_header; diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index 03d508a45aae..de7d86bdd7ec 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c @@ -149,9 +149,6 @@ static int tls_enc_records(struct aead_request *aead_req, return rc; } -/* Can't use icsk->icsk_af_ops->send_check here because the ip addresses - * might have been changed by NAT. - */ static void update_chksum(struct sk_buff *skb, int headln) { struct tcphdr *th = tcp_hdr(skb); |
