// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/indirect_call_wrapper.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

/* Helper returning the inet6 address from a given tcp socket.
 * It can be used in the TCP stack instead of inet6_sk(sk).
 * This avoids a dereference and allows compiler optimizations.
 * It is a specialized version of inet6_sk_generic().
 */
static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
{
	unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo);

	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}

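/* Cache the received route on the socket so that early demux can reuse it.
 * A reference on the dst is taken (dst_hold_safe) and the IPv6 route cookie
 * is stored so the cached entry can be validated later.
 */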
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

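/* Initial sequence number and timestamp offset for a passive connection.
 * Both come from the secure_seq helpers, which mix the addresses (and, for
 * the sequence number, the ports) with a random secret so the values are
 * hard to predict for an off-path attacker.
 */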
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}

static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}

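/* Pre-connect hook run before tcp_v6_connect(): gives BPF_CGROUP_INET6_CONNECT
 * programs a chance to inspect or rewrite the destination address before any
 * route lookup is done.
 */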
static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v6_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
}

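/* connect() handler for AF_INET6 TCP sockets.  Validates the destination,
 * handles flow labels and scope ids, falls back to tcp_v4_connect() for
 * v4-mapped destinations, performs the route lookup, picks the source
 * address and initial sequence number, and finally kicks off the SYN via
 * tcp_connect().
 */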
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
							 sk->sk_v6_daddr.s6_addr32,
							 inet->inet_sport,
							 inet->inet_dport);
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

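/* Apply a deferred PMTU update: re-validate the route with the new MTU from
 * tp->mtu_info and, if the cached path MTU is now too large, shrink the MSS
 * and retransmit outstanding data.
 */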
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

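/* ICMPv6 error handler for TCP.  Looks up the socket the error refers to and
 * dispatches on the error type: redirects update the route, "packet too big"
 * triggers (possibly deferred) PMTU handling, and other errors are reported
 * to the socket, aborting connections that are still in the SYN handshake.
 */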
static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		      u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return -ENOENT;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, fatal);
		return 0;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (see tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = tcp_inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}

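/* Build and send a SYN-ACK for the given request socket.  A route is looked
 * up if the caller did not supply one, the segment is built by
 * tcp_make_synack(), and it is transmitted with the listener's IPv6 options
 * (or the request's own saved options) via ip6_xmit().
 */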
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

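/* setsockopt(TCP_MD5SIG / TCP_MD5SIG_EXT) handler.  Validates the peer
 * address (and, for TCP_MD5SIG_EXT, the optional prefix length) and then
 * adds or deletes the key; a zero key length means delete.  Keys for
 * v4-mapped peers are stored as AF_INET keys.
 *
 * Illustrative userspace use (sketch only, error handling omitted):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	memcpy(&md5.tcpm_addr, &peer_sin6, sizeof(peer_sin6));
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */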
static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, cmd.tcpm_key,
				      cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, cmd.tcpm_key,
			      cmd.tcpm_keylen, GFP_KERNEL);
}

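/* Feed the TCP-MD5 pseudo-header into the hash: an IPv6 pseudo-header
 * (saddr, daddr, length, protocol) followed by a copy of the TCP header
 * with its checksum field zeroed, as required by RFC 2385.
 */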
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

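/* Verify the TCP-MD5 signature of an incoming segment against the key we
 * have configured for its source address.  Returns true if the segment must
 * be dropped: a key exists but no option was sent, an option was sent but no
 * key exists, or the computed digest does not match.
 */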
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

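/* Fill the IPv6-specific parts of a new request sock from the incoming SYN:
 * remote and local addresses, the inbound interface for link-local peers,
 * and a reference to the skb when IPv6 control-message options need to be
 * replayed to the user later.
 */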
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};

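/* Common helper used for both RSTs and bare ACKs that are not tied to a full
 * socket (e.g. for TIME-WAIT or request socks).  It builds a minimal TCP
 * header (optionally with timestamps and an MD5 option), routes it with the
 * per-netns control socket and transmits it with ip6_xmit().
 */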
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;
	__u32 mark = 0;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	if (sk)
		mark = (sk->sk_state == TCP_TIME_WAIT) ?
			inet_twsk(sk)->tw_mark : sk->sk_mark;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass the control socket to ip6_dst_lookup_flow() even for a RST;
	 * the underlying function uses it to retrieve the network namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

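/* Send a RST in response to the given segment.  Never RST a RST.  When
 * TCP-MD5 is in use, the reset is signed with the key looked up either on
 * the socket itself or, if no full socket is available, on a matching
 * listener, so the peer does not silently drop it.
 */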
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif = 0;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key, and no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source),
					    tcp_v6_iif_l3_slave(skb),
					    tcp_v6_sdif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	if (sk) {
		oif = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

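/* ACK generation for sockets that are not full established sockets:
 * tcp_v6_timewait_ack() answers on behalf of a TIME-WAIT socket and
 * tcp_v6_reqsk_send_ack() on behalf of a request sock, both reusing
 * tcp_v6_send_ack() above.
 */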
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
			0, 0);
}

static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

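/* Entry point for an incoming SYN on a listening socket.  Packets arriving
 * over the v4-mapped path are handed to tcp_v4_conn_request(); everything
 * else goes through the generic tcp_conn_request() with the IPv6 request
 * sock ops declared above.
 */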
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1045{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046 if (skb->protocol == htons(ETH_P_IP))
1047 return tcp_v4_conn_request(sk, skb);
1048
1049 if (!ipv6_unicast_destination(skb))
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001050 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001051
Octavian Purdila1fb6f152014-06-25 17:10:02 +03001052 return tcp_conn_request(&tcp6_request_sock_ops,
1053 &tcp_request_sock_ipv6_ops, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054
Linus Torvalds1da177e2005-04-16 15:20:36 -07001055drop:
Eric Dumazet9caad862016-04-01 08:52:20 -07001056 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001057 return 0; /* don't send reset */
1058}
1059
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001060static void tcp_v6_restore_cb(struct sk_buff *skb)
1061{
1062 /* We need to move the header back to the beginning if xfrm6_policy_check()
1063 * and tcp_v6_fill_cb() are going to be called again.
1064 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1065 */
1066 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1067 sizeof(struct inet6_skb_parm));
1068}
1069
Eric Dumazet0c271712015-09-29 07:42:48 -07001070static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001071 struct request_sock *req,
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001072 struct dst_entry *dst,
1073 struct request_sock *req_unhash,
1074 bool *own_req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075{
Eric Dumazet634fb9792013-10-09 15:21:29 -07001076 struct inet_request_sock *ireq;
Eric Dumazet0c271712015-09-29 07:42:48 -07001077 struct ipv6_pinfo *newnp;
Eric Dumazet93a77c12019-03-19 07:01:08 -07001078 const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001079 struct ipv6_txoptions *opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080 struct inet_sock *newinet;
1081 struct tcp_sock *newtp;
1082 struct sock *newsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001083#ifdef CONFIG_TCP_MD5SIG
1084 struct tcp_md5sig_key *key;
1085#endif
Neal Cardwell3840a062012-06-28 12:34:19 +00001086 struct flowi6 fl6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087
1088 if (skb->protocol == htons(ETH_P_IP)) {
1089 /*
1090 * v6 mapped
1091 */
1092
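		/* The SYN reached this AF_INET6 listener over IPv4, i.e. via
		 * a v4-mapped address (::ffff:a.b.c.d).  Let the IPv4 code
		 * build the child socket, then point it at the mapped ops
		 * below so it keeps using the IPv4 paths.
		 */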
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001093 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1094 req_unhash, own_req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095
Ian Morris63159f22015-03-29 14:00:04 +01001096 if (!newsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001097 return NULL;
1098
Eric Dumazet93a77c12019-03-19 07:01:08 -07001099 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001100
1101 newinet = inet_sk(newsk);
Eric Dumazet93a77c12019-03-19 07:01:08 -07001102 newnp = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103 newtp = tcp_sk(newsk);
1104
1105 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1106
Eric Dumazetd1e559d2015-03-18 14:05:35 -07001107 newnp->saddr = newsk->sk_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001108
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001109 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001111#ifdef CONFIG_TCP_MD5SIG
1112 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1113#endif
1114
WANG Cong83eadda2017-05-09 16:59:54 -07001115 newnp->ipv6_mc_list = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001116 newnp->ipv6_ac_list = NULL;
1117 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001118 newnp->pktoptions = NULL;
1119 newnp->opt = NULL;
Eric Dumazet89e41302019-03-19 05:45:35 -07001120 newnp->mcast_oif = inet_iif(skb);
1121 newnp->mcast_hops = ip_hdr(skb)->ttl;
1122 newnp->rcv_flowinfo = 0;
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001123 if (np->repflow)
Eric Dumazet89e41302019-03-19 05:45:35 -07001124 newnp->flow_label = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001126 /*
1127 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1128 * here, tcp_create_openreq_child now does this for us, see the comment in
1129 * that function for the gory details. -acme
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131
1132 /* This is a tricky place. Until this moment the IPv4 tcp
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001133 code worked with the IPv6 icsk.icsk_af_ops.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134 Sync it now.
1135 */
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001136 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137
1138 return newsk;
1139 }
1140
Eric Dumazet634fb9792013-10-09 15:21:29 -07001141 ireq = inet_rsk(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142
1143 if (sk_acceptq_is_full(sk))
1144 goto out_overflow;
1145
David S. Miller493f3772010-12-02 12:14:29 -08001146 if (!dst) {
Eric Dumazetf76b33c2015-09-29 07:42:42 -07001147 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
David S. Miller493f3772010-12-02 12:14:29 -08001148 if (!dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149 goto out;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001150 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151
1152 newsk = tcp_create_openreq_child(sk, req, skb);
Ian Morris63159f22015-03-29 14:00:04 +01001153 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001154 goto out_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001155
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001156 /*
1157 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1158 * count here, tcp_create_openreq_child now does this for us, see the
1159 * comment in that function for the gory details. -acme
1160 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161
Stephen Hemminger59eed272006-08-25 15:55:43 -07001162 newsk->sk_gso_type = SKB_GSO_TCPV6;
Eric Dumazet6bd4f352015-12-02 21:53:57 -08001163 ip6_dst_store(newsk, dst, NULL, NULL);
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001164 inet6_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001165
Eric Dumazet93a77c12019-03-19 07:01:08 -07001166 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167
1168 newtp = tcp_sk(newsk);
1169 newinet = inet_sk(newsk);
Eric Dumazet93a77c12019-03-19 07:01:08 -07001170 newnp = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171
1172 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1173
Eric Dumazet634fb9792013-10-09 15:21:29 -07001174 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1175 newnp->saddr = ireq->ir_v6_loc_addr;
1176 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1177 newsk->sk_bound_dev_if = ireq->ir_iif;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001179 /* Now IPv6 options...
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180
1181 First: no IPv4 options.
1182 */
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001183 newinet->inet_opt = NULL;
WANG Cong83eadda2017-05-09 16:59:54 -07001184 newnp->ipv6_mc_list = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001185 newnp->ipv6_ac_list = NULL;
Masayuki Nakagawad35690b2007-03-16 16:14:03 -07001186 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187
1188 /* Clone RX bits */
1189 newnp->rxopt.all = np->rxopt.all;
1190
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191 newnp->pktoptions = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001193 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001194 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001195 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001196 if (np->repflow)
1197 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001198
1199 /* Clone native IPv6 options from the listening socket (if any).
1200
1201 Yes, keeping a reference count would be much more clever,
1202 but we do one more thing here: reattach optmem
1203 to newsk.
1204 */
Huw Davies56ac42b2016-06-27 15:05:28 -04001205 opt = ireq->ipv6_opt;
1206 if (!opt)
1207 opt = rcu_dereference(np->opt);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001208 if (opt) {
1209 opt = ipv6_dup_options(newsk, opt);
1210 RCU_INIT_POINTER(newnp->opt, opt);
1211 }
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001212 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001213 if (opt)
1214 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1215 opt->opt_flen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216
Daniel Borkmann81164412015-01-05 23:57:48 +01001217 tcp_ca_openreq_child(newsk, dst);
1218
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219 tcp_sync_mss(newsk, dst_mtu(dst));
Eric Dumazet3541f9e2017-02-02 08:04:56 -08001220 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
Neal Cardwelld135c522012-04-22 09:45:47 +00001221
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 tcp_initialize_rcv_mss(newsk);
1223
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001224 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1225 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001227#ifdef CONFIG_TCP_MD5SIG
1228 /* Copy over the MD5 key from the original socket */
Wang Yufen4aa956d2014-03-29 09:27:29 +08001229 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
Ian Morris53b24b82015-03-29 14:00:05 +01001230 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001231 /* We're using one, so create a matching key
1232 * on the newsk structure. If we fail to get
1233 * memory, then we end up not copying the key
1234 * across. Shucks.
1235 */
Eric Dumazetefe42082013-10-03 15:42:29 -07001236 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
Ivan Delalande67973182017-06-15 18:07:06 -07001237 AF_INET6, 128, key->key, key->keylen,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001238 sk_gfp_mask(sk, GFP_ATOMIC));
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001239 }
1240#endif
1241
Balazs Scheidler093d2822010-10-21 13:06:43 +02001242 if (__inet_inherit_port(sk, newsk) < 0) {
Christoph Paasche337e242012-12-14 04:07:58 +00001243 inet_csk_prepare_forced_close(newsk);
1244 tcp_done(newsk);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001245 goto out;
1246 }
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001247 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001248 if (*own_req) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001249 tcp_move_syn(newtp, req);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001250
1251 /* Clone pktoptions received with SYN, if we own the req */
1252 if (ireq->pktopts) {
1253 newnp->pktoptions = skb_clone(ireq->pktopts,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001254 sk_gfp_mask(sk, GFP_ATOMIC));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001255 consume_skb(ireq->pktopts);
1256 ireq->pktopts = NULL;
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001257 if (newnp->pktoptions) {
1258 tcp_v6_restore_cb(newnp->pktoptions);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001259 skb_set_owner_r(newnp->pktoptions, newsk);
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001260 }
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001261 }
Eric Dumazetce105002015-10-30 09:46:12 -07001262 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263
1264 return newsk;
1265
1266out_overflow:
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001267 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001268out_nonewsk:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269 dst_release(dst);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001270out:
Eric Dumazet9caad862016-04-01 08:52:20 -07001271 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272 return NULL;
1273}
1274
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275/* The socket must have its spinlock held when we get
Eric Dumazete994b2f2015-10-02 11:43:39 -07001276 * here, unless it is a TCP_LISTEN socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277 *
1278 * We have a potential double-lock case here, so even when
1279 * doing backlog processing we use the BH locking scheme.
1280 * This is because we cannot sleep with the original spinlock
1281 * held.
1282 */
1283static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1284{
Eric Dumazet93a77c12019-03-19 07:01:08 -07001285 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 struct sk_buff *opt_skb = NULL;
Eric Dumazet93a77c12019-03-19 07:01:08 -07001287 struct tcp_sock *tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288
1289 /* Imagine: the socket is IPv6. An IPv4 packet arrives,
1290 goes to the IPv4 receive handler and is backlogged.
1291 From the backlog it always ends up here. Kerboom...
1292 Fortunately, tcp_rcv_established and rcv_established
1293 handle that correctly, but it is not the case with
1294 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1295 */
1296
1297 if (skb->protocol == htons(ETH_P_IP))
1298 return tcp_v4_do_rcv(sk, skb);
1299
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300 /*
1301 * socket locking is here for SMP purposes as backlog rcv
1302 * is currently called with bh processing disabled.
1303 */
1304
1305 /* Do Stevens' IPV6_PKTOPTIONS.
1306
1307 Yes, guys, this is the only place in our code where we
1308 can do it without affecting IPv4.
1309 The rest of the code is protocol independent,
1310 and I do not like the idea of uglifying IPv4.
1311
1312 Actually, the whole idea behind IPV6_PKTOPTIONS
1313 does not look very well thought out. For now we latch
1314 the options received in the last packet enqueued
1315 by tcp. Feel free to propose a better solution.
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001316 --ANK (980728)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317 */
1318 if (np->rxopt.all)
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001319 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320
1321 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet5d299f32012-08-06 05:09:33 +00001322 struct dst_entry *dst = sk->sk_rx_dst;
1323
Tom Herbertbdeab992011-08-14 19:45:55 +00001324 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001325 sk_mark_napi_id(sk, skb);
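		/* Revalidate the cached input route: if this packet arrived
		 * on a different interface or the dst failed its cookie
		 * check, drop the cached entry so a fresh one is installed.
		 */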
Eric Dumazet5d299f32012-08-06 05:09:33 +00001326 if (dst) {
1327 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1328 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1329 dst_release(dst);
1330 sk->sk_rx_dst = NULL;
1331 }
1332 }
1333
Yafang Shao3d97d882018-05-29 23:27:31 +08001334 tcp_rcv_established(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 if (opt_skb)
1336 goto ipv6_pktoptions;
1337 return 0;
1338 }
1339
Eric Dumazet12e25e12015-06-03 23:49:21 -07001340 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 goto csum_err;
1342
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001343 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001344 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1345
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 if (!nsk)
1347 goto discard;
1348
Weilong Chen4c99aa42013-12-19 18:44:34 +08001349 if (nsk != sk) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350 if (tcp_child_process(sk, nsk, skb))
1351 goto reset;
1352 if (opt_skb)
1353 __kfree_skb(opt_skb);
1354 return 0;
1355 }
Neil Horman47482f132011-04-06 13:07:09 -07001356 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001357 sock_rps_save_rxhash(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001359 if (tcp_rcv_state_process(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 goto reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 if (opt_skb)
1362 goto ipv6_pktoptions;
1363 return 0;
1364
1365reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001366 tcp_v6_send_reset(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367discard:
1368 if (opt_skb)
1369 __kfree_skb(opt_skb);
1370 kfree_skb(skb);
1371 return 0;
1372csum_err:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001373 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1374 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375 goto discard;
1376
1377
1378ipv6_pktoptions:
1379 /* What is this, you ask? We only latch the options here when:
1380
1381 1. the skb was enqueued by tcp,
1382 2. the skb was added to the tail of the read queue, not out of order,
1383 3. the socket is not in a passive state, and
1384 4. finally, it really contains options which the user wants to receive.
1385 */
1386 tp = tcp_sk(sk);
1387 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1388 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001389 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
Eric Dumazet870c3152014-10-17 09:17:20 -07001390 np->mcast_oif = tcp_v6_iif(opt_skb);
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001391 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001392 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
Florent Fourcot82e9f102013-12-08 15:46:59 +01001393 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
Florent Fourcot1397ed32013-12-08 15:46:57 +01001394 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001395 if (np->repflow)
1396 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
Eric Dumazeta2247722014-09-27 09:50:56 -07001397 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 skb_set_owner_r(opt_skb, sk);
Eric Dumazet8ce48622016-10-12 19:01:45 +02001399 tcp_v6_restore_cb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400 opt_skb = xchg(&np->pktoptions, opt_skb);
1401 } else {
1402 __kfree_skb(opt_skb);
1403 opt_skb = xchg(&np->pktoptions, NULL);
1404 }
1405 }
1406
Wei Yongjun800d55f2009-02-23 21:45:33 +00001407 kfree_skb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408 return 0;
1409}
1410
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001411static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1412 const struct tcphdr *th)
1413{
1414 /* This is tricky: we move IP6CB to its correct location inside
1415 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1416 * _decode_session6() uses IP6CB().
1417 * barrier() makes sure compiler won't play aliasing games.
1418 */
1419 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1420 sizeof(struct inet6_skb_parm));
1421 barrier();
1422
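	/* seq/end_seq bracket the sequence space this segment occupies:
	 * the payload is skb->len minus the header (th->doff 32-bit words),
	 * and SYN and FIN each consume one extra sequence number.
	 */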
1423 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1424 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1425 skb->len - th->doff*4);
1426 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1427 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1428 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1429 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1430 TCP_SKB_CB(skb)->sacked = 0;
Mike Maloney98aaa912017-08-22 17:08:48 -04001431 TCP_SKB_CB(skb)->has_rxtstamp =
1432 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001433}
1434
Paolo Abeni0e219ae2019-05-03 17:01:37 +02001435INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436{
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001437 struct sk_buff *skb_to_free;
David Ahern4297a0e2017-08-07 08:44:21 -07001438 int sdif = inet6_sdif(skb);
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001439 const struct tcphdr *th;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001440 const struct ipv6hdr *hdr;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001441 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 struct sock *sk;
1443 int ret;
Pavel Emelyanova86b1e32008-07-16 20:20:58 -07001444 struct net *net = dev_net(skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445
1446 if (skb->pkt_type != PACKET_HOST)
1447 goto discard_it;
1448
1449 /*
1450 * Count it even if it's bad.
1451 */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001452 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453
1454 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1455 goto discard_it;
1456
Eric Dumazetea1627c2016-05-13 09:16:40 -07001457 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458
Eric Dumazetea1627c2016-05-13 09:16:40 -07001459 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 goto bad_packet;
1461 if (!pskb_may_pull(skb, th->doff*4))
1462 goto discard_it;
1463
Tom Herberte4f45b72014-05-02 16:29:51 -07001464 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001465 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466
Eric Dumazetea1627c2016-05-13 09:16:40 -07001467 th = (const struct tcphdr *)skb->data;
Stephen Hemmingere802af92010-04-22 15:24:53 -07001468 hdr = ipv6_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001470lookup:
Craig Galleka5836362016-02-10 11:50:38 -05001471 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
David Ahern4297a0e2017-08-07 08:44:21 -07001472 th->source, th->dest, inet6_iif(skb), sdif,
Eric Dumazet3b24d852016-04-01 08:52:17 -07001473 &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474 if (!sk)
1475 goto no_tcp_socket;
1476
1477process:
1478 if (sk->sk_state == TCP_TIME_WAIT)
1479 goto do_time_wait;
1480
Eric Dumazet079096f2015-10-02 11:43:32 -07001481 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1482 struct request_sock *req = inet_reqsk(sk);
Eric Dumazete0f97592018-02-13 06:14:12 -08001483 bool req_stolen = false;
Eric Dumazet77166822016-02-18 05:39:18 -08001484 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07001485
1486 sk = req->rsk_listener;
Eric Dumazet079096f2015-10-02 11:43:32 -07001487 if (tcp_v6_inbound_md5_hash(sk, skb)) {
Eric Dumazete65c3322016-08-24 08:50:24 -07001488 sk_drops_add(sk, skb);
Eric Dumazet079096f2015-10-02 11:43:32 -07001489 reqsk_put(req);
1490 goto discard_it;
1491 }
Frank van der Linden4fd44a92018-06-12 23:09:37 +00001492 if (tcp_checksum_complete(skb)) {
1493 reqsk_put(req);
1494 goto csum_error;
1495 }
Eric Dumazet77166822016-02-18 05:39:18 -08001496 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Eric Dumazetf03f2e12015-10-14 11:16:27 -07001497 inet_csk_reqsk_queue_drop_and_put(sk, req);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001498 goto lookup;
1499 }
Eric Dumazet77166822016-02-18 05:39:18 -08001500 sock_hold(sk);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001501 refcounted = true;
Eric Dumazet1f3b3592017-09-08 12:44:47 -07001502 nsk = NULL;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001503 if (!tcp_filter(sk, skb)) {
1504 th = (const struct tcphdr *)skb->data;
1505 hdr = ipv6_hdr(skb);
1506 tcp_v6_fill_cb(skb, hdr, th);
Eric Dumazete0f97592018-02-13 06:14:12 -08001507 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001508 }
Eric Dumazet079096f2015-10-02 11:43:32 -07001509 if (!nsk) {
1510 reqsk_put(req);
Eric Dumazete0f97592018-02-13 06:14:12 -08001511 if (req_stolen) {
1512 /* Another cpu got exclusive access to req
1513 * and created a full blown socket.
1514 * Try to feed this packet to this socket
1515 * instead of discarding it.
1516 */
1517 tcp_v6_restore_cb(skb);
1518 sock_put(sk);
1519 goto lookup;
1520 }
Eric Dumazet77166822016-02-18 05:39:18 -08001521 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001522 }
1523 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001524 reqsk_put(req);
1525 tcp_v6_restore_cb(skb);
1526 } else if (tcp_child_process(sk, nsk, skb)) {
1527 tcp_v6_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08001528 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001529 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08001530 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07001531 return 0;
1532 }
1533 }
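	/* Enforce the application-set minimum hop limit (IPV6_MINHOPCOUNT),
	 * a generalized-TTL-security style check in the spirit of RFC 5082.
	 */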
Eric Dumazet93a77c12019-03-19 07:01:08 -07001534 if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001535 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingere802af92010-04-22 15:24:53 -07001536 goto discard_and_relse;
1537 }
1538
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1540 goto discard_and_relse;
1541
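	/* Segments failing the TCP MD5 signature check (RFC 2385) are
	 * dropped here without sending a reset.
	 */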
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001542 if (tcp_v6_inbound_md5_hash(sk, skb))
1543 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001544
Eric Dumazetac6e7802016-11-10 13:12:35 -08001545 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 goto discard_and_relse;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001547 th = (const struct tcphdr *)skb->data;
1548 hdr = ipv6_hdr(skb);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001549 tcp_v6_fill_cb(skb, hdr, th);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550
1551 skb->dev = NULL;
1552
Eric Dumazete994b2f2015-10-02 11:43:39 -07001553 if (sk->sk_state == TCP_LISTEN) {
1554 ret = tcp_v6_do_rcv(sk, skb);
1555 goto put_and_return;
1556 }
1557
1558 sk_incoming_cpu_update(sk);
1559
Fabio Olive Leite293b9c42006-09-25 22:28:47 -07001560 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07001561 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 ret = 0;
1563 if (!sock_owned_by_user(sk)) {
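		/* Detach any skb parked in the rx cache and free it only
		 * after the socket lock is dropped below, presumably to keep
		 * the locked section short and to free on the receiving cpu.
		 */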
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001564 skb_to_free = sk->sk_rx_skb_cache;
1565 sk->sk_rx_skb_cache = NULL;
Florian Westphale7942d02017-07-30 03:57:18 +02001566 ret = tcp_v6_do_rcv(sk, skb);
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001567 } else {
1568 if (tcp_add_backlog(sk, skb))
1569 goto discard_and_relse;
1570 skb_to_free = NULL;
Zhu Yi6b03a532010-03-04 18:01:41 +00001571 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 bh_unlock_sock(sk);
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001573 if (skb_to_free)
1574 __kfree_skb(skb_to_free);
Eric Dumazete994b2f2015-10-02 11:43:39 -07001575put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001576 if (refcounted)
1577 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 return ret ? -1 : 0;
1579
1580no_tcp_socket:
1581 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1582 goto discard_it;
1583
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001584 tcp_v6_fill_cb(skb, hdr, th);
1585
Eric Dumazet12e25e12015-06-03 23:49:21 -07001586 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001587csum_error:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001588 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001590 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001592 tcp_v6_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 }
1594
1595discard_it:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 kfree_skb(skb);
1597 return 0;
1598
1599discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07001600 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001601 if (refcounted)
1602 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 goto discard_it;
1604
1605do_time_wait:
1606 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001607 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 goto discard_it;
1609 }
1610
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001611 tcp_v6_fill_cb(skb, hdr, th);
1612
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001613 if (tcp_checksum_complete(skb)) {
1614 inet_twsk_put(inet_twsk(sk));
1615 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 }
1617
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001618 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 case TCP_TW_SYN:
1620 {
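		/* A new SYN hit a TIME_WAIT socket.  If a listener is still
		 * bound to the port, discard the timewait sock and reprocess
		 * the packet against that listener, letting the old
		 * connection tuple be reused.
		 */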
1621 struct sock *sk2;
1622
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001623 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
Craig Galleka5836362016-02-10 11:50:38 -05001624 skb, __tcp_hdrlen(th),
Tom Herbert5ba24952013-01-22 09:50:39 +00001625 &ipv6_hdr(skb)->saddr, th->source,
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001626 &ipv6_hdr(skb)->daddr,
David Ahern24b711e2018-07-19 12:41:18 -07001627 ntohs(th->dest),
1628 tcp_v6_iif_l3_slave(skb),
David Ahern4297a0e2017-08-07 08:44:21 -07001629 sdif);
Ian Morris53b24b82015-03-29 14:00:05 +01001630 if (sk2) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07001631 struct inet_timewait_sock *tw = inet_twsk(sk);
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07001632 inet_twsk_deschedule_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633 sk = sk2;
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001634 tcp_v6_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001635 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636 goto process;
1637 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 }
Gustavo A. R. Silva275757e62017-10-16 16:36:52 -05001639 /* to ACK */
1640 /* fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 case TCP_TW_ACK:
1642 tcp_v6_timewait_ack(sk, skb);
1643 break;
1644 case TCP_TW_RST:
Florian Westphal271c3b92015-12-21 21:29:26 +01001645 tcp_v6_send_reset(sk, skb);
1646 inet_twsk_deschedule_put(inet_twsk(sk));
1647 goto discard_it;
Wang Yufen4aa956d2014-03-29 09:27:29 +08001648 case TCP_TW_SUCCESS:
1649 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 }
1651 goto discard_it;
1652}
1653
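/* Early demux runs before the routing decision: look up an established
 * socket by its 4-tuple so the dst cached on that socket can be attached
 * to the skb and the per-packet route lookup skipped.
 */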
Paolo Abeni97ff7ff2019-05-03 17:01:38 +02001654INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
Eric Dumazetc7109982012-07-26 12:18:11 +00001655{
1656 const struct ipv6hdr *hdr;
1657 const struct tcphdr *th;
1658 struct sock *sk;
1659
1660 if (skb->pkt_type != PACKET_HOST)
1661 return;
1662
1663 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1664 return;
1665
1666 hdr = ipv6_hdr(skb);
1667 th = tcp_hdr(skb);
1668
1669 if (th->doff < sizeof(struct tcphdr) / 4)
1670 return;
1671
Eric Dumazet870c3152014-10-17 09:17:20 -07001672 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
Eric Dumazetc7109982012-07-26 12:18:11 +00001673 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1674 &hdr->saddr, th->source,
1675 &hdr->daddr, ntohs(th->dest),
David Ahern4297a0e2017-08-07 08:44:21 -07001676 inet6_iif(skb), inet6_sdif(skb));
Eric Dumazetc7109982012-07-26 12:18:11 +00001677 if (sk) {
1678 skb->sk = sk;
1679 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001680 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001681 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Neal Cardwellf3f12132012-10-22 21:41:48 +00001682
Eric Dumazetc7109982012-07-26 12:18:11 +00001683 if (dst)
Eric Dumazet93a77c12019-03-19 07:01:08 -07001684 dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie);
Eric Dumazetc7109982012-07-26 12:18:11 +00001685 if (dst &&
Neal Cardwellf3f12132012-10-22 21:41:48 +00001686 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
Eric Dumazetc7109982012-07-26 12:18:11 +00001687 skb_dst_set_noref(skb, dst);
1688 }
1689 }
1690}
1691
David S. Millerccb7c412010-12-01 18:09:13 -08001692static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1693 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1694 .twsk_unique = tcp_twsk_unique,
Wang Yufen4aa956d2014-03-29 09:27:29 +08001695 .twsk_destructor = tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08001696};
1697
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001698static const struct inet_connection_sock_af_ops ipv6_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001699 .queue_xmit = inet6_csk_xmit,
1700 .send_check = tcp_v6_send_check,
1701 .rebuild_header = inet6_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00001702 .sk_rx_dst_set = inet6_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001703 .conn_request = tcp_v6_conn_request,
1704 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001705 .net_header_len = sizeof(struct ipv6hdr),
Eric Dumazet67469602012-04-24 07:37:38 +00001706 .net_frag_header_len = sizeof(struct frag_hdr),
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001707 .setsockopt = ipv6_setsockopt,
1708 .getsockopt = ipv6_getsockopt,
1709 .addr2sockaddr = inet6_csk_addr2sockaddr,
1710 .sockaddr_len = sizeof(struct sockaddr_in6),
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001711#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001712 .compat_setsockopt = compat_ipv6_setsockopt,
1713 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001714#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001715 .mtu_reduced = tcp_v6_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716};
1717
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001718#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00001719static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001720 .md5_lookup = tcp_v6_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001721 .calc_md5_hash = tcp_v6_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001722 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001723};
David S. Millera9286302006-11-14 19:53:22 -08001724#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001725
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726/*
1727 * TCP over IPv4 via INET6 API
1728 */
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001729static const struct inet_connection_sock_af_ops ipv6_mapped = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001730 .queue_xmit = ip_queue_xmit,
1731 .send_check = tcp_v4_send_check,
1732 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet63d02d12012-08-09 14:11:00 +00001733 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001734 .conn_request = tcp_v6_conn_request,
1735 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001736 .net_header_len = sizeof(struct iphdr),
1737 .setsockopt = ipv6_setsockopt,
1738 .getsockopt = ipv6_getsockopt,
1739 .addr2sockaddr = inet6_csk_addr2sockaddr,
1740 .sockaddr_len = sizeof(struct sockaddr_in6),
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001741#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001742 .compat_setsockopt = compat_ipv6_setsockopt,
1743 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001744#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001745 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746};
1747
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001748#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00001749static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001750 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001751 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001752 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001753};
David S. Millera9286302006-11-14 19:53:22 -08001754#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001755
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756/* NOTE: A lot of things are set to zero explicitly by the call to
1757 * sk_alloc(), so they need not be done here.
1758 */
1759static int tcp_v6_init_sock(struct sock *sk)
1760{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001761 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762
Neal Cardwell900f65d2012-04-19 09:55:21 +00001763 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001765 icsk->icsk_af_ops = &ipv6_specific;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001767#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001768 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001769#endif
1770
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 return 0;
1772}
1773
Brian Haley7d06b2e2008-06-14 17:04:49 -07001774static void tcp_v6_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 tcp_v4_destroy_sock(sk);
Brian Haley7d06b2e2008-06-14 17:04:49 -07001777 inet6_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778}
1779
YOSHIFUJI Hideaki952a10b2007-04-21 20:13:44 +09001780#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781/* Proc filesystem TCPv6 sock list dumping. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001782static void get_openreq6(struct seq_file *seq,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001783 const struct request_sock *req, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784{
Eric Dumazetfa76ce732015-03-19 19:04:20 -07001785 long ttd = req->rsk_timer.expires - jiffies;
Eric Dumazet634fb9792013-10-09 15:21:29 -07001786 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1787 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
1789 if (ttd < 0)
1790 ttd = 0;
1791
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 seq_printf(seq,
1793 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001794 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 i,
1796 src->s6_addr32[0], src->s6_addr32[1],
1797 src->s6_addr32[2], src->s6_addr32[3],
Eric Dumazetb44084c2013-10-10 00:04:37 -07001798 inet_rsk(req)->ir_num,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 dest->s6_addr32[0], dest->s6_addr32[1],
1800 dest->s6_addr32[2], dest->s6_addr32[3],
Eric Dumazet634fb9792013-10-09 15:21:29 -07001801 ntohs(inet_rsk(req)->ir_rmt_port),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 TCP_SYN_RECV,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001803 0, 0, /* could print option size, but that is af dependent. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001804 1, /* timers active (only the expire timer) */
1805 jiffies_to_clock_t(ttd),
Eric Dumazete6c022a2012-10-27 23:16:46 +00001806 req->num_timeout,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001807 from_kuid_munged(seq_user_ns(seq),
1808 sock_i_uid(req->rsk_listener)),
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001809 0, /* non standard timer */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 0, /* open_requests have no inode */
1811 0, req);
1812}
1813
1814static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1815{
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001816 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 __u16 destp, srcp;
1818 int timer_active;
1819 unsigned long timer_expires;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001820 const struct inet_sock *inet = inet_sk(sp);
1821 const struct tcp_sock *tp = tcp_sk(sp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001822 const struct inet_connection_sock *icsk = inet_csk(sp);
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001823 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001824 int rx_queue;
1825 int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826
Eric Dumazetefe42082013-10-03 15:42:29 -07001827 dest = &sp->sk_v6_daddr;
1828 src = &sp->sk_v6_rcv_saddr;
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001829 destp = ntohs(inet->inet_dport);
1830 srcp = ntohs(inet->inet_sport);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001831
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001832 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
Yuchung Cheng57dde7f2017-01-12 22:11:33 -08001833 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001834 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 timer_active = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001836 timer_expires = icsk->icsk_timeout;
1837 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 timer_active = 4;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001839 timer_expires = icsk->icsk_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 } else if (timer_pending(&sp->sk_timer)) {
1841 timer_active = 2;
1842 timer_expires = sp->sk_timer.expires;
1843 } else {
1844 timer_active = 0;
1845 timer_expires = jiffies;
1846 }
1847
Yafang Shao986ffdf2017-12-20 11:12:52 +08001848 state = inet_sk_state_load(sp);
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001849 if (state == TCP_LISTEN)
1850 rx_queue = sp->sk_ack_backlog;
1851 else
1852 /* Because we don't lock the socket,
1853 * we might find a transient negative value.
1854 */
1855 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1856
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 seq_printf(seq,
1858 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001859 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 i,
1861 src->s6_addr32[0], src->s6_addr32[1],
1862 src->s6_addr32[2], src->s6_addr32[3], srcp,
1863 dest->s6_addr32[0], dest->s6_addr32[1],
1864 dest->s6_addr32[2], dest->s6_addr32[3], destp,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001865 state,
1866 tp->write_seq - tp->snd_una,
1867 rx_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 timer_active,
Eric Dumazeta399a802012-08-08 21:13:53 +00001869 jiffies_delta_to_clock_t(timer_expires - jiffies),
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001870 icsk->icsk_retransmits,
Eric W. Biedermana7cb5a42012-05-24 01:10:10 -06001871 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001872 icsk->icsk_probes_out,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873 sock_i_ino(sp),
Reshetova, Elena41c6d652017-06-30 13:08:01 +03001874 refcount_read(&sp->sk_refcnt), sp,
Stephen Hemminger7be87352008-06-27 20:00:19 -07001875 jiffies_to_clock_t(icsk->icsk_rto),
1876 jiffies_to_clock_t(icsk->icsk_ack.ato),
Wei Wang31954cd2019-01-25 10:53:19 -08001877 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
Ilpo Järvinen0b6a05c2009-09-15 01:30:10 -07001878 tp->snd_cwnd,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001879 state == TCP_LISTEN ?
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001880 fastopenq->max_qlen :
Yuchung Cheng0a672f742014-05-11 20:22:12 -07001881 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 );
1883}
1884
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001885static void get_timewait6_sock(struct seq_file *seq,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001886 struct inet_timewait_sock *tw, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887{
Eric Dumazet789f5582015-04-12 18:51:09 -07001888 long delta = tw->tw_timer.expires - jiffies;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001889 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 __u16 destp, srcp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891
Eric Dumazetefe42082013-10-03 15:42:29 -07001892 dest = &tw->tw_v6_daddr;
1893 src = &tw->tw_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 destp = ntohs(tw->tw_dport);
1895 srcp = ntohs(tw->tw_sport);
1896
1897 seq_printf(seq,
1898 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Dan Rosenberg71338aa2011-05-23 12:17:35 +00001899 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900 i,
1901 src->s6_addr32[0], src->s6_addr32[1],
1902 src->s6_addr32[2], src->s6_addr32[3], srcp,
1903 dest->s6_addr32[0], dest->s6_addr32[1],
1904 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1905 tw->tw_substate, 0, 0,
Eric Dumazeta399a802012-08-08 21:13:53 +00001906 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
Reshetova, Elena41c6d652017-06-30 13:08:01 +03001907 refcount_read(&tw->tw_refcnt), tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908}
1909
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910static int tcp6_seq_show(struct seq_file *seq, void *v)
1911{
1912 struct tcp_iter_state *st;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07001913 struct sock *sk = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914
1915 if (v == SEQ_START_TOKEN) {
1916 seq_puts(seq,
1917 " sl "
1918 "local_address "
1919 "remote_address "
1920 "st tx_queue rx_queue tr tm->when retrnsmt"
1921 " uid timeout inode\n");
1922 goto out;
1923 }
1924 st = seq->private;
1925
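	/* The listing mixes timewait socks, request socks (TCP_NEW_SYN_RECV)
	 * and full sockets; dispatch on sk_state to pick the right formatter.
	 */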
Eric Dumazet079096f2015-10-02 11:43:32 -07001926 if (sk->sk_state == TCP_TIME_WAIT)
1927 get_timewait6_sock(seq, v, st->num);
1928 else if (sk->sk_state == TCP_NEW_SYN_RECV)
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001929 get_openreq6(seq, v, st->num);
Eric Dumazet079096f2015-10-02 11:43:32 -07001930 else
1931 get_tcp6_sock(seq, v, st->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932out:
1933 return 0;
1934}
1935
Christoph Hellwig37d849b2018-04-11 09:31:28 +02001936static const struct seq_operations tcp6_seq_ops = {
1937 .show = tcp6_seq_show,
1938 .start = tcp_seq_start,
1939 .next = tcp_seq_next,
1940 .stop = tcp_seq_stop,
1941};
1942
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943static struct tcp_seq_afinfo tcp6_seq_afinfo = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 .family = AF_INET6,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945};
1946
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001947int __net_init tcp6_proc_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948{
Christoph Hellwigc3506372018-04-10 19:42:55 +02001949 if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
1950 sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
Christoph Hellwig37d849b2018-04-11 09:31:28 +02001951 return -ENOMEM;
1952 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953}
1954
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001955void tcp6_proc_exit(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02001957 remove_proc_entry("tcp6", net->proc_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958}
1959#endif
1960
1961struct proto tcpv6_prot = {
1962 .name = "TCPv6",
1963 .owner = THIS_MODULE,
1964 .close = tcp_close,
Andrey Ignatovd74bad42018-03-30 15:08:05 -07001965 .pre_connect = tcp_v6_pre_connect,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 .connect = tcp_v6_connect,
1967 .disconnect = tcp_disconnect,
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001968 .accept = inet_csk_accept,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 .ioctl = tcp_ioctl,
1970 .init = tcp_v6_init_sock,
1971 .destroy = tcp_v6_destroy_sock,
1972 .shutdown = tcp_shutdown,
1973 .setsockopt = tcp_setsockopt,
1974 .getsockopt = tcp_getsockopt,
Ursula Braun4b9d07a2017-01-09 16:55:12 +01001975 .keepalive = tcp_set_keepalive,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976 .recvmsg = tcp_recvmsg,
Changli Gao7ba42912010-07-10 20:41:55 +00001977 .sendmsg = tcp_sendmsg,
1978 .sendpage = tcp_sendpage,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 .backlog_rcv = tcp_v6_do_rcv,
Eric Dumazet46d3cea2012-07-11 05:50:31 +00001980 .release_cb = tcp_release_cb,
Craig Gallek496611d2016-02-10 11:50:36 -05001981 .hash = inet6_hash,
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001982 .unhash = inet_unhash,
1983 .get_port = inet_csk_get_port,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 .enter_memory_pressure = tcp_enter_memory_pressure,
Eric Dumazet06044752017-06-07 13:29:12 -07001985 .leave_memory_pressure = tcp_leave_memory_pressure,
Eric Dumazetc9bee3b72013-07-22 20:27:07 -07001986 .stream_memory_free = tcp_stream_memory_free,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 .sockets_allocated = &tcp_sockets_allocated,
1988 .memory_allocated = &tcp_memory_allocated,
1989 .memory_pressure = &tcp_memory_pressure,
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001990 .orphan_count = &tcp_orphan_count,
Eric W. Biedermana4fe34b2013-10-19 16:25:36 -07001991 .sysctl_mem = sysctl_tcp_mem,
Eric Dumazet356d1832017-11-07 00:29:28 -08001992 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
1993 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 .max_header = MAX_TCP_HEADER,
1995 .obj_size = sizeof(struct tcp6_sock),
Paul E. McKenney5f0d5a32017-01-18 02:53:44 -08001996 .slab_flags = SLAB_TYPESAFE_BY_RCU,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08001997 .twsk_prot = &tcp6_timewait_sock_ops,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001998 .rsk_prot = &tcp6_request_sock_ops,
Pavel Emelyanov39d8cda2008-03-22 16:50:58 -07001999 .h.hashinfo = &tcp_hashinfo,
Changli Gao7ba42912010-07-10 20:41:55 +00002000 .no_autobind = true,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002001#ifdef CONFIG_COMPAT
2002 .compat_setsockopt = compat_tcp_setsockopt,
2003 .compat_getsockopt = compat_tcp_getsockopt,
2004#endif
Lorenzo Colittic1e64e22015-12-16 12:30:05 +09002005 .diag_destroy = tcp_abort,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006};
2007
David Aherna8e3bb32017-08-28 15:14:20 -07002008/* thinking of making this const? Don't.
2009 * early_demux can change based on sysctl.
2010 */
Julia Lawall39294c32017-08-01 18:27:28 +02002011static struct inet6_protocol tcpv6_protocol = {
Eric Dumazetc7109982012-07-26 12:18:11 +00002012 .early_demux = tcp_v6_early_demux,
subashab@codeaurora.orgdddb64b2017-03-23 13:34:16 -06002013 .early_demux_handler = tcp_v6_early_demux,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 .handler = tcp_v6_rcv,
2015 .err_handler = tcp_v6_err,
2016 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2017};
2018
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019static struct inet_protosw tcpv6_protosw = {
2020 .type = SOCK_STREAM,
2021 .protocol = IPPROTO_TCP,
2022 .prot = &tcpv6_prot,
2023 .ops = &inet6_stream_ops,
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08002024 .flags = INET_PROTOSW_PERMANENT |
2025 INET_PROTOSW_ICSK,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026};
2027
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002028static int __net_init tcpv6_net_init(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002029{
Denis V. Lunev56772422008-04-03 14:28:30 -07002030 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2031 SOCK_RAW, IPPROTO_TCP, net);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002032}
2033
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002034static void __net_exit tcpv6_net_exit(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002035{
Denis V. Lunev56772422008-04-03 14:28:30 -07002036 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002037}
2038
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002039static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002040{
Haishuang Yan1946e672016-12-28 17:52:32 +08002041 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002042}
2043
2044static struct pernet_operations tcpv6_net_ops = {
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002045 .init = tcpv6_net_init,
2046 .exit = tcpv6_net_exit,
2047 .exit_batch = tcpv6_net_exit_batch,
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002048};
2049
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002050int __init tcpv6_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051{
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002052 int ret;
David Woodhouseae0f7d52006-01-11 15:53:04 -08002053
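	/* Register in order: the IPPROTO_TCP handler, the SOCK_STREAM
	 * protosw entry, then the per-netns control sockets; the labels
	 * below unwind in reverse on failure.
	 */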
Vlad Yasevich33362882012-11-15 08:49:15 +00002054 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2055 if (ret)
Vlad Yasevichc6b641a2012-11-15 08:49:22 +00002056 goto out;
Vlad Yasevich33362882012-11-15 08:49:15 +00002057
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002058 /* register inet6 protocol */
2059 ret = inet6_register_protosw(&tcpv6_protosw);
2060 if (ret)
2061 goto out_tcpv6_protocol;
2062
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002063 ret = register_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002064 if (ret)
2065 goto out_tcpv6_protosw;
2066out:
2067 return ret;
2068
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002069out_tcpv6_protosw:
2070 inet6_unregister_protosw(&tcpv6_protosw);
Vlad Yasevich33362882012-11-15 08:49:15 +00002071out_tcpv6_protocol:
2072 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002073 goto out;
2074}
2075
Daniel Lezcano09f77092007-12-13 05:34:58 -08002076void tcpv6_exit(void)
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002077{
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002078 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002079 inet6_unregister_protosw(&tcpv6_protosw);
2080 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081}